@@ -19,24 +19,26 @@
 from __future__ import division
 from __future__ import print_function
 
-import numpy as np
+# Dependency imports
 
-from tensorflow_compression.python.layers import entropybottleneck
+import numpy as np
 
 from tensorflow.python.framework import dtypes
 from tensorflow.python.ops import array_ops
 from tensorflow.python.ops import math_ops
 from tensorflow.python.ops import variables
 from tensorflow.python.platform import test
 from tensorflow.python.training import gradient_descent
 
+from tensorflow_compression.python.layers import entropy_models
+
 
 class EntropyBottleneckTest(test.TestCase):
 
   def test_noise(self):
     # Tests that the noise added is uniform noise between -0.5 and 0.5.
     inputs = array_ops.placeholder(dtypes.float32, (None, 1))
-    layer = entropybottleneck.EntropyBottleneck()
+    layer = entropy_models.EntropyBottleneck()
     noisy, _ = layer(inputs, training=True)
     with self.test_session() as sess:
       sess.run(variables.global_variables_initializer())
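test_noise checks the training-time relaxation: rather than rounding, the layer perturbs its inputs with uniform noise, so the result stays within half a unit of the input. A minimal self-contained NumPy sketch of that bound, independent of the layer itself:

```python
import numpy as np

# Additive uniform noise in [-0.5, 0.5) as a differentiable stand-in for
# rounding: the perturbed value stays within half a unit of the input,
# which is the bound test_noise asserts.
rng = np.random.default_rng(0)
x = rng.normal(size=(1000, 1))
noisy = x + rng.uniform(-0.5, 0.5, size=x.shape)
assert np.all(np.abs(noisy - x) <= 0.5)
```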
@@ -49,7 +51,7 @@ def test_quantization(self):
     # Tests that inputs are quantized to full integer values, even after
     # quantiles have been updated.
     inputs = array_ops.placeholder(dtypes.float32, (None, 1))
-    layer = entropybottleneck.EntropyBottleneck(optimize_integer_offset=False)
+    layer = entropy_models.EntropyBottleneck(optimize_integer_offset=False)
     quantized, _ = layer(inputs, training=False)
     opt = gradient_descent.GradientDescentOptimizer(learning_rate=1)
     self.assertTrue(len(layer.losses) == 1)
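With `optimize_integer_offset=False`, evaluation-time quantization must land on whole integers no matter how the quantiles move under the optimizer, which is what test_quantization verifies. The invariant itself, sketched in plain NumPy:

```python
import numpy as np

x = np.array([[-1.3], [0.4], [2.7]], dtype=np.float32)
q = np.round(x)                   # grid anchored at the integers
assert np.all(q == np.floor(q))   # every quantized value is a whole integer
```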
@@ -66,7 +68,7 @@ def test_quantization_optimized_offset(self):
     # have been updated. However, the difference between input and output should
     # be between -0.5 and 0.5, and the offset must be consistent.
     inputs = array_ops.placeholder(dtypes.float32, (None, 1))
-    layer = entropybottleneck.EntropyBottleneck(optimize_integer_offset=True)
+    layer = entropy_models.EntropyBottleneck(optimize_integer_offset=True)
     quantized, _ = layer(inputs, training=False)
     opt = gradient_descent.GradientDescentOptimizer(learning_rate=1)
     self.assertTrue(len(layer.losses) == 1)
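With `optimize_integer_offset=True`, the integer grid is shifted by a learned offset, so outputs are generally not whole integers; the test instead checks that the error stays within half a unit and that the offset is the same for every input. A NumPy sketch with a hypothetical offset `o` (name and value are illustrative, not taken from the layer):

```python
import numpy as np

x = np.linspace(-3.0, 3.0, 13)
o = 0.37                              # hypothetical offset in [-0.5, 0.5)
q = np.round(x - o) + o               # quantize onto the shifted grid
assert np.all(np.abs(q - x) <= 0.5)   # error bounded by half a step
fractional = np.mod(q, 1)             # offset is consistent across inputs
assert np.allclose(fractional, fractional[0])
```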
@@ -85,7 +87,7 @@ def test_codec(self):
     # Tests that inputs are compressed and decompressed correctly, and quantized
     # to full integer values, even after quantiles have been updated.
     inputs = array_ops.placeholder(dtypes.float32, (1, None, 1))
-    layer = entropybottleneck.EntropyBottleneck(
+    layer = entropy_models.EntropyBottleneck(
         data_format="channels_last", init_scale=60,
         optimize_integer_offset=False)
     bitstrings = layer.compress(inputs)
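For reference, a round-trip sketch of the compress/decompress API as the codec tests exercise it, assuming a TF1 runtime with the tensorflow_compression package installed (input values and shapes are illustrative):

```python
import numpy as np
import tensorflow as tf  # assumes a TF1 runtime, matching this test file
from tensorflow_compression.python.layers import entropy_models

inputs = tf.placeholder(tf.float32, (1, None, 1))
layer = entropy_models.EntropyBottleneck(
    data_format="channels_last", init_scale=60)
bitstrings = layer.compress(inputs)                           # encode
decoded = layer.decompress(bitstrings, tf.shape(inputs)[1:])  # decode

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  values = np.random.uniform(-50, 50, (1, 100, 1)).astype(np.float32)
  decoded_values = sess.run(decoded, {inputs: values})
  # The round trip is lossy only through quantization: the values lie
  # inside init_scale, so nothing is clipped and the error is at most 0.5.
  assert np.all(np.abs(decoded_values - values) <= 0.5)
```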
@@ -108,7 +110,7 @@ def test_codec_optimized_offset(self):
     # However, the difference between input and output should be between -0.5
     # and 0.5, and the offset must be consistent.
     inputs = array_ops.placeholder(dtypes.float32, (1, None, 1))
-    layer = entropybottleneck.EntropyBottleneck(
+    layer = entropy_models.EntropyBottleneck(
         data_format="channels_last", init_scale=60,
         optimize_integer_offset=True)
     bitstrings = layer.compress(inputs)
@@ -132,7 +134,7 @@ def test_codec_clipping(self):
     # Tests that inputs are compressed and decompressed correctly, and clipped
     # to the expected range.
     inputs = array_ops.placeholder(dtypes.float32, (1, None, 1))
-    layer = entropybottleneck.EntropyBottleneck(
+    layer = entropy_models.EntropyBottleneck(
         data_format="channels_last", init_scale=40)
     bitstrings = layer.compress(inputs)
     decoded = layer.decompress(bitstrings, array_ops.shape(inputs)[1:])
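test_codec_clipping covers the complementary case: values outside the range the density model was built for cannot be represented by the coder's finite CDF table, so they are clipped into range before coding. A hedged sketch of the resulting invariant (the exact bounds depend on the learned quantiles; ±init_scale here is only illustrative):

```python
import numpy as np

init_scale = 40
x = np.array([-60.0, -10.0, 0.0, 10.0, 60.0])
# Illustrative only: clip out-of-range values onto the representable grid.
decoded = np.clip(np.round(x), -init_scale, init_scale)
assert decoded.min() >= -init_scale and decoded.max() <= init_scale
```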
@@ -149,7 +151,7 @@ def test_channels_last(self):
     # Test the layer with more than one channel and multiple input dimensions,
     # with the channels in the last dimension.
     inputs = array_ops.placeholder(dtypes.float32, (None, None, None, 2))
-    layer = entropybottleneck.EntropyBottleneck(
+    layer = entropy_models.EntropyBottleneck(
         data_format="channels_last", init_scale=50)
     noisy, _ = layer(inputs, training=True)
     quantized, _ = layer(inputs, training=False)
@@ -170,7 +172,7 @@ def test_channels_first(self):
     # Test the layer with more than one channel and multiple input dimensions,
     # with the channel dimension right after the batch dimension.
     inputs = array_ops.placeholder(dtypes.float32, (None, 3, None, None))
-    layer = entropybottleneck.EntropyBottleneck(
+    layer = entropy_models.EntropyBottleneck(
         data_format="channels_first", init_scale=50)
     noisy, _ = layer(inputs, training=True)
     quantized, _ = layer(inputs, training=False)
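The two data_format tests are the same check with the channel axis in different positions; each channel gets its own learned density, and only the axis the layer treats as channels differs. A shape-level sketch:

```python
import numpy as np

x_last = np.zeros((4, 8, 8, 2), dtype=np.float32)  # channels_last (NHWC)
x_first = np.moveaxis(x_last, -1, 1)               # channels_first (NCHW)
assert x_first.shape == (4, 2, 8, 8)
# Both layouts describe 2 channels; the layer must model each channel
# independently regardless of where that axis sits.
```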
@@ -192,7 +194,7 @@ def test_compress(self):
     # `test_decompress`. If you set the constant at the end to `True`, this test
     # will fail and the log will contain the new test data.
     inputs = array_ops.placeholder(dtypes.float32, (2, 3, 10))
-    layer = entropybottleneck.EntropyBottleneck(
+    layer = entropy_models.EntropyBottleneck(
         data_format="channels_first", filters=(), init_scale=2)
     bitstrings = layer.compress(inputs)
     decoded = layer.decompress(bitstrings, array_ops.shape(inputs)[1:])
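test_compress is a golden-data test: expected bitstrings are stored in the test itself, and a constant at the end can be flipped to make the test fail while logging freshly generated data for updating. A generic sketch of that pattern (names are illustrative, not from this file):

```python
UPDATE_GOLDEN = False  # flip to True to regenerate the stored expectations

def check_golden(actual, expected):
  if UPDATE_GOLDEN:
    # Deliberately fail so the log carries the new data to paste back in.
    raise AssertionError("new golden data: %r" % (actual,))
  assert actual == expected
```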
@@ -237,7 +239,7 @@ def test_decompress(self):
     bitstrings = array_ops.placeholder(dtypes.string)
     input_shape = array_ops.placeholder(dtypes.int32)
     quantized_cdf = array_ops.placeholder(dtypes.int32)
-    layer = entropybottleneck.EntropyBottleneck(
+    layer = entropy_models.EntropyBottleneck(
         data_format="channels_first", filters=(), dtype=dtypes.float32)
     layer.build(self.expected.shape)
     layer._quantized_cdf = quantized_cdf
@@ -253,13 +255,13 @@ def test_build_decompress(self):
     # Test that layer can be built when `decompress` is the first call to it.
     bitstrings = array_ops.placeholder(dtypes.string)
     input_shape = array_ops.placeholder(dtypes.int32, shape=[3])
-    layer = entropybottleneck.EntropyBottleneck(dtype=dtypes.float32)
+    layer = entropy_models.EntropyBottleneck(dtype=dtypes.float32)
     layer.decompress(bitstrings, input_shape[1:], channels=5)
     self.assertTrue(layer.built)
 
   def test_pmf_normalization(self):
     # Test that probability mass functions are normalized correctly.
-    layer = entropybottleneck.EntropyBottleneck(dtype=dtypes.float32)
+    layer = entropy_models.EntropyBottleneck(dtype=dtypes.float32)
     layer.build((None, 10))
     with self.test_session() as sess:
       sess.run(variables.global_variables_initializer())
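test_pmf_normalization checks that the probability mass function handed to the range coder sums to one per channel. A NumPy sketch of one way to normalize, folding leftover (tail) probability into a single overflow bin; treating the tail as one extra bin is an assumption for illustration, not necessarily how the layer lays out its table:

```python
import numpy as np

pmf = np.array([0.1, 0.5, 0.3])    # mass on the modeled bins
tail = max(1.0 - pmf.sum(), 0.0)   # probability left over for outliers
full = np.append(pmf, tail)        # fold the tail into one bin
full /= full.sum()                 # exact normalization
assert np.isclose(full.sum(), 1.0)
```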
@@ -268,7 +270,7 @@ def test_pmf_normalization(self):
 
   def test_visualize(self):
     # Test that summary op can be constructed.
-    layer = entropybottleneck.EntropyBottleneck(dtype=dtypes.float32)
+    layer = entropy_models.EntropyBottleneck(dtype=dtypes.float32)
     layer.build((None, 10))
     summary = layer.visualize()
     with self.test_session() as sess:
@@ -278,7 +280,7 @@ def test_normalization(self):
   def test_normalization(self):
     # Test that densities are normalized correctly.
     inputs = array_ops.placeholder(dtypes.float32, (None, 1))
-    layer = entropybottleneck.EntropyBottleneck(filters=(2,))
+    layer = entropy_models.EntropyBottleneck(filters=(2,))
     _, likelihood = layer(inputs, training=True)
     with self.test_session() as sess:
       sess.run(variables.global_variables_initializer())
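test_normalization does the continuous analogue: a proper density must integrate to one, which the test approximates by summing likelihoods over a fine grid. The same check with a standard normal standing in for the learned density:

```python
import numpy as np

x = np.linspace(-10.0, 10.0, 10001)
dx = x[1] - x[0]
density = np.exp(-0.5 * x ** 2) / np.sqrt(2.0 * np.pi)  # stand-in density
integral = density.sum() * dx                           # Riemann sum
assert abs(integral - 1.0) < 1e-6
```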
@@ -291,7 +293,7 @@ def test_entropy_estimates(self):
   def test_entropy_estimates(self):
     # Test that entropy estimates match actual range coding.
     inputs = array_ops.placeholder(dtypes.float32, (1, None, 1))
-    layer = entropybottleneck.EntropyBottleneck(
+    layer = entropy_models.EntropyBottleneck(
         filters=(2, 3), data_format="channels_last")
     _, likelihood = layer(inputs, training=True)
     diff_entropy = math_ops.reduce_sum(math_ops.log(likelihood)) / -np.log(2)
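The last visible line converts the summed log-likelihood from nats to bits (dividing by -log 2), turning it into an entropy estimate directly comparable to the length of the range-coded bitstring. The arithmetic on a toy example:

```python
import numpy as np

likelihood = np.array([0.5, 0.25, 0.25])  # per-symbol model probabilities
bits = np.sum(np.log(likelihood)) / -np.log(2)
# -log2(0.5) - log2(0.25) - log2(0.25) = 1 + 2 + 2 = 5 bits
assert np.isclose(bits, 5.0)
# An ideal range coder spends about this many bits, so the test can compare
# the estimate against 8 * len(bitstring).
```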