@@ -44,7 +44,7 @@ def round_st(inputs, offset=None):
 
 
 def soft_round(x, alpha, eps=1e-3):
-  """Differentiable approximation to round().
+  """Differentiable approximation to `round`.
 
   Larger alphas correspond to closer approximations of the round function.
   If alpha is close to zero, this function reduces to the identity.
@@ -55,12 +55,12 @@ def soft_round(x, alpha, eps=1e-3):
   > https://arxiv.org/abs/2006.09952
 
   Args:
-    x: tf.Tensor. Inputs to the rounding function.
-    alpha: Float or tf.Tensor. Controls smoothness of the approximation.
-    eps: Float. Threshold below which soft_round() will return identity.
+    x: `tf.Tensor`. Inputs to the rounding function.
+    alpha: Float or `tf.Tensor`. Controls smoothness of the approximation.
+    eps: Float. Threshold below which `soft_round` will return identity.
 
   Returns:
-    tf.Tensor
+    `tf.Tensor`
   """
   # This guards the gradient of tf.where below against NaNs, while maintaining
   # correctness, as for alpha < eps the result is ignored.
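
Reviewer note: the behaviour documented above (identity for small alpha, rounding for large alpha) follows directly from the soft-rounding formula in Sec. 4.1 of the paper. Below is a minimal standalone sketch of that formula for intuition only; `soft_round_sketch` is a hypothetical helper name, it omits the `eps` guard this file applies via `tf.where`, and it is not the library implementation.

```python
import tensorflow as tf


def soft_round_sketch(x, alpha):
  # Soft rounding as given in Sec. 4.1 of Agustsson & Theis (2020):
  #   s_alpha(x) = m + tanh(alpha * r) / (2 * tanh(alpha / 2)),
  # where m = floor(x) + 0.5 and r = x - m lies in [-0.5, 0.5).
  m = tf.floor(x) + .5
  r = x - m
  return m + tf.tanh(alpha * r) / (2. * tf.tanh(alpha / 2.))


x = tf.constant([0.2, 0.8, 1.4])
print(soft_round_sketch(x, alpha=0.1))  # close to x: near-identity for small alpha
print(soft_round_sketch(x, alpha=20.))  # close to tf.round(x), i.e. [0., 1., 1.]
```

For very small alpha the tanh ratio becomes numerically delicate, which is consistent with the `eps` guard and the NaN comment visible in the context lines above.
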
@@ -76,21 +76,21 @@ def soft_round(x, alpha, eps=1e-3):
 
 
 def soft_round_inverse(y, alpha, eps=1e-3):
-  """Inverse of soft_round().
+  """Inverse of `soft_round`.
 
   This is described in Sec. 4.1. in the paper
   > "Universally Quantized Neural Compression"<br />
   > Eirikur Agustsson & Lucas Theis<br />
   > https://arxiv.org/abs/2006.09952
 
   Args:
-    y: tf.Tensor. Inputs to this function.
-    alpha: Float or tf.Tensor. Controls smoothness of the approximation.
-    eps: Float. Threshold below which soft_round() is assumed to equal the
+    y: `tf.Tensor`. Inputs to this function.
+    alpha: Float or `tf.Tensor`. Controls smoothness of the approximation.
+    eps: Float. Threshold below which `soft_round` is assumed to equal the
       identity function.
 
   Returns:
-    tf.Tensor
+    `tf.Tensor`
   """
   # This guards the gradient of tf.where below against NaNs, while maintaining
   # correctness, as for alpha < eps the result is ignored.
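
Reviewer note: for reference, the inverse can be obtained by solving y = m + tanh(alpha * r) / (2 * tanh(alpha / 2)) for r on each unit interval. The sketch below is a hypothetical standalone re-derivation (the `soft_round_sketch` / `soft_round_inverse_sketch` names are mine, and the `eps` identity guard is omitted), not the library code.

```python
import tensorflow as tf


def soft_round_sketch(x, alpha):
  m = tf.floor(x) + .5
  r = x - m
  return m + tf.tanh(alpha * r) / (2. * tf.tanh(alpha / 2.))


def soft_round_inverse_sketch(y, alpha):
  # Soft rounding maps each interval [n, n + 1) onto itself, so the offset
  # m = floor(y) + 0.5 can be read off y directly; then invert the tanh
  # ratio to recover r.
  m = tf.floor(y) + .5
  r = tf.math.atanh(2. * tf.tanh(alpha / 2.) * (y - m)) / alpha
  return m + r


x = tf.constant([0.3, 1.7, -2.25])
y = soft_round_sketch(x, alpha=5.)
print(soft_round_inverse_sketch(y, alpha=5.))  # recovers x up to float error
```
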
@@ -108,11 +108,11 @@ def soft_round_inverse(y, alpha, eps=1e-3):
   return tf.where(alpha < eps, y, m + r, name="soft_round_inverse")
 
 
-def soft_round_conditional_mean(inputs, alpha):
+def soft_round_conditional_mean(y, alpha):
   """Conditional mean of inputs given noisy soft rounded values.
 
   Computes g(z) = E[Y | s(Y) + U = z] where s is the soft-rounding function,
-  U is uniform between -0.5 and 0.5 and `Y` is considered uniform when truncated
+  U is uniform between -0.5 and 0.5 and Y is considered uniform when truncated
   to the interval [z-0.5, z+0.5].
 
   This is described in Sec. 4.1. in the paper
@@ -121,10 +121,10 @@ def soft_round_conditional_mean(inputs, alpha):
   > https://arxiv.org/abs/2006.09952
 
   Args:
-    inputs: The input tensor.
-    alpha: The softround alpha.
+    y: `tf.Tensor`. Inputs to this function.
+    alpha: Float or `tf.Tensor`. Controls smoothness of the approximation.
 
   Returns:
     The conditional mean, of same shape as `inputs`.
   """
-  return soft_round_inverse(inputs - .5, alpha) + .5
+  return soft_round_inverse(y - .5, alpha) + .5
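
Reviewer note: one way to see why the one-line body works is that soft rounding commutes with integer shifts, so the preimage of [z - 0.5, z + 0.5] is the unit-length interval [s^-1(z - 0.5), s^-1(z - 0.5) + 1]; the mean of a uniform variable on that interval is its midpoint, which is exactly `soft_round_inverse(y - .5, alpha) + .5`. The Monte Carlo sanity check below is only a sketch under assumed conditions (a flat prior on Y over [-3, 3], a small conditioning window, and my own hypothetical `*_sketch` helpers rather than the library functions).

```python
import tensorflow as tf


def soft_round_sketch(x, alpha):
  m = tf.floor(x) + .5
  r = x - m
  return m + tf.tanh(alpha * r) / (2. * tf.tanh(alpha / 2.))


def soft_round_inverse_sketch(y, alpha):
  m = tf.floor(y) + .5
  return m + tf.math.atanh(2. * tf.tanh(alpha / 2.) * (y - m)) / alpha


alpha, z0 = 4., 1.3
y = tf.random.uniform([1_000_000], -3., 3.)  # assumed flat prior on Y
u = tf.random.uniform([1_000_000], -.5, .5)  # additive noise U ~ Uniform(-.5, .5)
z = soft_round_sketch(y, alpha) + u          # noisy soft-rounded values

# Monte Carlo estimate of E[Y | s(Y) + U close to z0] vs. the closed form.
near_z0 = tf.abs(z - z0) < 5e-3
mc_mean = tf.reduce_mean(tf.boolean_mask(y, near_z0))
closed_form = soft_round_inverse_sketch(z0 - .5, alpha) + .5
print(float(mc_mean), float(closed_form))    # the two should roughly agree
```

This mirrors the docstring: Y is treated as uniform on the relevant unit-length interval, so its conditional mean is the interval's midpoint.
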