Skip to content

Commit f8d26f2

Browse files
committed
use preview 6.1
1 parent aee1790 commit f8d26f2

File tree

13 files changed

+14
-13
lines changed

13 files changed

+14
-13
lines changed

BasicMath/BasicMath.csproj

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88
</PropertyGroup>
99

1010
<ItemGroup>
11-
<PackageReference Include="Gradient" Version="0.1.10-tech-preview6" />
11+
<PackageReference Include="Gradient" Version="0.1.10-tech-preview6.1" />
1212
</ItemGroup>
1313

1414
</Project>

CharRNN/CharRNN.csproj

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@
1515

1616
<ItemGroup>
1717
<PackageReference Include="CommandLineParser" Version="2.3.0" />
18-
<PackageReference Include="Gradient" Version="0.1.10-tech-preview6" />
18+
<PackageReference Include="Gradient" Version="0.1.10-tech-preview6.1" />
1919
<PackageReference Include="Newtonsoft.Json" Version="12.0.1" />
2020
</ItemGroup>
2121

FSharp/BasicMathF/BasicMathF.fsproj

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@
1111

1212
<ItemGroup>
1313
<PackageReference Include="FSharp.Interop.Dynamic" Version="4.0.3.130" />
14-
<PackageReference Include="Gradient" Version="0.1.10-tech-preview6" />
14+
<PackageReference Include="Gradient" Version="0.1.10-tech-preview6.1" />
1515
</ItemGroup>
1616

1717
</Project>

FSharp/FashionMnistF/FashionMnistF.fsproj

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,6 @@
1111

1212
<ItemGroup>
1313
<PackageReference Include="FSharp.Interop.Dynamic" Version="4.0.3.130" />
14-
<PackageReference Include="Gradient" Version="0.1.10-tech-preview6" />
14+
<PackageReference Include="Gradient" Version="0.1.10-tech-preview6.1" />
1515
</ItemGroup>
1616
</Project>

FashionMnistClassification/FashionMnistClassification.csproj

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99
</PropertyGroup>
1010

1111
<ItemGroup>
12-
<PackageReference Include="Gradient" Version="0.1.10-tech-preview6" />
12+
<PackageReference Include="Gradient" Version="0.1.10-tech-preview6.1" />
1313
</ItemGroup>
1414

1515
</Project>

GPT-2/GPT-2.csproj

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@
2121

2222
<ItemGroup>
2323
<PackageReference Include="CsvHelper" Version="12.1.2" />
24-
<PackageReference Include="Gradient" Version="0.1.10-tech-preview6" />
24+
<PackageReference Include="Gradient" Version="0.1.10-tech-preview6.1" />
2525
<PackageReference Include="ManyConsole.CommandLineUtils" Version="1.0.3-alpha" />
2626
<PackageReference Include="morelinq" Version="3.1.0" />
2727
<PackageReference Include="Newtonsoft.Json" Version="12.0.1" />

GPT-2/Gpt2Model.cs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,7 @@ static Tensor Softmax(Tensor input, int axis = -1)
3939
}
4040

4141
static Tensor GeLU(Tensor input) =>
42-
((dynamic)input * 0.5) * (tf.tanh_dyn((input + tf.pow(input, 3) * 0.044715) * Math.Sqrt(2 / Math.PI)) + 1);
42+
((dynamic)input * 0.5) * (tf.tanh((input + tf.pow(input, 3) * 0.044715) * Math.Sqrt(2 / Math.PI)) + 1);
4343

4444
/// <summary>
4545
/// Normalize to mean = 0, std = 1, then do a diagonal affine transform.

GPT-2/Gpt2Sampler.cs

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@
77
using numpy;
88
using tensorflow;
99
using tensorflow.contrib.training;
10+
using tensorflow.python.framework.dtypes;
1011
using tensorflow.python.ops.variable_scope;
1112

1213
public static class Gpt2Sampler
@@ -67,7 +68,7 @@ SortedDictionary<string, dynamic> Step(HParams @params, Tensor tokens, dynamic p
6768
Tensor[] Body(object past, dynamic prev, object output)
6869
{
6970
var nextOutputs = Step(hParams, prev[Range.All, tf.newaxis], past: past);
70-
Tensor logits = nextOutputs["logits"][Range.All, -1, Range.All] / tf.to_float(temperature);
71+
Tensor logits = nextOutputs["logits"][Range.All, -1, Range.All] / tf.constant(temperature, dtypes.float32_ref);
7172
logits = TopLogits(logits, topK: topK);
7273
var samples = tf.multinomial_dyn(logits, num_samples: 1, output_dtype: tf.int32);
7374
return new Tensor[]

LinearSVM/LinearSVM.csproj

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77
</PropertyGroup>
88

99
<ItemGroup>
10-
<PackageReference Include="Gradient" Version="0.1.10-tech-preview6" />
10+
<PackageReference Include="Gradient" Version="0.1.10-tech-preview6.1" />
1111
<PackageReference Include="ManyConsole.CommandLineUtils" Version="1.0.3-alpha" />
1212
</ItemGroup>
1313

LinearSVM/LinearSvmProgram.cs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -109,7 +109,7 @@ public int Run()
109109
dynamic Loss(dynamic W, dynamic b, dynamic inputData, dynamic targetData) {
110110
var logits = tf.subtract(tf.matmul(inputData, W), b);
111111
var normTerm = tf.divide(tf.reduce_sum(tf.multiply(tf.transpose(W), W)), 2);
112-
var classificationLoss = tf.reduce_mean(tf.maximum(0.0, tf.subtract(this.flags.Delta, tf.multiply(logits, targetData))));
112+
var classificationLoss = tf.reduce_mean(tf.maximum(tf.constant(0.0), tf.subtract(this.flags.Delta, tf.multiply(logits, targetData))));
113113
var totalLoss = tf.add_dyn(tf.multiply(this.flags.C, classificationLoss), tf.multiply(this.flags.Reg, normTerm));
114114
return totalLoss;
115115
}

0 commit comments

Comments (0)