Skip to content

Commit 06fc0be

Browse files
arjung authored and tensorflow-copybara committed
Several updates to tutorials.
- Add buttons for docs compliance. Only the tutorials shown on tensorflow.org/neural_structured_learning have the 'download' button. - Update the license header to include 'The TensorFlow Neural Structured Learning Authors' for tutorials shown on tf.org for docs compliance. - Don't include sigmoid/softmax as an activation function for the last layer of the model. Instead change the loss function so that it operates on unscaled logits to avoid numerical instability. - Update graph_keras_cnn_flowers.ipynb to accept named inputs. This tutorial was broken before. - Update adversarial_cnn_transfer_learning_fashionmnist.ipynb to use tf.image.resize instead of cv2.resize because the latter doesn't exist in certain environments by default. Also fix an undefined variable error in one of the functions. PiperOrigin-RevId: 419622832
1 parent dbbe59b commit 06fc0be

File tree

5 files changed

+441
-588
lines changed

5 files changed

+441
-588
lines changed

g3doc/tutorials/adversarial_keras_cnn_mnist.ipynb

Lines changed: 17 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@
66
"id": "ZwZNOAMZcxl3"
77
},
88
"source": [
9-
"##### Copyright 2019 Google LLC"
9+
"##### Copyright 2019 The TensorFlow Neural Structured Learning Authors"
1010
]
1111
},
1212
{
@@ -56,6 +56,9 @@
5656
" \u003ctd\u003e\n",
5757
" \u003ca target=\"_blank\" href=\"https://github.com/tensorflow/neural-structured-learning/blob/master/g3doc/tutorials/adversarial_keras_cnn_mnist.ipynb\"\u003e\u003cimg src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" /\u003eView source on GitHub\u003c/a\u003e\n",
5858
" \u003c/td\u003e\n",
59+
" \u003ctd\u003e\n",
60+
" \u003ca href=\"https://storage.googleapis.com/tensorflow_docs/neural-structured-learning/g3doc/tutorials/adversarial_keras_cnn_mnist.ipynb\"\u003e\u003cimg src=\"https://www.tensorflow.org/images/download_logo_32px.png\" /\u003eDownload notebook\u003c/a\u003e\n",
61+
" \u003c/td\u003e\n",
5962
"\u003c/table\u003e"
6063
]
6164
},
@@ -404,7 +407,7 @@
404407
" x = tf.keras.layers.Flatten()(x)\n",
405408
" for num_units in hparams.num_fc_units:\n",
406409
" x = tf.keras.layers.Dense(num_units, activation='relu')(x)\n",
407-
" pred = tf.keras.layers.Dense(hparams.num_classes, activation='softmax')(x)\n",
410+
" pred = tf.keras.layers.Dense(hparams.num_classes)(x)\n",
408411
" model = tf.keras.Model(inputs=inputs, outputs=pred)\n",
409412
" return model"
410413
]
@@ -438,8 +441,10 @@
438441
},
439442
"outputs": [],
440443
"source": [
441-
"base_model.compile(optimizer='adam', loss='sparse_categorical_crossentropy',\n",
442-
" metrics=['acc'])\n",
444+
"base_model.compile(\n",
445+
" optimizer='adam',\n",
446+
" loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n",
447+
" metrics=['acc'])\n",
443448
"base_model.fit(train_dataset, epochs=HPARAMS.epochs)"
444449
]
445450
},
@@ -565,8 +570,10 @@
565570
},
566571
"outputs": [],
567572
"source": [
568-
"adv_model.compile(optimizer='adam', loss='sparse_categorical_crossentropy',\n",
569-
" metrics=['acc'])\n",
573+
"adv_model.compile(\n",
574+
" optimizer='adam',\n",
575+
" loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n",
576+
" metrics=['acc'])\n",
570577
"adv_model.fit(train_set_for_adv_model, epochs=HPARAMS.epochs)"
571578
]
572579
},
@@ -620,12 +627,10 @@
620627
"outputs": [],
621628
"source": [
622629
"reference_model = nsl.keras.AdversarialRegularization(\n",
623-
" base_model,\n",
624-
" label_keys=[LABEL_INPUT_NAME],\n",
625-
" adv_config=adv_config)\n",
630+
" base_model, label_keys=[LABEL_INPUT_NAME], adv_config=adv_config)\n",
626631
"reference_model.compile(\n",
627632
" optimizer='adam',\n",
628-
" loss='sparse_categorical_crossentropy',\n",
633+
" loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n",
629634
" metrics=['acc'])"
630635
]
631636
},
@@ -685,7 +690,7 @@
685690
"for batch in test_set_for_adv_model:\n",
686691
" perturbed_batch = reference_model.perturb_on_batch(batch)\n",
687692
" # Clipping makes perturbed examples have the same range as regular ones.\n",
688-
" perturbed_batch[IMAGE_INPUT_NAME] = tf.clip_by_value( \n",
693+
" perturbed_batch[IMAGE_INPUT_NAME] = tf.clip_by_value(\n",
689694
" perturbed_batch[IMAGE_INPUT_NAME], 0.0, 1.0)\n",
690695
" y_true = perturbed_batch.pop(LABEL_INPUT_NAME)\n",
691696
" perturbed_images.append(perturbed_batch[IMAGE_INPUT_NAME].numpy())\n",
@@ -742,7 +747,7 @@
742747
"\n",
743748
"batch_size = HPARAMS.batch_size\n",
744749
"n_col = 4\n",
745-
"n_row = (batch_size + n_col - 1) / n_col\n",
750+
"n_row = (batch_size + n_col - 1) // n_col\n",
746751
"\n",
747752
"print('accuracy in batch %d:' % batch_index)\n",
748753
"for name, pred in batch_pred.items():\n",

0 commit comments

Comments (0)