|
150 | 150 | "%pip install --upgrade --quiet google-genai pandas google-cloud-storage google-cloud-bigquery" |
151 | 151 | ] |
152 | 152 | }, |
153 | | - { |
154 | | - "cell_type": "markdown", |
155 | | - "metadata": { |
156 | | - "id": "R5Xep4W9lq-Z" |
157 | | - }, |
158 | | - "source": [ |
159 | | - "### Restart runtime\n", |
160 | | - "\n", |
161 | | - "To use the newly installed packages in this Jupyter runtime, you must restart the runtime. You can do this by running the cell below, which restarts the current kernel.\n", |
162 | | - "\n", |
163 | | - "The restart might take a minute or longer. After it's restarted, continue to the next step." |
164 | | - ] |
165 | | - }, |
166 | | - { |
167 | | - "cell_type": "code", |
168 | | - "execution_count": null, |
169 | | - "metadata": { |
170 | | - "id": "XRvKdaPDTznN" |
171 | | - }, |
172 | | - "outputs": [], |
173 | | - "source": [ |
174 | | - "import IPython\n", |
175 | | - "\n", |
176 | | - "app = IPython.Application.instance()\n", |
177 | | - "app.kernel.do_shutdown(True)" |
178 | | - ] |
179 | | - }, |
180 | | - { |
181 | | - "cell_type": "markdown", |
182 | | - "metadata": { |
183 | | - "id": "SbmM4z7FOBpM" |
184 | | - }, |
185 | | - "source": [ |
186 | | - "<div class=\"alert alert-block alert-warning\">\n", |
187 | | - "<b>⚠️ The kernel is going to restart. Wait until it's finished before continuing to the next step. ⚠️</b>\n", |
188 | | - "</div>\n" |
189 | | - ] |
190 | | - }, |
191 | 153 | { |
192 | 154 | "cell_type": "markdown", |
193 | 155 | "metadata": { |
|
269 | 231 | "if not PROJECT_ID or PROJECT_ID == \"[your-project-id]\":\n", |
270 | 232 | " PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n", |
271 | 233 | "\n", |
272 | | - "LOCATION = os.environ.get(\"GOOGLE_CLOUD_REGION\", \"us-central1\")" |
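| 234 | + "# \"global\" routes requests to the Gemini global endpoint; the BigQuery section below still pins a regional endpoint\n",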
| 234 | + "LOCATION = os.environ.get(\"GOOGLE_CLOUD_REGION\", \"global\")" |
273 | 235 | ] |
274 | 236 | }, |
275 | 237 | { |
|
293 | 255 | "\n", |
294 | 256 | "You can find a list of the Gemini models that support batch predictions in the [Multimodal models that support batch predictions](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/batch-prediction-gemini#multimodal_models_that_support_batch_predictions) page.\n", |
295 | 257 | "\n", |
296 | | - "This tutorial uses Gemini 2.0 Flash (`gemini-2.0-flash-001`) model." |
| 258 | + "This tutorial uses Gemini 2.5 Flash (`gemini-2.5-flash`) model." |
297 | 259 | ] |
298 | 260 | }, |
299 | 261 | { |
|
304 | 266 | }, |
305 | 267 | "outputs": [], |
306 | 268 | "source": [ |
307 | | - "MODEL_ID = \"gemini-2.0-flash-001\" # @param {type:\"string\", isTemplate: true}" |
| 269 | + "MODEL_ID = \"gemini-2.5-flash\" # @param {type:\"string\", isTemplate: true}" |
308 | 270 | ] |
309 | 271 | }, |
310 | 272 | { |
|
378 | 340 | "outputs": [], |
379 | 341 | "source": [ |
380 | 342 | "BUCKET_URI = \"[your-cloud-storage-bucket]\" # @param {type:\"string\"}\n", |
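| 343 | + "# Cloud Storage buckets require a concrete location ('global' is not a valid bucket location)\n",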
| 343 | + "GCS_LOCATION = \"us-central1\" # @param {type:\"string\"}\n", |
381 | 344 | "\n", |
382 | 345 | "if BUCKET_URI == \"[your-cloud-storage-bucket]\":\n", |
383 | 346 | " TIMESTAMP = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n", |
384 | 347 | " BUCKET_URI = f\"gs://{PROJECT_ID}-{TIMESTAMP}\"\n", |
385 | 348 | "\n", |
386 | | - " ! gsutil mb -l {LOCATION} -p {PROJECT_ID} {BUCKET_URI}" |
| 349 | + " ! gsutil mb -l {GCS_LOCATION} -p {PROJECT_ID} {BUCKET_URI}" |
387 | 350 | ] |
388 | 351 | }, |
389 | 352 | { |
|
477 | 440 | "outputs": [], |
478 | 441 | "source": [ |
479 | 442 | "# Refresh the job until complete\n", |
480 | | - "while gcs_batch_job.state == \"JOB_STATE_RUNNING\":\n", |
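| 443 | + "# A newly created job may report PENDING or QUEUED before RUNNING, so poll all non-terminal states\n",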
| 443 | + "while gcs_batch_job.state in (\n", |
| 444 | + " \"JOB_STATE_RUNNING\",\n", |
| 445 | + " \"JOB_STATE_PENDING\",\n", |
| 446 | + " \"JOB_STATE_QUEUED\",\n", |
| 447 | + "):\n", |
481 | 448 | " time.sleep(5)\n", |
482 | 449 | " gcs_batch_job = client.batches.get(name=gcs_batch_job.name)\n", |
483 | 450 | "\n", |
|
544 | 511 | "id": "bfb2a462a7c6" |
545 | 512 | }, |
546 | 513 | "source": [ |
547 | | - "## BigQuery" |
| 514 | + "## BigQuery\n", |
| 515 | + "\n", |
| 516 | + "⚠️ Batch predictions using BigQuery currently does not support Global endpoints. " |
| 517 | + ] |
| 518 | + }, |
| 519 | + { |
| 520 | + "cell_type": "code", |
| 521 | + "execution_count": null, |
| 522 | + "metadata": { |
| 523 | + "id": "9c694724e069" |
| 524 | + }, |
| 525 | + "outputs": [], |
| 526 | + "source": [ |
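| 527 | + "# The BigQuery batch path needs a regional endpoint, so re-create the client pinned to a region\n",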
| 527 | + "client = genai.Client(vertexai=True, project=PROJECT_ID, location=\"us-central1\")" |
548 | 528 | ] |
549 | 529 | }, |
550 | 530 | { |
|
755 | 735 | "outputs": [], |
756 | 736 | "source": [ |
757 | 737 | "# Refresh the job until complete\n", |
758 | | - "while bq_batch_job.state == \"JOB_STATE_RUNNING\":\n", |
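| 738 | + "# As above, include PENDING and QUEUED so newly created jobs are polled too\n",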
| 738 | + "while bq_batch_job.state in (\n", |
| 739 | + " \"JOB_STATE_RUNNING\",\n", |
| 740 | + " \"JOB_STATE_PENDING\",\n", |
| 741 | + " \"JOB_STATE_QUEUED\",\n", |
| 742 | + "):\n", |
759 | 743 | " time.sleep(5)\n", |
760 | 744 | " bq_batch_job = client.batches.get(name=bq_batch_job.name)\n", |
761 | 745 | "\n", |
|