@@ -88,7 +88,7 @@ def generate(
8888 or pandas Series.
8989 connection_id (str, optional):
9090 Specifies the connection to use to communicate with the model. For example, `myproject.us.myconnection`.
91- If not provided, the connection from the current session will be used.
91+ If not provided, the query uses your end-user credential.
9292 endpoint (str, optional):
9393 Specifies the Vertex AI endpoint to use for the model. For example `"gemini-2.5-flash"`. You can specify any
9494 generally available or preview Gemini model. If you specify the model name, BigQuery ML automatically identifies and
@@ -131,7 +131,7 @@ def generate(
131131
132132 operator = ai_ops .AIGenerate (
133133 prompt_context = tuple (prompt_context ),
134- connection_id = _resolve_connection_id ( series_list [ 0 ], connection_id ) ,
134+ connection_id = connection_id ,
135135 endpoint = endpoint ,
136136 request_type = request_type ,
137137 model_params = json .dumps (model_params ) if model_params else None ,
@@ -186,7 +186,7 @@ def generate_bool(
186186 or pandas Series.
187187 connection_id (str, optional):
188188 Specifies the connection to use to communicate with the model. For example, `myproject.us.myconnection`.
189- If not provided, the connection from the current session will be used.
189+ If not provided, the query uses your end-user credential.
190190 endpoint (str, optional):
191191 Specifies the Vertex AI endpoint to use for the model. For example `"gemini-2.5-flash"`. You can specify any
192192 generally available or preview Gemini model. If you specify the model name, BigQuery ML automatically identifies and
@@ -216,7 +216,7 @@ def generate_bool(
216216
217217 operator = ai_ops .AIGenerateBool (
218218 prompt_context = tuple (prompt_context ),
219- connection_id = _resolve_connection_id ( series_list [ 0 ], connection_id ) ,
219+ connection_id = connection_id ,
220220 endpoint = endpoint ,
221221 request_type = request_type ,
222222 model_params = json .dumps (model_params ) if model_params else None ,
@@ -267,7 +267,7 @@ def generate_int(
267267 or pandas Series.
268268 connection_id (str, optional):
269269 Specifies the connection to use to communicate with the model. For example, `myproject.us.myconnection`.
270- If not provided, the connection from the current session will be used.
270+ If not provided, the query uses your end-user credential.
271271 endpoint (str, optional):
272272 Specifies the Vertex AI endpoint to use for the model. For example `"gemini-2.5-flash"`. You can specify any
273273 generally available or preview Gemini model. If you specify the model name, BigQuery ML automatically identifies and
@@ -297,7 +297,7 @@ def generate_int(
297297
298298 operator = ai_ops .AIGenerateInt (
299299 prompt_context = tuple (prompt_context ),
300- connection_id = _resolve_connection_id ( series_list [ 0 ], connection_id ) ,
300+ connection_id = connection_id ,
301301 endpoint = endpoint ,
302302 request_type = request_type ,
303303 model_params = json .dumps (model_params ) if model_params else None ,
@@ -348,7 +348,7 @@ def generate_double(
348348 or pandas Series.
349349 connection_id (str, optional):
350350 Specifies the connection to use to communicate with the model. For example, `myproject.us.myconnection`.
351- If not provided, the connection from the current session will be used.
351+ If not provided, the query uses your end-user credential.
352352 endpoint (str, optional):
353353 Specifies the Vertex AI endpoint to use for the model. For example `"gemini-2.5-flash"`. You can specify any
354354 generally available or preview Gemini model. If you specify the model name, BigQuery ML automatically identifies and
@@ -378,7 +378,7 @@ def generate_double(
378378
379379 operator = ai_ops .AIGenerateDouble (
380380 prompt_context = tuple (prompt_context ),
381- connection_id = _resolve_connection_id ( series_list [ 0 ], connection_id ) ,
381+ connection_id = connection_id ,
382382 endpoint = endpoint ,
383383 request_type = request_type ,
384384 model_params = json .dumps (model_params ) if model_params else None ,
0 commit comments