Skip to content

Commit 5f0b01e

Browse files
sararob authored and copybara-github committed
chore: GenAI SDK client - introduce subnamespaces for types
PiperOrigin-RevId: 818740807
1 parent d4e211d commit 5f0b01e

17 files changed

+2840
-456
lines changed

tests/unit/vertexai/genai/replays/conftest.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@
2525
from google.cloud import storage, bigquery
2626
from google.genai import _replay_api_client
2727
from google.genai import client as google_genai_client_module
28-
from vertexai._genai import _evals_utils
28+
from vertexai._genai import _gcs_utils
2929
from vertexai._genai import prompt_optimizer
3030
import pytest
3131

@@ -246,7 +246,7 @@ def client(use_vertex, replays_prefix, http_options, request):
246246
mock_bigquery_client.return_value = mock.MagicMock()
247247

248248
with mock.patch.object(
249-
_evals_utils.GcsUtils, "read_file_contents"
249+
_gcs_utils.GcsUtils, "read_file_contents"
250250
) as mock_read_file_contents:
251251
mock_read_file_contents.side_effect = (
252252
_mock_read_file_contents_side_effect

tests/unit/vertexai/genai/replays/test_evaluate_instances.py

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -103,9 +103,9 @@ def test_pointwise_metric_with_agent_data(client):
103103
"""Tests the _evaluate_instances method with PointwiseMetricInput and agent_data."""
104104
instance_dict = {"prompt": "What is the capital of France?", "response": "Paris"}
105105
json_instance = json.dumps(instance_dict)
106-
agent_data = types.AgentData(
107-
agent_config=types.AgentConfig(
108-
tools=types.Tools(
106+
agent_data = types.evals.AgentData(
107+
agent_config=types.evals.AgentConfig(
108+
tools=types.evals.Tools(
109109
tool=[
110110
genai_types.Tool(
111111
function_declarations=[
@@ -114,15 +114,15 @@ def test_pointwise_metric_with_agent_data(client):
114114
)
115115
]
116116
),
117-
developer_instruction=types.InstanceData(text="instruction"),
117+
developer_instruction=types.evals.InstanceData(text="instruction"),
118118
),
119-
events=types.Events(
119+
events=types.evals.Events(
120120
event=[genai_types.Content(parts=[genai_types.Part(text="hello")])]
121121
),
122122
)
123123
instance = types.EvaluationInstance(
124-
prompt=types.InstanceData(text="What is the capital of France?"),
125-
response=types.InstanceData(text="Paris"),
124+
prompt=types.evals.InstanceData(text="What is the capital of France?"),
125+
response=types.evals.InstanceData(text="Paris"),
126126
agent_data=agent_data,
127127
)
128128

@@ -144,9 +144,9 @@ def test_pointwise_metric_with_agent_data(client):
144144

145145
def test_predefined_metric_with_agent_data(client):
146146
"""Tests the _evaluate_instances method with predefined metric and agent_data."""
147-
agent_data = types.AgentData(
148-
agent_config=types.AgentConfig(
149-
tools=types.Tools(
147+
agent_data = types.evals.AgentData(
148+
agent_config=types.evals.AgentConfig(
149+
tools=types.evals.Tools(
150150
tool=[
151151
genai_types.Tool(
152152
function_declarations=[
@@ -155,16 +155,16 @@ def test_predefined_metric_with_agent_data(client):
155155
)
156156
]
157157
),
158-
developer_instruction=types.InstanceData(text="instruction"),
158+
developer_instruction=types.evals.InstanceData(text="instruction"),
159159
),
160-
events=types.Events(
160+
events=types.evals.Events(
161161
event=[genai_types.Content(parts=[genai_types.Part(text="hello")])]
162162
),
163163
)
164164
instance = types.EvaluationInstance(
165-
prompt=types.InstanceData(text="What is the capital of France?"),
166-
response=types.InstanceData(text="Paris"),
167-
reference=types.InstanceData(text="Paris"),
165+
prompt=types.evals.InstanceData(text="What is the capital of France?"),
166+
response=types.evals.InstanceData(text="Paris"),
167+
reference=types.evals.InstanceData(text="Paris"),
168168
agent_data=agent_data,
169169
)
170170

tests/unit/vertexai/genai/test_evals.py

Lines changed: 30 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,8 @@
2727
from vertexai._genai import _evals_data_converters
2828
from vertexai._genai import _evals_metric_handlers
2929
from vertexai._genai import _evals_visualization
30+
from vertexai._genai import _evals_metric_loaders
31+
from vertexai._genai import _gcs_utils
3032
from vertexai._genai import _observability_data_converter
3133
from vertexai._genai import evals
3234
from vertexai._genai import types as vertexai_genai_types
@@ -76,9 +78,9 @@ def mock_eval_dependencies(mock_api_client_fixture):
7678
) as mock_bq_client, mock.patch(
7779
"vertexai._genai.evals.Evals.evaluate_instances"
7880
) as mock_evaluate_instances, mock.patch(
79-
"vertexai._genai._evals_utils.GcsUtils.upload_json_to_prefix"
81+
"vertexai._genai._gcs_utils.GcsUtils.upload_json_to_prefix"
8082
) as mock_upload_to_gcs, mock.patch(
81-
"vertexai._genai._evals_utils.LazyLoadedPrebuiltMetric._fetch_and_parse"
83+
"vertexai._genai._evals_metric_loaders.LazyLoadedPrebuiltMetric._fetch_and_parse"
8284
) as mock_fetch_prebuilt_metric:
8385

8486
def mock_evaluate_instances_side_effect(*args, **kwargs):
@@ -235,7 +237,7 @@ def test_display_evaluation_result_with_agent_trace_prefixes(self, mock_is_ipyth
235237
)
236238
eval_result = vertexai_genai_types.EvaluationResult(
237239
evaluation_dataset=[eval_dataset],
238-
agent_info=vertexai_genai_types.AgentInfo(name="test_agent"),
240+
agent_info=vertexai_genai_types.evals.AgentInfo(name="test_agent"),
239241
eval_case_results=[
240242
vertexai_genai_types.EvalCaseResult(
241243
eval_case_index=0,
@@ -284,7 +286,7 @@ def setup_method(self):
284286
self.client = vertexai.Client(project=_TEST_PROJECT, location=_TEST_LOCATION)
285287

286288
@mock.patch.object(_evals_common, "Models")
287-
@mock.patch.object(_evals_utils, "EvalDatasetLoader")
289+
@mock.patch.object(_evals_metric_loaders, "EvalDatasetLoader")
288290
def test_inference_with_string_model_success(
289291
self, mock_eval_dataset_loader, mock_models
290292
):
@@ -327,7 +329,7 @@ def test_inference_with_string_model_success(
327329
assert inference_result.candidate_name == "gemini-pro"
328330
assert inference_result.gcs_source is None
329331

330-
@mock.patch.object(_evals_utils, "EvalDatasetLoader")
332+
@mock.patch.object(_evals_metric_loaders, "EvalDatasetLoader")
331333
def test_inference_with_callable_model_sets_candidate_name(
332334
self, mock_eval_dataset_loader
333335
):
@@ -346,7 +348,7 @@ def my_model_fn(contents):
346348
assert inference_result.candidate_name == "my_model_fn"
347349
assert inference_result.gcs_source is None
348350

349-
@mock.patch.object(_evals_utils, "EvalDatasetLoader")
351+
@mock.patch.object(_evals_metric_loaders, "EvalDatasetLoader")
350352
def test_inference_with_lambda_model_candidate_name_is_none(
351353
self, mock_eval_dataset_loader
352354
):
@@ -368,7 +370,7 @@ def test_inference_with_lambda_model_candidate_name_is_none(
368370
)
369371
assert inference_result.gcs_source is None
370372

371-
@mock.patch.object(_evals_utils, "EvalDatasetLoader")
373+
@mock.patch.object(_evals_metric_loaders, "EvalDatasetLoader")
372374
def test_inference_with_callable_model_success(self, mock_eval_dataset_loader):
373375
mock_df = pd.DataFrame({"prompt": ["test prompt"]})
374376
mock_eval_dataset_loader.return_value.load.return_value = mock_df.to_dict(
@@ -396,7 +398,7 @@ def mock_model_fn(contents):
396398
assert inference_result.gcs_source is None
397399

398400
@mock.patch.object(_evals_common, "Models")
399-
@mock.patch.object(_evals_utils, "EvalDatasetLoader")
401+
@mock.patch.object(_evals_metric_loaders, "EvalDatasetLoader")
400402
def test_inference_with_prompt_template(
401403
self, mock_eval_dataset_loader, mock_models
402404
):
@@ -443,8 +445,8 @@ def test_inference_with_prompt_template(
443445
assert inference_result.gcs_source is None
444446

445447
@mock.patch.object(_evals_common, "Models")
446-
@mock.patch.object(_evals_utils, "EvalDatasetLoader")
447-
@mock.patch.object(_evals_utils, "GcsUtils")
448+
@mock.patch.object(_evals_metric_loaders, "EvalDatasetLoader")
449+
@mock.patch.object(_gcs_utils, "GcsUtils")
448450
def test_inference_with_gcs_destination(
449451
self, mock_gcs_utils, mock_eval_dataset_loader, mock_models
450452
):
@@ -497,7 +499,7 @@ def test_inference_with_gcs_destination(
497499
)
498500

499501
@mock.patch.object(_evals_common, "Models")
500-
@mock.patch.object(_evals_utils, "EvalDatasetLoader")
502+
@mock.patch.object(_evals_metric_loaders, "EvalDatasetLoader")
501503
@mock.patch("pandas.DataFrame.to_json")
502504
@mock.patch("os.makedirs")
503505
def test_inference_with_local_destination(
@@ -549,7 +551,7 @@ def test_inference_with_local_destination(
549551
assert inference_result.gcs_source is None
550552

551553
@mock.patch.object(_evals_common, "Models")
552-
@mock.patch.object(_evals_utils, "EvalDatasetLoader")
554+
@mock.patch.object(_evals_metric_loaders, "EvalDatasetLoader")
553555
def test_inference_from_request_column_save_to_local_dir(
554556
self, mock_eval_dataset_loader, mock_models
555557
):
@@ -783,7 +785,7 @@ def test_inference_from_local_csv_file(self, mock_models):
783785
assert inference_result.gcs_source is None
784786

785787
@mock.patch.object(_evals_common, "Models")
786-
@mock.patch.object(_evals_utils, "EvalDatasetLoader")
788+
@mock.patch.object(_evals_metric_loaders, "EvalDatasetLoader")
787789
def test_inference_with_row_level_config_overrides(
788790
self, mock_eval_dataset_loader, mock_models
789791
):
@@ -968,7 +970,7 @@ def mock_generate_content_logic(*args, **kwargs):
968970
assert inference_result.gcs_source is None
969971

970972
@mock.patch.object(_evals_common, "Models")
971-
@mock.patch.object(_evals_utils, "EvalDatasetLoader")
973+
@mock.patch.object(_evals_metric_loaders, "EvalDatasetLoader")
972974
def test_inference_with_multimodal_content(
973975
self, mock_eval_dataset_loader, mock_models
974976
):
@@ -1044,7 +1046,7 @@ def test_inference_with_multimodal_content(
10441046
assert inference_result.candidate_name == "gemini-pro"
10451047
assert inference_result.gcs_source is None
10461048

1047-
@mock.patch.object(_evals_utils, "EvalDatasetLoader")
1049+
@mock.patch.object(_evals_metric_loaders, "EvalDatasetLoader")
10481050
@mock.patch("vertexai._genai._evals_common.vertexai.Client")
10491051
def test_run_inference_with_agent_engine_and_session_inputs_dict(
10501052
self,
@@ -1140,7 +1142,7 @@ async def _async_iterator(iterable):
11401142
assert inference_result.candidate_name == "agent"
11411143
assert inference_result.gcs_source is None
11421144

1143-
@mock.patch.object(_evals_utils, "EvalDatasetLoader")
1145+
@mock.patch.object(_evals_metric_loaders, "EvalDatasetLoader")
11441146
@mock.patch("vertexai._genai._evals_common.vertexai.Client")
11451147
def test_run_inference_with_agent_engine_and_session_inputs_literal_string(
11461148
self,
@@ -1423,7 +1425,7 @@ def test_run_inference_with_litellm_import_error(self, mock_api_client_fixture):
14231425
@mock.patch.object(_evals_common, "_is_gemini_model")
14241426
@mock.patch.object(_evals_common, "_is_litellm_model")
14251427
@mock.patch.object(_evals_common, "_is_litellm_vertex_maas_model")
1426-
@mock.patch.object(_evals_utils, "EvalDatasetLoader")
1428+
@mock.patch.object(_evals_metric_loaders, "EvalDatasetLoader")
14271429
def test_run_inference_with_litellm_parsing(
14281430
self,
14291431
mock_eval_dataset_loader,
@@ -2783,7 +2785,7 @@ def test_agent_info_creation(self):
27832785
)
27842786
]
27852787
)
2786-
agent_info = vertexai_genai_types.AgentInfo(
2788+
agent_info = vertexai_genai_types.evals.AgentInfo(
27872789
name="agent1",
27882790
instruction="instruction1",
27892791
description="description1",
@@ -2827,7 +2829,7 @@ def test_eval_case_with_agent_eval_fields(self):
28272829
)
28282830
]
28292831
)
2830-
agent_info = vertexai_genai_types.AgentInfo(
2832+
agent_info = vertexai_genai_types.evals.AgentInfo(
28312833
name="agent1",
28322834
instruction="instruction1",
28332835
tool_declarations=[tool],
@@ -2933,7 +2935,7 @@ def test_metric_name_validation_lowercase(self):
29332935
metric = vertexai_genai_types.Metric(name="UPPERCASEMetric")
29342936
assert metric.name == "uppercasemetric"
29352937

2936-
@mock.patch("vertexai._genai.types.yaml.dump")
2938+
@mock.patch("vertexai._genai.types.common.yaml.dump")
29372939
@mock.patch("builtins.open", new_callable=mock.mock_open)
29382940
def test_metric_to_yaml_file_with_version_and_set_fields(
29392941
self, mock_open_file, mock_yaml_dump
@@ -2970,7 +2972,7 @@ def test_metric_to_yaml_file_with_version_and_set_fields(
29702972
allow_unicode=True,
29712973
)
29722974

2973-
@mock.patch("vertexai._genai.types.yaml.dump")
2975+
@mock.patch("vertexai._genai.types.common.yaml.dump")
29742976
@mock.patch("builtins.open", new_callable=mock.mock_open)
29752977
def test_metric_to_yaml_file_without_version_minimal_fields(
29762978
self, mock_open_file, mock_yaml_dump
@@ -2991,7 +2993,7 @@ def test_metric_to_yaml_file_without_version_minimal_fields(
29912993
allow_unicode=True,
29922994
)
29932995

2994-
@mock.patch("vertexai._genai.types.yaml", None)
2996+
@mock.patch("vertexai._genai.types.common.yaml", None)
29952997
def test_metric_to_yaml_file_raises_importerror_if_yaml_is_none(self):
29962998
metric_obj = vertexai_genai_types.Metric(name="ErrorMetric")
29972999
with pytest.raises(
@@ -3699,7 +3701,7 @@ def test_eval_case_to_agent_data(self):
36993701
)
37003702
]
37013703
)
3702-
agent_info = vertexai_genai_types.AgentInfo(
3704+
agent_info = vertexai_genai_types.evals.AgentInfo(
37033705
name="agent1",
37043706
instruction="instruction1",
37053707
tool_declarations=[tool],
@@ -3797,7 +3799,6 @@ def setup_method(self):
37973799
importlib.reload(aiplatform_initializer)
37983800
importlib.reload(aiplatform)
37993801
importlib.reload(vertexai)
3800-
importlib.reload(genai_types)
38013802
importlib.reload(vertexai_genai_types)
38023803
importlib.reload(_evals_data_converters)
38033804
importlib.reload(_evals_metric_handlers)
@@ -4346,7 +4347,7 @@ def test_execute_evaluation_with_openai_schema(
43464347
name="test_metric", prompt_template="Evaluate: {response}"
43474348
)
43484349

4349-
with mock.patch.object(_evals_utils, "EvalDatasetLoader") as mock_loader_class:
4350+
with mock.patch.object(_evals_metric_loaders, "EvalDatasetLoader") as mock_loader_class:
43504351
mock_loader_instance = mock_loader_class.return_value
43514352
mock_loader_instance.load.return_value = mock_openai_raw_data
43524353

@@ -4599,7 +4600,7 @@ def test_execute_evaluation_lazy_loaded_prebuilt_metric_instance(
45994600
eval_dataset_df=dataset_df
46004601
)
46014602

4602-
lazy_metric_instance = _evals_utils.LazyLoadedPrebuiltMetric(
4603+
lazy_metric_instance = _evals_metric_loaders.LazyLoadedPrebuiltMetric(
46034604
name="fluency", version="v1"
46044605
)
46054606

@@ -4799,7 +4800,7 @@ def test_execute_evaluation_adds_creation_timestamp(
47994800
class TestEvaluationDataset:
48004801
"""Contains set of tests for the EvaluationDataset class methods."""
48014802

4802-
@mock.patch.object(_evals_utils, "GcsUtils")
4803+
@mock.patch.object(_gcs_utils, "GcsUtils")
48034804
def test_load_from_observability_eval_cases(self, mock_gcs_utils):
48044805
"""Tests that load_from_observability_eval_cases reads data from GCS."""
48054806

@@ -4851,7 +4852,7 @@ def read_file_contents_side_effect(src: str) -> str:
48514852
),
48524853
)
48534854

4854-
@mock.patch.object(_evals_utils, "GcsUtils")
4855+
@mock.patch.object(_gcs_utils, "GcsUtils")
48554856
def test_load_from_observability_eval_cases_no_system_instruction(
48564857
self, mock_gcs_utils
48574858
):
@@ -4903,7 +4904,7 @@ def read_file_contents_side_effect(src: str) -> str:
49034904
),
49044905
)
49054906

4906-
@mock.patch.object(_evals_utils, "GcsUtils")
4907+
@mock.patch.object(_gcs_utils, "GcsUtils")
49074908
def test_load_from_observability_eval_cases_multiple_cases(self, mock_gcs_utils):
49084909
"""Test load_from_observability_eval_cases can handle multiple cases."""
49094910

vertexai/_genai/_bigquery_utils.py

Lines changed: 49 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,49 @@
1+
# Copyright 2025 Google LLC
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
#
15+
16+
import logging
17+
18+
from google.cloud import bigquery
19+
from google.genai._api_client import BaseApiClient
20+
import pandas as pd
21+
22+
23+
logger = logging.getLogger(__name__)
24+
25+
26+
class BigQueryUtils:
    """Handles BigQuery operations for the GenAI SDK client.

    Wraps a ``google.cloud.bigquery.Client`` constructed from the API
    client's project and credentials, and provides helpers for moving
    data between BigQuery tables and pandas DataFrames.
    """

    def __init__(self, api_client: BaseApiClient):
        """Initializes a BigQuery client from the API client's identity.

        Args:
            api_client: Base API client supplying the ``project`` and
                ``_credentials`` used to authenticate the BigQuery client.
        """
        self.api_client = api_client
        self.bigquery_client = bigquery.Client(
            project=self.api_client.project,
            credentials=self.api_client._credentials,
        )

    def load_bigquery_to_dataframe(self, table_uri: str) -> "pd.DataFrame":
        """Loads data from a BigQuery table into a DataFrame.

        Args:
            table_uri: BigQuery table ID (e.g. ``project.dataset.table``).

        Returns:
            A DataFrame containing every row of the table.
        """
        table = self.bigquery_client.get_table(table_uri)
        return self.bigquery_client.list_rows(table).to_dataframe()

    def upload_dataframe_to_bigquery(
        self, df: "pd.DataFrame", bq_table_uri: str
    ) -> None:
        """Uploads a Pandas DataFrame to a BigQuery table.

        Blocks until the load job completes; errors raised by the job
        surface here via ``job.result()``.

        Args:
            df: The DataFrame to upload.
            bq_table_uri: Destination BigQuery table ID.
        """
        job = self.bigquery_client.load_table_from_dataframe(df, bq_table_uri)
        # Wait for the load job so failures are raised to the caller.
        job.result()
        # Lazy %-formatting: the message is only built if INFO is enabled.
        logger.info(
            "DataFrame successfully uploaded to BigQuery table: %s",
            bq_table_uri,
        )

0 commit comments

Comments
 (0)