Skip to content

Commit b4d37b6

Browse files
sararob authored and copybara-github committed
chore!: GenAI SDK client - remove duplicate types for Content, Part, and evals
PiperOrigin-RevId: 798282497
1 parent 3bb8100 commit b4d37b6

File tree

9 files changed

+119
-960
lines changed

9 files changed

+119
-960
lines changed

tests/unit/vertexai/genai/replays/test_evaluate_instances.py

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,7 @@
1818

1919
from tests.unit.vertexai.genai.replays import pytest_helper
2020
from vertexai._genai import types
21+
from google.genai import types as genai_types
2122
import pandas as pd
2223
import pytest
2324

@@ -30,7 +31,7 @@ def test_bleu_metric(client):
3031
prediction="A fast brown fox leaps over a lazy dog.",
3132
)
3233
],
33-
metric_spec=types.BleuSpec(),
34+
metric_spec=genai_types.BleuSpec(),
3435
)
3536
response = client.evals.evaluate_instances(
3637
metric_config=types._EvaluateInstancesRequestParameters(
@@ -68,7 +69,7 @@ def test_rouge_metric(client):
6869
reference="The quick brown fox jumps over the lazy dog.",
6970
)
7071
],
71-
metric_spec=types.RougeSpec(rouge_type="rougeL"),
72+
metric_spec=genai_types.RougeSpec(rouge_type="rougeL"),
7273
)
7374
response = client.evals.evaluate_instances(
7475
metric_config=types._EvaluateInstancesRequestParameters(
@@ -85,7 +86,7 @@ def test_pointwise_metric(client):
8586

8687
test_input = types.PointwiseMetricInput(
8788
instance=types.PointwiseMetricInstance(json_instance=json_instance),
88-
metric_spec=types.PointwiseMetricSpec(
89+
metric_spec=genai_types.PointwiseMetricSpec(
8990
metric_prompt_template="Evaluate if the response '{response}' correctly answers the prompt '{prompt}'."
9091
),
9192
)
@@ -109,11 +110,11 @@ def test_pairwise_metric_with_autorater(client):
109110

110111
test_input = types.PairwiseMetricInput(
111112
instance=types.PairwiseMetricInstance(json_instance=json_instance),
112-
metric_spec=types.PairwiseMetricSpec(
113+
metric_spec=genai_types.PairwiseMetricSpec(
113114
metric_prompt_template="Which response is a better summary? Baseline: '{baseline_response}' or Candidate: '{candidate_response}'"
114115
),
115116
)
116-
autorater_config = types.AutoraterConfig(sampling_count=2)
117+
autorater_config = genai_types.AutoraterConfig(sampling_count=2)
117118

118119
response = client.evals.evaluate_instances(
119120
metric_config=types._EvaluateInstancesRequestParameters(
@@ -178,7 +179,7 @@ async def test_bleu_metric_async(client):
178179
prediction="A fast brown fox leaps over a lazy dog.",
179180
)
180181
],
181-
metric_spec=types.BleuSpec(),
182+
metric_spec=genai_types.BleuSpec(),
182183
)
183184
response = await client.aio.evals.evaluate_instances(
184185
metric_config=types._EvaluateInstancesRequestParameters(

tests/unit/vertexai/genai/replays/test_generate_agent_engine_memories.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@
1616

1717
from tests.unit.vertexai.genai.replays import pytest_helper
1818
from vertexai._genai import types
19+
from google.genai import types as genai_types
1920

2021

2122
def test_generate_memories(client):
@@ -31,10 +32,10 @@ def test_generate_memories(client):
3132
direct_contents_source=types.GenerateMemoriesRequestDirectContentsSource(
3233
events=[
3334
types.GenerateMemoriesRequestDirectContentsSourceEvent(
34-
content=types.Content(
35+
content=genai_types.Content(
3536
role="model",
3637
parts=[
37-
types.Part(
38+
genai_types.Part(
3839
text="I am a software engineer focusing in security"
3940
)
4041
],

tests/unit/vertexai/genai/replays/test_internal_generate_rubrics.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@
1717

1818
from tests.unit.vertexai.genai.replays import pytest_helper
1919
from vertexai._genai import types
20+
from google.genai import types as genai_types
2021

2122
_TEST_RUBRIC_GENERATION_PROMPT = """SPECIAL INSTRUCTION: think silently. Silent thinking token budget: 16384.
2223
@@ -146,9 +147,9 @@
146147
def test_internal_method_generate_rubrics(client):
147148
"""Tests the internal _generate_rubrics method."""
148149
test_contents = [
149-
types.Content(
150+
genai_types.Content(
150151
parts=[
151-
types.Part(
152+
genai_types.Part(
152153
text="Generate a short story about a friendly dragon.",
153154
),
154155
],

vertexai/_genai/_evals_metric_handlers.py

Lines changed: 10 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -529,19 +529,25 @@ def _build_pointwise_input(
529529
role = msg_obj.content.role or msg_obj.author or "user"
530530
history_texts.append(f"{role}: {msg_text}")
531531
content_list_to_serialize = [
532-
types.Content(parts=[types.Part(text="\n".join(history_texts))])
532+
genai_types.Content(
533+
parts=[genai_types.Part(text="\n".join(history_texts))]
534+
)
533535
]
534536
else:
535537
content_list_to_serialize = [
536-
types.Content(parts=[types.Part(text=json.dumps(value))])
538+
genai_types.Content(
539+
parts=[genai_types.Part(text=json.dumps(value))]
540+
)
537541
]
538542
elif isinstance(value, dict):
539543
content_list_to_serialize = [
540-
types.Content(parts=[types.Part(text=json.dumps(value))])
544+
genai_types.Content(
545+
parts=[genai_types.Part(text=json.dumps(value))]
546+
)
541547
]
542548
else:
543549
content_list_to_serialize = [
544-
types.Content(parts=[types.Part(text=str(value))])
550+
genai_types.Content(parts=[genai_types.Part(text=str(value))])
545551
]
546552

547553
content_map_values[key] = types.ContentMapContents(

0 commit comments

Comments (0)