Skip to content

Commit 6255ab4

Browse files
committed
Merge branch 'gemini_model_updates-pro' of github.com:GoogleCloudPlatform/python-docs-samples into gemini_model_updates-pro
2 parents 4a9b886 + 44d8fe8 commit 6255ab4

2 files changed

Lines changed: 176 additions & 0 deletions

File tree

Lines changed: 106 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,106 @@
1+
# Copyright 2024 Google LLC
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# https://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
import os
15+
16+
from vertexai.preview.evaluation import EvalResult
17+
18+
PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT")
19+
20+
21+
def evaluate_output() -> EvalResult:
    """Run a pairwise summarization-quality evaluation between two Gemini models.

    Sends one summarization prompt to both a baseline and a candidate model,
    then has the evaluation service judge which response is better.

    Returns:
        EvalResult: evaluation result whose ``metrics_table`` holds both model
        responses, the pairwise winner, and the autorater's explanation.
    """
    # [START generativeaionvertexai_evaluation_pairwise_summarization_quality]
    import pandas as pd

    import vertexai
    from vertexai.generative_models import GenerativeModel
    from vertexai.evaluation import (
        EvalTask,
        PairwiseMetric,
        MetricPromptTemplateExamples,
    )

    # TODO(developer): Update & uncomment line below
    # PROJECT_ID = "your-project-id"
    vertexai.init(project=PROJECT_ID, location="us-central1")

    prompt = """
    Summarize the text such that a five-year-old can understand.

    # Text

    As part of a comprehensive initiative to tackle urban congestion and foster
    sustainable urban living, a major city has revealed ambitious plans for an
    extensive overhaul of its public transportation system. The project aims not
    only to improve the efficiency and reliability of public transit but also to
    reduce the city\'s carbon footprint and promote eco-friendly commuting options.
    City officials anticipate that this strategic investment will enhance
    accessibility for residents and visitors alike, ushering in a new era of
    efficient, environmentally conscious urban transportation.
    """

    # The evaluation dataset needs only the prompt column for this metric.
    eval_dataset = pd.DataFrame({"prompt": [prompt]})

    # Baseline model for pairwise comparison
    baseline_model = GenerativeModel("gemini-2.0-flash-lite-001")

    # Candidate model for pairwise comparison
    candidate_model = GenerativeModel(
        "gemini-2.5-pro-exp-03-25", generation_config={"temperature": 0.4}
    )

    # Use the canned prompt template for this metric.
    prompt_template = MetricPromptTemplateExamples.get_prompt_template(
        "pairwise_summarization_quality"
    )

    summarization_quality_metric = PairwiseMetric(
        metric="pairwise_summarization_quality",
        metric_prompt_template=prompt_template,
        baseline_model=baseline_model,
    )

    eval_task = EvalTask(
        dataset=eval_dataset,
        metrics=[summarization_quality_metric],
        experiment="pairwise-experiment",
    )
    result = eval_task.evaluate(model=candidate_model)

    # Pull the single row of interest out of the metrics table.
    table = result.metrics_table
    baseline_model_response = table["baseline_model_response"].iloc[0]
    candidate_model_response = table["response"].iloc[0]
    winner_model = table["pairwise_summarization_quality/pairwise_choice"].iloc[0]
    explanation = table["pairwise_summarization_quality/explanation"].iloc[0]

    print(f"Baseline's story:\n{baseline_model_response}")
    print(f"Candidate's story:\n{candidate_model_response}")
    print(f"Winner: {winner_model}")
    print(f"Explanation: {explanation}")
    # Example response:
    # Baseline's story:
    # A big city wants to make it easier for people to get around without using cars! They're going to make buses and trains ...
    #
    # Candidate's story:
    # A big city wants to make it easier for people to get around without using cars! ... This will help keep the air clean ...
    #
    # Winner: CANDIDATE
    # Explanation: Both responses adhere to the prompt's constraints, are grounded in the provided text, and ... However, Response B ...
    # [END generativeaionvertexai_evaluation_pairwise_summarization_quality]
    return result
103+
104+
105+
# Allow running this sample directly as a script.
if __name__ == "__main__":
    evaluate_output()
Lines changed: 70 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,70 @@
1+
# Copyright 2024 Google LLC
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# https://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
import os
15+
16+
from vertexai.preview.prompts import Prompt
17+
18+
PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT")
19+
20+
21+
def prompt_create() -> Prompt:
    """Create a local prompt, generate content for each variable set, and save it.

    Builds a templated ``Prompt`` with two variable sets, calls the model once
    per variable set with the assembled template, then saves the prompt as a
    versioned resource.

    Returns:
        Prompt: the saved prompt version resource returned by
        ``prompts.create_version``.
    """
    # [START generativeaionvertexai_prompt_template_create_generate_save]
    import vertexai
    from vertexai.preview import prompts
    from vertexai.preview.prompts import Prompt

    # from vertexai.generative_models import GenerationConfig, SafetySetting # Optional

    # Initialize vertexai
    vertexai.init(project=PROJECT_ID, location="us-central1")

    # Create local Prompt
    local_prompt = Prompt(
        prompt_name="movie-critic",
        prompt_data="Compare the movies {movie1} and {movie2}.",
        variables=[
            {"movie1": "The Lion King", "movie2": "Frozen"},
            {"movie1": "Inception", "movie2": "Interstellar"},
        ],
        model_name="gemini-2.0-flash-lite-001",
        system_instruction="You are a movie critic. Answer in a short sentence.",
        # generation_config=GenerationConfig, # Optional,
        # safety_settings=SafetySetting, # Optional,
    )

    # Generate content using the assembled prompt for each variable set.
    # Iterate the variable sets directly rather than indexing with
    # range(len(...)), which is the idiomatic (and equivalent) form.
    for variable_set in local_prompt.variables:
        response = local_prompt.generate_content(
            contents=local_prompt.assemble_contents(**variable_set)
        )
        print(response)

    # Save a version
    prompt1 = prompts.create_version(prompt=local_prompt)

    print(prompt1)

    # Example response
    # Assembled prompt replacing: 1 instances of variable movie1, 1 instances of variable movie2
    # Assembled prompt replacing: 1 instances of variable movie1, 1 instances of variable movie2
    # Created prompt resource with id 12345678910.....
    # [END generativeaionvertexai_prompt_template_create_generate_save]
    return prompt1
67+
68+
69+
# Allow running this sample directly as a script.
if __name__ == "__main__":
    prompt_create()

0 commit comments

Comments
 (0)