vertexai/_genai/evals.py: 26 additions & 26 deletions
@@ -276,9 +276,6 @@ def _EvaluateInstancesRequestParameters_to_vertex(
_EvaluationInstance_to_vertex(getv(from_object, ["instance"]), to_object),
)

if getv(from_object, ["config"]) is not None:
setv(to_object, ["config"], getv(from_object, ["config"]))

if getv(from_object, ["metric_sources"]) is not None:
setv(
to_object,
@@ -289,6 +286,9 @@ def _EvaluateInstancesRequestParameters_to_vertex(
],
)

if getv(from_object, ["config"]) is not None:
setv(to_object, ["config"], getv(from_object, ["config"]))

return to_object


@@ -450,18 +450,18 @@ def _EvaluationRunMetric_from_vertex(
if getv(from_object, ["metric"]) is not None:
setv(to_object, ["metric"], getv(from_object, ["metric"]))

if getv(from_object, ["metricConfig"]) is not None:
if getv(from_object, ["metricResourceName"]) is not None:
setv(
to_object,
["metric_config"],
_UnifiedMetric_from_vertex(getv(from_object, ["metricConfig"]), to_object),
["metric_resource_name"],
getv(from_object, ["metricResourceName"]),
)

if getv(from_object, ["metricResourceName"]) is not None:
if getv(from_object, ["metricConfig"]) is not None:
setv(
to_object,
["metric_resource_name"],
getv(from_object, ["metricResourceName"]),
["metric_config"],
_UnifiedMetric_from_vertex(getv(from_object, ["metricConfig"]), to_object),
)

return to_object
@@ -475,18 +475,18 @@ def _EvaluationRunMetric_to_vertex(
if getv(from_object, ["metric"]) is not None:
setv(to_object, ["metric"], getv(from_object, ["metric"]))

if getv(from_object, ["metric_config"]) is not None:
if getv(from_object, ["metric_resource_name"]) is not None:
setv(
to_object,
["metricConfig"],
_UnifiedMetric_to_vertex(getv(from_object, ["metric_config"]), to_object),
["metricResourceName"],
getv(from_object, ["metric_resource_name"]),
)

if getv(from_object, ["metric_resource_name"]) is not None:
if getv(from_object, ["metric_config"]) is not None:
setv(
to_object,
["metricResourceName"],
getv(from_object, ["metric_resource_name"]),
["metricConfig"],
_UnifiedMetric_to_vertex(getv(from_object, ["metric_config"]), to_object),
)

return to_object
@@ -582,16 +582,16 @@ def _GenerateInstanceRubricsRequest_to_vertex(
getv(from_object, ["rubric_generation_spec"]),
)

if getv(from_object, ["config"]) is not None:
setv(to_object, ["config"], getv(from_object, ["config"]))

if getv(from_object, ["metric_resource_name"]) is not None:
setv(
to_object,
["metricResourceName"],
getv(from_object, ["metric_resource_name"]),
)

if getv(from_object, ["config"]) is not None:
setv(to_object, ["config"], getv(from_object, ["config"]))

return to_object


@@ -1240,8 +1240,8 @@ def _evaluate_instances(
autorater_config: Optional[genai_types.AutoraterConfigOrDict] = None,
metrics: Optional[list[types.MetricOrDict]] = None,
instance: Optional[types.EvaluationInstanceOrDict] = None,
config: Optional[types.EvaluateInstancesConfigOrDict] = None,
metric_sources: Optional[list[types.MetricSourceOrDict]] = None,
config: Optional[types.EvaluateInstancesConfigOrDict] = None,
) -> types.EvaluateInstancesResponse:
"""
Evaluates instances based on a given metric.
@@ -1261,8 +1261,8 @@ def _evaluate_instances(
autorater_config=autorater_config,
metrics=metrics,
instance=instance,
config=config,
metric_sources=metric_sources,
config=config,
)

request_url_dict: Optional[dict[str, str]]
@@ -1484,8 +1484,8 @@ def _generate_rubrics(
genai_types.PredefinedMetricSpecOrDict
] = None,
rubric_generation_spec: Optional[genai_types.RubricGenerationSpecOrDict] = None,
config: Optional[types.RubricGenerationConfigOrDict] = None,
metric_resource_name: Optional[str] = None,
config: Optional[types.RubricGenerationConfigOrDict] = None,
) -> types.GenerateInstanceRubricsResponse:
"""
Generates rubrics for a given prompt.
@@ -1495,8 +1495,8 @@ def _generate_rubrics(
contents=contents,
predefined_rubric_generation_spec=predefined_rubric_generation_spec,
rubric_generation_spec=rubric_generation_spec,
config=config,
metric_resource_name=metric_resource_name,
config=config,
)

request_url_dict: Optional[dict[str, str]]
@@ -3167,8 +3167,8 @@ async def _evaluate_instances(
autorater_config: Optional[genai_types.AutoraterConfigOrDict] = None,
metrics: Optional[list[types.MetricOrDict]] = None,
instance: Optional[types.EvaluationInstanceOrDict] = None,
config: Optional[types.EvaluateInstancesConfigOrDict] = None,
metric_sources: Optional[list[types.MetricSourceOrDict]] = None,
config: Optional[types.EvaluateInstancesConfigOrDict] = None,
) -> types.EvaluateInstancesResponse:
"""
Evaluates instances based on a given metric.
@@ -3188,8 +3188,8 @@ async def _evaluate_instances(
autorater_config=autorater_config,
metrics=metrics,
instance=instance,
config=config,
metric_sources=metric_sources,
config=config,
)

request_url_dict: Optional[dict[str, str]]
@@ -3417,8 +3417,8 @@ async def _generate_rubrics(
genai_types.PredefinedMetricSpecOrDict
] = None,
rubric_generation_spec: Optional[genai_types.RubricGenerationSpecOrDict] = None,
config: Optional[types.RubricGenerationConfigOrDict] = None,
metric_resource_name: Optional[str] = None,
config: Optional[types.RubricGenerationConfigOrDict] = None,
) -> types.GenerateInstanceRubricsResponse:
"""
Generates rubrics for a given prompt.
@@ -3428,8 +3428,8 @@ async def _generate_rubrics(
contents=contents,
predefined_rubric_generation_spec=predefined_rubric_generation_spec,
rubric_generation_spec=rubric_generation_spec,
config=config,
metric_resource_name=metric_resource_name,
config=config,
)

request_url_dict: Optional[dict[str, str]]
vertexai/_genai/types/common.py: 14 additions & 14 deletions
@@ -2217,13 +2217,13 @@ class EvaluationRunMetric(_common.BaseModel):
metric: Optional[str] = Field(
default=None, description="""The name of the metric."""
)
metric_config: Optional[UnifiedMetric] = Field(
default=None, description="""The unified metric used for evaluation run."""
)
metric_resource_name: Optional[str] = Field(
default=None,
description="""The resource name of the metric definition. Example: projects/{project}/locations/{location}/evaluationMetrics/{evaluation_metric_id}""",
)
metric_config: Optional[UnifiedMetric] = Field(
default=None, description="""The unified metric used for evaluation run."""
)


class EvaluationRunMetricDict(TypedDict, total=False):
@@ -2232,12 +2232,12 @@ class EvaluationRunMetricDict(TypedDict, total=False):
metric: Optional[str]
"""The name of the metric."""

metric_config: Optional[UnifiedMetricDict]
"""The unified metric used for evaluation run."""

metric_resource_name: Optional[str]
"""The resource name of the metric definition. Example: projects/{project}/locations/{location}/evaluationMetrics/{evaluation_metric_id}"""

metric_config: Optional[UnifiedMetricDict]
"""The unified metric used for evaluation run."""


EvaluationRunMetricOrDict = Union[EvaluationRunMetric, EvaluationRunMetricDict]

@@ -4166,10 +4166,10 @@ class _EvaluateInstancesRequestParameters(_common.BaseModel):
instance: Optional[EvaluationInstance] = Field(
default=None, description="""The instance to be evaluated."""
)
config: Optional[EvaluateInstancesConfig] = Field(default=None, description="""""")
metric_sources: Optional[list[MetricSource]] = Field(
default=None, description="""The metrics used for evaluation."""
)
config: Optional[EvaluateInstancesConfig] = Field(default=None, description="""""")


class _EvaluateInstancesRequestParametersDict(TypedDict, total=False):
@@ -4216,12 +4216,12 @@ class _EvaluateInstancesRequestParametersDict(TypedDict, total=False):
instance: Optional[EvaluationInstanceDict]
"""The instance to be evaluated."""

config: Optional[EvaluateInstancesConfigDict]
""""""

metric_sources: Optional[list[MetricSourceDict]]
"""The metrics used for evaluation."""

config: Optional[EvaluateInstancesConfigDict]
""""""


_EvaluateInstancesRequestParametersOrDict = Union[
_EvaluateInstancesRequestParameters, _EvaluateInstancesRequestParametersDict
@@ -5124,11 +5124,11 @@ class _GenerateInstanceRubricsRequest(_common.BaseModel):
default=None,
description="""Specification for how the rubrics should be generated.""",
)
config: Optional[RubricGenerationConfig] = Field(default=None, description="""""")
metric_resource_name: Optional[str] = Field(
default=None,
description="""Registered metric resource name. If this field is set, the configuration provided in this field is used for rubric generation. The `predefined_rubric_generation_spec` and `rubric_generation_spec` fields will be ignored.""",
)
config: Optional[RubricGenerationConfig] = Field(default=None, description="""""")


class _GenerateInstanceRubricsRequestDict(TypedDict, total=False):
Expand All @@ -5148,12 +5148,12 @@ class _GenerateInstanceRubricsRequestDict(TypedDict, total=False):
rubric_generation_spec: Optional[genai_types.RubricGenerationSpecDict]
"""Specification for how the rubrics should be generated."""

config: Optional[RubricGenerationConfigDict]
""""""

metric_resource_name: Optional[str]
"""Registered metric resource name. If this field is set, the configuration provided in this field is used for rubric generation. The `predefined_rubric_generation_spec` and `rubric_generation_spec` fields will be ignored."""

config: Optional[RubricGenerationConfigDict]
""""""


_GenerateInstanceRubricsRequestOrDict = Union[
_GenerateInstanceRubricsRequest, _GenerateInstanceRubricsRequestDict