From 0aa95d6e9ae4cb6ae835f8f6c3981eec259bee8b Mon Sep 17 00:00:00 2001
From: Jaycee Li
Date: Tue, 7 Apr 2026 16:15:17 -0700
Subject: [PATCH] chore: Internal cleanup

PiperOrigin-RevId: 896138222
---
 vertexai/_genai/evals.py        | 52 ++++++++++++++++-----------------
 vertexai/_genai/types/common.py | 28 +++++++++---------
 2 files changed, 40 insertions(+), 40 deletions(-)

diff --git a/vertexai/_genai/evals.py b/vertexai/_genai/evals.py
index fe98d8c667..3689436852 100644
--- a/vertexai/_genai/evals.py
+++ b/vertexai/_genai/evals.py
@@ -276,9 +276,6 @@ def _EvaluateInstancesRequestParameters_to_vertex(
             _EvaluationInstance_to_vertex(getv(from_object, ["instance"]), to_object),
         )

-    if getv(from_object, ["config"]) is not None:
-        setv(to_object, ["config"], getv(from_object, ["config"]))
-
     if getv(from_object, ["metric_sources"]) is not None:
         setv(
             to_object,
@@ -289,6 +286,9 @@ def _EvaluateInstancesRequestParameters_to_vertex(
             ],
         )

+    if getv(from_object, ["config"]) is not None:
+        setv(to_object, ["config"], getv(from_object, ["config"]))
+
     return to_object


@@ -450,18 +450,18 @@ def _EvaluationRunMetric_from_vertex(
     if getv(from_object, ["metric"]) is not None:
         setv(to_object, ["metric"], getv(from_object, ["metric"]))

-    if getv(from_object, ["metricConfig"]) is not None:
+    if getv(from_object, ["metricResourceName"]) is not None:
         setv(
             to_object,
-            ["metric_config"],
-            _UnifiedMetric_from_vertex(getv(from_object, ["metricConfig"]), to_object),
+            ["metric_resource_name"],
+            getv(from_object, ["metricResourceName"]),
         )

-    if getv(from_object, ["metricResourceName"]) is not None:
+    if getv(from_object, ["metricConfig"]) is not None:
         setv(
             to_object,
-            ["metric_resource_name"],
-            getv(from_object, ["metricResourceName"]),
+            ["metric_config"],
+            _UnifiedMetric_from_vertex(getv(from_object, ["metricConfig"]), to_object),
         )

     return to_object
@@ -475,18 +475,18 @@ def _EvaluationRunMetric_to_vertex(
     if getv(from_object, ["metric"]) is not None:
         setv(to_object, ["metric"], getv(from_object, ["metric"]))

-    if getv(from_object, ["metric_config"]) is not None:
+    if getv(from_object, ["metric_resource_name"]) is not None:
         setv(
             to_object,
-            ["metricConfig"],
-            _UnifiedMetric_to_vertex(getv(from_object, ["metric_config"]), to_object),
+            ["metricResourceName"],
+            getv(from_object, ["metric_resource_name"]),
         )

-    if getv(from_object, ["metric_resource_name"]) is not None:
+    if getv(from_object, ["metric_config"]) is not None:
         setv(
             to_object,
-            ["metricResourceName"],
-            getv(from_object, ["metric_resource_name"]),
+            ["metricConfig"],
+            _UnifiedMetric_to_vertex(getv(from_object, ["metric_config"]), to_object),
         )

     return to_object
@@ -582,9 +582,6 @@ def _GenerateInstanceRubricsRequest_to_vertex(
             getv(from_object, ["rubric_generation_spec"]),
         )

-    if getv(from_object, ["config"]) is not None:
-        setv(to_object, ["config"], getv(from_object, ["config"]))
-
     if getv(from_object, ["metric_resource_name"]) is not None:
         setv(
             to_object,
@@ -592,6 +589,9 @@ def _GenerateInstanceRubricsRequest_to_vertex(
             getv(from_object, ["metric_resource_name"]),
         )

+    if getv(from_object, ["config"]) is not None:
+        setv(to_object, ["config"], getv(from_object, ["config"]))
+
     return to_object


@@ -1240,8 +1240,8 @@ def _evaluate_instances(
         autorater_config: Optional[genai_types.AutoraterConfigOrDict] = None,
         metrics: Optional[list[types.MetricOrDict]] = None,
         instance: Optional[types.EvaluationInstanceOrDict] = None,
-        config: Optional[types.EvaluateInstancesConfigOrDict] = None,
         metric_sources: Optional[list[types.MetricSourceOrDict]] = None,
+        config: Optional[types.EvaluateInstancesConfigOrDict] = None,
     ) -> types.EvaluateInstancesResponse:
         """
         Evaluates instances based on a given metric.
@@ -1261,8 +1261,8 @@ def _evaluate_instances(
             autorater_config=autorater_config,
             metrics=metrics,
             instance=instance,
-            config=config,
             metric_sources=metric_sources,
+            config=config,
         )

         request_url_dict: Optional[dict[str, str]]
@@ -1484,8 +1484,8 @@ def _generate_rubrics(
             genai_types.PredefinedMetricSpecOrDict
         ] = None,
         rubric_generation_spec: Optional[genai_types.RubricGenerationSpecOrDict] = None,
-        config: Optional[types.RubricGenerationConfigOrDict] = None,
         metric_resource_name: Optional[str] = None,
+        config: Optional[types.RubricGenerationConfigOrDict] = None,
     ) -> types.GenerateInstanceRubricsResponse:
         """
         Generates rubrics for a given prompt.
@@ -1495,8 +1495,8 @@ def _generate_rubrics(
             contents=contents,
             predefined_rubric_generation_spec=predefined_rubric_generation_spec,
             rubric_generation_spec=rubric_generation_spec,
-            config=config,
             metric_resource_name=metric_resource_name,
+            config=config,
         )

         request_url_dict: Optional[dict[str, str]]
@@ -3167,8 +3167,8 @@ async def _evaluate_instances(
         autorater_config: Optional[genai_types.AutoraterConfigOrDict] = None,
         metrics: Optional[list[types.MetricOrDict]] = None,
         instance: Optional[types.EvaluationInstanceOrDict] = None,
-        config: Optional[types.EvaluateInstancesConfigOrDict] = None,
         metric_sources: Optional[list[types.MetricSourceOrDict]] = None,
+        config: Optional[types.EvaluateInstancesConfigOrDict] = None,
     ) -> types.EvaluateInstancesResponse:
         """
         Evaluates instances based on a given metric.
@@ -3188,8 +3188,8 @@ async def _evaluate_instances(
             autorater_config=autorater_config,
             metrics=metrics,
             instance=instance,
-            config=config,
             metric_sources=metric_sources,
+            config=config,
         )

         request_url_dict: Optional[dict[str, str]]
@@ -3417,8 +3417,8 @@ async def _generate_rubrics(
             genai_types.PredefinedMetricSpecOrDict
         ] = None,
         rubric_generation_spec: Optional[genai_types.RubricGenerationSpecOrDict] = None,
-        config: Optional[types.RubricGenerationConfigOrDict] = None,
         metric_resource_name: Optional[str] = None,
+        config: Optional[types.RubricGenerationConfigOrDict] = None,
    ) -> types.GenerateInstanceRubricsResponse:
         """
         Generates rubrics for a given prompt.
@@ -3428,8 +3428,8 @@ async def _generate_rubrics(
             contents=contents,
             predefined_rubric_generation_spec=predefined_rubric_generation_spec,
             rubric_generation_spec=rubric_generation_spec,
-            config=config,
             metric_resource_name=metric_resource_name,
+            config=config,
         )

         request_url_dict: Optional[dict[str, str]]
diff --git a/vertexai/_genai/types/common.py b/vertexai/_genai/types/common.py
index 97ecc9c3eb..079043202a 100644
--- a/vertexai/_genai/types/common.py
+++ b/vertexai/_genai/types/common.py
@@ -2217,13 +2217,13 @@ class EvaluationRunMetric(_common.BaseModel):
     metric: Optional[str] = Field(
         default=None, description="""The name of the metric."""
     )
-    metric_config: Optional[UnifiedMetric] = Field(
-        default=None, description="""The unified metric used for evaluation run."""
-    )
     metric_resource_name: Optional[str] = Field(
         default=None, description="""The resource name of the metric definition.
      Example: projects/{project}/locations/{location}/evaluationMetrics/{evaluation_metric_id}""",
     )
+    metric_config: Optional[UnifiedMetric] = Field(
+        default=None, description="""The unified metric used for evaluation run."""
+    )


 class EvaluationRunMetricDict(TypedDict, total=False):
@@ -2232,12 +2232,12 @@ class EvaluationRunMetricDict(TypedDict, total=False):
     metric: Optional[str]
     """The name of the metric."""

-    metric_config: Optional[UnifiedMetricDict]
-    """The unified metric used for evaluation run."""
-
     metric_resource_name: Optional[str]
     """The resource name of the metric definition.
      Example: projects/{project}/locations/{location}/evaluationMetrics/{evaluation_metric_id}"""

+    metric_config: Optional[UnifiedMetricDict]
+    """The unified metric used for evaluation run."""
+

 EvaluationRunMetricOrDict = Union[EvaluationRunMetric, EvaluationRunMetricDict]
@@ -4166,10 +4166,10 @@ class _EvaluateInstancesRequestParameters(_common.BaseModel):
     instance: Optional[EvaluationInstance] = Field(
         default=None, description="""The instance to be evaluated."""
     )
-    config: Optional[EvaluateInstancesConfig] = Field(default=None, description="""""")
     metric_sources: Optional[list[MetricSource]] = Field(
         default=None, description="""The metrics used for evaluation."""
     )
+    config: Optional[EvaluateInstancesConfig] = Field(default=None, description="""""")


 class _EvaluateInstancesRequestParametersDict(TypedDict, total=False):
@@ -4216,12 +4216,12 @@ class _EvaluateInstancesRequestParametersDict(TypedDict, total=False):
     instance: Optional[EvaluationInstanceDict]
     """The instance to be evaluated."""

-    config: Optional[EvaluateInstancesConfigDict]
-    """"""
-
     metric_sources: Optional[list[MetricSourceDict]]
     """The metrics used for evaluation."""

+    config: Optional[EvaluateInstancesConfigDict]
+    """"""
+

 _EvaluateInstancesRequestParametersOrDict = Union[
     _EvaluateInstancesRequestParameters, _EvaluateInstancesRequestParametersDict
@@ -5124,11 +5124,11 @@ class _GenerateInstanceRubricsRequest(_common.BaseModel):
         default=None,
         description="""Specification for how the rubrics should be generated.""",
     )
-    config: Optional[RubricGenerationConfig] = Field(default=None, description="""""")
     metric_resource_name: Optional[str] = Field(
         default=None,
         description="""Registered metric resource name. If this field is set, the configuration provided in this field is used for rubric generation. The `predefined_rubric_generation_spec` and `rubric_generation_spec` fields will be ignored.""",
     )
+    config: Optional[RubricGenerationConfig] = Field(default=None, description="""""")


 class _GenerateInstanceRubricsRequestDict(TypedDict, total=False):
@@ -5148,12 +5148,12 @@ class _GenerateInstanceRubricsRequestDict(TypedDict, total=False):
     rubric_generation_spec: Optional[genai_types.RubricGenerationSpecDict]
     """Specification for how the rubrics should be generated."""

-    config: Optional[RubricGenerationConfigDict]
-    """"""
-
     metric_resource_name: Optional[str]
     """Registered metric resource name. If this field is set, the configuration provided in this field is used for rubric generation. The `predefined_rubric_generation_spec` and `rubric_generation_spec` fields will be ignored."""

+    config: Optional[RubricGenerationConfigDict]
+    """"""
+

 _GenerateInstanceRubricsRequestOrDict = Union[
     _GenerateInstanceRubricsRequest, _GenerateInstanceRubricsRequestDict
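
Note: the reordering above only changes declaration order; every call site in this
patch constructs the request models and calls these methods with keyword arguments,
so existing callers behave identically. A minimal sketch of why, using a stand-in
pydantic model rather than the real vertexai types (the class name
`_EvaluateInstancesParams` and the simplified field types here are hypothetical,
chosen only to mirror the post-patch field order of
`_EvaluateInstancesRequestParameters`):

    from typing import Optional

    from pydantic import BaseModel, Field


    class _EvaluateInstancesParams(BaseModel):
        # Stand-in with the post-patch field order: metric_sources, then config.
        metric_sources: Optional[list[str]] = Field(
            default=None, description="The metrics used for evaluation."
        )
        config: Optional[dict] = Field(default=None, description="")


    # Keyword construction is order-insensitive, so both spellings build the
    # same object regardless of how the fields are declared.
    a = _EvaluateInstancesParams(config={"sample": 1}, metric_sources=["m1"])
    b = _EvaluateInstancesParams(metric_sources=["m1"], config={"sample": 1})
    assert a == b

    # The one observable effect of the reordering is serialization order:
    # pydantic v2 dumps fields in declaration order.
    assert list(a.model_dump()) == ["metric_sources", "config"]

Because the parameters moved here are never passed positionally inside the SDK,
the declaration order can follow the wire format without a breaking change.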