24 changes: 12 additions & 12 deletions tests/unit/vertexai/genai/replays/test_create_evaluation_run.py
@@ -22,24 +22,24 @@
 GCS_DEST = "gs://lakeyk-limited-bucket/eval_run_output"
 GENERAL_QUALITY_METRIC = types.EvaluationRunMetric(
     metric="general_quality_v1",
-    metric_config=types.UnifiedMetric(
-        predefined_metric_spec=types.PredefinedMetricSpec(
+    metric_config=genai_types.UnifiedMetric(
+        predefined_metric_spec=genai_types.PredefinedMetricSpec(
             metric_spec_name="general_quality_v1",
         )
     ),
 )
 FINAL_RESPONSE_QUALITY_METRIC = types.EvaluationRunMetric(
     metric="final_response_quality_v1",
-    metric_config=types.UnifiedMetric(
-        predefined_metric_spec=types.PredefinedMetricSpec(
+    metric_config=genai_types.UnifiedMetric(
+        predefined_metric_spec=genai_types.PredefinedMetricSpec(
             metric_spec_name="final_response_quality_v1",
         )
     ),
 )
 LLM_METRIC = types.EvaluationRunMetric(
     metric="llm_metric",
-    metric_config=types.UnifiedMetric(
-        llm_based_metric_spec=types.LLMBasedMetricSpec(
+    metric_config=genai_types.UnifiedMetric(
+        llm_based_metric_spec=genai_types.LLMBasedMetricSpec(
             metric_prompt_template=(
                 "\nEvaluate the fluency of the response. Provide a score from 1-5."
             )
@@ -48,17 +48,17 @@
 )
 EXACT_MATCH_COMPUTATION_BASED_METRIC = types.EvaluationRunMetric(
     metric="exact_match",
-    metric_config=types.UnifiedMetric(
-        computation_based_metric_spec=types.ComputationBasedMetricSpec(
-            type=types.ComputationBasedMetricType.EXACT_MATCH,
+    metric_config=genai_types.UnifiedMetric(
+        computation_based_metric_spec=genai_types.ComputationBasedMetricSpec(
+            type=genai_types.ComputationBasedMetricType.EXACT_MATCH,
         )
     ),
 )
 BLEU_COMPUTATION_BASED_METRIC = types.EvaluationRunMetric(
     metric="exact_match_2",
-    metric_config=types.UnifiedMetric(
-        computation_based_metric_spec=types.ComputationBasedMetricSpec(
-            type=types.ComputationBasedMetricType.BLEU,
+    metric_config=genai_types.UnifiedMetric(
+        computation_based_metric_spec=genai_types.ComputationBasedMetricSpec(
+            type=genai_types.ComputationBasedMetricType.BLEU,
             parameters={"use_effective_order": True},
         )
     ),
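Note (not part of the diff): the constants above now take their nested metric specs from genai_types, while EvaluationRunMetric itself still comes from the local vertexai._genai types module. A minimal sketch of how one such constant is built and serialized, assuming genai_types aliases google.genai.types (the usual import in this package) and that these spec classes are pydantic models exposing camelCase field aliases:

# Sketch only; the imports and the by-alias serialization behavior are
# assumptions, not an excerpt from this PR.
from google.genai import types as genai_types
from vertexai._genai import types

metric = types.EvaluationRunMetric(
    metric="general_quality_v1",
    metric_config=genai_types.UnifiedMetric(
        predefined_metric_spec=genai_types.PredefinedMetricSpec(
            metric_spec_name="general_quality_v1",
        )
    ),
)

# If the models carry camelCase aliases, the wire-format payload falls out of
# a by-alias dump, e.g.
# {"predefinedMetricSpec": {"metricSpecName": "general_quality_v1"}}
print(metric.metric_config.model_dump(by_alias=True, exclude_none=True))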
8 changes: 4 additions & 4 deletions vertexai/_genai/_evals_common.py
@@ -1041,8 +1041,8 @@ def _resolve_evaluation_run_metrics(
             resolved_metrics_list.append(
                 types.EvaluationRunMetric(
                     metric=resolved_metric.name,
-                    metric_config=types.UnifiedMetric(
-                        predefined_metric_spec=types.PredefinedMetricSpec(
+                    metric_config=genai_types.UnifiedMetric(
+                        predefined_metric_spec=genai_types.PredefinedMetricSpec(
                             metric_spec_name=resolved_metric.name,
                         )
                     ),
@@ -1072,8 +1072,8 @@ def _resolve_evaluation_run_metrics(
             resolved_metrics_list.append(
                 types.EvaluationRunMetric(
                     metric=resolved_metric.name,
-                    metric_config=types.UnifiedMetric(
-                        predefined_metric_spec=types.PredefinedMetricSpec(
+                    metric_config=genai_types.UnifiedMetric(
+                        predefined_metric_spec=genai_types.PredefinedMetricSpec(
                             metric_spec_name=resolved_metric.name,
                         )
                     ),
238 changes: 5 additions & 233 deletions vertexai/_genai/evals.py
@@ -81,13 +81,7 @@ def _CreateEvaluationRunParameters_to_vertex(
         setv(to_object, ["dataSource"], getv(from_object, ["data_source"]))

     if getv(from_object, ["evaluation_config"]) is not None:
-        setv(
-            to_object,
-            ["evaluationConfig"],
-            _EvaluationRunConfig_to_vertex(
-                getv(from_object, ["evaluation_config"]), to_object
-            ),
-        )
+        setv(to_object, ["evaluationConfig"], getv(from_object, ["evaluation_config"]))

     if getv(from_object, ["labels"]) is not None:
         setv(to_object, ["labels"], getv(from_object, ["labels"]))
@@ -118,36 +112,6 @@ def _CreateEvaluationSetParameters_to_vertex(
     return to_object


-def _CustomCodeExecutionSpec_from_vertex(
-    from_object: Union[dict[str, Any], object],
-    parent_object: Optional[dict[str, Any]] = None,
-) -> dict[str, Any]:
-    to_object: dict[str, Any] = {}
-    if getv(from_object, ["evaluation_function"]) is not None:
-        setv(
-            to_object,
-            ["remote_custom_function"],
-            getv(from_object, ["evaluation_function"]),
-        )
-
-    return to_object
-
-
-def _CustomCodeExecutionSpec_to_vertex(
-    from_object: Union[dict[str, Any], object],
-    parent_object: Optional[dict[str, Any]] = None,
-) -> dict[str, Any]:
-    to_object: dict[str, Any] = {}
-    if getv(from_object, ["remote_custom_function"]) is not None:
-        setv(
-            to_object,
-            ["evaluation_function"],
-            getv(from_object, ["remote_custom_function"]),
-        )
-
-    return to_object
-
-
 def _EvaluateInstancesRequestParameters_to_vertex(
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,
@@ -232,90 +196,6 @@ def _EvaluateInstancesRequestParameters_to_vertex(
     return to_object


-def _EvaluationRunConfig_from_vertex(
-    from_object: Union[dict[str, Any], object],
-    parent_object: Optional[dict[str, Any]] = None,
-) -> dict[str, Any]:
-    to_object: dict[str, Any] = {}
-    if getv(from_object, ["metrics"]) is not None:
-        setv(
-            to_object,
-            ["metrics"],
-            [
-                _EvaluationRunMetric_from_vertex(item, to_object)
-                for item in getv(from_object, ["metrics"])
-            ],
-        )
-
-    if getv(from_object, ["outputConfig"]) is not None:
-        setv(to_object, ["output_config"], getv(from_object, ["outputConfig"]))
-
-    if getv(from_object, ["autoraterConfig"]) is not None:
-        setv(to_object, ["autorater_config"], getv(from_object, ["autoraterConfig"]))
-
-    return to_object
-
-
-def _EvaluationRunConfig_to_vertex(
-    from_object: Union[dict[str, Any], object],
-    parent_object: Optional[dict[str, Any]] = None,
-) -> dict[str, Any]:
-    to_object: dict[str, Any] = {}
-    if getv(from_object, ["metrics"]) is not None:
-        setv(
-            to_object,
-            ["metrics"],
-            [
-                _EvaluationRunMetric_to_vertex(item, to_object)
-                for item in getv(from_object, ["metrics"])
-            ],
-        )
-
-    if getv(from_object, ["output_config"]) is not None:
-        setv(to_object, ["outputConfig"], getv(from_object, ["output_config"]))
-
-    if getv(from_object, ["autorater_config"]) is not None:
-        setv(to_object, ["autoraterConfig"], getv(from_object, ["autorater_config"]))
-
-    return to_object
-
-
-def _EvaluationRunMetric_from_vertex(
-    from_object: Union[dict[str, Any], object],
-    parent_object: Optional[dict[str, Any]] = None,
-) -> dict[str, Any]:
-    to_object: dict[str, Any] = {}
-    if getv(from_object, ["metric"]) is not None:
-        setv(to_object, ["metric"], getv(from_object, ["metric"]))
-
-    if getv(from_object, ["metricConfig"]) is not None:
-        setv(
-            to_object,
-            ["metric_config"],
-            _UnifiedMetric_from_vertex(getv(from_object, ["metricConfig"]), to_object),
-        )
-
-    return to_object
-
-
-def _EvaluationRunMetric_to_vertex(
-    from_object: Union[dict[str, Any], object],
-    parent_object: Optional[dict[str, Any]] = None,
-) -> dict[str, Any]:
-    to_object: dict[str, Any] = {}
-    if getv(from_object, ["metric"]) is not None:
-        setv(to_object, ["metric"], getv(from_object, ["metric"]))
-
-    if getv(from_object, ["metric_config"]) is not None:
-        setv(
-            to_object,
-            ["metricConfig"],
-            _UnifiedMetric_to_vertex(getv(from_object, ["metric_config"]), to_object),
-        )
-
-    return to_object
-
-
 def _EvaluationRun_from_vertex(
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,
@@ -360,13 +240,7 @@ def _EvaluationRun_from_vertex(
         )

     if getv(from_object, ["evaluationConfig"]) is not None:
-        setv(
-            to_object,
-            ["evaluation_config"],
-            _EvaluationRunConfig_from_vertex(
-                getv(from_object, ["evaluationConfig"]), to_object
-            ),
-        )
+        setv(to_object, ["evaluation_config"], getv(from_object, ["evaluationConfig"]))

     if getv(from_object, ["inferenceConfigs"]) is not None:
         setv(to_object, ["inference_configs"], getv(from_object, ["inferenceConfigs"]))
@@ -536,108 +410,6 @@ def _RubricGenerationSpec_to_vertex(
     return to_object


-def _UnifiedMetric_from_vertex(
-    from_object: Union[dict[str, Any], object],
-    parent_object: Optional[dict[str, Any]] = None,
-) -> dict[str, Any]:
-    to_object: dict[str, Any] = {}
-    if getv(from_object, ["bleuSpec"]) is not None:
-        setv(to_object, ["bleu_spec"], getv(from_object, ["bleuSpec"]))
-
-    if getv(from_object, ["rougeSpec"]) is not None:
-        setv(to_object, ["rouge_spec"], getv(from_object, ["rougeSpec"]))
-
-    if getv(from_object, ["pointwiseMetricSpec"]) is not None:
-        setv(
-            to_object,
-            ["pointwise_metric_spec"],
-            getv(from_object, ["pointwiseMetricSpec"]),
-        )
-
-    if getv(from_object, ["llmBasedMetricSpec"]) is not None:
-        setv(
-            to_object,
-            ["llm_based_metric_spec"],
-            getv(from_object, ["llmBasedMetricSpec"]),
-        )
-
-    if getv(from_object, ["customCodeExecutionSpec"]) is not None:
-        setv(
-            to_object,
-            ["custom_code_execution_spec"],
-            _CustomCodeExecutionSpec_from_vertex(
-                getv(from_object, ["customCodeExecutionSpec"]), to_object
-            ),
-        )
-
-    if getv(from_object, ["predefinedMetricSpec"]) is not None:
-        setv(
-            to_object,
-            ["predefined_metric_spec"],
-            getv(from_object, ["predefinedMetricSpec"]),
-        )
-
-    if getv(from_object, ["computationBasedMetricSpec"]) is not None:
-        setv(
-            to_object,
-            ["computation_based_metric_spec"],
-            getv(from_object, ["computationBasedMetricSpec"]),
-        )
-
-    return to_object
-
-
-def _UnifiedMetric_to_vertex(
-    from_object: Union[dict[str, Any], object],
-    parent_object: Optional[dict[str, Any]] = None,
-) -> dict[str, Any]:
-    to_object: dict[str, Any] = {}
-    if getv(from_object, ["bleu_spec"]) is not None:
-        setv(to_object, ["bleuSpec"], getv(from_object, ["bleu_spec"]))
-
-    if getv(from_object, ["rouge_spec"]) is not None:
-        setv(to_object, ["rougeSpec"], getv(from_object, ["rouge_spec"]))
-
-    if getv(from_object, ["pointwise_metric_spec"]) is not None:
-        setv(
-            to_object,
-            ["pointwiseMetricSpec"],
-            getv(from_object, ["pointwise_metric_spec"]),
-        )
-
-    if getv(from_object, ["llm_based_metric_spec"]) is not None:
-        setv(
-            to_object,
-            ["llmBasedMetricSpec"],
-            getv(from_object, ["llm_based_metric_spec"]),
-        )
-
-    if getv(from_object, ["custom_code_execution_spec"]) is not None:
-        setv(
-            to_object,
-            ["customCodeExecutionSpec"],
-            _CustomCodeExecutionSpec_to_vertex(
-                getv(from_object, ["custom_code_execution_spec"]), to_object
-            ),
-        )
-
-    if getv(from_object, ["predefined_metric_spec"]) is not None:
-        setv(
-            to_object,
-            ["predefinedMetricSpec"],
-            getv(from_object, ["predefined_metric_spec"]),
-        )
-
-    if getv(from_object, ["computation_based_metric_spec"]) is not None:
-        setv(
-            to_object,
-            ["computationBasedMetricSpec"],
-            getv(from_object, ["computation_based_metric_spec"]),
-        )
-
-    return to_object
-
-
 class Evals(_api_module.BaseModule):

     def _create_evaluation_item(
@@ -908,7 +680,7 @@ def _generate_rubrics(
         *,
         contents: list[genai_types.ContentOrDict],
         predefined_rubric_generation_spec: Optional[
-            types.PredefinedMetricSpecOrDict
+            genai_types.PredefinedMetricSpecOrDict
         ] = None,
         rubric_generation_spec: Optional[types.RubricGenerationSpecOrDict] = None,
        config: Optional[types.RubricGenerationConfigOrDict] = None,
@@ -1480,7 +1252,7 @@ def generate_rubrics(
                 "Could not determine metric_spec_name from predefined_spec_name"
             )

-        predefined_spec = types.PredefinedMetricSpec(
+        predefined_spec = genai_types.PredefinedMetricSpec(
             metric_spec_name=actual_predefined_spec_name,
             metric_spec_parameters=metric_spec_parameters,
         )
@@ -2107,7 +1879,7 @@ async def _generate_rubrics(
         *,
         contents: list[genai_types.ContentOrDict],
         predefined_rubric_generation_spec: Optional[
-            types.PredefinedMetricSpecOrDict
+            genai_types.PredefinedMetricSpecOrDict
         ] = None,
         rubric_generation_spec: Optional[types.RubricGenerationSpecOrDict] = None,
        config: Optional[types.RubricGenerationConfigOrDict] = None,
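Note (not part of the diff): the deleted helpers (_EvaluationRunConfig_*, _EvaluationRunMetric_*, _UnifiedMetric_*, _CustomCodeExecutionSpec_*) were hand-written field-by-field snake_case/camelCase mappers, and the request and response converters now pass evaluation_config through unchanged. A hedged sketch of the round trip this presumably relies on, assuming the genai_types models are pydantic models whose fields carry camelCase aliases and accept them on validation. (The _CustomCodeExecutionSpec helpers additionally renamed evaluation_function to remote_custom_function, a mapping the new types are presumably expected to carry themselves.)

# Sketch only; names and behavior are assumptions about google.genai.types,
# not an excerpt from this PR.
from google.genai import types as genai_types

config = genai_types.UnifiedMetric(
    llm_based_metric_spec=genai_types.LLMBasedMetricSpec(
        metric_prompt_template="Evaluate the fluency of the response."
    )
)

# snake_case -> camelCase request body (roughly what _UnifiedMetric_to_vertex did):
wire = config.model_dump(by_alias=True, exclude_none=True)
# e.g. {"llmBasedMetricSpec": {"metricPromptTemplate": "Evaluate the fluency of the response."}}

# camelCase response -> model (roughly what _UnifiedMetric_from_vertex did):
parsed = genai_types.UnifiedMetric.model_validate(wire)
assert parsed.llm_based_metric_spec is not None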