diff --git a/vertexai/_genai/a2a_tasks.py b/vertexai/_genai/a2a_tasks.py new file mode 100644 index 0000000000..edd6272e98 --- /dev/null +++ b/vertexai/_genai/a2a_tasks.py @@ -0,0 +1,544 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# Code generated by the Google Gen AI SDK generator DO NOT EDIT. + +import json +import logging +from typing import Any, Optional, Union +from urllib.parse import urlencode + +from google.genai import _api_module +from google.genai import _common +from google.genai._common import get_value_by_path as getv +from google.genai._common import set_value_by_path as setv + +from . import types + + +logger = logging.getLogger("vertexai_genai.a2atasks") + + +def _CreateAgentEngineTaskRequestParameters_to_vertex( + from_object: Union[dict[str, Any], object], + parent_object: Optional[dict[str, Any]] = None, +) -> dict[str, Any]: + to_object: dict[str, Any] = {} + if getv(from_object, ["name"]) is not None: + setv(to_object, ["_url", "name"], getv(from_object, ["name"])) + + if getv(from_object, ["a2a_task_id"]) is not None: + setv(to_object, ["_query", "a2a_task_id"], getv(from_object, ["a2a_task_id"])) + + if getv(from_object, ["a2a_task"]) is not None: + setv(to_object, ["a2aTask"], getv(from_object, ["a2a_task"])) + + if getv(from_object, ["config"]) is not None: + setv(to_object, ["config"], getv(from_object, ["config"])) + + return to_object + + +def _GetAgentEngineTaskRequestParameters_to_vertex( + from_object: Union[dict[str, Any], object], + parent_object: Optional[dict[str, Any]] = None, +) -> dict[str, Any]: + to_object: dict[str, Any] = {} + if getv(from_object, ["name"]) is not None: + setv(to_object, ["_url", "name"], getv(from_object, ["name"])) + + if getv(from_object, ["config"]) is not None: + setv(to_object, ["config"], getv(from_object, ["config"])) + + return to_object + + +def _ListReasoningEngineTasksConfig_to_vertex( + from_object: Union[dict[str, Any], object], + parent_object: Optional[dict[str, Any]] = None, +) -> dict[str, Any]: + to_object: dict[str, Any] = {} + + if getv(from_object, ["page_size"]) is not None: + setv(parent_object, ["_query", "pageSize"], getv(from_object, ["page_size"])) + + if getv(from_object, ["page_token"]) is not None: + setv(parent_object, ["_query", "pageToken"], getv(from_object, ["page_token"])) + + if getv(from_object, ["filter"]) is not None: + setv(parent_object, ["_query", "filter"], getv(from_object, ["filter"])) + + return to_object + + +def _ListReasoningEngineTasksRequestParameters_to_vertex( + from_object: Union[dict[str, Any], object], + parent_object: Optional[dict[str, Any]] = None, +) -> dict[str, Any]: + to_object: dict[str, Any] = {} + if getv(from_object, ["name"]) is not None: + setv(to_object, ["_url", "name"], getv(from_object, ["name"])) + + if getv(from_object, ["config"]) is not None: + setv( + to_object, + ["config"], + _ListReasoningEngineTasksConfig_to_vertex( + getv(from_object, ["config"]), to_object + ), + ) + + return to_object + + +class 
A2aTasks(_api_module.BaseModule): + + def get( + self, + *, + name: str, + config: Optional[types.GetAgentEngineTaskConfigOrDict] = None, + ) -> types.A2aTask: + """ + Gets an agent engine task. + + Args: + name (str): Required. The name of the Agent Engine task to get. Format: + `projects/{project}/locations/{location}/reasoningEngines/{resource_id}/a2aTasks/{task_id}`. + config (GetAgentEngineTaskConfig): + Optional. Additional configurations for getting the Agent Engine task. + + Returns: + AgentEngineTask: The requested Agent Engine task. + + """ + + parameter_model = types._GetAgentEngineTaskRequestParameters( + name=name, + config=config, + ) + + request_url_dict: Optional[dict[str, str]] + if not self._api_client.vertexai: + raise ValueError("This method is only supported in the Vertex AI client.") + else: + request_dict = _GetAgentEngineTaskRequestParameters_to_vertex( + parameter_model + ) + request_url_dict = request_dict.get("_url") + if request_url_dict: + path = "{name}".format_map(request_url_dict) + else: + path = "{name}" + + query_params = request_dict.get("_query") + if query_params: + path = f"{path}?{urlencode(query_params)}" + # TODO: remove the hack that pops config. + request_dict.pop("config", None) + + http_options: Optional[types.HttpOptions] = None + if ( + parameter_model.config is not None + and parameter_model.config.http_options is not None + ): + http_options = parameter_model.config.http_options + + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response = self._api_client.request("get", path, request_dict, http_options) + + response_dict = {} if not response.body else json.loads(response.body) + + return_value = types.A2aTask._from_response( + response=response_dict, kwargs=parameter_model.model_dump() + ) + + self._api_client._verify_response(return_value) + return return_value + + def _list( + self, + *, + name: str, + config: Optional[types.ListReasoningEngineTasksConfigOrDict] = None, + ) -> types.ListReasoningEngineTasksResponse: + """ + Lists Agent Engine tasks. + + Args: + name (str): Required. The name of the Agent Engine to list tasks for. Format: + `projects/{project}/locations/{location}/reasoningEngines/{resource_id}`. + config (ListAgentEngineTasksConfig): + Optional. Additional configurations for listing the Agent Engine tasks. + + Returns: + ListAgentEngineTasksResponse: The requested Agent Engine tasks. + + """ + + parameter_model = types._ListReasoningEngineTasksRequestParameters( + name=name, + config=config, + ) + + request_url_dict: Optional[dict[str, str]] + if not self._api_client.vertexai: + raise ValueError("This method is only supported in the Vertex AI client.") + else: + request_dict = _ListReasoningEngineTasksRequestParameters_to_vertex( + parameter_model + ) + request_url_dict = request_dict.get("_url") + if request_url_dict: + path = "{name}/a2aTasks".format_map(request_url_dict) + else: + path = "{name}/a2aTasks" + + query_params = request_dict.get("_query") + if query_params: + path = f"{path}?{urlencode(query_params)}" + # TODO: remove the hack that pops config. 
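+        # `config` is request-level only: pageSize/pageToken/filter were already
+        # mapped into the query string above and http_options is applied below,
+        # so nothing from it belongs in the request body.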
+        request_dict.pop("config", None)
+
+        http_options: Optional[types.HttpOptions] = None
+        if (
+            parameter_model.config is not None
+            and parameter_model.config.http_options is not None
+        ):
+            http_options = parameter_model.config.http_options
+
+        request_dict = _common.convert_to_dict(request_dict)
+        request_dict = _common.encode_unserializable_types(request_dict)
+
+        response = self._api_client.request("get", path, request_dict, http_options)
+
+        response_dict = {} if not response.body else json.loads(response.body)
+
+        return_value = types.ListReasoningEngineTasksResponse._from_response(
+            response=response_dict, kwargs=parameter_model.model_dump()
+        )
+
+        self._api_client._verify_response(return_value)
+        return return_value
+
+    def create(
+        self,
+        *,
+        name: str,
+        a2a_task_id: str,
+        a2a_task: Optional[types.A2aTaskOrDict] = None,
+        config: Optional[types.CreateAgentEngineTaskConfigOrDict] = None,
+    ) -> types.A2aTask:
+        """
+        Creates a new task in the Agent Engine.
+
+        Args:
+            name (str): Required. The name of the Agent Engine to create the task under. Format:
+                `projects/{project}/locations/{location}/reasoningEngines/{resource_id}`.
+            a2a_task_id (str): Required. The ID of the task.
+            a2a_task (A2aTask): Optional. The task to create.
+            config (CreateAgentEngineTaskConfig):
+                Optional. Additional configurations for creating the Agent Engine task.
+
+        Returns:
+            A2aTask: The created Agent Engine task.
+
+        """
+
+        parameter_model = types._CreateAgentEngineTaskRequestParameters(
+            name=name,
+            a2a_task_id=a2a_task_id,
+            a2a_task=a2a_task,
+            config=config,
+        )
+
+        request_url_dict: Optional[dict[str, str]]
+        if not self._api_client.vertexai:
+            raise ValueError("This method is only supported in the Vertex AI client.")
+        else:
+            request_dict = _CreateAgentEngineTaskRequestParameters_to_vertex(
+                parameter_model
+            )
+            request_url_dict = request_dict.get("_url")
+            if request_url_dict:
+                path = "{name}/a2aTasks".format_map(request_url_dict)
+            else:
+                path = "{name}/a2aTasks"
+
+        query_params = request_dict.get("_query")
+        if query_params:
+            path = f"{path}?{urlencode(query_params)}"
+        # TODO: remove the hack that pops config.
+        request_dict.pop("config", None)
+
+        http_options: Optional[types.HttpOptions] = None
+        if (
+            parameter_model.config is not None
+            and parameter_model.config.http_options is not None
+        ):
+            http_options = parameter_model.config.http_options
+
+        request_dict = _common.convert_to_dict(request_dict)
+        request_dict = _common.encode_unserializable_types(request_dict)
+
+        response = self._api_client.request("post", path, request_dict, http_options)
+
+        response_dict = {} if not response.body else json.loads(response.body)
+
+        return_value = types.A2aTask._from_response(
+            response=response_dict, kwargs=parameter_model.model_dump()
+        )
+
+        self._api_client._verify_response(return_value)
+        return return_value
+
+    pass
+
+
+# def create(
+#     self,
+#     *,
+#     name: str,
+#     spec: Optional[types.SandboxEnvironmentSpecOrDict] = None,
+#     config: Optional[types.CreateAgentEngineTaskConfig] = None,
+# ) -> types.A2aTask:
+#     """Creates a new sandbox in the Agent Engine.
+#
+#     Args:
+#         name (str):
+#             Required. The name of the agent engine to create task for.
+#             projects/{project}/locations/{location}/reasoningEngines/{resource_id}
+#         config (Optional[CreateAgentEngineTaskConfig]):
+#             Optional. The configuration for the sandbox.
+#
+#     Returns:
+#         AgentEngineSandboxOperation: The operation for creating the sandbox.
+# """ +# return self._create( +# name=name, +# config=config, +# ) + + +class AsyncA2aTasks(_api_module.BaseModule): + + async def get( + self, + *, + name: str, + config: Optional[types.GetAgentEngineTaskConfigOrDict] = None, + ) -> types.A2aTask: + """ + Gets an agent engine task. + + Args: + name (str): Required. The name of the Agent Engine task to get. Format: + `projects/{project}/locations/{location}/reasoningEngines/{resource_id}/a2aTasks/{task_id}`. + config (GetAgentEngineTaskConfig): + Optional. Additional configurations for getting the Agent Engine task. + + Returns: + AgentEngineTask: The requested Agent Engine task. + + """ + + parameter_model = types._GetAgentEngineTaskRequestParameters( + name=name, + config=config, + ) + + request_url_dict: Optional[dict[str, str]] + if not self._api_client.vertexai: + raise ValueError("This method is only supported in the Vertex AI client.") + else: + request_dict = _GetAgentEngineTaskRequestParameters_to_vertex( + parameter_model + ) + request_url_dict = request_dict.get("_url") + if request_url_dict: + path = "{name}".format_map(request_url_dict) + else: + path = "{name}" + + query_params = request_dict.get("_query") + if query_params: + path = f"{path}?{urlencode(query_params)}" + # TODO: remove the hack that pops config. + request_dict.pop("config", None) + + http_options: Optional[types.HttpOptions] = None + if ( + parameter_model.config is not None + and parameter_model.config.http_options is not None + ): + http_options = parameter_model.config.http_options + + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response = await self._api_client.async_request( + "get", path, request_dict, http_options + ) + + response_dict = {} if not response.body else json.loads(response.body) + + return_value = types.A2aTask._from_response( + response=response_dict, kwargs=parameter_model.model_dump() + ) + + self._api_client._verify_response(return_value) + return return_value + + async def _list( + self, + *, + name: str, + config: Optional[types.ListReasoningEngineTasksConfigOrDict] = None, + ) -> types.ListReasoningEngineTasksResponse: + """ + Lists Agent Engine tasks. + + Args: + name (str): Required. The name of the Agent Engine to list tasks for. Format: + `projects/{project}/locations/{location}/reasoningEngines/{resource_id}`. + config (ListAgentEngineTasksConfig): + Optional. Additional configurations for listing the Agent Engine tasks. + + Returns: + ListAgentEngineTasksResponse: The requested Agent Engine tasks. + + """ + + parameter_model = types._ListReasoningEngineTasksRequestParameters( + name=name, + config=config, + ) + + request_url_dict: Optional[dict[str, str]] + if not self._api_client.vertexai: + raise ValueError("This method is only supported in the Vertex AI client.") + else: + request_dict = _ListReasoningEngineTasksRequestParameters_to_vertex( + parameter_model + ) + request_url_dict = request_dict.get("_url") + if request_url_dict: + path = "{name}/a2aTasks".format_map(request_url_dict) + else: + path = "{name}/a2aTasks" + + query_params = request_dict.get("_query") + if query_params: + path = f"{path}?{urlencode(query_params)}" + # TODO: remove the hack that pops config. 
+        request_dict.pop("config", None)
+
+        http_options: Optional[types.HttpOptions] = None
+        if (
+            parameter_model.config is not None
+            and parameter_model.config.http_options is not None
+        ):
+            http_options = parameter_model.config.http_options
+
+        request_dict = _common.convert_to_dict(request_dict)
+        request_dict = _common.encode_unserializable_types(request_dict)
+
+        response = await self._api_client.async_request(
+            "get", path, request_dict, http_options
+        )
+
+        response_dict = {} if not response.body else json.loads(response.body)
+
+        return_value = types.ListReasoningEngineTasksResponse._from_response(
+            response=response_dict, kwargs=parameter_model.model_dump()
+        )
+
+        self._api_client._verify_response(return_value)
+        return return_value
+
+    async def create(
+        self,
+        *,
+        name: str,
+        a2a_task_id: str,
+        a2a_task: Optional[types.A2aTaskOrDict] = None,
+        config: Optional[types.CreateAgentEngineTaskConfigOrDict] = None,
+    ) -> types.A2aTask:
+        """
+        Creates a new task in the Agent Engine.
+
+        Args:
+            name (str): Required. The name of the Agent Engine to create the task under. Format:
+                `projects/{project}/locations/{location}/reasoningEngines/{resource_id}`.
+            a2a_task_id (str): Required. The ID of the task.
+            a2a_task (A2aTask): Optional. The task to create.
+            config (CreateAgentEngineTaskConfig):
+                Optional. Additional configurations for creating the Agent Engine task.
+
+        Returns:
+            A2aTask: The created Agent Engine task.
+
+        """
+
+        parameter_model = types._CreateAgentEngineTaskRequestParameters(
+            name=name,
+            a2a_task_id=a2a_task_id,
+            a2a_task=a2a_task,
+            config=config,
+        )
+
+        request_url_dict: Optional[dict[str, str]]
+        if not self._api_client.vertexai:
+            raise ValueError("This method is only supported in the Vertex AI client.")
+        else:
+            request_dict = _CreateAgentEngineTaskRequestParameters_to_vertex(
+                parameter_model
+            )
+            request_url_dict = request_dict.get("_url")
+            if request_url_dict:
+                path = "{name}/a2aTasks".format_map(request_url_dict)
+            else:
+                path = "{name}/a2aTasks"
+
+        query_params = request_dict.get("_query")
+        if query_params:
+            path = f"{path}?{urlencode(query_params)}"
+        # TODO: remove the hack that pops config.
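+        # The converter above placed `a2a_task_id` in the query string and the task
+        # payload under the "a2aTask" body key; `config` itself only carries
+        # client-side options such as http_options and is not sent to the API.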
+ request_dict.pop("config", None) + + http_options: Optional[types.HttpOptions] = None + if ( + parameter_model.config is not None + and parameter_model.config.http_options is not None + ): + http_options = parameter_model.config.http_options + + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response = await self._api_client.async_request( + "post", path, request_dict, http_options + ) + + response_dict = {} if not response.body else json.loads(response.body) + + return_value = types.A2aTask._from_response( + response=response_dict, kwargs=parameter_model.model_dump() + ) + + self._api_client._verify_response(return_value) + return return_value diff --git a/vertexai/_genai/agent_engines.py b/vertexai/_genai/agent_engines.py index 0b4d8ecb9a..335573eb76 100644 --- a/vertexai/_genai/agent_engines.py +++ b/vertexai/_genai/agent_engines.py @@ -706,10 +706,26 @@ def _update( self._api_client._verify_response(return_value) return return_value + _a2a_tasks = None _memories = None _sandboxes = None _sessions = None + @property + def a2a_tasks(self) -> Any: + if self._a2a_tasks is None: + try: + # We need to lazy load the a2a_tasks module to handle the + # possibility of ImportError when dependencies are not installed. + self._a2a_tasks = importlib.import_module(".a2a_tasks", __package__) + except ImportError as e: + raise ImportError( + "The 'agent_engines.a2a_tasks' module requires additional " + "packages. Please install them using pip install " + "google-cloud-aiplatform[agent_engines]" + ) from e + return self._a2a_tasks.A2aTasks(self._api_client) # type: ignore[no-any-return] + @property def memories(self) -> "memories_module.Memories": if self._memories is None: diff --git a/vertexai/_genai/types/__init__.py b/vertexai/_genai/types/__init__.py index 19c3b021bb..b9680e9826 100644 --- a/vertexai/_genai/types/__init__.py +++ b/vertexai/_genai/types/__init__.py @@ -29,6 +29,7 @@ from .common import _CreateAgentEngineRequestParameters from .common import _CreateAgentEngineSandboxRequestParameters from .common import _CreateAgentEngineSessionRequestParameters +from .common import _CreateAgentEngineTaskRequestParameters from .common import _CreateDatasetParameters from .common import _CreateDatasetVersionParameters from .common import _CreateEvaluationItemParameters @@ -58,6 +59,7 @@ from .common import _GetAgentEngineSandboxRequestParameters from .common import _GetAgentEngineSessionOperationParameters from .common import _GetAgentEngineSessionRequestParameters +from .common import _GetAgentEngineTaskRequestParameters from .common import _GetCustomJobParameters from .common import _GetCustomJobParameters from .common import _GetDatasetOperationParameters @@ -77,6 +79,7 @@ from .common import _ListDatasetsRequestParameters from .common import _ListDatasetVersionsRequestParameters from .common import _ListMultimodalDatasetsRequestParameters +from .common import _ListReasoningEngineTasksRequestParameters from .common import _OptimizeRequestParameters from .common import _OptimizeRequestParameters from .common import _PurgeAgentEngineMemoriesRequestParameters @@ -89,6 +92,9 @@ from .common import _UpdateAgentEngineSessionRequestParameters from .common import _UpdateDatasetParameters from .common import _UpdateMultimodalDatasetParameters +from .common import A2aTask +from .common import A2aTaskDict +from .common import A2aTaskOrDict from .common import AcceleratorType from .common import AgentEngine from .common import 
AgentEngineConfig @@ -169,6 +175,9 @@ from .common import BleuResults from .common import BleuResultsDict from .common import BleuResultsOrDict +from .common import Blob +from .common import BlobDict +from .common import BlobOrDict from .common import CandidateResponse from .common import CandidateResponseDict from .common import CandidateResponseOrDict @@ -177,6 +186,9 @@ from .common import Chunk from .common import ChunkDict from .common import ChunkOrDict +from .common import CodeExecutionResult +from .common import CodeExecutionResultDict +from .common import CodeExecutionResultOrDict from .common import CometResult from .common import CometResultDict from .common import CometResultOrDict @@ -202,6 +214,9 @@ from .common import CreateAgentEngineSessionConfig from .common import CreateAgentEngineSessionConfigDict from .common import CreateAgentEngineSessionConfigOrDict +from .common import CreateAgentEngineTaskConfig +from .common import CreateAgentEngineTaskConfigDict +from .common import CreateAgentEngineTaskConfigOrDict from .common import CreateDatasetConfig from .common import CreateDatasetConfigDict from .common import CreateDatasetConfigOrDict @@ -392,12 +407,24 @@ from .common import ExactMatchSpec from .common import ExactMatchSpecDict from .common import ExactMatchSpecOrDict +from .common import ExecutableCode +from .common import ExecutableCodeDict +from .common import ExecutableCodeOrDict from .common import ExecuteCodeAgentEngineSandboxConfig from .common import ExecuteCodeAgentEngineSandboxConfigDict from .common import ExecuteCodeAgentEngineSandboxConfigOrDict from .common import ExecuteSandboxEnvironmentResponse from .common import ExecuteSandboxEnvironmentResponseDict from .common import ExecuteSandboxEnvironmentResponseOrDict +from .common import FileData +from .common import FileDataDict +from .common import FileDataOrDict +from .common import FunctionCall +from .common import FunctionCallDict +from .common import FunctionCallOrDict +from .common import FunctionResponse +from .common import FunctionResponseDict +from .common import FunctionResponseOrDict from .common import GcsSource from .common import GcsSourceDict from .common import GcsSourceOrDict @@ -456,6 +483,9 @@ from .common import GetAgentEngineSessionConfig from .common import GetAgentEngineSessionConfigDict from .common import GetAgentEngineSessionConfigOrDict +from .common import GetAgentEngineTaskConfig +from .common import GetAgentEngineTaskConfigDict +from .common import GetAgentEngineTaskConfigOrDict from .common import GetDatasetOperationConfig from .common import GetDatasetOperationConfigDict from .common import GetDatasetOperationConfigOrDict @@ -532,6 +562,12 @@ from .common import ListReasoningEnginesSessionsResponse from .common import ListReasoningEnginesSessionsResponseDict from .common import ListReasoningEnginesSessionsResponseOrDict +from .common import ListReasoningEngineTasksConfig +from .common import ListReasoningEngineTasksConfigDict +from .common import ListReasoningEngineTasksConfigOrDict +from .common import ListReasoningEngineTasksResponse +from .common import ListReasoningEngineTasksResponseDict +from .common import ListReasoningEngineTasksResponseOrDict from .common import LLMBasedMetricSpec from .common import LLMBasedMetricSpecDict from .common import LLMBasedMetricSpecOrDict @@ -648,6 +684,7 @@ from .common import OptimizeResponseEndpointOrDict from .common import OptimizeResponseOrDict from .common import OptimizeTarget +from .common import Outcome from .common import 
PairwiseChoice from .common import PairwiseMetricInput from .common import PairwiseMetricInputDict @@ -659,6 +696,12 @@ from .common import PairwiseMetricResultDict from .common import PairwiseMetricResultOrDict from .common import ParsedResponseUnion +from .common import Part +from .common import PartDict +from .common import PartialArg +from .common import PartialArgDict +from .common import PartialArgOrDict +from .common import PartOrDict from .common import PointwiseMetricInput from .common import PointwiseMetricInputDict from .common import PointwiseMetricInputOrDict @@ -948,6 +991,18 @@ from .common import SummaryMetric from .common import SummaryMetricDict from .common import SummaryMetricOrDict +from .common import TaskArtifact +from .common import TaskArtifactDict +from .common import TaskArtifactOrDict +from .common import TaskMessage +from .common import TaskMessageDict +from .common import TaskMessageOrDict +from .common import TaskOutput +from .common import TaskOutputDict +from .common import TaskOutputOrDict +from .common import TaskStatusDetails +from .common import TaskStatusDetailsDict +from .common import TaskStatusDetailsOrDict from .common import ToolCallValidInput from .common import ToolCallValidInputDict from .common import ToolCallValidInputOrDict @@ -1042,6 +1097,9 @@ from .common import VertexBaseConfig from .common import VertexBaseConfigDict from .common import VertexBaseConfigOrDict +from .common import VideoMetadata +from .common import VideoMetadataDict +from .common import VideoMetadataOrDict from .common import WinRateStats from .common import WinRateStatsDict from .common import WinRateStatsOrDict @@ -1050,6 +1108,60 @@ from .common import WorkerPoolSpecOrDict __all__ = [ + "GetAgentEngineTaskConfig", + "GetAgentEngineTaskConfigDict", + "GetAgentEngineTaskConfigOrDict", + "CodeExecutionResult", + "CodeExecutionResultDict", + "CodeExecutionResultOrDict", + "ExecutableCode", + "ExecutableCodeDict", + "ExecutableCodeOrDict", + "FileData", + "FileDataDict", + "FileDataOrDict", + "PartialArg", + "PartialArgDict", + "PartialArgOrDict", + "FunctionCall", + "FunctionCallDict", + "FunctionCallOrDict", + "FunctionResponse", + "FunctionResponseDict", + "FunctionResponseOrDict", + "Blob", + "BlobDict", + "BlobOrDict", + "VideoMetadata", + "VideoMetadataDict", + "VideoMetadataOrDict", + "Part", + "PartDict", + "PartOrDict", + "TaskArtifact", + "TaskArtifactDict", + "TaskArtifactOrDict", + "TaskOutput", + "TaskOutputDict", + "TaskOutputOrDict", + "TaskMessage", + "TaskMessageDict", + "TaskMessageOrDict", + "TaskStatusDetails", + "TaskStatusDetailsDict", + "TaskStatusDetailsOrDict", + "A2aTask", + "A2aTaskDict", + "A2aTaskOrDict", + "ListReasoningEngineTasksConfig", + "ListReasoningEngineTasksConfigDict", + "ListReasoningEngineTasksConfigOrDict", + "ListReasoningEngineTasksResponse", + "ListReasoningEngineTasksResponseDict", + "ListReasoningEngineTasksResponseOrDict", + "CreateAgentEngineTaskConfig", + "CreateAgentEngineTaskConfigDict", + "CreateAgentEngineTaskConfigOrDict", "CreateEvaluationItemConfig", "CreateEvaluationItemConfigDict", "CreateEvaluationItemConfigOrDict", @@ -1944,6 +2056,9 @@ "OptimizeJobConfig", "OptimizeJobConfigDict", "OptimizeJobConfigOrDict", + "State", + "Outcome", + "Language", "PairwiseChoice", "Strategy", "AcceleratorType", @@ -1953,9 +2068,7 @@ "AgentServerMode", "ManagedTopicEnum", "Operator", - "Language", "MachineConfig", - "State", "EvaluationItemType", "SamplingMethod", "RubricContentType", @@ -1987,6 +2100,9 @@ "MessageDict", 
"Importance", "ParsedResponseUnion", + "_GetAgentEngineTaskRequestParameters", + "_ListReasoningEngineTasksRequestParameters", + "_CreateAgentEngineTaskRequestParameters", "_CreateEvaluationItemParameters", "_CreateEvaluationRunParameters", "_CreateEvaluationSetParameters", diff --git a/vertexai/_genai/types/common.py b/vertexai/_genai/types/common.py index b56d563fdf..5f1417eb78 100644 --- a/vertexai/_genai/types/common.py +++ b/vertexai/_genai/types/common.py @@ -91,6 +91,53 @@ def _camel_key_to_snake(message: Any) -> Any: MetricSubclass = TypeVar("MetricSubclass", bound="Metric") +class State(_common.CaseInSensitiveEnum): + """Output only. The state of the task. The state of a new task is SUBMITTED by default. The state of a task can only be updated via AppendA2aTaskEvents API.""" + + STATE_UNSPECIFIED = "STATE_UNSPECIFIED" + """Task state unspecified. Default value if not set.""" + SUBMITTED = "SUBMITTED" + """Task is submitted and waiting to be processed.""" + WORKING = "WORKING" + """Task is actively being processed.""" + COMPLETED = "COMPLETED" + """Task is finished.""" + CANCELLED = "CANCELLED" + """Task is cancelled.""" + FAILED = "FAILED" + """Task has failed.""" + REJECTED = "REJECTED" + """Task is rejected by the system.""" + INPUT_REQUIRED = "INPUT_REQUIRED" + """Task requires input from the user.""" + AUTH_REQUIRED = "AUTH_REQUIRED" + """Task requires auth (e.g. OAuth) from the user.""" + PAUSED = "PAUSED" + """Task is paused.""" + + +class Outcome(_common.CaseInSensitiveEnum): + """Outcome of the code execution.""" + + OUTCOME_UNSPECIFIED = "OUTCOME_UNSPECIFIED" + """Unspecified status. This value should not be used.""" + OUTCOME_OK = "OUTCOME_OK" + """Code execution completed successfully.""" + OUTCOME_FAILED = "OUTCOME_FAILED" + """Code execution finished but with a failure. `stderr` should contain the reason.""" + OUTCOME_DEADLINE_EXCEEDED = "OUTCOME_DEADLINE_EXCEEDED" + """Code execution ran for too long, and was cancelled. There may or may not be a partial output present.""" + + +class Language(_common.CaseInSensitiveEnum): + """Programming language of the `code`.""" + + LANGUAGE_UNSPECIFIED = "LANGUAGE_UNSPECIFIED" + """Unspecified language. This value should not be used.""" + PYTHON = "PYTHON" + """Python >= 3.10, with numpy and simpy available.""" + + class PairwiseChoice(_common.CaseInSensitiveEnum): """Output only. Pairwise metric choice.""" @@ -256,17 +303,6 @@ class Operator(_common.CaseInSensitiveEnum): """Less than.""" -class Language(_common.CaseInSensitiveEnum): - """The coding language supported in this environment.""" - - LANGUAGE_UNSPECIFIED = "LANGUAGE_UNSPECIFIED" - """The default value. This value is unused.""" - LANGUAGE_PYTHON = "LANGUAGE_PYTHON" - """The coding language is Python.""" - LANGUAGE_JAVASCRIPT = "LANGUAGE_JAVASCRIPT" - """The coding language is JavaScript.""" - - class MachineConfig(_common.CaseInSensitiveEnum): """The machine config of the code execution environment.""" @@ -276,23 +312,6 @@ class MachineConfig(_common.CaseInSensitiveEnum): """The default value: milligcu 4000, memory 4 Gib""" -class State(_common.CaseInSensitiveEnum): - """Output only. The runtime state of the SandboxEnvironment.""" - - STATE_UNSPECIFIED = "STATE_UNSPECIFIED" - """The default value. 
This value is unused.""" - STATE_PROVISIONING = "STATE_PROVISIONING" - """Runtime resources are being allocated for the sandbox environment.""" - STATE_RUNNING = "STATE_RUNNING" - """Sandbox runtime is ready for serving.""" - STATE_DEPROVISIONING = "STATE_DEPROVISIONING" - """Sandbox runtime is halted, performing tear down tasks.""" - STATE_TERMINATED = "STATE_TERMINATED" - """Sandbox has terminated with underlying runtime failure.""" - STATE_DELETED = "STATE_DELETED" - """Sandbox runtime has been deleted.""" - - class EvaluationItemType(_common.CaseInSensitiveEnum): """The type of the EvaluationItem.""" @@ -420,6 +439,745 @@ class OptimizationMethod(_common.CaseInSensitiveEnum): """The data driven prompt optimizer designer for prompts from Android core API.""" +class GetAgentEngineTaskConfig(_common.BaseModel): + """Config for getting an Agent Engine Task.""" + + http_options: Optional[genai_types.HttpOptions] = Field( + default=None, description="""Used to override HTTP request options.""" + ) + + +class GetAgentEngineTaskConfigDict(TypedDict, total=False): + """Config for getting an Agent Engine Task.""" + + http_options: Optional[genai_types.HttpOptionsDict] + """Used to override HTTP request options.""" + + +GetAgentEngineTaskConfigOrDict = Union[ + GetAgentEngineTaskConfig, GetAgentEngineTaskConfigDict +] + + +class _GetAgentEngineTaskRequestParameters(_common.BaseModel): + """Parameters for getting an agent engine task.""" + + name: Optional[str] = Field( + default=None, description="""Name of the agent engine task.""" + ) + config: Optional[GetAgentEngineTaskConfig] = Field(default=None, description="""""") + + +class _GetAgentEngineTaskRequestParametersDict(TypedDict, total=False): + """Parameters for getting an agent engine task.""" + + name: Optional[str] + """Name of the agent engine task.""" + + config: Optional[GetAgentEngineTaskConfigDict] + """""" + + +_GetAgentEngineTaskRequestParametersOrDict = Union[ + _GetAgentEngineTaskRequestParameters, _GetAgentEngineTaskRequestParametersDict +] + + +class CodeExecutionResult(_common.BaseModel): + """Result of executing the [ExecutableCode]. Only generated when using the [CodeExecution] tool, and always follows a `part` containing the [ExecutableCode].""" + + outcome: Optional[Outcome] = Field( + default=None, description="""Required. Outcome of the code execution.""" + ) + output: Optional[str] = Field( + default=None, + description="""Optional. Contains stdout when code execution is successful, stderr or other description otherwise.""", + ) + + +class CodeExecutionResultDict(TypedDict, total=False): + """Result of executing the [ExecutableCode]. Only generated when using the [CodeExecution] tool, and always follows a `part` containing the [ExecutableCode].""" + + outcome: Optional[Outcome] + """Required. Outcome of the code execution.""" + + output: Optional[str] + """Optional. Contains stdout when code execution is successful, stderr or other description otherwise.""" + + +CodeExecutionResultOrDict = Union[CodeExecutionResult, CodeExecutionResultDict] + + +class ExecutableCode(_common.BaseModel): + """Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [CodeExecution] tool, in which the code will be automatically executed, and a corresponding [CodeExecutionResult] will also be generated.""" + + code: Optional[str] = Field( + default=None, description="""Required. 
The code to be executed.""" + ) + language: Optional[Language] = Field( + default=None, description="""Required. Programming language of the `code`.""" + ) + + +class ExecutableCodeDict(TypedDict, total=False): + """Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [CodeExecution] tool, in which the code will be automatically executed, and a corresponding [CodeExecutionResult] will also be generated.""" + + code: Optional[str] + """Required. The code to be executed.""" + + language: Optional[Language] + """Required. Programming language of the `code`.""" + + +ExecutableCodeOrDict = Union[ExecutableCode, ExecutableCodeDict] + + +class FileData(_common.BaseModel): + """URI based data.""" + + display_name: Optional[str] = Field( + default=None, + description="""Optional. Display name of the file data. Used to provide a label or filename to distinguish file datas. This field is only returned in PromptMessage for prompt management. It is currently used in the Gemini GenerateContent calls only when server side tools (code_execution, google_search, and url_context) are enabled.""", + ) + file_uri: Optional[str] = Field(default=None, description="""Required. URI.""") + mime_type: Optional[str] = Field( + default=None, + description="""Required. The IANA standard MIME type of the source data.""", + ) + + +class FileDataDict(TypedDict, total=False): + """URI based data.""" + + display_name: Optional[str] + """Optional. Display name of the file data. Used to provide a label or filename to distinguish file datas. This field is only returned in PromptMessage for prompt management. It is currently used in the Gemini GenerateContent calls only when server side tools (code_execution, google_search, and url_context) are enabled.""" + + file_uri: Optional[str] + """Required. URI.""" + + mime_type: Optional[str] + """Required. The IANA standard MIME type of the source data.""" + + +FileDataOrDict = Union[FileData, FileDataDict] + + +class PartialArg(_common.BaseModel): + """Partial argument value of the function call.""" + + null_value: Optional[Literal["NULL_VALUE"]] = Field( + default=None, description="""Optional. Represents a null value.""" + ) + number_value: Optional[float] = Field( + default=None, description="""Optional. Represents a double value.""" + ) + string_value: Optional[str] = Field( + default=None, description="""Optional. Represents a string value.""" + ) + bool_value: Optional[bool] = Field( + default=None, description="""Optional. Represents a boolean value.""" + ) + json_path: Optional[str] = Field( + default=None, + description="""Required. A JSON Path (RFC 9535) to the argument being streamed. https://datatracker.ietf.org/doc/html/rfc9535. e.g. "$.foo.bar[0].data".""", + ) + will_continue: Optional[bool] = Field( + default=None, + description="""Optional. Whether this is not the last part of the same json_path. If true, another PartialArg message for the current json_path is expected to follow.""", + ) + + +class PartialArgDict(TypedDict, total=False): + """Partial argument value of the function call.""" + + null_value: Optional[Literal["NULL_VALUE"]] + """Optional. Represents a null value.""" + + number_value: Optional[float] + """Optional. Represents a double value.""" + + string_value: Optional[str] + """Optional. Represents a string value.""" + + bool_value: Optional[bool] + """Optional. Represents a boolean value.""" + + json_path: Optional[str] + """Required. 
A JSON Path (RFC 9535) to the argument being streamed. https://datatracker.ietf.org/doc/html/rfc9535. e.g. "$.foo.bar[0].data".""" + + will_continue: Optional[bool] + """Optional. Whether this is not the last part of the same json_path. If true, another PartialArg message for the current json_path is expected to follow.""" + + +PartialArgOrDict = Union[PartialArg, PartialArgDict] + + +class FunctionCall(_common.BaseModel): + """A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing the parameters and their values.""" + + args: Optional[dict[str, Any]] = Field( + default=None, + description="""Optional. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details.""", + ) + id: Optional[str] = Field( + default=None, + description="""Optional. The unique id of the function call. If populated, the client to execute the `function_call` and return the response with the matching `id`.""", + ) + name: Optional[str] = Field( + default=None, + description="""Optional. The name of the function to call. Matches [FunctionDeclaration.name].""", + ) + partial_args: Optional[list[PartialArg]] = Field( + default=None, + description="""Optional. The partial argument value of the function call. If provided, represents the arguments/fields that are streamed incrementally.""", + ) + will_continue: Optional[bool] = Field( + default=None, + description="""Optional. Whether this is the last part of the FunctionCall. If true, another partial message for the current FunctionCall is expected to follow.""", + ) + + +class FunctionCallDict(TypedDict, total=False): + """A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing the parameters and their values.""" + + args: Optional[dict[str, Any]] + """Optional. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details.""" + + id: Optional[str] + """Optional. The unique id of the function call. If populated, the client to execute the `function_call` and return the response with the matching `id`.""" + + name: Optional[str] + """Optional. The name of the function to call. Matches [FunctionDeclaration.name].""" + + partial_args: Optional[list[PartialArgDict]] + """Optional. The partial argument value of the function call. If provided, represents the arguments/fields that are streamed incrementally.""" + + will_continue: Optional[bool] + """Optional. Whether this is the last part of the FunctionCall. If true, another partial message for the current FunctionCall is expected to follow.""" + + +FunctionCallOrDict = Union[FunctionCall, FunctionCallDict] + + +class FunctionResponse(_common.BaseModel): + """The result output from a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function is used as context to the model. This should contain the result of a [FunctionCall] made based on model prediction.""" + + id: Optional[str] = Field( + default=None, + description="""Optional. The id of the function call this response is for. Populated by the client to match the corresponding function call `id`.""", + ) + name: Optional[str] = Field( + default=None, + description="""Required. The name of the function to call. 
Matches [FunctionDeclaration.name] and [FunctionCall.name].""", + ) + response: Optional[dict[str, Any]] = Field( + default=None, + description="""Required. The function response in JSON object format. Use "output" key to specify function output and "error" key to specify error details (if any). If "output" and "error" keys are not specified, then whole "response" is treated as function output.""", + ) + + +class FunctionResponseDict(TypedDict, total=False): + """The result output from a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function is used as context to the model. This should contain the result of a [FunctionCall] made based on model prediction.""" + + id: Optional[str] + """Optional. The id of the function call this response is for. Populated by the client to match the corresponding function call `id`.""" + + name: Optional[str] + """Required. The name of the function to call. Matches [FunctionDeclaration.name] and [FunctionCall.name].""" + + response: Optional[dict[str, Any]] + """Required. The function response in JSON object format. Use "output" key to specify function output and "error" key to specify error details (if any). If "output" and "error" keys are not specified, then whole "response" is treated as function output.""" + + +FunctionResponseOrDict = Union[FunctionResponse, FunctionResponseDict] + + +class Blob(_common.BaseModel): + """Content blob.""" + + data: Optional[bytes] = Field(default=None, description="""Required. Raw bytes.""") + display_name: Optional[str] = Field( + default=None, + description="""Optional. Display name of the blob. Used to provide a label or filename to distinguish blobs. This field is only returned in PromptMessage for prompt management. It is currently used in the Gemini GenerateContent calls only when server side tools (code_execution, google_search, and url_context) are enabled.""", + ) + mime_type: Optional[str] = Field( + default=None, + description="""Required. The IANA standard MIME type of the source data.""", + ) + + +class BlobDict(TypedDict, total=False): + """Content blob.""" + + data: Optional[bytes] + """Required. Raw bytes.""" + + display_name: Optional[str] + """Optional. Display name of the blob. Used to provide a label or filename to distinguish blobs. This field is only returned in PromptMessage for prompt management. It is currently used in the Gemini GenerateContent calls only when server side tools (code_execution, google_search, and url_context) are enabled.""" + + mime_type: Optional[str] + """Required. The IANA standard MIME type of the source data.""" + + +BlobOrDict = Union[Blob, BlobDict] + + +class VideoMetadata(_common.BaseModel): + """Metadata describes the input video content.""" + + end_offset: Optional[str] = Field( + default=None, description="""Optional. The end offset of the video.""" + ) + fps: Optional[float] = Field( + default=None, + description="""Optional. The frame rate of the video sent to the model. If not specified, the default value will be 1.0. The fps range is (0.0, 24.0].""", + ) + start_offset: Optional[str] = Field( + default=None, description="""Optional. The start offset of the video.""" + ) + + +class VideoMetadataDict(TypedDict, total=False): + """Metadata describes the input video content.""" + + end_offset: Optional[str] + """Optional. The end offset of the video.""" + + fps: Optional[float] + """Optional. The frame rate of the video sent to the model. 
If not specified, the default value will be 1.0. The fps range is (0.0, 24.0].""" + + start_offset: Optional[str] + """Optional. The start offset of the video.""" + + +VideoMetadataOrDict = Union[VideoMetadata, VideoMetadataDict] + + +class Part(_common.BaseModel): + """A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes.""" + + code_execution_result: Optional[CodeExecutionResult] = Field( + default=None, + description="""Optional. Result of executing the [ExecutableCode].""", + ) + executable_code: Optional[ExecutableCode] = Field( + default=None, + description="""Optional. Code generated by the model that is meant to be executed.""", + ) + file_data: Optional[FileData] = Field( + default=None, description="""Optional. URI based data.""" + ) + function_call: Optional[FunctionCall] = Field( + default=None, + description="""Optional. A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] with the parameters and their values.""", + ) + function_response: Optional[FunctionResponse] = Field( + default=None, + description="""Optional. The result output of a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function call. It is used as context to the model.""", + ) + inline_data: Optional[Blob] = Field( + default=None, description="""Optional. Inlined bytes data.""" + ) + text: Optional[str] = Field( + default=None, description="""Optional. Text part (can be code).""" + ) + thought: Optional[bool] = Field( + default=None, + description="""Optional. Indicates if the part is thought from the model.""", + ) + thought_signature: Optional[bytes] = Field( + default=None, + description="""Optional. An opaque signature for the thought so it can be reused in subsequent requests.""", + ) + video_metadata: Optional[VideoMetadata] = Field( + default=None, + description="""Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data.""", + ) + + +class PartDict(TypedDict, total=False): + """A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes.""" + + code_execution_result: Optional[CodeExecutionResultDict] + """Optional. Result of executing the [ExecutableCode].""" + + executable_code: Optional[ExecutableCodeDict] + """Optional. Code generated by the model that is meant to be executed.""" + + file_data: Optional[FileDataDict] + """Optional. URI based data.""" + + function_call: Optional[FunctionCallDict] + """Optional. A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] with the parameters and their values.""" + + function_response: Optional[FunctionResponseDict] + """Optional. 
The result output of a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function call. It is used as context to the model.""" + + inline_data: Optional[BlobDict] + """Optional. Inlined bytes data.""" + + text: Optional[str] + """Optional. Text part (can be code).""" + + thought: Optional[bool] + """Optional. Indicates if the part is thought from the model.""" + + thought_signature: Optional[bytes] + """Optional. An opaque signature for the thought so it can be reused in subsequent requests.""" + + video_metadata: Optional[VideoMetadataDict] + """Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data.""" + + +PartOrDict = Union[Part, PartDict] + + +class TaskArtifact(_common.BaseModel): + """Represents a single artifact produced by a task. sample: artifacts: { artifact_id: "image-12345" name: "Generated Sunset Image" description: "A beautiful sunset over the mountains, generated by the user's request." parts: { inline_data: { mime_type: "image/png" data: "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAA=" } } }""" + + artifact_id: Optional[str] = Field( + default=None, + description="""Required. The unique identifier of the artifact within the task. This id is provided by the creator of the artifact.""", + ) + description: Optional[str] = Field( + default=None, + description="""Optional. A human readable description of the artifact.""", + ) + display_name: Optional[str] = Field( + default=None, + description="""Optional. The human-readable name of the artifact provided by the creator.""", + ) + metadata: Optional[dict[str, Any]] = Field( + default=None, + description="""Optional. Additional metadata for the artifact. For A2A, the URIs of the extensions that were used to produce this artifact will be stored here.""", + ) + parts: Optional[list[Part]] = Field( + default=None, description="""Required. The content of the artifact.""" + ) + + +class TaskArtifactDict(TypedDict, total=False): + """Represents a single artifact produced by a task. sample: artifacts: { artifact_id: "image-12345" name: "Generated Sunset Image" description: "A beautiful sunset over the mountains, generated by the user's request." parts: { inline_data: { mime_type: "image/png" data: "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAA=" } } }""" + + artifact_id: Optional[str] + """Required. The unique identifier of the artifact within the task. This id is provided by the creator of the artifact.""" + + description: Optional[str] + """Optional. A human readable description of the artifact.""" + + display_name: Optional[str] + """Optional. The human-readable name of the artifact provided by the creator.""" + + metadata: Optional[dict[str, Any]] + """Optional. Additional metadata for the artifact. For A2A, the URIs of the extensions that were used to produce this artifact will be stored here.""" + + parts: Optional[list[PartDict]] + """Required. The content of the artifact.""" + + +TaskArtifactOrDict = Union[TaskArtifact, TaskArtifactDict] + + +class TaskOutput(_common.BaseModel): + """Represents the final output of a task.""" + + artifacts: Optional[list[TaskArtifact]] = Field( + default=None, + description="""Optional. A list of artifacts (files, data) produced by the task.""", + ) + + +class TaskOutputDict(TypedDict, total=False): + """Represents the final output of a task.""" + + artifacts: Optional[list[TaskArtifactDict]] + """Optional. 
A list of artifacts (files, data) produced by the task.""" + + +TaskOutputOrDict = Union[TaskOutput, TaskOutputDict] + + +class TaskMessage(_common.BaseModel): + """Represents a single message in a conversation, compliant with the A2A specification.""" + + message_id: Optional[str] = Field( + default=None, description="""Required. The unique identifier of the message.""" + ) + metadata: Optional[dict[str, Any]] = Field( + default=None, + description="""Optional. A2A message may have extension_uris or reference_task_ids. They will be stored under metadata.""", + ) + parts: Optional[list[Part]] = Field( + default=None, description="""Required. The parts of the message.""" + ) + role: Optional[str] = Field( + default=None, + description="""Required. The role of the sender of the message. e.g. "user", "agent".""", + ) + + +class TaskMessageDict(TypedDict, total=False): + """Represents a single message in a conversation, compliant with the A2A specification.""" + + message_id: Optional[str] + """Required. The unique identifier of the message.""" + + metadata: Optional[dict[str, Any]] + """Optional. A2A message may have extension_uris or reference_task_ids. They will be stored under metadata.""" + + parts: Optional[list[PartDict]] + """Required. The parts of the message.""" + + role: Optional[str] + """Required. The role of the sender of the message. e.g. "user", "agent".""" + + +TaskMessageOrDict = Union[TaskMessage, TaskMessageDict] + + +class TaskStatusDetails(_common.BaseModel): + """Represents the additional status details of a task.""" + + task_message: Optional[TaskMessage] = Field( + default=None, + description="""Optional. The message associated with the single-turn interaction between the user and the agent or agent and agent.""", + ) + + +class TaskStatusDetailsDict(TypedDict, total=False): + """Represents the additional status details of a task.""" + + task_message: Optional[TaskMessageDict] + """Optional. The message associated with the single-turn interaction between the user and the agent or agent and agent.""" + + +TaskStatusDetailsOrDict = Union[TaskStatusDetails, TaskStatusDetailsDict] + + +class A2aTask(_common.BaseModel): + """A task.""" + + context_id: Optional[str] = Field( + default=None, + description="""Optional. A generic identifier for grouping related tasks (e.g., session_id, workflow_id).""", + ) + create_time: Optional[datetime.datetime] = Field( + default=None, description="""Output only. The creation timestamp of the task.""" + ) + metadata: Optional[dict[str, Any]] = Field( + default=None, description="""Optional. Arbitrary, user-defined metadata.""" + ) + name: Optional[str] = Field( + default=None, + description="""Identifier. The resource name of the task. Format: `projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}/a2aTasks/{a2a_task}`""", + ) + next_event_sequence_number: Optional[int] = Field( + default=None, + description="""Output only. The next event sequence number to be appended to the task. This value starts at 1 and is guaranteed to be monotonically increasing.""", + ) + output: Optional[TaskOutput] = Field( + default=None, description="""Optional. The final output of the task.""" + ) + state: Optional[State] = Field( + default=None, + description="""Output only. The state of the task. The state of a new task is SUBMITTED by default. The state of a task can only be updated via AppendA2aTaskEvents API.""", + ) + status_details: Optional[TaskStatusDetails] = Field( + default=None, description="""Optional. 
The status details of the task.""" + ) + update_time: Optional[datetime.datetime] = Field( + default=None, + description="""Output only. The last update timestamp of the task.""", + ) + + +class A2aTaskDict(TypedDict, total=False): + """A task.""" + + context_id: Optional[str] + """Optional. A generic identifier for grouping related tasks (e.g., session_id, workflow_id).""" + + create_time: Optional[datetime.datetime] + """Output only. The creation timestamp of the task.""" + + metadata: Optional[dict[str, Any]] + """Optional. Arbitrary, user-defined metadata.""" + + name: Optional[str] + """Identifier. The resource name of the task. Format: `projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}/a2aTasks/{a2a_task}`""" + + next_event_sequence_number: Optional[int] + """Output only. The next event sequence number to be appended to the task. This value starts at 1 and is guaranteed to be monotonically increasing.""" + + output: Optional[TaskOutputDict] + """Optional. The final output of the task.""" + + state: Optional[State] + """Output only. The state of the task. The state of a new task is SUBMITTED by default. The state of a task can only be updated via AppendA2aTaskEvents API.""" + + status_details: Optional[TaskStatusDetailsDict] + """Optional. The status details of the task.""" + + update_time: Optional[datetime.datetime] + """Output only. The last update timestamp of the task.""" + + +A2aTaskOrDict = Union[A2aTask, A2aTaskDict] + + +class ListReasoningEngineTasksConfig(_common.BaseModel): + """Config for listing agent engine tasks.""" + + http_options: Optional[genai_types.HttpOptions] = Field( + default=None, description="""Used to override HTTP request options.""" + ) + page_size: Optional[int] = Field(default=None, description="""""") + page_token: Optional[str] = Field(default=None, description="""""") + filter: Optional[str] = Field( + default=None, + description="""An expression for filtering the results of the request. + For field names both snake_case and camelCase are supported.""", + ) + + +class ListReasoningEngineTasksConfigDict(TypedDict, total=False): + """Config for listing agent engine tasks.""" + + http_options: Optional[genai_types.HttpOptionsDict] + """Used to override HTTP request options.""" + + page_size: Optional[int] + """""" + + page_token: Optional[str] + """""" + + filter: Optional[str] + """An expression for filtering the results of the request. 
+      For field names both snake_case and camelCase are supported."""
+
+
+ListReasoningEngineTasksConfigOrDict = Union[
+    ListReasoningEngineTasksConfig, ListReasoningEngineTasksConfigDict
+]
+
+
+class _ListReasoningEngineTasksRequestParameters(_common.BaseModel):
+    """Parameters for listing agent engine tasks."""
+
+    name: Optional[str] = Field(
+        default=None, description="""Name of the agent engine."""
+    )
+    config: Optional[ListReasoningEngineTasksConfig] = Field(
+        default=None, description=""""""
+    )
+
+
+class _ListReasoningEngineTasksRequestParametersDict(TypedDict, total=False):
+    """Parameters for listing agent engine tasks."""
+
+    name: Optional[str]
+    """Name of the agent engine."""
+
+    config: Optional[ListReasoningEngineTasksConfigDict]
+    """"""
+
+
+_ListReasoningEngineTasksRequestParametersOrDict = Union[
+    _ListReasoningEngineTasksRequestParameters,
+    _ListReasoningEngineTasksRequestParametersDict,
+]
+
+
+class ListReasoningEngineTasksResponse(_common.BaseModel):
+    """Response for listing agent engine tasks."""
+
+    sdk_http_response: Optional[genai_types.HttpResponse] = Field(
+        default=None, description="""Used to retain the full HTTP response."""
+    )
+    next_page_token: Optional[str] = Field(default=None, description="""""")
+    tasks: Optional[list[A2aTask]] = Field(
+        default=None, description="""List of agent engine tasks."""
+    )
+
+
+class ListReasoningEngineTasksResponseDict(TypedDict, total=False):
+    """Response for listing agent engine tasks."""
+
+    sdk_http_response: Optional[genai_types.HttpResponseDict]
+    """Used to retain the full HTTP response."""
+
+    next_page_token: Optional[str]
+    """"""
+
+    tasks: Optional[list[A2aTaskDict]]
+    """List of agent engine tasks."""
+
+
+ListReasoningEngineTasksResponseOrDict = Union[
+    ListReasoningEngineTasksResponse, ListReasoningEngineTasksResponseDict
+]
+
+
+class CreateAgentEngineTaskConfig(_common.BaseModel):
+    """Config for creating an Agent Engine Task."""
+
+    http_options: Optional[genai_types.HttpOptions] = Field(
+        default=None, description="""Used to override HTTP request options."""
+    )
+
+
+class CreateAgentEngineTaskConfigDict(TypedDict, total=False):
+    """Config for creating an Agent Engine Task."""
+
+    http_options: Optional[genai_types.HttpOptionsDict]
+    """Used to override HTTP request options."""
+
+
+CreateAgentEngineTaskConfigOrDict = Union[
+    CreateAgentEngineTaskConfig, CreateAgentEngineTaskConfigDict
+]
+
+
+class _CreateAgentEngineTaskRequestParameters(_common.BaseModel):
+    """Parameters for creating Agent Engine Tasks."""
+
+    name: Optional[str] = Field(
+        default=None,
+        description="""Name of the agent engine to create the task under.""",
+    )
+    a2a_task_id: Optional[str] = Field(
+        default=None, description="""The ID of the task."""
+    )
+    a2a_task: Optional[A2aTask] = Field(
+        default=None, description="""The task to create."""
+    )
+    config: Optional[CreateAgentEngineTaskConfig] = Field(
+        default=None, description=""""""
+    )
+
+
+class _CreateAgentEngineTaskRequestParametersDict(TypedDict, total=False):
+    """Parameters for creating Agent Engine Tasks."""
+
+    name: Optional[str]
+    """Name of the agent engine to create the task under."""
+
+    a2a_task_id: Optional[str]
+    """The ID of the task."""
+
+    a2a_task: Optional[A2aTaskDict]
+    """The task to create."""
+
+    config: Optional[CreateAgentEngineTaskConfigDict]
+    """"""
+
+
+_CreateAgentEngineTaskRequestParametersOrDict = Union[
+    _CreateAgentEngineTaskRequestParameters, _CreateAgentEngineTaskRequestParametersDict
+]
+
+
 class CreateEvaluationItemConfig(_common.BaseModel):
     """Config to create an evaluation item."""