diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 3a9f001..0963167 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,18 +1,18 @@ lockVersion: 2.0.0 id: c75567cf-9def-4617-8ae7-65af6f367b68 management: - docChecksum: 32083c7c0293bed80ed2dec2b52fc53f + docChecksum: a349bae08dffd8722915fe3e70c77eeb docVersion: 2.0.0 - speakeasyVersion: 1.648.0 - generationVersion: 2.737.0 - releaseVersion: 1.6.4 - configChecksum: 5e7ddbc1dd0d76c87f1e6822f99a63a7 + speakeasyVersion: 1.664.0 + generationVersion: 2.766.1 + releaseVersion: 1.6.5 + configChecksum: 7be41082871778bfb564435aecdc963a features: python: additionalDependencies: 1.0.0 additionalProperties: 1.0.1 constsAndDefaults: 1.0.5 - core: 5.23.4 + core: 5.23.13 customCodeRegions: 0.1.1 defaultEnabledRetries: 0.2.0 devContainers: 3.0.0 @@ -29,7 +29,7 @@ features: nameOverrides: 3.0.1 nullables: 1.0.1 responseFormat: 1.0.1 - retries: 3.0.2 + retries: 3.0.3 sdkHooks: 1.1.0 serverEvents: 1.0.11 unions: 3.1.0 @@ -160,15 +160,16 @@ generatedFiles: - docs/models/deletespanspansspaniddeleterequest.md - docs/models/delta.md - docs/models/detail.md - - docs/models/evaluationconfig.md - docs/models/example.md - docs/models/examplein.md - docs/models/file.md + - docs/models/filedownloadurlresponse.md - docs/models/filefile.md - docs/models/filter_.md - docs/models/finishreason.md - docs/models/format_.md - - docs/models/functioncallconfiguration.md + - docs/models/functioncallconfigurationinput.md + - docs/models/functioncallconfigurationoutput.md - docs/models/functioncallinput.md - docs/models/functioncalloutput.md - docs/models/functiondefinition.md @@ -181,6 +182,7 @@ generatedFiles: - docs/models/getdatasetentriesresponse.md - docs/models/getdatasetentrydatasetsdatasetidentriesentryidgetrequest.md - docs/models/getdatasetentryresponse.md + - docs/models/getfiledownloadurlknowledgeknowledgebaseidfilesfileiddownloadurlgetrequest.md - docs/models/getfunctionbynamefunctionsbynamenamegetrequest.md - 
docs/models/getfunctionbyrevisionfunctionsfunctionidrevisionsrevisionidgetrequest.md - docs/models/getfunctionfunctionsfunctionidgetrequest.md @@ -210,6 +212,8 @@ generatedFiles: - docs/models/listcustommodelsmodelscustomgetrequest.md - docs/models/listcustommodelsresponseitem.md - docs/models/listdatasetentriesdatasetsdatasetidentriesgetrequest.md + - docs/models/listfilesknowledgeknowledgebaseidfilesgetrequest.md + - docs/models/listfilesresponse.md - docs/models/listfunctionrevisionresponse.md - docs/models/listfunctionrevisionsfunctionsfunctionidrevisionsgetrequest.md - docs/models/listfunctionsfunctionsgetrequest.md @@ -237,6 +241,7 @@ generatedFiles: - docs/models/openaitypeschatcompletioncreateparamsfunction.md - docs/models/paginatedresponsegetdatasetentriesresponse.md - docs/models/paginatedresponselistcustommodelsresponseitem.md + - docs/models/paginatedresponselistfilesresponse.md - docs/models/paginatedresponselistfunctionrevisionresponse.md - docs/models/paginatedresponselistfunctionsresponseitem.md - docs/models/paginatedresponselistknowledgebasesresponse.md @@ -267,10 +272,6 @@ generatedFiles: - docs/models/responseformattext.md - docs/models/savetodatasetresponse.md - docs/models/savetodatasetspansspanidsaveexamplespostrequest.md - - docs/models/scorersenum1.md - - docs/models/scorersenum2.md - - docs/models/scorersunion1.md - - docs/models/scorersunion2.md - docs/models/searchcontextsize.md - docs/models/security.md - docs/models/spandata.md @@ -439,21 +440,23 @@ generatedFiles: - src/opperai/models/delete_span_spans_span_id_deleteop.py - src/opperai/models/deleteknowledgebaserequest.py - src/opperai/models/deleteknowledgebaseresponse.py - - src/opperai/models/evaluationconfig.py - src/opperai/models/example.py - src/opperai/models/examplein.py - src/opperai/models/file.py + - src/opperai/models/filedownloadurlresponse.py - src/opperai/models/filefile.py - src/opperai/models/filter_.py - src/opperai/models/function_output.py - 
src/opperai/models/function_stream_call_stream_postop.py - src/opperai/models/functioncall_input.py - src/opperai/models/functioncall_output.py - - src/opperai/models/functioncallconfiguration.py + - src/opperai/models/functioncallconfiguration_input.py + - src/opperai/models/functioncallconfiguration_output.py - src/opperai/models/functiondefinition.py - src/opperai/models/get_custom_model_by_name_models_custom_by_name_name_getop.py - src/opperai/models/get_custom_model_models_custom_model_id_getop.py - src/opperai/models/get_dataset_entry_datasets_dataset_id_entries_entry_id_getop.py + - src/opperai/models/get_file_download_url_knowledge_knowledge_base_id_files_file_id_download_url_getop.py - src/opperai/models/get_function_by_name_functions_by_name_name_getop.py - src/opperai/models/get_function_by_revision_functions_function_id_revisions_revision_id_getop.py - src/opperai/models/get_function_functions_function_id_getop.py @@ -482,6 +485,7 @@ generatedFiles: - src/opperai/models/jsonschema.py - src/opperai/models/list_custom_models_models_custom_getop.py - src/opperai/models/list_dataset_entries_datasets_dataset_id_entries_getop.py + - src/opperai/models/list_files_knowledge_knowledge_base_id_files_getop.py - src/opperai/models/list_function_revisions_functions_function_id_revisions_getop.py - src/opperai/models/list_functions_functions_getop.py - src/opperai/models/list_knowledge_bases_knowledge_getop.py @@ -490,6 +494,7 @@ generatedFiles: - src/opperai/models/list_models_models_getop.py - src/opperai/models/list_traces_traces_getop.py - src/opperai/models/listcustommodelsresponseitem.py + - src/opperai/models/listfilesresponse.py - src/opperai/models/listfunctionrevisionresponse.py - src/opperai/models/listfunctionsresponseitem.py - src/opperai/models/listknowledgebasesresponse.py @@ -509,6 +514,7 @@ generatedFiles: - src/opperai/models/openai_types_chat_completion_create_params_function.py - src/opperai/models/paginatedresponse_getdatasetentriesresponse_.py - 
src/opperai/models/paginatedresponse_listcustommodelsresponseitem_.py + - src/opperai/models/paginatedresponse_listfilesresponse_.py - src/opperai/models/paginatedresponse_listfunctionrevisionresponse_.py - src/opperai/models/paginatedresponse_listfunctionsresponseitem_.py - src/opperai/models/paginatedresponse_listknowledgebasesresponse_.py @@ -601,7 +607,7 @@ examples: function_call_call_post: speakeasy-default-function-call-call-post: requestBody: - application/json: {"name": "add_numbers", "instructions": "Calculate the sum of two numbers", "input_schema": {"properties": {"x": {"title": "X", "type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "input": {"x": 4, "y": 5}, "examples": [{"input": {"x": 1, "y": 3}, "output": {"sum": 4}, "comment": "Adds two numbers"}], "parent_span_id": "123e4567-e89b-12d3-a456-426614174000", "tags": {"project": "project_456", "user": "company_123"}, "configuration": {"invocation.few_shot.count": 0, "beta.evaluation": {"enabled": true, "scorers": "base"}, "invocation.structured_generation.max_attempts": 5, "invocation.cache.ttl": 0, "beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false}} + application/json: {"name": "add_numbers", "instructions": "Calculate the sum of two numbers", "input_schema": {"properties": {"x": {"title": "X", "type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "input": {"x": 4, "y": 5}, "examples": [{"input": {"x": 1, "y": 3}, "output": {"sum": 4}, "comment": "Adds two numbers"}], "parent_span_id": 
"123e4567-e89b-12d3-a456-426614174000", "tags": {"project": "project_456", "user": "company_123"}, "configuration": {"invocation.few_shot.count": 0, "invocation.structured_generation.max_attempts": 5, "invocation.cache.ttl": 0, "beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false}} responses: "200": application/json: {"span_id": "174109ca-ebca-4823-8c0d-08c3789b2fea", "message": "The sum of 1 and 3 is 4", "json_payload": {"sum": 4}, "cached": true, "images": ["image_url"], "usage": {"input_tokens": 25, "output_tokens": 972, "output_tokens_details": {"reasoning_tokens": 704}, "total_tokens": 997}, "cost": {"generation": 0.0001, "platform": 0.00001, "total": 0.00011}} @@ -630,19 +636,19 @@ examples: text/event-stream: {"data": {"delta": "Hello! How can I assist you today?", "span_id": "123e4567-e89b-12d3-a456-426614174000"}} span_id_only: requestBody: - application/json: {"name": "add_numbers", "instructions": "Calculate the sum of two numbers", "input_schema": {"properties": {"x": {"title": "X", "type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "input": {"x": 4, "y": 5}, "examples": [{"input": {"x": 1, "y": 3}, "output": {"sum": 4}, "comment": "Adds two numbers"}], "parent_span_id": "123e4567-e89b-12d3-a456-426614174000", "tags": {"project": "project_456", "user": "company_123"}, "configuration": {"invocation.few_shot.count": 0, "beta.evaluation": {"enabled": true, "scorers": "base"}, "invocation.structured_generation.max_attempts": 5, "invocation.cache.ttl": 0, "beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false}} + application/json: {"name": "add_numbers", "instructions": "Calculate the sum of two numbers", "input_schema": {"properties": {"x": {"title": 
"X", "type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "input": {"x": 4, "y": 5}, "examples": [{"input": {"x": 1, "y": 3}, "output": {"sum": 4}, "comment": "Adds two numbers"}], "parent_span_id": "123e4567-e89b-12d3-a456-426614174000", "tags": {"project": "project_456", "user": "company_123"}, "configuration": {"invocation.few_shot.count": 0, "invocation.structured_generation.max_attempts": 5, "invocation.cache.ttl": 0, "beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false}} responses: "200": text/event-stream: {"data": {"span_id": "123e4567-e89b-12d3-a456-426614174000"}} text_streaming_content: requestBody: - application/json: {"name": "add_numbers", "instructions": "Calculate the sum of two numbers", "input_schema": {"properties": {"x": {"title": "X", "type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "input": {"x": 4, "y": 5}, "examples": [{"input": {"x": 1, "y": 3}, "output": {"sum": 4}, "comment": "Adds two numbers"}], "parent_span_id": "123e4567-e89b-12d3-a456-426614174000", "tags": {"project": "project_456", "user": "company_123"}, "configuration": {"invocation.few_shot.count": 0, "beta.evaluation": {"enabled": true, "scorers": "base"}, "invocation.structured_generation.max_attempts": 5, "invocation.cache.ttl": 0, "beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false}} + application/json: {"name": "add_numbers", "instructions": "Calculate the sum of two numbers", "input_schema": {"properties": {"x": {"title": "X", "type": 
"integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "input": {"x": 4, "y": 5}, "examples": [{"input": {"x": 1, "y": 3}, "output": {"sum": 4}, "comment": "Adds two numbers"}], "parent_span_id": "123e4567-e89b-12d3-a456-426614174000", "tags": {"project": "project_456", "user": "company_123"}, "configuration": {"invocation.few_shot.count": 0, "invocation.structured_generation.max_attempts": 5, "invocation.cache.ttl": 0, "beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false}} responses: "200": text/event-stream: {"data": {"delta": "Hello! How can I assist you today?", "chunk_type": "text"}} structured_streaming_content: requestBody: - application/json: {"name": "add_numbers", "instructions": "Calculate the sum of two numbers", "input_schema": {"properties": {"x": {"title": "X", "type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "input": {"x": 4, "y": 5}, "examples": [{"input": {"x": 1, "y": 3}, "output": {"sum": 4}, "comment": "Adds two numbers"}], "parent_span_id": "123e4567-e89b-12d3-a456-426614174000", "tags": {"project": "project_456", "user": "company_123"}, "configuration": {"invocation.few_shot.count": 0, "beta.evaluation": {"enabled": true, "scorers": "base"}, "invocation.structured_generation.max_attempts": 5, "invocation.cache.ttl": 0, "beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false}} + application/json: {"name": "add_numbers", "instructions": "Calculate the sum of two numbers", "input_schema": {"properties": {"x": {"title": "X", 
"type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "input": {"x": 4, "y": 5}, "examples": [{"input": {"x": 1, "y": 3}, "output": {"sum": 4}, "comment": "Adds two numbers"}], "parent_span_id": "123e4567-e89b-12d3-a456-426614174000", "tags": {"project": "project_456", "user": "company_123"}, "configuration": {"invocation.few_shot.count": 0, "invocation.structured_generation.max_attempts": 5, "invocation.cache.ttl": 0, "beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false}} responses: "200": text/event-stream: {"data": {"delta": "John Doe", "json_path": "people[0].name", "chunk_type": "json"}} @@ -875,7 +881,7 @@ examples: path: span_id: "77b258a2-45c1-4b87-a50c-9116bc8ed1d6" requestBody: - application/json: {"name": "my span", "start_time": "2025-11-04T12:46:20.444851Z", "type": "email_tool", "end_time": "2025-11-04T12:46:20.444947Z", "input": "Hello, world!", "output": "Hello, world!", "error": "Exception: This is an error message", "meta": {"key": "value"}, "score": 10} + application/json: {"name": "my span", "start_time": "2025-11-28T13:52:31.359105Z", "type": "email_tool", "end_time": "2025-11-28T13:52:31.359201Z", "input": "Hello, world!", "output": "Hello, world!", "error": "Exception: This is an error message", "meta": {"key": "value"}, "score": 10} responses: "200": application/json: {"name": "my span", "start_time": "2024-03-20T10:00:00+00:00", "id": "84d9ffc8-336c-4953-b92f-fe05d5405951", "trace_id": "123e4567-e89b-12d3-a456-426614174000", "parent_id": "123e4567-e89b-12d3-a456-426614174000", "type": "email_tool", "end_time": "2024-03-20T10:00:10+00:00", "input": "Hello, world!", "output": "Hello, world!", "error": "Exception: This is an error message", "meta": {"key": "value"}, "score": 10} 
@@ -1079,10 +1085,10 @@ examples: create_function_functions_post: speakeasy-default-create-function-functions-post: requestBody: - application/json: {"name": "my-function", "description": "This function is used to add two numbers and return the result.", "instructions": "You are a calculator that adds two numbers and returns the result.", "input_schema": {"properties": {"x": {"title": "X", "type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "configuration": {"invocation.few_shot.count": 0, "beta.evaluation": {"enabled": true, "scorers": "base"}, "invocation.structured_generation.max_attempts": 5, "invocation.cache.ttl": 0, "beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false}} + application/json: {"name": "my-function", "description": "This function is used to add two numbers and return the result.", "instructions": "You are a calculator that adds two numbers and returns the result.", "input_schema": {"properties": {"x": {"title": "X", "type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "configuration": {"invocation.few_shot.count": 0, "invocation.structured_generation.max_attempts": 5, "invocation.cache.ttl": 0, "beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false}} responses: "201": - application/json: {"name": "my-function", "description": "This function is used to add two numbers and return the result.", "instructions": "You are a calculator that adds two numbers and returns the result.", "input_schema": {"properties": {"x": 
{"title": "X", "type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "configuration": {"invocation.few_shot.count": 0, "beta.evaluation": {"enabled": true, "scorers": "base"}, "invocation.structured_generation.max_attempts": 5, "invocation.cache.ttl": 0, "beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false}, "id": "a246e349-93f1-4ae1-9ed0-d3ca4f954a62"} + application/json: {"name": "my-function", "description": "This function is used to add two numbers and return the result.", "instructions": "You are a calculator that adds two numbers and returns the result.", "input_schema": {"properties": {"x": {"title": "X", "type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "configuration": {"invocation.few_shot.count": 0, "invocation.structured_generation.max_attempts": 5, "invocation.cache.ttl": 0, "beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false}, "id": "a246e349-93f1-4ae1-9ed0-d3ca4f954a62", "observer_enabled": true} "409": application/json: {"type": "ConflictError", "message": "The resource already exists", "detail": "The resource already exists"} "422": @@ -1117,7 +1123,7 @@ examples: function_id: "42016421-16e8-4b50-a2d1-30fc3894763b" responses: "200": - application/json: {"name": "my-function", "description": "This function is used to add two numbers and return the result.", "instructions": "You are a calculator that adds two numbers and returns the result.", "input_schema": {"properties": {"x": {"title": "X", "type": "integer"}, "y": 
{"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "configuration": {"invocation.few_shot.count": 0, "beta.evaluation": {"enabled": true, "scorers": "base"}, "invocation.structured_generation.max_attempts": 5, "invocation.cache.ttl": 0, "beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false}, "id": "692b28e0-d3e6-4ea4-adf0-c629aeecfdab"} + application/json: {"name": "my-function", "description": "This function is used to add two numbers and return the result.", "instructions": "You are a calculator that adds two numbers and returns the result.", "input_schema": {"properties": {"x": {"title": "X", "type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "configuration": {"invocation.few_shot.count": 0, "invocation.structured_generation.max_attempts": 5, "invocation.cache.ttl": 0, "beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false}, "id": "692b28e0-d3e6-4ea4-adf0-c629aeecfdab", "observer_enabled": true} "422": application/json: {"type": "RequestValidationError", "message": "The request is invalid", "detail": {"input": "input value", "loc": ["path", "to", "error"], "msg": "error message", "type": "type of error"}} "400": @@ -1132,10 +1138,10 @@ examples: path: function_id: "b0f067f3-9aa2-4ce2-aad5-c9832fbf4fa4" requestBody: - application/json: {"name": "my-function", "description": "This function is used to add two numbers and return the result.", "instructions": "You are a calculator that adds two numbers and returns the result.", "input_schema": {"properties": 
{"x": {"title": "X", "type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "configuration": {"beta.evaluation": {"enabled": true, "scorers": "base"}, "beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false, "invocation.cache.ttl": 0, "invocation.few_shot.count": 0, "invocation.structured_generation.max_attempts": 5}} + application/json: {"name": "my-function", "description": "This function is used to add two numbers and return the result.", "instructions": "You are a calculator that adds two numbers and returns the result.", "input_schema": {"properties": {"x": {"title": "X", "type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "configuration": {"beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false, "invocation.cache.ttl": 0, "invocation.few_shot.count": 0, "invocation.structured_generation.max_attempts": 5}} responses: "200": - application/json: {"name": "my-function", "description": "This function is used to add two numbers and return the result.", "instructions": "You are a calculator that adds two numbers and returns the result.", "input_schema": {"properties": {"x": {"title": "X", "type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "configuration": {"invocation.few_shot.count": 0, "beta.evaluation": 
{"enabled": true, "scorers": "base"}, "invocation.structured_generation.max_attempts": 5, "invocation.cache.ttl": 0, "beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false}, "id": "7111f9fb-1c8a-4eee-bcaf-a0e751490b7f"} + application/json: {"name": "my-function", "description": "This function is used to add two numbers and return the result.", "instructions": "You are a calculator that adds two numbers and returns the result.", "input_schema": {"properties": {"x": {"title": "X", "type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "configuration": {"invocation.few_shot.count": 0, "invocation.structured_generation.max_attempts": 5, "invocation.cache.ttl": 0, "beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false}, "id": "7111f9fb-1c8a-4eee-bcaf-a0e751490b7f", "observer_enabled": true} "422": application/json: {"type": "RequestValidationError", "message": "The request is invalid", "detail": {"input": "input value", "loc": ["path", "to", "error"], "msg": "error message", "type": "type of error"}} "400": @@ -1333,7 +1339,7 @@ examples: revision_id: "e60c7090-8545-4e1f-84d5-0d9e4be6c0d1" responses: "200": - application/json: {"name": "my-function", "description": "This function is used to add two numbers and return the result.", "instructions": "You are a calculator that adds two numbers and returns the result.", "input_schema": {"properties": {"x": {"title": "X", "type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "configuration": 
{"invocation.few_shot.count": 0, "beta.evaluation": {"enabled": true, "scorers": "base"}, "invocation.structured_generation.max_attempts": 5, "invocation.cache.ttl": 0, "beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false}, "id": "1af4e1a9-7f11-4e7c-962d-827d5bc836a1"} + application/json: {"name": "my-function", "description": "This function is used to add two numbers and return the result.", "instructions": "You are a calculator that adds two numbers and returns the result.", "input_schema": {"properties": {"x": {"title": "X", "type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "configuration": {"invocation.few_shot.count": 0, "invocation.structured_generation.max_attempts": 5, "invocation.cache.ttl": 0, "beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false}, "id": "1af4e1a9-7f11-4e7c-962d-827d5bc836a1", "observer_enabled": true} "422": application/json: {"type": "RequestValidationError", "message": "The request is invalid", "detail": {"input": "input value", "loc": ["path", "to", "error"], "msg": "error message", "type": "type of error"}} "400": @@ -1568,7 +1574,7 @@ examples: name: "" responses: "200": - application/json: {"name": "my-function", "description": "This function is used to add two numbers and return the result.", "instructions": "You are a calculator that adds two numbers and returns the result.", "input_schema": {"properties": {"x": {"title": "X", "type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "configuration": 
{"invocation.few_shot.count": 0, "beta.evaluation": {"enabled": true, "scorers": "base"}, "invocation.structured_generation.max_attempts": 5, "invocation.cache.ttl": 0, "beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false}, "id": "6f887222-2826-417a-ba23-ddd2b6bc4cbf"} + application/json: {"name": "my-function", "description": "This function is used to add two numbers and return the result.", "instructions": "You are a calculator that adds two numbers and returns the result.", "input_schema": {"properties": {"x": {"title": "X", "type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "configuration": {"invocation.few_shot.count": 0, "invocation.structured_generation.max_attempts": 5, "invocation.cache.ttl": 0, "beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false}, "id": "6f887222-2826-417a-ba23-ddd2b6bc4cbf", "observer_enabled": true} "400": application/json: {"type": "BadRequestError", "message": "The request is invalid", "detail": "The request is invalid"} "401": @@ -1647,7 +1653,7 @@ examples: path: knowledge_base_id: "3c6931ec-d324-46b6-bec6-bf31a5f0623f" requestBody: - application/json: {"filename": "example.pdf", "file_id": "0dff5851-c155-4a46-8450-5b96eb017ae5", "content_type": "application/pdf"} + application/json: {"filename": "example.pdf", "file_id": "0dff5851-c155-4a46-8450-5b96eb017ae5", "content_type": "application/pdf", "metadata": {"category": "legal", "client": "acme"}} responses: "201": application/json: {"id": "8c8543c2-ed47-4e71-a474-8c7f688fd79c", "key": "", "original_filename": "", "document_id": 667726} @@ -1872,5 +1878,41 @@ examples: application/json: {"type": "NotFoundError", "message": "The resource was not found", "detail": "Span 
123e4567-e89b-12d3-a456-426614174000 not found"} "422": application/json: {"type": "RequestValidationError", "message": "The request is invalid", "detail": {"input": "input value", "loc": ["path", "to", "error"], "msg": "error message", "type": "type of error"}} + get_file_download_url_knowledge__knowledge_base_id__files__file_id__download_url_get: + speakeasy-default-get-file-download-url-knowledge-knowledge-base-id-files-file-id-download-url-get: + parameters: + path: + knowledge_base_id: "3d6f9cb0-cbf2-4c9a-8995-331034439b8d" + file_id: "d917ff5b-acf0-4e5c-943a-7d584204a9f3" + responses: + "200": + application/json: {"url": "https://best-availability.name/", "expires_in": 554035} + "400": + application/json: {"type": "BadRequestError", "message": "The request is invalid", "detail": "The request is invalid"} + "401": + application/json: {"type": "UnauthorizedError", "message": "The request is unauthorized", "detail": "The API key is invalid"} + "404": + application/json: {"type": "NotFoundError", "message": "The resource was not found", "detail": "Span 123e4567-e89b-12d3-a456-426614174000 not found"} + "422": + application/json: {"type": "RequestValidationError", "message": "The request is invalid", "detail": {"input": "input value", "loc": ["path", "to", "error"], "msg": "error message", "type": "type of error"}} + list_files_knowledge__knowledge_base_id__files_get: + speakeasy-default-list-files-knowledge-knowledge-base-id-files-get: + parameters: + path: + knowledge_base_id: "53b2ef93-22ff-4826-aac5-a53c7fa8e075" + query: + offset: 0 + limit: 100 + responses: + "200": + application/json: {"meta": {"total_count": 1}, "data": []} + "400": + application/json: {"type": "BadRequestError", "message": "The request is invalid", "detail": "The request is invalid"} + "401": + application/json: {"type": "UnauthorizedError", "message": "The request is unauthorized", "detail": "The API key is invalid"} + "404": + application/json: {"type": "NotFoundError", "message": "The 
resource was not found", "detail": "Span 123e4567-e89b-12d3-a456-426614174000 not found"} + "422": + application/json: {"type": "RequestValidationError", "message": "The request is invalid", "detail": {"input": "input value", "loc": ["path", "to", "error"], "msg": "error message", "type": "type of error"}} examplesVersion: 1.0.2 generatedTests: {} diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index b0eba3b..379404c 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -23,12 +23,13 @@ generation: sdkHooksConfigAccess: true schemas: allOfMergeStrategy: shallowMerge + requestBodyFieldName: "" tests: generateTests: true generateNewTests: false skipResponseBodyAssertions: false python: - version: 1.6.4 + version: 1.6.5 additionalDependencies: dev: {} main: {} @@ -58,11 +59,14 @@ python: operations: "" shared: "" webhooks: "" + inferUnionDiscriminators: true inputModelSuffix: input legacyPyright: true + license: "" maxMethodParams: 999 methodArguments: infer-optional-args moduleName: "" + multipartArrayFormat: legacy outputModelSuffix: output packageManager: poetry packageName: opperai diff --git a/.speakeasy/speakeasy-modifications-overlay.yaml b/.speakeasy/speakeasy-modifications-overlay.yaml index 8312cfc..c99bd6a 100644 --- a/.speakeasy/speakeasy-modifications-overlay.yaml +++ b/.speakeasy/speakeasy-modifications-overlay.yaml @@ -804,4 +804,30 @@ actions: before: sdk.knowledge.delete_documents_knowledge__knowledge_base_id__query_delete() created_at: 1762265966139 reviewed_at: 1762265978991 + - target: $["paths"]["/knowledge/{knowledge_base_id}/query"]["delete"] + update: + x-speakeasy-name-override: deleteDocuments + x-speakeasy-metadata: + after: sdk.knowledge.deleteDocuments() + before: sdk.knowledge.deleteDocumentsKnowledgeKnowledgeBaseIdQueryDelete() + created_at: 1751029113444 + reviewed_at: 1751029118113 + type: method-name + - target: $["paths"]["/knowledge/{knowledge_base_id}/files"]["get"] + update: + x-speakeasy-name-override: listFiles + 
x-speakeasy-metadata: + after: sdk.knowledge.listFiles() + before: sdk.knowledge.listFilesKnowledgeKnowledgeBaseIdFilesGet() + created_at: 1732439000000 + reviewed_at: 1732439000000 + type: method-name + - target: $["paths"]["/knowledge/{knowledge_base_id}/files/{file_id}/download_url"]["get"] + update: + x-speakeasy-name-override: getFileDownloadUrl + x-speakeasy-metadata: + after: sdk.knowledge.getFileDownloadUrl() + before: sdk.knowledge.get_file_download_url_knowledge__knowledge_base_id__files__file_id__download_url_get() + created_at: 1732872000000 + reviewed_at: 1732872000000 type: method-name diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 382d0f7..b6a05ad 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -1,9 +1,9 @@ -speakeasyVersion: 1.648.0 +speakeasyVersion: 1.664.0 sources: FastAPI: sourceNamespace: fast-api - sourceRevisionDigest: sha256:7f6113b631e591c5172b1fecf220daf1e60612c9d0b960fbf174ec61d4576e9f - sourceBlobDigest: sha256:d1c93e3507577ac57bb74e778dc551fdeb807d6e786fc7fd67d36857bc824926 + sourceRevisionDigest: sha256:13e03f4bb2d2e45398714ef484f5d060570653dbf652ca0bc2d3b03c051da7f1 + sourceBlobDigest: sha256:675e1b26aa256bad5e337faa07b1a3b907a34836d515f33542cee6b2b52ff195 tags: - latest - 2.0.0 @@ -11,10 +11,10 @@ targets: opper: source: FastAPI sourceNamespace: fast-api - sourceRevisionDigest: sha256:7f6113b631e591c5172b1fecf220daf1e60612c9d0b960fbf174ec61d4576e9f - sourceBlobDigest: sha256:d1c93e3507577ac57bb74e778dc551fdeb807d6e786fc7fd67d36857bc824926 + sourceRevisionDigest: sha256:13e03f4bb2d2e45398714ef484f5d060570653dbf652ca0bc2d3b03c051da7f1 + sourceBlobDigest: sha256:675e1b26aa256bad5e337faa07b1a3b907a34836d515f33542cee6b2b52ff195 codeSamplesNamespace: fast-api-python-code-samples - codeSamplesRevisionDigest: sha256:ff9823a33bef438987661b4235c4ebf0fb9b91189578dc6f5433524146185510 + codeSamplesRevisionDigest: sha256:0d7ea2f726f0c386f9ddb792d1f74d817de3821fc0c04618ffb9b20b7e86620d workflow: 
workflowVersion: 1.0.0 speakeasyVersion: latest diff --git a/USAGE.md b/USAGE.md index 16455cb..dc771ff 100644 --- a/USAGE.md +++ b/USAGE.md @@ -1,7 +1,7 @@ ```python # Synchronous Example -from opperai import Opper, models +from opperai import Opper import os @@ -55,11 +55,7 @@ with Opper( ], parent_span_id="123e4567-e89b-12d3-a456-426614174000", tags={ "project": "project_456", "user": "company_123", - }, configuration=models.FunctionCallConfiguration( - beta_evaluation=models.EvaluationConfig( - scorers=models.ScorersEnum1.BASE, - ), - )) + }, configuration={}) # Handle response print(res) @@ -72,7 +68,7 @@ The same SDK client can also be used to make asynchronous requests by importing ```python # Asynchronous Example import asyncio -from opperai import Opper, models +from opperai import Opper import os async def main(): @@ -127,11 +123,7 @@ async def main(): ], parent_span_id="123e4567-e89b-12d3-a456-426614174000", tags={ "project": "project_456", "user": "company_123", - }, configuration=models.FunctionCallConfiguration( - beta_evaluation=models.EvaluationConfig( - scorers=models.ScorersEnum1.BASE, - ), - )) + }, configuration={}) # Handle response print(res) diff --git a/docs/models/appapipublicv2functioncallcallfunctionrequest.md b/docs/models/appapipublicv2functioncallcallfunctionrequest.md index 910da82..f80e620 100644 --- a/docs/models/appapipublicv2functioncallcallfunctionrequest.md +++ b/docs/models/appapipublicv2functioncallcallfunctionrequest.md @@ -14,4 +14,4 @@ | `examples` | List[[models.Example](../models/example.md)] | :heavy_minus_sign: | Optionally provide examples of successful task completions. Will be added to the prompt to help the model understand the task from examples. | [
{
"comment": "Adds two numbers",
"input": {
"x": 1,
"y": 3
},
"output": {
"sum": 4
}
}
] | | `parent_span_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Optionally provide the parent span ID to add to the call event. This will automatically tie the call to a parent span in the UI. | 123e4567-e89b-12d3-a456-426614174000 | | `tags` | Dict[str, *str*] | :heavy_minus_sign: | Optionally provide a list of tags to add to the call event. Useful for being able to understand aggregate analytics on some dimension. | {
"project": "project_456",
"user": "company_123"
} | -| `configuration` | [OptionalNullable[models.FunctionCallConfiguration]](../models/functioncallconfiguration.md) | :heavy_minus_sign: | Optional configuration for the function.Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function. | {
"beta.evaluation": {
"enabled": true,
"scorers": "base"
},
"beta.invocation.input_validation.enabled": false,
"beta.invocation.xml_mode.enabled": false,
"invocation.cache.ttl": 0,
"invocation.few_shot.count": 0,
"invocation.structured_generation.max_attempts": 5
} | \ No newline at end of file +| `configuration` | [OptionalNullable[models.FunctionCallConfigurationInput]](../models/functioncallconfigurationinput.md) | :heavy_minus_sign: | Optional configuration for the function.Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function. | {
"beta.invocation.input_validation.enabled": false,
"beta.invocation.xml_mode.enabled": false,
"invocation.cache.ttl": 0,
"invocation.few_shot.count": 0,
"invocation.structured_generation.max_attempts": 5
} | \ No newline at end of file diff --git a/docs/models/createfunctionrequest.md b/docs/models/createfunctionrequest.md index e68b6d8..1cb29bc 100644 --- a/docs/models/createfunctionrequest.md +++ b/docs/models/createfunctionrequest.md @@ -11,4 +11,4 @@ | `input_schema` | Dict[str, *Any*] | :heavy_minus_sign: | Optional input schema for the function. Can preferably include field descriptions to allow the model to reason about the input variables. Schema is validated against the input data and issues an error if it does not match. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. For schemas with definitions, prefer using '$defs' and '#/$defs/...' references. | {
"properties": {
"x": {
"title": "X",
"type": "integer"
},
"y": {
"title": "Y",
"type": "integer"
}
},
"required": [
"x",
"y"
],
"title": "OpperInputExample",
"type": "object"
} | | `output_schema` | Dict[str, *Any*] | :heavy_minus_sign: | Optional output schema for the function. Can preferably include field descriptions to allow the model to reason about the output variables. Schema is validated against the output data and issues an error if it does not match. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. For schemas with definitions, prefer using '$defs' and '#/$defs/...' references. | {
"properties": {
"sum": {
"title": "Sum",
"type": "integer"
}
},
"required": [
"sum"
],
"title": "OpperOutputExample",
"type": "object"
} | | `model` | [Optional[models.TModel]](../models/tmodel.md) | :heavy_minus_sign: | N/A | | -| `configuration` | [OptionalNullable[models.FunctionCallConfiguration]](../models/functioncallconfiguration.md) | :heavy_minus_sign: | Optional configuration for the function.Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function. | {
"beta.evaluation": {
"enabled": true,
"scorers": "base"
},
"beta.invocation.input_validation.enabled": false,
"beta.invocation.xml_mode.enabled": false,
"invocation.cache.ttl": 0,
"invocation.few_shot.count": 0,
"invocation.structured_generation.max_attempts": 5
} | \ No newline at end of file +| `configuration` | [OptionalNullable[models.FunctionCallConfigurationInput]](../models/functioncallconfigurationinput.md) | :heavy_minus_sign: | Optional configuration for the function.Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function. | {
"beta.invocation.input_validation.enabled": false,
"beta.invocation.xml_mode.enabled": false,
"invocation.cache.ttl": 0,
"invocation.few_shot.count": 0,
"invocation.structured_generation.max_attempts": 5
} | \ No newline at end of file diff --git a/docs/models/createfunctionresponse.md b/docs/models/createfunctionresponse.md index 1b6dbe3..04987f5 100644 --- a/docs/models/createfunctionresponse.md +++ b/docs/models/createfunctionresponse.md @@ -11,7 +11,8 @@ | `input_schema` | Dict[str, *Any*] | :heavy_minus_sign: | Optional input schema for the function. Can preferably include field descriptions to allow the model to reason about the input variables. Schema is validated against the input data and issues an error if it does not match. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. For schemas with definitions, prefer using '$defs' and '#/$defs/...' references. | {
"properties": {
"x": {
"title": "X",
"type": "integer"
},
"y": {
"title": "Y",
"type": "integer"
}
},
"required": [
"x",
"y"
],
"title": "OpperInputExample",
"type": "object"
} | | `output_schema` | Dict[str, *Any*] | :heavy_minus_sign: | Optional output schema for the function. Can preferably include field descriptions to allow the model to reason about the output variables. Schema is validated against the output data and issues an error if it does not match. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. For schemas with definitions, prefer using '$defs' and '#/$defs/...' references. | {
"properties": {
"sum": {
"title": "Sum",
"type": "integer"
}
},
"required": [
"sum"
],
"title": "OpperOutputExample",
"type": "object"
} | | `model` | [Optional[models.TModel]](../models/tmodel.md) | :heavy_minus_sign: | N/A | | -| `configuration` | [OptionalNullable[models.FunctionCallConfiguration]](../models/functioncallconfiguration.md) | :heavy_minus_sign: | Optional configuration for the function.Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function. | {
"beta.evaluation": {
"enabled": true,
"scorers": "base"
},
"beta.invocation.input_validation.enabled": false,
"beta.invocation.xml_mode.enabled": false,
"invocation.cache.ttl": 0,
"invocation.few_shot.count": 0,
"invocation.structured_generation.max_attempts": 5
} | +| `configuration` | [OptionalNullable[models.FunctionCallConfigurationOutput]](../models/functioncallconfigurationoutput.md) | :heavy_minus_sign: | Optional configuration for the function.Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function. | {
"beta.invocation.input_validation.enabled": false,
"beta.invocation.xml_mode.enabled": false,
"invocation.cache.ttl": 0,
"invocation.few_shot.count": 0,
"invocation.structured_generation.max_attempts": 5
} | | `id` | *str* | :heavy_check_mark: | The ID of the function | | | `dataset_id` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the dataset associated with the function | | -| `revision_id` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the latest revision of the function | | \ No newline at end of file +| `revision_id` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the latest revision of the function | | +| `observer_enabled` | *Optional[bool]* | :heavy_minus_sign: | Whether the observer is enabled for this function. When enabled, the observer monitors and evaluates all generations. | | \ No newline at end of file diff --git a/docs/models/evaluationconfig.md b/docs/models/evaluationconfig.md deleted file mode 100644 index 4bb55bd..0000000 --- a/docs/models/evaluationconfig.md +++ /dev/null @@ -1,19 +0,0 @@ -# EvaluationConfig - -Configuration for evaluation features stored under 'beta.evaluation'. - -- enabled: master switch -- scorers: which evaluators to run. Accepts: - - string: "base" | "rubrics" | "toxicity" | "hallucination" | "qa" - - dict: { "rubrics": RubricDefinition-like payload } - - list[str | dict] - "base" is the default scorer. - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- | -| `enabled` | *Optional[bool]* | :heavy_minus_sign: | Enable evaluation features (base or rubrics). 
| -| `scorers` | [Optional[models.ScorersUnion2]](../models/scorersunion2.md) | :heavy_minus_sign: | Evaluation scorers to run: 'base', 'rubrics', 'toxicity', 'hallucination', 'qa', 'agent_tool_selection', 'regex', 'max_length', or a list of them. | -| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/filedownloadurlresponse.md b/docs/models/filedownloadurlresponse.md new file mode 100644 index 0000000..7a63556 --- /dev/null +++ b/docs/models/filedownloadurlresponse.md @@ -0,0 +1,9 @@ +# FileDownloadURLResponse + + +## Fields + +| Field | Type | Required | Description | +| --------------------------------------- | --------------------------------------- | --------------------------------------- | --------------------------------------- | +| `url` | *str* | :heavy_check_mark: | Presigned URL to download the file | +| `expires_in` | *int* | :heavy_check_mark: | Number of seconds until the URL expires | \ No newline at end of file diff --git a/docs/models/functioncallconfiguration.md b/docs/models/functioncallconfiguration.md deleted file mode 100644 index bb295de..0000000 --- a/docs/models/functioncallconfiguration.md +++ /dev/null @@ -1,13 +0,0 @@ -# FunctionCallConfiguration - - -## Fields - -| Field | Type | Required | Description | -| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `invocation_few_shot_count` | *Optional[int]* | :heavy_minus_sign: | The number of few-shot examples to use for the call. The examples are selected using nearest neighbor search of the function's dataset for items that are similar to the input. | -| `beta_evaluation` | [Optional[models.EvaluationConfig]](../models/evaluationconfig.md) | :heavy_minus_sign: | Configuration for evaluation features stored under 'beta.evaluation'.

- enabled: master switch
- scorers: which evaluators to run. Accepts:
- string: "base" \| "rubrics" \| "toxicity" \| "hallucination" \| "qa"
- dict: { "rubrics": RubricDefinition-like payload }
- list[str \| dict]
"base" is the default scorer. | -| `invocation_structured_generation_max_attempts` | *Optional[int]* | :heavy_minus_sign: | The maximum number of attempts to make when generating a response matching the output schema if provided. | -| `invocation_cache_ttl` | *Optional[int]* | :heavy_minus_sign: | The time to live for the cache in seconds. If 0, the cache is disabled. | -| `beta_invocation_input_validation_enabled` | *Optional[bool]* | :heavy_minus_sign: | Whether to enable input validation against the input schema. This is a beta feature and is disabled by default. | -| `beta_invocation_xml_mode_enabled` | *Optional[bool]* | :heavy_minus_sign: | Experimental: enable XML structured output. The model receives an XML schema and its response is converted back to JSON. We have observed better adherence to multi-paragraph text fields (especially with Anthropic models) when this is enabled. | \ No newline at end of file diff --git a/docs/models/functioncallconfigurationinput.md b/docs/models/functioncallconfigurationinput.md new file mode 100644 index 0000000..bc49b65 --- /dev/null +++ b/docs/models/functioncallconfigurationinput.md @@ -0,0 +1,13 @@ +# FunctionCallConfigurationInput + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `invocation_few_shot_count` | *Optional[int]* | :heavy_minus_sign: | The number of few-shot examples to use for the call. The examples are selected using nearest neighbor search of the function's dataset for items that are similar to the input. | +| `beta_evaluation` | *OptionalNullable[Any]* | :heavy_minus_sign: | [DEPRECATED] This field is ignored. Evaluation is controlled by observer_enabled. | +| `invocation_structured_generation_max_attempts` | *Optional[int]* | :heavy_minus_sign: | The maximum number of attempts to make when generating a response matching the output schema if provided. | +| `invocation_cache_ttl` | *Optional[int]* | :heavy_minus_sign: | The time to live for the cache in seconds. If 0, the cache is disabled. | +| `beta_invocation_input_validation_enabled` | *Optional[bool]* | :heavy_minus_sign: | Whether to enable input validation against the input schema. This is a beta feature and is disabled by default. | +| `beta_invocation_xml_mode_enabled` | *Optional[bool]* | :heavy_minus_sign: | Experimental: enable XML structured output. The model receives an XML schema and its response is converted back to JSON. We have observed better adherence to multi-paragraph text fields (especially with Anthropic models) when this is enabled. 
| \ No newline at end of file diff --git a/docs/models/functioncallconfigurationoutput.md b/docs/models/functioncallconfigurationoutput.md new file mode 100644 index 0000000..5fa418d --- /dev/null +++ b/docs/models/functioncallconfigurationoutput.md @@ -0,0 +1,12 @@ +# FunctionCallConfigurationOutput + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `invocation_few_shot_count` | *Optional[int]* | :heavy_minus_sign: | The number of few-shot examples to use for the call. The examples are selected using nearest neighbor search of the function's dataset for items that are similar to the input. | +| `invocation_structured_generation_max_attempts` | *Optional[int]* | :heavy_minus_sign: | The maximum number of attempts to make when generating a response matching the output schema if provided. | +| `invocation_cache_ttl` | *Optional[int]* | :heavy_minus_sign: | The time to live for the cache in seconds. If 0, the cache is disabled. 
| +| `beta_invocation_input_validation_enabled` | *Optional[bool]* | :heavy_minus_sign: | Whether to enable input validation against the input schema. This is a beta feature and is disabled by default. | +| `beta_invocation_xml_mode_enabled` | *Optional[bool]* | :heavy_minus_sign: | Experimental: enable XML structured output. The model receives an XML schema and its response is converted back to JSON. We have observed better adherence to multi-paragraph text fields (especially with Anthropic models) when this is enabled. | \ No newline at end of file diff --git a/docs/models/getfiledownloadurlknowledgeknowledgebaseidfilesfileiddownloadurlgetrequest.md b/docs/models/getfiledownloadurlknowledgeknowledgebaseidfilesfileiddownloadurlgetrequest.md new file mode 100644 index 0000000..99ba8b2 --- /dev/null +++ b/docs/models/getfiledownloadurlknowledgeknowledgebaseidfilesfileiddownloadurlgetrequest.md @@ -0,0 +1,9 @@ +# GetFileDownloadURLKnowledgeKnowledgeBaseIDFilesFileIDDownloadURLGetRequest + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------- | ---------------------------- | ---------------------------- | ---------------------------- | +| `knowledge_base_id` | *str* | :heavy_check_mark: | The id of the knowledge base | +| `file_id` | *str* | :heavy_check_mark: | The id of the file | \ No newline at end of file diff --git a/docs/models/getfunctionresponse.md b/docs/models/getfunctionresponse.md index 8f286b6..7ea1732 100644 --- a/docs/models/getfunctionresponse.md +++ b/docs/models/getfunctionresponse.md @@ -11,7 +11,8 @@ | `input_schema` | Dict[str, *Any*] | :heavy_minus_sign: | Optional input schema for the function. Can preferably include field descriptions to allow the model to reason about the input variables. Schema is validated against the input data and issues an error if it does not match. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. 
For schemas with definitions, prefer using '$defs' and '#/$defs/...' references. | {
"properties": {
"x": {
"title": "X",
"type": "integer"
},
"y": {
"title": "Y",
"type": "integer"
}
},
"required": [
"x",
"y"
],
"title": "OpperInputExample",
"type": "object"
} | | `output_schema` | Dict[str, *Any*] | :heavy_minus_sign: | Optional output schema for the function. Can preferably include field descriptions to allow the model to reason about the output variables. Schema is validated against the output data and issues an error if it does not match. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. For schemas with definitions, prefer using '$defs' and '#/$defs/...' references. | {
"properties": {
"sum": {
"title": "Sum",
"type": "integer"
}
},
"required": [
"sum"
],
"title": "OpperOutputExample",
"type": "object"
} | | `model` | [Optional[models.TModel]](../models/tmodel.md) | :heavy_minus_sign: | N/A | | -| `configuration` | [OptionalNullable[models.FunctionCallConfiguration]](../models/functioncallconfiguration.md) | :heavy_minus_sign: | Optional configuration for the function.Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function. | {
"beta.evaluation": {
"enabled": true,
"scorers": "base"
},
"beta.invocation.input_validation.enabled": false,
"beta.invocation.xml_mode.enabled": false,
"invocation.cache.ttl": 0,
"invocation.few_shot.count": 0,
"invocation.structured_generation.max_attempts": 5
} | +| `configuration` | [OptionalNullable[models.FunctionCallConfigurationOutput]](../models/functioncallconfigurationoutput.md) | :heavy_minus_sign: | Optional configuration for the function. Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function. | {
"beta.invocation.input_validation.enabled": false,
"beta.invocation.xml_mode.enabled": false,
"invocation.cache.ttl": 0,
"invocation.few_shot.count": 0,
"invocation.structured_generation.max_attempts": 5
} | | `id` | *str* | :heavy_check_mark: | The ID of the function | | | `dataset_id` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the dataset associated with the function | | -| `revision_id` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the latest revision of the function | | \ No newline at end of file +| `revision_id` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the latest revision of the function | | +| `observer_enabled` | *Optional[bool]* | :heavy_minus_sign: | Whether the observer is enabled for this function. When enabled, the observer monitors and evaluates all generations. | | \ No newline at end of file diff --git a/docs/models/listfilesknowledgeknowledgebaseidfilesgetrequest.md b/docs/models/listfilesknowledgeknowledgebaseidfilesgetrequest.md new file mode 100644 index 0000000..ba431df --- /dev/null +++ b/docs/models/listfilesknowledgeknowledgebaseidfilesgetrequest.md @@ -0,0 +1,10 @@ +# ListFilesKnowledgeKnowledgeBaseIDFilesGetRequest + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------------------------- | ----------------------------------------------- | ----------------------------------------------- | ----------------------------------------------- | +| `knowledge_base_id` | *str* | :heavy_check_mark: | The id of the knowledge base to list files from | +| `offset` | *Optional[int]* | :heavy_minus_sign: | The offset to start the list from | +| `limit` | *Optional[int]* | :heavy_minus_sign: | The number of files to return | \ No newline at end of file diff --git a/docs/models/listfilesresponse.md b/docs/models/listfilesresponse.md new file mode 100644 index 0000000..e63c4be --- /dev/null +++ b/docs/models/listfilesresponse.md @@ -0,0 +1,13 @@ +# ListFilesResponse + + +## Fields + +| Field | Type | Required | Description | Example | +| ----------------------------------------- | ----------------------------------------- | ----------------------------------------- | 
----------------------------------------- | ----------------------------------------- | +| `id` | *str* | :heavy_check_mark: | The id of the file | | +| `original_filename` | *str* | :heavy_check_mark: | The original filename | | +| `size` | *int* | :heavy_check_mark: | The size of the file in bytes | | +| `status` | *str* | :heavy_check_mark: | The indexing status of the file | | +| `document_id` | *int* | :heavy_check_mark: | The id of the associated document | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | The metadata attached to the file | {
"category": "legal",
"client": "acme"
} | \ No newline at end of file diff --git a/docs/models/paginatedresponselistfilesresponse.md b/docs/models/paginatedresponselistfilesresponse.md new file mode 100644 index 0000000..f552e0b --- /dev/null +++ b/docs/models/paginatedresponselistfilesresponse.md @@ -0,0 +1,9 @@ +# PaginatedResponseListFilesResponse + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `meta` | [models.Meta](../models/meta.md) | :heavy_check_mark: | N/A | +| `data` | List[[models.ListFilesResponse](../models/listfilesresponse.md)] | :heavy_check_mark: | List of items returned in the response | \ No newline at end of file diff --git a/docs/models/registerfileuploadrequest.md b/docs/models/registerfileuploadrequest.md index 6a73b78..756f9c3 100644 --- a/docs/models/registerfileuploadrequest.md +++ b/docs/models/registerfileuploadrequest.md @@ -8,4 +8,5 @@ | `filename` | *str* | :heavy_check_mark: | The filename of the file to register | example.pdf | | `file_id` | *str* | :heavy_check_mark: | The id of the file to register | | | `content_type` | *str* | :heavy_check_mark: | The content type of the file to register | application/pdf | -| `configuration` | [OptionalNullable[models.TextProcessingConfiguration]](../models/textprocessingconfiguration.md) | :heavy_minus_sign: | The configuration for the file to register | | \ No newline at end of file +| `configuration` | [OptionalNullable[models.TextProcessingConfiguration]](../models/textprocessingconfiguration.md) | :heavy_minus_sign: | The configuration for the file to register | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Optional metadata to attach to the file | {
"category": "legal",
"client": "acme"
} | \ No newline at end of file diff --git a/docs/models/registerfileuploadresponse.md b/docs/models/registerfileuploadresponse.md index 826b435..afdc9e1 100644 --- a/docs/models/registerfileuploadresponse.md +++ b/docs/models/registerfileuploadresponse.md @@ -8,4 +8,5 @@ | `id` | *str* | :heavy_check_mark: | N/A | | `key` | *str* | :heavy_check_mark: | N/A | | `original_filename` | *str* | :heavy_check_mark: | N/A | -| `document_id` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file +| `document_id` | *int* | :heavy_check_mark: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/scorersenum1.md b/docs/models/scorersenum1.md deleted file mode 100644 index 8f59dc0..0000000 --- a/docs/models/scorersenum1.md +++ /dev/null @@ -1,15 +0,0 @@ -# ScorersEnum1 - - -## Values - -| Name | Value | -| ---------------------- | ---------------------- | -| `BASE` | base | -| `RUBRICS` | rubrics | -| `TOXICITY` | toxicity | -| `HALLUCINATION` | hallucination | -| `QA` | qa | -| `AGENT_TOOL_SELECTION` | agent_tool_selection | -| `REGEX` | regex | -| `MAX_LENGTH` | max_length | \ No newline at end of file diff --git a/docs/models/scorersenum2.md b/docs/models/scorersenum2.md deleted file mode 100644 index 08e2451..0000000 --- a/docs/models/scorersenum2.md +++ /dev/null @@ -1,15 +0,0 @@ -# ScorersEnum2 - - -## Values - -| Name | Value | -| ---------------------- | ---------------------- | -| `BASE` | base | -| `RUBRICS` | rubrics | -| `TOXICITY` | toxicity | -| `HALLUCINATION` | hallucination | -| `QA` | qa | -| `AGENT_TOOL_SELECTION` | agent_tool_selection | -| `REGEX` | regex | -| `MAX_LENGTH` | max_length | \ No newline at end of file diff --git a/docs/models/scorersunion1.md b/docs/models/scorersunion1.md deleted file mode 100644 index fe4ff32..0000000 --- a/docs/models/scorersunion1.md +++ /dev/null @@ -1,17 +0,0 @@ -# ScorersUnion1 - - -## Supported Types - -### `models.ScorersEnum2` - 
-```python -value: models.ScorersEnum2 = /* values here */ -``` - -### `Dict[str, Any]` - -```python -value: Dict[str, Any] = /* values here */ -``` - diff --git a/docs/models/scorersunion2.md b/docs/models/scorersunion2.md deleted file mode 100644 index e7f53d8..0000000 --- a/docs/models/scorersunion2.md +++ /dev/null @@ -1,25 +0,0 @@ -# ScorersUnion2 - -Evaluation scorers to run: 'base', 'rubrics', 'toxicity', 'hallucination', 'qa', 'agent_tool_selection', 'regex', 'max_length', or a list of them. - - -## Supported Types - -### `models.ScorersEnum1` - -```python -value: models.ScorersEnum1 = /* values here */ -``` - -### `Dict[str, Any]` - -```python -value: Dict[str, Any] = /* values here */ -``` - -### `List[models.ScorersUnion1]` - -```python -value: List[models.ScorersUnion1] = /* values here */ -``` - diff --git a/docs/models/updatefunctionrequest.md b/docs/models/updatefunctionrequest.md index 19b9f07..c2efb38 100644 --- a/docs/models/updatefunctionrequest.md +++ b/docs/models/updatefunctionrequest.md @@ -11,4 +11,4 @@ | `input_schema` | Dict[str, *Any*] | :heavy_minus_sign: | Optional input schema for the function. Can preferably include field descriptions to allow the model to reason about the input variables. Schema is validated against the input data and issues an error if it does not match. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. For schemas with definitions, prefer using '$defs' and '#/$defs/...' references. | {
"properties": {
"x": {
"title": "X",
"type": "integer"
},
"y": {
"title": "Y",
"type": "integer"
}
},
"required": [
"x",
"y"
],
"title": "OpperInputExample",
"type": "object"
} | | `output_schema` | Dict[str, *Any*] | :heavy_minus_sign: | Optional output schema for the function. Can preferably include field descriptions to allow the model to reason about the output variables. Schema is validated against the output data and issues an error if it does not match. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. For schemas with definitions, prefer using '$defs' and '#/$defs/...' references. | {
"properties": {
"sum": {
"title": "Sum",
"type": "integer"
}
},
"required": [
"sum"
],
"title": "OpperOutputExample",
"type": "object"
} | | `model` | [OptionalNullable[models.TModel]](../models/tmodel.md) | :heavy_minus_sign: | Optionally provide a model to use for completing the task.
If not provided, a default model will be used. Currently the default model is `azure/gpt-4o-eu`

To specify options for the model, use a dictionary of key-value pairs. The options are passed to the model on invocation.
An example of passing temperature to `gpt-4o-mini` hosted on OpenAI is shown below.

```json
{
"model": "openai/gpt-4o-mini", # the model name
"options": {
"temperature": 0.5 # the options for the model
}
}
```

To specify a fallback model, use a list of models. The models will then be tried in order.
The second model will be used if the first model is not available, and so on.

```json
[
"openai/gpt-4o-mini", # first model to try
"openai/gpt-4.1-nano", # second model to try
]
```
| | -| `configuration` | Dict[str, *Any*] | :heavy_minus_sign: | Optional configuration for the function.Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function. | {
"beta.evaluation": {
"enabled": true,
"scorers": "base"
},
"beta.invocation.input_validation.enabled": false,
"beta.invocation.xml_mode.enabled": false,
"invocation.cache.ttl": 0,
"invocation.few_shot.count": 0,
"invocation.structured_generation.max_attempts": 5
} | \ No newline at end of file +| `configuration` | Dict[str, *Any*] | :heavy_minus_sign: | Optional configuration for the function. Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function. | {
"beta.invocation.input_validation.enabled": false,
"beta.invocation.xml_mode.enabled": false,
"invocation.cache.ttl": 0,
"invocation.few_shot.count": 0,
"invocation.structured_generation.max_attempts": 5
} | \ No newline at end of file diff --git a/docs/models/updatefunctionresponse.md b/docs/models/updatefunctionresponse.md index 3ea6064..5b93412 100644 --- a/docs/models/updatefunctionresponse.md +++ b/docs/models/updatefunctionresponse.md @@ -11,7 +11,8 @@ | `input_schema` | Dict[str, *Any*] | :heavy_minus_sign: | Optional input schema for the function. Can preferably include field descriptions to allow the model to reason about the input variables. Schema is validated against the input data and issues an error if it does not match. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. For schemas with definitions, prefer using '$defs' and '#/$defs/...' references. | {
"properties": {
"x": {
"title": "X",
"type": "integer"
},
"y": {
"title": "Y",
"type": "integer"
}
},
"required": [
"x",
"y"
],
"title": "OpperInputExample",
"type": "object"
} | | `output_schema` | Dict[str, *Any*] | :heavy_minus_sign: | Optional output schema for the function. Can preferably include field descriptions to allow the model to reason about the output variables. Schema is validated against the output data and issues an error if it does not match. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. For schemas with definitions, prefer using '$defs' and '#/$defs/...' references. | {
"properties": {
"sum": {
"title": "Sum",
"type": "integer"
}
},
"required": [
"sum"
],
"title": "OpperOutputExample",
"type": "object"
} | | `model` | [Optional[models.TModel]](../models/tmodel.md) | :heavy_minus_sign: | N/A | | -| `configuration` | [OptionalNullable[models.FunctionCallConfiguration]](../models/functioncallconfiguration.md) | :heavy_minus_sign: | Optional configuration for the function.Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function. | {
"beta.evaluation": {
"enabled": true,
"scorers": "base"
},
"beta.invocation.input_validation.enabled": false,
"beta.invocation.xml_mode.enabled": false,
"invocation.cache.ttl": 0,
"invocation.few_shot.count": 0,
"invocation.structured_generation.max_attempts": 5
} | +| `configuration` | [OptionalNullable[models.FunctionCallConfigurationOutput]](../models/functioncallconfigurationoutput.md) | :heavy_minus_sign: | Optional configuration for the function. Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function. | {
"beta.invocation.input_validation.enabled": false,
"beta.invocation.xml_mode.enabled": false,
"invocation.cache.ttl": 0,
"invocation.few_shot.count": 0,
"invocation.structured_generation.max_attempts": 5
} | | `id` | *str* | :heavy_check_mark: | The ID of the function | | | `dataset_id` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the dataset associated with the function | | -| `revision_id` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the latest revision of the function | | \ No newline at end of file +| `revision_id` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the latest revision of the function | | +| `observer_enabled` | *Optional[bool]* | :heavy_minus_sign: | Whether the observer is enabled for this function. When enabled, the observer monitors and evaluates all generations. | | \ No newline at end of file diff --git a/docs/models/updatespanrequest.md b/docs/models/updatespanrequest.md index d5ab0cc..d3dea27 100644 --- a/docs/models/updatespanrequest.md +++ b/docs/models/updatespanrequest.md @@ -6,9 +6,9 @@ | Field | Type | Required | Description | Example | | --------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------- | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | The name of the span, something descriptive about the span that will be used to identify it when querying | my span | -| `start_time` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The start time of the span in UTC | 2025-11-04T12:46:20.444851Z | +| `start_time` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The start time of the span in UTC | 2025-11-28T13:52:31.359105Z | | `type` | 
*OptionalNullable[str]* | :heavy_minus_sign: | The type of the span | email_tool | -| `end_time` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The end time of the span in UTC | 2025-11-04T12:46:20.444947Z | +| `end_time` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The end time of the span in UTC | 2025-11-28T13:52:31.359201Z | | `input` | *OptionalNullable[str]* | :heavy_minus_sign: | The input of the span | Hello, world! | | `output` | *OptionalNullable[str]* | :heavy_minus_sign: | The output of the span | Hello, world! | | `error` | *OptionalNullable[str]* | :heavy_minus_sign: | In case of an error, the error message | Exception: This is an error message | diff --git a/docs/sdks/functions/README.md b/docs/sdks/functions/README.md index 7e9a98c..0879dde 100644 --- a/docs/sdks/functions/README.md +++ b/docs/sdks/functions/README.md @@ -25,7 +25,7 @@ Create a function ```python -from opperai import Opper, models +from opperai import Opper import os @@ -62,11 +62,7 @@ with Opper( ], "title": "OpperOutputExample", "type": "object", - }, configuration=models.FunctionCallConfiguration( - beta_evaluation=models.EvaluationConfig( - scorers=models.ScorersEnum1.BASE, - ), - )) + }, configuration={}) # Handle response print(res) @@ -83,7 +79,7 @@ with Opper( | `input_schema` | Dict[str, *Any*] | :heavy_minus_sign: | Optional input schema for the function. Can preferably include field descriptions to allow the model to reason about the input variables. Schema is validated against the input data and issues an error if it does not match. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. For schemas with definitions, prefer using '$defs' and '#/$defs/...' references. | {
"properties": {
"x": {
"title": "X",
"type": "integer"
},
"y": {
"title": "Y",
"type": "integer"
}
},
"required": [
"x",
"y"
],
"title": "OpperInputExample",
"type": "object"
} | | `output_schema` | Dict[str, *Any*] | :heavy_minus_sign: | Optional output schema for the function. Can preferably include field descriptions to allow the model to reason about the output variables. Schema is validated against the output data and issues an error if it does not match. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. For schemas with definitions, prefer using '$defs' and '#/$defs/...' references. | {
"properties": {
"sum": {
"title": "Sum",
"type": "integer"
}
},
"required": [
"sum"
],
"title": "OpperOutputExample",
"type": "object"
} | | `model` | [Optional[models.TModel]](../../models/tmodel.md) | :heavy_minus_sign: | N/A | | -| `configuration` | [OptionalNullable[models.FunctionCallConfiguration]](../../models/functioncallconfiguration.md) | :heavy_minus_sign: | Optional configuration for the function.Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function. | {
"beta.evaluation": {
"enabled": true,
"scorers": "base"
},
"beta.invocation.input_validation.enabled": false,
"beta.invocation.xml_mode.enabled": false,
"invocation.cache.ttl": 0,
"invocation.few_shot.count": 0,
"invocation.structured_generation.max_attempts": 5
} | +| `configuration` | [OptionalNullable[models.FunctionCallConfigurationInput]](../../models/functioncallconfigurationinput.md) | :heavy_minus_sign: | Optional configuration for the function. Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function. | {
"beta.invocation.input_validation.enabled": false,
"beta.invocation.xml_mode.enabled": false,
"invocation.cache.ttl": 0,
"invocation.few_shot.count": 0,
"invocation.structured_generation.max_attempts": 5
} | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response @@ -238,10 +234,6 @@ with Opper( "title": "OpperOutputExample", "type": "object", }, configuration={ - "beta.evaluation": { - "enabled": True, - "scorers": "base", - }, "beta.invocation.input_validation.enabled": False, "beta.invocation.xml_mode.enabled": False, "invocation.cache.ttl": 0, @@ -265,7 +257,7 @@ with Opper( | `input_schema` | Dict[str, *Any*] | :heavy_minus_sign: | Optional input schema for the function. Can preferably include field descriptions to allow the model to reason about the input variables. Schema is validated against the input data and issues an error if it does not match. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. For schemas with definitions, prefer using '$defs' and '#/$defs/...' references. | {
"properties": {
"x": {
"title": "X",
"type": "integer"
},
"y": {
"title": "Y",
"type": "integer"
}
},
"required": [
"x",
"y"
],
"title": "OpperInputExample",
"type": "object"
} | | `output_schema` | Dict[str, *Any*] | :heavy_minus_sign: | Optional output schema for the function. Can preferably include field descriptions to allow the model to reason about the output variables. Schema is validated against the output data and issues an error if it does not match. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. For schemas with definitions, prefer using '$defs' and '#/$defs/...' references. | {
"properties": {
"sum": {
"title": "Sum",
"type": "integer"
}
},
"required": [
"sum"
],
"title": "OpperOutputExample",
"type": "object"
} | | `model` | [OptionalNullable[models.TModel]](../../models/tmodel.md) | :heavy_minus_sign: | Optionally provide a model to use for completing the task.
If not provided, a default model will be used. Currently the default model is `azure/gpt-4o-eu`

To specify options for the model, use a dictionary of key-value pairs. The options are passed to the model on invocation.
An example of passing temperature to `gpt-4o-mini` hosted on OpenAI is shown below.

```json
{
"model": "openai/gpt-4o-mini", # the model name
"options": {
"temperature": 0.5 # the options for the model
}
}
```

To specify a fallback model, use a list of models. The models will then be tried in order.
The second model will be used if the first model is not available, and so on.

```json
[
"openai/gpt-4o-mini", # first model to try
"openai/gpt-4.1-nano", # second model to try
]
```
| | -| `configuration` | Dict[str, *Any*] | :heavy_minus_sign: | Optional configuration for the function.Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function. | {
"beta.evaluation": {
"enabled": true,
"scorers": "base"
},
"beta.invocation.input_validation.enabled": false,
"beta.invocation.xml_mode.enabled": false,
"invocation.cache.ttl": 0,
"invocation.few_shot.count": 0,
"invocation.structured_generation.max_attempts": 5
} | +| `configuration` | Dict[str, *Any*] | :heavy_minus_sign: | Optional configuration for the function.Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function. | {
"beta.invocation.input_validation.enabled": false,
"beta.invocation.xml_mode.enabled": false,
"invocation.cache.ttl": 0,
"invocation.few_shot.count": 0,
"invocation.structured_generation.max_attempts": 5
} | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response diff --git a/docs/sdks/knowledge/README.md b/docs/sdks/knowledge/README.md index 4272d9a..d5370af 100644 --- a/docs/sdks/knowledge/README.md +++ b/docs/sdks/knowledge/README.md @@ -13,6 +13,8 @@ * [get_upload_url](#get_upload_url) - Get Upload Url * [register_file_upload](#register_file_upload) - Register File Upload * [delete_file](#delete_file) - Delete File From Knowledge Base +* [get_file_download_url](#get_file_download_url) - Get File Download Url +* [list_files](#list_files) - List Files * [query](#query) - Query Knowledge Base * [delete_documents](#delete_documents) - Delete Documents * [add](#add) - Add @@ -305,7 +307,10 @@ with Opper( http_bearer=os.getenv("OPPER_HTTP_BEARER", ""), ) as opper: - res = opper.knowledge.register_file_upload(knowledge_base_id="3c6931ec-d324-46b6-bec6-bf31a5f0623f", filename="example.pdf", file_id="0dff5851-c155-4a46-8450-5b96eb017ae5", content_type="application/pdf") + res = opper.knowledge.register_file_upload(knowledge_base_id="3c6931ec-d324-46b6-bec6-bf31a5f0623f", filename="example.pdf", file_id="0dff5851-c155-4a46-8450-5b96eb017ae5", content_type="application/pdf", metadata={ + "category": "legal", + "client": "acme", + }) # Handle response print(res) @@ -321,6 +326,7 @@ with Opper( | `file_id` | *str* | :heavy_check_mark: | The id of the file to register | | | `content_type` | *str* | :heavy_check_mark: | The content type of the file to register | application/pdf | | `configuration` | [OptionalNullable[models.TextProcessingConfiguration]](../../models/textprocessingconfiguration.md) | :heavy_minus_sign: | The configuration for the file to register | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Optional metadata to attach to the file | {
"category": "legal",
"client": "acme"
} | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response @@ -377,6 +383,97 @@ with Opper( | errors.RequestValidationError | 422 | application/json | | errors.APIError | 4XX, 5XX | \*/\* | +## get_file_download_url + +Get a presigned URL to download a file from a knowledge base + +### Example Usage + + +```python +from opperai import Opper +import os + + +with Opper( + http_bearer=os.getenv("OPPER_HTTP_BEARER", ""), +) as opper: + + res = opper.knowledge.get_file_download_url(knowledge_base_id="3d6f9cb0-cbf2-4c9a-8995-331034439b8d", file_id="d917ff5b-acf0-4e5c-943a-7d584204a9f3") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `knowledge_base_id` | *str* | :heavy_check_mark: | The id of the knowledge base | +| `file_id` | *str* | :heavy_check_mark: | The id of the file | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.FileDownloadURLResponse](../../models/filedownloadurlresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| ----------------------------- | ----------------------------- | ----------------------------- | +| errors.BadRequestError | 400 | application/json | +| errors.UnauthorizedError | 401 | application/json | +| errors.NotFoundError | 404 | application/json | +| errors.RequestValidationError | 422 | application/json | +| errors.APIError | 4XX, 5XX | \*/\* | + +## list_files + +List all files in a knowledge base + +### Example Usage + + +```python +from opperai import Opper +import os + + +with Opper( + http_bearer=os.getenv("OPPER_HTTP_BEARER", ""), +) as opper: + + res = opper.knowledge.list_files(knowledge_base_id="53b2ef93-22ff-4826-aac5-a53c7fa8e075", offset=0, limit=100) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `knowledge_base_id` | *str* | :heavy_check_mark: | The id of the knowledge base to list files from | +| `offset` | *Optional[int]* | :heavy_minus_sign: | The offset to start the list from | +| `limit` | *Optional[int]* | :heavy_minus_sign: | The number of files to return | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.PaginatedResponseListFilesResponse](../../models/paginatedresponselistfilesresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| ----------------------------- | ----------------------------- | ----------------------------- | +| errors.BadRequestError | 400 | application/json | +| errors.UnauthorizedError | 401 | application/json | +| errors.NotFoundError | 404 | application/json | +| errors.RequestValidationError | 422 | application/json | +| errors.APIError | 4XX, 5XX | \*/\* | + ## query Query a knowledge base by its id diff --git a/docs/sdks/opper/README.md b/docs/sdks/opper/README.md index 0b831cc..085f60d 100644 --- a/docs/sdks/opper/README.md +++ b/docs/sdks/opper/README.md @@ -16,7 +16,7 @@ It is a declarative interface with input and output schemas that supports text, ```python -from opperai import Opper, models +from opperai import Opper import os @@ -70,11 +70,7 @@ with Opper( ], parent_span_id="123e4567-e89b-12d3-a456-426614174000", tags={ "project": "project_456", "user": "company_123", - }, configuration=models.FunctionCallConfiguration( - beta_evaluation=models.EvaluationConfig( - scorers=models.ScorersEnum1.BASE, - ), - )) + }, configuration={}) # Handle response print(res) @@ -94,7 +90,7 @@ with Opper( | `examples` | List[[models.Example](../../models/example.md)] | :heavy_minus_sign: | Optionally provide examples of successful task completions. Will be added to the prompt to help the model understand the task from examples. | [
{
"comment": "Adds two numbers",
"input": {
"x": 1,
"y": 3
},
"output": {
"sum": 4
}
}
] | | `parent_span_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Optionally provide the parent span ID to add to the call event. This will automatically tie the call to a parent span in the UI. | 123e4567-e89b-12d3-a456-426614174000 | | `tags` | Dict[str, *str*] | :heavy_minus_sign: | Optionally provide a list of tags to add to the call event. Useful for being able to understand aggregate analytics on some dimension. | {
"project": "project_456",
"user": "company_123"
} | -| `configuration` | [OptionalNullable[models.FunctionCallConfiguration]](../../models/functioncallconfiguration.md) | :heavy_minus_sign: | Optional configuration for the function.Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function. | {
"beta.evaluation": {
"enabled": true,
"scorers": "base"
},
"beta.invocation.input_validation.enabled": false,
"beta.invocation.xml_mode.enabled": false,
"invocation.cache.ttl": 0,
"invocation.few_shot.count": 0,
"invocation.structured_generation.max_attempts": 5
} | +| `configuration` | [OptionalNullable[models.FunctionCallConfigurationInput]](../../models/functioncallconfigurationinput.md) | :heavy_minus_sign: | Optional configuration for the function.Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function. | {
"beta.invocation.input_validation.enabled": false,
"beta.invocation.xml_mode.enabled": false,
"invocation.cache.ttl": 0,
"invocation.few_shot.count": 0,
"invocation.structured_generation.max_attempts": 5
} | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response @@ -186,7 +182,7 @@ data: {"delta": "Engineer", "json_path": "response.role", "chunk_type": "json"} ```python -from opperai import Opper, models +from opperai import Opper import os @@ -240,11 +236,7 @@ with Opper( ], parent_span_id="123e4567-e89b-12d3-a456-426614174000", tags={ "project": "project_456", "user": "company_123", - }, configuration=models.FunctionCallConfiguration( - beta_evaluation=models.EvaluationConfig( - scorers=models.ScorersEnum1.BASE, - ), - )) + }, configuration={}) with res as event_stream: for event in event_stream: @@ -266,7 +258,7 @@ with Opper( | `examples` | List[[models.Example](../../models/example.md)] | :heavy_minus_sign: | Optionally provide examples of successful task completions. Will be added to the prompt to help the model understand the task from examples. | [
{
"comment": "Adds two numbers",
"input": {
"x": 1,
"y": 3
},
"output": {
"sum": 4
}
}
] | | `parent_span_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Optionally provide the parent span ID to add to the call event. This will automatically tie the call to a parent span in the UI. | 123e4567-e89b-12d3-a456-426614174000 | | `tags` | Dict[str, *str*] | :heavy_minus_sign: | Optionally provide a list of tags to add to the call event. Useful for being able to understand aggregate analytics on some dimension. | {
"project": "project_456",
"user": "company_123"
} | -| `configuration` | [OptionalNullable[models.FunctionCallConfiguration]](../../models/functioncallconfiguration.md) | :heavy_minus_sign: | Optional configuration for the function.Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function. | {
"beta.evaluation": {
"enabled": true,
"scorers": "base"
},
"beta.invocation.input_validation.enabled": false,
"beta.invocation.xml_mode.enabled": false,
"invocation.cache.ttl": 0,
"invocation.few_shot.count": 0,
"invocation.structured_generation.max_attempts": 5
} | +| `configuration` | [OptionalNullable[models.FunctionCallConfigurationInput]](../../models/functioncallconfigurationinput.md) | :heavy_minus_sign: | Optional configuration for the function.Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function. | {
"beta.invocation.input_validation.enabled": false,
"beta.invocation.xml_mode.enabled": false,
"invocation.cache.ttl": 0,
"invocation.few_shot.count": 0,
"invocation.structured_generation.max_attempts": 5
} | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response diff --git a/docs/sdks/spans/README.md b/docs/sdks/spans/README.md index 4d90ddd..b9da617 100644 --- a/docs/sdks/spans/README.md +++ b/docs/sdks/spans/README.md @@ -130,7 +130,7 @@ with Opper( http_bearer=os.getenv("OPPER_HTTP_BEARER", ""), ) as opper: - res = opper.spans.update(span_id="77b258a2-45c1-4b87-a50c-9116bc8ed1d6", name="my span", start_time=parse_datetime("2025-11-04T12:46:20.444851Z"), type="email_tool", end_time=parse_datetime("2025-11-04T12:46:20.444947Z"), input="Hello, world!", output="Hello, world!", error="Exception: This is an error message", meta={ + res = opper.spans.update(span_id="77b258a2-45c1-4b87-a50c-9116bc8ed1d6", name="my span", start_time=parse_datetime("2025-11-28T13:52:31.359105Z"), type="email_tool", end_time=parse_datetime("2025-11-28T13:52:31.359201Z"), input="Hello, world!", output="Hello, world!", error="Exception: This is an error message", meta={ "key": "value", }, score=10) @@ -145,9 +145,9 @@ with Opper( | --------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------- | | `span_id` | *str* | :heavy_check_mark: | The ID of the span to update | | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | The name of the span, something descriptive about the span that will be used to identify it when querying | my span | -| `start_time` | 
[date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The start time of the span in UTC | 2025-11-04T12:46:20.444851Z | +| `start_time` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The start time of the span in UTC | 2025-11-28T13:52:31.359105Z | | `type` | *OptionalNullable[str]* | :heavy_minus_sign: | The type of the span | email_tool | -| `end_time` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The end time of the span in UTC | 2025-11-04T12:46:20.444947Z | +| `end_time` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The end time of the span in UTC | 2025-11-28T13:52:31.359201Z | | `input` | *OptionalNullable[str]* | :heavy_minus_sign: | The input of the span | Hello, world! | | `output` | *OptionalNullable[str]* | :heavy_minus_sign: | The output of the span | Hello, world! | | `error` | *OptionalNullable[str]* | :heavy_minus_sign: | In case of an error, the error message | Exception: This is an error message | diff --git a/pyproject.toml b/pyproject.toml index 39685b8..1648503 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,7 +1,7 @@ [project] name = "opperai" -version = "1.6.4" +version = "1.6.5" description = "Python Client SDK Generated by Speakeasy." 
authors = [{ name = "Speakeasy" },] readme = "README.md" @@ -27,6 +27,7 @@ in-project = true [tool.poetry.group.dev.dependencies] mypy = "==1.15.0" pylint = "==3.2.3" +pyright = "==1.1.398" [build-system] requires = ["poetry-core"] diff --git a/src/opperai/_version.py b/src/opperai/_version.py index 6682a6e..8ada501 100644 --- a/src/opperai/_version.py +++ b/src/opperai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "opperai" -__version__: str = "1.6.4" +__version__: str = "1.6.5" __openapi_doc_version__: str = "2.0.0" -__gen_version__: str = "2.737.0" -__user_agent__: str = "speakeasy-sdk/python 1.6.4 2.737.0 2.0.0 opperai" +__gen_version__: str = "2.766.1" +__user_agent__: str = "speakeasy-sdk/python 1.6.5 2.766.1 2.0.0 opperai" try: if __package__ is not None: diff --git a/src/opperai/analytics.py b/src/opperai/analytics.py index 270fdfa..f499d68 100644 --- a/src/opperai/analytics.py +++ b/src/opperai/analytics.py @@ -67,6 +67,7 @@ def get_usage( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -179,6 +180,7 @@ async def get_usage_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) diff --git a/src/opperai/basesdk.py b/src/opperai/basesdk.py index 760313d..e710837 100644 --- a/src/opperai/basesdk.py +++ b/src/opperai/basesdk.py @@ -56,6 +56,7 @@ def _build_request_async( ] = None, url_override: Optional[str] = None, http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, ) -> httpx.Request: client = self.sdk_configuration.async_client return self._build_request_with_client( @@ -76,6 +77,7 @@ def _build_request_async( get_serialized_body, url_override, http_headers, + allow_empty_value, ) def _build_request( @@ -98,6 +100,7 @@ def _build_request( ] = None, url_override: 
Optional[str] = None, http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, ) -> httpx.Request: client = self.sdk_configuration.client return self._build_request_with_client( @@ -118,6 +121,7 @@ def _build_request( get_serialized_body, url_override, http_headers, + allow_empty_value, ) def _build_request_with_client( @@ -141,6 +145,7 @@ def _build_request_with_client( ] = None, url_override: Optional[str] = None, http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, ) -> httpx.Request: query_params = {} @@ -156,6 +161,7 @@ def _build_request_with_client( query_params = utils.get_query_params( request if request_has_query_params else None, _globals if request_has_query_params else None, + allow_empty_value, ) else: # Pick up the query parameter from the override so they can be diff --git a/src/opperai/datasets.py b/src/opperai/datasets.py index 36eaffb..c582329 100644 --- a/src/opperai/datasets.py +++ b/src/opperai/datasets.py @@ -89,6 +89,7 @@ def create_entry( "json", models.CreateDatasetEntryRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -210,6 +211,7 @@ async def create_entry_async( "json", models.CreateDatasetEntryRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -316,6 +318,7 @@ def list_entries( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -424,6 +427,7 @@ async def list_entries_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -529,6 +533,7 @@ def get_entry( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -632,6 +637,7 @@ async def get_entry_async( accept_header_value="application/json", 
http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -735,6 +741,7 @@ def delete_entry( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -838,6 +845,7 @@ async def delete_entry_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -944,6 +952,7 @@ def query_entries( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1052,6 +1061,7 @@ async def query_entries_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) diff --git a/src/opperai/embeddings.py b/src/opperai/embeddings.py index 1bb2d2e..7e101e5 100644 --- a/src/opperai/embeddings.py +++ b/src/opperai/embeddings.py @@ -62,6 +62,7 @@ def create( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.CreateEmbeddingRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -171,6 +172,7 @@ async def create_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.CreateEmbeddingRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) diff --git a/src/opperai/entries.py b/src/opperai/entries.py index 821b6d3..29ffe2a 100644 --- a/src/opperai/entries.py +++ b/src/opperai/entries.py @@ -78,6 +78,7 @@ def update( "json", models.UpdateDatasetEntryRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -202,6 +203,7 @@ async def update_async( "json", models.UpdateDatasetEntryRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) diff --git a/src/opperai/functions.py 
b/src/opperai/functions.py index 7bc4e3b..9453c7b 100644 --- a/src/opperai/functions.py +++ b/src/opperai/functions.py @@ -35,8 +35,8 @@ def create( model: Optional[Union[models.TModel, models.TModelTypedDict]] = None, configuration: OptionalNullable[ Union[ - models.FunctionCallConfiguration, - models.FunctionCallConfigurationTypedDict, + models.FunctionCallConfigurationInput, + models.FunctionCallConfigurationInputTypedDict, ] ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -85,7 +85,7 @@ def create( output_schema=output_schema, model=utils.get_pydantic_model(model, Optional[models.TModel]), configuration=utils.get_pydantic_model( - configuration, OptionalNullable[models.FunctionCallConfiguration] + configuration, OptionalNullable[models.FunctionCallConfigurationInput] ), ) @@ -105,6 +105,7 @@ def create( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.CreateFunctionRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -175,8 +176,8 @@ async def create_async( model: Optional[Union[models.TModel, models.TModelTypedDict]] = None, configuration: OptionalNullable[ Union[ - models.FunctionCallConfiguration, - models.FunctionCallConfigurationTypedDict, + models.FunctionCallConfigurationInput, + models.FunctionCallConfigurationInputTypedDict, ] ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -225,7 +226,7 @@ async def create_async( output_schema=output_schema, model=utils.get_pydantic_model(model, Optional[models.TModel]), configuration=utils.get_pydantic_model( - configuration, OptionalNullable[models.FunctionCallConfiguration] + configuration, OptionalNullable[models.FunctionCallConfigurationInput] ), ) @@ -245,6 +246,7 @@ async def create_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.CreateFunctionRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -359,6 +361,7 @@ def list( 
accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -472,6 +475,7 @@ async def list_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -576,6 +580,7 @@ def get( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -678,6 +683,7 @@ async def get_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -760,7 +766,31 @@ def update( :param instructions: The instructions for the function, this is the prompt that will be sent to the model to complete the task. Recommended to be concise and to the point :param input_schema: Optional input schema for the function. Can preferably include field descriptions to allow the model to reason about the input variables. Schema is validated against the input data and issues an error if it does not match. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. For schemas with definitions, prefer using '$defs' and '#/$defs/...' references. :param output_schema: Optional output schema for the function. Can preferably include field descriptions to allow the model to reason about the output variables. Schema is validated against the output data and issues an error if it does not match. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. For schemas with definitions, prefer using '$defs' and '#/$defs/...' references. - :param model: Optionally provide a model to use for completing the task. If not provided, a default model will be used. 
Currently the default model is `azure/gpt-4o-eu` To specify options for the model, use a dictionary of key-value pairs. The options are passed to the model on invocation. An example of passing temperature to `gpt-4o-mini` hosted on OpenAI is shown below. ```json { \"model\": \"openai/gpt-4o-mini\", # the model name \"options\": { \"temperature\": 0.5 # the options for the model } } ``` To specify a fallback model, use a list of models. The models will then be tried in order. The second model will be used if the first model is not available, and so on. ```json [ \"openai/gpt-4o-mini\", # first model to try \"openai/gpt-4.1-nano\", # second model to try ] ``` + :param model: Optionally provide a model to use for completing the task. + If not provided, a default model will be used. Currently the default model is `azure/gpt-4o-eu` + + To specify options for the model, use a dictionary of key-value pairs. The options are passed to the model on invocation. + An example of passing temperature to `gpt-4o-mini` hosted on OpenAI is shown below. + + ```json + { + \"model\": \"openai/gpt-4o-mini\", # the model name + \"options\": { + \"temperature\": 0.5 # the options for the model + } + } + ``` + + To specify a fallback model, use a list of models. The models will then be tried in order. + The second model will be used if the first model is not available, and so on. + + ```json + [ + \"openai/gpt-4o-mini\", # first model to try + \"openai/gpt-4.1-nano\", # second model to try + ] + ``` + :param configuration: Optional configuration for the function.Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -817,6 +847,7 @@ def update( "json", models.UpdateFunctionRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -899,7 +930,31 @@ async def update_async( :param instructions: The instructions for the function, this is the prompt that will be sent to the model to complete the task. Recommended to be concise and to the point :param input_schema: Optional input schema for the function. Can preferably include field descriptions to allow the model to reason about the input variables. Schema is validated against the input data and issues an error if it does not match. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. For schemas with definitions, prefer using '$defs' and '#/$defs/...' references. :param output_schema: Optional output schema for the function. Can preferably include field descriptions to allow the model to reason about the output variables. Schema is validated against the output data and issues an error if it does not match. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. For schemas with definitions, prefer using '$defs' and '#/$defs/...' references. - :param model: Optionally provide a model to use for completing the task. If not provided, a default model will be used. Currently the default model is `azure/gpt-4o-eu` To specify options for the model, use a dictionary of key-value pairs. The options are passed to the model on invocation. An example of passing temperature to `gpt-4o-mini` hosted on OpenAI is shown below. ```json { \"model\": \"openai/gpt-4o-mini\", # the model name \"options\": { \"temperature\": 0.5 # the options for the model } } ``` To specify a fallback model, use a list of models. The models will then be tried in order. The second model will be used if the first model is not available, and so on. 
```json [ \"openai/gpt-4o-mini\", # first model to try \"openai/gpt-4.1-nano\", # second model to try ] ``` + :param model: Optionally provide a model to use for completing the task. + If not provided, a default model will be used. Currently the default model is `azure/gpt-4o-eu` + + To specify options for the model, use a dictionary of key-value pairs. The options are passed to the model on invocation. + An example of passing temperature to `gpt-4o-mini` hosted on OpenAI is shown below. + + ```json + { + \"model\": \"openai/gpt-4o-mini\", # the model name + \"options\": { + \"temperature\": 0.5 # the options for the model + } + } + ``` + + To specify a fallback model, use a list of models. The models will then be tried in order. + The second model will be used if the first model is not available, and so on. + + ```json + [ + \"openai/gpt-4o-mini\", # first model to try + \"openai/gpt-4.1-nano\", # second model to try + ] + ``` + :param configuration: Optional configuration for the function.Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -956,6 +1011,7 @@ async def update_async( "json", models.UpdateFunctionRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1058,6 +1114,7 @@ def delete( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1160,6 +1217,7 @@ async def delete_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1262,6 +1320,7 @@ def get_by_name( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1364,6 +1423,7 @@ async def get_by_name_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1469,6 +1529,7 @@ def get_by_revision( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1574,6 +1635,7 @@ async def get_by_revision_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1701,6 +1763,7 @@ def call( "json", models.AppAPIPublicV2FunctionsCallFunctionRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1830,6 +1893,7 @@ async def call_async( "json", models.AppAPIPublicV2FunctionsCallFunctionRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1973,6 +2037,7 @@ def stream( "json", models.AppAPIPublicV2FunctionsCallFunctionRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -2132,6 +2197,7 @@ async def stream_async( 
"json", models.AppAPIPublicV2FunctionsCallFunctionRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -2280,6 +2346,7 @@ def call_revision( "json", models.AppAPIPublicV2FunctionsCallFunctionRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -2412,6 +2479,7 @@ async def call_revision_async( "json", models.AppAPIPublicV2FunctionsCallFunctionRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -2560,6 +2628,7 @@ def stream_revision( "json", models.AppAPIPublicV2FunctionsCallFunctionRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -2724,6 +2793,7 @@ async def stream_revision_async( "json", models.AppAPIPublicV2FunctionsCallFunctionRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) diff --git a/src/opperai/knowledge.py b/src/opperai/knowledge.py index e5309e7..38dfb74 100644 --- a/src/opperai/knowledge.py +++ b/src/opperai/knowledge.py @@ -62,6 +62,7 @@ def create( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.CreateKnowledgeBaseRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -170,6 +171,7 @@ async def create_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.CreateKnowledgeBaseRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -275,6 +277,7 @@ def list( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -382,6 +385,7 @@ async def list_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -486,6 +490,7 @@ def get( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -588,6 +593,7 @@ async def get_async( 
accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -690,6 +696,7 @@ def delete( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -792,6 +799,7 @@ async def delete_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -896,6 +904,7 @@ def get_by_name( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1000,6 +1009,7 @@ async def get_by_name_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1110,6 +1120,7 @@ def get_upload_url( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1220,6 +1231,7 @@ async def get_upload_url_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1289,6 +1301,7 @@ def register_file_upload( models.TextProcessingConfigurationTypedDict, ] ] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -1308,6 +1321,7 @@ def register_file_upload( :param file_id: The id of the file to register :param content_type: The content type of the file to register :param configuration: The configuration for the file to register + :param metadata: Optional metadata to attach to the file :param retries: Override the default retry configuration 
for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -1334,6 +1348,7 @@ def register_file_upload( configuration, OptionalNullable[models.TextProcessingConfiguration], ), + metadata=metadata, ), ) ) @@ -1358,6 +1373,7 @@ def register_file_upload( "json", models.RegisterFileUploadRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1427,6 +1443,7 @@ async def register_file_upload_async( models.TextProcessingConfigurationTypedDict, ] ] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -1446,6 +1463,7 @@ async def register_file_upload_async( :param file_id: The id of the file to register :param content_type: The content type of the file to register :param configuration: The configuration for the file to register + :param metadata: Optional metadata to attach to the file :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -1472,6 +1490,7 @@ async def register_file_upload_async( configuration, OptionalNullable[models.TextProcessingConfiguration], ), + metadata=metadata, ), ) ) @@ -1496,6 +1515,7 @@ async def register_file_upload_async( "json", models.RegisterFileUploadRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1601,6 +1621,7 @@ def delete_file( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1706,6 +1727,7 @@ async def delete_file_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + 
allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1762,6 +1784,440 @@ async def delete_file_async( raise errors.APIError("Unexpected response received", http_res) + def get_file_download_url( + self, + *, + knowledge_base_id: str, + file_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.FileDownloadURLResponse: + r"""Get File Download Url + + Get a presigned URL to download a file from a knowledge base + + :param knowledge_base_id: The id of the knowledge base + :param file_id: The id of the file + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.GetFileDownloadURLKnowledgeKnowledgeBaseIDFilesFileIDDownloadURLGetRequest( + knowledge_base_id=knowledge_base_id, + file_id=file_id, + ) + + req = self._build_request( + method="GET", + path="/knowledge/{knowledge_base_id}/files/{file_id}/download_url", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + 
retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="get_file_download_url_knowledge__knowledge_base_id__files__file_id__download_url_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "401", "404", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.FileDownloadURLResponse, http_res) + if utils.match_response(http_res, "400", "application/json"): + response_data = unmarshal_json_response( + errors.BadRequestErrorData, http_res + ) + raise errors.BadRequestError(response_data, http_res) + if utils.match_response(http_res, "401", "application/json"): + response_data = unmarshal_json_response( + errors.UnauthorizedErrorData, http_res + ) + raise errors.UnauthorizedError(response_data, http_res) + if utils.match_response(http_res, "404", "application/json"): + response_data = unmarshal_json_response(errors.NotFoundErrorData, http_res) + raise errors.NotFoundError(response_data, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.RequestValidationErrorData, http_res + ) + raise errors.RequestValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.APIError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.APIError("API error occurred", http_res, http_res_text) + + raise errors.APIError("Unexpected response received", http_res) + + 
async def get_file_download_url_async( + self, + *, + knowledge_base_id: str, + file_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.FileDownloadURLResponse: + r"""Get File Download Url + + Get a presigned URL to download a file from a knowledge base + + :param knowledge_base_id: The id of the knowledge base + :param file_id: The id of the file + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.GetFileDownloadURLKnowledgeKnowledgeBaseIDFilesFileIDDownloadURLGetRequest( + knowledge_base_id=knowledge_base_id, + file_id=file_id, + ) + + req = self._build_request_async( + method="GET", + path="/knowledge/{knowledge_base_id}/files/{file_id}/download_url", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await 
self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="get_file_download_url_knowledge__knowledge_base_id__files__file_id__download_url_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "401", "404", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.FileDownloadURLResponse, http_res) + if utils.match_response(http_res, "400", "application/json"): + response_data = unmarshal_json_response( + errors.BadRequestErrorData, http_res + ) + raise errors.BadRequestError(response_data, http_res) + if utils.match_response(http_res, "401", "application/json"): + response_data = unmarshal_json_response( + errors.UnauthorizedErrorData, http_res + ) + raise errors.UnauthorizedError(response_data, http_res) + if utils.match_response(http_res, "404", "application/json"): + response_data = unmarshal_json_response(errors.NotFoundErrorData, http_res) + raise errors.NotFoundError(response_data, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.RequestValidationErrorData, http_res + ) + raise errors.RequestValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.APIError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.APIError("API error occurred", http_res, http_res_text) + + raise errors.APIError("Unexpected response received", http_res) + + def list_files( + self, + *, + knowledge_base_id: str, + offset: Optional[int] = 0, + limit: Optional[int] = 100, + 
retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.PaginatedResponseListFilesResponse: + r"""List Files + + List all files in a knowledge base + + :param knowledge_base_id: The id of the knowledge base to list files from + :param offset: The offset to start the list from + :param limit: The number of files to return + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ListFilesKnowledgeKnowledgeBaseIDFilesGetRequest( + knowledge_base_id=knowledge_base_id, + offset=offset, + limit=limit, + ) + + req = self._build_request( + method="GET", + path="/knowledge/{knowledge_base_id}/files", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or 
"", + operation_id="list_files_knowledge__knowledge_base_id__files_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "401", "404", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.PaginatedResponseListFilesResponse, http_res + ) + if utils.match_response(http_res, "400", "application/json"): + response_data = unmarshal_json_response( + errors.BadRequestErrorData, http_res + ) + raise errors.BadRequestError(response_data, http_res) + if utils.match_response(http_res, "401", "application/json"): + response_data = unmarshal_json_response( + errors.UnauthorizedErrorData, http_res + ) + raise errors.UnauthorizedError(response_data, http_res) + if utils.match_response(http_res, "404", "application/json"): + response_data = unmarshal_json_response(errors.NotFoundErrorData, http_res) + raise errors.NotFoundError(response_data, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.RequestValidationErrorData, http_res + ) + raise errors.RequestValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.APIError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.APIError("API error occurred", http_res, http_res_text) + + raise errors.APIError("Unexpected response received", http_res) + + async def list_files_async( + self, + *, + knowledge_base_id: str, + offset: Optional[int] = 0, + limit: Optional[int] = 100, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + 
http_headers: Optional[Mapping[str, str]] = None, + ) -> models.PaginatedResponseListFilesResponse: + r"""List Files + + List all files in a knowledge base + + :param knowledge_base_id: The id of the knowledge base to list files from + :param offset: The offset to start the list from + :param limit: The number of files to return + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ListFilesKnowledgeKnowledgeBaseIDFilesGetRequest( + knowledge_base_id=knowledge_base_id, + offset=offset, + limit=limit, + ) + + req = self._build_request_async( + method="GET", + path="/knowledge/{knowledge_base_id}/files", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="list_files_knowledge__knowledge_base_id__files_get", + oauth2_scopes=None, + 
security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "401", "404", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.PaginatedResponseListFilesResponse, http_res + ) + if utils.match_response(http_res, "400", "application/json"): + response_data = unmarshal_json_response( + errors.BadRequestErrorData, http_res + ) + raise errors.BadRequestError(response_data, http_res) + if utils.match_response(http_res, "401", "application/json"): + response_data = unmarshal_json_response( + errors.UnauthorizedErrorData, http_res + ) + raise errors.UnauthorizedError(response_data, http_res) + if utils.match_response(http_res, "404", "application/json"): + response_data = unmarshal_json_response(errors.NotFoundErrorData, http_res) + raise errors.NotFoundError(response_data, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.RequestValidationErrorData, http_res + ) + raise errors.RequestValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.APIError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.APIError("API error occurred", http_res, http_res_text) + + raise errors.APIError("Unexpected response received", http_res) + def query( self, *, @@ -1839,6 +2295,7 @@ def query( "json", models.QueryKnowledgeBaseRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1974,6 +2431,7 @@ async def query_async( "json", models.QueryKnowledgeBaseRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -2094,6 +2552,7 @@ def delete_documents( 
"json", OptionalNullable[models.DeleteKnowledgeBaseRequest], ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -2212,6 +2671,7 @@ async def delete_documents_async( "json", OptionalNullable[models.DeleteKnowledgeBaseRequest], ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -2338,6 +2798,7 @@ def add( get_serialized_body=lambda: utils.serialize_request_body( request.add_request, False, False, "json", models.AddRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -2464,6 +2925,7 @@ async def add_async( get_serialized_body=lambda: utils.serialize_request_body( request.add_request, False, False, "json", models.AddRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) diff --git a/src/opperai/language_models.py b/src/opperai/language_models.py index a2cd71f..3160408 100644 --- a/src/opperai/language_models.py +++ b/src/opperai/language_models.py @@ -61,6 +61,7 @@ def list( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -170,6 +171,7 @@ async def list_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -286,6 +288,7 @@ def register_custom( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.RegisterCustomModelRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -403,6 +406,7 @@ async def register_custom_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.RegisterCustomModelRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -511,6 +515,7 @@ def list_custom( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -618,6 +623,7 @@ async def list_custom_async( 
accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -722,6 +728,7 @@ def get_custom( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -824,6 +831,7 @@ async def get_custom_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -947,6 +955,7 @@ def update_custom( "json", models.UpdateCustomModelRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1070,6 +1079,7 @@ async def update_custom_async( "json", models.UpdateCustomModelRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1172,6 +1182,7 @@ def delete_custom( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1274,6 +1285,7 @@ async def delete_custom_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1376,6 +1388,7 @@ def get_custom_by_name( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1478,6 +1491,7 @@ async def get_custom_by_name_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1593,6 +1607,7 @@ def create_alias( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.CreateModelAliasRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1711,6 +1726,7 @@ async def create_alias_async( get_serialized_body=lambda: 
utils.serialize_request_body( request, False, False, "json", models.CreateModelAliasRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1819,6 +1835,7 @@ def list_aliases( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1926,6 +1943,7 @@ async def list_aliases_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -2030,6 +2048,7 @@ def get_alias( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -2132,6 +2151,7 @@ async def get_alias_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -2252,6 +2272,7 @@ def update_alias( "json", models.UpdateModelAliasRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -2372,6 +2393,7 @@ async def update_alias_async( "json", models.UpdateModelAliasRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -2474,6 +2496,7 @@ def delete_alias( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -2576,6 +2599,7 @@ async def delete_alias_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -2678,6 +2702,7 @@ def get_alias_by_name( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -2780,6 +2805,7 @@ async def get_alias_by_name_async( accept_header_value="application/json", http_headers=http_headers, 
security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) diff --git a/src/opperai/models/__init__.py b/src/opperai/models/__init__.py index 6a32d9f..8438010 100644 --- a/src/opperai/models/__init__.py +++ b/src/opperai/models/__init__.py @@ -345,19 +345,13 @@ DeleteKnowledgeBaseResponse, DeleteKnowledgeBaseResponseTypedDict, ) - from .evaluationconfig import ( - EvaluationConfig, - EvaluationConfigTypedDict, - ScorersEnum1, - ScorersEnum2, - ScorersUnion1, - ScorersUnion1TypedDict, - ScorersUnion2, - ScorersUnion2TypedDict, - ) from .example import Example, ExampleTypedDict from .examplein import ExampleIn, ExampleInTypedDict from .file import File, FileTypedDict + from .filedownloadurlresponse import ( + FileDownloadURLResponse, + FileDownloadURLResponseTypedDict, + ) from .filefile import FileFile, FileFileTypedDict from .filter_ import ( Filter, @@ -380,9 +374,13 @@ ) from .functioncall_input import FunctionCallInput, FunctionCallInputTypedDict from .functioncall_output import FunctionCallOutput, FunctionCallOutputTypedDict - from .functioncallconfiguration import ( - FunctionCallConfiguration, - FunctionCallConfigurationTypedDict, + from .functioncallconfiguration_input import ( + FunctionCallConfigurationInput, + FunctionCallConfigurationInputTypedDict, + ) + from .functioncallconfiguration_output import ( + FunctionCallConfigurationOutput, + FunctionCallConfigurationOutputTypedDict, ) from .functiondefinition import FunctionDefinition, FunctionDefinitionTypedDict from .get_custom_model_by_name_models_custom_by_name_name_getop import ( @@ -397,6 +395,10 @@ GetDatasetEntryDatasetsDatasetIDEntriesEntryIDGetRequest, GetDatasetEntryDatasetsDatasetIDEntriesEntryIDGetRequestTypedDict, ) + from .get_file_download_url_knowledge_knowledge_base_id_files_file_id_download_url_getop import ( + GetFileDownloadURLKnowledgeKnowledgeBaseIDFilesFileIDDownloadURLGetRequest, + 
GetFileDownloadURLKnowledgeKnowledgeBaseIDFilesFileIDDownloadURLGetRequestTypedDict, + ) from .get_function_by_name_functions_by_name_name_getop import ( GetFunctionByNameFunctionsByNameNameGetRequest, GetFunctionByNameFunctionsByNameNameGetRequestTypedDict, @@ -485,6 +487,10 @@ ListDatasetEntriesDatasetsDatasetIDEntriesGetRequest, ListDatasetEntriesDatasetsDatasetIDEntriesGetRequestTypedDict, ) + from .list_files_knowledge_knowledge_base_id_files_getop import ( + ListFilesKnowledgeKnowledgeBaseIDFilesGetRequest, + ListFilesKnowledgeKnowledgeBaseIDFilesGetRequestTypedDict, + ) from .list_function_revisions_functions_function_id_revisions_getop import ( ListFunctionRevisionsFunctionsFunctionIDRevisionsGetRequest, ListFunctionRevisionsFunctionsFunctionIDRevisionsGetRequestTypedDict, @@ -517,6 +523,7 @@ ListCustomModelsResponseItem, ListCustomModelsResponseItemTypedDict, ) + from .listfilesresponse import ListFilesResponse, ListFilesResponseTypedDict from .listfunctionrevisionresponse import ( ListFunctionRevisionResponse, ListFunctionRevisionResponseTypedDict, @@ -583,6 +590,10 @@ PaginatedResponseListCustomModelsResponseItem, PaginatedResponseListCustomModelsResponseItemTypedDict, ) + from .paginatedresponse_listfilesresponse_ import ( + PaginatedResponseListFilesResponse, + PaginatedResponseListFilesResponseTypedDict, + ) from .paginatedresponse_listfunctionrevisionresponse_ import ( PaginatedResponseListFunctionRevisionResponse, PaginatedResponseListFunctionRevisionResponseTypedDict, @@ -1006,13 +1017,13 @@ "Delta", "DeltaTypedDict", "Detail", - "EvaluationConfig", - "EvaluationConfigTypedDict", "Example", "ExampleIn", "ExampleInTypedDict", "ExampleTypedDict", "File", + "FileDownloadURLResponse", + "FileDownloadURLResponseTypedDict", "FileFile", "FileFileTypedDict", "FileTypedDict", @@ -1021,8 +1032,10 @@ "FinishReason", "Format", "FormatTypedDict", - "FunctionCallConfiguration", - "FunctionCallConfigurationTypedDict", + "FunctionCallConfigurationInput", + 
"FunctionCallConfigurationInputTypedDict", + "FunctionCallConfigurationOutput", + "FunctionCallConfigurationOutputTypedDict", "FunctionCallInput", "FunctionCallInputTypedDict", "FunctionCallOutput", @@ -1047,6 +1060,8 @@ "GetDatasetEntryDatasetsDatasetIDEntriesEntryIDGetRequestTypedDict", "GetDatasetEntryResponse", "GetDatasetEntryResponseTypedDict", + "GetFileDownloadURLKnowledgeKnowledgeBaseIDFilesFileIDDownloadURLGetRequest", + "GetFileDownloadURLKnowledgeKnowledgeBaseIDFilesFileIDDownloadURLGetRequestTypedDict", "GetFunctionByNameFunctionsByNameNameGetRequest", "GetFunctionByNameFunctionsByNameNameGetRequestTypedDict", "GetFunctionByRevisionFunctionsFunctionIDRevisionsRevisionIDGetRequest", @@ -1103,6 +1118,10 @@ "ListCustomModelsResponseItemTypedDict", "ListDatasetEntriesDatasetsDatasetIDEntriesGetRequest", "ListDatasetEntriesDatasetsDatasetIDEntriesGetRequestTypedDict", + "ListFilesKnowledgeKnowledgeBaseIDFilesGetRequest", + "ListFilesKnowledgeKnowledgeBaseIDFilesGetRequestTypedDict", + "ListFilesResponse", + "ListFilesResponseTypedDict", "ListFunctionRevisionResponse", "ListFunctionRevisionResponseTypedDict", "ListFunctionRevisionsFunctionsFunctionIDRevisionsGetRequest", @@ -1155,6 +1174,8 @@ "PaginatedResponseGetDatasetEntriesResponseTypedDict", "PaginatedResponseListCustomModelsResponseItem", "PaginatedResponseListCustomModelsResponseItemTypedDict", + "PaginatedResponseListFilesResponse", + "PaginatedResponseListFilesResponseTypedDict", "PaginatedResponseListFunctionRevisionResponse", "PaginatedResponseListFunctionRevisionResponseTypedDict", "PaginatedResponseListFunctionsResponseItem", @@ -1215,12 +1236,6 @@ "SaveToDatasetResponseTypedDict", "SaveToDatasetSpansSpanIDSaveExamplesPostRequest", "SaveToDatasetSpansSpanIDSaveExamplesPostRequestTypedDict", - "ScorersEnum1", - "ScorersEnum2", - "ScorersUnion1", - "ScorersUnion1TypedDict", - "ScorersUnion2", - "ScorersUnion2TypedDict", "SearchContextSize", "Security", "SecurityTypedDict", @@ -1533,20 +1548,14 @@ 
"DeleteKnowledgeBaseRequestTypedDict": ".deleteknowledgebaserequest", "DeleteKnowledgeBaseResponse": ".deleteknowledgebaseresponse", "DeleteKnowledgeBaseResponseTypedDict": ".deleteknowledgebaseresponse", - "EvaluationConfig": ".evaluationconfig", - "EvaluationConfigTypedDict": ".evaluationconfig", - "ScorersEnum1": ".evaluationconfig", - "ScorersEnum2": ".evaluationconfig", - "ScorersUnion1": ".evaluationconfig", - "ScorersUnion1TypedDict": ".evaluationconfig", - "ScorersUnion2": ".evaluationconfig", - "ScorersUnion2TypedDict": ".evaluationconfig", "Example": ".example", "ExampleTypedDict": ".example", "ExampleIn": ".examplein", "ExampleInTypedDict": ".examplein", "File": ".file", "FileTypedDict": ".file", + "FileDownloadURLResponse": ".filedownloadurlresponse", + "FileDownloadURLResponseTypedDict": ".filedownloadurlresponse", "FileFile": ".filefile", "FileFileTypedDict": ".filefile", "Filter": ".filter_", @@ -1569,8 +1578,10 @@ "FunctionCallInputTypedDict": ".functioncall_input", "FunctionCallOutput": ".functioncall_output", "FunctionCallOutputTypedDict": ".functioncall_output", - "FunctionCallConfiguration": ".functioncallconfiguration", - "FunctionCallConfigurationTypedDict": ".functioncallconfiguration", + "FunctionCallConfigurationInput": ".functioncallconfiguration_input", + "FunctionCallConfigurationInputTypedDict": ".functioncallconfiguration_input", + "FunctionCallConfigurationOutput": ".functioncallconfiguration_output", + "FunctionCallConfigurationOutputTypedDict": ".functioncallconfiguration_output", "FunctionDefinition": ".functiondefinition", "FunctionDefinitionTypedDict": ".functiondefinition", "GetCustomModelByNameModelsCustomByNameNameGetRequest": ".get_custom_model_by_name_models_custom_by_name_name_getop", @@ -1579,6 +1590,8 @@ "GetCustomModelModelsCustomModelIDGetRequestTypedDict": ".get_custom_model_models_custom_model_id_getop", "GetDatasetEntryDatasetsDatasetIDEntriesEntryIDGetRequest": 
".get_dataset_entry_datasets_dataset_id_entries_entry_id_getop", "GetDatasetEntryDatasetsDatasetIDEntriesEntryIDGetRequestTypedDict": ".get_dataset_entry_datasets_dataset_id_entries_entry_id_getop", + "GetFileDownloadURLKnowledgeKnowledgeBaseIDFilesFileIDDownloadURLGetRequest": ".get_file_download_url_knowledge_knowledge_base_id_files_file_id_download_url_getop", + "GetFileDownloadURLKnowledgeKnowledgeBaseIDFilesFileIDDownloadURLGetRequestTypedDict": ".get_file_download_url_knowledge_knowledge_base_id_files_file_id_download_url_getop", "GetFunctionByNameFunctionsByNameNameGetRequest": ".get_function_by_name_functions_by_name_name_getop", "GetFunctionByNameFunctionsByNameNameGetRequestTypedDict": ".get_function_by_name_functions_by_name_name_getop", "GetFunctionByRevisionFunctionsFunctionIDRevisionsRevisionIDGetRequest": ".get_function_by_revision_functions_function_id_revisions_revision_id_getop", @@ -1636,6 +1649,8 @@ "ListCustomModelsModelsCustomGetRequestTypedDict": ".list_custom_models_models_custom_getop", "ListDatasetEntriesDatasetsDatasetIDEntriesGetRequest": ".list_dataset_entries_datasets_dataset_id_entries_getop", "ListDatasetEntriesDatasetsDatasetIDEntriesGetRequestTypedDict": ".list_dataset_entries_datasets_dataset_id_entries_getop", + "ListFilesKnowledgeKnowledgeBaseIDFilesGetRequest": ".list_files_knowledge_knowledge_base_id_files_getop", + "ListFilesKnowledgeKnowledgeBaseIDFilesGetRequestTypedDict": ".list_files_knowledge_knowledge_base_id_files_getop", "ListFunctionRevisionsFunctionsFunctionIDRevisionsGetRequest": ".list_function_revisions_functions_function_id_revisions_getop", "ListFunctionRevisionsFunctionsFunctionIDRevisionsGetRequestTypedDict": ".list_function_revisions_functions_function_id_revisions_getop", "ListFunctionsFunctionsGetRequest": ".list_functions_functions_getop", @@ -1652,6 +1667,8 @@ "ListTracesTracesGetRequestTypedDict": ".list_traces_traces_getop", "ListCustomModelsResponseItem": ".listcustommodelsresponseitem", 
"ListCustomModelsResponseItemTypedDict": ".listcustommodelsresponseitem", + "ListFilesResponse": ".listfilesresponse", + "ListFilesResponseTypedDict": ".listfilesresponse", "ListFunctionRevisionResponse": ".listfunctionrevisionresponse", "ListFunctionRevisionResponseTypedDict": ".listfunctionrevisionresponse", "ListFunctionsResponseItem": ".listfunctionsresponseitem", @@ -1691,6 +1708,8 @@ "PaginatedResponseGetDatasetEntriesResponseTypedDict": ".paginatedresponse_getdatasetentriesresponse_", "PaginatedResponseListCustomModelsResponseItem": ".paginatedresponse_listcustommodelsresponseitem_", "PaginatedResponseListCustomModelsResponseItemTypedDict": ".paginatedresponse_listcustommodelsresponseitem_", + "PaginatedResponseListFilesResponse": ".paginatedresponse_listfilesresponse_", + "PaginatedResponseListFilesResponseTypedDict": ".paginatedresponse_listfilesresponse_", "PaginatedResponseListFunctionRevisionResponse": ".paginatedresponse_listfunctionrevisionresponse_", "PaginatedResponseListFunctionRevisionResponseTypedDict": ".paginatedresponse_listfunctionrevisionresponse_", "PaginatedResponseListFunctionsResponseItem": ".paginatedresponse_listfunctionsresponseitem_", diff --git a/src/opperai/models/app_api_public_v2_function_call_callfunctionrequest.py b/src/opperai/models/app_api_public_v2_function_call_callfunctionrequest.py index 9d48f39..b9ca70e 100644 --- a/src/opperai/models/app_api_public_v2_function_call_callfunctionrequest.py +++ b/src/opperai/models/app_api_public_v2_function_call_callfunctionrequest.py @@ -2,9 +2,9 @@ from __future__ import annotations from .example import Example, ExampleTypedDict -from .functioncallconfiguration import ( - FunctionCallConfiguration, - FunctionCallConfigurationTypedDict, +from .functioncallconfiguration_input import ( + FunctionCallConfigurationInput, + FunctionCallConfigurationInputTypedDict, ) from .tmodel import TModel, TModelTypedDict from opperai.types import BaseModel, Nullable, OptionalNullable, UNSET, 
UNSET_SENTINEL @@ -34,7 +34,7 @@ class AppAPIPublicV2FunctionCallCallFunctionRequestTypedDict(TypedDict): r"""Optionally provide the parent span ID to add to the call event. This will automatically tie the call to a parent span in the UI.""" tags: NotRequired[Nullable[Dict[str, str]]] r"""Optionally provide a list of tags to add to the call event. Useful for being able to understand aggregate analytics on some dimension.""" - configuration: NotRequired[Nullable[FunctionCallConfigurationTypedDict]] + configuration: NotRequired[Nullable[FunctionCallConfigurationInputTypedDict]] r"""Optional configuration for the function.Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function.""" @@ -68,7 +68,7 @@ class AppAPIPublicV2FunctionCallCallFunctionRequest(BaseModel): tags: OptionalNullable[Dict[str, str]] = UNSET r"""Optionally provide a list of tags to add to the call event. Useful for being able to understand aggregate analytics on some dimension.""" - configuration: OptionalNullable[FunctionCallConfiguration] = UNSET + configuration: OptionalNullable[FunctionCallConfigurationInput] = UNSET r"""Optional configuration for the function.Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function.""" @model_serializer(mode="wrap") diff --git a/src/opperai/models/chatcompletionassistantmessageparam.py b/src/opperai/models/chatcompletionassistantmessageparam.py index 9fe530c..1241759 100644 --- a/src/opperai/models/chatcompletionassistantmessageparam.py +++ b/src/opperai/models/chatcompletionassistantmessageparam.py @@ -20,9 +20,9 @@ ) from .functioncall_input import FunctionCallInput, FunctionCallInputTypedDict from opperai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from opperai.utils import validate_const +from opperai.utils import get_discriminator, validate_const import pydantic -from pydantic import model_serializer 
+from pydantic import Discriminator, Tag, model_serializer from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -37,10 +37,13 @@ ) -ChatCompletionAssistantMessageParamContent1 = TypeAliasType( - "ChatCompletionAssistantMessageParamContent1", - Union[ChatCompletionContentPartTextParam, ChatCompletionContentPartRefusalParam], -) +ChatCompletionAssistantMessageParamContent1 = Annotated[ + Union[ + Annotated[ChatCompletionContentPartTextParam, Tag("text")], + Annotated[ChatCompletionContentPartRefusalParam, Tag("refusal")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] ChatCompletionAssistantMessageParamContent2TypedDict = TypeAliasType( @@ -64,13 +67,13 @@ ) -ChatCompletionAssistantMessageParamToolCall = TypeAliasType( - "ChatCompletionAssistantMessageParamToolCall", +ChatCompletionAssistantMessageParamToolCall = Annotated[ Union[ - ChatCompletionMessageFunctionToolCallParam, - ChatCompletionMessageCustomToolCallParam, + Annotated[ChatCompletionMessageFunctionToolCallParam, Tag("function")], + Annotated[ChatCompletionMessageCustomToolCallParam, Tag("custom")], ], -) + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] class ChatCompletionAssistantMessageParamTypedDict(TypedDict): diff --git a/src/opperai/models/chatcompletionmessage.py b/src/opperai/models/chatcompletionmessage.py index 5d76a5a..5c68540 100644 --- a/src/opperai/models/chatcompletionmessage.py +++ b/src/opperai/models/chatcompletionmessage.py @@ -13,9 +13,9 @@ ) from .functioncall_output import FunctionCallOutput, FunctionCallOutputTypedDict from opperai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from opperai.utils import validate_const +from opperai.utils import get_discriminator, validate_const import pydantic -from pydantic import ConfigDict, model_serializer +from pydantic import ConfigDict, 
Discriminator, Tag, model_serializer from pydantic.functional_validators import AfterValidator from typing import Any, Dict, List, Literal, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -30,10 +30,13 @@ ) -ChatCompletionMessageToolCall = TypeAliasType( - "ChatCompletionMessageToolCall", - Union[ChatCompletionMessageFunctionToolCall, ChatCompletionMessageCustomToolCall], -) +ChatCompletionMessageToolCall = Annotated[ + Union[ + Annotated[ChatCompletionMessageFunctionToolCall, Tag("function")], + Annotated[ChatCompletionMessageCustomToolCall, Tag("custom")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] class ChatCompletionMessageTypedDict(TypedDict): diff --git a/src/opperai/models/chatcompletionnonstreaming.py b/src/opperai/models/chatcompletionnonstreaming.py index b5eeeaf..0cdd9c1 100644 --- a/src/opperai/models/chatcompletionnonstreaming.py +++ b/src/opperai/models/chatcompletionnonstreaming.py @@ -78,9 +78,9 @@ from .websearchoptions import WebSearchOptions, WebSearchOptionsTypedDict from enum import Enum from opperai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from opperai.utils import validate_const +from opperai.utils import get_discriminator, validate_const import pydantic -from pydantic import model_serializer +from pydantic import Discriminator, Tag, model_serializer from pydantic.functional_validators import AfterValidator from typing import Any, Dict, List, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -99,17 +99,17 @@ ) -ChatCompletionNonStreamingMessage = TypeAliasType( - "ChatCompletionNonStreamingMessage", +ChatCompletionNonStreamingMessage = Annotated[ Union[ - ChatCompletionDeveloperMessageParam, - ChatCompletionSystemMessageParam, - ChatCompletionUserMessageParam, - ChatCompletionToolMessageParam, - ChatCompletionFunctionMessageParam, - ChatCompletionAssistantMessageParam, + 
Annotated[ChatCompletionDeveloperMessageParam, Tag("developer")], + Annotated[ChatCompletionSystemMessageParam, Tag("system")], + Annotated[ChatCompletionUserMessageParam, Tag("user")], + Annotated[ChatCompletionAssistantMessageParam, Tag("assistant")], + Annotated[ChatCompletionToolMessageParam, Tag("tool")], + Annotated[ChatCompletionFunctionMessageParam, Tag("function")], ], -) + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] class ChatCompletionNonStreamingFunctionCallEnum(str, Enum): @@ -157,10 +157,14 @@ class ChatCompletionNonStreamingReasoningEffort(str, Enum): ) -ChatCompletionNonStreamingResponseFormat = TypeAliasType( - "ChatCompletionNonStreamingResponseFormat", - Union[ResponseFormatText, ResponseFormatJSONObject, ResponseFormatJSONSchema], -) +ChatCompletionNonStreamingResponseFormat = Annotated[ + Union[ + Annotated[ResponseFormatText, Tag("text")], + Annotated[ResponseFormatJSONSchema, Tag("json_schema")], + Annotated[ResponseFormatJSONObject, Tag("json_object")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] class ChatCompletionNonStreamingServiceTier(str, Enum): @@ -217,10 +221,13 @@ class ChatCompletionNonStreamingToolChoiceEnum(str, Enum): ) -ChatCompletionNonStreamingTool = TypeAliasType( - "ChatCompletionNonStreamingTool", - Union[ChatCompletionFunctionToolParam, ChatCompletionCustomToolParam], -) +ChatCompletionNonStreamingTool = Annotated[ + Union[ + Annotated[ChatCompletionFunctionToolParam, Tag("function")], + Annotated[ChatCompletionCustomToolParam, Tag("custom")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] class ChatCompletionNonStreamingVerbosity(str, Enum): diff --git a/src/opperai/models/chatcompletionstreaming.py b/src/opperai/models/chatcompletionstreaming.py index 0869d2c..12deecf 100644 --- a/src/opperai/models/chatcompletionstreaming.py +++ b/src/opperai/models/chatcompletionstreaming.py @@ -78,9 +78,9 @@ from .websearchoptions import WebSearchOptions, 
WebSearchOptionsTypedDict from enum import Enum from opperai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from opperai.utils import validate_const +from opperai.utils import get_discriminator, validate_const import pydantic -from pydantic import model_serializer +from pydantic import Discriminator, Tag, model_serializer from pydantic.functional_validators import AfterValidator from typing import Any, Dict, List, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -99,17 +99,17 @@ ) -ChatCompletionStreamingMessage = TypeAliasType( - "ChatCompletionStreamingMessage", +ChatCompletionStreamingMessage = Annotated[ Union[ - ChatCompletionDeveloperMessageParam, - ChatCompletionSystemMessageParam, - ChatCompletionUserMessageParam, - ChatCompletionToolMessageParam, - ChatCompletionFunctionMessageParam, - ChatCompletionAssistantMessageParam, + Annotated[ChatCompletionDeveloperMessageParam, Tag("developer")], + Annotated[ChatCompletionSystemMessageParam, Tag("system")], + Annotated[ChatCompletionUserMessageParam, Tag("user")], + Annotated[ChatCompletionAssistantMessageParam, Tag("assistant")], + Annotated[ChatCompletionToolMessageParam, Tag("tool")], + Annotated[ChatCompletionFunctionMessageParam, Tag("function")], ], -) + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] class ChatCompletionStreamingFunctionCallEnum(str, Enum): @@ -156,10 +156,14 @@ class ChatCompletionStreamingReasoningEffort(str, Enum): ) -ChatCompletionStreamingResponseFormat = TypeAliasType( - "ChatCompletionStreamingResponseFormat", - Union[ResponseFormatText, ResponseFormatJSONObject, ResponseFormatJSONSchema], -) +ChatCompletionStreamingResponseFormat = Annotated[ + Union[ + Annotated[ResponseFormatText, Tag("text")], + Annotated[ResponseFormatJSONSchema, Tag("json_schema")], + Annotated[ResponseFormatJSONObject, Tag("json_object")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), 
+] class ChatCompletionStreamingServiceTier(str, Enum): @@ -216,10 +220,13 @@ class ChatCompletionStreamingToolChoiceEnum(str, Enum): ) -ChatCompletionStreamingTool = TypeAliasType( - "ChatCompletionStreamingTool", - Union[ChatCompletionFunctionToolParam, ChatCompletionCustomToolParam], -) +ChatCompletionStreamingTool = Annotated[ + Union[ + Annotated[ChatCompletionFunctionToolParam, Tag("function")], + Annotated[ChatCompletionCustomToolParam, Tag("custom")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] class ChatCompletionStreamingVerbosity(str, Enum): diff --git a/src/opperai/models/chatcompletionusermessageparam.py b/src/opperai/models/chatcompletionusermessageparam.py index 5415fee..e3121ee 100644 --- a/src/opperai/models/chatcompletionusermessageparam.py +++ b/src/opperai/models/chatcompletionusermessageparam.py @@ -15,8 +15,9 @@ ) from .file import File, FileTypedDict from opperai.types import BaseModel -from opperai.utils import validate_const +from opperai.utils import get_discriminator, validate_const import pydantic +from pydantic import Discriminator, Tag from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -33,15 +34,15 @@ ) -ChatCompletionUserMessageParamContent1 = TypeAliasType( - "ChatCompletionUserMessageParamContent1", +ChatCompletionUserMessageParamContent1 = Annotated[ Union[ - ChatCompletionContentPartTextParam, - ChatCompletionContentPartImageParam, - ChatCompletionContentPartInputAudioParam, - File, + Annotated[ChatCompletionContentPartTextParam, Tag("text")], + Annotated[ChatCompletionContentPartImageParam, Tag("image_url")], + Annotated[ChatCompletionContentPartInputAudioParam, Tag("input_audio")], + Annotated[File, Tag("file")], ], -) + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] ChatCompletionUserMessageParamContent2TypedDict = TypeAliasType( diff --git 
a/src/opperai/models/createfunctionrequest.py b/src/opperai/models/createfunctionrequest.py index 83d8052..14a38ae 100644 --- a/src/opperai/models/createfunctionrequest.py +++ b/src/opperai/models/createfunctionrequest.py @@ -1,9 +1,9 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from .functioncallconfiguration import ( - FunctionCallConfiguration, - FunctionCallConfigurationTypedDict, +from .functioncallconfiguration_input import ( + FunctionCallConfigurationInput, + FunctionCallConfigurationInputTypedDict, ) from .tmodel import TModel, TModelTypedDict from opperai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL @@ -24,7 +24,7 @@ class CreateFunctionRequestTypedDict(TypedDict): output_schema: NotRequired[Nullable[Dict[str, Any]]] r"""Optional output schema for the function. Can preferably include field descriptions to allow the model to reason about the output variables. Schema is validated against the output data and issues an error if it does not match. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. For schemas with definitions, prefer using '$defs' and '#/$defs/...' 
references.""" model: NotRequired[TModelTypedDict] - configuration: NotRequired[Nullable[FunctionCallConfigurationTypedDict]] + configuration: NotRequired[Nullable[FunctionCallConfigurationInputTypedDict]] r"""Optional configuration for the function.Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function.""" @@ -46,7 +46,7 @@ class CreateFunctionRequest(BaseModel): model: Optional[TModel] = None - configuration: OptionalNullable[FunctionCallConfiguration] = UNSET + configuration: OptionalNullable[FunctionCallConfigurationInput] = UNSET r"""Optional configuration for the function.Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function.""" @model_serializer(mode="wrap") diff --git a/src/opperai/models/createfunctionresponse.py b/src/opperai/models/createfunctionresponse.py index e0218a0..0c25ce3 100644 --- a/src/opperai/models/createfunctionresponse.py +++ b/src/opperai/models/createfunctionresponse.py @@ -1,9 +1,9 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from .functioncallconfiguration import ( - FunctionCallConfiguration, - FunctionCallConfigurationTypedDict, +from .functioncallconfiguration_output import ( + FunctionCallConfigurationOutput, + FunctionCallConfigurationOutputTypedDict, ) from .tmodel import TModel, TModelTypedDict from opperai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL @@ -26,12 +26,14 @@ class CreateFunctionResponseTypedDict(TypedDict): output_schema: NotRequired[Nullable[Dict[str, Any]]] r"""Optional output schema for the function. Can preferably include field descriptions to allow the model to reason about the output variables. Schema is validated against the output data and issues an error if it does not match. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. 
For schemas with definitions, prefer using '$defs' and '#/$defs/...' references.""" model: NotRequired[TModelTypedDict] - configuration: NotRequired[Nullable[FunctionCallConfigurationTypedDict]] + configuration: NotRequired[Nullable[FunctionCallConfigurationOutputTypedDict]] r"""Optional configuration for the function.Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function.""" dataset_id: NotRequired[Nullable[str]] r"""The ID of the dataset associated with the function""" revision_id: NotRequired[Nullable[str]] r"""The ID of the latest revision of the function""" + observer_enabled: NotRequired[bool] + r"""Whether the observer is enabled for this function. When enabled, the observer monitors and evaluates all generations.""" class CreateFunctionResponse(BaseModel): @@ -55,7 +57,7 @@ class CreateFunctionResponse(BaseModel): model: Optional[TModel] = None - configuration: OptionalNullable[FunctionCallConfiguration] = UNSET + configuration: OptionalNullable[FunctionCallConfigurationOutput] = UNSET r"""Optional configuration for the function.Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function.""" dataset_id: OptionalNullable[str] = UNSET @@ -64,6 +66,9 @@ class CreateFunctionResponse(BaseModel): revision_id: OptionalNullable[str] = UNSET r"""The ID of the latest revision of the function""" + observer_enabled: Optional[bool] = True + r"""Whether the observer is enabled for this function. 
When enabled, the observer monitors and evaluates all generations.""" + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -74,6 +79,7 @@ def serialize_model(self, handler): "configuration", "dataset_id", "revision_id", + "observer_enabled", ] nullable_fields = [ "description", diff --git a/src/opperai/models/evaluationconfig.py b/src/opperai/models/evaluationconfig.py deleted file mode 100644 index 842664d..0000000 --- a/src/opperai/models/evaluationconfig.py +++ /dev/null @@ -1,100 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from enum import Enum -from opperai.types import BaseModel -import pydantic -from pydantic import ConfigDict -from typing import Any, Dict, List, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -class ScorersEnum2(str, Enum): - BASE = "base" - RUBRICS = "rubrics" - TOXICITY = "toxicity" - HALLUCINATION = "hallucination" - QA = "qa" - AGENT_TOOL_SELECTION = "agent_tool_selection" - REGEX = "regex" - MAX_LENGTH = "max_length" - - -ScorersUnion1TypedDict = TypeAliasType( - "ScorersUnion1TypedDict", Union[ScorersEnum2, Dict[str, Any]] -) - - -ScorersUnion1 = TypeAliasType("ScorersUnion1", Union[ScorersEnum2, Dict[str, Any]]) - - -class ScorersEnum1(str, Enum): - BASE = "base" - RUBRICS = "rubrics" - TOXICITY = "toxicity" - HALLUCINATION = "hallucination" - QA = "qa" - AGENT_TOOL_SELECTION = "agent_tool_selection" - REGEX = "regex" - MAX_LENGTH = "max_length" - - -ScorersUnion2TypedDict = TypeAliasType( - "ScorersUnion2TypedDict", - Union[ScorersEnum1, Dict[str, Any], List[ScorersUnion1TypedDict]], -) -r"""Evaluation scorers to run: 'base', 'rubrics', 'toxicity', 'hallucination', 'qa', 'agent_tool_selection', 'regex', 'max_length', or a list of them.""" - - -ScorersUnion2 = TypeAliasType( - "ScorersUnion2", Union[ScorersEnum1, Dict[str, Any], List[ScorersUnion1]] -) -r"""Evaluation scorers to 
run: 'base', 'rubrics', 'toxicity', 'hallucination', 'qa', 'agent_tool_selection', 'regex', 'max_length', or a list of them.""" - - -class EvaluationConfigTypedDict(TypedDict): - r"""Configuration for evaluation features stored under 'beta.evaluation'. - - - enabled: master switch - - scorers: which evaluators to run. Accepts: - - string: \"base\" | \"rubrics\" | \"toxicity\" | \"hallucination\" | \"qa\" - - dict: { \"rubrics\": RubricDefinition-like payload } - - list[str | dict] - \"base\" is the default scorer. - """ - - enabled: NotRequired[bool] - r"""Enable evaluation features (base or rubrics).""" - scorers: NotRequired[ScorersUnion2TypedDict] - r"""Evaluation scorers to run: 'base', 'rubrics', 'toxicity', 'hallucination', 'qa', 'agent_tool_selection', 'regex', 'max_length', or a list of them.""" - - -class EvaluationConfig(BaseModel): - r"""Configuration for evaluation features stored under 'beta.evaluation'. - - - enabled: master switch - - scorers: which evaluators to run. Accepts: - - string: \"base\" | \"rubrics\" | \"toxicity\" | \"hallucination\" | \"qa\" - - dict: { \"rubrics\": RubricDefinition-like payload } - - list[str | dict] - \"base\" is the default scorer. 
- """ - - model_config = ConfigDict( - populate_by_name=True, arbitrary_types_allowed=True, extra="allow" - ) - __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) - - enabled: Optional[bool] = True - r"""Enable evaluation features (base or rubrics).""" - - scorers: Optional[ScorersUnion2] = None - r"""Evaluation scorers to run: 'base', 'rubrics', 'toxicity', 'hallucination', 'qa', 'agent_tool_selection', 'regex', 'max_length', or a list of them.""" - - @property - def additional_properties(self): - return self.__pydantic_extra__ - - @additional_properties.setter - def additional_properties(self, value): - self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] diff --git a/src/opperai/models/filedownloadurlresponse.py b/src/opperai/models/filedownloadurlresponse.py new file mode 100644 index 0000000..18aa248 --- /dev/null +++ b/src/opperai/models/filedownloadurlresponse.py @@ -0,0 +1,20 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from opperai.types import BaseModel +from typing_extensions import TypedDict + + +class FileDownloadURLResponseTypedDict(TypedDict): + url: str + r"""Presigned URL to download the file""" + expires_in: int + r"""Number of seconds until the URL expires""" + + +class FileDownloadURLResponse(BaseModel): + url: str + r"""Presigned URL to download the file""" + + expires_in: int + r"""Number of seconds until the URL expires""" diff --git a/src/opperai/models/functioncallconfiguration_input.py b/src/opperai/models/functioncallconfiguration_input.py new file mode 100644 index 0000000..c3d10c1 --- /dev/null +++ b/src/opperai/models/functioncallconfiguration_input.py @@ -0,0 +1,93 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from opperai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +import pydantic +from pydantic import model_serializer +from typing import Any, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class FunctionCallConfigurationInputTypedDict(TypedDict): + invocation_few_shot_count: NotRequired[int] + r"""The number of few-shot examples to use for the call. The examples are selected using nearest neighbor search of the function's dataset for items that are similar to the input.""" + beta_evaluation: NotRequired[Nullable[Any]] + r"""[DEPRECATED] This field is ignored. Evaluation is controlled by observer_enabled.""" + invocation_structured_generation_max_attempts: NotRequired[int] + r"""The maximum number of attempts to make when generating a response matching the output schema if provided.""" + invocation_cache_ttl: NotRequired[int] + r"""The time to live for the cache in seconds. If 0, the cache is disabled.""" + beta_invocation_input_validation_enabled: NotRequired[bool] + r"""Whether to enable input validation against the input schema. This is a beta feature and is disabled by default.""" + beta_invocation_xml_mode_enabled: NotRequired[bool] + r"""Experimental: enable XML structured output. The model receives an XML schema and its response is converted back to JSON. We have observed better adherence to multi-paragraph text fields (especially with Anthropic models) when this is enabled.""" + + +class FunctionCallConfigurationInput(BaseModel): + invocation_few_shot_count: Annotated[ + Optional[int], pydantic.Field(alias="invocation.few_shot.count") + ] = 0 + r"""The number of few-shot examples to use for the call. 
The examples are selected using nearest neighbor search of the function's dataset for items that are similar to the input.""" + + beta_evaluation: Annotated[ + OptionalNullable[Any], pydantic.Field(alias="beta.evaluation") + ] = UNSET + r"""[DEPRECATED] This field is ignored. Evaluation is controlled by observer_enabled.""" + + invocation_structured_generation_max_attempts: Annotated[ + Optional[int], + pydantic.Field(alias="invocation.structured_generation.max_attempts"), + ] = 5 + r"""The maximum number of attempts to make when generating a response matching the output schema if provided.""" + + invocation_cache_ttl: Annotated[ + Optional[int], pydantic.Field(alias="invocation.cache.ttl") + ] = 0 + r"""The time to live for the cache in seconds. If 0, the cache is disabled.""" + + beta_invocation_input_validation_enabled: Annotated[ + Optional[bool], pydantic.Field(alias="beta.invocation.input_validation.enabled") + ] = False + r"""Whether to enable input validation against the input schema. This is a beta feature and is disabled by default.""" + + beta_invocation_xml_mode_enabled: Annotated[ + Optional[bool], pydantic.Field(alias="beta.invocation.xml_mode.enabled") + ] = False + r"""Experimental: enable XML structured output. The model receives an XML schema and its response is converted back to JSON. 
We have observed better adherence to multi-paragraph text fields (especially with Anthropic models) when this is enabled.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "invocation.few_shot.count", + "beta.evaluation", + "invocation.structured_generation.max_attempts", + "invocation.cache.ttl", + "beta.invocation.input_validation.enabled", + "beta.invocation.xml_mode.enabled", + ] + nullable_fields = ["beta.evaluation"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/opperai/models/functioncallconfiguration.py b/src/opperai/models/functioncallconfiguration_output.py similarity index 72% rename from src/opperai/models/functioncallconfiguration.py rename to src/opperai/models/functioncallconfiguration_output.py index 68054a0..6ad1125 100644 --- a/src/opperai/models/functioncallconfiguration.py +++ b/src/opperai/models/functioncallconfiguration_output.py @@ -1,26 +1,15 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from .evaluationconfig import EvaluationConfig, EvaluationConfigTypedDict from opperai.types import BaseModel import pydantic from typing import Optional from typing_extensions import Annotated, NotRequired, TypedDict -class FunctionCallConfigurationTypedDict(TypedDict): +class FunctionCallConfigurationOutputTypedDict(TypedDict): invocation_few_shot_count: NotRequired[int] r"""The number of few-shot examples to use for the call. The examples are selected using nearest neighbor search of the function's dataset for items that are similar to the input.""" - beta_evaluation: NotRequired[EvaluationConfigTypedDict] - r"""Configuration for evaluation features stored under 'beta.evaluation'. - - - enabled: master switch - - scorers: which evaluators to run. Accepts: - - string: \"base\" | \"rubrics\" | \"toxicity\" | \"hallucination\" | \"qa\" - - dict: { \"rubrics\": RubricDefinition-like payload } - - list[str | dict] - \"base\" is the default scorer. - """ invocation_structured_generation_max_attempts: NotRequired[int] r"""The maximum number of attempts to make when generating a response matching the output schema if provided.""" invocation_cache_ttl: NotRequired[int] @@ -31,25 +20,12 @@ class FunctionCallConfigurationTypedDict(TypedDict): r"""Experimental: enable XML structured output. The model receives an XML schema and its response is converted back to JSON. We have observed better adherence to multi-paragraph text fields (especially with Anthropic models) when this is enabled.""" -class FunctionCallConfiguration(BaseModel): +class FunctionCallConfigurationOutput(BaseModel): invocation_few_shot_count: Annotated[ Optional[int], pydantic.Field(alias="invocation.few_shot.count") ] = 0 r"""The number of few-shot examples to use for the call. 
The examples are selected using nearest neighbor search of the function's dataset for items that are similar to the input.""" - beta_evaluation: Annotated[ - Optional[EvaluationConfig], pydantic.Field(alias="beta.evaluation") - ] = None - r"""Configuration for evaluation features stored under 'beta.evaluation'. - - - enabled: master switch - - scorers: which evaluators to run. Accepts: - - string: \"base\" | \"rubrics\" | \"toxicity\" | \"hallucination\" | \"qa\" - - dict: { \"rubrics\": RubricDefinition-like payload } - - list[str | dict] - \"base\" is the default scorer. - """ - invocation_structured_generation_max_attempts: Annotated[ Optional[int], pydantic.Field(alias="invocation.structured_generation.max_attempts"), diff --git a/src/opperai/models/get_file_download_url_knowledge_knowledge_base_id_files_file_id_download_url_getop.py b/src/opperai/models/get_file_download_url_knowledge_knowledge_base_id_files_file_id_download_url_getop.py new file mode 100644 index 0000000..ceeec2a --- /dev/null +++ b/src/opperai/models/get_file_download_url_knowledge_knowledge_base_id_files_file_id_download_url_getop.py @@ -0,0 +1,29 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from opperai.types import BaseModel +from opperai.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class GetFileDownloadURLKnowledgeKnowledgeBaseIDFilesFileIDDownloadURLGetRequestTypedDict( + TypedDict +): + knowledge_base_id: str + r"""The id of the knowledge base""" + file_id: str + r"""The id of the file""" + + +class GetFileDownloadURLKnowledgeKnowledgeBaseIDFilesFileIDDownloadURLGetRequest( + BaseModel +): + knowledge_base_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""The id of the knowledge base""" + + file_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""The id of the file""" diff --git a/src/opperai/models/getfunctionresponse.py b/src/opperai/models/getfunctionresponse.py index 65386cc..3d23495 100644 --- a/src/opperai/models/getfunctionresponse.py +++ b/src/opperai/models/getfunctionresponse.py @@ -1,9 +1,9 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from .functioncallconfiguration import ( - FunctionCallConfiguration, - FunctionCallConfigurationTypedDict, +from .functioncallconfiguration_output import ( + FunctionCallConfigurationOutput, + FunctionCallConfigurationOutputTypedDict, ) from .tmodel import TModel, TModelTypedDict from opperai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL @@ -26,12 +26,14 @@ class GetFunctionResponseTypedDict(TypedDict): output_schema: NotRequired[Nullable[Dict[str, Any]]] r"""Optional output schema for the function. Can preferably include field descriptions to allow the model to reason about the output variables. Schema is validated against the output data and issues an error if it does not match. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. 
For schemas with definitions, prefer using '$defs' and '#/$defs/...' references.""" model: NotRequired[TModelTypedDict] - configuration: NotRequired[Nullable[FunctionCallConfigurationTypedDict]] + configuration: NotRequired[Nullable[FunctionCallConfigurationOutputTypedDict]] r"""Optional configuration for the function.Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function.""" dataset_id: NotRequired[Nullable[str]] r"""The ID of the dataset associated with the function""" revision_id: NotRequired[Nullable[str]] r"""The ID of the latest revision of the function""" + observer_enabled: NotRequired[bool] + r"""Whether the observer is enabled for this function. When enabled, the observer monitors and evaluates all generations.""" class GetFunctionResponse(BaseModel): @@ -55,7 +57,7 @@ class GetFunctionResponse(BaseModel): model: Optional[TModel] = None - configuration: OptionalNullable[FunctionCallConfiguration] = UNSET + configuration: OptionalNullable[FunctionCallConfigurationOutput] = UNSET r"""Optional configuration for the function.Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function.""" dataset_id: OptionalNullable[str] = UNSET @@ -64,6 +66,9 @@ class GetFunctionResponse(BaseModel): revision_id: OptionalNullable[str] = UNSET r"""The ID of the latest revision of the function""" + observer_enabled: Optional[bool] = True + r"""Whether the observer is enabled for this function. 
When enabled, the observer monitors and evaluates all generations.""" + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -74,6 +79,7 @@ def serialize_model(self, handler): "configuration", "dataset_id", "revision_id", + "observer_enabled", ] nullable_fields = [ "description", diff --git a/src/opperai/models/list_files_knowledge_knowledge_base_id_files_getop.py b/src/opperai/models/list_files_knowledge_knowledge_base_id_files_getop.py new file mode 100644 index 0000000..1a4fa77 --- /dev/null +++ b/src/opperai/models/list_files_knowledge_knowledge_base_id_files_getop.py @@ -0,0 +1,35 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from opperai.types import BaseModel +from opperai.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class ListFilesKnowledgeKnowledgeBaseIDFilesGetRequestTypedDict(TypedDict): + knowledge_base_id: str + r"""The id of the knowledge base to list files from""" + offset: NotRequired[int] + r"""The offset to start the list from""" + limit: NotRequired[int] + r"""The number of files to return""" + + +class ListFilesKnowledgeKnowledgeBaseIDFilesGetRequest(BaseModel): + knowledge_base_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""The id of the knowledge base to list files from""" + + offset: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 0 + r"""The offset to start the list from""" + + limit: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 100 + r"""The number of files to return""" diff --git a/src/opperai/models/listfilesresponse.py b/src/opperai/models/listfilesresponse.py new file mode 100644 index 0000000..58f6462 --- /dev/null +++ 
b/src/opperai/models/listfilesresponse.py @@ -0,0 +1,41 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from opperai.types import BaseModel +from typing import Any, Dict, Optional +from typing_extensions import NotRequired, TypedDict + + +class ListFilesResponseTypedDict(TypedDict): + id: str + r"""The id of the file""" + original_filename: str + r"""The original filename""" + size: int + r"""The size of the file in bytes""" + status: str + r"""The indexing status of the file""" + document_id: int + r"""The id of the associated document""" + metadata: NotRequired[Dict[str, Any]] + r"""The metadata attached to the file""" + + +class ListFilesResponse(BaseModel): + id: str + r"""The id of the file""" + + original_filename: str + r"""The original filename""" + + size: int + r"""The size of the file in bytes""" + + status: str + r"""The indexing status of the file""" + + document_id: int + r"""The id of the associated document""" + + metadata: Optional[Dict[str, Any]] = None + r"""The metadata attached to the file""" diff --git a/src/opperai/models/openai_types_chat_chat_completion_custom_tool_param_custom.py b/src/opperai/models/openai_types_chat_chat_completion_custom_tool_param_custom.py index 0d7f94e..9642be5 100644 --- a/src/opperai/models/openai_types_chat_chat_completion_custom_tool_param_custom.py +++ b/src/opperai/models/openai_types_chat_chat_completion_custom_tool_param_custom.py @@ -4,7 +4,9 @@ from .customformatgrammar import CustomFormatGrammar, CustomFormatGrammarTypedDict from .customformattext import CustomFormatText, CustomFormatTextTypedDict from opperai.types import BaseModel +from opperai.utils import get_discriminator import pydantic +from pydantic import Discriminator, Tag from typing import Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -14,7 +16,13 @@ ) -Format = TypeAliasType("Format", Union[CustomFormatText, 
CustomFormatGrammar]) +Format = Annotated[ + Union[ + Annotated[CustomFormatText, Tag("text")], + Annotated[CustomFormatGrammar, Tag("grammar")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] class OpenaiTypesChatChatCompletionCustomToolParamCustomTypedDict(TypedDict): diff --git a/src/opperai/models/paginatedresponse_listfilesresponse_.py b/src/opperai/models/paginatedresponse_listfilesresponse_.py new file mode 100644 index 0000000..e3b9ce3 --- /dev/null +++ b/src/opperai/models/paginatedresponse_listfilesresponse_.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .listfilesresponse import ListFilesResponse, ListFilesResponseTypedDict +from .meta import Meta, MetaTypedDict +from opperai.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class PaginatedResponseListFilesResponseTypedDict(TypedDict): + meta: MetaTypedDict + data: List[ListFilesResponseTypedDict] + r"""List of items returned in the response""" + + +class PaginatedResponseListFilesResponse(BaseModel): + meta: Meta + + data: List[ListFilesResponse] + r"""List of items returned in the response""" diff --git a/src/opperai/models/registerfileuploadrequest.py b/src/opperai/models/registerfileuploadrequest.py index 4ae8c3d..0c08df3 100644 --- a/src/opperai/models/registerfileuploadrequest.py +++ b/src/opperai/models/registerfileuploadrequest.py @@ -7,6 +7,7 @@ ) from opperai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer +from typing import Any, Dict from typing_extensions import NotRequired, TypedDict @@ -19,6 +20,8 @@ class RegisterFileUploadRequestTypedDict(TypedDict): r"""The content type of the file to register""" configuration: NotRequired[Nullable[TextProcessingConfigurationTypedDict]] r"""The configuration for the file to register""" + metadata: NotRequired[Nullable[Dict[str, 
Any]]] + r"""Optional metadata to attach to the file""" class RegisterFileUploadRequest(BaseModel): @@ -34,10 +37,13 @@ class RegisterFileUploadRequest(BaseModel): configuration: OptionalNullable[TextProcessingConfiguration] = UNSET r"""The configuration for the file to register""" + metadata: OptionalNullable[Dict[str, Any]] = UNSET + r"""Optional metadata to attach to the file""" + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["configuration"] - nullable_fields = ["configuration"] + optional_fields = ["configuration", "metadata"] + nullable_fields = ["configuration", "metadata"] null_default_fields = [] serialized = handler(self) diff --git a/src/opperai/models/registerfileuploadresponse.py b/src/opperai/models/registerfileuploadresponse.py index ce6e174..6102e0a 100644 --- a/src/opperai/models/registerfileuploadresponse.py +++ b/src/opperai/models/registerfileuploadresponse.py @@ -2,7 +2,8 @@ from __future__ import annotations from opperai.types import BaseModel -from typing_extensions import TypedDict +from typing import Any, Dict, Optional +from typing_extensions import NotRequired, TypedDict class RegisterFileUploadResponseTypedDict(TypedDict): @@ -10,6 +11,7 @@ class RegisterFileUploadResponseTypedDict(TypedDict): key: str original_filename: str document_id: int + metadata: NotRequired[Dict[str, Any]] class RegisterFileUploadResponse(BaseModel): @@ -20,3 +22,5 @@ class RegisterFileUploadResponse(BaseModel): original_filename: str document_id: int + + metadata: Optional[Dict[str, Any]] = None diff --git a/src/opperai/models/updatefunctionresponse.py b/src/opperai/models/updatefunctionresponse.py index 6ac4ba6..964b9b0 100644 --- a/src/opperai/models/updatefunctionresponse.py +++ b/src/opperai/models/updatefunctionresponse.py @@ -1,9 +1,9 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from .functioncallconfiguration import ( - FunctionCallConfiguration, - FunctionCallConfigurationTypedDict, +from .functioncallconfiguration_output import ( + FunctionCallConfigurationOutput, + FunctionCallConfigurationOutputTypedDict, ) from .tmodel import TModel, TModelTypedDict from opperai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL @@ -26,12 +26,14 @@ class UpdateFunctionResponseTypedDict(TypedDict): output_schema: NotRequired[Nullable[Dict[str, Any]]] r"""Optional output schema for the function. Can preferably include field descriptions to allow the model to reason about the output variables. Schema is validated against the output data and issues an error if it does not match. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. For schemas with definitions, prefer using '$defs' and '#/$defs/...' references.""" model: NotRequired[TModelTypedDict] - configuration: NotRequired[Nullable[FunctionCallConfigurationTypedDict]] + configuration: NotRequired[Nullable[FunctionCallConfigurationOutputTypedDict]] r"""Optional configuration for the function.Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function.""" dataset_id: NotRequired[Nullable[str]] r"""The ID of the dataset associated with the function""" revision_id: NotRequired[Nullable[str]] r"""The ID of the latest revision of the function""" + observer_enabled: NotRequired[bool] + r"""Whether the observer is enabled for this function. 
When enabled, the observer monitors and evaluates all generations.""" class UpdateFunctionResponse(BaseModel): @@ -55,7 +57,7 @@ class UpdateFunctionResponse(BaseModel): model: Optional[TModel] = None - configuration: OptionalNullable[FunctionCallConfiguration] = UNSET + configuration: OptionalNullable[FunctionCallConfigurationOutput] = UNSET r"""Optional configuration for the function.Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function.""" dataset_id: OptionalNullable[str] = UNSET @@ -64,6 +66,9 @@ class UpdateFunctionResponse(BaseModel): revision_id: OptionalNullable[str] = UNSET r"""The ID of the latest revision of the function""" + observer_enabled: Optional[bool] = True + r"""Whether the observer is enabled for this function. When enabled, the observer monitors and evaluates all generations.""" + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -74,6 +79,7 @@ def serialize_model(self, handler): "configuration", "dataset_id", "revision_id", + "observer_enabled", ] nullable_fields = [ "description", diff --git a/src/opperai/openai.py b/src/opperai/openai.py index a8b6e03..5aafab8 100644 --- a/src/opperai/openai.py +++ b/src/opperai/openai.py @@ -57,6 +57,7 @@ def create_chat_completion( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.Payload ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -160,6 +161,7 @@ async def create_chat_completion_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.Payload ), + allow_empty_value=None, timeout_ms=timeout_ms, ) diff --git a/src/opperai/rerank.py b/src/opperai/rerank.py index 432f4d0..043dcbe 100644 --- a/src/opperai/rerank.py +++ b/src/opperai/rerank.py @@ -83,6 +83,7 @@ def documents( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.RerankRequestModel ), + 
allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -212,6 +213,7 @@ async def documents_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.RerankRequestModel ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -310,6 +312,7 @@ def list_models( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -410,6 +413,7 @@ async def list_models_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) diff --git a/src/opperai/revisions.py b/src/opperai/revisions.py index 6ea8b74..3e0547b 100644 --- a/src/opperai/revisions.py +++ b/src/opperai/revisions.py @@ -65,6 +65,7 @@ def list( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -178,6 +179,7 @@ async def list_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) diff --git a/src/opperai/sdk.py b/src/opperai/sdk.py index 583c732..c2fa24f 100644 --- a/src/opperai/sdk.py +++ b/src/opperai/sdk.py @@ -231,8 +231,8 @@ def call( tags: OptionalNullable[Dict[str, str]] = UNSET, configuration: OptionalNullable[ Union[ - models.FunctionCallConfiguration, - models.FunctionCallConfigurationTypedDict, + models.FunctionCallConfigurationInput, + models.FunctionCallConfigurationInputTypedDict, ] ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -248,7 +248,9 @@ def call( :param name: Provide a unique name of the task. A function with this name will be created in the project. Functions configuration is overridden by the request parameters. :param instructions: Optionally provide an instruction for the model to complete the task. 
Recommended to be concise and to the point :param input_schema: Optionally provide an input schema for the task. Can preferably include field descriptions to allow the model to reason about the input variables. Schema is validated against the input data and issues an error if it does not match. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. For schemas with definitions, prefer using '$defs' and '#/$defs/...' references. - :param output_schema: Optionally provide an output schema for the task. Response is guaranteed to match the schema or throw an error. Can preferably include field descriptions to allow the model to reason about the output variables. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. For schemas with definitions, prefer using '$defs' and '#/$defs/...' references. **Streaming with output_schema:** When used with streaming endpoints, enables precise field tracking via json_path. Each streaming chunk includes the exact schema field being populated (e.g., 'response.people[0].name'), allowing real-time UI updates by routing content to specific components. + :param output_schema: Optionally provide an output schema for the task. Response is guaranteed to match the schema or throw an error. Can preferably include field descriptions to allow the model to reason about the output variables. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. For schemas with definitions, prefer using '$defs' and '#/$defs/...' references. + + **Streaming with output_schema:** When used with streaming endpoints, enables precise field tracking via json_path. Each streaming chunk includes the exact schema field being populated (e.g., 'response.people[0].name'), allowing real-time UI updates by routing content to specific components. :param input: Optionally provide input data as context to complete the task. 
Could be a text, image, audio or a combination of these. :param model: :param examples: Optionally provide examples of successful task completions. Will be added to the prompt to help the model understand the task from examples. @@ -290,7 +292,7 @@ def call( parent_span_id=parent_span_id, tags=tags, configuration=utils.get_pydantic_model( - configuration, OptionalNullable[models.FunctionCallConfiguration] + configuration, OptionalNullable[models.FunctionCallConfigurationInput] ), ) @@ -314,6 +316,7 @@ def call( "json", models.AppAPIPublicV2FunctionCallCallFunctionRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -388,8 +391,8 @@ async def call_async( tags: OptionalNullable[Dict[str, str]] = UNSET, configuration: OptionalNullable[ Union[ - models.FunctionCallConfiguration, - models.FunctionCallConfigurationTypedDict, + models.FunctionCallConfigurationInput, + models.FunctionCallConfigurationInputTypedDict, ] ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -405,7 +408,9 @@ async def call_async( :param name: Provide a unique name of the task. A function with this name will be created in the project. Functions configuration is overridden by the request parameters. :param instructions: Optionally provide an instruction for the model to complete the task. Recommended to be concise and to the point :param input_schema: Optionally provide an input schema for the task. Can preferably include field descriptions to allow the model to reason about the input variables. Schema is validated against the input data and issues an error if it does not match. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. For schemas with definitions, prefer using '$defs' and '#/$defs/...' references. - :param output_schema: Optionally provide an output schema for the task. Response is guaranteed to match the schema or throw an error. 
Can preferably include field descriptions to allow the model to reason about the output variables. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. For schemas with definitions, prefer using '$defs' and '#/$defs/...' references. **Streaming with output_schema:** When used with streaming endpoints, enables precise field tracking via json_path. Each streaming chunk includes the exact schema field being populated (e.g., 'response.people[0].name'), allowing real-time UI updates by routing content to specific components. + :param output_schema: Optionally provide an output schema for the task. Response is guaranteed to match the schema or throw an error. Can preferably include field descriptions to allow the model to reason about the output variables. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. For schemas with definitions, prefer using '$defs' and '#/$defs/...' references. + + **Streaming with output_schema:** When used with streaming endpoints, enables precise field tracking via json_path. Each streaming chunk includes the exact schema field being populated (e.g., 'response.people[0].name'), allowing real-time UI updates by routing content to specific components. :param input: Optionally provide input data as context to complete the task. Could be a text, image, audio or a combination of these. :param model: :param examples: Optionally provide examples of successful task completions. Will be added to the prompt to help the model understand the task from examples. 
@@ -447,7 +452,7 @@ async def call_async( parent_span_id=parent_span_id, tags=tags, configuration=utils.get_pydantic_model( - configuration, OptionalNullable[models.FunctionCallConfiguration] + configuration, OptionalNullable[models.FunctionCallConfigurationInput] ), ) @@ -471,6 +476,7 @@ async def call_async( "json", models.AppAPIPublicV2FunctionCallCallFunctionRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -545,8 +551,8 @@ def stream( tags: OptionalNullable[Dict[str, str]] = UNSET, configuration: OptionalNullable[ Union[ - models.FunctionCallConfiguration, - models.FunctionCallConfigurationTypedDict, + models.FunctionCallConfigurationInput, + models.FunctionCallConfigurationInputTypedDict, ] ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -628,7 +634,9 @@ def stream( :param name: Provide a unique name of the task. A function with this name will be created in the project. Functions configuration is overridden by the request parameters. :param instructions: Optionally provide an instruction for the model to complete the task. Recommended to be concise and to the point :param input_schema: Optionally provide an input schema for the task. Can preferably include field descriptions to allow the model to reason about the input variables. Schema is validated against the input data and issues an error if it does not match. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. For schemas with definitions, prefer using '$defs' and '#/$defs/...' references. - :param output_schema: Optionally provide an output schema for the task. Response is guaranteed to match the schema or throw an error. Can preferably include field descriptions to allow the model to reason about the output variables. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. For schemas with definitions, prefer using '$defs' and '#/$defs/...' references. 
**Streaming with output_schema:** When used with streaming endpoints, enables precise field tracking via json_path. Each streaming chunk includes the exact schema field being populated (e.g., 'response.people[0].name'), allowing real-time UI updates by routing content to specific components. + :param output_schema: Optionally provide an output schema for the task. Response is guaranteed to match the schema or throw an error. Can preferably include field descriptions to allow the model to reason about the output variables. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. For schemas with definitions, prefer using '$defs' and '#/$defs/...' references. + + **Streaming with output_schema:** When used with streaming endpoints, enables precise field tracking via json_path. Each streaming chunk includes the exact schema field being populated (e.g., 'response.people[0].name'), allowing real-time UI updates by routing content to specific components. :param input: Optionally provide input data as context to complete the task. Could be a text, image, audio or a combination of these. :param model: :param examples: Optionally provide examples of successful task completions. Will be added to the prompt to help the model understand the task from examples. 
@@ -670,7 +678,7 @@ def stream( parent_span_id=parent_span_id, tags=tags, configuration=utils.get_pydantic_model( - configuration, OptionalNullable[models.FunctionCallConfiguration] + configuration, OptionalNullable[models.FunctionCallConfigurationInput] ), ) @@ -694,6 +702,7 @@ def stream( "json", models.AppAPIPublicV2FunctionCallCallFunctionRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -783,8 +792,8 @@ async def stream_async( tags: OptionalNullable[Dict[str, str]] = UNSET, configuration: OptionalNullable[ Union[ - models.FunctionCallConfiguration, - models.FunctionCallConfigurationTypedDict, + models.FunctionCallConfigurationInput, + models.FunctionCallConfigurationInputTypedDict, ] ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -866,7 +875,9 @@ async def stream_async( :param name: Provide a unique name of the task. A function with this name will be created in the project. Functions configuration is overridden by the request parameters. :param instructions: Optionally provide an instruction for the model to complete the task. Recommended to be concise and to the point :param input_schema: Optionally provide an input schema for the task. Can preferably include field descriptions to allow the model to reason about the input variables. Schema is validated against the input data and issues an error if it does not match. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. For schemas with definitions, prefer using '$defs' and '#/$defs/...' references. - :param output_schema: Optionally provide an output schema for the task. Response is guaranteed to match the schema or throw an error. Can preferably include field descriptions to allow the model to reason about the output variables. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. For schemas with definitions, prefer using '$defs' and '#/$defs/...' references. 
**Streaming with output_schema:** When used with streaming endpoints, enables precise field tracking via json_path. Each streaming chunk includes the exact schema field being populated (e.g., 'response.people[0].name'), allowing real-time UI updates by routing content to specific components. + :param output_schema: Optionally provide an output schema for the task. Response is guaranteed to match the schema or throw an error. Can preferably include field descriptions to allow the model to reason about the output variables. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. For schemas with definitions, prefer using '$defs' and '#/$defs/...' references. + + **Streaming with output_schema:** When used with streaming endpoints, enables precise field tracking via json_path. Each streaming chunk includes the exact schema field being populated (e.g., 'response.people[0].name'), allowing real-time UI updates by routing content to specific components. :param input: Optionally provide input data as context to complete the task. Could be a text, image, audio or a combination of these. :param model: :param examples: Optionally provide examples of successful task completions. Will be added to the prompt to help the model understand the task from examples. 
@@ -908,7 +919,7 @@ async def stream_async( parent_span_id=parent_span_id, tags=tags, configuration=utils.get_pydantic_model( - configuration, OptionalNullable[models.FunctionCallConfiguration] + configuration, OptionalNullable[models.FunctionCallConfigurationInput] ), ) @@ -932,6 +943,7 @@ async def stream_async( "json", models.AppAPIPublicV2FunctionCallCallFunctionRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) diff --git a/src/opperai/spanmetrics.py b/src/opperai/spanmetrics.py index ce3f131..3f8b030 100644 --- a/src/opperai/spanmetrics.py +++ b/src/opperai/spanmetrics.py @@ -74,6 +74,7 @@ def create_metric( "json", models.CreateSpanMetricRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -197,6 +198,7 @@ async def create_metric_async( "json", models.CreateSpanMetricRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -308,6 +310,7 @@ def list( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -418,6 +421,7 @@ async def list_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -525,6 +529,7 @@ def get( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -630,6 +635,7 @@ async def get_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -753,6 +759,7 @@ def update_metric( "json", models.UpdateSpanMetricRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -876,6 +883,7 @@ async def update_metric_async( "json", models.UpdateSpanMetricRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -981,6 +989,7 @@ def delete( 
accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1086,6 +1095,7 @@ async def delete_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) diff --git a/src/opperai/spans.py b/src/opperai/spans.py index 255addd..ec964d3 100644 --- a/src/opperai/spans.py +++ b/src/opperai/spans.py @@ -93,6 +93,7 @@ def create( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.CreateSpanRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -231,6 +232,7 @@ async def create_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.CreateSpanRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -333,6 +335,7 @@ def get( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -435,6 +438,7 @@ async def get_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -573,6 +577,7 @@ def update( "json", models.UpdateSpanRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -711,6 +716,7 @@ async def update_async( "json", models.UpdateSpanRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -813,6 +819,7 @@ def delete( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -915,6 +922,7 @@ async def delete_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1017,6 +1025,7 @@ def save_examples( 
accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1119,6 +1128,7 @@ async def save_examples_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) diff --git a/src/opperai/traces.py b/src/opperai/traces.py index 7749df1..c5e4cd3 100644 --- a/src/opperai/traces.py +++ b/src/opperai/traces.py @@ -62,6 +62,7 @@ def list( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -172,6 +173,7 @@ async def list_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -276,6 +278,7 @@ def get( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -378,6 +381,7 @@ async def get_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) diff --git a/src/opperai/utils/forms.py b/src/opperai/utils/forms.py index e873495..f961e76 100644 --- a/src/opperai/utils/forms.py +++ b/src/opperai/utils/forms.py @@ -142,16 +142,21 @@ def serialize_multipart_form( if field_metadata.file: if isinstance(val, List): # Handle array of files + array_field_name = f_name + "[]" for file_obj in val: if not _is_set(file_obj): continue - - file_name, content, content_type = _extract_file_properties(file_obj) + + file_name, content, content_type = _extract_file_properties( + file_obj + ) if content_type is not None: - files.append((f_name + "[]", (file_name, content, content_type))) + files.append( + (array_field_name, (file_name, content, 
content_type)) + ) else: - files.append((f_name + "[]", (file_name, content))) + files.append((array_field_name, (file_name, content))) else: # Handle single file file_name, content, content_type = _extract_file_properties(val) @@ -161,11 +166,16 @@ def serialize_multipart_form( else: files.append((f_name, (file_name, content))) elif field_metadata.json: - files.append((f_name, ( - None, - marshal_json(val, request_field_types[name]), - "application/json", - ))) + files.append( + ( + f_name, + ( + None, + marshal_json(val, request_field_types[name]), + "application/json", + ), + ) + ) else: if isinstance(val, List): values = [] @@ -175,7 +185,8 @@ def serialize_multipart_form( continue values.append(_val_to_string(value)) - form[f_name + "[]"] = values + array_field_name = f_name + "[]" + form[array_field_name] = values else: form[f_name] = _val_to_string(val) return media_type, form, files diff --git a/src/opperai/utils/queryparams.py b/src/opperai/utils/queryparams.py index 37a6e7f..c04e0db 100644 --- a/src/opperai/utils/queryparams.py +++ b/src/opperai/utils/queryparams.py @@ -27,12 +27,13 @@ def get_query_params( query_params: Any, gbls: Optional[Any] = None, + allow_empty_value: Optional[List[str]] = None, ) -> Dict[str, List[str]]: params: Dict[str, List[str]] = {} - globals_already_populated = _populate_query_params(query_params, gbls, params, []) + globals_already_populated = _populate_query_params(query_params, gbls, params, [], allow_empty_value) if _is_set(gbls): - _populate_query_params(gbls, None, params, globals_already_populated) + _populate_query_params(gbls, None, params, globals_already_populated, allow_empty_value) return params @@ -42,6 +43,7 @@ def _populate_query_params( gbls: Any, query_param_values: Dict[str, List[str]], skip_fields: List[str], + allow_empty_value: Optional[List[str]] = None, ) -> List[str]: globals_already_populated: List[str] = [] @@ -69,6 +71,16 @@ def _populate_query_params( globals_already_populated.append(name) f_name 
= field.alias if field.alias is not None else name + + allow_empty_set = set(allow_empty_value or []) + should_include_empty = f_name in allow_empty_set and ( + value is None or value == [] or value == "" + ) + + if should_include_empty: + query_param_values[f_name] = [""] + continue + serialization = metadata.serialization if serialization is not None: serialized_parms = _get_serialized_params( diff --git a/src/opperai/utils/retries.py b/src/opperai/utils/retries.py index 4d60867..88a91b1 100644 --- a/src/opperai/utils/retries.py +++ b/src/opperai/utils/retries.py @@ -3,7 +3,9 @@ import asyncio import random import time -from typing import List +from datetime import datetime +from email.utils import parsedate_to_datetime +from typing import List, Optional import httpx @@ -51,9 +53,11 @@ def __init__(self, config: RetryConfig, status_codes: List[str]): class TemporaryError(Exception): response: httpx.Response + retry_after: Optional[int] def __init__(self, response: httpx.Response): self.response = response + self.retry_after = _parse_retry_after_header(response) class PermanentError(Exception): @@ -63,6 +67,62 @@ def __init__(self, inner: Exception): self.inner = inner +def _parse_retry_after_header(response: httpx.Response) -> Optional[int]: + """Parse Retry-After header from response. + + Returns: + Retry interval in milliseconds, or None if header is missing or invalid. 
+ """ + retry_after_header = response.headers.get("retry-after") + if not retry_after_header: + return None + + try: + seconds = float(retry_after_header) + return round(seconds * 1000) + except ValueError: + pass + + try: + retry_date = parsedate_to_datetime(retry_after_header) + delta = (retry_date - datetime.now(retry_date.tzinfo)).total_seconds() + return round(max(0, delta) * 1000) + except (ValueError, TypeError): + pass + + return None + + +def _get_sleep_interval( + exception: Exception, + initial_interval: int, + max_interval: int, + exponent: float, + retries: int, +) -> float: + """Get sleep interval for retry with exponential backoff. + + Args: + exception: The exception that triggered the retry. + initial_interval: Initial retry interval in milliseconds. + max_interval: Maximum retry interval in milliseconds. + exponent: Base for exponential backoff calculation. + retries: Current retry attempt count. + + Returns: + Sleep interval in seconds. + """ + if ( + isinstance(exception, TemporaryError) + and exception.retry_after is not None + and exception.retry_after > 0 + ): + return exception.retry_after / 1000 + + sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) + return min(sleep, max_interval / 1000) + + def retry(func, retries: Retries): if retries.config.strategy == "backoff": @@ -183,8 +243,10 @@ def retry_with_backoff( return exception.response raise - sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) - sleep = min(sleep, max_interval / 1000) + + sleep = _get_sleep_interval( + exception, initial_interval, max_interval, exponent, retries + ) time.sleep(sleep) retries += 1 @@ -211,7 +273,9 @@ async def retry_with_backoff_async( return exception.response raise - sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) - sleep = min(sleep, max_interval / 1000) + + sleep = _get_sleep_interval( + exception, initial_interval, max_interval, exponent, retries + ) await 
asyncio.sleep(sleep) retries += 1 diff --git a/src/opperai/utils/unmarshal_json_response.py b/src/opperai/utils/unmarshal_json_response.py index 19a3b4f..c3957b5 100644 --- a/src/opperai/utils/unmarshal_json_response.py +++ b/src/opperai/utils/unmarshal_json_response.py @@ -1,12 +1,26 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -from typing import Any, Optional +from typing import Any, Optional, Type, TypeVar, overload import httpx from .serializers import unmarshal_json from opperai import errors +T = TypeVar("T") + + +@overload +def unmarshal_json_response( + typ: Type[T], http_res: httpx.Response, body: Optional[str] = None +) -> T: ... + + +@overload +def unmarshal_json_response( + typ: Any, http_res: httpx.Response, body: Optional[str] = None +) -> Any: ... + def unmarshal_json_response( typ: Any, http_res: httpx.Response, body: Optional[str] = None