From 07be74f9bbcccb3c500f93be53b01c2d03280f77 Mon Sep 17 00:00:00 2001 From: Jose Sabater Date: Tue, 9 Dec 2025 12:01:55 +0100 Subject: [PATCH] adjust versioning --- .genignore | 1 + .speakeasy/gen.lock | 3498 ++++++++++++++--- .speakeasy/gen.yaml | 4 +- .../speakeasy-modifications-overlay.yaml | 40 +- .speakeasy/workflow.lock | 12 +- Makefile | 7 +- README.md | 1179 +----- ...publicv2functioncallcallfunctionrequest.md | 2 +- ...dfileknowledgeknowledgebaseiduploadpost.md | 11 + ...eknowledgeknowledgebaseiduploadpostfile.md | 10 + docs/models/createfunctionrequest.md | 2 +- docs/models/createfunctionresponse.md | 2 +- docs/models/feedbackinfo.md | 11 + docs/models/functioncallconfigurationinput.md | 16 +- .../models/functioncallconfigurationoutput.md | 14 +- docs/models/getfunctionresponse.md | 2 +- docs/models/getspanresponse.md | 3 +- docs/models/streamingchunk.md | 4 +- docs/models/submitfeedbackrequest.md | 10 + docs/models/submitfeedbackresponse.md | 10 + ...nfeedbackspansspanidfeedbackpostrequest.md | 9 + docs/models/updatefunctionrequest.md | 2 +- docs/models/updatefunctionresponse.md | 2 +- docs/models/updatespanrequest.md | 4 +- ...owledgeknowledgebaseiduploadpostrequest.md | 9 + docs/models/uploadfileresponse.md | 12 + pyproject.toml | 2 +- src/opperai/_version.py | 6 +- src/opperai/knowledge.py | 270 ++ src/opperai/models/__init__.py | 56 + ...knowledge_knowledge_base_id_upload_post.py | 92 + .../chatcompletionassistantmessageparam.py | 8 +- src/opperai/models/chatcompletionmessage.py | 11 +- .../models/chatcompletionnonstreaming.py | 33 +- src/opperai/models/chatcompletionstreaming.py | 33 +- src/opperai/models/feedbackinfo.py | 55 + .../function_stream_call_stream_postop.py | 32 +- .../models/functioncallconfiguration_input.py | 12 +- .../functioncallconfiguration_output.py | 12 +- src/opperai/models/getspanresponse.py | 8 + ...hat_completion_custom_tool_param_custom.py | 9 +- ..._feedback_spans_span_id_feedback_postop.py | 25 + src/opperai/models/submitfeedbackrequest.py | 56 + src/opperai/models/submitfeedbackresponse.py | 25 + ...owledge_knowledge_base_id_upload_postop.py | 30 + src/opperai/models/uploadfileresponse.py | 26 + src/opperai/spans.py | 258 ++ 47 files changed, 4135 insertions(+), 1800 deletions(-) create mode 100644 .genignore create mode 100644 docs/models/bodyuploadfileknowledgeknowledgebaseiduploadpost.md create mode 100644 docs/models/bodyuploadfileknowledgeknowledgebaseiduploadpostfile.md create mode 100644 docs/models/feedbackinfo.md create mode 100644 docs/models/submitfeedbackrequest.md create mode 100644 docs/models/submitfeedbackresponse.md create mode 100644 docs/models/submitspanfeedbackspansspanidfeedbackpostrequest.md create mode 100644 docs/models/uploadfileknowledgeknowledgebaseiduploadpostrequest.md create mode 100644 docs/models/uploadfileresponse.md create mode 100644 src/opperai/models/body_upload_file_knowledge_knowledge_base_id_upload_post.py create mode 100644 src/opperai/models/feedbackinfo.py create mode 100644 src/opperai/models/submit_span_feedback_spans_span_id_feedback_postop.py create mode 100644 src/opperai/models/submitfeedbackrequest.py create mode 100644 src/opperai/models/submitfeedbackresponse.py create mode 100644 src/opperai/models/upload_file_knowledge_knowledge_base_id_upload_postop.py create mode 100644 src/opperai/models/uploadfileresponse.py diff --git a/.genignore b/.genignore new file mode 100644 index 0000000..b43bf86 --- /dev/null +++ b/.genignore @@ -0,0 +1 @@ +README.md diff --git a/.speakeasy/gen.lock 
b/.speakeasy/gen.lock index 0963167..7a80464 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,16 @@ lockVersion: 2.0.0 id: c75567cf-9def-4617-8ae7-65af6f367b68 management: - docChecksum: a349bae08dffd8722915fe3e70c77eeb + docChecksum: ad849136d2135a913b15d8147c882059 docVersion: 2.0.0 - speakeasyVersion: 1.664.0 - generationVersion: 2.766.1 - releaseVersion: 1.6.5 - configChecksum: 7be41082871778bfb564435aecdc963a + speakeasyVersion: 1.673.0 + generationVersion: 2.776.1 + releaseVersion: 1.7.1 + configChecksum: 2efddbde2eaccea1f7f7420c5bbaf1c5 +persistentEdits: + generation_id: a968d6ec-034b-424b-b2b0-01d92b49c0bd + pristine_commit_hash: 090780c87208cd19ffaa456169aebd90b6fb35db + pristine_tree_hash: 3d6aff9f19f92e057e0e74a075885f69ae99b2c4 features: python: additionalDependencies: 1.0.0 @@ -15,6 +19,7 @@ features: core: 5.23.13 customCodeRegions: 0.1.1 defaultEnabledRetries: 0.2.0 + deprecations: 3.0.2 devContainers: 3.0.0 enumUnions: 0.1.0 envVarSecurityUsage: 0.3.2 @@ -26,588 +31,2297 @@ features: globalServerURLs: 3.2.0 groups: 3.0.1 methodArguments: 1.0.2 + multipartFileContentType: 1.0.0 nameOverrides: 3.0.1 nullables: 1.0.1 responseFormat: 1.0.1 retries: 3.0.3 - sdkHooks: 1.1.0 + sdkHooks: 1.2.0 serverEvents: 1.0.11 - unions: 3.1.0 -generatedFiles: - - .devcontainer/README.md - - .devcontainer/devcontainer.json - - .devcontainer/setup.sh - - .gitattributes - - .vscode/settings.json - - USAGE.md - - docs/errors/badrequesterror.md - - docs/errors/conflicterror.md - - docs/errors/error.md - - docs/errors/notfounderror.md - - docs/errors/requestvalidationerror.md - - docs/errors/unauthorizederror.md - - docs/models/addknowledgeknowledgebaseidaddpostrequest.md - - docs/models/addrequest.md - - docs/models/annotation.md - - docs/models/annotationurlcitation.md - - docs/models/appapipublicv2functioncallcallfunctionrequest.md - - docs/models/appapipublicv2functioncallcallfunctionresponse.md - - docs/models/appapipublicv2functionscallfunctionrequest.md - - docs/models/appapipublicv2functionscallfunctionresponse.md - - docs/models/audio.md - - docs/models/callfunctionfunctionsfunctionidcallpostrequest.md - - docs/models/callfunctionrevisionfunctionsfunctionidcallrevisionidpostrequest.md - - docs/models/chatcompletion.md - - docs/models/chatcompletionallowedtoolchoiceparam.md - - docs/models/chatcompletionallowedtoolsparam.md - - docs/models/chatcompletionassistantmessageparam.md - - docs/models/chatcompletionassistantmessageparamcontent1.md - - docs/models/chatcompletionassistantmessageparamcontent2.md - - docs/models/chatcompletionassistantmessageparamtoolcall.md - - docs/models/chatcompletionaudio.md - - docs/models/chatcompletionaudioparam.md - - docs/models/chatcompletionaudioparamformat.md - - docs/models/chatcompletioncontentpartimageparam.md - - docs/models/chatcompletioncontentpartinputaudioparam.md - - docs/models/chatcompletioncontentpartrefusalparam.md - - docs/models/chatcompletioncontentparttextparam.md - - docs/models/chatcompletioncustomtoolparam.md - - docs/models/chatcompletiondevelopermessageparam.md - - docs/models/chatcompletiondevelopermessageparamcontent.md - - docs/models/chatcompletionfunctioncalloptionparam.md - - docs/models/chatcompletionfunctionmessageparam.md - - docs/models/chatcompletionfunctiontoolparam.md - - docs/models/chatcompletionmessage.md - - docs/models/chatcompletionmessagecustomtoolcall.md - - docs/models/chatcompletionmessagecustomtoolcallparam.md - - docs/models/chatcompletionmessagefunctiontoolcall.md - - 
docs/models/chatcompletionmessagefunctiontoolcallparam.md - - docs/models/chatcompletionmessagetoolcall.md - - docs/models/chatcompletionnamedtoolchoicecustomparam.md - - docs/models/chatcompletionnamedtoolchoiceparam.md - - docs/models/chatcompletionnonstreaming.md - - docs/models/chatcompletionnonstreamingfunctioncallenum.md - - docs/models/chatcompletionnonstreamingfunctioncallunion.md - - docs/models/chatcompletionnonstreamingmessage.md - - docs/models/chatcompletionnonstreamingmodality.md - - docs/models/chatcompletionnonstreamingreasoningeffort.md - - docs/models/chatcompletionnonstreamingresponseformat.md - - docs/models/chatcompletionnonstreamingservicetier.md - - docs/models/chatcompletionnonstreamingstop.md - - docs/models/chatcompletionnonstreamingtool.md - - docs/models/chatcompletionnonstreamingtoolchoiceenum.md - - docs/models/chatcompletionnonstreamingtoolchoiceunion.md - - docs/models/chatcompletionnonstreamingverbosity.md - - docs/models/chatcompletionpredictioncontentparam.md - - docs/models/chatcompletionpredictioncontentparamcontent.md - - docs/models/chatcompletionservicetier.md - - docs/models/chatcompletionstreaming.md - - docs/models/chatcompletionstreamingfunctioncallenum.md - - docs/models/chatcompletionstreamingfunctioncallunion.md - - docs/models/chatcompletionstreamingmessage.md - - docs/models/chatcompletionstreamingmodality.md - - docs/models/chatcompletionstreamingreasoningeffort.md - - docs/models/chatcompletionstreamingresponseformat.md - - docs/models/chatcompletionstreamingservicetier.md - - docs/models/chatcompletionstreamingstop.md - - docs/models/chatcompletionstreamingtool.md - - docs/models/chatcompletionstreamingtoolchoiceenum.md - - docs/models/chatcompletionstreamingtoolchoiceunion.md - - docs/models/chatcompletionstreamingverbosity.md - - docs/models/chatcompletionstreamoptionsparam.md - - docs/models/chatcompletionsystemmessageparam.md - - docs/models/chatcompletionsystemmessageparamcontent.md - - docs/models/chatcompletiontokenlogprob.md - - docs/models/chatcompletiontoolmessageparam.md - - docs/models/chatcompletiontoolmessageparamcontent.md - - docs/models/chatcompletionusermessageparam.md - - docs/models/chatcompletionusermessageparamcontent1.md - - docs/models/chatcompletionusermessageparamcontent2.md - - docs/models/choice.md - - docs/models/choicelogprobs.md - - docs/models/completiontokensdetails.md - - docs/models/completionusage.md - - docs/models/createdatasetentrydatasetsdatasetidpostrequest.md - - docs/models/createdatasetentryrequest.md - - docs/models/createdatasetentryresponse.md - - docs/models/createembeddingrequest.md - - docs/models/createembeddingresponse.md - - docs/models/createfunctionrequest.md - - docs/models/createfunctionresponse.md - - docs/models/createknowledgebaserequest.md - - docs/models/createknowledgebaseresponse.md - - docs/models/createmetricspansspanidmetricspostrequest.md - - docs/models/createmodelaliasrequest.md - - docs/models/createmodelaliasresponse.md - - docs/models/createspanmetricrequest.md - - docs/models/createspanmetricresponse.md - - docs/models/createspanrequest.md - - docs/models/createspanresponse.md - - docs/models/customformatgrammar.md - - docs/models/customformatgrammargrammar.md - - docs/models/customformattext.md - - docs/models/customoutput.md - - docs/models/deletecustommodelmodelscustommodeliddeleterequest.md - - docs/models/deletedatasetentrydatasetsdatasetidentriesentryiddeleterequest.md - - docs/models/deletedocumentsknowledgeknowledgebaseidquerydeleterequest.md - - 
docs/models/deletefilefromknowledgebaseknowledgeknowledgebaseidfilesfileiddeleterequest.md - - docs/models/deletefunctionfunctionsfunctioniddeleterequest.md - - docs/models/deleteknowledgebaseknowledgeknowledgebaseiddeleterequest.md - - docs/models/deleteknowledgebaserequest.md - - docs/models/deleteknowledgebaseresponse.md - - docs/models/deletemetricspansspanidmetricsmetriciddeleterequest.md - - docs/models/deletemodelaliasmodelsaliasesaliasiddeleterequest.md - - docs/models/deletespanspansspaniddeleterequest.md - - docs/models/delta.md - - docs/models/detail.md - - docs/models/example.md - - docs/models/examplein.md - - docs/models/file.md - - docs/models/filedownloadurlresponse.md - - docs/models/filefile.md - - docs/models/filter_.md - - docs/models/finishreason.md - - docs/models/format_.md - - docs/models/functioncallconfigurationinput.md - - docs/models/functioncallconfigurationoutput.md - - docs/models/functioncallinput.md - - docs/models/functioncalloutput.md - - docs/models/functiondefinition.md - - docs/models/functionoutput.md - - docs/models/functionstreamcallstreampostresponse.md - - docs/models/functionstreamcallstreampostresponsebody.md - - docs/models/getcustommodelbynamemodelscustombynamenamegetrequest.md - - docs/models/getcustommodelmodelscustommodelidgetrequest.md - - docs/models/getcustommodelresponse.md - - docs/models/getdatasetentriesresponse.md - - docs/models/getdatasetentrydatasetsdatasetidentriesentryidgetrequest.md - - docs/models/getdatasetentryresponse.md - - docs/models/getfiledownloadurlknowledgeknowledgebaseidfilesfileiddownloadurlgetrequest.md - - docs/models/getfunctionbynamefunctionsbynamenamegetrequest.md - - docs/models/getfunctionbyrevisionfunctionsfunctionidrevisionsrevisionidgetrequest.md - - docs/models/getfunctionfunctionsfunctionidgetrequest.md - - docs/models/getfunctionresponse.md - - docs/models/getknowledgebasebynameknowledgebynameknowledgebasenamegetrequest.md - - docs/models/getknowledgebaseknowledgeknowledgebaseidgetrequest.md - - docs/models/getknowledgebaseresponse.md - - docs/models/getmetricspansspanidmetricsmetricidgetrequest.md - - docs/models/getmodelaliasbynamemodelsaliasesbynamenamegetrequest.md - - docs/models/getmodelaliasmodelsaliasesaliasidgetrequest.md - - docs/models/getmodelaliasresponse.md - - docs/models/getspanmetricresponse.md - - docs/models/getspanresponse.md - - docs/models/getspanspansspanidgetrequest.md - - docs/models/gettraceresponse.md - - docs/models/gettracetracestraceidgetrequest.md - - docs/models/getuploadurlknowledgeknowledgebaseiduploadurlgetrequest.md - - docs/models/getuploadurlresponse.md - - docs/models/getusageresultitem.md - - docs/models/granularity.md - - docs/models/imageurl.md - - docs/models/input.md - - docs/models/inputaudio.md - - docs/models/inputaudioformat.md - - docs/models/jsonpayload.md - - docs/models/jsonschema.md - - docs/models/listcustommodelsmodelscustomgetrequest.md - - docs/models/listcustommodelsresponseitem.md - - docs/models/listdatasetentriesdatasetsdatasetidentriesgetrequest.md - - docs/models/listfilesknowledgeknowledgebaseidfilesgetrequest.md - - docs/models/listfilesresponse.md - - docs/models/listfunctionrevisionresponse.md - - docs/models/listfunctionrevisionsfunctionsfunctionidrevisionsgetrequest.md - - docs/models/listfunctionsfunctionsgetrequest.md - - docs/models/listfunctionsresponseitem.md - - docs/models/listknowledgebasesknowledgegetrequest.md - - docs/models/listknowledgebasesresponse.md - - docs/models/listlanguagemodelsresponse.md - - 
docs/models/listmetricsspansspanidmetricsgetrequest.md - - docs/models/listmodelaliasesmodelsaliasesgetrequest.md - - docs/models/listmodelaliasesresponseitem.md - - docs/models/listmodelsmodelsgetrequest.md - - docs/models/listrerankmodelsresponse.md - - docs/models/listspanmetricsresponse.md - - docs/models/listtracesresponse.md - - docs/models/listtracestracesgetrequest.md - - docs/models/meta.md - - docs/models/mode.md - - docs/models/model.md - - docs/models/op.md - - docs/models/openaitypeschatchatcompletioncustomtoolparamcustom.md - - docs/models/openaitypeschatchatcompletionmessagecustomtoolcallparamcustom.md - - docs/models/openaitypeschatchatcompletionmessagefunctiontoolcallparamfunction.md - - docs/models/openaitypeschatchatcompletionnamedtoolchoicecustomparamcustom.md - - docs/models/openaitypeschatchatcompletionnamedtoolchoiceparamfunction.md - - docs/models/openaitypeschatcompletioncreateparamsfunction.md - - docs/models/paginatedresponsegetdatasetentriesresponse.md - - docs/models/paginatedresponselistcustommodelsresponseitem.md - - docs/models/paginatedresponselistfilesresponse.md - - docs/models/paginatedresponselistfunctionrevisionresponse.md - - docs/models/paginatedresponselistfunctionsresponseitem.md - - docs/models/paginatedresponselistknowledgebasesresponse.md - - docs/models/paginatedresponselistlanguagemodelsresponse.md - - docs/models/paginatedresponselistmodelaliasesresponseitem.md - - docs/models/paginatedresponselistrerankmodelsresponse.md - - docs/models/paginatedresponselistspanmetricsresponse.md - - docs/models/paginatedresponselisttracesresponse.md - - docs/models/payload.md - - docs/models/prompttokensdetails.md - - docs/models/querydatasetentriesdatasetsdatasetidentriesquerypostrequest.md - - docs/models/querydatasetentriesresponse.md - - docs/models/queryknowledgebaseknowledgeknowledgebaseidquerypostrequest.md - - docs/models/queryknowledgebaserequest.md - - docs/models/queryknowledgebaseresponse.md - - docs/models/registercustommodelrequest.md - - docs/models/registercustommodelresponse.md - - docs/models/registerfileuploadknowledgeknowledgebaseidregisterfilepostrequest.md - - docs/models/registerfileuploadrequest.md - - docs/models/registerfileuploadresponse.md - - docs/models/rerankcost.md - - docs/models/rerankdocument.md - - docs/models/rerankrequestmodel.md - - docs/models/rerankresponsemodel.md - - docs/models/rerankresult.md - - docs/models/responseformatjsonobject.md - - docs/models/responseformatjsonschema.md - - docs/models/responseformattext.md - - docs/models/savetodatasetresponse.md - - docs/models/savetodatasetspansspanidsaveexamplespostrequest.md - - docs/models/searchcontextsize.md - - docs/models/security.md - - docs/models/spandata.md - - docs/models/spanmetricdata.md - - docs/models/spanschema.md - - docs/models/streamfunctionfunctionsfunctionidcallstreampostdata.md - - docs/models/streamfunctionfunctionsfunctionidcallstreampostrequest.md - - docs/models/streamfunctionfunctionsfunctionidcallstreampostresponse.md - - docs/models/streamfunctionfunctionsfunctionidcallstreampostresponsebody.md - - docs/models/streamfunctionrevisionfunctionsfunctionidcallstreamrevisionidpostdata.md - - docs/models/streamfunctionrevisionfunctionsfunctionidcallstreamrevisionidpostrequest.md - - docs/models/streamfunctionrevisionfunctionsfunctionidcallstreamrevisionidpostresponse.md - - docs/models/streamfunctionrevisionfunctionsfunctionidcallstreamrevisionidpostresponsebody.md - - docs/models/streamingchunk.md - - docs/models/syntax.md - - 
docs/models/textprocessingconfiguration.md - - docs/models/tmodel.md - - docs/models/tmodel1.md - - docs/models/toplogprob.md - - docs/models/updatecustommodelmodelscustommodelidpatchrequest.md - - docs/models/updatecustommodelrequest.md - - docs/models/updatecustommodelresponse.md - - docs/models/updatedatasetentrydatasetsdatasetidentriesentryidpatchrequest.md - - docs/models/updatedatasetentryrequest.md - - docs/models/updatedatasetentryresponse.md - - docs/models/updatefunctionfunctionsfunctionidpatchrequest.md - - docs/models/updatefunctionrequest.md - - docs/models/updatefunctionresponse.md - - docs/models/updatemetricspansspanidmetricsmetricidpatchrequest.md - - docs/models/updatemodelaliasmodelsaliasesaliasidpatchrequest.md - - docs/models/updatemodelaliasrequest.md - - docs/models/updatemodelaliasresponse.md - - docs/models/updatespanmetricrequest.md - - docs/models/updatespanmetricresponse.md - - docs/models/updatespanrequest.md - - docs/models/updatespanresponse.md - - docs/models/updatespanspansspanidpatchrequest.md - - docs/models/usageanalyticsusagegetrequest.md - - docs/models/utils/retryconfig.md - - docs/models/value1.md - - docs/models/value2.md - - docs/models/voice.md - - docs/models/voiceenum.md - - docs/models/websearchoptions.md - - docs/models/websearchoptionsuserlocation.md - - docs/models/websearchoptionsuserlocationapproximate.md - - docs/sdks/analytics/README.md - - docs/sdks/datasets/README.md - - docs/sdks/embeddings/README.md - - docs/sdks/entries/README.md - - docs/sdks/functions/README.md - - docs/sdks/knowledge/README.md - - docs/sdks/languagemodels/README.md - - docs/sdks/openai/README.md - - docs/sdks/opper/README.md - - docs/sdks/rerank/README.md - - docs/sdks/revisions/README.md - - docs/sdks/spanmetrics/README.md - - docs/sdks/spans/README.md - - docs/sdks/traces/README.md - - poetry.toml - - py.typed - - pylintrc - - pyproject.toml - - scripts/publish.sh - - src/opperai/__init__.py - - src/opperai/_hooks/__init__.py - - src/opperai/_hooks/sdkhooks.py - - src/opperai/_hooks/types.py - - src/opperai/_version.py - - src/opperai/analytics.py - - src/opperai/basesdk.py - - src/opperai/datasets.py - - src/opperai/embeddings.py - - src/opperai/entries.py - - src/opperai/errors/__init__.py - - src/opperai/errors/apierror.py - - src/opperai/errors/badrequesterror.py - - src/opperai/errors/conflicterror.py - - src/opperai/errors/error.py - - src/opperai/errors/no_response_error.py - - src/opperai/errors/notfounderror.py - - src/opperai/errors/oppererror.py - - src/opperai/errors/requestvalidationerror.py - - src/opperai/errors/responsevalidationerror.py - - src/opperai/errors/unauthorizederror.py - - src/opperai/functions.py - - src/opperai/httpclient.py - - src/opperai/knowledge.py - - src/opperai/language_models.py - - src/opperai/models/__init__.py - - src/opperai/models/add_knowledge_knowledge_base_id_add_postop.py - - src/opperai/models/addrequest.py - - src/opperai/models/annotation.py - - src/opperai/models/annotationurlcitation.py - - src/opperai/models/app_api_public_v2_function_call_callfunctionrequest.py - - src/opperai/models/app_api_public_v2_function_call_callfunctionresponse.py - - src/opperai/models/app_api_public_v2_functions_callfunctionrequest.py - - src/opperai/models/app_api_public_v2_functions_callfunctionresponse.py - - src/opperai/models/audio.py - - src/opperai/models/call_function_functions_function_id_call_postop.py - - src/opperai/models/call_function_revision_functions_function_id_call_revision_id_postop.py - - 
src/opperai/models/chat_completions_openai_chat_completions_postop.py - - src/opperai/models/chatcompletion.py - - src/opperai/models/chatcompletionallowedtoolchoiceparam.py - - src/opperai/models/chatcompletionallowedtoolsparam.py - - src/opperai/models/chatcompletionassistantmessageparam.py - - src/opperai/models/chatcompletionaudio.py - - src/opperai/models/chatcompletionaudioparam.py - - src/opperai/models/chatcompletioncontentpartimageparam.py - - src/opperai/models/chatcompletioncontentpartinputaudioparam.py - - src/opperai/models/chatcompletioncontentpartrefusalparam.py - - src/opperai/models/chatcompletioncontentparttextparam.py - - src/opperai/models/chatcompletioncustomtoolparam.py - - src/opperai/models/chatcompletiondevelopermessageparam.py - - src/opperai/models/chatcompletionfunctioncalloptionparam.py - - src/opperai/models/chatcompletionfunctionmessageparam.py - - src/opperai/models/chatcompletionfunctiontoolparam.py - - src/opperai/models/chatcompletionmessage.py - - src/opperai/models/chatcompletionmessagecustomtoolcall.py - - src/opperai/models/chatcompletionmessagecustomtoolcallparam.py - - src/opperai/models/chatcompletionmessagefunctiontoolcall.py - - src/opperai/models/chatcompletionmessagefunctiontoolcallparam.py - - src/opperai/models/chatcompletionnamedtoolchoicecustomparam.py - - src/opperai/models/chatcompletionnamedtoolchoiceparam.py - - src/opperai/models/chatcompletionnonstreaming.py - - src/opperai/models/chatcompletionpredictioncontentparam.py - - src/opperai/models/chatcompletionstreaming.py - - src/opperai/models/chatcompletionstreamoptionsparam.py - - src/opperai/models/chatcompletionsystemmessageparam.py - - src/opperai/models/chatcompletiontokenlogprob.py - - src/opperai/models/chatcompletiontoolmessageparam.py - - src/opperai/models/chatcompletionusermessageparam.py - - src/opperai/models/choice.py - - src/opperai/models/choicelogprobs.py - - src/opperai/models/completiontokensdetails.py - - src/opperai/models/completionusage.py - - src/opperai/models/create_dataset_entry_datasets_dataset_id_postop.py - - src/opperai/models/create_metric_spans_span_id_metrics_postop.py - - src/opperai/models/createdatasetentryrequest.py - - src/opperai/models/createdatasetentryresponse.py - - src/opperai/models/createembeddingrequest.py - - src/opperai/models/createembeddingresponse.py - - src/opperai/models/createfunctionrequest.py - - src/opperai/models/createfunctionresponse.py - - src/opperai/models/createknowledgebaserequest.py - - src/opperai/models/createknowledgebaseresponse.py - - src/opperai/models/createmodelaliasrequest.py - - src/opperai/models/createmodelaliasresponse.py - - src/opperai/models/createspanmetricrequest.py - - src/opperai/models/createspanmetricresponse.py - - src/opperai/models/createspanrequest.py - - src/opperai/models/createspanresponse.py - - src/opperai/models/custom_output.py - - src/opperai/models/customformatgrammar.py - - src/opperai/models/customformatgrammargrammar.py - - src/opperai/models/customformattext.py - - src/opperai/models/delete_custom_model_models_custom_model_id_deleteop.py - - src/opperai/models/delete_dataset_entry_datasets_dataset_id_entries_entry_id_deleteop.py - - src/opperai/models/delete_documents_knowledge_knowledge_base_id_query_deleteop.py - - src/opperai/models/delete_file_from_knowledge_base_knowledge_knowledge_base_id_files_file_id_deleteop.py - - src/opperai/models/delete_function_functions_function_id_deleteop.py - - src/opperai/models/delete_knowledge_base_knowledge_knowledge_base_id_deleteop.py - - 
src/opperai/models/delete_metric_spans_span_id_metrics_metric_id_deleteop.py - - src/opperai/models/delete_model_alias_models_aliases_alias_id_deleteop.py - - src/opperai/models/delete_span_spans_span_id_deleteop.py - - src/opperai/models/deleteknowledgebaserequest.py - - src/opperai/models/deleteknowledgebaseresponse.py - - src/opperai/models/example.py - - src/opperai/models/examplein.py - - src/opperai/models/file.py - - src/opperai/models/filedownloadurlresponse.py - - src/opperai/models/filefile.py - - src/opperai/models/filter_.py - - src/opperai/models/function_output.py - - src/opperai/models/function_stream_call_stream_postop.py - - src/opperai/models/functioncall_input.py - - src/opperai/models/functioncall_output.py - - src/opperai/models/functioncallconfiguration_input.py - - src/opperai/models/functioncallconfiguration_output.py - - src/opperai/models/functiondefinition.py - - src/opperai/models/get_custom_model_by_name_models_custom_by_name_name_getop.py - - src/opperai/models/get_custom_model_models_custom_model_id_getop.py - - src/opperai/models/get_dataset_entry_datasets_dataset_id_entries_entry_id_getop.py - - src/opperai/models/get_file_download_url_knowledge_knowledge_base_id_files_file_id_download_url_getop.py - - src/opperai/models/get_function_by_name_functions_by_name_name_getop.py - - src/opperai/models/get_function_by_revision_functions_function_id_revisions_revision_id_getop.py - - src/opperai/models/get_function_functions_function_id_getop.py - - src/opperai/models/get_knowledge_base_by_name_knowledge_by_name_knowledge_base_name_getop.py - - src/opperai/models/get_knowledge_base_knowledge_knowledge_base_id_getop.py - - src/opperai/models/get_metric_spans_span_id_metrics_metric_id_getop.py - - src/opperai/models/get_model_alias_by_name_models_aliases_by_name_name_getop.py - - src/opperai/models/get_model_alias_models_aliases_alias_id_getop.py - - src/opperai/models/get_span_spans_span_id_getop.py - - src/opperai/models/get_trace_traces_trace_id_getop.py - - src/opperai/models/get_upload_url_knowledge_knowledge_base_id_upload_url_getop.py - - src/opperai/models/getcustommodelresponse.py - - src/opperai/models/getdatasetentriesresponse.py - - src/opperai/models/getdatasetentryresponse.py - - src/opperai/models/getfunctionresponse.py - - src/opperai/models/getknowledgebaseresponse.py - - src/opperai/models/getmodelaliasresponse.py - - src/opperai/models/getspanmetricresponse.py - - src/opperai/models/getspanresponse.py - - src/opperai/models/gettraceresponse.py - - src/opperai/models/getuploadurlresponse.py - - src/opperai/models/getusageresultitem.py - - src/opperai/models/granularity.py - - src/opperai/models/imageurl.py - - src/opperai/models/inputaudio.py - - src/opperai/models/jsonschema.py - - src/opperai/models/list_custom_models_models_custom_getop.py - - src/opperai/models/list_dataset_entries_datasets_dataset_id_entries_getop.py - - src/opperai/models/list_files_knowledge_knowledge_base_id_files_getop.py - - src/opperai/models/list_function_revisions_functions_function_id_revisions_getop.py - - src/opperai/models/list_functions_functions_getop.py - - src/opperai/models/list_knowledge_bases_knowledge_getop.py - - src/opperai/models/list_metrics_spans_span_id_metrics_getop.py - - src/opperai/models/list_model_aliases_models_aliases_getop.py - - src/opperai/models/list_models_models_getop.py - - src/opperai/models/list_traces_traces_getop.py - - src/opperai/models/listcustommodelsresponseitem.py - - src/opperai/models/listfilesresponse.py - - 
src/opperai/models/listfunctionrevisionresponse.py - - src/opperai/models/listfunctionsresponseitem.py - - src/opperai/models/listknowledgebasesresponse.py - - src/opperai/models/listlanguagemodelsresponse.py - - src/opperai/models/listmodelaliasesresponseitem.py - - src/opperai/models/listrerankmodelsresponse.py - - src/opperai/models/listspanmetricsresponse.py - - src/opperai/models/listtracesresponse.py - - src/opperai/models/meta.py - - src/opperai/models/model.py - - src/opperai/models/op.py - - src/opperai/models/openai_types_chat_chat_completion_custom_tool_param_custom.py - - src/opperai/models/openai_types_chat_chat_completion_message_custom_tool_call_param_custom.py - - src/opperai/models/openai_types_chat_chat_completion_message_function_tool_call_param_function.py - - src/opperai/models/openai_types_chat_chat_completion_named_tool_choice_custom_param_custom.py - - src/opperai/models/openai_types_chat_chat_completion_named_tool_choice_param_function.py - - src/opperai/models/openai_types_chat_completion_create_params_function.py - - src/opperai/models/paginatedresponse_getdatasetentriesresponse_.py - - src/opperai/models/paginatedresponse_listcustommodelsresponseitem_.py - - src/opperai/models/paginatedresponse_listfilesresponse_.py - - src/opperai/models/paginatedresponse_listfunctionrevisionresponse_.py - - src/opperai/models/paginatedresponse_listfunctionsresponseitem_.py - - src/opperai/models/paginatedresponse_listknowledgebasesresponse_.py - - src/opperai/models/paginatedresponse_listlanguagemodelsresponse_.py - - src/opperai/models/paginatedresponse_listmodelaliasesresponseitem_.py - - src/opperai/models/paginatedresponse_listrerankmodelsresponse_.py - - src/opperai/models/paginatedresponse_listspanmetricsresponse_.py - - src/opperai/models/paginatedresponse_listtracesresponse_.py - - src/opperai/models/prompttokensdetails.py - - src/opperai/models/query_dataset_entries_datasets_dataset_id_entries_query_postop.py - - src/opperai/models/query_knowledge_base_knowledge_knowledge_base_id_query_postop.py - - src/opperai/models/querydatasetentriesresponse.py - - src/opperai/models/queryknowledgebaserequest.py - - src/opperai/models/queryknowledgebaseresponse.py - - src/opperai/models/register_file_upload_knowledge_knowledge_base_id_register_file_postop.py - - src/opperai/models/registercustommodelrequest.py - - src/opperai/models/registercustommodelresponse.py - - src/opperai/models/registerfileuploadrequest.py - - src/opperai/models/registerfileuploadresponse.py - - src/opperai/models/rerankcost.py - - src/opperai/models/rerankdocument.py - - src/opperai/models/rerankrequestmodel.py - - src/opperai/models/rerankresponsemodel.py - - src/opperai/models/rerankresult.py - - src/opperai/models/responseformatjsonobject.py - - src/opperai/models/responseformatjsonschema.py - - src/opperai/models/responseformattext.py - - src/opperai/models/save_to_dataset_spans_span_id_save_examples_postop.py - - src/opperai/models/savetodatasetresponse.py - - src/opperai/models/security.py - - src/opperai/models/spandata.py - - src/opperai/models/spanmetricdata.py - - src/opperai/models/spanschema.py - - src/opperai/models/stream_function_functions_function_id_call_stream_postop.py - - src/opperai/models/stream_function_revision_functions_function_id_call_stream_revision_id_postop.py - - src/opperai/models/textprocessingconfiguration.py - - src/opperai/models/tmodel.py - - src/opperai/models/toplogprob.py - - src/opperai/models/update_custom_model_models_custom_model_id_patchop.py - - 
src/opperai/models/update_dataset_entry_datasets_dataset_id_entries_entry_id_patchop.py - - src/opperai/models/update_function_functions_function_id_patchop.py - - src/opperai/models/update_metric_spans_span_id_metrics_metric_id_patchop.py - - src/opperai/models/update_model_alias_models_aliases_alias_id_patchop.py - - src/opperai/models/update_span_spans_span_id_patchop.py - - src/opperai/models/updatecustommodelrequest.py - - src/opperai/models/updatecustommodelresponse.py - - src/opperai/models/updatedatasetentryrequest.py - - src/opperai/models/updatedatasetentryresponse.py - - src/opperai/models/updatefunctionrequest.py - - src/opperai/models/updatefunctionresponse.py - - src/opperai/models/updatemodelaliasrequest.py - - src/opperai/models/updatemodelaliasresponse.py - - src/opperai/models/updatespanmetricrequest.py - - src/opperai/models/updatespanmetricresponse.py - - src/opperai/models/updatespanrequest.py - - src/opperai/models/updatespanresponse.py - - src/opperai/models/usage_analytics_usage_getop.py - - src/opperai/models/websearchoptions.py - - src/opperai/models/websearchoptionsuserlocation.py - - src/opperai/models/websearchoptionsuserlocationapproximate.py - - src/opperai/openai.py - - src/opperai/py.typed - - src/opperai/rerank.py - - src/opperai/revisions.py - - src/opperai/sdk.py - - src/opperai/sdkconfiguration.py - - src/opperai/spanmetrics.py - - src/opperai/spans.py - - src/opperai/traces.py - - src/opperai/types/__init__.py - - src/opperai/types/basemodel.py - - src/opperai/utils/__init__.py - - src/opperai/utils/annotations.py - - src/opperai/utils/datetimes.py - - src/opperai/utils/enums.py - - src/opperai/utils/eventstreaming.py - - src/opperai/utils/forms.py - - src/opperai/utils/headers.py - - src/opperai/utils/logger.py - - src/opperai/utils/metadata.py - - src/opperai/utils/queryparams.py - - src/opperai/utils/requestbodies.py - - src/opperai/utils/retries.py - - src/opperai/utils/security.py - - src/opperai/utils/serializers.py - - src/opperai/utils/unmarshal_json_response.py - - src/opperai/utils/url.py - - src/opperai/utils/values.py + unions: 3.1.1 + uploadStreams: 1.0.0 +trackedFiles: + .devcontainer/devcontainer.json: + id: b34062a34eb1 + last_write_checksum: sha1:fbbce5654b8990b172a9ffe841d7635cd51c495b + pristine_git_object: 7e67af9f5b5aeed56bab8dd84f75d3729df3ada1 + .devcontainer/setup.sh: + id: 5f1dfbfeb8eb + last_write_checksum: sha1:1a8cedad7e993739dbfebdb8c3f070ea0d45fa2a + pristine_git_object: 9abf35e153fc4117f2bae31fdf24fab9b9211699 + .gitattributes: + id: 24139dae6567 + last_write_checksum: sha1:53134de3ada576f37c22276901e1b5b6d85cd2da + pristine_git_object: 4d75d59008e4d8609876d263419a9dc56c8d6f3a + .vscode/settings.json: + id: 89aa447020cd + last_write_checksum: sha1:f84632c81029fcdda8c3b0c768d02b836fc80526 + pristine_git_object: 8d79f0abb72526f1fb34a4c03e5bba612c6ba2ae + USAGE.md: + id: 3aed33ce6e6f + last_write_checksum: sha1:80470ebe301df55fb2d09badec493e17c3c4fbfb + pristine_git_object: 094e32afe3acbade520b737350c44e905b5fa1f9 + docs/errors/badrequesterror.md: + id: 75b54f860141 + last_write_checksum: sha1:3d36470d86acba69b57a37c1ef13f4f85cc0237a + pristine_git_object: 3e5f0be9fb15dd3586dc4e1240592755b6692d4a + docs/errors/conflicterror.md: + id: 87bc1d8e340b + last_write_checksum: sha1:fe634df8910b5764327fda0eab3e23bbe5c6717e + pristine_git_object: 496c1b5de2ab55756c4c7ad92b9183b3eda80a63 + docs/errors/error.md: + id: 098e4ba23534 + last_write_checksum: sha1:b62e6cf06b29eaa8e04d0997d1664d828bf3dbe5 + pristine_git_object: 
000b2dee9b902cd58eeef82b84f32a6d3f779076 + docs/errors/notfounderror.md: + id: 2bd9ff5d704d + last_write_checksum: sha1:f601ec69c2f9b7f1a878d56877e325c38f89273f + pristine_git_object: a9d97a8e9559c86a8ac9cbf9eda7f89fa9569a76 + docs/errors/requestvalidationerror.md: + id: e670e340955f + last_write_checksum: sha1:c2a5370a99f768bbc11fe03ac99a86ca57191647 + pristine_git_object: 8b808cdd7e63bffe41b6e1f3356f3fe976a36429 + docs/errors/unauthorizederror.md: + id: c54ee3b4e5ad + last_write_checksum: sha1:5096302d94ff096a05e3f287cf50a5bc0bf81d21 + pristine_git_object: 59ed322cbd9cae9bb81abf6a97ce69e747a96046 + docs/models/addknowledgeknowledgebaseidaddpostrequest.md: + id: 185a734035a5 + last_write_checksum: sha1:d9439cd6adba48240f7a5cb303808afe2fadc5e7 + pristine_git_object: 4af2d928f82b30e47949e26abab7139669c51b6e + docs/models/addrequest.md: + id: 3e93941a80eb + last_write_checksum: sha1:cef9c111c9abe9710fa51db8dc61288e0ab171a2 + pristine_git_object: 5cc07e7e4cff9ffc4637337116e052ff857fc27f + docs/models/annotation.md: + id: faab6b24438b + last_write_checksum: sha1:f2ca22ce58710528a5d78a98cc14a97bfa622f7d + pristine_git_object: 4acc86084615148b2eb163e8fb09d0c761b5d9af + docs/models/annotationurlcitation.md: + id: 3a73378fa514 + last_write_checksum: sha1:684e2a96c2af3a1a3fcb008875b6ae17678a8ba3 + pristine_git_object: a567a64d961e76376338dd0058addfd36d2520ee + docs/models/appapipublicv2functioncallcallfunctionrequest.md: + id: 9cf1fdcedc3a + last_write_checksum: sha1:6aeba4ecdfe6519909f44fabdc57be03f134c9a8 + pristine_git_object: d57bfe84a3a6300247417da9d1b64742837d18d3 + docs/models/appapipublicv2functioncallcallfunctionresponse.md: + id: 428a6cce6b14 + last_write_checksum: sha1:1d68f6c366336669f64eb8370737d9c14d91aca4 + pristine_git_object: 1f402f0878d199f2235cd17e1fe5c36ccfa196f1 + docs/models/appapipublicv2functionscallfunctionrequest.md: + id: e9f082ece3bc + last_write_checksum: sha1:18eea4e02d31b0370d628d389e23525d062786b9 + pristine_git_object: 50f43382f65f661b97976e340fdd593cc6113f8b + docs/models/appapipublicv2functionscallfunctionresponse.md: + id: 062b4a4a392e + last_write_checksum: sha1:f5f9709b21d313c60ab79ce126c0f01caf317866 + pristine_git_object: 90218d3279c0f6c50191ad90164a55f88cfcfbe3 + docs/models/audio.md: + id: 197ec2b1424a + last_write_checksum: sha1:2bcf3b1a13574a9d9500878acc0c6520a3089f2f + pristine_git_object: fc6fb7b38fc8deea2f5cd452ac1589caa193ad50 + docs/models/bodyuploadfileknowledgeknowledgebaseiduploadpost.md: + id: 6f240addceef + last_write_checksum: sha1:60f75e08ee4cde51d017a9abac0be93f93aa747e + pristine_git_object: 2a88e22d59c3009eb6726734a3dc0ffde23187b0 + docs/models/bodyuploadfileknowledgeknowledgebaseiduploadpostfile.md: + id: 89cc22c950cb + last_write_checksum: sha1:17348fee87616c898163d0610a59cd30ee6a6e6d + pristine_git_object: 25c60f4f4aaa0e0f5dd34379bd6ec92189bc8991 + docs/models/callfunctionfunctionsfunctionidcallpostrequest.md: + id: e5f44d1ff715 + last_write_checksum: sha1:8b67e61424dad26f3d558c11c6b8c30167032e2b + pristine_git_object: 8fb10d6ada5388ad60a695e7bf8f33d6164532f8 + docs/models/callfunctionrevisionfunctionsfunctionidcallrevisionidpostrequest.md: + id: "074638653196" + last_write_checksum: sha1:d083d869ed5f3ae1d2cbdbcaf2a105de17a20f16 + pristine_git_object: f40b789537977d700e6a67b2505bbc379bbd385a + docs/models/chatcompletion.md: + id: 7087ed1b2694 + last_write_checksum: sha1:553ddd850762d9ed059aa5d0827f88f47b452958 + pristine_git_object: 722ed0b3a867c606e0a67ebd58c21bb73b42f916 + docs/models/chatcompletionallowedtoolchoiceparam.md: + id: 
e3fed3e89033 + last_write_checksum: sha1:5ea0e0afa754a4cd08691f47935a1a6be8f9516f + pristine_git_object: e596d58c23e01bdf8c34beb121efa9387a15f3f3 + docs/models/chatcompletionallowedtoolsparam.md: + id: 9df48f583aea + last_write_checksum: sha1:cc402da0340b529dd279c4402a8be23f1d1f4938 + pristine_git_object: e636e49c850d329722f531f234721186cfc1f5eb + docs/models/chatcompletionassistantmessageparam.md: + id: 98527d311b33 + last_write_checksum: sha1:90a7f98de2bc07db7efaa39cd8858bd01764945a + pristine_git_object: bbb760e10b7bb31a5b848b179b02b6bbc5cb6df4 + docs/models/chatcompletionassistantmessageparamcontent1.md: + id: a9a9724a28b0 + last_write_checksum: sha1:eca92a66eb7d92efa1552c0691d743e3d1e3f778 + pristine_git_object: 8da3eab30d2ea7f50cd1a7c2166b933df1f827f1 + docs/models/chatcompletionassistantmessageparamcontent2.md: + id: 9f19cabf905c + last_write_checksum: sha1:3bc856d8e881084a002a76cf89f30429eee04f8b + pristine_git_object: 521d58cdb8aec9b0568a6fe18a9556c0adfc84f5 + docs/models/chatcompletionassistantmessageparamtoolcall.md: + id: 0a94ebcd8495 + last_write_checksum: sha1:f2cdff93532abdb7448306fef28c664a14bdcd5d + pristine_git_object: 3020f3fa0266317c97ce375e0e0a35253a5a0dd0 + docs/models/chatcompletionaudio.md: + id: 5db9f3e23630 + last_write_checksum: sha1:ada1f65d3703a85a7066e98d71f35a82f20e7b06 + pristine_git_object: 86d07c9f801b22543861eb923d71e9caf6690dc3 + docs/models/chatcompletionaudioparam.md: + id: b86bb9a2ab41 + last_write_checksum: sha1:a36eb2645cb0c67efe0b00c5426897a6e5d2be5e + pristine_git_object: 830cc7ef8e661687f93212f170c84a52b5945d75 + docs/models/chatcompletionaudioparamformat.md: + id: 112ed65cd0ba + last_write_checksum: sha1:ae5171c49d76946bc1d4ce27afce3a5dac4b3e4f + pristine_git_object: 3969e42ab962a90961e9bc64e87f257bf1ba4966 + docs/models/chatcompletioncontentpartimageparam.md: + id: 69396d282df3 + last_write_checksum: sha1:daff48e2a8e70a58aa7ad54e00848e97bc3f6b35 + pristine_git_object: f64910f04b1f6886e1896b33da2ee0516e8266d1 + docs/models/chatcompletioncontentpartinputaudioparam.md: + id: dbf1547a27c9 + last_write_checksum: sha1:a69617c4298f321849983f1f832ebe263026ef24 + pristine_git_object: 7e26c274860d62528ef13c644fd12a4b0548d16a + docs/models/chatcompletioncontentpartrefusalparam.md: + id: 9ffb19aaf30b + last_write_checksum: sha1:a7c340a6c6f89f5d55ee9815154dedf3a60da9dc + pristine_git_object: 661e8bf097ce696703eb7897d49142124923c75d + docs/models/chatcompletioncontentparttextparam.md: + id: ef2b6560db6d + last_write_checksum: sha1:4351d0371738d1e8a6e9919ec5959960378302e3 + pristine_git_object: 9f4c722c624882ef91cd03de5b27c4658e6558b5 + docs/models/chatcompletioncustomtoolparam.md: + id: 0c02c2d18d4f + last_write_checksum: sha1:0dc9f5f8f7e5a75403f9d7d517b27924f31f5a75 + pristine_git_object: 4d1a57e55001a5db0dbf8d7c249cf55553beffd1 + docs/models/chatcompletiondevelopermessageparam.md: + id: 2360c0c7af35 + last_write_checksum: sha1:d43d4c4daceb85c418af335a4d54ec57c14fd9f3 + pristine_git_object: b305c7e76ac511b3cd4cdc5a3f0da26de4d3ee84 + docs/models/chatcompletiondevelopermessageparamcontent.md: + id: ea72c7b474cf + last_write_checksum: sha1:3604eeefba907ed13cc0a35448248bfcf6fe82d4 + pristine_git_object: 0738a652fd4a8123a95d6114f4f3ccdde5c337a1 + docs/models/chatcompletionfunctioncalloptionparam.md: + id: 8b655d293afc + last_write_checksum: sha1:528c2a36014ab128021b75092a7816ea4a3c194b + pristine_git_object: 5187df28c435a0042341833334a2d7e623daeee1 + docs/models/chatcompletionfunctionmessageparam.md: + id: 9544be788d4f + last_write_checksum: 
sha1:e56d292b7c6d0402a0be8d8fec973d6cb6d9e83f + pristine_git_object: c905731d8ae192ca21df5b6782e372ca18000fb9 + docs/models/chatcompletionfunctiontoolparam.md: + id: f8029f20961e + last_write_checksum: sha1:493c824dc956f924a710568f2ce7060ced4439c0 + pristine_git_object: 4c65d1cdace9c1334a3017ec4a91514716b1c790 + docs/models/chatcompletionmessage.md: + id: 980f9e8f8e17 + last_write_checksum: sha1:56f478066e7226ded53d1929167476ee373ac345 + pristine_git_object: ca026c553f73fee5652e1f990b459ca08ac2cd94 + docs/models/chatcompletionmessagecustomtoolcall.md: + id: ace36cbf55c0 + last_write_checksum: sha1:bead36eca9519fb93d3a75756f388dd3e4c18d49 + pristine_git_object: 14fa035c5b542b2e4f7a90596dbe6bba3276d79c + docs/models/chatcompletionmessagecustomtoolcallparam.md: + id: 195cdbd15b2b + last_write_checksum: sha1:5cf66f5596ff4a6f973a67a17799c609fbd4dd8b + pristine_git_object: a072f6d1c51f9c918c3ae884e3d99d8872c8e77a + docs/models/chatcompletionmessagefunctiontoolcall.md: + id: 84d952e2b078 + last_write_checksum: sha1:d29704559b2c5f6312e4c0d66b05ccff1377d339 + pristine_git_object: 4311dca06a1b94a2b99142e97f8aa70d27b173e4 + docs/models/chatcompletionmessagefunctiontoolcallparam.md: + id: df5941860ac6 + last_write_checksum: sha1:32e41324bff0b6d363632847d75fc84cf6e1621c + pristine_git_object: f1f649670b12572c69e115625c170e9bc42dccbf + docs/models/chatcompletionmessagetoolcall.md: + id: 1f89cd3acd26 + last_write_checksum: sha1:446d70ae69ca7a3f3b9dc2b308be8bffba8e8fe0 + pristine_git_object: 00db4cd624f3d25bfae9017d9487d72515c5ad3f + docs/models/chatcompletionnamedtoolchoicecustomparam.md: + id: 84531c282b97 + last_write_checksum: sha1:b4b1c06e0b46cf9489a3e32442c651b15f3c41bf + pristine_git_object: 888fa1c04ef24e5b503a76639bbdb24993aaf70a + docs/models/chatcompletionnamedtoolchoiceparam.md: + id: 549a2b573300 + last_write_checksum: sha1:d2236196916b001d39d2020c7fc41027bad3ba29 + pristine_git_object: c0ab42a5eb2c5f50543b41d0d211aa6661b4755b + docs/models/chatcompletionnonstreaming.md: + id: 624dd746fb75 + last_write_checksum: sha1:f299da00a6717168b32d8641dd1530122953f233 + pristine_git_object: 723c8bdd280df127eeec2b3774e0e29bc6c34561 + docs/models/chatcompletionnonstreamingfunctioncallenum.md: + id: bbf7f75f7499 + last_write_checksum: sha1:0901cffbcd5896a1655b403c1e00831f2b27d39d + pristine_git_object: 0ab83cc15c8006cd7110c6b396b87d2f3a05db4f + docs/models/chatcompletionnonstreamingfunctioncallunion.md: + id: 9d0dff533aa5 + last_write_checksum: sha1:601618b1f5d9bcf8206c2a4b6b0b34513fe912c2 + pristine_git_object: aab9713c58d2836673cf4d9975daccf888bd7e44 + docs/models/chatcompletionnonstreamingmessage.md: + id: 4be98cee55da + last_write_checksum: sha1:fd64f0de34dc3c4c754b31edb52c62f8795ae222 + pristine_git_object: 30a9c4e1be4b90e96737d57701de4d985fc41636 + docs/models/chatcompletionnonstreamingmodality.md: + id: 5a63d9f4a4b6 + last_write_checksum: sha1:4a3a806b8e6ea92cfba4e83c70e7c4746112b2f4 + pristine_git_object: 60402a6e6d8f6b4ff6d6620b875941fed7efb2e5 + docs/models/chatcompletionnonstreamingreasoningeffort.md: + id: aecbe7b4f62e + last_write_checksum: sha1:9ce5dcb7895cfcf6c8fc9f643b910e709ea6dc2f + pristine_git_object: 7cd4cc30b43823a28865ec76f41395aaa923977b + docs/models/chatcompletionnonstreamingresponseformat.md: + id: 2a745e49bd9b + last_write_checksum: sha1:8feea2e9955803696f9f5774233e32354d551f1c + pristine_git_object: 0bb0c9f0c41fe379ba18bbec7181d44f434d10f5 + docs/models/chatcompletionnonstreamingservicetier.md: + id: f5b5e8cb6dda + last_write_checksum: 
sha1:d110a974ca7d2bb8cd9676416a39e8d861cfd172 + pristine_git_object: 0b57508e4ebdcdd7b93c342b6f000a56ceb4ee77 + docs/models/chatcompletionnonstreamingstop.md: + id: a40b074646d9 + last_write_checksum: sha1:d98d73ac285eac22e39491f1a39210515caed922 + pristine_git_object: 9f9a379bbc377a52e237200ed299a1058588ce99 + docs/models/chatcompletionnonstreamingtool.md: + id: a9e049a549d1 + last_write_checksum: sha1:7140b76a7879c5f5407de4519b8724fc19249071 + pristine_git_object: 360c02ab4cb004b123b4feff32da681e6912f54f + docs/models/chatcompletionnonstreamingtoolchoiceenum.md: + id: 589fc87db1fa + last_write_checksum: sha1:590c42f44c9f5556c5f95d0bed59fd95da8af478 + pristine_git_object: b047c82039fa2f2a314411cf498839be4c57cdd7 + docs/models/chatcompletionnonstreamingtoolchoiceunion.md: + id: b07a36649c9a + last_write_checksum: sha1:9f6fc7dcc89f4f2d0812d453401ee7da7f700d0d + pristine_git_object: 05acb3b3752de20b1e56c48e526c3046c09913b8 + docs/models/chatcompletionnonstreamingverbosity.md: + id: a3004730f610 + last_write_checksum: sha1:5874752496debb2bd5fb7000e8a8143916e7ab2d + pristine_git_object: 27cef244516fd37c75b8006bb4cbe716bb71d218 + docs/models/chatcompletionpredictioncontentparam.md: + id: 7f0b8f8ef9d1 + last_write_checksum: sha1:6d0f81516e3a8c6ddbc868105b934ae1502d19a8 + pristine_git_object: 163cac0693ffde513f13e08718550989912d1185 + docs/models/chatcompletionpredictioncontentparamcontent.md: + id: 30af0a556b3b + last_write_checksum: sha1:d0299ac28793a9cd199df109106e1212a8748952 + pristine_git_object: 1755ac339640c1479e646465af11dd8bd694d752 + docs/models/chatcompletionservicetier.md: + id: 3f8a04996669 + last_write_checksum: sha1:9953924db6b0152c465898bcda4bcdf3caee3c63 + pristine_git_object: f882b4dcfa612c4ad33cdde9e03d278c2648e8d3 + docs/models/chatcompletionstreaming.md: + id: ceff166241d9 + last_write_checksum: sha1:a7222da4d5f4e46729d4c6fac34ad2a512a8b293 + pristine_git_object: 6ddd19894dea177b1e85c3511398577418a39f74 + docs/models/chatcompletionstreamingfunctioncallenum.md: + id: e39de79e4953 + last_write_checksum: sha1:0c366e6dc7b572b0ae15412b0c20525b923a4f68 + pristine_git_object: 2c13602f01f8c5b1874a5d7da72cae356bbb519a + docs/models/chatcompletionstreamingfunctioncallunion.md: + id: 50f09e627fd7 + last_write_checksum: sha1:e60b99926b21ee6ba636f577a71fc7bda8cc810e + pristine_git_object: 7b5b2156345ea9afd94c9f865302015c8291a364 + docs/models/chatcompletionstreamingmessage.md: + id: c2a167444a03 + last_write_checksum: sha1:a9e6151dfcbca7a6180f45b3c56ac10ad8797294 + pristine_git_object: 7619ea8d255f4387b273c65f9ef77d0bc8390cb2 + docs/models/chatcompletionstreamingmodality.md: + id: 25a54c2e4907 + last_write_checksum: sha1:97f692476f66eb83fe98e86567e1998d47015822 + pristine_git_object: 2e3658bf9ccbb2d3923f902514ca0826e4147b92 + docs/models/chatcompletionstreamingreasoningeffort.md: + id: 5c0a27f2ce8f + last_write_checksum: sha1:9be600fcceedc0823d0e7000d86acb35674ab420 + pristine_git_object: 26a11788271f169ef10480dae5a505e802a2033e + docs/models/chatcompletionstreamingresponseformat.md: + id: 3a17547cbb6d + last_write_checksum: sha1:a34da2b1bd78d1c378b5c32cb08fe61df170671f + pristine_git_object: 53579616537037d7a17e18b500c65413ce9bed59 + docs/models/chatcompletionstreamingservicetier.md: + id: "684929270002" + last_write_checksum: sha1:422ba5c38a6cc0d2bbe2045dc52a43099cc0cc06 + pristine_git_object: a24962dcec3a8635cb5675b86201532a6238a705 + docs/models/chatcompletionstreamingstop.md: + id: 30723fe7f31a + last_write_checksum: sha1:654201903cd7906300b8a0fd4a7e5fa39c4b19e4 + 
pristine_git_object: c9b1f6b4c148d1362e95e975e7f1714798cb9a0c + docs/models/chatcompletionstreamingtool.md: + id: dd2f0cac0636 + last_write_checksum: sha1:08cb75a5e4b6631a8dd8158a6653fbf93a8f5713 + pristine_git_object: 62dc1bbbc152ccaae87543b7d727e4e135189a10 + docs/models/chatcompletionstreamingtoolchoiceenum.md: + id: 38835f198f75 + last_write_checksum: sha1:495d8790947ca343bc7632b10183a215f4745793 + pristine_git_object: 4d66180bb517084e0ccfdfbed0050b25edf3754e + docs/models/chatcompletionstreamingtoolchoiceunion.md: + id: 7118443a0338 + last_write_checksum: sha1:9ef724eed635456876895c676ef534d66e2f11e1 + pristine_git_object: 9e80af4f15a0785981623059761e82fc2a1dec3e + docs/models/chatcompletionstreamingverbosity.md: + id: a766e831c1e3 + last_write_checksum: sha1:00956be13b560e76e3ea1396684e9613b9b081ba + pristine_git_object: 173d6d3c78734f7b7d4e97677e1df5b5050ff908 + docs/models/chatcompletionstreamoptionsparam.md: + id: c1d2378c8a7e + last_write_checksum: sha1:7114b9492511b089605cfbe9c9f86f2e5ed38ed8 + pristine_git_object: db328d123456e9743ed5062e558296f9a377d831 + docs/models/chatcompletionsystemmessageparam.md: + id: 814d93e2fd52 + last_write_checksum: sha1:16200f966e540adea998e7488c2182273c51e224 + pristine_git_object: 9ea379cb74c815af4c53227143217086922c098f + docs/models/chatcompletionsystemmessageparamcontent.md: + id: 6492cc968c4e + last_write_checksum: sha1:d1bd1529419f6672efa65ed3b2c1c48dd6ab00d5 + pristine_git_object: e935d1bb4411627dfa8a4851c2d536c788016573 + docs/models/chatcompletiontokenlogprob.md: + id: f5d381ebf0cd + last_write_checksum: sha1:03f4f2e67aeaae2227d8817b5088022d2eb24bf1 + pristine_git_object: 32500a1034da408f25c32dc44c325e647e86657e + docs/models/chatcompletiontoolmessageparam.md: + id: 5ec43d87c412 + last_write_checksum: sha1:45b940200fc620543d77598ca71eb5fc083bef88 + pristine_git_object: 2e80068eaaffc7b86b24c404d66196c22e8de330 + docs/models/chatcompletiontoolmessageparamcontent.md: + id: edc0a418f2a0 + last_write_checksum: sha1:17143605dfd32853f2f42091b4c2bb7ff32b0ef5 + pristine_git_object: b3e47cd56285fc0d9870f8ac3b7736ec8fedb945 + docs/models/chatcompletionusermessageparam.md: + id: c3841976f1cd + last_write_checksum: sha1:dd5986527e5fdd2a60aa262eb29cebf0efe1428f + pristine_git_object: 9214fff6a58b3f2fa38437fa3be85655c0984a3f + docs/models/chatcompletionusermessageparamcontent1.md: + id: 9f902bc8a6ed + last_write_checksum: sha1:b4760d3f0de1fcc971bd911b78c7d8f11aec0e8e + pristine_git_object: 63bff6ce3d2193810ea645098067b09e870846a8 + docs/models/chatcompletionusermessageparamcontent2.md: + id: ae4164efb58d + last_write_checksum: sha1:d5833f3f7ade22a546a562c249cc7ada39283422 + pristine_git_object: 60b9730637d73a0335e7ffe2490bd917d938986a + docs/models/choice.md: + id: 7ccf99e7bcf5 + last_write_checksum: sha1:beecf71b95e839242770fbf40792da9f00f8596f + pristine_git_object: 6cf04aaa1e622701c093fba542b029fc1ddd8a52 + docs/models/choicelogprobs.md: + id: e1ec608a588b + last_write_checksum: sha1:f4837c0b1ca0603be37aafe5e94578b2a00b67da + pristine_git_object: cb9af3ffec72c6ee4536f95ef46df9d5e6399d2d + docs/models/completiontokensdetails.md: + id: 72beafa9a4c8 + last_write_checksum: sha1:74038789a31be3f874233c1ee96b0307c7fa00fc + pristine_git_object: f4088dffe7e0acf89d4aa850f3dd740d09d58602 + docs/models/completionusage.md: + id: 930ebf17acf4 + last_write_checksum: sha1:35c894a22cd2b80a1f6657cd77e336d7b2103b2d + pristine_git_object: cf0db588e26f766c1dbc90f2c5c32d074aab56a0 + docs/models/createdatasetentrydatasetsdatasetidpostrequest.md: + id: caa641d735ee + 
last_write_checksum: sha1:652c4d97faa785aea390935379fd2e6f040cbe5a + pristine_git_object: 10d4ae074ebac95a2417035f356af2325e1a9507 + docs/models/createdatasetentryrequest.md: + id: c134b1731b1f + last_write_checksum: sha1:9945774f0a2fd7d9283c80f86c8d1bf9ef938b20 + pristine_git_object: 00868d63eaf6b0370bfd8ebf3b99fddeb22bfe6d + docs/models/createdatasetentryresponse.md: + id: 4a7cac68d1a8 + last_write_checksum: sha1:383dcf2b5a30f96c8cdf995586f648c263102e4f + pristine_git_object: 93d73f9d5605ba868fe1f3bcb40d812d6b474d29 + docs/models/createembeddingrequest.md: + id: 99ae9a3108fb + last_write_checksum: sha1:24777957260c1a595beaabf07c5076b79765312a + pristine_git_object: 77f69d4cc291129f14b86d80627569506824a9dc + docs/models/createembeddingresponse.md: + id: 73a3c0377574 + last_write_checksum: sha1:0786e178bfe1df3b18a9cc222b4c4be544c9c8ef + pristine_git_object: 5d90e9033df880e85f983af13d354c0f62b5f3fe + docs/models/createfunctionrequest.md: + id: 17f5e233f96e + last_write_checksum: sha1:96081ea2f432b4a26f088f5ce00fe12493a4c25a + pristine_git_object: bfb0d83358220293bdacc180d6938cbf0f43da20 + docs/models/createfunctionresponse.md: + id: 4778b1e0ba22 + last_write_checksum: sha1:4e24ab1c15fa944a73534d79ae1ce9c938ad6224 + pristine_git_object: 9fbe8dc276e6c7cdee7d0d997c74a1658ef891d9 + docs/models/createknowledgebaserequest.md: + id: 38d1edfe105c + last_write_checksum: sha1:94c50112c57b79235face343f92f76153d38f435 + pristine_git_object: 3465e28c851cf141e2bb8de84771dd21b7f887c6 + docs/models/createknowledgebaseresponse.md: + id: e138c8a4ecef + last_write_checksum: sha1:ce1ee4c1a6b2c8c6a2682f6ec6b71426e5b8ffe0 + pristine_git_object: bf6ac5df5453df770e3e07abb6edc06bbfb687eb + docs/models/createmetricspansspanidmetricspostrequest.md: + id: fe8bff988287 + last_write_checksum: sha1:66728becdd247aa62db7231a7b9a39b5f300c92d + pristine_git_object: 7f457a205a4d9c007cfc6d86f42da31e9a5963f7 + docs/models/createmodelaliasrequest.md: + id: 3ee653ad1b41 + last_write_checksum: sha1:9ae32058de860fee4a28ea250eba06d86d48f132 + pristine_git_object: 703d0e15781669a710fab8c1377490acc5195f15 + docs/models/createmodelaliasresponse.md: + id: 3bca5ee53860 + last_write_checksum: sha1:a899fc70ee491792a8001370de262a3b58c35759 + pristine_git_object: acb43a0f4c84d0873eb22710c5dfbae2050e244f + docs/models/createspanmetricrequest.md: + id: 1eac85bb4604 + last_write_checksum: sha1:d0118751cef3843c660aa49d834572c369db2232 + pristine_git_object: 34754d36633cbcaa0d62bbff8c8875767575a5fa + docs/models/createspanmetricresponse.md: + id: 65b49a9558d9 + last_write_checksum: sha1:e5b3e2b4c4fc5068ed1c0202be22707d346d0e4c + pristine_git_object: e9606e2bee31fd9df9e1295b24340e45191e3ea1 + docs/models/createspanrequest.md: + id: d28559d1fcaf + last_write_checksum: sha1:9c8c9161fafb67a0ee209fb9f545d3c32d612e4a + pristine_git_object: 2e1c55e13749f43a3e81fdc6fd60c2a929b79b8d + docs/models/createspanresponse.md: + id: 5978894bf4a7 + last_write_checksum: sha1:45bb93bdea3ec5c753f00361115054064e25a77d + pristine_git_object: fdc49d8b08e7c2fb5a23e0a8348860907bd7b713 + docs/models/customformatgrammar.md: + id: 30681ed71a99 + last_write_checksum: sha1:02910a7f8961d94c667001098823dd97d2c321cb + pristine_git_object: 54040e240c6cccbf5193d7523989177ad516af84 + docs/models/customformatgrammargrammar.md: + id: b17337b48814 + last_write_checksum: sha1:4ab51db7676add062090947edc1339f5f172e3ff + pristine_git_object: 27ee5f9583cf4c8392361e745c78821933408e0f + docs/models/customformattext.md: + id: 98a8bd96a4f8 + last_write_checksum: 
sha1:0e22714e15033ef5c36ee62965ed77496a7984d2 + pristine_git_object: 86ec7bf744eb4bfd8693d4ea95e07851e3a5c0af + docs/models/customoutput.md: + id: 25722fc766e6 + last_write_checksum: sha1:ea0a910c59a2e03d5eb7bb1dbbef6bd48f1ddbad + pristine_git_object: a1e4ad50f4c5be56a6b2eacca507bd49afabf367 + docs/models/deletecustommodelmodelscustommodeliddeleterequest.md: + id: e6769df83c35 + last_write_checksum: sha1:fccca344c37fff51a0babe4b3ffd3caba9888b90 + pristine_git_object: 9950d8ef27045287f06add08789e0bbd77ad681d + docs/models/deletedatasetentrydatasetsdatasetidentriesentryiddeleterequest.md: + id: 10911012d7cd + last_write_checksum: sha1:4522c111ecdc4f540bd7c1835d01f7964ddc1d6a + pristine_git_object: db450f3e2bbf9008c9a8f61e4ec1eb117f5d8480 + docs/models/deletedocumentsknowledgeknowledgebaseidquerydeleterequest.md: + id: 47af40ee54b2 + last_write_checksum: sha1:7211b7de27eddf8bb88a679ed66af986aa6df281 + pristine_git_object: a4e38c6e5192500b5fa8e946350c9c477f26927d + docs/models/deletefilefromknowledgebaseknowledgeknowledgebaseidfilesfileiddeleterequest.md: + id: da0923a61ac7 + last_write_checksum: sha1:09bd080232b57793fe7e5e62d80e107042b6331e + pristine_git_object: 6a715b81a9cf8c9e1e14e2b409c9e376203d7bb4 + docs/models/deletefunctionfunctionsfunctioniddeleterequest.md: + id: 6d65284e35ce + last_write_checksum: sha1:1fcbe5747331b34db6274a550b272ebddd630289 + pristine_git_object: 1713ac8f7d2eb4731348059c6560ac612cd67b1f + docs/models/deleteknowledgebaseknowledgeknowledgebaseiddeleterequest.md: + id: 7289de2eb543 + last_write_checksum: sha1:7f227b6703e7126619173e609c62f6f433bc3051 + pristine_git_object: edc5d1ab4c758e4145bed49eeabfc729a5ba6966 + docs/models/deleteknowledgebaserequest.md: + id: b5d4f9004c57 + last_write_checksum: sha1:ac9c38d01b3dd9664fd98cae5bdbadeefcf23e30 + pristine_git_object: 06d58b8414e0ea6dbff24d48dc64a3a8c29d387b + docs/models/deleteknowledgebaseresponse.md: + id: 8f3723cfeadf + last_write_checksum: sha1:e9f03463e292f28f8e9eee3b342ef1118c32dcca + pristine_git_object: 7cb92e2ab0c55d64a3a51facde3617967832eac4 + docs/models/deletemetricspansspanidmetricsmetriciddeleterequest.md: + id: fc4df38266a5 + last_write_checksum: sha1:909a095e850cc4a88ffca5f0ce084a11ede325a9 + pristine_git_object: 80ab113d3955776b3a6fcca255d2d72c61db3bd2 + docs/models/deletemodelaliasmodelsaliasesaliasiddeleterequest.md: + id: ce79c40f5515 + last_write_checksum: sha1:cd27df8b5f3fc467ce5bbde8ba50883e09fd0807 + pristine_git_object: e05102b62f6a9b56f0676020c8caa76865043ae9 + docs/models/deletespanspansspaniddeleterequest.md: + id: 1f7945022ede + last_write_checksum: sha1:477c330df7fc3fac5932728399011284af71d5c7 + pristine_git_object: add8747f7f3c4175f4b203fc4c626751bddb204b + docs/models/delta.md: + id: fa3dcef60989 + last_write_checksum: sha1:76f701defa4b2afc612cf119089191db4793ed84 + pristine_git_object: 074dad1b228e3379ccc3a74d8dd9cb96e98c0805 + docs/models/detail.md: + id: bfd4e327e742 + last_write_checksum: sha1:3afeafd1f12678863293b58d1a84f7c59200c64e + pristine_git_object: e58f19ac9f104e2f8a6402e5fb1a4d17d70a8152 + docs/models/example.md: + id: b159060b04d6 + last_write_checksum: sha1:2bd5ebb99f25ee0ccc9d0ca31de423948806dbb4 + pristine_git_object: 77bbcfd6d62bc9a9dceccd1e87c9b582141fbd14 + docs/models/examplein.md: + id: d030e5625a8f + last_write_checksum: sha1:06bf10f849a93209202cbe20c3f95101a58e012b + pristine_git_object: d907a1b251d398c285ebffbe592c3b35b79ff9a3 + docs/models/feedbackinfo.md: + id: ad9b15333cc1 + last_write_checksum: sha1:834bc734760c27223e0ad357e4d2edd876205036 + 
pristine_git_object: 0768d33d7823f34e9f08b16dc9d801d22e292cfd + docs/models/file.md: + id: 4ad31355bd1c + last_write_checksum: sha1:291841b4f15d8a46129db9c9ae0a2f4e5ad491a3 + pristine_git_object: 27021c221d0b94262c77c67a922076850430f9c2 + docs/models/filedownloadurlresponse.md: + id: 97df29e75ce1 + last_write_checksum: sha1:d7e33770a94f3327e271f36dd27b1fcaefd97d9a + pristine_git_object: 7a63556248bde495f0208a181135c42f5984f391 + docs/models/filefile.md: + id: 7bf4acd4b65b + last_write_checksum: sha1:03d0fb0f739c4bda0dfb107b317b8bbc706339de + pristine_git_object: 0723307d28b06032e6d3be3610e6777a4ec4e800 + docs/models/filter_.md: + id: 5c3da70f78a7 + last_write_checksum: sha1:4d45790af5daa45d84a09c234b117d1f9480a217 + pristine_git_object: 992d47ebc5820fc55418ab66ec6923154c2cae58 + docs/models/finishreason.md: + id: 73315c2a39b3 + last_write_checksum: sha1:156b8529a5519d67d0b0a92660fb24abff246466 + pristine_git_object: 0e5048c90782bc45bdc483777b4a608598e6392d + docs/models/format_.md: + id: a17c22228eda + last_write_checksum: sha1:447cf608b54a26bf394ed7ead57bb8f7198abf76 + pristine_git_object: a082047e831951ff6183a8add3f53b49a7aa032c + docs/models/functioncallconfigurationinput.md: + id: 03f6e02c39e4 + last_write_checksum: sha1:638fe50dff24511cf09a078ddaa24ed5bded4033 + pristine_git_object: 9063f6a0809387a24454f6f1d7d6320bc84ce2c3 + docs/models/functioncallconfigurationoutput.md: + id: 561cd1d617ac + last_write_checksum: sha1:f074b57f04a321112ec6464e03d89577532d0f43 + pristine_git_object: dac7c47aa3a8854e125a9a6c6dd0339e543f39ab + docs/models/functioncallinput.md: + id: 4691d850186c + last_write_checksum: sha1:e53984330acb9c989cdcf66972cf3d832dcf9610 + pristine_git_object: 6c72fa20d5cd750ff3e9f595c6f4b990d447a5aa + docs/models/functioncalloutput.md: + id: 924916e779e8 + last_write_checksum: sha1:68ce27e3f43f52a929afc42fd884b4ad5439e8e6 + pristine_git_object: 9d2fa403957af171fa62dccc5d03be600b248b03 + docs/models/functiondefinition.md: + id: ad3367fb72da + last_write_checksum: sha1:370218bda35dba8936a69c45bde9ab586d951b3e + pristine_git_object: fe29e746ca65d52a865cc6d1b142ceea0c5f9a49 + docs/models/functionoutput.md: + id: d9a009ccdb40 + last_write_checksum: sha1:3ee01398f9409d21f28c3fb9aa332da4b541d81a + pristine_git_object: 183db9f0bf511ba67e7e34b834b61f5686d383f7 + docs/models/functionstreamcallstreampostresponse.md: + id: 114ab6484293 + last_write_checksum: sha1:943ab1814781fd5f260d7f8a74f31facf217ff3a + pristine_git_object: 45917b59080b369f24594b36c6edca646c549525 + docs/models/functionstreamcallstreampostresponsebody.md: + id: b94a3c888e55 + last_write_checksum: sha1:b525938fc1633531fcb9071bde1e6da968742c3b + pristine_git_object: ee5f4463c8b21901f934aa0ff0c08f11cb93f720 + docs/models/getcustommodelbynamemodelscustombynamenamegetrequest.md: + id: 6a3c61479389 + last_write_checksum: sha1:925cf4b6bbde50011bc2d8bdd87c65aea94b0d73 + pristine_git_object: 02a85afd7185bd5344f31438647784300795a442 + docs/models/getcustommodelmodelscustommodelidgetrequest.md: + id: 21d0ac5286e5 + last_write_checksum: sha1:6db6b08bd700a8f8ea9587bdb8c7ecaa43f25025 + pristine_git_object: 8090e7e261fc176cffd2de3715f62f113e2427fa + docs/models/getcustommodelresponse.md: + id: 387b880f5c2a + last_write_checksum: sha1:317b6a111eb676b2735189c5cfd3822321d8d6a1 + pristine_git_object: 7208c79e780802f94ef2818605f632358ae7fde8 + docs/models/getdatasetentriesresponse.md: + id: 417801b8b763 + last_write_checksum: sha1:8b545273ea5ce2f698f791ee8c553c2eb2b049fa + pristine_git_object: 547a5fa1ef9b6f6f8322ca2a75bfb97c2f2531de + 
docs/models/getdatasetentrydatasetsdatasetidentriesentryidgetrequest.md: + id: 59ea3cb89ad4 + last_write_checksum: sha1:3d0cf527eaf20d602d261c708f377cc6e2d1a297 + pristine_git_object: 49a05f13e940882bc6dcc725d65033b04c5b42a4 + docs/models/getdatasetentryresponse.md: + id: 9cd31c6d59f2 + last_write_checksum: sha1:75e3ebaa43548875ed5c0983200c56f89f397fc2 + pristine_git_object: afd8d2679e228cbd0af5f96b5b0a56cbf3838b86 + docs/models/getfiledownloadurlknowledgeknowledgebaseidfilesfileiddownloadurlgetrequest.md: + id: 5c69cdfd7ce9 + last_write_checksum: sha1:71bf15e3ca8cc014a0592559e998579ad76ffcc4 + pristine_git_object: 99ba8b2a8a6f5321160d04d7c4decba5aa6c37ef + docs/models/getfunctionbynamefunctionsbynamenamegetrequest.md: + id: 772fde66e6b9 + last_write_checksum: sha1:00060375b9bb808d1ce0b9d7fa05a41ec76752b7 + pristine_git_object: dcf82561b7fcd6675c67d62e6f55e8eb8adfc2a2 + docs/models/getfunctionbyrevisionfunctionsfunctionidrevisionsrevisionidgetrequest.md: + id: ba777cbb395d + last_write_checksum: sha1:dd86ea2303fa398147da38c80b6d2bcb7beb06c5 + pristine_git_object: e0a313d38a16af758f21a4fb1d25c36a6a7112e9 + docs/models/getfunctionfunctionsfunctionidgetrequest.md: + id: 6675961a7787 + last_write_checksum: sha1:f51715a6221e84dc634a316908f4cb66df9545c8 + pristine_git_object: 96e2f113948bc0d4ebba75f95c5710872ddeeaac + docs/models/getfunctionresponse.md: + id: 10cc62e022f7 + last_write_checksum: sha1:9296cb65394904c8d87e02a9850f7d58235bcc20 + pristine_git_object: 2139f14b456be8b624ba8dadcc3d8ec61a20cc94 + docs/models/getknowledgebasebynameknowledgebynameknowledgebasenamegetrequest.md: + id: f7fc58df441b + last_write_checksum: sha1:1baf96445fc20d1bf6418e4f5c5b8524eb9015b1 + pristine_git_object: 1b5b2b358376ab4e56d7e3a0cf3129bd534ca62f + docs/models/getknowledgebaseknowledgeknowledgebaseidgetrequest.md: + id: df7d0ffee9f2 + last_write_checksum: sha1:a618077439b3bd0f3288dac73a3135b9c375826f + pristine_git_object: afd4cacafef15e307405d9b1ca075590263c6cf3 + docs/models/getknowledgebaseresponse.md: + id: 37a25ffdd460 + last_write_checksum: sha1:ea9aa956bbfdaa993c67816915fe500dfc1d7294 + pristine_git_object: 11162fdd4c460c4c223f0966fe0df64eed24330e + docs/models/getmetricspansspanidmetricsmetricidgetrequest.md: + id: 70d89cd703e9 + last_write_checksum: sha1:95c7159e6c894ef8463c1e2ba832983790233cc1 + pristine_git_object: 6dfce307427835c216c635d26cb69209af06bcdf + docs/models/getmodelaliasbynamemodelsaliasesbynamenamegetrequest.md: + id: 2f5b529f96a1 + last_write_checksum: sha1:2e91aaed10656e37eccd5b53f708ad6bf76f69ad + pristine_git_object: f817c0cde3b179020a8a5ec2cf27761d2a5e48e3 + docs/models/getmodelaliasmodelsaliasesaliasidgetrequest.md: + id: 6a74a8be190c + last_write_checksum: sha1:7b72632264335e2fa49979605fe8063491ea191c + pristine_git_object: 08c96c5eea7950b05c6f2ae7d500afdd9d891461 + docs/models/getmodelaliasresponse.md: + id: 84ac30a4273b + last_write_checksum: sha1:fe58afb08fed0bc344985a91b0b0b138f10ed0f9 + pristine_git_object: 36c7547c840914609dd940766b69b3c944e4745b + docs/models/getspanmetricresponse.md: + id: 768693ec2866 + last_write_checksum: sha1:446770f74d744491dcdf3afd1e26c0deb17b4784 + pristine_git_object: e43002e8f64184154e160779a063949482e916ad + docs/models/getspanresponse.md: + id: 0cf783fa97e6 + last_write_checksum: sha1:2352434878fcae119b74e5600e6eb8c417a5a15e + pristine_git_object: f3117c5482d4bfd01e57601ab533c09e451ad388 + docs/models/getspanspansspanidgetrequest.md: + id: 499092a040cf + last_write_checksum: sha1:47ee7eada172d0c830be85dd4c007f1667326330 + pristine_git_object: 
ea787c051133fd46b550143632cc774e29356d80 + docs/models/gettraceresponse.md: + id: 0479ab4aabd9 + last_write_checksum: sha1:dec0055f7619ac8b53c2375608e499bb5115a888 + pristine_git_object: cb7c2253d5dc1f9e9b1da75f4005377444a13a31 + docs/models/gettracetracestraceidgetrequest.md: + id: c197d7137d4c + last_write_checksum: sha1:aa73ac5cb2545167748d7834bf8682b7dcffaaed + pristine_git_object: 074c0f5d3f7b3ab13f1757d454425036febdde51 + docs/models/getuploadurlknowledgeknowledgebaseiduploadurlgetrequest.md: + id: a483c6e27d18 + last_write_checksum: sha1:6fcb68fa1215a443ba1ae44d291906368da27a2f + pristine_git_object: 53133d3ae15cac344a5645779272eb43d2a38c49 + docs/models/getuploadurlresponse.md: + id: 24b8eef8ba0a + last_write_checksum: sha1:a2e2447baa970daad7d75d45d46d5b36c590114e + pristine_git_object: 4224ac1a52e73ad1f4ad897fe12efc240c7076fc + docs/models/getusageresultitem.md: + id: 723762cf2df5 + last_write_checksum: sha1:2b984a48c04ad860002b64477f7a70523d1db5e8 + pristine_git_object: f9b3622df7ab80bbcb6fe2ca42bf0867e09ffad7 + docs/models/granularity.md: + id: 3cee51c03dea + last_write_checksum: sha1:505b3e103207648b70a7db3f710626c35536784e + pristine_git_object: 9f02dd12a54f62d3cf29d634735302071191af0d + docs/models/imageurl.md: + id: e75dd23cec1d + last_write_checksum: sha1:bc66eb53f9544d4f0b82696f2f5bd84303a3aed1 + pristine_git_object: 2370dabf004a140dbb7095bdd086be566e28b08a + docs/models/input.md: + id: 5cbc446a3956 + last_write_checksum: sha1:fa54fc8e835bcf17a197f7414a3baf36e719c743 + pristine_git_object: 48330b2739e86aa684840baa5757bb41080ae5ba + docs/models/inputaudio.md: + id: cf8d2935b1e7 + last_write_checksum: sha1:549380afc7900870ecbcd4b3ba7a46a322baf6c2 + pristine_git_object: 6ce6295457008f40f87a79ca41242cc6a197f837 + docs/models/inputaudioformat.md: + id: dbd07924c0ae + last_write_checksum: sha1:e471abad962cee9a794ad63e8af5db18f3661077 + pristine_git_object: f764a30d09f822ff24c4fcbb80744b66f2a67da6 + docs/models/jsonpayload.md: + id: aee262eb3554 + last_write_checksum: sha1:c1f31a4bc47a973e01e14d75580336d1b13ecddb + pristine_git_object: 0d6a623c45a09215fbb07cd0bea1645f1d3f0908 + docs/models/jsonschema.md: + id: a6b15ed6fac8 + last_write_checksum: sha1:68a740289d5d53f3abf1645654c78fe89bbfd2a5 + pristine_git_object: cc260b2daad440e24b3700ea11a6ef6e30d64090 + docs/models/listcustommodelsmodelscustomgetrequest.md: + id: 539ef9ffa36a + last_write_checksum: sha1:4204ac844c3171798497cf3f3cb14095cd6e9219 + pristine_git_object: ad57b3066ca317ce01159a256df8a1ae80148181 + docs/models/listcustommodelsresponseitem.md: + id: 35c270b9763b + last_write_checksum: sha1:fb5cda79e4d43d7dbaceb0a56b8c517047ec6f93 + pristine_git_object: d27a2137ca0b4cbe832b003e55d7aeb99e7f3c7f + docs/models/listdatasetentriesdatasetsdatasetidentriesgetrequest.md: + id: bb26e4325965 + last_write_checksum: sha1:a23fb208bcfe4b0025b6de0ba84c84190388d3ae + pristine_git_object: 93656e16d2441973abb50655507fb2fbe1408193 + docs/models/listfilesknowledgeknowledgebaseidfilesgetrequest.md: + id: 8ee61dd2b93b + last_write_checksum: sha1:1073b2d0e8dc4fe7c4decf127f4c52966b366096 + pristine_git_object: ba431df747767f3893c5b5f88584c3e51ea108a0 + docs/models/listfilesresponse.md: + id: b15df90d2d59 + last_write_checksum: sha1:83609c74a598bf2d842bd5654a9087821afea645 + pristine_git_object: e63c4beb9e0217a140e827095fb34931707d762e + docs/models/listfunctionrevisionresponse.md: + id: ee30ecda6dae + last_write_checksum: sha1:18188ae258e23ece5e556d7bc8a7b020ca9d7f76 + pristine_git_object: 4ee36eeb592f1ce8752b327ab225fb02093eeb3b + 
docs/models/listfunctionrevisionsfunctionsfunctionidrevisionsgetrequest.md: + id: 5e145876b206 + last_write_checksum: sha1:d10cea680c9079e87a281a670bd0d13ff589ae99 + pristine_git_object: b194eafc6548446ee3345f781647afcc3533c136 + docs/models/listfunctionsfunctionsgetrequest.md: + id: 52034793426a + last_write_checksum: sha1:32f03ae782b832f6b421e2ce1acf69f10d27e6b8 + pristine_git_object: dcb2de5116ea1b1929d89bec93bd4d7e8c157b85 + docs/models/listfunctionsresponseitem.md: + id: 924f5235d957 + last_write_checksum: sha1:1dc7f1082568f8baf2f1d55046fb1fbb37e0d014 + pristine_git_object: 6e3708f86be3387b1ad47ff0f5dbe02b068daa0a + docs/models/listknowledgebasesknowledgegetrequest.md: + id: 7a7be63b8564 + last_write_checksum: sha1:1a3b18611c232f0c5b0acdfd91f1971c3fed8ec1 + pristine_git_object: 9cbf2354216450a4e0c76890974f1a1edacbf9ee + docs/models/listknowledgebasesresponse.md: + id: 6a43a5968963 + last_write_checksum: sha1:e1252940e1f5765c98b20e03a91542b54af5e846 + pristine_git_object: d6cf113f64cdec797ff3ff588d25e202351ebf22 + docs/models/listlanguagemodelsresponse.md: + id: f00dfaac53d8 + last_write_checksum: sha1:0dd3ee77c5d5ea5e1aea62f5c978a18e4c65619c + pristine_git_object: 8be5dc2078686a7d8c09d3db200f2f31cf4059a3 + docs/models/listmetricsspansspanidmetricsgetrequest.md: + id: 960f90e86f57 + last_write_checksum: sha1:c583cd64f078c65ca2614b048351387cf9c66264 + pristine_git_object: b9f73995f74310b25476e29378ccf5ae71a893ad + docs/models/listmodelaliasesmodelsaliasesgetrequest.md: + id: 2e4089438f50 + last_write_checksum: sha1:3b3ab2d5a4c2cdbd8a6a1fb6e36bb0c91ca9b44c + pristine_git_object: e8e1a96577eac6eadf747309710b97b5274ef596 + docs/models/listmodelaliasesresponseitem.md: + id: 7a1ace15b4bc + last_write_checksum: sha1:8bae77a3aa27d9c8480999f1b0028b859cb68a37 + pristine_git_object: 0c9732dc331b8c1ade38aa1b21fcaf29165b935c + docs/models/listmodelsmodelsgetrequest.md: + id: 9a94dd75fdb1 + last_write_checksum: sha1:72fc79ee5fbb6ca4441b4aa235238ee80e5c62be + pristine_git_object: b05da7e8f2fab7135d08bc34f61be092cca65272 + docs/models/listrerankmodelsresponse.md: + id: 578644c59744 + last_write_checksum: sha1:9e71c14473ac611b8c624ec7d44ed70d91390f1d + pristine_git_object: d19de002922a050dba77ba0a11caa17a4aa0cd07 + docs/models/listspanmetricsresponse.md: + id: 80904a67ea60 + last_write_checksum: sha1:ecbe74a9856be02672e20261f989cb2647fffc94 + pristine_git_object: 60531846dce199103b14a47f8ffe9b43b185621e + docs/models/listtracesresponse.md: + id: f54f6c7c6d3d + last_write_checksum: sha1:1e3162a7322ffdec72c425af69403854e458602a + pristine_git_object: 36a097db330e42a7410de2c7f36953602d5c9d01 + docs/models/listtracestracesgetrequest.md: + id: 11d5ba20257c + last_write_checksum: sha1:6feec6e51d473eb1aeaa72b46a295335927bd808 + pristine_git_object: 94bc0a810256eb7ae4e3d4a601e334fe9d7da85a + docs/models/meta.md: + id: b5731935642a + last_write_checksum: sha1:69e61ea1841eea4cba42a1082b605e3f6d238a30 + pristine_git_object: 384e865e817523c40b1b2ca14a6f25be8e44a53d + docs/models/mode.md: + id: 568581cb28bc + last_write_checksum: sha1:5d57d8e2ec5ad4f3dbbdd0a0549c93c3a7388877 + pristine_git_object: c15869f0c7fb01cc79f263acc58ef86a126c9cea + docs/models/model.md: + id: 66e0236ac289 + last_write_checksum: sha1:6033c542fa2d11f322aa28011ccddeaa5463ec55 + pristine_git_object: 590201641aecf5cdb88bac54efbd51b0a7c3d768 + docs/models/op.md: + id: 4f953e6b890f + last_write_checksum: sha1:1c29e5e6f033f7f85e92487edef2fe8044b7b6b8 + pristine_git_object: 9eb5b8d10c0aaaf9e68d65ce09bc5119f30a157f + 
docs/models/openaitypeschatchatcompletioncustomtoolparamcustom.md: + id: 1de782f78209 + last_write_checksum: sha1:861f4d45a56d55332e3d9bcecfa9c8ecd3eba14b + pristine_git_object: 49f06bf1b0d1e0e16fa91cb29da3fb3ba805874d + docs/models/openaitypeschatchatcompletionmessagecustomtoolcallparamcustom.md: + id: 410a88ff8ffb + last_write_checksum: sha1:7aa7c545148a7b64c4267209b1577040b4059d4d + pristine_git_object: d9f17aae9c72376080b0d69d65803be6566fb4ee + docs/models/openaitypeschatchatcompletionmessagefunctiontoolcallparamfunction.md: + id: e4ecba7535af + last_write_checksum: sha1:5dae27bd63424a6f2aa6479601bf8492d5fbc8cc + pristine_git_object: f547ca3c5f676550d5621bd871747564e05bf5f8 + docs/models/openaitypeschatchatcompletionnamedtoolchoicecustomparamcustom.md: + id: a49a4f65e52d + last_write_checksum: sha1:22cd64b056f864159a14e0bf574fb8c2fa221ac0 + pristine_git_object: ab616a90eccd7ab53bf32a2f72612c1c18ee59f8 + docs/models/openaitypeschatchatcompletionnamedtoolchoiceparamfunction.md: + id: 128ec65eb634 + last_write_checksum: sha1:8fe762b45f728ff890b290672c6031fabb35d272 + pristine_git_object: deebc36b061bda33d24d88608db957e61bc72a88 + docs/models/openaitypeschatcompletioncreateparamsfunction.md: + id: 0aa998a8e295 + last_write_checksum: sha1:579fb0f87dbb6d89aa9d78334f50629220c24f4a + pristine_git_object: ce2c9f492543ed74e5177e4ae6d4e2d18919b14c + docs/models/paginatedresponsegetdatasetentriesresponse.md: + id: f3d1e4810f8f + last_write_checksum: sha1:38b6c3e961826cfda9388fcc6caa95d34a14000e + pristine_git_object: 5f8132ba6bb60bca75fd9121509e6759b8adb377 + docs/models/paginatedresponselistcustommodelsresponseitem.md: + id: 015fdf9e612b + last_write_checksum: sha1:7862fd5073f655e6204e569ab8340a6e7eeed846 + pristine_git_object: f2123979149d7f62cfa7f7f1586f302f0dc9cd5b + docs/models/paginatedresponselistfilesresponse.md: + id: 568ff4f5b23a + last_write_checksum: sha1:53e13493b52407cb4628b4f365ef2e8f1005ea23 + pristine_git_object: f552e0b8328e90f1637580ed128546c0df017152 + docs/models/paginatedresponselistfunctionrevisionresponse.md: + id: 96155dee90e3 + last_write_checksum: sha1:0c6e67d5c1693bdbf7db9eb94337ba44c50428d8 + pristine_git_object: 5ae80e24e4934345a1812bddca7fbca1bbc636ce + docs/models/paginatedresponselistfunctionsresponseitem.md: + id: 0fd78c8f0e9e + last_write_checksum: sha1:5d3208b18cbe2e80c544b9209dc80797233b079e + pristine_git_object: 746fec87c361c6a1d547a88ccbae5b8501c65b1e + docs/models/paginatedresponselistknowledgebasesresponse.md: + id: 5ac660426289 + last_write_checksum: sha1:7cc6e9d2ff81f5fa3ae1b403e6469a44a4189553 + pristine_git_object: 437f7ea2d591430aa87e91cd09da3e088f6babed + docs/models/paginatedresponselistlanguagemodelsresponse.md: + id: 39a7a72d65a3 + last_write_checksum: sha1:42dd1db03931e12da1aed61d7232648d2b4fb150 + pristine_git_object: beb05b6d643f65696ed9b12584742757186915e3 + docs/models/paginatedresponselistmodelaliasesresponseitem.md: + id: e8720293b148 + last_write_checksum: sha1:311c1dfee790fcb0fe782e4021e6719e7c2022bf + pristine_git_object: 9728eb8cc3e4fc88074f45552336eb052a3ec559 + docs/models/paginatedresponselistrerankmodelsresponse.md: + id: e1b1ba64d278 + last_write_checksum: sha1:5feb5f7d0335b0f3d3c316231b249bc79b6d8f0c + pristine_git_object: d41f9c08cfac156c3a12551ab923bd5e8c3e687d + docs/models/paginatedresponselistspanmetricsresponse.md: + id: 6d78c6b09387 + last_write_checksum: sha1:42ad1bef22b313377a20c7f383b378f38322be1b + pristine_git_object: 1c3cc5b21543f708fdaff4a57b2245245ef7b75c + docs/models/paginatedresponselisttracesresponse.md: + id: 
e0adde8f7815 + last_write_checksum: sha1:f1eab81d36a2dfc00f20b3c98d57792cb878cc61 + pristine_git_object: fcf64215eeaf22b176bc460397d9f9dd4925d030 + docs/models/payload.md: + id: cd93a0338683 + last_write_checksum: sha1:c31dee79cef5ccd76e93830dd6514e4a90aba392 + pristine_git_object: 6f249a0ebbb9aac8b71158abd7df48763d23fdb8 + docs/models/prompttokensdetails.md: + id: 1391892fce0a + last_write_checksum: sha1:c0b36fbf04be8adeba5aa1894013d219fe9b280d + pristine_git_object: 29041b25f5d11404a0938ca4f72be7e19d820ed7 + docs/models/querydatasetentriesdatasetsdatasetidentriesquerypostrequest.md: + id: f4d9a2046064 + last_write_checksum: sha1:e1cfe4a0b1fc73cd8b92a8135149a54ec802e24b + pristine_git_object: d1f50aff1b132580bd242b7de570689e5db5f316 + docs/models/querydatasetentriesresponse.md: + id: 05ce0dd5285d + last_write_checksum: sha1:08ae7c669d2b9fd4c05cac58ce7963c16c8b001f + pristine_git_object: c23a3fec63905fc2fccf612b6a8d626b77a0855d + docs/models/queryknowledgebaseknowledgeknowledgebaseidquerypostrequest.md: + id: e92c1c9e3f5b + last_write_checksum: sha1:14ffcabafd14502530e4a8f6e7a0c8731351e82e + pristine_git_object: 1c45ade0fe2dd008f074781c548627c2970046ea + docs/models/queryknowledgebaserequest.md: + id: fb565f0e41ce + last_write_checksum: sha1:f5a4663c33540e4d457f42fff6a3f81147a68283 + pristine_git_object: 07572d72c10eddf0fb4dd049e80c1b7e68723783 + docs/models/queryknowledgebaseresponse.md: + id: b08d4a8418f9 + last_write_checksum: sha1:7d0b098641a6b9fbe06e7451649bde2f99b18d68 + pristine_git_object: 6d9d0ae5812c834d119bc1a27b182282164ca117 + docs/models/registercustommodelrequest.md: + id: 47bf493498a1 + last_write_checksum: sha1:0794ee1b0e681da7464ff5c9c395e018b1f645b8 + pristine_git_object: e964c661662fdd907fed7a2e3ffb2452673d7029 + docs/models/registercustommodelresponse.md: + id: 171aa4c89a04 + last_write_checksum: sha1:d875389fb5348baa0894a7999d2d0d58132cd989 + pristine_git_object: 92118eab81b50ed41d763043bdddf4acb41f712b + docs/models/registerfileuploadknowledgeknowledgebaseidregisterfilepostrequest.md: + id: 7318c7826512 + last_write_checksum: sha1:0d0c2b584cfccb9fd9d72e97c7a4a9fb61ce774f + pristine_git_object: 4b08cbbfcf9b1d47bb3a9c12aebd700d91208fec + docs/models/registerfileuploadrequest.md: + id: bb0ff9105db7 + last_write_checksum: sha1:76dae10cbbcd7cebbedd0199760f93c33d758753 + pristine_git_object: 756f9c3416ed6734b8a4b5b4ddcbdc750d8b5fbb + docs/models/registerfileuploadresponse.md: + id: 8b81beb8968e + last_write_checksum: sha1:7a2b89e35c646f86b91f1768695754c3341e73b7 + pristine_git_object: afdc9e10123d58775d714f708686f378b803c3fe + docs/models/rerankcost.md: + id: fa70c9151795 + last_write_checksum: sha1:2fccf2d7c052251d9c42ff4fcd7c2d9a81519ad5 + pristine_git_object: a0b8280d49e89f3ad2c95234f81f5c06651f56b3 + docs/models/rerankdocument.md: + id: 150f43082cdc + last_write_checksum: sha1:a39aaff695fcf6a8d450df8f4d66fab4061afda8 + pristine_git_object: 17ebc0ddf0245edca8543278519bce3733102a31 + docs/models/rerankrequestmodel.md: + id: a81d0924938e + last_write_checksum: sha1:4c95c31324cb790cc2992b40df11243ac19085a3 + pristine_git_object: db623f44641692cbaa5700a7d89b7688ea66fcab + docs/models/rerankresponsemodel.md: + id: 3eed549c98ab + last_write_checksum: sha1:958f015acdd309b638ee31e2b82d9e38451d30cd + pristine_git_object: 9815e1b95aefac92e8342c1f7fd0d203431d944c + docs/models/rerankresult.md: + id: 5568aa44979c + last_write_checksum: sha1:a96f150004292170e64708492c9f53497aa409c4 + pristine_git_object: 1002ed7f6562d7a1cb8aa1fc11fdcc46fad01d0d + 
docs/models/responseformatjsonobject.md: + id: 83559bf262cc + last_write_checksum: sha1:b90595a4e17f1f7610f045695602fa1452a4f257 + pristine_git_object: 88c50c40338610845c89483a3bfd60c26be51c65 + docs/models/responseformatjsonschema.md: + id: 3a7c34c63fa5 + last_write_checksum: sha1:0c577f1c32a452c9e5a2d886e38883e29937df42 + pristine_git_object: fbf8963010bb56337bb939ef3ff7c53bd03f7df4 + docs/models/responseformattext.md: + id: e80abf091260 + last_write_checksum: sha1:744e75e6f397b02cb1bc5cf1062cedd505de7719 + pristine_git_object: 1b66a7882630d2bc87a4af63e46d2d6eed651a51 + docs/models/savetodatasetresponse.md: + id: 2f5ea3c90a84 + last_write_checksum: sha1:e4d291aa7c9185a6d857743fc165b3f04622cb8d + pristine_git_object: 94b9554fdc278629707fcc4ce7147f9532c113e9 + docs/models/savetodatasetspansspanidsaveexamplespostrequest.md: + id: 7641556fc14e + last_write_checksum: sha1:71b182ddb984b7724e90d6c87f8f051fb7606888 + pristine_git_object: ab2dd0702da487ce57b3320054cc3e06bedaf770 + docs/models/searchcontextsize.md: + id: 67fccca6a74e + last_write_checksum: sha1:8638cafa2793ea6cf564e261c4c41749d2c6f3b8 + pristine_git_object: 32aeaebe48f2e1332253d19e8958a41125c34800 + docs/models/security.md: + id: 452e4d4eb67a + last_write_checksum: sha1:5ed85c83f7d7e9d9d82d54b786ee87391f299eec + pristine_git_object: 1a015367b5d2a15d68cb3ef129ac36303bb16c9d + docs/models/spandata.md: + id: fd287ec5035b + last_write_checksum: sha1:e483188da40387342d60d38dc6d6534fa20eb4ef + pristine_git_object: 54bcb9c544436d16cc1e6bac89866c32d82a5aab + docs/models/spanmetricdata.md: + id: 4d20ea29d518 + last_write_checksum: sha1:42aa254d3d6b5ba2fe4c3106863391d57a01048e + pristine_git_object: 3f3d6c16efd4cb21552da3bdc872579ff955e2fe + docs/models/spanschema.md: + id: 012f0324df98 + last_write_checksum: sha1:3d80aeca0dc25772561e0bb96ff9f7b72408c717 + pristine_git_object: e3728e88258b3a2836aa2e062486dda975c49f1b + docs/models/streamfunctionfunctionsfunctionidcallstreampostdata.md: + id: a1303451149a + last_write_checksum: sha1:3de7021a957e7bec147a8c68c2c3f205671732be + pristine_git_object: 628b34e65531554ab3df7165f635576c3fb7a7a9 + docs/models/streamfunctionfunctionsfunctionidcallstreampostrequest.md: + id: 318525b88e43 + last_write_checksum: sha1:997c77cf68e100fac21399db52aaa2e8c95a4c02 + pristine_git_object: 21feecee73403c72b5b4b6600a5fcf3949b88b1c + docs/models/streamfunctionfunctionsfunctionidcallstreampostresponse.md: + id: ab7114f14540 + last_write_checksum: sha1:0b9873eb99f66a23f8fb6ce77d6c0abaa5e92e65 + pristine_git_object: 616ca25e521b95ac5cedc623bce11ad9f658aa26 + docs/models/streamfunctionfunctionsfunctionidcallstreampostresponsebody.md: + id: e5a5f16cb2a0 + last_write_checksum: sha1:65f436ce4e35b87ee2ed3efb63a871c7cef5301b + pristine_git_object: 6138f42075da746141cb636161ba08ce34353169 + docs/models/streamfunctionrevisionfunctionsfunctionidcallstreamrevisionidpostdata.md: + id: 3099dd6ccb22 + last_write_checksum: sha1:ce26ace5fb4d84f088547424a18a489caf2a4045 + pristine_git_object: a704428da3de284f038a80d467418b3d4151ebea + docs/models/streamfunctionrevisionfunctionsfunctionidcallstreamrevisionidpostrequest.md: + id: b9589807a142 + last_write_checksum: sha1:eebf7f37d3c76598be96066681b39c6ad669b222 + pristine_git_object: dbef43b27fbe406835406631a0d833bcd1b66ae1 + docs/models/streamfunctionrevisionfunctionsfunctionidcallstreamrevisionidpostresponse.md: + id: d1c98e4ac887 + last_write_checksum: sha1:4d6b3f457ad9aa4ccd3e4390a85bb3138659683a + pristine_git_object: 03091efa54932cb953227b3714729f61d9599744 + 
docs/models/streamfunctionrevisionfunctionsfunctionidcallstreamrevisionidpostresponsebody.md: + id: a15f6a17f148 + last_write_checksum: sha1:4c74b29ba3c69461440cb042b7ddf6bb35b3e648 + pristine_git_object: 5d2d42e88eab8f2a1c64652d0ec05fa5e3955c3c + docs/models/streamingchunk.md: + id: 95b78d3db69d + last_write_checksum: sha1:b19b7de8dcef442303fa502ff0dfa11893cd367d + pristine_git_object: dc272ae63beac3542cd3a97c290580b1af408b2e + docs/models/submitfeedbackrequest.md: + id: 2a3d59a40ce1 + last_write_checksum: sha1:fe9156db7c74079fc6147fce4f9d69c335791f5f + pristine_git_object: 61824cdcb2165b4d2056c637661fea315e3e08e6 + docs/models/submitfeedbackresponse.md: + id: c1452e90e063 + last_write_checksum: sha1:72f987eed34491412cf2714090f0e94c92f56cfd + pristine_git_object: 98f7d4a2835fd3ab71d72525ffeef3160f0806f6 + docs/models/submitspanfeedbackspansspanidfeedbackpostrequest.md: + id: 20be44f1f2bd + last_write_checksum: sha1:af08fe47d0bc70de1f021bbaf2fa067ce706f452 + pristine_git_object: 619b627ef4c3635c816baef364fcbdc3de84fc68 + docs/models/syntax.md: + id: b5f23261fcd5 + last_write_checksum: sha1:632fc2775ec4066a2b1ee403579ff59bdddb4a91 + pristine_git_object: 4b82eb64344981f49f6a8fa220eb817427f4de78 + docs/models/textprocessingconfiguration.md: + id: 20c3c4157099 + last_write_checksum: sha1:3fcf1d7e323f3372ef1fd36fea21844a6645aa94 + pristine_git_object: e4cf7a5b5c3fa4ea9416df5502111196a258e151 + docs/models/tmodel.md: + id: 359d7a267d5d + last_write_checksum: sha1:f7d0a4f79e154c6333041940b99a1661aa60a11f + pristine_git_object: b2d50f71d9764a07e9b6234eaa458e52723b1abf + docs/models/tmodel1.md: + id: a9e430494b37 + last_write_checksum: sha1:76ed80ef67711b142ae142efc2db9fd7444438fa + pristine_git_object: 30ed6b95b4d23e360c0436a4b5e21030b7df6703 + docs/models/toplogprob.md: + id: f3d5d29d1bdb + last_write_checksum: sha1:f0db17a336f2d53dcd650cd1589ca16ac4222fe5 + pristine_git_object: 34de2fa8ff3aca7d51966425d389bdc7981606f5 + docs/models/updatecustommodelmodelscustommodelidpatchrequest.md: + id: 5a209826879c + last_write_checksum: sha1:e7c14d4454f6c0dad6ef821cc4fe7efda9e4465d + pristine_git_object: 9a081239fbda59fed1a7e80404b72e80b80cc3bd + docs/models/updatecustommodelrequest.md: + id: 9c279d73a0f3 + last_write_checksum: sha1:b18a38e9bb6bd72e13cea805bc8d15ad1ba76e66 + pristine_git_object: 3b3d249f5c4605dc9209cca28ce0330be04a9d8f + docs/models/updatecustommodelresponse.md: + id: 57981f380bfa + last_write_checksum: sha1:0120016068d6777808e6e782b12bbb100eebf73d + pristine_git_object: 2b73d64b20a98eeedacc47c9c7e017fee66b174b + docs/models/updatedatasetentrydatasetsdatasetidentriesentryidpatchrequest.md: + id: 83301377c223 + last_write_checksum: sha1:ef2848b83c415d5c008e81bf8f6788155fd7a6db + pristine_git_object: 6190e87c52a373be096af69734bda8b0892c15a1 + docs/models/updatedatasetentryrequest.md: + id: 1eb32690c7d9 + last_write_checksum: sha1:3a5aa4e971873b2a16e87d7b2f3321b80fb1e612 + pristine_git_object: 362780c91543950798f43305507d08e231e9f1da + docs/models/updatedatasetentryresponse.md: + id: a3e87923a814 + last_write_checksum: sha1:9ddd45c6e132d9d8f27598ee640dbee638829bcc + pristine_git_object: 5946212ef7648a7eef02cf046c8f796d2d2a5a6d + docs/models/updatefunctionfunctionsfunctionidpatchrequest.md: + id: 4e4104eb3fea + last_write_checksum: sha1:dcdcf891b22beb3c817acc3742731f252bd84320 + pristine_git_object: 91fc908cc520fc0a4fa366dc45df56da025083d2 + docs/models/updatefunctionrequest.md: + id: 90c80384e8df + last_write_checksum: sha1:6b57e46afaadbff968eafa74c9ff267f719b513f + pristine_git_object: 
b3ef3dc75d106fb1e83a6276e5c83812d205048e + docs/models/updatefunctionresponse.md: + id: cbf47276a63c + last_write_checksum: sha1:aa40dfc29da83cf37b38274de8fa499b107f1015 + pristine_git_object: 37521e4a50d113bc72f17b414a3ca250a00554bc + docs/models/updatemetricspansspanidmetricsmetricidpatchrequest.md: + id: f7043361ab12 + last_write_checksum: sha1:cb0e6bdfa4e1cbbc7add2213bc224183b1ce7b33 + pristine_git_object: 33de64f87f06032ea787c23174f569f4f229a519 + docs/models/updatemodelaliasmodelsaliasesaliasidpatchrequest.md: + id: 22bd974146f3 + last_write_checksum: sha1:1cbcfbe80159325a879d46c51916013b10e1d261 + pristine_git_object: 90656edbf3bd8ad3fa1c80f7f19f311faee03ddf + docs/models/updatemodelaliasrequest.md: + id: d8fcc898a0f2 + last_write_checksum: sha1:eb5aedc09ccfcea18e55fd3148492bda97c8b38f + pristine_git_object: bf749057b003d8d157b05ee28052db8903b26986 + docs/models/updatemodelaliasresponse.md: + id: 8066f5459f17 + last_write_checksum: sha1:0a0e76a2336375dfae31711e842822adce1ebad9 + pristine_git_object: 39d7c138bf6dae06ad0be5e8c3ad160963b28444 + docs/models/updatespanmetricrequest.md: + id: fa96f95ed43b + last_write_checksum: sha1:c6a828827f78e3884007986bfe1134c93ab1c537 + pristine_git_object: 4aba819e92c88f66787a1ed61f86c40747032d57 + docs/models/updatespanmetricresponse.md: + id: db9d55d7d45b + last_write_checksum: sha1:4e1c3660b06a061635eabb940fb7929c3bddc154 + pristine_git_object: 86898f3afac2cf4c049b526c389b505848cb1fe3 + docs/models/updatespanrequest.md: + id: 0cc273f44164 + last_write_checksum: sha1:b40ea6546c768c0d215a2a2eba275c2caf7225c6 + pristine_git_object: 259d8fa1af941dc21761363927ca8c32b883581d + docs/models/updatespanresponse.md: + id: 163e62c13f3d + last_write_checksum: sha1:4b0e771d63b4081934aa8d1e552b27f6e2cd1600 + pristine_git_object: e328158766a8fdd6a7fdcfc727fcea14901fd364 + docs/models/updatespanspansspanidpatchrequest.md: + id: fc6207361af0 + last_write_checksum: sha1:5620647a95d51af00f4bd48307b0d260c64ff9e4 + pristine_git_object: 7f4363140161b77cae605154ee88a7e3e6d00900 + docs/models/uploadfileknowledgeknowledgebaseiduploadpostrequest.md: + id: fa3d2bb1002f + last_write_checksum: sha1:29fd9b4bd0ef0692b333803b830919c909a03fd3 + pristine_git_object: d426be2935f3c51a046dafd1c5d8ca24b2cc2d53 + docs/models/uploadfileresponse.md: + id: 030a92cf9afd + last_write_checksum: sha1:90976c56833a87a5235b7c7931520a173c2f51e9 + pristine_git_object: cdff9d1d673bcce6acdfa26948c916c600f02f39 + docs/models/usageanalyticsusagegetrequest.md: + id: 11b03c9b0410 + last_write_checksum: sha1:0899fe8fb236b659e912b1a292b827baa20b664d + pristine_git_object: 06eaf259ea088efadd3b104a8f61199396e3359a + docs/models/utils/retryconfig.md: + id: 4343ac43161c + last_write_checksum: sha1:562c0f21e308ad10c27f85f75704c15592c6929d + pristine_git_object: 69dd549ec7f5f885101d08dd502e25748183aebf + docs/models/value1.md: + id: 5d6c54c6615a + last_write_checksum: sha1:d47c033cdf7c3736e7b691a4a40331b49422c266 + pristine_git_object: e2f923be9fb204cc7f14064187d43a177657f7b6 + docs/models/value2.md: + id: 4eceb26154e6 + last_write_checksum: sha1:a5a90137f77c49790ff5a6e1dd9ab649685cdda6 + pristine_git_object: 692669b4087a94d5c890d50d5fd62f049dada858 + docs/models/voice.md: + id: f73e25e55a5a + last_write_checksum: sha1:d44e6c5ed42f012ef96f3d390818a9b4ba3e565f + pristine_git_object: b8fc1366ca48042acbcf6c6fce4e6e890c462d96 + docs/models/voiceenum.md: + id: d02cf0fcfde0 + last_write_checksum: sha1:b210dab0cd55e594e729135df47035db2408b18c + pristine_git_object: 69da0246a435406f4c6a90f6a14ebf5ce518ff1c + 
docs/models/websearchoptions.md: + id: 0302a3b16daf + last_write_checksum: sha1:dfb7cd1f68c22437c65b5dcae78ee5a89d0f9f14 + pristine_git_object: 914edbc5909611b0e63e86148f838da9bbb10c94 + docs/models/websearchoptionsuserlocation.md: + id: 978bc53b300c + last_write_checksum: sha1:dd1ab0fd5fa8517a4991b58e94a974e63d80d214 + pristine_git_object: 04850bf61880324081edbeb2c69147311eb5ecda + docs/models/websearchoptionsuserlocationapproximate.md: + id: c66bd4d46038 + last_write_checksum: sha1:c7e4f2a8df871ea8db627f2ef6109d1679ce0fa5 + pristine_git_object: ab30100d9159369a2e0043096a1f16f41d473d73 + poetry.toml: + id: a81ade82122a + last_write_checksum: sha1:2242305e29dc6921bdf5b200aea5d4bf67830230 + pristine_git_object: cd3492ac9dc870fdcf23dbd94fd1d40cc753cc8e + py.typed: + id: 258c3ed47ae4 + last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 + pristine_git_object: 3e38f1a929f7d6b1d6de74604aa87e3d8f010544 + pylintrc: + id: 7ce8b9f946e6 + last_write_checksum: sha1:6b615d49741eb9ae16375d3a499767783d1128a1 + pristine_git_object: a8fcb932ba2a01c5e96e3b04c59371e930b75558 + pyproject.toml: + id: 5d07e7d72637 + last_write_checksum: sha1:fde18ac941f32732effc089f7afc71233bace992 + pristine_git_object: 448493dca99dbf23c7a4a982b5b458f1ac5593fb + scripts/publish.sh: + id: fe273b08f514 + last_write_checksum: sha1:11c8a74f613fdb29f432e3feafb84072eeba6428 + pristine_git_object: 5eb52a49483fd30944c0a030f0decde1360f6765 + src/opperai/__init__.py: + id: 930df2025d32 + last_write_checksum: sha1:da077c0bdfcef64a4a5aea91a17292f72fa2b088 + pristine_git_object: 833c68cd526fe34aab2b7e7c45f974f7f4b9e120 + src/opperai/_hooks/__init__.py: + id: 56875f9a0383 + last_write_checksum: sha1:e3111289afd28ad557c21d9e2f918caabfb7037d + pristine_git_object: 2ee66cdd592fe41731c24ddd407c8ca31c50aec1 + src/opperai/_hooks/sdkhooks.py: + id: a9ccb84bc702 + last_write_checksum: sha1:267767077a69c768dd13611a652f69d22d7117c8 + pristine_git_object: 26fb333c9ea9b1b09c78535ae90fbac7bfc027b0 + src/opperai/_hooks/types.py: + id: 1550901aa96d + last_write_checksum: sha1:b8221739d68f66346807a72147317540f3f1f180 + pristine_git_object: 9e009e34d493f15cac93e967908a8e1c8f278eef + src/opperai/_version.py: + id: 72eb03a2ae7b + last_write_checksum: sha1:a5807db7deb1b77ccd7ef1d4f1c201e7ff2bbf78 + pristine_git_object: bc661c82eee8cae30b1633b4bbd8c620c1c2a288 + src/opperai/analytics.py: + id: cae4a5545a87 + last_write_checksum: sha1:206040207884b0083db5439ca94c43966045ccec + pristine_git_object: f499d6816b9d6c8932b9d8b41c0dbcac9ca8c913 + src/opperai/basesdk.py: + id: 2acaa9a12318 + last_write_checksum: sha1:38f192e2359d2875fbf6585ab246af805feca547 + pristine_git_object: e71083795328cb195a1a2ed0afb3484f95bb90b8 + src/opperai/datasets.py: + id: 98ecd2ecd6e3 + last_write_checksum: sha1:268f7b82f10b8353c6577ab92d001854d554043d + pristine_git_object: 88713d91dc0442add07c0632b130cc53bbdcedcc + src/opperai/embeddings.py: + id: f3743e190c1b + last_write_checksum: sha1:3e945964647e75bb5d39b9d64b8632c55f7253dc + pristine_git_object: 3c30c0fe3e746453164c9e8b1b7ab95b3aaec671 + src/opperai/entries.py: + id: 2e0ba35e80f9 + last_write_checksum: sha1:759723ecd287d23d29f3f9b5cd9fdb569278b484 + pristine_git_object: d619b89e45c01a80f2ffac36bddf5ba7d09f6491 + src/opperai/errors/__init__.py: + id: b26e2142cf2b + last_write_checksum: sha1:86e16693dc08bea86a989eff9748e96d48794950 + pristine_git_object: 9d109e9c7be4f554c1471648021b8425ef6a266e + src/opperai/errors/apierror.py: + id: fab980e28b53 + last_write_checksum: sha1:4a5f779b8e1c6e4fae5798e8d03d848dd596891f + 
pristine_git_object: 8dfae77566cbcdbbe8711eeadca6f03bf99d86e5 + src/opperai/errors/badrequesterror.py: + id: 11f62090fc94 + last_write_checksum: sha1:f0eab20b326014cbd4b017a16ec9647becc315e5 + pristine_git_object: e9bc804db278bf3343d9c14e8a0f74d9b09fba70 + src/opperai/errors/conflicterror.py: + id: 596afb1b521f + last_write_checksum: sha1:184e41ae40e646a0337e7c9b2fbf925a16d2cd1c + pristine_git_object: c7840b299689f46759a41eea624df59f4a491f30 + src/opperai/errors/error.py: + id: 6829a3436c79 + last_write_checksum: sha1:ed187a8e6f152a59752366e3367546a5cf72b272 + pristine_git_object: 3177205f67a0f005e8303648d10934924978d553 + src/opperai/errors/no_response_error.py: + id: c39b6cdf541f + last_write_checksum: sha1:7f326424a7d5ae1bcd5c89a0d6b3dbda9138942f + pristine_git_object: 1deab64bc43e1e65bf3c412d326a4032ce342366 + src/opperai/errors/notfounderror.py: + id: "899594637008" + last_write_checksum: sha1:e8f644f46e0171ed8ddd87077ea8e6e0ad68bb53 + pristine_git_object: e3440386c10db5413b9c019ba0f9da37e12ac546 + src/opperai/errors/oppererror.py: + id: c1bbb4728c34 + last_write_checksum: sha1:4f8280abfa91a12cbae7f70ac5c7e274b311a82f + pristine_git_object: 87c501643cae3c56df37967ad60c98f79ad18656 + src/opperai/errors/requestvalidationerror.py: + id: e34a9de15861 + last_write_checksum: sha1:fcb444642b37b070fa04e67da00b0f0d636307fc + pristine_git_object: a79bccc983321a2d1de1bc48c453d5a840155b3f + src/opperai/errors/responsevalidationerror.py: + id: 4aa82212c4bc + last_write_checksum: sha1:ef7ec7511e4998f979a8085484d809f8daf39a18 + pristine_git_object: fbe8a90af622ee085668ca6142d21d540dd3ede2 + src/opperai/errors/unauthorizederror.py: + id: d70a1ed5ba75 + last_write_checksum: sha1:31ce9e0bbfba3d9cae8885836e585cf1dc67be02 + pristine_git_object: c88469b0c8f8527c06107c09508842342877dd14 + src/opperai/functions.py: + id: 5d84a036dac4 + last_write_checksum: sha1:b5ffef68687bf0d8607b8b637ff48698a98a728d + pristine_git_object: 5614144b9da7b7b9af26a9a32cd8b179c7ff3402 + src/opperai/httpclient.py: + id: 58a1852610c9 + last_write_checksum: sha1:5e55338d6ee9f01ab648cad4380201a8a3da7dd7 + pristine_git_object: 89560b566073785535643e694c112bedbd3db13d + src/opperai/knowledge.py: + id: 6c7b723bfb71 + last_write_checksum: sha1:38925e5ad2e6e66738621ec998042ba9f89dc9fa + pristine_git_object: 0eda2feced25a840177db4fc2dccf6bc03c0f24c + src/opperai/language_models.py: + id: 5dc93a45f506 + last_write_checksum: sha1:e5ad32e464afa8656defb06d7a3e56670bc37397 + pristine_git_object: 3160408e1c0b10c2398279c387d8dc78218122a1 + src/opperai/models/__init__.py: + id: fc680f177e50 + last_write_checksum: sha1:e54b63f650ed3f11fa22ebd79da52055f596056d + pristine_git_object: a190a30560ed37e02f446afa9d6c2e4c977b7e9e + src/opperai/models/add_knowledge_knowledge_base_id_add_postop.py: + id: f518c11078dc + last_write_checksum: sha1:b8489e8694f229d84a726067dc642068ee8b4945 + pristine_git_object: 58c9d6bb4f553ea462a5832db4e76e902af1bd57 + src/opperai/models/addrequest.py: + id: d7f63d5b7ad3 + last_write_checksum: sha1:78d343ec2af76387d636a7eb116e807af8d5dc70 + pristine_git_object: 63ee7f705dea230ddb08a946152a3261215e7744 + src/opperai/models/annotation.py: + id: b002b03bd07e + last_write_checksum: sha1:aaaaad4f67b79fdec9649f3ea299ee235150f3fb + pristine_git_object: e1b232d66bfa73344e683824c3873e361b8f42d5 + src/opperai/models/annotationurlcitation.py: + id: 85dfeeb517da + last_write_checksum: sha1:5364f7262fdbb5f2e0d9f4ea8548702a1ce9369f + pristine_git_object: 190f37d4d571893846bec52da5e86d754565e8a5 + 
src/opperai/models/app_api_public_v2_function_call_callfunctionrequest.py: + id: b79cf1669258 + last_write_checksum: sha1:b01f424582a50183737e2d1000e843c61706f74a + pristine_git_object: b9ca70eb77b49fa4660c06210aa9fe18a61e8a48 + src/opperai/models/app_api_public_v2_function_call_callfunctionresponse.py: + id: 23d8368b106b + last_write_checksum: sha1:5db545981dd99a4640c5416ac7b8128c3ae557bc + pristine_git_object: ae64e39f9426fb5bae753f3896798fafafb8a0ff + src/opperai/models/app_api_public_v2_functions_callfunctionrequest.py: + id: 00df8cb688e4 + last_write_checksum: sha1:c2cdfb5d48076fcb114c2379b944cc0498afc6e0 + pristine_git_object: 2220a8b639758236f991750528d759fdfbde3790 + src/opperai/models/app_api_public_v2_functions_callfunctionresponse.py: + id: e9dfa0844781 + last_write_checksum: sha1:1c82b43acd1c5abe112764a47656ea4cbcba4512 + pristine_git_object: 567e696cd2a6d70fd75b5c8db91766647d87ae1c + src/opperai/models/audio.py: + id: 62f855df7950 + last_write_checksum: sha1:e31f48ff24b33e999d9fd65ddf212a462a58bb83 + pristine_git_object: f31c84a0a2dad3919832b5906a33a0ac08553be7 + src/opperai/models/body_upload_file_knowledge_knowledge_base_id_upload_post.py: + id: c9952da61b34 + last_write_checksum: sha1:d1b37036f938157ad1ce73f7db2324d0317e7c8a + pristine_git_object: b045539ae1a52252e55c568493fff889b8bbbbe0 + src/opperai/models/call_function_functions_function_id_call_postop.py: + id: 8ac24d50bf78 + last_write_checksum: sha1:86affcaa036330bbcb54b3f50187678bf5f027bc + pristine_git_object: c7493ef134d98e7480162b59035053180b3e03bb + src/opperai/models/call_function_revision_functions_function_id_call_revision_id_postop.py: + id: 533920ae8216 + last_write_checksum: sha1:7abd4b069f53d049ec5f8899019ef3f406cc5b98 + pristine_git_object: 3c87c6cbcaa71ac5b7952253bae875f26a71e6db + src/opperai/models/chat_completions_openai_chat_completions_postop.py: + id: 7d9eb7100a42 + last_write_checksum: sha1:c2c9c7931b751fb3b051f45dcf623937c7e18f06 + pristine_git_object: dac604a9f63917c57badaf57cc2c1e19dafc941d + src/opperai/models/chatcompletion.py: + id: 3954c9cbbe69 + last_write_checksum: sha1:8a0fcddb7c5af2a03f3bc0a8fee8ef3566765a66 + pristine_git_object: c19ec43eb4b22f2274705ecdaabf7c3215fecc6b + src/opperai/models/chatcompletionallowedtoolchoiceparam.py: + id: 54bfa8362942 + last_write_checksum: sha1:b51381701d0d8bc9cb53191264d47f3d5d81ee6a + pristine_git_object: a50eb218e830ad2200bb988c770b03bc0d0e6887 + src/opperai/models/chatcompletionallowedtoolsparam.py: + id: f36e57130d6f + last_write_checksum: sha1:3f76fce4b0ef3da56f9be29fcb137d77450676db + pristine_git_object: 92a43f6fd130742857040ab2c1f4bf72db201a94 + src/opperai/models/chatcompletionassistantmessageparam.py: + id: 78bea42cf8fd + last_write_checksum: sha1:25f7370ffdfb6303e63abfddfe73af67dbcb2b13 + pristine_git_object: feb0681a249f4a40848af947b6066489b5849937 + src/opperai/models/chatcompletionaudio.py: + id: 5cab0e7e1282 + last_write_checksum: sha1:14831ac3cd4e40fb838467a58f9dcc9de65d24e6 + pristine_git_object: 5a47b0459b567e8dc2be569bcd27950856c38dc8 + src/opperai/models/chatcompletionaudioparam.py: + id: 274c36ad33af + last_write_checksum: sha1:af7bfbccbc751f8f7e9cd02e0eeb70bf83a60099 + pristine_git_object: c4e5137b5d20d2d610c1981be9da2860910879c9 + src/opperai/models/chatcompletioncontentpartimageparam.py: + id: 11395422d579 + last_write_checksum: sha1:e2c1d8fe305711b4581f668be95cea8bb74df7eb + pristine_git_object: 0b9ed3ba25f9b418c42a52e2cc5eae37de39130d + src/opperai/models/chatcompletioncontentpartinputaudioparam.py: + id: 9f30566da652 + 
last_write_checksum: sha1:882a6d73f21f80b2156e1a5d9c4e841b43daa206 + pristine_git_object: 13abf666fa3d33270c1881ee29a2564acbe7c25f + src/opperai/models/chatcompletioncontentpartrefusalparam.py: + id: 87ef697b5916 + last_write_checksum: sha1:d0642446d231fac2ea63658cfd03c93bd8d556ee + pristine_git_object: 58f18c2b525b87e7c1f3efc6d5dc9172a67f9ab6 + src/opperai/models/chatcompletioncontentparttextparam.py: + id: f0ce491b4543 + last_write_checksum: sha1:67cdc466ae3e4f9ad460de343c26d796e535c5f5 + pristine_git_object: 6780a8665fd8a03f107246fec12a220b8353650e + src/opperai/models/chatcompletioncustomtoolparam.py: + id: 367682bba4b7 + last_write_checksum: sha1:9e49252c222941d56bc99c6f014a9be2fac4f37c + pristine_git_object: b382ca7ac3ec5c8ac72bd0bf2995fc569a3a2336 + src/opperai/models/chatcompletiondevelopermessageparam.py: + id: 3a897145cd9c + last_write_checksum: sha1:5cd38793b6460ef69d3a7baed59bc33d936aea61 + pristine_git_object: 70f872d92423300ba8d179966afe4fa23cd8290c + src/opperai/models/chatcompletionfunctioncalloptionparam.py: + id: 9f745eb095cd + last_write_checksum: sha1:31bf7d03a4e237c1c5e2c486e32da0f8a2837be1 + pristine_git_object: 8f1847a03b334548c994d30f15a7042868decc17 + src/opperai/models/chatcompletionfunctionmessageparam.py: + id: be66df172123 + last_write_checksum: sha1:000733f0a54cc4eca5b887ec78d1245692d8b6e9 + pristine_git_object: 37693d4e4a4af8be726d25ae5123313e1fe09554 + src/opperai/models/chatcompletionfunctiontoolparam.py: + id: 19cb8889b45c + last_write_checksum: sha1:63b840fc2314bf6eb9e1a7d98836f310d779f36a + pristine_git_object: 5830b00cade1b2ce86aca44f995d56f6e1b82ccb + src/opperai/models/chatcompletionmessage.py: + id: 03402762b461 + last_write_checksum: sha1:b8a48ee590925a049b93faf3812a072e74f80853 + pristine_git_object: 83590412a862b046c9c5ae120145cef91f8dfe82 + src/opperai/models/chatcompletionmessagecustomtoolcall.py: + id: 9fb0c651737d + last_write_checksum: sha1:f119bb318416fd4cbf772fb98d557f10bfe0fe0a + pristine_git_object: 08647e5e2669445f3b49ec80ce60aedf0d12dc01 + src/opperai/models/chatcompletionmessagecustomtoolcallparam.py: + id: 263ae3dd548d + last_write_checksum: sha1:78ef0b53fc02a36f792addfd8c2be507efe69918 + pristine_git_object: 2bab13821a48db345ff82c08cc850649c3df517e + src/opperai/models/chatcompletionmessagefunctiontoolcall.py: + id: 83ec0d01edb4 + last_write_checksum: sha1:2bb688e2d685b3ab3b0fe97363fb3a9766270ea8 + pristine_git_object: 53a8e335e7798e1333d8ed1ea533de2c17e419eb + src/opperai/models/chatcompletionmessagefunctiontoolcallparam.py: + id: f70c84a325e4 + last_write_checksum: sha1:594c94bf2a4b03503870fb852b8f386b55ffccd4 + pristine_git_object: f5a5140291d90592e0d1d222d771e97dad77f8d8 + src/opperai/models/chatcompletionnamedtoolchoicecustomparam.py: + id: c2c78befde48 + last_write_checksum: sha1:104f694fa665c05f387d1be1753638c328ef2fe9 + pristine_git_object: 44e8a8cb27683eb9af8654dcd42f83f4047773ce + src/opperai/models/chatcompletionnamedtoolchoiceparam.py: + id: e324e502d168 + last_write_checksum: sha1:16cfe5857e1f75312fca8fbbf231cfbab7fddb9f + pristine_git_object: 9d7bd0e2d7fe0d6f6662ab17de4f2e7c9cd7c46c + src/opperai/models/chatcompletionnonstreaming.py: + id: 4fa5f2d20d55 + last_write_checksum: sha1:a59527b4d2a4ed9d874d74835978ca8377a7e8ba + pristine_git_object: 1bd293a3b05dc2f94e508970bd3d5029f106794c + src/opperai/models/chatcompletionpredictioncontentparam.py: + id: 2bf4105e0033 + last_write_checksum: sha1:f1c3e7275d4813378c890f71367dc32dd9c5fe16 + pristine_git_object: de4a5a2d4235f706450d069b6eab82c768d3a753 + 
src/opperai/models/chatcompletionstreaming.py: + id: 31e7807bffaf + last_write_checksum: sha1:50cf6d03ec6ae34d0c637766f6f05eaa0bfe3b61 + pristine_git_object: 5ad5d8db6528d7a5c046eaf18044f6529fa73cc9 + src/opperai/models/chatcompletionstreamoptionsparam.py: + id: 8aaa0697616d + last_write_checksum: sha1:dcaffaac8942c9dbe7fb0035274df8c74bc4d2d6 + pristine_git_object: be6d4c6bce7a6f27b81d1f2b89649bc7496e5acb + src/opperai/models/chatcompletionsystemmessageparam.py: + id: f9198eb786ba + last_write_checksum: sha1:7725dc0d60b023bd9b70448a94e76c299cba6683 + pristine_git_object: ae72880b77a0ec58d9a97ab7e78c98876827c222 + src/opperai/models/chatcompletiontokenlogprob.py: + id: a8dfdfa0dbf3 + last_write_checksum: sha1:4ca595e3949a590a1fc71ed5a846fa3fe1cc76eb + pristine_git_object: a316fefb4de9c57d808d3fba26167f40a0c0f2c1 + src/opperai/models/chatcompletiontoolmessageparam.py: + id: 46d120a0e864 + last_write_checksum: sha1:7382bced164750b12f212378fffbc2fb1354e0b2 + pristine_git_object: 51adf9ac1ec71f8ccbf6c350d299800402be9f18 + src/opperai/models/chatcompletionusermessageparam.py: + id: 5691e9c98dea + last_write_checksum: sha1:c43d4730884a473f173040a232047e0d177db88f + pristine_git_object: e3121eef4d87928ea9af7e58bba64295645f66c0 + src/opperai/models/choice.py: + id: dc69237a701e + last_write_checksum: sha1:02b44a26de22c1187e9f67837fdf8ea855cebf53 + pristine_git_object: 3f79dd06ecd384ddd3289782da8143ff134003dd + src/opperai/models/choicelogprobs.py: + id: 11d9a63e5b76 + last_write_checksum: sha1:84169592e26e6bab25d754cd90a45ce0d29395b0 + pristine_git_object: 9d1c75e09fb746b5163df7d4b2b2693c1c8261db + src/opperai/models/completiontokensdetails.py: + id: def506fb4f79 + last_write_checksum: sha1:68a753ea7bccd0001fe2368e10f4cb3c5b9212aa + pristine_git_object: f68bd03ec4af11e502983e417447d34dd4633a7e + src/opperai/models/completionusage.py: + id: 77cf16b67c5b + last_write_checksum: sha1:b3ce68b329e65de62610acd0614f4f3f5513569f + pristine_git_object: a998efcfb8c40df7a9abde51d7984b10b8ee4010 + src/opperai/models/create_dataset_entry_datasets_dataset_id_postop.py: + id: 608a1a3bc022 + last_write_checksum: sha1:b432bd43102bfa8497c19b8add0ce2f58eb830c1 + pristine_git_object: cf45edb98465ae7012c856ab272a86d6e4c4a409 + src/opperai/models/create_metric_spans_span_id_metrics_postop.py: + id: ad69fbdedd4c + last_write_checksum: sha1:eab836e6b5ac6edc65a3fc59380716083aea7ffd + pristine_git_object: e672099cd7b977695b5f6de020b28cfcd36a4f4d + src/opperai/models/createdatasetentryrequest.py: + id: 6cdb06bc71cd + last_write_checksum: sha1:3a6c0be931b3e931f20bd578659d35e83168706b + pristine_git_object: 79453172b25ed1531be05db009e1b2b167194b2c + src/opperai/models/createdatasetentryresponse.py: + id: 4c5fb992e7ce + last_write_checksum: sha1:8011deef69df312354766c7f0ba49b94f1c36e2d + pristine_git_object: 413e6d9d5056e415186bfe53c263200a8038e9a3 + src/opperai/models/createembeddingrequest.py: + id: d8e001f8889f + last_write_checksum: sha1:9a878db82784f329a4449b9c610baa4ddac765a0 + pristine_git_object: 32605fc85cfd770ed80fb08d83b72026b4506196 + src/opperai/models/createembeddingresponse.py: + id: 02631e2f02d4 + last_write_checksum: sha1:8841b892b4b94c34d59f78729dd5be3c0693e4f3 + pristine_git_object: 970cf1f596eb0664d1b0beeca75cabd05f8c8cb3 + src/opperai/models/createfunctionrequest.py: + id: 46b1036ecb06 + last_write_checksum: sha1:9833c9b5dafe3bf5dc1701e8362197fb97affa0a + pristine_git_object: 14a38ae35d951c6f0dde6686420b76d4fdec1452 + src/opperai/models/createfunctionresponse.py: + id: ebab301a508d + last_write_checksum: 
sha1:86bb9532c09d8e604b5253682a379c0a8434db4d + pristine_git_object: 0c25ce3792108f74dd3a4887ec3ac4abfaf82f33 + src/opperai/models/createknowledgebaserequest.py: + id: 07f1b1b0af6c + last_write_checksum: sha1:2c78168e5b5dd1c32e4ddfaff937bc34ed0c0a51 + pristine_git_object: d49822a682cd3d9103304eef51a2740c34915fcf + src/opperai/models/createknowledgebaseresponse.py: + id: a461e8eae519 + last_write_checksum: sha1:6551ba8a40224b97665e34396704cd822852f5f5 + pristine_git_object: a3fc10426d95c13feb496eb8544eed3847ecf0af + src/opperai/models/createmodelaliasrequest.py: + id: 0548866bd255 + last_write_checksum: sha1:02269f893d1877819d34447ba66a1e9ad2d9ee5f + pristine_git_object: 670707555885773c00c725eddc87cec04a818b5a + src/opperai/models/createmodelaliasresponse.py: + id: 159a3ca06da2 + last_write_checksum: sha1:f7aed18f3b9cb59c8b9cec1cea0cc501c0f1d3e2 + pristine_git_object: 2e87cb104ccb91f3c1723b53df2e236093ff18de + src/opperai/models/createspanmetricrequest.py: + id: 3688e07c0747 + last_write_checksum: sha1:ce2c3dfc79f3bff07d35ef5661b7ca3e57f808f1 + pristine_git_object: 1cc9fd4090f4f5fd3987e8b5755376e7865d8a03 + src/opperai/models/createspanmetricresponse.py: + id: b650f3d13942 + last_write_checksum: sha1:c550aa078791262d64a35dad792a4d0d2e9c3df3 + pristine_git_object: 9eeed40f004e1fdfed15c1af22cf7776e29d4b70 + src/opperai/models/createspanrequest.py: + id: cada1e72a13f + last_write_checksum: sha1:21f936cf05f8d3ae6b35827c89f20e258a89cd51 + pristine_git_object: 5415e327ab9582ea69bb2c0a0fbda5e46ada92a2 + src/opperai/models/createspanresponse.py: + id: b3d7bead20a4 + last_write_checksum: sha1:25a6374af139c463da3468c6a4ede49fe63d1927 + pristine_git_object: f452994c01f88a0580f9a7fd81eac4bbc52472f7 + src/opperai/models/custom_output.py: + id: 984a2be75ae8 + last_write_checksum: sha1:85a0cabb36ef41564bb47c98a491ba8cba1bd2de + pristine_git_object: 52bcbfc52d5f977cd096691b511e5102f4a032d1 + src/opperai/models/customformatgrammar.py: + id: 626cd9fd2417 + last_write_checksum: sha1:54fd2faa67bd5c4c98c2666a4dea53f9115497fb + pristine_git_object: 2542133f2d3d10e5fff88a293f1197f3834ad581 + src/opperai/models/customformatgrammargrammar.py: + id: c0df6e29ed2a + last_write_checksum: sha1:0f35f363695c957a4806e641becad3e43ef3ef9a + pristine_git_object: 46d606609b39bcbcb91dfa86f59b65ac027e789d + src/opperai/models/customformattext.py: + id: 8d876aca4261 + last_write_checksum: sha1:ddb44341a196d3eb383747664f574da6aae4fa57 + pristine_git_object: 61c21450671060f23ab3c0b29679191dfc77de67 + src/opperai/models/delete_custom_model_models_custom_model_id_deleteop.py: + id: 5f5304837857 + last_write_checksum: sha1:13f092ec00103603fca5c9a8e0bfc4254b31bce0 + pristine_git_object: 7a491181c6228da2669980a7d50e1491fdafeeb4 + src/opperai/models/delete_dataset_entry_datasets_dataset_id_entries_entry_id_deleteop.py: + id: 2b6d715726cc + last_write_checksum: sha1:1d47a08a501fa20ab583d1bdf498893e4bba3840 + pristine_git_object: 193aa652731c70860ffe79e6c55658ed764c03df + src/opperai/models/delete_documents_knowledge_knowledge_base_id_query_deleteop.py: + id: 8574f28f4069 + last_write_checksum: sha1:02ca6a32fe276aa672ebc95701f657fe9921d7a0 + pristine_git_object: a5618bc38c61ba7a74bc12e96e4258392d42ef99 + src/opperai/models/delete_file_from_knowledge_base_knowledge_knowledge_base_id_files_file_id_deleteop.py: + id: a4ff2f3d1247 + last_write_checksum: sha1:73cff24f0d59edadf87bc4434581a918530fc63f + pristine_git_object: 2f1d357c0296d00cf02875b15532cb81d77db2c3 + src/opperai/models/delete_function_functions_function_id_deleteop.py: + id: 
ec7fc31b5aa5 + last_write_checksum: sha1:9fd1959447f5d71cea760e45007f31c8957de224 + pristine_git_object: c59d4af9d97f4e4c30f339f2db815440329fc8cd + src/opperai/models/delete_knowledge_base_knowledge_knowledge_base_id_deleteop.py: + id: a64eb90c229b + last_write_checksum: sha1:6c15903cc1102b01ed4fa802f7f9ca21605355b5 + pristine_git_object: a68675ed15ef566d7c818b330af10f8d7247b3f3 + src/opperai/models/delete_metric_spans_span_id_metrics_metric_id_deleteop.py: + id: 5dd53d68d446 + last_write_checksum: sha1:cf59c2e8d07f234ed5f2314cf8282a8d4e526014 + pristine_git_object: 62e3a187d488ab3c893f87e4e64ab07d1a9f8542 + src/opperai/models/delete_model_alias_models_aliases_alias_id_deleteop.py: + id: 96211269eaee + last_write_checksum: sha1:41727129c309d3adcbf080fc7d8ddfeed9402456 + pristine_git_object: 450a3f0dc37801edfd8143caed2707d931c4ac2b + src/opperai/models/delete_span_spans_span_id_deleteop.py: + id: 5acef51d606d + last_write_checksum: sha1:1d46dc3cd4808fbca0a50a695fd7334b4ed85d54 + pristine_git_object: 291da2c2a63dc0f6c7bc5802bfc4cc67bcf2a2e2 + src/opperai/models/deleteknowledgebaserequest.py: + id: 15c4a6f8d0e5 + last_write_checksum: sha1:8542ffd54cd3d67f3934649f422c41d393debeef + pristine_git_object: 4cc95b313537c5ef3e0e0377e344824c109c8a8d + src/opperai/models/deleteknowledgebaseresponse.py: + id: 36f4f2d31ac9 + last_write_checksum: sha1:b0dde2527ea75a2c42380687bd001c83b2685eeb + pristine_git_object: d458c387bb647aaccf1ab484ec8ba98ef45c0a50 + src/opperai/models/example.py: + id: 5430aafdbbe7 + last_write_checksum: sha1:b3a4d730cfa0637aef0464c73de52782f3373359 + pristine_git_object: 34048e5b6bea991b007639ff115cae7eb0bf42ed + src/opperai/models/examplein.py: + id: 59d656f0e3b4 + last_write_checksum: sha1:5a90ff8ad2ea27792dd0eec5405d0bcea5d1c4c0 + pristine_git_object: 504324e0261b6a4d9bc20aef485bb41afb26d44f + src/opperai/models/feedbackinfo.py: + id: eb3010a307d0 + last_write_checksum: sha1:8246a01575a16788ca4c85221a8e110732a46333 + pristine_git_object: 3ab88ddca26bf5360907aa9cfec17ff15d551f20 + src/opperai/models/file.py: + id: 61d13f6d40a0 + last_write_checksum: sha1:533ad2ee09013c7324e77eb97fc2307415259eff + pristine_git_object: 4a7ce01cf0e0b99658afce6a85beced3df634a56 + src/opperai/models/filedownloadurlresponse.py: + id: 45e44ddc6761 + last_write_checksum: sha1:12a75e42d3ae0e274526fdce443f783da27ff5d7 + pristine_git_object: 18aa248e26f5a5c36c2c594e2ed290c4b0e3ddee + src/opperai/models/filefile.py: + id: 0cb1838ee4fe + last_write_checksum: sha1:5834e66c77fdc8015e51572eaab9661e0d8223ab + pristine_git_object: 04a5ee84a2fef2096882d9d347022e17b59e7b30 + src/opperai/models/filter_.py: + id: 099ba22b2822 + last_write_checksum: sha1:79e89e9c79ecbb2178623d8f74343608a00700ba + pristine_git_object: 246f10be2e40dbcae77bf59f619b8a2e5c1715f9 + src/opperai/models/function_output.py: + id: 8834291ccfb8 + last_write_checksum: sha1:3e7989b81a4e4b2be8c1d468c8a8b3704ed2d8ea + pristine_git_object: 604c13a4c31db08c48f44be378919d436ef251b6 + src/opperai/models/function_stream_call_stream_postop.py: + id: 0d31f85f95e7 + last_write_checksum: sha1:b0a95679e1e11668aea03256ee90cf050f78522e + pristine_git_object: ebb96a32daf1c77643fd43850db51aa489d029dd + src/opperai/models/functioncall_input.py: + id: 59f934bf00cb + last_write_checksum: sha1:0e92db3a0dcae8b56819f9863615c1bdfa01a502 + pristine_git_object: 1ab7d65b366835229ab2e4f6e524baeb7c86d988 + src/opperai/models/functioncall_output.py: + id: 45bb8babd792 + last_write_checksum: sha1:1d2cc38288667efbd288564b85f8d4d72d2f973a + pristine_git_object: 
0c7ee95159dfc9f6b7b2ed2e5d2c41c709a23b38 + src/opperai/models/functioncallconfiguration_input.py: + id: 9f95b3817420 + last_write_checksum: sha1:3d84bf2b3d79ac628b3867a8092d7f0128d5a471 + pristine_git_object: db54d9e5310dc6a2aa411dc91f3d4d7bd0d3adc8 + src/opperai/models/functioncallconfiguration_output.py: + id: d2c79760f7cc + last_write_checksum: sha1:939f59192ae694a4d3f20fc950bb2ff8f506c4b2 + pristine_git_object: ceb1e2fefe6f7dc873604afd03f2feefbd0f00ac + src/opperai/models/functiondefinition.py: + id: 2249cb0a9930 + last_write_checksum: sha1:67330a0047aadbe3818f5a16a8d8cedf4af4636c + pristine_git_object: 58848907676b24b5df9bd2fa1432a67d3394f393 + src/opperai/models/get_custom_model_by_name_models_custom_by_name_name_getop.py: + id: 47b420bcef3b + last_write_checksum: sha1:c411caf47d3f0042dfbb8196b4edafdad2eec907 + pristine_git_object: d899ae2c2f184a94725120a27d7ae948c39d66a5 + src/opperai/models/get_custom_model_models_custom_model_id_getop.py: + id: da55229bff46 + last_write_checksum: sha1:54b4448da48cb345ec392d6cb7406e84a7ad98d8 + pristine_git_object: b692c2a74677226ef3bfe31b93b2101ede414eab + src/opperai/models/get_dataset_entry_datasets_dataset_id_entries_entry_id_getop.py: + id: babcb6480556 + last_write_checksum: sha1:28343a1a23ff500bdbc17069f50ae0dadfd249c4 + pristine_git_object: 3e8b455da77410b1b10e6cd839f7324cea15de0e + src/opperai/models/get_file_download_url_knowledge_knowledge_base_id_files_file_id_download_url_getop.py: + id: 52f52804b959 + last_write_checksum: sha1:52ba01b66e2997b4d367fc8f0d61f6096d9761aa + pristine_git_object: ceeec2a6becda9b51917379fe1a3296fdc5b49c7 + src/opperai/models/get_function_by_name_functions_by_name_name_getop.py: + id: 0b0aaa62de92 + last_write_checksum: sha1:03c2ca6cf77ff2b3b8e43835e524dbac921c1f48 + pristine_git_object: 83186a38fe8213b4272cd6c43f00188c8db9fa89 + src/opperai/models/get_function_by_revision_functions_function_id_revisions_revision_id_getop.py: + id: 065342ea322b + last_write_checksum: sha1:ec61daeec502ec06f18173ace20ceb226e515e78 + pristine_git_object: 1bcb3f1f024da27e86982e6de07de5ab8a560a87 + src/opperai/models/get_function_functions_function_id_getop.py: + id: c4bb8a4e9b28 + last_write_checksum: sha1:75e2e5d9b4c9af7b1111fe014b1524da8a7d88a5 + pristine_git_object: 0bdd343966fc1acf6679b5ae82ad1cffda38d799 + src/opperai/models/get_knowledge_base_by_name_knowledge_by_name_knowledge_base_name_getop.py: + id: 10c494f7738f + last_write_checksum: sha1:96ba1d9dc5e903ee3f99cb5402d5001881958c89 + pristine_git_object: 10bd22fdd9d9a80cdcb0f2aa1c18c0b59fe28d73 + src/opperai/models/get_knowledge_base_knowledge_knowledge_base_id_getop.py: + id: d6a8f09495d8 + last_write_checksum: sha1:2dee000dffb2655ed8bdd1c79407ade3ba712be5 + pristine_git_object: 9fcf159af0dc798c4c13b221e89a3cff56ee5453 + src/opperai/models/get_metric_spans_span_id_metrics_metric_id_getop.py: + id: d4a7de49a4dd + last_write_checksum: sha1:4961f2f07903eff3f6cfa1be28ba7e16bcfd71a9 + pristine_git_object: 722982cf9a28d0f967a1c32ec00335d68dffc624 + src/opperai/models/get_model_alias_by_name_models_aliases_by_name_name_getop.py: + id: a6735762f1f8 + last_write_checksum: sha1:ea237d9fb557a66a879573f8a49781181119e44e + pristine_git_object: 7f22c9008b76f16b644d666503e5aa148c5e9001 + src/opperai/models/get_model_alias_models_aliases_alias_id_getop.py: + id: c6ffc194b8f9 + last_write_checksum: sha1:e2dad1ee43e9453e2824012dedcbb0eaadbf36f8 + pristine_git_object: 898318dd63777cfc98ec1b0dab9781fc20926414 + src/opperai/models/get_span_spans_span_id_getop.py: + id: e5d5868a6cfc + 
last_write_checksum: sha1:b6ab005e1815b44c72e05b77687397967f1b07ae + pristine_git_object: 6720f38275164bb67ec27a3b715a38f1ae56c4e3 + src/opperai/models/get_trace_traces_trace_id_getop.py: + id: 765e63d79341 + last_write_checksum: sha1:a00d91130000933714da82d31cb4cf2ec8c93d54 + pristine_git_object: ac7fecd839e9d2e701de055d55270999dbed267d + src/opperai/models/get_upload_url_knowledge_knowledge_base_id_upload_url_getop.py: + id: 42b44c582ef5 + last_write_checksum: sha1:edd663dd59d76c5eaf02f51c52965bdd2a9f7503 + pristine_git_object: fb0451f3f559543ba1dba81e2c2cc7f0dc0b2c3e + src/opperai/models/getcustommodelresponse.py: + id: f7e40a6dbd9e + last_write_checksum: sha1:3f86a0069cdbe523790f756be671b9783649cc38 + pristine_git_object: 9b64f10441784e496f6f52bcb80e3013d14ad9d7 + src/opperai/models/getdatasetentriesresponse.py: + id: b432c0f9c22d + last_write_checksum: sha1:ee334fae3964c7ab0fd2b192cb0bdf4e3b85948c + pristine_git_object: 91b4c1d60eeb0a1bc12df3118c9c60101bcfd564 + src/opperai/models/getdatasetentryresponse.py: + id: 9816d6ffe2d2 + last_write_checksum: sha1:b77578254c9258a17f2123693bcbd6a441d917d8 + pristine_git_object: 7b5c75e7e59c0e92de825785e2167c76b89f10da + src/opperai/models/getfunctionresponse.py: + id: 6a9b8930c55c + last_write_checksum: sha1:ae8bca3c45eb716ec146612fe3d3291f7525d597 + pristine_git_object: 3d234958ca85da86377636f28767feba2c2a01ce + src/opperai/models/getknowledgebaseresponse.py: + id: e397c57ca7d4 + last_write_checksum: sha1:97244d9b533fa883626d1a15ffef13fdd5b6da79 + pristine_git_object: 8f13735ba257a0f9ab342f8a19fb7c4faf97d79e + src/opperai/models/getmodelaliasresponse.py: + id: 9f6e9ad987a2 + last_write_checksum: sha1:4712cc8b7698a9518b76d8a35881dfcbc7c8f608 + pristine_git_object: cf9a49c6370c5007383c04c64396bfa586235576 + src/opperai/models/getspanmetricresponse.py: + id: 54beb8ea4b8b + last_write_checksum: sha1:68c6950dc09fa1bd68efac2e462a43ae52ac463a + pristine_git_object: a34affcb261ccd2eb0f6f115aa7f2379b8c82649 + src/opperai/models/getspanresponse.py: + id: 142575bcc5a3 + last_write_checksum: sha1:cc0c3cd7fa68bcd127d6e449fded704b33f1d36f + pristine_git_object: ed51d7940c8676b77fd2416d57cc763df58c37bd + src/opperai/models/gettraceresponse.py: + id: 661d0f2d998a + last_write_checksum: sha1:52470c61c1b159607f4fe3e4bafd48a85d6ffa33 + pristine_git_object: 5eabdd8098aa5c652711ac38a8bc2c96b344ec61 + src/opperai/models/getuploadurlresponse.py: + id: 6639bc0feea4 + last_write_checksum: sha1:68980a12b971d070850bb407504419b3e7eeeb9f + pristine_git_object: 5797e69a487ab3f5b503e28971cb5f6698440435 + src/opperai/models/getusageresultitem.py: + id: 1cdf4e91a67f + last_write_checksum: sha1:73c2211b8cb647f2065055abee1b359926318b56 + pristine_git_object: 782beb780051b5199ee6836b5417af56f04aa310 + src/opperai/models/granularity.py: + id: 9370e124f14a + last_write_checksum: sha1:dbc5488f9d047ac86da4ac7b23042688658f61d6 + pristine_git_object: 5bf9a213cfd57f26e64bc9045c4827ef0d2cb456 + src/opperai/models/imageurl.py: + id: 5ecf170a54ae + last_write_checksum: sha1:7c7d956b6587a0ca195e4b093c2d49cbd9ab7c27 + pristine_git_object: 3fb1bc18904378a643e9c31aa0a48c633e19530a + src/opperai/models/inputaudio.py: + id: 3a6e6726d216 + last_write_checksum: sha1:74fa8f39abc3c4216e5b2705720b2a5cd6ba8625 + pristine_git_object: 6a0e70a5394da565333258890206204584f2194d + src/opperai/models/jsonschema.py: + id: 85f97fb6ed40 + last_write_checksum: sha1:63afd2dddfbfbb7d947e2b43139d5528809bcb68 + pristine_git_object: 9f403ad8ab98a8c56e0dfa7726b555d250bfd247 + 
src/opperai/models/list_custom_models_models_custom_getop.py: + id: 954cd1310155 + last_write_checksum: sha1:d5fe8acbffd070e3056f6982c3bcaf4e44ac1af7 + pristine_git_object: d525f3d3c930c45d92ed61fe92e8de3b567dad1f + src/opperai/models/list_dataset_entries_datasets_dataset_id_entries_getop.py: + id: 234832c2c8e0 + last_write_checksum: sha1:4bf174c00a35e07d94abbed7f5c08fbbe5e4714f + pristine_git_object: 6e051fc0fcf8507029709cd9ce298cb1f96a1aba + src/opperai/models/list_files_knowledge_knowledge_base_id_files_getop.py: + id: f23700e670d0 + last_write_checksum: sha1:3dfdbcb40824035e2c64e00556eb59d48df369f2 + pristine_git_object: 1a4fa77d4c67fc898d787dcd4378ef2b6a0ee9ef + src/opperai/models/list_function_revisions_functions_function_id_revisions_getop.py: + id: 5abaf34c90d1 + last_write_checksum: sha1:e198c4a030f49f3b51b019cd957dc98736c3d295 + pristine_git_object: 1d6e0404bb347d15cf79387d77134ee8bc6abeb9 + src/opperai/models/list_functions_functions_getop.py: + id: e153e9ebff73 + last_write_checksum: sha1:47c50395842e5fbfba1be8c0a091887edb62d553 + pristine_git_object: 3be793098eb9d610240a4e077f8e31de236a34e7 + src/opperai/models/list_knowledge_bases_knowledge_getop.py: + id: 55f5b98666f9 + last_write_checksum: sha1:429845ddbe3d1500f3dcc65944a6e3ec573a8c2f + pristine_git_object: 5bfd8095c2ad2c3f0d2fc6c2053e7172a20dd76e + src/opperai/models/list_metrics_spans_span_id_metrics_getop.py: + id: 5fd54b1a6d32 + last_write_checksum: sha1:357dd1424b83cf6e44c64091d7af507aad2b5a6e + pristine_git_object: 4684622896f4e49e235ceab873459a65bc7c8fce + src/opperai/models/list_model_aliases_models_aliases_getop.py: + id: 09a3383b5dff + last_write_checksum: sha1:1a21796104e791fe1723679625d670c9e8a61e6c + pristine_git_object: 279d139ee7e39d2d99f1faae1370a177760ff3b0 + src/opperai/models/list_models_models_getop.py: + id: 73f173fc057b + last_write_checksum: sha1:00c83ddefe4158bc97eee50a5daabb5c61c0ad45 + pristine_git_object: fe659645ace88fa8cc3fe88ae0454ee70be538d7 + src/opperai/models/list_traces_traces_getop.py: + id: e1cb9fc58035 + last_write_checksum: sha1:cc6bad514ae1fc62895ddd18819ccb459a3b70d9 + pristine_git_object: 0fd8b31f39d9a298869d9831e3043a83e222b14a + src/opperai/models/listcustommodelsresponseitem.py: + id: 90206f7a27c7 + last_write_checksum: sha1:2f22969f9e49ad823b6293aff13e404b1909745e + pristine_git_object: 467abd34f483a889e1c2107b1ca7a4b8fc5b5724 + src/opperai/models/listfilesresponse.py: + id: 5f4be4411d4e + last_write_checksum: sha1:9130c172bfdc7aec51c06fbe22f0f4b65aba9acf + pristine_git_object: 58f64629a1622ae59bcf8dc140a27b2ed6128449 + src/opperai/models/listfunctionrevisionresponse.py: + id: 346b03f2a8c9 + last_write_checksum: sha1:6fd5bc0419725ecace0426a611f5c3801af3b589 + pristine_git_object: 7cc4d658d3d7cad1e375cc98458f73c47e24541c + src/opperai/models/listfunctionsresponseitem.py: + id: 9a655eb97487 + last_write_checksum: sha1:f5d11d90e8e281ed6a6e9eccf8b455cacd4bbbea + pristine_git_object: 3247848d3785dee923450f4f5b5724e76d4dbc1f + src/opperai/models/listknowledgebasesresponse.py: + id: 1eb5201cecc2 + last_write_checksum: sha1:47bd6be3a9ec7059d731ddef1b9068300c74ae6c + pristine_git_object: 319ca021f34973769a6b7d07519c50f04077b090 + src/opperai/models/listlanguagemodelsresponse.py: + id: 8aef69bccb0c + last_write_checksum: sha1:32a861edc406f15859f786886c9c531d3bc0cd37 + pristine_git_object: 905ce610a8522aaae65f9c6c5f740648e88d6e29 + src/opperai/models/listmodelaliasesresponseitem.py: + id: 3bd332810057 + last_write_checksum: sha1:a0d16cc26ae0e545e8feade6b5854c7267f49089 + 
pristine_git_object: 61993a972ac666377bf3f8f3609812a6164981ed + src/opperai/models/listrerankmodelsresponse.py: + id: 7d8c5efd3e6d + last_write_checksum: sha1:977133550e528dfd93488d7a0a13beccb018a13a + pristine_git_object: 65f6b293b782d93511c83e7ebbd0fc6aaaa5892f + src/opperai/models/listspanmetricsresponse.py: + id: f47286715ba3 + last_write_checksum: sha1:137d4f53c73094a95b3a6c306616f618f4a8e4ac + pristine_git_object: 6c3b068c321afbe2db69c2103bca55caebcebd85 + src/opperai/models/listtracesresponse.py: + id: d60f69fbd6ad + last_write_checksum: sha1:2ab72aff4f9365991f45557c9e485437ad31da7b + pristine_git_object: 648a473feee0dbc9cee2b5a3074b0ff2f3dc1c36 + src/opperai/models/meta.py: + id: 0468bf4c495b + last_write_checksum: sha1:411751dff53de8c3431ae510f9441fb46ad94d21 + pristine_git_object: ae33b92d819c687192edda89e922ef6b541dee3a + src/opperai/models/model.py: + id: 0dcf8d50a750 + last_write_checksum: sha1:0368d5aa4848dd957f8ca5cbc1283e2c7573d8c2 + pristine_git_object: dc3bff8b3fc4019d17c4e76f278e0386fc709a72 + src/opperai/models/op.py: + id: 50367c096373 + last_write_checksum: sha1:b8bca063d12cf3da66da520b00944c313fb14048 + pristine_git_object: 4bc07dd564c0d47ac185cc5c7bb6ac8d7a92f69f + src/opperai/models/openai_types_chat_chat_completion_custom_tool_param_custom.py: + id: 671ec956af6d + last_write_checksum: sha1:e07703c87b1f02181fd477592e97d02ca59625c0 + pristine_git_object: 16acb0e35ad9388120e40c7861de8ebc22d8aae3 + src/opperai/models/openai_types_chat_chat_completion_message_custom_tool_call_param_custom.py: + id: c0ad327fae7c + last_write_checksum: sha1:32cc8589ba3ab9b262c0088cc19aded8e1f712cd + pristine_git_object: e3f6f831e19bf4adbfcd56d110bcdd889bcf8f41 + src/opperai/models/openai_types_chat_chat_completion_message_function_tool_call_param_function.py: + id: 4a0b4a878b84 + last_write_checksum: sha1:f99c057879d26652a35612215cad919cba835771 + pristine_git_object: b0c62e94ac73f13c3d25abe1245ae38cbbfa6d53 + src/opperai/models/openai_types_chat_chat_completion_named_tool_choice_custom_param_custom.py: + id: d945ce49f74f + last_write_checksum: sha1:811933511c87dd9ed1f1b93e5b2e73e93ef7f928 + pristine_git_object: 57680ca73815afb288f636bdc61b99476d7bd2c2 + src/opperai/models/openai_types_chat_chat_completion_named_tool_choice_param_function.py: + id: d4f607912c1c + last_write_checksum: sha1:327cd3694bbb6164b92cdaffe0ad5e17ff379118 + pristine_git_object: f456a0d8f232981e2dc4238f9dafaf4aebaa13e4 + src/opperai/models/openai_types_chat_completion_create_params_function.py: + id: 613997e877ff + last_write_checksum: sha1:aab26805c6ec92f7a7f9f327bc4f79ee08496931 + pristine_git_object: 9930fbd821a28aa7a13d59ba74536da6fadbda42 + src/opperai/models/paginatedresponse_getdatasetentriesresponse_.py: + id: e72c8650da1c + last_write_checksum: sha1:8ca41c89c495833c27d62dd61224e23082296c69 + pristine_git_object: 411af990f07b427f6f1d25ed8515168d03bac163 + src/opperai/models/paginatedresponse_listcustommodelsresponseitem_.py: + id: 0cf6f219e73a + last_write_checksum: sha1:fd12a71467c307beb36c4742eba547daa42c5d4d + pristine_git_object: 9083abb62ae59500bd9830ec3b068d02d1cad4fe + src/opperai/models/paginatedresponse_listfilesresponse_.py: + id: 7b71e877242f + last_write_checksum: sha1:4d859be96eecad5db18b554d1d9c47a1bf86bedc + pristine_git_object: e3b9ce37f2015d869369ddac5df5a62554d117de + src/opperai/models/paginatedresponse_listfunctionrevisionresponse_.py: + id: b5fcc968dea6 + last_write_checksum: sha1:3be1eb540edf6f4ced2a401f8cbe9adcbe78fb99 + pristine_git_object: c43fd9842a9c85194d66ae2102aacc9d520ca82e + 
src/opperai/models/paginatedresponse_listfunctionsresponseitem_.py: + id: 1e4164db761f + last_write_checksum: sha1:dfa934bc0a174cbfbf9a60532e3259d9cb104ec1 + pristine_git_object: 62b79065596a5f4e2ccc6e12d3d1687873317b83 + src/opperai/models/paginatedresponse_listknowledgebasesresponse_.py: + id: 3cbbe2613e49 + last_write_checksum: sha1:e59f3232dadd41a16a8fc2e3414841494645feb7 + pristine_git_object: 24b9e051150455f235dde82e3cd06707441f1df8 + src/opperai/models/paginatedresponse_listlanguagemodelsresponse_.py: + id: 5ea5a4e92371 + last_write_checksum: sha1:53fbacc5b02e6dcc384a808ef5f4efbaa5a02262 + pristine_git_object: 107d80bfe6c6eead754b1200571becf120d870fa + src/opperai/models/paginatedresponse_listmodelaliasesresponseitem_.py: + id: 8fa327a86087 + last_write_checksum: sha1:36cf1bee5f0348219adcdd3db3e49f7bd1c4cbf9 + pristine_git_object: 45800ce1ea8e59ed7f3a2b554514f9183b9393dd + src/opperai/models/paginatedresponse_listrerankmodelsresponse_.py: + id: f7314f812d94 + last_write_checksum: sha1:4e99b4114730eb41e8250e96f09a90371c366a45 + pristine_git_object: 84b91305dbb121b9bc5b827175aa0c3a0048d92c + src/opperai/models/paginatedresponse_listspanmetricsresponse_.py: + id: c423b35fdeaa + last_write_checksum: sha1:942771e020d356f0dd17d804ef1736a0a9a825c3 + pristine_git_object: 3ef21618065ba766d8ba60e945f04efb609bb243 + src/opperai/models/paginatedresponse_listtracesresponse_.py: + id: 4b62c08c29a6 + last_write_checksum: sha1:e7130f3c304c2b49d8b634f4d1b1026944889293 + pristine_git_object: 6c8d21d1359d4c93faf75eaef8813ed848793070 + src/opperai/models/prompttokensdetails.py: + id: 865013e7b56f + last_write_checksum: sha1:34f7bbd764566c3f6af88ec70b78241a4f883a32 + pristine_git_object: 288ee6d46a2bde1d9fab4589312509665757aa5c + src/opperai/models/query_dataset_entries_datasets_dataset_id_entries_query_postop.py: + id: 99a0e69aecf3 + last_write_checksum: sha1:8dba33f03aca359f6a65f70f2a8f7757d5ac9491 + pristine_git_object: 291761c3c61abf9c45c4071d1ba7c3c5e5b59888 + src/opperai/models/query_knowledge_base_knowledge_knowledge_base_id_query_postop.py: + id: e68fd9923efe + last_write_checksum: sha1:9ea93a686f9410ddb1b7f34a2a540a5a885aa993 + pristine_git_object: 47f34674a7f744ae3caa7ee29f4ba0c594b2d989 + src/opperai/models/querydatasetentriesresponse.py: + id: bb39605e6f8e + last_write_checksum: sha1:5473cd831021f88cfac4e4f4a75c38342c1e34d8 + pristine_git_object: 36d3294ceb06bf9a2b4d5eff15e132e3fb58c82f + src/opperai/models/queryknowledgebaserequest.py: + id: f23d33665e5d + last_write_checksum: sha1:bf86c04a82de04d2b8534e652e17e4f83de8f107 + pristine_git_object: b85518d507652975be2fcbe30ca221bed0a64bbc + src/opperai/models/queryknowledgebaseresponse.py: + id: c107c087aa45 + last_write_checksum: sha1:8d34194bd71644b94de4e81d2ba13786701d2b12 + pristine_git_object: cb9df8549ff1675e366d22883c8ba4374774758e + src/opperai/models/register_file_upload_knowledge_knowledge_base_id_register_file_postop.py: + id: 944a91399264 + last_write_checksum: sha1:4df600d86f547379d401316af5d028c1dc853ff8 + pristine_git_object: 64dc1fea998d79a097bb75bd37fb930b63b44370 + src/opperai/models/registercustommodelrequest.py: + id: 3359bcf98368 + last_write_checksum: sha1:afcf5796733431660c26d36a21e8b5aae873baab + pristine_git_object: 6e79426163d4255fd2ed30f0e3d6a062c47f73af + src/opperai/models/registercustommodelresponse.py: + id: 8ecc18c8f448 + last_write_checksum: sha1:49cc4f8006d2730bc6e77d1a40e82bdda2250ded + pristine_git_object: 98ad59ee247b5d2ab4f3dc217ac10776e03e6b98 + src/opperai/models/registerfileuploadrequest.py: + id: 
60698f790b4f + last_write_checksum: sha1:7d967124b0608a5bd289e8ecdb43ba555265d263 + pristine_git_object: 0c08df3213ea37c8abcbf5ad0785d5902d78de03 + src/opperai/models/registerfileuploadresponse.py: + id: 265b024272bb + last_write_checksum: sha1:c786725794b47ff320d6c31a670797f5d01c6a77 + pristine_git_object: 6102e0a99e21819887b4437669dc128821271412 + src/opperai/models/rerankcost.py: + id: 73f3ad92a052 + last_write_checksum: sha1:c7de84949982b284e28f7f8ea7ef32aa53946263 + pristine_git_object: 5d9d10f4b68e4e384454a104abe2d82e553e329c + src/opperai/models/rerankdocument.py: + id: 225c69b2bf14 + last_write_checksum: sha1:ae4a6dd81d37cb4c48206b342eda4218a6aa3f67 + pristine_git_object: ec6501faff782aec6a7e64c904d412cd47332997 + src/opperai/models/rerankrequestmodel.py: + id: b4fc21228bcb + last_write_checksum: sha1:89e6f25ad0bdb9d26e27f95369ac6301e02af7ce + pristine_git_object: 78716c5b19f8ca2cab0f82773356dfc16134d902 + src/opperai/models/rerankresponsemodel.py: + id: 1a420f58a45a + last_write_checksum: sha1:1ac30eeb98ce3b3322cb3e2041582d52a81dc84c + pristine_git_object: 342ca322ce74a35ab4ab1772e99c841ba3a50586 + src/opperai/models/rerankresult.py: + id: f8b99d317747 + last_write_checksum: sha1:38db9225894dcd7aee4aeed0ca98815aae7be033 + pristine_git_object: 30b42f9befc0bbe712b98194bd30e1423376c6e4 + src/opperai/models/responseformatjsonobject.py: + id: c33c3f9eb14b + last_write_checksum: sha1:9d0a1ab02b5faf72811aa363cad348c2ef6723b7 + pristine_git_object: ca3e797ce465ea069ddfff7a50dc3fbfa2b4a83f + src/opperai/models/responseformatjsonschema.py: + id: 43c3c6b899a5 + last_write_checksum: sha1:34925bec5f58d011989fb558d945b75f7755c548 + pristine_git_object: c07983cd506517c22a3ce3136b97bd420bcaa917 + src/opperai/models/responseformattext.py: + id: 273e1fecb0d0 + last_write_checksum: sha1:be065f958b62ec13856367e20a91bd6e0ce67a33 + pristine_git_object: 8c170e4a4b8a1337543ef253cf389d3af89376ec + src/opperai/models/save_to_dataset_spans_span_id_save_examples_postop.py: + id: 9e7b0d3f90aa + last_write_checksum: sha1:77b70f39086c74dae46eee3c39b7ec5ade662d85 + pristine_git_object: 26a9615ec28735b94f1c83ebe46ac64dbcff1ca0 + src/opperai/models/savetodatasetresponse.py: + id: 00f539d92dd5 + last_write_checksum: sha1:56b06961b4ce4cf5022d695a553e50c726093db1 + pristine_git_object: 683d0bd5b210180dbb4b9f89c008d9ced378b359 + src/opperai/models/security.py: + id: 933f1188788c + last_write_checksum: sha1:80d3a57416acf3ded49f1122662201f617fc2718 + pristine_git_object: de1268b952d84cb1ce3776c9498204022f8e244d + src/opperai/models/spandata.py: + id: 37d88765bf1f + last_write_checksum: sha1:221840a38d0c48d779c6eed3f1a77619772fe2bb + pristine_git_object: be4ff12b458fcee1179536d4bdd921d49345eee5 + src/opperai/models/spanmetricdata.py: + id: 91a93d64142e + last_write_checksum: sha1:74c3294fa9dfa007178f1f7e50bd48bb8d6546fb + pristine_git_object: 9e4c66253a6555613a95e2ab2a3899c6557bc348 + src/opperai/models/spanschema.py: + id: b5eaab587ff7 + last_write_checksum: sha1:78ad909c9a6008e5d3e6eb73c535564b960e7e81 + pristine_git_object: 0c294a4393e27158fbceef9f6c06a94e3ea26e42 + src/opperai/models/stream_function_functions_function_id_call_stream_postop.py: + id: ea8434fdf38d + last_write_checksum: sha1:ebd093d747ee0dac1896698ed61d04e36d553e41 + pristine_git_object: 2bb4e63fc2027e261e5e6f38060806b03e8b02cb + src/opperai/models/stream_function_revision_functions_function_id_call_stream_revision_id_postop.py: + id: 12751cb92523 + last_write_checksum: sha1:20171cd0a04734610b3d5cf81b49adc753d450bf + pristine_git_object: 
ed77b2c0af4cc2299298a65ba4277dfc7a1e86f0 + src/opperai/models/submit_span_feedback_spans_span_id_feedback_postop.py: + id: 42d8d6cf2a55 + last_write_checksum: sha1:0454229560bc2d360c50a47cdbc12a3389d0e469 + pristine_git_object: 0396e6e750b1e11cea46e6af253f6c37ab60ecd1 + src/opperai/models/submitfeedbackrequest.py: + id: 8581d0e000c9 + last_write_checksum: sha1:0b8b7ba33fb61444400ee53863da00a309849ab4 + pristine_git_object: 71c61b2ed4a70a232d501d36282a23307c672f80 + src/opperai/models/submitfeedbackresponse.py: + id: a7df8f602efc + last_write_checksum: sha1:ac54176a9aa8380f2dfd7813f5b90d6e61799f73 + pristine_git_object: 11ce9db545ccd05f22f2331943cf9d7e5fe80e04 + src/opperai/models/textprocessingconfiguration.py: + id: d0752b79d5a1 + last_write_checksum: sha1:a5d13c7ca8f3af68af864a3d9244df03221d0ccb + pristine_git_object: 608cfd66260345cb8f10c3802356fb00e33a9d9d + src/opperai/models/tmodel.py: + id: 88c118a2a14a + last_write_checksum: sha1:e087f92e013456a443d7ee5119fba6ef1906a84a + pristine_git_object: 82df7d36dcbd6dcbe6a99588f0ed5f438d2cd8de + src/opperai/models/toplogprob.py: + id: 88b00aee421b + last_write_checksum: sha1:8a84fbef0f09ee481753aca25d5aaa544c38ebc2 + pristine_git_object: 09bccd5e6fba3c0440dcd40b36afdaf910462ebf + src/opperai/models/update_custom_model_models_custom_model_id_patchop.py: + id: c78d95bc993e + last_write_checksum: sha1:05bc779806f99e1b60118c351ed0f0267fe7b889 + pristine_git_object: 4975946aef8fc9238adc925e81055d65ea0b6753 + src/opperai/models/update_dataset_entry_datasets_dataset_id_entries_entry_id_patchop.py: + id: 0a5334b3ce83 + last_write_checksum: sha1:7259a7a45b44931337562c00d528e75a3deb1c86 + pristine_git_object: 438bcc4f60c885496bfb11a3b71e333f3ced8f99 + src/opperai/models/update_function_functions_function_id_patchop.py: + id: 1b1d1464ffd7 + last_write_checksum: sha1:f6e64f62d40074508ad63ed63d29a99975d0a4e1 + pristine_git_object: c537cdbe049322d5a15548ac9254dfbb4d2f4226 + src/opperai/models/update_metric_spans_span_id_metrics_metric_id_patchop.py: + id: 3ec182634a28 + last_write_checksum: sha1:a984ec40d8068e6d34ba8b3624b48ba4f1b46baf + pristine_git_object: 110b28b66c1ea250c89dbac48b57251838b3f969 + src/opperai/models/update_model_alias_models_aliases_alias_id_patchop.py: + id: 48d89ef85d3e + last_write_checksum: sha1:884283773e20702a0a592aceff345c7df09a86b7 + pristine_git_object: 708995d4768388dce815f68b8d5e9f501eae0f3d + src/opperai/models/update_span_spans_span_id_patchop.py: + id: e70ad5ca1d93 + last_write_checksum: sha1:2850b46d5b8b90991b868b2fc8082eaffb0ed162 + pristine_git_object: 4d67176d8472b3789ccd5784d8deff5080fdfeea + src/opperai/models/updatecustommodelrequest.py: + id: 55703e79ac25 + last_write_checksum: sha1:49ffd4d278931e972b91940a1ec67422ce9aeac1 + pristine_git_object: 2784d557131eea35ff04bdfabe1af31d3c5bf8f2 + src/opperai/models/updatecustommodelresponse.py: + id: 46af2e3d0c55 + last_write_checksum: sha1:f8983c4262d61f0c07c5f3b1bd1761bd7e4c969b + pristine_git_object: 5e1a84a4cd961f183250674004de95a7925a9295 + src/opperai/models/updatedatasetentryrequest.py: + id: 2c94729d2238 + last_write_checksum: sha1:4efe5e320d2eabb2cbc1620e7f8e3a721aafb159 + pristine_git_object: d82d23bd8ba4e1185e31d922420ef63ed74d0c5c + src/opperai/models/updatedatasetentryresponse.py: + id: 8ed715160bce + last_write_checksum: sha1:57cc219500ec9bcb1d659e2b063121f36dbcddc6 + pristine_git_object: b5f94f390edc44eab9b1dc4653b3368a3ebd26f8 + src/opperai/models/updatefunctionrequest.py: + id: 7a1e428a7516 + last_write_checksum: 
sha1:114285b3d4ac8679cbabe7c995d269586440ff49 + pristine_git_object: 4a8c1345bf7fe4aa79501e0bb509f86684616b37 + src/opperai/models/updatefunctionresponse.py: + id: 2c98ce1f06db + last_write_checksum: sha1:6224377c24e15f0d90ca53bc4959a8a071030aa2 + pristine_git_object: 964b9b07b3f023e546481cddd2b007c83f1e6cbb + src/opperai/models/updatemodelaliasrequest.py: + id: c0d09b831452 + last_write_checksum: sha1:0cf976f5d3c5dfc3d939de28e1d6dbf96806d1f5 + pristine_git_object: 1732017f26eb37ca145d199b0de4b0f5c7e1d978 + src/opperai/models/updatemodelaliasresponse.py: + id: 49c5386b671a + last_write_checksum: sha1:c39f241cd69fa89ebc1a8abc342401b11457178b + pristine_git_object: 98d5a8a9e86c934815887bda98e9481d1c9bf597 + src/opperai/models/updatespanmetricrequest.py: + id: 9b88f3cfbb61 + last_write_checksum: sha1:62ce023a17f4c7e23fae333502140919cdf4efd6 + pristine_git_object: 2445e5c6b71fb4cfb84f5542284e4243ea438bcb + src/opperai/models/updatespanmetricresponse.py: + id: b7544c7c7735 + last_write_checksum: sha1:aee3c16fe4a890d0ae43ddba8c2065258a8b1f92 + pristine_git_object: e69a112b35200afd72d7e26e20ec9c9c8d1954ec + src/opperai/models/updatespanrequest.py: + id: fad3e7760bb4 + last_write_checksum: sha1:945bd7d0ce5a8677c6a00a6a4ec656af02fa7c52 + pristine_git_object: 569003067cbe71090772e5f94f18dbfa496a49bd + src/opperai/models/updatespanresponse.py: + id: 965e8ccaca38 + last_write_checksum: sha1:a29257b5ef60c1badf68b6a475293afa9d345793 + pristine_git_object: ed9ea325cd19e0dc4590505a8427734433c09ada + src/opperai/models/upload_file_knowledge_knowledge_base_id_upload_postop.py: + id: f5b1debb607d + last_write_checksum: sha1:918b7173cb71463d3e358c3191aaa5b0ec41c647 + pristine_git_object: 74c5027ae445226632dfc5a4107d59ac99e73582 + src/opperai/models/uploadfileresponse.py: + id: 4a6f0985b4e7 + last_write_checksum: sha1:afb079e15243a40aef321667630af581d7df16a2 + pristine_git_object: 2eb71fcbbae8b8e9fc8293fc2f1f78d913f7594a + src/opperai/models/usage_analytics_usage_getop.py: + id: 1a12e8f7a9a3 + last_write_checksum: sha1:aa3e9b13af4f9ce6868e127585666c5a9548a22e + pristine_git_object: 2cb0881e73d5acf1eed67153d2fd50b2c502027e + src/opperai/models/websearchoptions.py: + id: 4bf8fccf1a0e + last_write_checksum: sha1:43829b2ab5bb59c3886f3b5e85e4baea469c628a + pristine_git_object: e83d06480bf455716cec307b570f33fb5d68439d + src/opperai/models/websearchoptionsuserlocation.py: + id: 08bfca88c114 + last_write_checksum: sha1:3498edf903f47d7201f352a8a1d863cbfe37caf8 + pristine_git_object: 99e6cdb4a2af59358e04344c5fe786d3198bea9b + src/opperai/models/websearchoptionsuserlocationapproximate.py: + id: 77817db32bc2 + last_write_checksum: sha1:cf7375763f355e80a8dd318da1ec52b9906159d9 + pristine_git_object: a8288a1def2db11e63f877f1f7aab0f9a5f19e0c + src/opperai/openai.py: + id: 99daf608c2e7 + last_write_checksum: sha1:2db2f792b34529c4505360a96cec9fa8aa9e56df + pristine_git_object: 5aafab850e4f22f740ca76a4d8bd4be89a88f5d3 + src/opperai/py.typed: + id: 1fabbd7047c0 + last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 + pristine_git_object: 3e38f1a929f7d6b1d6de74604aa87e3d8f010544 + src/opperai/rerank.py: + id: 1f8b4f679e73 + last_write_checksum: sha1:b05ad44471c07d86cc9108e2064f4945b55b6a0d + pristine_git_object: 043dcbe525a2abe59827464b2b24621a3ead0455 + src/opperai/revisions.py: + id: 805db6887351 + last_write_checksum: sha1:ceff7a856ce7855688becf65ee9babcb265e6f42 + pristine_git_object: 3e0547b82b64acf497501f4aaa5a7fed6aac4b25 + src/opperai/sdk.py: + id: 844e4e65e5eb + last_write_checksum: 
sha1:e4916052eafbb9a8c6bad0971175a20c60f7b420 + pristine_git_object: b6f193ff6474301de0640f9fe2dcd590dacbca52 + src/opperai/sdkconfiguration.py: + id: 17be9a809622 + last_write_checksum: sha1:f4271034a7639fa9bc0a3606a7e307f5df4e562d + pristine_git_object: 11fe3a5db94929b383a8752cfda6cfbcd2422e0d + src/opperai/spanmetrics.py: + id: 51c79ed6aac6 + last_write_checksum: sha1:aadd1580e1a1ee6523dbf9305ad07586ea2d2606 + pristine_git_object: 3f8b0302c3700c1693ca6773d54d0118c1b6b596 + src/opperai/spans.py: + id: 5fee53590ea8 + last_write_checksum: sha1:6f85233fc3093fdc6a97aa62f3f9e32e7496c7a3 + pristine_git_object: 2c9a02c19b99dc4ec3cd100886c290a50f1aa9be + src/opperai/traces.py: + id: edf6ec5126ff + last_write_checksum: sha1:47d3d03aa1f8efa2ea9ce650ed6d97e279e7fbd0 + pristine_git_object: c5e4cd3bbf1daeeba3535c2d4ef47eaf5c974820 + src/opperai/types/__init__.py: + id: c7f5b0729d28 + last_write_checksum: sha1:140ebdd01a46f92ffc710c52c958c4eba3cf68ed + pristine_git_object: fc76fe0c5505e29859b5d2bb707d48fd27661b8c + src/opperai/types/basemodel.py: + id: 4d6bfe63351b + last_write_checksum: sha1:615d0b364fa924b0fef719958df34596cc7c1ae2 + pristine_git_object: 231c2e37283a76082f1a064c7aae47f8ee4ee694 + src/opperai/utils/__init__.py: + id: 73e396310fd7 + last_write_checksum: sha1:5d88356c206a32c694ae113178ca6325a9fc612a + pristine_git_object: 87192ddef2589a7778f85e1cd715d6d1e61ae0a6 + src/opperai/utils/annotations.py: + id: e601ea2cfdd8 + last_write_checksum: sha1:a4824ad65f730303e4e1e3ec1febf87b4eb46dbc + pristine_git_object: 12e0aa4f1151bb52474cc02e88397329b90703f6 + src/opperai/utils/datetimes.py: + id: f97cf153b046 + last_write_checksum: sha1:c721e4123000e7dc61ec52b28a739439d9e17341 + pristine_git_object: a6c52cd61bbe2d459046c940ce5e8c469f2f0664 + src/opperai/utils/enums.py: + id: 59dd3e824c10 + last_write_checksum: sha1:786ba597f79dca6fbc0d87c591752bb8d775ecb7 + pristine_git_object: c3bc13cfc48794c143a64667f02e7949a8ce3fcc + src/opperai/utils/eventstreaming.py: + id: 4da9c3253e2f + last_write_checksum: sha1:bababae5d54b7efc360db701daa49e18a92c2f3b + pristine_git_object: 0969899bfc491e5e408d05643525f347ea95e4fc + src/opperai/utils/forms.py: + id: 80a90bfec5aa + last_write_checksum: sha1:15fa7e9ab1611e062a9984cf06cb20969713d295 + pristine_git_object: f961e76beaf0a8b1fe0dda44754a74eebd3608e7 + src/opperai/utils/headers.py: + id: b67c08cd89f6 + last_write_checksum: sha1:7c6df233ee006332b566a8afa9ce9a245941d935 + pristine_git_object: 37864cbbbc40d1a47112bbfdd3ba79568fc8818a + src/opperai/utils/logger.py: + id: 325187ab52a3 + last_write_checksum: sha1:417dafbcfc0110d83ebc2a918435150dd185e402 + pristine_git_object: 3a65807a0510c5d17848b396331fe00a7b024db1 + src/opperai/utils/metadata.py: + id: e6c4f37fa697 + last_write_checksum: sha1:c6a560bd0c63ab158582f34dadb69433ea73b3d4 + pristine_git_object: 173b3e5ce658675c2f504222a56b3daaaa68107d + src/opperai/utils/queryparams.py: + id: dc5d26337c32 + last_write_checksum: sha1:b94c3f314fd3da0d1d215afc2731f48748e2aa59 + pristine_git_object: c04e0db82b68eca041f2cb2614d748fbac80fd41 + src/opperai/utils/requestbodies.py: + id: 8bcc606c7f45 + last_write_checksum: sha1:e0a3a78158eba39880475d62d61be906625676b8 + pristine_git_object: d5240dd5f5efffabbd9aefa2f4a349511a9c75b4 + src/opperai/utils/retries.py: + id: cbe82b854ffd + last_write_checksum: sha1:5b97ac4f59357d70c2529975d50364c88bcad607 + pristine_git_object: 88a91b10cd2076b4a2c6cff2ac6bfaa5e3c5ad13 + src/opperai/utils/security.py: + id: 92e819c1e6f3 + last_write_checksum: sha1:3c093a356babd542cf02f132d7f3442fe525d183 + 
pristine_git_object: 35d826970d18cf916b816f5fd77351e1e601bf43 + src/opperai/utils/serializers.py: + id: df095b3d06e1 + last_write_checksum: sha1:a0d184ace7371a14a7d005cca7f358a03e3d4b07 + pristine_git_object: 378a14c0f86a867ca7b0eb7e620da82234c0ccc4 + src/opperai/utils/unmarshal_json_response.py: + id: 306703896d34 + last_write_checksum: sha1:01b9e587ec866dad0d531f65643f3abac7f7aba5 + pristine_git_object: c3957b585b937d54e9f8c345059edfbbfee2fb2a + src/opperai/utils/url.py: + id: 932ac5dc3c1d + last_write_checksum: sha1:6479961baa90432ca25626f8e40a7bbc32e73b41 + pristine_git_object: c78ccbae426ce6d385709d97ce0b1c2813ea2418 + src/opperai/utils/values.py: + id: 2122bb8d810c + last_write_checksum: sha1:acaa178a7c41ddd000f58cc691e4632d925b2553 + pristine_git_object: dae01a44384ac3bc13ae07453a053bf6c898ebe3 examples: function_call_call_post: speakeasy-default-function-call-call-post: requestBody: - application/json: {"name": "add_numbers", "instructions": "Calculate the sum of two numbers", "input_schema": {"properties": {"x": {"title": "X", "type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "input": {"x": 4, "y": 5}, "examples": [{"input": {"x": 1, "y": 3}, "output": {"sum": 4}, "comment": "Adds two numbers"}], "parent_span_id": "123e4567-e89b-12d3-a456-426614174000", "tags": {"project": "project_456", "user": "company_123"}, "configuration": {"invocation.few_shot.count": 0, "invocation.structured_generation.max_attempts": 5, "invocation.cache.ttl": 0, "beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false}} + application/json: {"name": "add_numbers", "instructions": "Calculate the sum of two numbers", "input_schema": {"properties": {"x": {"title": "X", "type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "input": {"x": 4, "y": 5}, "examples": [{"input": {"x": 1, "y": 3}, "output": {"sum": 4}, "comment": "Adds two numbers"}], "parent_span_id": "123e4567-e89b-12d3-a456-426614174000", "tags": {"project": "project_456", "user": "company_123"}, "configuration": {"invocation.few_shot.count": 3, "invocation.structured_generation.max_attempts": 5, "invocation.cache.ttl": 0, "beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false}} responses: "200": application/json: {"span_id": "174109ca-ebca-4823-8c0d-08c3789b2fea", "message": "The sum of 1 and 3 is 4", "json_payload": {"sum": 4}, "cached": true, "images": ["image_url"], "usage": {"input_tokens": 25, "output_tokens": 972, "output_tokens_details": {"reasoning_tokens": 704}, "total_tokens": 997}, "cost": {"generation": 0.0001, "platform": 0.00001, "total": 0.00011}} @@ -636,19 +2350,19 @@ examples: text/event-stream: {"data": {"delta": "Hello! 
How can I assist you today?", "span_id": "123e4567-e89b-12d3-a456-426614174000"}} span_id_only: requestBody: - application/json: {"name": "add_numbers", "instructions": "Calculate the sum of two numbers", "input_schema": {"properties": {"x": {"title": "X", "type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "input": {"x": 4, "y": 5}, "examples": [{"input": {"x": 1, "y": 3}, "output": {"sum": 4}, "comment": "Adds two numbers"}], "parent_span_id": "123e4567-e89b-12d3-a456-426614174000", "tags": {"project": "project_456", "user": "company_123"}, "configuration": {"invocation.few_shot.count": 0, "invocation.structured_generation.max_attempts": 5, "invocation.cache.ttl": 0, "beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false}} + application/json: {"name": "add_numbers", "instructions": "Calculate the sum of two numbers", "input_schema": {"properties": {"x": {"title": "X", "type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "input": {"x": 4, "y": 5}, "examples": [{"input": {"x": 1, "y": 3}, "output": {"sum": 4}, "comment": "Adds two numbers"}], "parent_span_id": "123e4567-e89b-12d3-a456-426614174000", "tags": {"project": "project_456", "user": "company_123"}, "configuration": {"invocation.few_shot.count": 3, "invocation.structured_generation.max_attempts": 5, "invocation.cache.ttl": 0, "beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false}} responses: "200": text/event-stream: {"data": {"span_id": "123e4567-e89b-12d3-a456-426614174000"}} text_streaming_content: requestBody: - application/json: {"name": "add_numbers", "instructions": "Calculate the sum of two numbers", "input_schema": {"properties": {"x": {"title": "X", "type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "input": {"x": 4, "y": 5}, "examples": [{"input": {"x": 1, "y": 3}, "output": {"sum": 4}, "comment": "Adds two numbers"}], "parent_span_id": "123e4567-e89b-12d3-a456-426614174000", "tags": {"project": "project_456", "user": "company_123"}, "configuration": {"invocation.few_shot.count": 0, "invocation.structured_generation.max_attempts": 5, "invocation.cache.ttl": 0, "beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false}} + application/json: {"name": "add_numbers", "instructions": "Calculate the sum of two numbers", "input_schema": {"properties": {"x": {"title": "X", "type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "input": {"x": 4, "y": 5}, "examples": [{"input": {"x": 1, "y": 3}, "output": {"sum": 4}, "comment": "Adds two numbers"}], "parent_span_id": "123e4567-e89b-12d3-a456-426614174000", "tags": 
{"project": "project_456", "user": "company_123"}, "configuration": {"invocation.few_shot.count": 3, "invocation.structured_generation.max_attempts": 5, "invocation.cache.ttl": 0, "beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false}} responses: "200": text/event-stream: {"data": {"delta": "Hello! How can I assist you today?", "chunk_type": "text"}} structured_streaming_content: requestBody: - application/json: {"name": "add_numbers", "instructions": "Calculate the sum of two numbers", "input_schema": {"properties": {"x": {"title": "X", "type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "input": {"x": 4, "y": 5}, "examples": [{"input": {"x": 1, "y": 3}, "output": {"sum": 4}, "comment": "Adds two numbers"}], "parent_span_id": "123e4567-e89b-12d3-a456-426614174000", "tags": {"project": "project_456", "user": "company_123"}, "configuration": {"invocation.few_shot.count": 0, "invocation.structured_generation.max_attempts": 5, "invocation.cache.ttl": 0, "beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false}} + application/json: {"name": "add_numbers", "instructions": "Calculate the sum of two numbers", "input_schema": {"properties": {"x": {"title": "X", "type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "input": {"x": 4, "y": 5}, "examples": [{"input": {"x": 1, "y": 3}, "output": {"sum": 4}, "comment": "Adds two numbers"}], "parent_span_id": "123e4567-e89b-12d3-a456-426614174000", "tags": {"project": "project_456", "user": "company_123"}, "configuration": {"invocation.few_shot.count": 3, "invocation.structured_generation.max_attempts": 5, "invocation.cache.ttl": 0, "beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false}} responses: "200": text/event-stream: {"data": {"delta": "John Doe", "json_path": "people[0].name", "chunk_type": "json"}} @@ -881,7 +2595,7 @@ examples: path: span_id: "77b258a2-45c1-4b87-a50c-9116bc8ed1d6" requestBody: - application/json: {"name": "my span", "start_time": "2025-11-28T13:52:31.359105Z", "type": "email_tool", "end_time": "2025-11-28T13:52:31.359201Z", "input": "Hello, world!", "output": "Hello, world!", "error": "Exception: This is an error message", "meta": {"key": "value"}, "score": 10} + application/json: {"name": "my span", "start_time": "2025-12-09T10:00:56.939134Z", "type": "email_tool", "end_time": "2025-12-09T10:00:56.939193Z", "input": "Hello, world!", "output": "Hello, world!", "error": "Exception: This is an error message", "meta": {"key": "value"}, "score": 10} responses: "200": application/json: {"name": "my span", "start_time": "2024-03-20T10:00:00+00:00", "id": "84d9ffc8-336c-4953-b92f-fe05d5405951", "trace_id": "123e4567-e89b-12d3-a456-426614174000", "parent_id": "123e4567-e89b-12d3-a456-426614174000", "type": "email_tool", "end_time": "2024-03-20T10:00:10+00:00", "input": "Hello, world!", "output": "Hello, world!", "error": "Exception: This is an error message", "meta": {"key": "value"}, "score": 10} @@ -1085,10 +2799,10 @@ examples: create_function_functions_post: 
speakeasy-default-create-function-functions-post: requestBody: - application/json: {"name": "my-function", "description": "This function is used to add two numbers and return the result.", "instructions": "You are a calculator that adds two numbers and returns the result.", "input_schema": {"properties": {"x": {"title": "X", "type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "configuration": {"invocation.few_shot.count": 0, "invocation.structured_generation.max_attempts": 5, "invocation.cache.ttl": 0, "beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false}} + application/json: {"name": "my-function", "description": "This function is used to add two numbers and return the result.", "instructions": "You are a calculator that adds two numbers and returns the result.", "input_schema": {"properties": {"x": {"title": "X", "type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "configuration": {"invocation.few_shot.count": 3, "invocation.structured_generation.max_attempts": 5, "invocation.cache.ttl": 0, "beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false}} responses: "201": - application/json: {"name": "my-function", "description": "This function is used to add two numbers and return the result.", "instructions": "You are a calculator that adds two numbers and returns the result.", "input_schema": {"properties": {"x": {"title": "X", "type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "configuration": {"invocation.few_shot.count": 0, "invocation.structured_generation.max_attempts": 5, "invocation.cache.ttl": 0, "beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false}, "id": "a246e349-93f1-4ae1-9ed0-d3ca4f954a62", "observer_enabled": true} + application/json: {"name": "my-function", "description": "This function is used to add two numbers and return the result.", "instructions": "You are a calculator that adds two numbers and returns the result.", "input_schema": {"properties": {"x": {"title": "X", "type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "configuration": {"invocation.few_shot.count": 3, "invocation.structured_generation.max_attempts": 5, "invocation.cache.ttl": 0, "beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false}, "id": "a246e349-93f1-4ae1-9ed0-d3ca4f954a62", "observer_enabled": true} "409": application/json: {"type": "ConflictError", "message": "The resource already exists", "detail": "The resource already exists"} "422": @@ -1123,7 +2837,7 @@ examples: function_id: "42016421-16e8-4b50-a2d1-30fc3894763b" responses: "200": - 
application/json: {"name": "my-function", "description": "This function is used to add two numbers and return the result.", "instructions": "You are a calculator that adds two numbers and returns the result.", "input_schema": {"properties": {"x": {"title": "X", "type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "configuration": {"invocation.few_shot.count": 0, "invocation.structured_generation.max_attempts": 5, "invocation.cache.ttl": 0, "beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false}, "id": "692b28e0-d3e6-4ea4-adf0-c629aeecfdab", "observer_enabled": true} + application/json: {"name": "my-function", "description": "This function is used to add two numbers and return the result.", "instructions": "You are a calculator that adds two numbers and returns the result.", "input_schema": {"properties": {"x": {"title": "X", "type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "configuration": {"invocation.few_shot.count": 3, "invocation.structured_generation.max_attempts": 5, "invocation.cache.ttl": 0, "beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false}, "id": "692b28e0-d3e6-4ea4-adf0-c629aeecfdab", "observer_enabled": true} "422": application/json: {"type": "RequestValidationError", "message": "The request is invalid", "detail": {"input": "input value", "loc": ["path", "to", "error"], "msg": "error message", "type": "type of error"}} "400": @@ -1138,10 +2852,10 @@ examples: path: function_id: "b0f067f3-9aa2-4ce2-aad5-c9832fbf4fa4" requestBody: - application/json: {"name": "my-function", "description": "This function is used to add two numbers and return the result.", "instructions": "You are a calculator that adds two numbers and returns the result.", "input_schema": {"properties": {"x": {"title": "X", "type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "configuration": {"beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false, "invocation.cache.ttl": 0, "invocation.few_shot.count": 0, "invocation.structured_generation.max_attempts": 5}} + application/json: {"name": "my-function", "description": "This function is used to add two numbers and return the result.", "instructions": "You are a calculator that adds two numbers and returns the result.", "input_schema": {"properties": {"x": {"title": "X", "type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "configuration": {"beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false, "invocation.cache.ttl": 0, "invocation.few_shot.count": 3, "invocation.structured_generation.max_attempts": 5}} 
responses: "200": - application/json: {"name": "my-function", "description": "This function is used to add two numbers and return the result.", "instructions": "You are a calculator that adds two numbers and returns the result.", "input_schema": {"properties": {"x": {"title": "X", "type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "configuration": {"invocation.few_shot.count": 0, "invocation.structured_generation.max_attempts": 5, "invocation.cache.ttl": 0, "beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false}, "id": "7111f9fb-1c8a-4eee-bcaf-a0e751490b7f", "observer_enabled": true} + application/json: {"name": "my-function", "description": "This function is used to add two numbers and return the result.", "instructions": "You are a calculator that adds two numbers and returns the result.", "input_schema": {"properties": {"x": {"title": "X", "type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "configuration": {"invocation.few_shot.count": 3, "invocation.structured_generation.max_attempts": 5, "invocation.cache.ttl": 0, "beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false}, "id": "7111f9fb-1c8a-4eee-bcaf-a0e751490b7f", "observer_enabled": true} "422": application/json: {"type": "RequestValidationError", "message": "The request is invalid", "detail": {"input": "input value", "loc": ["path", "to", "error"], "msg": "error message", "type": "type of error"}} "400": @@ -1339,7 +3053,7 @@ examples: revision_id: "e60c7090-8545-4e1f-84d5-0d9e4be6c0d1" responses: "200": - application/json: {"name": "my-function", "description": "This function is used to add two numbers and return the result.", "instructions": "You are a calculator that adds two numbers and returns the result.", "input_schema": {"properties": {"x": {"title": "X", "type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "configuration": {"invocation.few_shot.count": 0, "invocation.structured_generation.max_attempts": 5, "invocation.cache.ttl": 0, "beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false}, "id": "1af4e1a9-7f11-4e7c-962d-827d5bc836a1", "observer_enabled": true} + application/json: {"name": "my-function", "description": "This function is used to add two numbers and return the result.", "instructions": "You are a calculator that adds two numbers and returns the result.", "input_schema": {"properties": {"x": {"title": "X", "type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "configuration": {"invocation.few_shot.count": 3, "invocation.structured_generation.max_attempts": 5, "invocation.cache.ttl": 0, 
"beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false}, "id": "1af4e1a9-7f11-4e7c-962d-827d5bc836a1", "observer_enabled": true} "422": application/json: {"type": "RequestValidationError", "message": "The request is invalid", "detail": {"input": "input value", "loc": ["path", "to", "error"], "msg": "error message", "type": "type of error"}} "400": @@ -1574,7 +3288,7 @@ examples: name: "" responses: "200": - application/json: {"name": "my-function", "description": "This function is used to add two numbers and return the result.", "instructions": "You are a calculator that adds two numbers and returns the result.", "input_schema": {"properties": {"x": {"title": "X", "type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "configuration": {"invocation.few_shot.count": 0, "invocation.structured_generation.max_attempts": 5, "invocation.cache.ttl": 0, "beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false}, "id": "6f887222-2826-417a-ba23-ddd2b6bc4cbf", "observer_enabled": true} + application/json: {"name": "my-function", "description": "This function is used to add two numbers and return the result.", "instructions": "You are a calculator that adds two numbers and returns the result.", "input_schema": {"properties": {"x": {"title": "X", "type": "integer"}, "y": {"title": "Y", "type": "integer"}}, "required": ["x", "y"], "title": "OpperInputExample", "type": "object"}, "output_schema": {"properties": {"sum": {"title": "Sum", "type": "integer"}}, "required": ["sum"], "title": "OpperOutputExample", "type": "object"}, "configuration": {"invocation.few_shot.count": 3, "invocation.structured_generation.max_attempts": 5, "invocation.cache.ttl": 0, "beta.invocation.input_validation.enabled": false, "beta.invocation.xml_mode.enabled": false}, "id": "6f887222-2826-417a-ba23-ddd2b6bc4cbf", "observer_enabled": true} "400": application/json: {"type": "BadRequestError", "message": "The request is invalid", "detail": "The request is invalid"} "401": @@ -1914,5 +3628,611 @@ examples: application/json: {"type": "NotFoundError", "message": "The resource was not found", "detail": "Span 123e4567-e89b-12d3-a456-426614174000 not found"} "422": application/json: {"type": "RequestValidationError", "message": "The request is invalid", "detail": {"input": "input value", "loc": ["path", "to", "error"], "msg": "error message", "type": "type of error"}} + upload_file_knowledge__knowledge_base_id__upload_post: + speakeasy-default-upload-file-knowledge-knowledge-base-id-upload-post: + parameters: + path: + knowledge_base_id: "68275f14-e70f-4536-be7e-03a877ce8be8" + requestBody: + multipart/form-data: {"file": "x-file: example.file", "chunk_size": 2000, "chunk_overlap": 200, "metadata": "{\"category\": \"legal\", \"client\": \"acme\"}"} + responses: + "201": + application/json: {"id": "c3e5f895-19ee-441a-87c8-97e005c826fd", "key": "", "original_filename": "", "document_id": 946193} + "400": + application/json: {"type": "BadRequestError", "message": "The request is invalid", "detail": "The request is invalid"} + "401": + application/json: {"type": "UnauthorizedError", "message": "The request is unauthorized", "detail": "The API key is invalid"} + "404": + application/json: {"type": "NotFoundError", "message": "The resource was not 
found", "detail": "Span 123e4567-e89b-12d3-a456-426614174000 not found"} + "422": + application/json: {"type": "RequestValidationError", "message": "The request is invalid", "detail": {"input": "input value", "loc": ["path", "to", "error"], "msg": "error message", "type": "type of error"}} + submit_span_feedback_spans__span_id__feedback_post: + speakeasy-default-submit-span-feedback-spans-span-id-feedback-post: + parameters: + path: + span_id: "c1be9633-665b-418c-a08f-19cb9734dd23" + requestBody: + application/json: {"score": 1, "comment": "Great output, exactly what I needed"} + responses: + "200": + application/json: {"span_id": "6e2c8656-c871-4147-aed9-6b71878d6726", "score": 5330.69, "example_saved": true} + "400": + application/json: {"type": "BadRequestError", "message": "The request is invalid", "detail": "The request is invalid"} + "401": + application/json: {"type": "UnauthorizedError", "message": "The request is unauthorized", "detail": "The API key is invalid"} + "404": + application/json: {"type": "NotFoundError", "message": "The resource was not found", "detail": "Span 123e4567-e89b-12d3-a456-426614174000 not found"} + "422": + application/json: {"type": "RequestValidationError", "message": "The request is invalid", "detail": {"input": "input value", "loc": ["path", "to", "error"], "msg": "error message", "type": "type of error"}} examplesVersion: 1.0.2 generatedTests: {} +generatedFiles: + - .devcontainer/README.md + - .devcontainer/devcontainer.json + - .devcontainer/setup.sh + - .gitattributes + - .vscode/settings.json + - USAGE.md + - docs/errors/badrequesterror.md + - docs/errors/conflicterror.md + - docs/errors/error.md + - docs/errors/notfounderror.md + - docs/errors/requestvalidationerror.md + - docs/errors/unauthorizederror.md + - docs/models/addknowledgeknowledgebaseidaddpostrequest.md + - docs/models/addrequest.md + - docs/models/annotation.md + - docs/models/annotationurlcitation.md + - docs/models/appapipublicv2functioncallcallfunctionrequest.md + - docs/models/appapipublicv2functioncallcallfunctionresponse.md + - docs/models/appapipublicv2functionscallfunctionrequest.md + - docs/models/appapipublicv2functionscallfunctionresponse.md + - docs/models/audio.md + - docs/models/callfunctionfunctionsfunctionidcallpostrequest.md + - docs/models/callfunctionrevisionfunctionsfunctionidcallrevisionidpostrequest.md + - docs/models/chatcompletion.md + - docs/models/chatcompletionallowedtoolchoiceparam.md + - docs/models/chatcompletionallowedtoolsparam.md + - docs/models/chatcompletionassistantmessageparam.md + - docs/models/chatcompletionassistantmessageparamcontent1.md + - docs/models/chatcompletionassistantmessageparamcontent2.md + - docs/models/chatcompletionassistantmessageparamtoolcall.md + - docs/models/chatcompletionaudio.md + - docs/models/chatcompletionaudioparam.md + - docs/models/chatcompletionaudioparamformat.md + - docs/models/chatcompletioncontentpartimageparam.md + - docs/models/chatcompletioncontentpartinputaudioparam.md + - docs/models/chatcompletioncontentpartrefusalparam.md + - docs/models/chatcompletioncontentparttextparam.md + - docs/models/chatcompletioncustomtoolparam.md + - docs/models/chatcompletiondevelopermessageparam.md + - docs/models/chatcompletiondevelopermessageparamcontent.md + - docs/models/chatcompletionfunctioncalloptionparam.md + - docs/models/chatcompletionfunctionmessageparam.md + - docs/models/chatcompletionfunctiontoolparam.md + - docs/models/chatcompletionmessage.md + - docs/models/chatcompletionmessagecustomtoolcall.md + - 
docs/models/chatcompletionmessagecustomtoolcallparam.md + - docs/models/chatcompletionmessagefunctiontoolcall.md + - docs/models/chatcompletionmessagefunctiontoolcallparam.md + - docs/models/chatcompletionmessagetoolcall.md + - docs/models/chatcompletionnamedtoolchoicecustomparam.md + - docs/models/chatcompletionnamedtoolchoiceparam.md + - docs/models/chatcompletionnonstreaming.md + - docs/models/chatcompletionnonstreamingfunctioncallenum.md + - docs/models/chatcompletionnonstreamingfunctioncallunion.md + - docs/models/chatcompletionnonstreamingmessage.md + - docs/models/chatcompletionnonstreamingmodality.md + - docs/models/chatcompletionnonstreamingreasoningeffort.md + - docs/models/chatcompletionnonstreamingresponseformat.md + - docs/models/chatcompletionnonstreamingservicetier.md + - docs/models/chatcompletionnonstreamingstop.md + - docs/models/chatcompletionnonstreamingtool.md + - docs/models/chatcompletionnonstreamingtoolchoiceenum.md + - docs/models/chatcompletionnonstreamingtoolchoiceunion.md + - docs/models/chatcompletionnonstreamingverbosity.md + - docs/models/chatcompletionpredictioncontentparam.md + - docs/models/chatcompletionpredictioncontentparamcontent.md + - docs/models/chatcompletionservicetier.md + - docs/models/chatcompletionstreaming.md + - docs/models/chatcompletionstreamingfunctioncallenum.md + - docs/models/chatcompletionstreamingfunctioncallunion.md + - docs/models/chatcompletionstreamingmessage.md + - docs/models/chatcompletionstreamingmodality.md + - docs/models/chatcompletionstreamingreasoningeffort.md + - docs/models/chatcompletionstreamingresponseformat.md + - docs/models/chatcompletionstreamingservicetier.md + - docs/models/chatcompletionstreamingstop.md + - docs/models/chatcompletionstreamingtool.md + - docs/models/chatcompletionstreamingtoolchoiceenum.md + - docs/models/chatcompletionstreamingtoolchoiceunion.md + - docs/models/chatcompletionstreamingverbosity.md + - docs/models/chatcompletionstreamoptionsparam.md + - docs/models/chatcompletionsystemmessageparam.md + - docs/models/chatcompletionsystemmessageparamcontent.md + - docs/models/chatcompletiontokenlogprob.md + - docs/models/chatcompletiontoolmessageparam.md + - docs/models/chatcompletiontoolmessageparamcontent.md + - docs/models/chatcompletionusermessageparam.md + - docs/models/chatcompletionusermessageparamcontent1.md + - docs/models/chatcompletionusermessageparamcontent2.md + - docs/models/choice.md + - docs/models/choicelogprobs.md + - docs/models/completiontokensdetails.md + - docs/models/completionusage.md + - docs/models/createdatasetentrydatasetsdatasetidpostrequest.md + - docs/models/createdatasetentryrequest.md + - docs/models/createdatasetentryresponse.md + - docs/models/createembeddingrequest.md + - docs/models/createembeddingresponse.md + - docs/models/createfunctionrequest.md + - docs/models/createfunctionresponse.md + - docs/models/createknowledgebaserequest.md + - docs/models/createknowledgebaseresponse.md + - docs/models/createmetricspansspanidmetricspostrequest.md + - docs/models/createmodelaliasrequest.md + - docs/models/createmodelaliasresponse.md + - docs/models/createspanmetricrequest.md + - docs/models/createspanmetricresponse.md + - docs/models/createspanrequest.md + - docs/models/createspanresponse.md + - docs/models/customformatgrammar.md + - docs/models/customformatgrammargrammar.md + - docs/models/customformattext.md + - docs/models/customoutput.md + - docs/models/deletecustommodelmodelscustommodeliddeleterequest.md + - 
docs/models/deletedatasetentrydatasetsdatasetidentriesentryiddeleterequest.md + - docs/models/deletedocumentsknowledgeknowledgebaseidquerydeleterequest.md + - docs/models/deletefilefromknowledgebaseknowledgeknowledgebaseidfilesfileiddeleterequest.md + - docs/models/deletefunctionfunctionsfunctioniddeleterequest.md + - docs/models/deleteknowledgebaseknowledgeknowledgebaseiddeleterequest.md + - docs/models/deleteknowledgebaserequest.md + - docs/models/deleteknowledgebaseresponse.md + - docs/models/deletemetricspansspanidmetricsmetriciddeleterequest.md + - docs/models/deletemodelaliasmodelsaliasesaliasiddeleterequest.md + - docs/models/deletespanspansspaniddeleterequest.md + - docs/models/delta.md + - docs/models/detail.md + - docs/models/example.md + - docs/models/examplein.md + - docs/models/file.md + - docs/models/filedownloadurlresponse.md + - docs/models/filefile.md + - docs/models/filter_.md + - docs/models/finishreason.md + - docs/models/format_.md + - docs/models/functioncallconfigurationinput.md + - docs/models/functioncallconfigurationoutput.md + - docs/models/functioncallinput.md + - docs/models/functioncalloutput.md + - docs/models/functiondefinition.md + - docs/models/functionoutput.md + - docs/models/functionstreamcallstreampostresponse.md + - docs/models/functionstreamcallstreampostresponsebody.md + - docs/models/getcustommodelbynamemodelscustombynamenamegetrequest.md + - docs/models/getcustommodelmodelscustommodelidgetrequest.md + - docs/models/getcustommodelresponse.md + - docs/models/getdatasetentriesresponse.md + - docs/models/getdatasetentrydatasetsdatasetidentriesentryidgetrequest.md + - docs/models/getdatasetentryresponse.md + - docs/models/getfiledownloadurlknowledgeknowledgebaseidfilesfileiddownloadurlgetrequest.md + - docs/models/getfunctionbynamefunctionsbynamenamegetrequest.md + - docs/models/getfunctionbyrevisionfunctionsfunctionidrevisionsrevisionidgetrequest.md + - docs/models/getfunctionfunctionsfunctionidgetrequest.md + - docs/models/getfunctionresponse.md + - docs/models/getknowledgebasebynameknowledgebynameknowledgebasenamegetrequest.md + - docs/models/getknowledgebaseknowledgeknowledgebaseidgetrequest.md + - docs/models/getknowledgebaseresponse.md + - docs/models/getmetricspansspanidmetricsmetricidgetrequest.md + - docs/models/getmodelaliasbynamemodelsaliasesbynamenamegetrequest.md + - docs/models/getmodelaliasmodelsaliasesaliasidgetrequest.md + - docs/models/getmodelaliasresponse.md + - docs/models/getspanmetricresponse.md + - docs/models/getspanresponse.md + - docs/models/getspanspansspanidgetrequest.md + - docs/models/gettraceresponse.md + - docs/models/gettracetracestraceidgetrequest.md + - docs/models/getuploadurlknowledgeknowledgebaseiduploadurlgetrequest.md + - docs/models/getuploadurlresponse.md + - docs/models/getusageresultitem.md + - docs/models/granularity.md + - docs/models/imageurl.md + - docs/models/input.md + - docs/models/inputaudio.md + - docs/models/inputaudioformat.md + - docs/models/jsonpayload.md + - docs/models/jsonschema.md + - docs/models/listcustommodelsmodelscustomgetrequest.md + - docs/models/listcustommodelsresponseitem.md + - docs/models/listdatasetentriesdatasetsdatasetidentriesgetrequest.md + - docs/models/listfilesknowledgeknowledgebaseidfilesgetrequest.md + - docs/models/listfilesresponse.md + - docs/models/listfunctionrevisionresponse.md + - docs/models/listfunctionrevisionsfunctionsfunctionidrevisionsgetrequest.md + - docs/models/listfunctionsfunctionsgetrequest.md + - docs/models/listfunctionsresponseitem.md + - 
docs/models/listknowledgebasesknowledgegetrequest.md + - docs/models/listknowledgebasesresponse.md + - docs/models/listlanguagemodelsresponse.md + - docs/models/listmetricsspansspanidmetricsgetrequest.md + - docs/models/listmodelaliasesmodelsaliasesgetrequest.md + - docs/models/listmodelaliasesresponseitem.md + - docs/models/listmodelsmodelsgetrequest.md + - docs/models/listrerankmodelsresponse.md + - docs/models/listspanmetricsresponse.md + - docs/models/listtracesresponse.md + - docs/models/listtracestracesgetrequest.md + - docs/models/meta.md + - docs/models/mode.md + - docs/models/model.md + - docs/models/op.md + - docs/models/openaitypeschatchatcompletioncustomtoolparamcustom.md + - docs/models/openaitypeschatchatcompletionmessagecustomtoolcallparamcustom.md + - docs/models/openaitypeschatchatcompletionmessagefunctiontoolcallparamfunction.md + - docs/models/openaitypeschatchatcompletionnamedtoolchoicecustomparamcustom.md + - docs/models/openaitypeschatchatcompletionnamedtoolchoiceparamfunction.md + - docs/models/openaitypeschatcompletioncreateparamsfunction.md + - docs/models/paginatedresponsegetdatasetentriesresponse.md + - docs/models/paginatedresponselistcustommodelsresponseitem.md + - docs/models/paginatedresponselistfilesresponse.md + - docs/models/paginatedresponselistfunctionrevisionresponse.md + - docs/models/paginatedresponselistfunctionsresponseitem.md + - docs/models/paginatedresponselistknowledgebasesresponse.md + - docs/models/paginatedresponselistlanguagemodelsresponse.md + - docs/models/paginatedresponselistmodelaliasesresponseitem.md + - docs/models/paginatedresponselistrerankmodelsresponse.md + - docs/models/paginatedresponselistspanmetricsresponse.md + - docs/models/paginatedresponselisttracesresponse.md + - docs/models/payload.md + - docs/models/prompttokensdetails.md + - docs/models/querydatasetentriesdatasetsdatasetidentriesquerypostrequest.md + - docs/models/querydatasetentriesresponse.md + - docs/models/queryknowledgebaseknowledgeknowledgebaseidquerypostrequest.md + - docs/models/queryknowledgebaserequest.md + - docs/models/queryknowledgebaseresponse.md + - docs/models/registercustommodelrequest.md + - docs/models/registercustommodelresponse.md + - docs/models/registerfileuploadknowledgeknowledgebaseidregisterfilepostrequest.md + - docs/models/registerfileuploadrequest.md + - docs/models/registerfileuploadresponse.md + - docs/models/rerankcost.md + - docs/models/rerankdocument.md + - docs/models/rerankrequestmodel.md + - docs/models/rerankresponsemodel.md + - docs/models/rerankresult.md + - docs/models/responseformatjsonobject.md + - docs/models/responseformatjsonschema.md + - docs/models/responseformattext.md + - docs/models/savetodatasetresponse.md + - docs/models/savetodatasetspansspanidsaveexamplespostrequest.md + - docs/models/searchcontextsize.md + - docs/models/security.md + - docs/models/spandata.md + - docs/models/spanmetricdata.md + - docs/models/spanschema.md + - docs/models/streamfunctionfunctionsfunctionidcallstreampostdata.md + - docs/models/streamfunctionfunctionsfunctionidcallstreampostrequest.md + - docs/models/streamfunctionfunctionsfunctionidcallstreampostresponse.md + - docs/models/streamfunctionfunctionsfunctionidcallstreampostresponsebody.md + - docs/models/streamfunctionrevisionfunctionsfunctionidcallstreamrevisionidpostdata.md + - docs/models/streamfunctionrevisionfunctionsfunctionidcallstreamrevisionidpostrequest.md + - docs/models/streamfunctionrevisionfunctionsfunctionidcallstreamrevisionidpostresponse.md + - 
docs/models/streamfunctionrevisionfunctionsfunctionidcallstreamrevisionidpostresponsebody.md + - docs/models/streamingchunk.md + - docs/models/syntax.md + - docs/models/textprocessingconfiguration.md + - docs/models/tmodel.md + - docs/models/tmodel1.md + - docs/models/toplogprob.md + - docs/models/updatecustommodelmodelscustommodelidpatchrequest.md + - docs/models/updatecustommodelrequest.md + - docs/models/updatecustommodelresponse.md + - docs/models/updatedatasetentrydatasetsdatasetidentriesentryidpatchrequest.md + - docs/models/updatedatasetentryrequest.md + - docs/models/updatedatasetentryresponse.md + - docs/models/updatefunctionfunctionsfunctionidpatchrequest.md + - docs/models/updatefunctionrequest.md + - docs/models/updatefunctionresponse.md + - docs/models/updatemetricspansspanidmetricsmetricidpatchrequest.md + - docs/models/updatemodelaliasmodelsaliasesaliasidpatchrequest.md + - docs/models/updatemodelaliasrequest.md + - docs/models/updatemodelaliasresponse.md + - docs/models/updatespanmetricrequest.md + - docs/models/updatespanmetricresponse.md + - docs/models/updatespanrequest.md + - docs/models/updatespanresponse.md + - docs/models/updatespanspansspanidpatchrequest.md + - docs/models/usageanalyticsusagegetrequest.md + - docs/models/utils/retryconfig.md + - docs/models/value1.md + - docs/models/value2.md + - docs/models/voice.md + - docs/models/voiceenum.md + - docs/models/websearchoptions.md + - docs/models/websearchoptionsuserlocation.md + - docs/models/websearchoptionsuserlocationapproximate.md + - docs/sdks/analytics/README.md + - docs/sdks/datasets/README.md + - docs/sdks/embeddings/README.md + - docs/sdks/entries/README.md + - docs/sdks/functions/README.md + - docs/sdks/knowledge/README.md + - docs/sdks/languagemodels/README.md + - docs/sdks/openai/README.md + - docs/sdks/opper/README.md + - docs/sdks/rerank/README.md + - docs/sdks/revisions/README.md + - docs/sdks/spanmetrics/README.md + - docs/sdks/spans/README.md + - docs/sdks/traces/README.md + - poetry.toml + - py.typed + - pylintrc + - pyproject.toml + - scripts/publish.sh + - src/opperai/__init__.py + - src/opperai/_hooks/__init__.py + - src/opperai/_hooks/sdkhooks.py + - src/opperai/_hooks/types.py + - src/opperai/_version.py + - src/opperai/analytics.py + - src/opperai/basesdk.py + - src/opperai/datasets.py + - src/opperai/embeddings.py + - src/opperai/entries.py + - src/opperai/errors/__init__.py + - src/opperai/errors/apierror.py + - src/opperai/errors/badrequesterror.py + - src/opperai/errors/conflicterror.py + - src/opperai/errors/error.py + - src/opperai/errors/no_response_error.py + - src/opperai/errors/notfounderror.py + - src/opperai/errors/oppererror.py + - src/opperai/errors/requestvalidationerror.py + - src/opperai/errors/responsevalidationerror.py + - src/opperai/errors/unauthorizederror.py + - src/opperai/functions.py + - src/opperai/httpclient.py + - src/opperai/knowledge.py + - src/opperai/language_models.py + - src/opperai/models/__init__.py + - src/opperai/models/add_knowledge_knowledge_base_id_add_postop.py + - src/opperai/models/addrequest.py + - src/opperai/models/annotation.py + - src/opperai/models/annotationurlcitation.py + - src/opperai/models/app_api_public_v2_function_call_callfunctionrequest.py + - src/opperai/models/app_api_public_v2_function_call_callfunctionresponse.py + - src/opperai/models/app_api_public_v2_functions_callfunctionrequest.py + - src/opperai/models/app_api_public_v2_functions_callfunctionresponse.py + - src/opperai/models/audio.py + - 
src/opperai/models/call_function_functions_function_id_call_postop.py + - src/opperai/models/call_function_revision_functions_function_id_call_revision_id_postop.py + - src/opperai/models/chat_completions_openai_chat_completions_postop.py + - src/opperai/models/chatcompletion.py + - src/opperai/models/chatcompletionallowedtoolchoiceparam.py + - src/opperai/models/chatcompletionallowedtoolsparam.py + - src/opperai/models/chatcompletionassistantmessageparam.py + - src/opperai/models/chatcompletionaudio.py + - src/opperai/models/chatcompletionaudioparam.py + - src/opperai/models/chatcompletioncontentpartimageparam.py + - src/opperai/models/chatcompletioncontentpartinputaudioparam.py + - src/opperai/models/chatcompletioncontentpartrefusalparam.py + - src/opperai/models/chatcompletioncontentparttextparam.py + - src/opperai/models/chatcompletioncustomtoolparam.py + - src/opperai/models/chatcompletiondevelopermessageparam.py + - src/opperai/models/chatcompletionfunctioncalloptionparam.py + - src/opperai/models/chatcompletionfunctionmessageparam.py + - src/opperai/models/chatcompletionfunctiontoolparam.py + - src/opperai/models/chatcompletionmessage.py + - src/opperai/models/chatcompletionmessagecustomtoolcall.py + - src/opperai/models/chatcompletionmessagecustomtoolcallparam.py + - src/opperai/models/chatcompletionmessagefunctiontoolcall.py + - src/opperai/models/chatcompletionmessagefunctiontoolcallparam.py + - src/opperai/models/chatcompletionnamedtoolchoicecustomparam.py + - src/opperai/models/chatcompletionnamedtoolchoiceparam.py + - src/opperai/models/chatcompletionnonstreaming.py + - src/opperai/models/chatcompletionpredictioncontentparam.py + - src/opperai/models/chatcompletionstreaming.py + - src/opperai/models/chatcompletionstreamoptionsparam.py + - src/opperai/models/chatcompletionsystemmessageparam.py + - src/opperai/models/chatcompletiontokenlogprob.py + - src/opperai/models/chatcompletiontoolmessageparam.py + - src/opperai/models/chatcompletionusermessageparam.py + - src/opperai/models/choice.py + - src/opperai/models/choicelogprobs.py + - src/opperai/models/completiontokensdetails.py + - src/opperai/models/completionusage.py + - src/opperai/models/create_dataset_entry_datasets_dataset_id_postop.py + - src/opperai/models/create_metric_spans_span_id_metrics_postop.py + - src/opperai/models/createdatasetentryrequest.py + - src/opperai/models/createdatasetentryresponse.py + - src/opperai/models/createembeddingrequest.py + - src/opperai/models/createembeddingresponse.py + - src/opperai/models/createfunctionrequest.py + - src/opperai/models/createfunctionresponse.py + - src/opperai/models/createknowledgebaserequest.py + - src/opperai/models/createknowledgebaseresponse.py + - src/opperai/models/createmodelaliasrequest.py + - src/opperai/models/createmodelaliasresponse.py + - src/opperai/models/createspanmetricrequest.py + - src/opperai/models/createspanmetricresponse.py + - src/opperai/models/createspanrequest.py + - src/opperai/models/createspanresponse.py + - src/opperai/models/custom_output.py + - src/opperai/models/customformatgrammar.py + - src/opperai/models/customformatgrammargrammar.py + - src/opperai/models/customformattext.py + - src/opperai/models/delete_custom_model_models_custom_model_id_deleteop.py + - src/opperai/models/delete_dataset_entry_datasets_dataset_id_entries_entry_id_deleteop.py + - src/opperai/models/delete_documents_knowledge_knowledge_base_id_query_deleteop.py + - 
src/opperai/models/delete_file_from_knowledge_base_knowledge_knowledge_base_id_files_file_id_deleteop.py + - src/opperai/models/delete_function_functions_function_id_deleteop.py + - src/opperai/models/delete_knowledge_base_knowledge_knowledge_base_id_deleteop.py + - src/opperai/models/delete_metric_spans_span_id_metrics_metric_id_deleteop.py + - src/opperai/models/delete_model_alias_models_aliases_alias_id_deleteop.py + - src/opperai/models/delete_span_spans_span_id_deleteop.py + - src/opperai/models/deleteknowledgebaserequest.py + - src/opperai/models/deleteknowledgebaseresponse.py + - src/opperai/models/example.py + - src/opperai/models/examplein.py + - src/opperai/models/file.py + - src/opperai/models/filedownloadurlresponse.py + - src/opperai/models/filefile.py + - src/opperai/models/filter_.py + - src/opperai/models/function_output.py + - src/opperai/models/function_stream_call_stream_postop.py + - src/opperai/models/functioncall_input.py + - src/opperai/models/functioncall_output.py + - src/opperai/models/functioncallconfiguration_input.py + - src/opperai/models/functioncallconfiguration_output.py + - src/opperai/models/functiondefinition.py + - src/opperai/models/get_custom_model_by_name_models_custom_by_name_name_getop.py + - src/opperai/models/get_custom_model_models_custom_model_id_getop.py + - src/opperai/models/get_dataset_entry_datasets_dataset_id_entries_entry_id_getop.py + - src/opperai/models/get_file_download_url_knowledge_knowledge_base_id_files_file_id_download_url_getop.py + - src/opperai/models/get_function_by_name_functions_by_name_name_getop.py + - src/opperai/models/get_function_by_revision_functions_function_id_revisions_revision_id_getop.py + - src/opperai/models/get_function_functions_function_id_getop.py + - src/opperai/models/get_knowledge_base_by_name_knowledge_by_name_knowledge_base_name_getop.py + - src/opperai/models/get_knowledge_base_knowledge_knowledge_base_id_getop.py + - src/opperai/models/get_metric_spans_span_id_metrics_metric_id_getop.py + - src/opperai/models/get_model_alias_by_name_models_aliases_by_name_name_getop.py + - src/opperai/models/get_model_alias_models_aliases_alias_id_getop.py + - src/opperai/models/get_span_spans_span_id_getop.py + - src/opperai/models/get_trace_traces_trace_id_getop.py + - src/opperai/models/get_upload_url_knowledge_knowledge_base_id_upload_url_getop.py + - src/opperai/models/getcustommodelresponse.py + - src/opperai/models/getdatasetentriesresponse.py + - src/opperai/models/getdatasetentryresponse.py + - src/opperai/models/getfunctionresponse.py + - src/opperai/models/getknowledgebaseresponse.py + - src/opperai/models/getmodelaliasresponse.py + - src/opperai/models/getspanmetricresponse.py + - src/opperai/models/getspanresponse.py + - src/opperai/models/gettraceresponse.py + - src/opperai/models/getuploadurlresponse.py + - src/opperai/models/getusageresultitem.py + - src/opperai/models/granularity.py + - src/opperai/models/imageurl.py + - src/opperai/models/inputaudio.py + - src/opperai/models/jsonschema.py + - src/opperai/models/list_custom_models_models_custom_getop.py + - src/opperai/models/list_dataset_entries_datasets_dataset_id_entries_getop.py + - src/opperai/models/list_files_knowledge_knowledge_base_id_files_getop.py + - src/opperai/models/list_function_revisions_functions_function_id_revisions_getop.py + - src/opperai/models/list_functions_functions_getop.py + - src/opperai/models/list_knowledge_bases_knowledge_getop.py + - src/opperai/models/list_metrics_spans_span_id_metrics_getop.py + - 
src/opperai/models/list_model_aliases_models_aliases_getop.py + - src/opperai/models/list_models_models_getop.py + - src/opperai/models/list_traces_traces_getop.py + - src/opperai/models/listcustommodelsresponseitem.py + - src/opperai/models/listfilesresponse.py + - src/opperai/models/listfunctionrevisionresponse.py + - src/opperai/models/listfunctionsresponseitem.py + - src/opperai/models/listknowledgebasesresponse.py + - src/opperai/models/listlanguagemodelsresponse.py + - src/opperai/models/listmodelaliasesresponseitem.py + - src/opperai/models/listrerankmodelsresponse.py + - src/opperai/models/listspanmetricsresponse.py + - src/opperai/models/listtracesresponse.py + - src/opperai/models/meta.py + - src/opperai/models/model.py + - src/opperai/models/op.py + - src/opperai/models/openai_types_chat_chat_completion_custom_tool_param_custom.py + - src/opperai/models/openai_types_chat_chat_completion_message_custom_tool_call_param_custom.py + - src/opperai/models/openai_types_chat_chat_completion_message_function_tool_call_param_function.py + - src/opperai/models/openai_types_chat_chat_completion_named_tool_choice_custom_param_custom.py + - src/opperai/models/openai_types_chat_chat_completion_named_tool_choice_param_function.py + - src/opperai/models/openai_types_chat_completion_create_params_function.py + - src/opperai/models/paginatedresponse_getdatasetentriesresponse_.py + - src/opperai/models/paginatedresponse_listcustommodelsresponseitem_.py + - src/opperai/models/paginatedresponse_listfilesresponse_.py + - src/opperai/models/paginatedresponse_listfunctionrevisionresponse_.py + - src/opperai/models/paginatedresponse_listfunctionsresponseitem_.py + - src/opperai/models/paginatedresponse_listknowledgebasesresponse_.py + - src/opperai/models/paginatedresponse_listlanguagemodelsresponse_.py + - src/opperai/models/paginatedresponse_listmodelaliasesresponseitem_.py + - src/opperai/models/paginatedresponse_listrerankmodelsresponse_.py + - src/opperai/models/paginatedresponse_listspanmetricsresponse_.py + - src/opperai/models/paginatedresponse_listtracesresponse_.py + - src/opperai/models/prompttokensdetails.py + - src/opperai/models/query_dataset_entries_datasets_dataset_id_entries_query_postop.py + - src/opperai/models/query_knowledge_base_knowledge_knowledge_base_id_query_postop.py + - src/opperai/models/querydatasetentriesresponse.py + - src/opperai/models/queryknowledgebaserequest.py + - src/opperai/models/queryknowledgebaseresponse.py + - src/opperai/models/register_file_upload_knowledge_knowledge_base_id_register_file_postop.py + - src/opperai/models/registercustommodelrequest.py + - src/opperai/models/registercustommodelresponse.py + - src/opperai/models/registerfileuploadrequest.py + - src/opperai/models/registerfileuploadresponse.py + - src/opperai/models/rerankcost.py + - src/opperai/models/rerankdocument.py + - src/opperai/models/rerankrequestmodel.py + - src/opperai/models/rerankresponsemodel.py + - src/opperai/models/rerankresult.py + - src/opperai/models/responseformatjsonobject.py + - src/opperai/models/responseformatjsonschema.py + - src/opperai/models/responseformattext.py + - src/opperai/models/save_to_dataset_spans_span_id_save_examples_postop.py + - src/opperai/models/savetodatasetresponse.py + - src/opperai/models/security.py + - src/opperai/models/spandata.py + - src/opperai/models/spanmetricdata.py + - src/opperai/models/spanschema.py + - src/opperai/models/stream_function_functions_function_id_call_stream_postop.py + - 
src/opperai/models/stream_function_revision_functions_function_id_call_stream_revision_id_postop.py + - src/opperai/models/textprocessingconfiguration.py + - src/opperai/models/tmodel.py + - src/opperai/models/toplogprob.py + - src/opperai/models/update_custom_model_models_custom_model_id_patchop.py + - src/opperai/models/update_dataset_entry_datasets_dataset_id_entries_entry_id_patchop.py + - src/opperai/models/update_function_functions_function_id_patchop.py + - src/opperai/models/update_metric_spans_span_id_metrics_metric_id_patchop.py + - src/opperai/models/update_model_alias_models_aliases_alias_id_patchop.py + - src/opperai/models/update_span_spans_span_id_patchop.py + - src/opperai/models/updatecustommodelrequest.py + - src/opperai/models/updatecustommodelresponse.py + - src/opperai/models/updatedatasetentryrequest.py + - src/opperai/models/updatedatasetentryresponse.py + - src/opperai/models/updatefunctionrequest.py + - src/opperai/models/updatefunctionresponse.py + - src/opperai/models/updatemodelaliasrequest.py + - src/opperai/models/updatemodelaliasresponse.py + - src/opperai/models/updatespanmetricrequest.py + - src/opperai/models/updatespanmetricresponse.py + - src/opperai/models/updatespanrequest.py + - src/opperai/models/updatespanresponse.py + - src/opperai/models/usage_analytics_usage_getop.py + - src/opperai/models/websearchoptions.py + - src/opperai/models/websearchoptionsuserlocation.py + - src/opperai/models/websearchoptionsuserlocationapproximate.py + - src/opperai/openai.py + - src/opperai/py.typed + - src/opperai/rerank.py + - src/opperai/revisions.py + - src/opperai/sdk.py + - src/opperai/sdkconfiguration.py + - src/opperai/spanmetrics.py + - src/opperai/spans.py + - src/opperai/traces.py + - src/opperai/types/__init__.py + - src/opperai/types/basemodel.py + - src/opperai/utils/__init__.py + - src/opperai/utils/annotations.py + - src/opperai/utils/datetimes.py + - src/opperai/utils/enums.py + - src/opperai/utils/eventstreaming.py + - src/opperai/utils/forms.py + - src/opperai/utils/headers.py + - src/opperai/utils/logger.py + - src/opperai/utils/metadata.py + - src/opperai/utils/queryparams.py + - src/opperai/utils/requestbodies.py + - src/opperai/utils/retries.py + - src/opperai/utils/security.py + - src/opperai/utils/serializers.py + - src/opperai/utils/unmarshal_json_response.py + - src/opperai/utils/url.py + - src/opperai/utils/values.py diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 379404c..e4945d5 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -24,12 +24,13 @@ generation: schemas: allOfMergeStrategy: shallowMerge requestBodyFieldName: "" + persistentEdits: {} tests: generateTests: true generateNewTests: false skipResponseBodyAssertions: false python: - version: 1.6.5 + version: 1.7.1 additionalDependencies: dev: {} main: {} @@ -70,6 +71,7 @@ python: outputModelSuffix: output packageManager: poetry packageName: opperai + preApplyUnionDiscriminators: false pytestFilterWarnings: [] pytestTimeout: 0 responseFormat: flat diff --git a/.speakeasy/speakeasy-modifications-overlay.yaml b/.speakeasy/speakeasy-modifications-overlay.yaml index b4fd881..15a2316 100644 --- a/.speakeasy/speakeasy-modifications-overlay.yaml +++ b/.speakeasy/speakeasy-modifications-overlay.yaml @@ -2,7 +2,7 @@ overlay: 1.0.0 x-speakeasy-jsonpath: rfc9535 info: title: Speakeasy Modifications - version: 0.0.32 + version: 0.0.34 x-speakeasy-metadata: after: "" before: "" @@ -720,8 +720,8 @@ actions: type: method-name - target: 
$["paths"]["/models/aliases/{alias_id}"]["delete"] update: - x-speakeasy-name-override: delete_alias x-speakeasy-group: language_models + x-speakeasy-name-override: delete_alias x-speakeasy-metadata: after: sdk.models.delete_alias() before: sdk.models.delete_model_alias_models_aliases__alias_id__delete() @@ -730,8 +730,8 @@ actions: type: method-name - target: $["paths"]["/models/aliases"]["get"] update: - x-speakeasy-name-override: list_aliases x-speakeasy-group: language_models + x-speakeasy-name-override: list_aliases x-speakeasy-metadata: after: sdk.models.list_aliases() before: sdk.models.list_model_aliases_models_aliases_get() @@ -740,8 +740,8 @@ actions: type: method-name - target: $["paths"]["/models/aliases/{alias_id}"]["patch"] update: - x-speakeasy-name-override: update_alias x-speakeasy-group: language_models + x-speakeasy-name-override: update_alias x-speakeasy-metadata: after: sdk.models.update_alias() before: sdk.models.update_model_alias_models_aliases__alias_id__patch() @@ -750,8 +750,8 @@ actions: type: method-name - target: $["paths"]["/models/aliases/by-name/{name}"]["get"] update: - x-speakeasy-name-override: get_alias_by_name x-speakeasy-group: language_models + x-speakeasy-name-override: get_alias_by_name x-speakeasy-metadata: after: sdk.models.get_alias_by_name() before: sdk.models.get_model_alias_by_name_models_aliases_by_name__name__get() @@ -760,8 +760,8 @@ actions: type: method-name - target: $["paths"]["/models/aliases"]["post"] update: - x-speakeasy-name-override: create_alias x-speakeasy-group: language_models + x-speakeasy-name-override: create_alias x-speakeasy-metadata: after: sdk.models.create_alias() before: sdk.models.create_model_alias_models_aliases_post() @@ -788,8 +788,8 @@ actions: type: method-name - target: $["paths"]["/models/aliases/{alias_id}"]["get"] update: - x-speakeasy-name-override: get_alias x-speakeasy-group: language_models + x-speakeasy-name-override: get_alias x-speakeasy-metadata: after: sdk.models.get_alias() before: sdk.models.get_model_alias_models_aliases__alias_id__get() @@ -805,15 +805,6 @@ actions: created_at: 1762265966139 reviewed_at: 1762265978991 type: method-name - - target: $["paths"]["/knowledge/{knowledge_base_id}/query"]["delete"] - update: - x-speakeasy-name-override: deleteDocuments - x-speakeasy-metadata: - after: sdk.knowledge.deleteDocuments() - before: sdk.knowledge.deleteDocumentsKnowledgeKnowledgeBaseIdQueryDelete() - created_at: 1751029113444 - reviewed_at: 1751029118113 - type: method-name - target: $["paths"]["/knowledge/{knowledge_base_id}/files"]["get"] update: x-speakeasy-name-override: listFiles @@ -832,3 +823,20 @@ actions: created_at: 1732872000000 reviewed_at: 1732872000000 type: method-name + - target: $["paths"]["/knowledge/{knowledge_base_id}/upload"]["post"] + update: + x-speakeasy-name-override: upload_file + x-speakeasy-metadata: + after: sdk.knowledge.upload_file() + before: sdk.knowledge.upload_file_knowledge__knowledge_base_id__upload_post() + created_at: 1765275433136 + type: method-name + - target: $["paths"]["/spans/{span_id}/feedback"]["post"] + update: + x-speakeasy-name-override: submit_feedback + x-speakeasy-metadata: + after: sdk.spans.submit_feedback() + before: sdk.spans.submit_span_feedback_spans__span_id__feedback_post() + created_at: 1765275433136 + reviewed_at: 1765276561875 + type: method-name diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index b6a05ad..f286761 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -1,9 +1,9 @@ 
-speakeasyVersion: 1.664.0 +speakeasyVersion: 1.673.0 sources: FastAPI: sourceNamespace: fast-api - sourceRevisionDigest: sha256:13e03f4bb2d2e45398714ef484f5d060570653dbf652ca0bc2d3b03c051da7f1 - sourceBlobDigest: sha256:675e1b26aa256bad5e337faa07b1a3b907a34836d515f33542cee6b2b52ff195 + sourceRevisionDigest: sha256:d977bb721b29c73ca744581d4fde9fd8fef611af59b37749a1aeeb49d8a24a13 + sourceBlobDigest: sha256:e4866bcc37feac400aef98e48d26be97f467df4bb5a6c955e7741e5b4dfca423 tags: - latest - 2.0.0 @@ -11,10 +11,10 @@ targets: opper: source: FastAPI sourceNamespace: fast-api - sourceRevisionDigest: sha256:13e03f4bb2d2e45398714ef484f5d060570653dbf652ca0bc2d3b03c051da7f1 - sourceBlobDigest: sha256:675e1b26aa256bad5e337faa07b1a3b907a34836d515f33542cee6b2b52ff195 + sourceRevisionDigest: sha256:d977bb721b29c73ca744581d4fde9fd8fef611af59b37749a1aeeb49d8a24a13 + sourceBlobDigest: sha256:e4866bcc37feac400aef98e48d26be97f467df4bb5a6c955e7741e5b4dfca423 codeSamplesNamespace: fast-api-python-code-samples - codeSamplesRevisionDigest: sha256:0d7ea2f726f0c386f9ddb792d1f74d817de3821fc0c04618ffb9b20b7e86620d + codeSamplesRevisionDigest: sha256:eed9d0b848bcc293201ef1129a63d9f6e15052b4cc04435ed9fa831c66e4061a workflow: workflowVersion: 1.0.0 speakeasyVersion: latest diff --git a/Makefile b/Makefile index c63baf3..e26f474 100644 --- a/Makefile +++ b/Makefile @@ -84,10 +84,9 @@ clean: echo "❌ Error: This is not a git repository. Cannot clean safely."; \ exit 1; \ fi - git checkout HEAD -- src/ docs/ USAGE.md README.md README-PYPI.md test.py test_example_app.py || { \ - echo "❌ Error: Failed to reset files. Make sure you're in a git repository with committed changes."; \ - exit 1; \ - } + @git checkout HEAD -- src/ docs/ USAGE.md README.md 2>/dev/null || true + @git checkout HEAD -- README-PYPI.md 2>/dev/null || true + @git checkout HEAD -- test.py test_example_app.py 2>/dev/null || true @echo "✅ Cleaned successfully - files reset to last committed state" # Run tests diff --git a/README.md b/README.md index e28b219..d390a4d 100644 --- a/README.md +++ b/README.md @@ -1,1108 +1,117 @@ -# opperai +# Opper Python SDK +This is the Opper Python SDK. See below for getting started, and the [docs](https://docs.opper.ai) for more information. The SDK has builtin documentation and examples in function docstrings, which should be visible in your code editor as you are using the functions. - -
- - - - -
- - - -## Summary - - - - - -## Table of Contents - -* [opperai](#opperai) - * [SDK Installation](#sdk-installation) - * [IDE Support](#ide-support) - * [SDK Example Usage](#sdk-example-usage) - * [Authentication](#authentication) - * [Available Resources and Operations](#available-resources-and-operations) - * [Server-sent event streaming](#server-sent-event-streaming) - * [Retries](#retries) - * [Error Handling](#error-handling) - * [Custom HTTP Client](#custom-http-client) - * [Server Selection](#server-selection) - * [Custom HTTP Client](#custom-http-client-1) - * [Resource Management](#resource-management) - * [Debugging](#debugging) -* [Development](#development) - * [Maturity](#maturity) - * [Contributions](#contributions) - - - - -## SDK Installation - -> [!TIP] -> To finish publishing your SDK to PyPI you must [run your first generation action](https://www.speakeasy.com/docs/github-setup#step-by-step-guide). - - -> [!NOTE] -> **Python version upgrade policy** -> -> Once a Python version reaches its [official end of life date](https://devguide.python.org/versions/), a 3-month grace period is provided for users to upgrade. Following this grace period, the minimum python version supported in the SDK will be updated. - -The SDK can be installed with *uv*, *pip*, or *poetry* package managers. - -### uv - -*uv* is a fast Python package installer and resolver, designed as a drop-in replacement for pip and pip-tools. It's recommended for its speed and modern Python tooling capabilities. +## Install ```bash -uv add git+.git -``` - -### PIP - -*PIP* is the default package installer for Python, enabling easy installation and management of packages from PyPI via the command line. - -```bash -pip install git+.git -``` - -### Poetry - -*Poetry* is a modern tool that simplifies dependency management and package publishing by using a single `pyproject.toml` file to handle project metadata and dependencies. - -```bash -poetry add git+.git -``` - -### Shell and script usage with `uv` - -You can use this SDK in a Python shell with [uv](https://docs.astral.sh/uv/) and the `uvx` command that comes with it like so: - -```shell -uvx --from opperai python -``` - -It's also possible to write a standalone Python script without needing to set up a whole project like so: - -```python -#!/usr/bin/env -S uv run --script -# /// script -# requires-python = ">=3.9" -# dependencies = [ -# "opperai", -# ] -# /// - -from opperai import Opper - -sdk = Opper( - # SDK arguments -) - -# Rest of script here... +pip install opperai ``` -Once that is saved to a file, you can run it with `uv run script.py` where -`script.py` can be replaced with the actual file name. - - - -## IDE Support - -### PyCharm - -Generally, the SDK will work well with most IDEs out of the box. However, when using PyCharm, you can enjoy much better integration with Pydantic by installing an additional plugin. +## Configuration -- [PyCharm Pydantic Plugin](https://docs.pydantic.dev/latest/integrations/pycharm/) - +### Environment variable - -## SDK Example Usage +- `OPPER_API_KEY` environment variable is read by the SDK if no `api_key` is provided to the `Client` object. 
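A minimal sketch of the configuration path described above, assuming the client reads `OPPER_API_KEY` automatically and also accepts an explicit `api_key=` argument (the exact parameter name is an assumption, not taken verbatim from this README):

```python
import os

from opperai import Opper

# Rely on the OPPER_API_KEY environment variable, read automatically by the SDK.
opper = Opper()

# Or pass the key explicitly; `api_key` is assumed from the configuration note above.
opper = Opper(api_key=os.environ["OPPER_API_KEY"])
```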
-### Example +## Using opper ```python -# Synchronous Example -from opperai import Opper, models -import os - - -with Opper( - http_bearer=os.getenv("OPPER_HTTP_BEARER", ""), -) as opper: - - res = opper.call(name="add_numbers", instructions="Calculate the sum of two numbers", input_schema={ - "properties": { - "x": { - "title": "X", - "type": "integer", - }, - "y": { - "title": "Y", - "type": "integer", - }, - }, - "required": [ - "x", - "y", - ], - "title": "OpperInputExample", - "type": "object", - }, output_schema={ - "properties": { - "sum": { - "title": "Sum", - "type": "integer", - }, - }, - "required": [ - "sum", - ], - "title": "OpperOutputExample", - "type": "object", - }, input={ - "x": 4, - "y": 5, - }, examples=[ - { - "input": { - "x": 1, - "y": 3, - }, - "output": { - "sum": 4, - }, - "comment": "Adds two numbers", - }, - ], parent_span_id="123e4567-e89b-12d3-a456-426614174000", tags={ - "project": "project_456", - "user": "company_123", - }, configuration=models.FunctionCallConfiguration( - beta_evaluation=models.EvaluationConfig( - scorers=models.ScorersEnum1.BASE, - ), - )) - - # Handle response - print(res) -``` - -
- -The same SDK client can also be used to make asynchronous requests by importing asyncio. - -```python -# Asynchronous Example -import asyncio -from opperai import Opper, models -import os - -async def main(): - - async with Opper( - http_bearer=os.getenv("OPPER_HTTP_BEARER", ""), - ) as opper: - - res = await opper.call_async(name="add_numbers", instructions="Calculate the sum of two numbers", input_schema={ - "properties": { - "x": { - "title": "X", - "type": "integer", - }, - "y": { - "title": "Y", - "type": "integer", - }, - }, - "required": [ - "x", - "y", - ], - "title": "OpperInputExample", - "type": "object", - }, output_schema={ - "properties": { - "sum": { - "title": "Sum", - "type": "integer", - }, - }, - "required": [ - "sum", - ], - "title": "OpperOutputExample", - "type": "object", - }, input={ - "x": 4, - "y": 5, - }, examples=[ - { - "input": { - "x": 1, - "y": 3, - }, - "output": { - "sum": 4, +from typing import List + +from opperai import Index, Opper, trace +from opperai.types import DocumentIn +from opperai.types.indexes import DocumentIn, RetrievalResponse +from pydantic import BaseModel + + +class Answer(BaseModel): + steps: List[str] + + +class QuestionAndContext(BaseModel): + question: str + context: List[RetrievalResponse] + + +@trace +def answer_question(index: Index, question: str) -> Answer: + results = index.query(question, 1, None) + + result, response = opper.call( + name="answer_question", + instructions="Answer the question and provide the steps to do so", + input=QuestionAndContext(question=question, context=results), + output_type=Answer, + ) + response.span.save_metric("artificial_score", 5) + + return result + + +@trace +def translate(answer: Answer, language: str) -> str: + result, _ = opper.call( + name="translate", + instructions="Translate the answer to the given language", + input=answer, + output_type=Answer, + ) + return result + + +qna = [ + { + "question": "I cannot log in to my account", + "answer": "Use the reset password feature by clicking on 'Forgot password?' and then follow the instructions from email", + "id": "1", + }, + { + "question": "How can I see my invoices?", + "answer": "Go to the billing section and click on 'Invoices'", + "id": "2", + }, + { + "question": "How can I add a new user to my account?", + "answer": "Upgrade account and add the user to your account", + "id": "3", + }, +] + +opper = Opper() + + +def index_qna(qnas: list[dict]): + index = opper.indexes.create("qna") + + for qna in qnas: + index.add( + DocumentIn( + key=qna["id"], + content=f"question: {qna['question']}\nanswer: {qna['answer']}", + metadata={ + "id": qna["id"], }, - "comment": "Adds two numbers", - }, - ], parent_span_id="123e4567-e89b-12d3-a456-426614174000", tags={ - "project": "project_456", - "user": "company_123", - }, configuration=models.FunctionCallConfiguration( - beta_evaluation=models.EvaluationConfig( - scorers=models.ScorersEnum1.BASE, - ), - )) - - # Handle response - print(res) - -asyncio.run(main()) -``` - - - -## Authentication - -### Per-Client Security Schemes - -This SDK supports the following security scheme globally: - -| Name | Type | Scheme | Environment Variable | -| ------------- | ---- | ----------- | -------------------- | -| `http_bearer` | http | HTTP Bearer | `OPPER_HTTP_BEARER` | - -To authenticate with the API the `http_bearer` parameter must be set when initializing the SDK client instance. 
For example: -```python -from opperai import Opper, models -import os - - -with Opper( - http_bearer=os.getenv("OPPER_HTTP_BEARER", ""), -) as opper: - - res = opper.call(name="add_numbers", instructions="Calculate the sum of two numbers", input_schema={ - "properties": { - "x": { - "title": "X", - "type": "integer", - }, - "y": { - "title": "Y", - "type": "integer", - }, - }, - "required": [ - "x", - "y", - ], - "title": "OpperInputExample", - "type": "object", - }, output_schema={ - "properties": { - "sum": { - "title": "Sum", - "type": "integer", - }, - }, - "required": [ - "sum", - ], - "title": "OpperOutputExample", - "type": "object", - }, input={ - "x": 4, - "y": 5, - }, examples=[ - { - "input": { - "x": 1, - "y": 3, - }, - "output": { - "sum": 4, - }, - "comment": "Adds two numbers", - }, - ], parent_span_id="123e4567-e89b-12d3-a456-426614174000", tags={ - "project": "project_456", - "user": "company_123", - }, configuration=models.FunctionCallConfiguration( - beta_evaluation=models.EvaluationConfig( - scorers=models.ScorersEnum1.BASE, - ), - )) - - # Handle response - print(res) - -``` - - - -## Available Resources and Operations - -
-Available methods - -### [analytics](docs/sdks/analytics/README.md) - -* [get_usage](docs/sdks/analytics/README.md#get_usage) - Usage - -### [datasets](docs/sdks/datasets/README.md) - -* [create_entry](docs/sdks/datasets/README.md#create_entry) - Create Dataset Entry -* [list_entries](docs/sdks/datasets/README.md#list_entries) - List Dataset Entries -* [get_entry](docs/sdks/datasets/README.md#get_entry) - Get Dataset Entry -* [delete_entry](docs/sdks/datasets/README.md#delete_entry) - Delete Dataset Entry -* [query_entries](docs/sdks/datasets/README.md#query_entries) - Query Dataset Entries - -#### [datasets.entries](docs/sdks/entries/README.md) - -* [update](docs/sdks/entries/README.md#update) - Update Dataset Entry - -### [embeddings](docs/sdks/embeddings/README.md) - -* [create](docs/sdks/embeddings/README.md#create) - Create Embedding - -### [functions](docs/sdks/functions/README.md) - -* [create](docs/sdks/functions/README.md#create) - Create Function -* [list](docs/sdks/functions/README.md#list) - List Functions -* [get](docs/sdks/functions/README.md#get) - Get Function -* [update](docs/sdks/functions/README.md#update) - Update Function -* [delete](docs/sdks/functions/README.md#delete) - Delete Function -* [get_by_name](docs/sdks/functions/README.md#get_by_name) - Get Function By Name -* [get_by_revision](docs/sdks/functions/README.md#get_by_revision) - Get Function By Revision -* [call](docs/sdks/functions/README.md#call) - Call Function -* [stream](docs/sdks/functions/README.md#stream) - Stream Function -* [call_revision](docs/sdks/functions/README.md#call_revision) - Call Function Revision -* [stream_revision](docs/sdks/functions/README.md#stream_revision) - Stream Function Revision - -#### [functions.revisions](docs/sdks/revisions/README.md) - -* [list](docs/sdks/revisions/README.md#list) - List Function Revisions - -### [knowledge](docs/sdks/knowledge/README.md) - -* [create](docs/sdks/knowledge/README.md#create) - Create Knowledge Base -* [list](docs/sdks/knowledge/README.md#list) - List Knowledge Bases -* [get](docs/sdks/knowledge/README.md#get) - Get Knowledge Base -* [delete](docs/sdks/knowledge/README.md#delete) - Delete Knowledge Base -* [get_by_name](docs/sdks/knowledge/README.md#get_by_name) - Get Knowledge Base By Name -* [get_upload_url](docs/sdks/knowledge/README.md#get_upload_url) - Get Upload Url -* [register_file_upload](docs/sdks/knowledge/README.md#register_file_upload) - Register File Upload -* [delete_file](docs/sdks/knowledge/README.md#delete_file) - Delete File From Knowledge Base -* [query](docs/sdks/knowledge/README.md#query) - Query Knowledge Base -* [delete_documents](docs/sdks/knowledge/README.md#delete_documents) - Delete Documents -* [add](docs/sdks/knowledge/README.md#add) - Add - -### [language_models](docs/sdks/languagemodels/README.md) - -* [list](docs/sdks/languagemodels/README.md#list) - List Models -* [register_custom](docs/sdks/languagemodels/README.md#register_custom) - Register Custom Model -* [list_custom](docs/sdks/languagemodels/README.md#list_custom) - List Custom Models -* [get_custom](docs/sdks/languagemodels/README.md#get_custom) - Get Custom Model -* [update_custom](docs/sdks/languagemodels/README.md#update_custom) - Update Custom Model -* [delete_custom](docs/sdks/languagemodels/README.md#delete_custom) - Delete Custom Model -* [get_custom_by_name](docs/sdks/languagemodels/README.md#get_custom_by_name) - Get Custom Model By Name -* [create_alias](docs/sdks/languagemodels/README.md#create_alias) - Create Model Alias -* 
[list_aliases](docs/sdks/languagemodels/README.md#list_aliases) - List Model Aliases -* [get_alias](docs/sdks/languagemodels/README.md#get_alias) - Get Model Alias -* [update_alias](docs/sdks/languagemodels/README.md#update_alias) - Update Model Alias -* [delete_alias](docs/sdks/languagemodels/README.md#delete_alias) - Delete Model Alias -* [get_alias_by_name](docs/sdks/languagemodels/README.md#get_alias_by_name) - Get Model Alias By Name - -### [openai](docs/sdks/openai/README.md) - -* [create_chat_completion](docs/sdks/openai/README.md#create_chat_completion) - Chat Completions - -### [Opper SDK](docs/sdks/opper/README.md) - -* [call](docs/sdks/opper/README.md#call) - Function Call -* [stream](docs/sdks/opper/README.md#stream) - Function Stream - -### [rerank](docs/sdks/rerank/README.md) - -* [documents](docs/sdks/rerank/README.md#documents) - Rerank Documents -* [list_models](docs/sdks/rerank/README.md#list_models) - List Rerank Models - -### [span_metrics](docs/sdks/spanmetrics/README.md) - -* [create_metric](docs/sdks/spanmetrics/README.md#create_metric) - Create Metric -* [list](docs/sdks/spanmetrics/README.md#list) - List Metrics -* [get](docs/sdks/spanmetrics/README.md#get) - Get Metric -* [update_metric](docs/sdks/spanmetrics/README.md#update_metric) - Update Metric -* [delete](docs/sdks/spanmetrics/README.md#delete) - Delete Metric - -### [spans](docs/sdks/spans/README.md) - -* [create](docs/sdks/spans/README.md#create) - Create Span -* [get](docs/sdks/spans/README.md#get) - Get Span -* [update](docs/sdks/spans/README.md#update) - Update Span -* [delete](docs/sdks/spans/README.md#delete) - Delete Span -* [save_examples](docs/sdks/spans/README.md#save_examples) - Save To Dataset - -### [traces](docs/sdks/traces/README.md) - -* [list](docs/sdks/traces/README.md#list) - List Traces -* [get](docs/sdks/traces/README.md#get) - Get Trace - -
- - - -## Server-sent event streaming - -[Server-sent events][mdn-sse] are used to stream content from certain -operations. These operations will expose the stream as [Generator][generator] that -can be consumed using a simple `for` loop. The loop will -terminate when the server no longer has any events to send and closes the -underlying connection. - -The stream is also a [Context Manager][context-manager] and can be used with the `with` statement and will close the -underlying connection when the context is exited. - -```python -from opperai import Opper, models -import os - - -with Opper( - http_bearer=os.getenv("OPPER_HTTP_BEARER", ""), -) as opper: - - res = opper.stream(name="add_numbers", instructions="Calculate the sum of two numbers", input_schema={ - "properties": { - "x": { - "title": "X", - "type": "integer", - }, - "y": { - "title": "Y", - "type": "integer", - }, - }, - "required": [ - "x", - "y", - ], - "title": "OpperInputExample", - "type": "object", - }, output_schema={ - "properties": { - "sum": { - "title": "Sum", - "type": "integer", - }, - }, - "required": [ - "sum", - ], - "title": "OpperOutputExample", - "type": "object", - }, input={ - "x": 4, - "y": 5, - }, examples=[ - { - "input": { - "x": 1, - "y": 3, - }, - "output": { - "sum": 4, - }, - "comment": "Adds two numbers", - }, - ], parent_span_id="123e4567-e89b-12d3-a456-426614174000", tags={ - "project": "project_456", - "user": "company_123", - }, configuration=models.FunctionCallConfiguration( - beta_evaluation=models.EvaluationConfig( - scorers=models.ScorersEnum1.BASE, - ), - )) - - with res as event_stream: - for event in event_stream: - # handle event - print(event, flush=True) - -``` - -[mdn-sse]: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events -[generator]: https://book.pythontips.com/en/latest/generators.html -[context-manager]: https://book.pythontips.com/en/latest/context_managers.html - - - -## Retries - -Some of the endpoints in this SDK support retries. If you use the SDK without any configuration, it will fall back to the default retry strategy provided by the API. However, the default retry strategy can be overridden on a per-operation basis, or across the entire SDK. 
- -To change the default retry strategy for a single API call, simply provide a `RetryConfig` object to the call: -```python -from opperai import Opper, models -from opperai.utils import BackoffStrategy, RetryConfig -import os - - -with Opper( - http_bearer=os.getenv("OPPER_HTTP_BEARER", ""), -) as opper: - - res = opper.call(name="add_numbers", instructions="Calculate the sum of two numbers", input_schema={ - "properties": { - "x": { - "title": "X", - "type": "integer", - }, - "y": { - "title": "Y", - "type": "integer", - }, - }, - "required": [ - "x", - "y", - ], - "title": "OpperInputExample", - "type": "object", - }, output_schema={ - "properties": { - "sum": { - "title": "Sum", - "type": "integer", - }, - }, - "required": [ - "sum", - ], - "title": "OpperOutputExample", - "type": "object", - }, input={ - "x": 4, - "y": 5, - }, examples=[ - { - "input": { - "x": 1, - "y": 3, - }, - "output": { - "sum": 4, - }, - "comment": "Adds two numbers", - }, - ], parent_span_id="123e4567-e89b-12d3-a456-426614174000", tags={ - "project": "project_456", - "user": "company_123", - }, configuration=models.FunctionCallConfiguration( - beta_evaluation=models.EvaluationConfig( - scorers=models.ScorersEnum1.BASE, - ), - ), - RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False)) - - # Handle response - print(res) - -``` - -If you'd like to override the default retry strategy for all operations that support retries, you can use the `retry_config` optional parameter when initializing the SDK: -```python -from opperai import Opper, models -from opperai.utils import BackoffStrategy, RetryConfig -import os - - -with Opper( - retry_config=RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False), - http_bearer=os.getenv("OPPER_HTTP_BEARER", ""), -) as opper: - - res = opper.call(name="add_numbers", instructions="Calculate the sum of two numbers", input_schema={ - "properties": { - "x": { - "title": "X", - "type": "integer", - }, - "y": { - "title": "Y", - "type": "integer", - }, - }, - "required": [ - "x", - "y", - ], - "title": "OpperInputExample", - "type": "object", - }, output_schema={ - "properties": { - "sum": { - "title": "Sum", - "type": "integer", - }, - }, - "required": [ - "sum", - ], - "title": "OpperOutputExample", - "type": "object", - }, input={ - "x": 4, - "y": 5, - }, examples=[ - { - "input": { - "x": 1, - "y": 3, - }, - "output": { - "sum": 4, - }, - "comment": "Adds two numbers", - }, - ], parent_span_id="123e4567-e89b-12d3-a456-426614174000", tags={ - "project": "project_456", - "user": "company_123", - }, configuration=models.FunctionCallConfiguration( - beta_evaluation=models.EvaluationConfig( - scorers=models.ScorersEnum1.BASE, - ), - )) - - # Handle response - print(res) - -``` - - - -## Error Handling - -[`OpperError`](./src/opperai/errors/oppererror.py) is the base class for all HTTP error responses. It has the following properties: - -| Property | Type | Description | -| ------------------ | ---------------- | --------------------------------------------------------------------------------------- | -| `err.message` | `str` | Error message | -| `err.status_code` | `int` | HTTP response status code eg `404` | -| `err.headers` | `httpx.Headers` | HTTP response headers | -| `err.body` | `str` | HTTP body. Can be empty string if no body is returned. | -| `err.raw_response` | `httpx.Response` | Raw HTTP response | -| `err.data` | | Optional. Some errors may contain structured data. [See Error Classes](#error-classes). 
| - -### Example -```python -from opperai import Opper, errors, models -import os - - -with Opper( - http_bearer=os.getenv("OPPER_HTTP_BEARER", ""), -) as opper: - res = None - try: - - res = opper.call(name="add_numbers", instructions="Calculate the sum of two numbers", input_schema={ - "properties": { - "x": { - "title": "X", - "type": "integer", - }, - "y": { - "title": "Y", - "type": "integer", - }, - }, - "required": [ - "x", - "y", - ], - "title": "OpperInputExample", - "type": "object", - }, output_schema={ - "properties": { - "sum": { - "title": "Sum", - "type": "integer", - }, - }, - "required": [ - "sum", - ], - "title": "OpperOutputExample", - "type": "object", - }, input={ - "x": 4, - "y": 5, - }, examples=[ - { - "input": { - "x": 1, - "y": 3, - }, - "output": { - "sum": 4, - }, - "comment": "Adds two numbers", - }, - ], parent_span_id="123e4567-e89b-12d3-a456-426614174000", tags={ - "project": "project_456", - "user": "company_123", - }, configuration=models.FunctionCallConfiguration( - beta_evaluation=models.EvaluationConfig( - scorers=models.ScorersEnum1.BASE, - ), - )) - - # Handle response - print(res) - - - except errors.OpperError as e: - # The base class for HTTP error responses - print(e.message) - print(e.status_code) - print(e.body) - print(e.headers) - print(e.raw_response) - - # Depending on the method different errors may be thrown - if isinstance(e, errors.BadRequestError): - print(e.data.type) # Optional[str] - print(e.data.message) # Optional[str] - print(e.data.detail) # Any -``` - -### Error Classes -**Primary errors:** -* [`OpperError`](./src/opperai/errors/oppererror.py): The base class for HTTP error responses. - * [`BadRequestError`](./src/opperai/errors/badrequesterror.py): Bad Request. Status code `400`. - * [`UnauthorizedError`](./src/opperai/errors/unauthorizederror.py): Unauthorized. Status code `401`. - * [`NotFoundError`](./src/opperai/errors/notfounderror.py): Not Found. Status code `404`. - * [`RequestValidationError`](./src/opperai/errors/requestvalidationerror.py): Request Validation Error. Status code `422`. * - -
Less common errors (7) - -
- -**Network errors:** -* [`httpx.RequestError`](https://www.python-httpx.org/exceptions/#httpx.RequestError): Base class for request errors. - * [`httpx.ConnectError`](https://www.python-httpx.org/exceptions/#httpx.ConnectError): HTTP client was unable to make a request to a server. - * [`httpx.TimeoutException`](https://www.python-httpx.org/exceptions/#httpx.TimeoutException): HTTP request timed out. - - -**Inherit from [`OpperError`](./src/opperai/errors/oppererror.py)**: -* [`ConflictError`](./src/opperai/errors/conflicterror.py): Conflict. Status code `409`. Applicable to 4 of 61 methods.* -* [`Error`](./src/opperai/errors/error.py): Request validation error. Applicable to 1 of 61 methods.* -* [`ResponseValidationError`](./src/opperai/errors/responsevalidationerror.py): Type mismatch between the response data and the expected Pydantic model. Provides access to the Pydantic validation error via the `cause` attribute. - -
- -\* Check [the method documentation](#available-resources-and-operations) to see if the error is applicable. - - -## Custom HTTP Client - -The Python SDK makes API calls using the [httpx](https://www.python-httpx.org/) HTTP library. In order to provide a convenient way to configure timeouts, cookies, proxies, custom headers, and other low-level configuration, you can initialize the SDK client with your own HTTP client instance. -Depending on whether you are using the sync or async version of the SDK, you can pass an instance of `HttpClient` or `AsyncHttpClient` respectively, which are Protocol's ensuring that the client has the necessary methods to make API calls. -This allows you to wrap the client with your own custom logic, such as adding custom headers, logging, or error handling, or you can just pass an instance of `httpx.Client` or `httpx.AsyncClient` directly. - -For example, you could specify a header for every request that this sdk makes as follows: -```python -from opperai import Opper -import httpx - -http_client = httpx.Client(headers={"x-custom-header": "someValue"}) -s = Opper(client=http_client) -``` - -or you could wrap the client with your own custom logic: -```python -from opperai import Opper -from opperai.httpclient import AsyncHttpClient -import httpx - -class CustomClient(AsyncHttpClient): - client: AsyncHttpClient - - def __init__(self, client: AsyncHttpClient): - self.client = client - - async def send( - self, - request: httpx.Request, - *, - stream: bool = False, - auth: Union[ - httpx._types.AuthTypes, httpx._client.UseClientDefault, None - ] = httpx.USE_CLIENT_DEFAULT, - follow_redirects: Union[ - bool, httpx._client.UseClientDefault - ] = httpx.USE_CLIENT_DEFAULT, - ) -> httpx.Response: - request.headers["Client-Level-Header"] = "added by client" - - return await self.client.send( - request, stream=stream, auth=auth, follow_redirects=follow_redirects - ) - - def build_request( - self, - method: str, - url: httpx._types.URLTypes, - *, - content: Optional[httpx._types.RequestContent] = None, - data: Optional[httpx._types.RequestData] = None, - files: Optional[httpx._types.RequestFiles] = None, - json: Optional[Any] = None, - params: Optional[httpx._types.QueryParamTypes] = None, - headers: Optional[httpx._types.HeaderTypes] = None, - cookies: Optional[httpx._types.CookieTypes] = None, - timeout: Union[ - httpx._types.TimeoutTypes, httpx._client.UseClientDefault - ] = httpx.USE_CLIENT_DEFAULT, - extensions: Optional[httpx._types.RequestExtensions] = None, - ) -> httpx.Request: - return self.client.build_request( - method, - url, - content=content, - data=data, - files=files, - json=json, - params=params, - headers=headers, - cookies=cookies, - timeout=timeout, - extensions=extensions, + ) ) -s = Opper(async_client=CustomClient(httpx.AsyncClient())) -``` - + return index - -## Server Selection - -### Override Server URL Per-Client - -The default server can be overridden globally by passing a URL to the `server_url: str` optional parameter when initializing the SDK client instance. For example: -```python -from opperai import Opper, models -import os +def run(): + index = index_qna(qna) -with Opper( - server_url="https://api.opper.ai/v2", - http_bearer=os.getenv("OPPER_HTTP_BEARER", ""), -) as opper: + question = "How can I see my invoices?" 
- res = opper.call(name="add_numbers", instructions="Calculate the sum of two numbers", input_schema={ - "properties": { - "x": { - "title": "X", - "type": "integer", - }, - "y": { - "title": "Y", - "type": "integer", - }, - }, - "required": [ - "x", - "y", - ], - "title": "OpperInputExample", - "type": "object", - }, output_schema={ - "properties": { - "sum": { - "title": "Sum", - "type": "integer", - }, - }, - "required": [ - "sum", - ], - "title": "OpperOutputExample", - "type": "object", - }, input={ - "x": 4, - "y": 5, - }, examples=[ - { - "input": { - "x": 1, - "y": 3, - }, - "output": { - "sum": 4, - }, - "comment": "Adds two numbers", - }, - ], parent_span_id="123e4567-e89b-12d3-a456-426614174000", tags={ - "project": "project_456", - "user": "company_123", - }, configuration=models.FunctionCallConfiguration( - beta_evaluation=models.EvaluationConfig( - scorers=models.ScorersEnum1.BASE, - ), - )) + with opper.traces.start("answer_question") as trace: + answer = answer_question(index, question) + print(answer) - # Handle response - print(res) +run() ``` - - - -## Custom HTTP Client - -The Python SDK makes API calls using the [httpx](https://www.python-httpx.org/) HTTP library. In order to provide a convenient way to configure timeouts, cookies, proxies, custom headers, and other low-level configuration, you can initialize the SDK client with your own HTTP client instance. -Depending on whether you are using the sync or async version of the SDK, you can pass an instance of `HttpClient` or `AsyncHttpClient` respectively, which are Protocol's ensuring that the client has the necessary methods to make API calls. -This allows you to wrap the client with your own custom logic, such as adding custom headers, logging, or error handling, or you can just pass an instance of `httpx.Client` or `httpx.AsyncClient` directly. 
- -For example, you could specify a header for every request that this sdk makes as follows: -```python -from opperai import Opper -import httpx - -http_client = httpx.Client(headers={"x-custom-header": "someValue"}) -s = Opper(client=http_client) -``` - -or you could wrap the client with your own custom logic: -```python -from opperai import Opper -from opperai.httpclient import AsyncHttpClient -import httpx - -class CustomClient(AsyncHttpClient): - client: AsyncHttpClient - - def __init__(self, client: AsyncHttpClient): - self.client = client - - async def send( - self, - request: httpx.Request, - *, - stream: bool = False, - auth: Union[ - httpx._types.AuthTypes, httpx._client.UseClientDefault, None - ] = httpx.USE_CLIENT_DEFAULT, - follow_redirects: Union[ - bool, httpx._client.UseClientDefault - ] = httpx.USE_CLIENT_DEFAULT, - ) -> httpx.Response: - request.headers["Client-Level-Header"] = "added by client" - - return await self.client.send( - request, stream=stream, auth=auth, follow_redirects=follow_redirects - ) - - def build_request( - self, - method: str, - url: httpx._types.URLTypes, - *, - content: Optional[httpx._types.RequestContent] = None, - data: Optional[httpx._types.RequestData] = None, - files: Optional[httpx._types.RequestFiles] = None, - json: Optional[Any] = None, - params: Optional[httpx._types.QueryParamTypes] = None, - headers: Optional[httpx._types.HeaderTypes] = None, - cookies: Optional[httpx._types.CookieTypes] = None, - timeout: Union[ - httpx._types.TimeoutTypes, httpx._client.UseClientDefault - ] = httpx.USE_CLIENT_DEFAULT, - extensions: Optional[httpx._types.RequestExtensions] = None, - ) -> httpx.Request: - return self.client.build_request( - method, - url, - content=content, - data=data, - files=files, - json=json, - params=params, - headers=headers, - cookies=cookies, - timeout=timeout, - extensions=extensions, - ) - -s = Opper(async_client=CustomClient(httpx.AsyncClient())) -``` - - - -## Resource Management - -The `Opper` class implements the context manager protocol and registers a finalizer function to close the underlying sync and async HTTPX clients it uses under the hood. This will close HTTP connections, release memory and free up other resources held by the SDK. In short-lived Python programs and notebooks that make a few SDK method calls, resource management may not be a concern. However, in longer-lived programs, it is beneficial to create a single SDK instance via a [context manager][context-manager] and reuse it across the application. - -[context-manager]: https://docs.python.org/3/reference/datamodel.html#context-managers - -```python -from opperai import Opper -import os -def main(): - - with Opper( - http_bearer=os.getenv("OPPER_HTTP_BEARER", ""), - ) as opper: - # Rest of application here... - - -# Or when using async: -async def amain(): - - async with Opper( - http_bearer=os.getenv("OPPER_HTTP_BEARER", ""), - ) as opper: - # Rest of application here... -``` - - - -## Debugging - -You can setup your SDK to emit debug logs for SDK requests and responses. - -You can pass your own logger class directly into your SDK. -```python -from opperai import Opper -import logging - -logging.basicConfig(level=logging.DEBUG) -s = Opper(debug_logger=logging.getLogger("opperai")) -``` - -You can also enable a default debug logger by setting an environment variable `OPPER_DEBUG` to true. - - - - -# Development - -## Maturity - -This SDK is in beta, and there may be breaking changes between versions without a major version update. 
Therefore, we recommend pinning usage -to a specific package version. This way, you can install the same version each time without breaking changes unless you are intentionally -looking for the latest version. - -## Contributions -While we value open-source contributions to this SDK, this library is generated programmatically. Any manual changes added to internal files will be overwritten on the next generation. -We look forward to hearing your feedback. Feel free to open a PR or an issue with a proof of concept and we'll do our best to include it in a future release. +# More examples -### SDK Created by [Speakeasy](https://www.speakeasy.com/?utm_source=opperai&utm_campaign=python) +See examples in our [documentation](https://docs.opper.ai) +and [examples](https://github.com/opper-ai/opper-python/tree/main/examples) folder. diff --git a/docs/models/appapipublicv2functioncallcallfunctionrequest.md b/docs/models/appapipublicv2functioncallcallfunctionrequest.md index f80e620..d57bfe8 100644 --- a/docs/models/appapipublicv2functioncallcallfunctionrequest.md +++ b/docs/models/appapipublicv2functioncallcallfunctionrequest.md @@ -14,4 +14,4 @@ | `examples` | List[[models.Example](../models/example.md)] | :heavy_minus_sign: | Optionally provide examples of successful task completions. Will be added to the prompt to help the model understand the task from examples. | [
{
"comment": "Adds two numbers",
"input": {
"x": 1,
"y": 3
},
"output": {
"sum": 4
}
}
] | | `parent_span_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Optionally provide the parent span ID to add to the call event. This will automatically tie the call to a parent span in the UI. | 123e4567-e89b-12d3-a456-426614174000 | | `tags` | Dict[str, *str*] | :heavy_minus_sign: | Optionally provide a list of tags to add to the call event. Useful for being able to understand aggregate analytics on some dimension. | {
"project": "project_456",
"user": "company_123"
} | -| `configuration` | [OptionalNullable[models.FunctionCallConfigurationInput]](../models/functioncallconfigurationinput.md) | :heavy_minus_sign: | Optional configuration for the function.Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function. | {
"beta.invocation.input_validation.enabled": false,
"beta.invocation.xml_mode.enabled": false,
"invocation.cache.ttl": 0,
"invocation.few_shot.count": 0,
"invocation.structured_generation.max_attempts": 5
} | \ No newline at end of file +| `configuration` | [OptionalNullable[models.FunctionCallConfigurationInput]](../models/functioncallconfigurationinput.md) | :heavy_minus_sign: | Optional configuration for the function.Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function. | {
"beta.invocation.input_validation.enabled": false,
"beta.invocation.xml_mode.enabled": false,
"invocation.cache.ttl": 0,
"invocation.few_shot.count": 3,
"invocation.structured_generation.max_attempts": 5
} | \ No newline at end of file diff --git a/docs/models/bodyuploadfileknowledgeknowledgebaseiduploadpost.md b/docs/models/bodyuploadfileknowledgeknowledgebaseiduploadpost.md new file mode 100644 index 0000000..2a88e22 --- /dev/null +++ b/docs/models/bodyuploadfileknowledgeknowledgebaseiduploadpost.md @@ -0,0 +1,11 @@ +# BodyUploadFileKnowledgeKnowledgeBaseIDUploadPost + + +## Fields + +| Field | Type | Required | Description | Example | +| -------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | +| `file` | [models.BodyUploadFileKnowledgeKnowledgeBaseIDUploadPostFile](../models/bodyuploadfileknowledgeknowledgebaseiduploadpostfile.md) | :heavy_check_mark: | The file to upload | | +| `chunk_size` | *Optional[int]* | :heavy_minus_sign: | The chunk size to use for the document (number of characters) | | +| `chunk_overlap` | *Optional[int]* | :heavy_minus_sign: | The chunk overlap to use for the document (number of characters) | | +| `metadata` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional JSON object metadata to attach to the file | {"category": "legal", "client": "acme"} | \ No newline at end of file diff --git a/docs/models/bodyuploadfileknowledgeknowledgebaseiduploadpostfile.md b/docs/models/bodyuploadfileknowledgeknowledgebaseiduploadpostfile.md new file mode 100644 index 0000000..25c60f4 --- /dev/null +++ b/docs/models/bodyuploadfileknowledgeknowledgebaseiduploadpostfile.md @@ -0,0 +1,10 @@ +# BodyUploadFileKnowledgeKnowledgeBaseIDUploadPostFile + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------- | -------------------------------------------- | -------------------------------------------- | -------------------------------------------- | +| `file_name` | *str* | :heavy_check_mark: | N/A | +| `content` | *Union[bytes, IO[bytes], io.BufferedReader]* | :heavy_check_mark: | N/A | +| `content_type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/createfunctionrequest.md b/docs/models/createfunctionrequest.md index 1cb29bc..bfb0d83 100644 --- a/docs/models/createfunctionrequest.md +++ b/docs/models/createfunctionrequest.md @@ -11,4 +11,4 @@ | `input_schema` | Dict[str, *Any*] | :heavy_minus_sign: | Optional input schema for the function. Can preferably include field descriptions to allow the model to reason about the input variables. Schema is validated against the input data and issues an error if it does not match. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. For schemas with definitions, prefer using '$defs' and '#/$defs/...' references. | {
"properties": {
"x": {
"title": "X",
"type": "integer"
},
"y": {
"title": "Y",
"type": "integer"
}
},
"required": [
"x",
"y"
],
"title": "OpperInputExample",
"type": "object"
} | | `output_schema` | Dict[str, *Any*] | :heavy_minus_sign: | Optional output schema for the function. Can preferably include field descriptions to allow the model to reason about the output variables. Schema is validated against the output data and issues an error if it does not match. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. For schemas with definitions, prefer using '$defs' and '#/$defs/...' references. | {
"properties": {
"sum": {
"title": "Sum",
"type": "integer"
}
},
"required": [
"sum"
],
"title": "OpperOutputExample",
"type": "object"
} | | `model` | [Optional[models.TModel]](../models/tmodel.md) | :heavy_minus_sign: | N/A | | -| `configuration` | [OptionalNullable[models.FunctionCallConfigurationInput]](../models/functioncallconfigurationinput.md) | :heavy_minus_sign: | Optional configuration for the function.Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function. | {
"beta.invocation.input_validation.enabled": false,
"beta.invocation.xml_mode.enabled": false,
"invocation.cache.ttl": 0,
"invocation.few_shot.count": 0,
"invocation.structured_generation.max_attempts": 5
} | \ No newline at end of file +| `configuration` | [OptionalNullable[models.FunctionCallConfigurationInput]](../models/functioncallconfigurationinput.md) | :heavy_minus_sign: | Optional configuration for the function.Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function. | {
"beta.invocation.input_validation.enabled": false,
"beta.invocation.xml_mode.enabled": false,
"invocation.cache.ttl": 0,
"invocation.few_shot.count": 3,
"invocation.structured_generation.max_attempts": 5
} | \ No newline at end of file diff --git a/docs/models/createfunctionresponse.md b/docs/models/createfunctionresponse.md index 04987f5..9fbe8dc 100644 --- a/docs/models/createfunctionresponse.md +++ b/docs/models/createfunctionresponse.md @@ -11,7 +11,7 @@ | `input_schema` | Dict[str, *Any*] | :heavy_minus_sign: | Optional input schema for the function. Can preferably include field descriptions to allow the model to reason about the input variables. Schema is validated against the input data and issues an error if it does not match. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. For schemas with definitions, prefer using '$defs' and '#/$defs/...' references. | {
"properties": {
"x": {
"title": "X",
"type": "integer"
},
"y": {
"title": "Y",
"type": "integer"
}
},
"required": [
"x",
"y"
],
"title": "OpperInputExample",
"type": "object"
} | | `output_schema` | Dict[str, *Any*] | :heavy_minus_sign: | Optional output schema for the function. Can preferably include field descriptions to allow the model to reason about the output variables. Schema is validated against the output data and issues an error if it does not match. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. For schemas with definitions, prefer using '$defs' and '#/$defs/...' references. | {
"properties": {
"sum": {
"title": "Sum",
"type": "integer"
}
},
"required": [
"sum"
],
"title": "OpperOutputExample",
"type": "object"
} | | `model` | [Optional[models.TModel]](../models/tmodel.md) | :heavy_minus_sign: | N/A | | -| `configuration` | [OptionalNullable[models.FunctionCallConfigurationOutput]](../models/functioncallconfigurationoutput.md) | :heavy_minus_sign: | Optional configuration for the function.Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function. | {
"beta.invocation.input_validation.enabled": false,
"beta.invocation.xml_mode.enabled": false,
"invocation.cache.ttl": 0,
"invocation.few_shot.count": 0,
"invocation.structured_generation.max_attempts": 5
} | +| `configuration` | [OptionalNullable[models.FunctionCallConfigurationOutput]](../models/functioncallconfigurationoutput.md) | :heavy_minus_sign: | Optional configuration for the function.Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function. | {
"beta.invocation.input_validation.enabled": false,
"beta.invocation.xml_mode.enabled": false,
"invocation.cache.ttl": 0,
"invocation.few_shot.count": 3,
"invocation.structured_generation.max_attempts": 5
} | | `id` | *str* | :heavy_check_mark: | The ID of the function | | | `dataset_id` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the dataset associated with the function | | | `revision_id` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the latest revision of the function | | diff --git a/docs/models/feedbackinfo.md b/docs/models/feedbackinfo.md new file mode 100644 index 0000000..0768d33 --- /dev/null +++ b/docs/models/feedbackinfo.md @@ -0,0 +1,11 @@ +# FeedbackInfo + +Human feedback information for a span. + + +## Fields + +| Field | Type | Required | Description | +| ------------------------- | ------------------------- | ------------------------- | ------------------------- | +| `score` | *float* | :heavy_check_mark: | Feedback score (0.0-1.0) | +| `comment` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional feedback comment | \ No newline at end of file diff --git a/docs/models/functioncallconfigurationinput.md b/docs/models/functioncallconfigurationinput.md index bc49b65..9063f6a 100644 --- a/docs/models/functioncallconfigurationinput.md +++ b/docs/models/functioncallconfigurationinput.md @@ -3,11 +3,11 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `invocation_few_shot_count` | *Optional[int]* | :heavy_minus_sign: | The number of few-shot examples to use for the call. The examples are selected using nearest neighbor search of the function's dataset for items that are similar to the input. | -| `beta_evaluation` | *OptionalNullable[Any]* | :heavy_minus_sign: | [DEPRECATED] This field is ignored. Evaluation is controlled by observer_enabled. | -| `invocation_structured_generation_max_attempts` | *Optional[int]* | :heavy_minus_sign: | The maximum number of attempts to make when generating a response matching the output schema if provided. | -| `invocation_cache_ttl` | *Optional[int]* | :heavy_minus_sign: | The time to live for the cache in seconds. If 0, the cache is disabled. | -| `beta_invocation_input_validation_enabled` | *Optional[bool]* | :heavy_minus_sign: | Whether to enable input validation against the input schema. This is a beta feature and is disabled by default. | -| `beta_invocation_xml_mode_enabled` | *Optional[bool]* | :heavy_minus_sign: | Experimental: enable XML structured output. The model receives an XML schema and its response is converted back to JSON. We have observed better adherence to multi-paragraph text fields (especially with Anthropic models) when this is enabled. 
| \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| ~~`invocation_few_shot_count`~~ | *Optional[int]* | :heavy_minus_sign: | : warning: ** DEPRECATED **: This will be removed in a future release, please migrate away from it as soon as possible.

[DEPRECATED via /call] This field is ignored when passed via /call endpoint. The system enforces a default of 3. Configure via PATCH /v2/functions/{uuid} or the Platform UI instead. The number of few-shot examples to use for the call, selected using nearest neighbor search of the function's dataset. | +| `beta_evaluation` | *OptionalNullable[Any]* | :heavy_minus_sign: | [DEPRECATED] This field is ignored. Evaluation is controlled by observer_enabled. | +| `invocation_structured_generation_max_attempts` | *Optional[int]* | :heavy_minus_sign: | The maximum number of attempts to make when generating a response matching the output schema if provided. | +| `invocation_cache_ttl` | *Optional[int]* | :heavy_minus_sign: | The time to live for the cache in seconds. If 0, the cache is disabled. | +| `beta_invocation_input_validation_enabled` | *Optional[bool]* | :heavy_minus_sign: | Whether to enable input validation against the input schema. This is a beta feature and is disabled by default. | +| `beta_invocation_xml_mode_enabled` | *Optional[bool]* | :heavy_minus_sign: | Experimental: enable XML structured output. The model receives an XML schema and its response is converted back to JSON. We have observed better adherence to multi-paragraph text fields (especially with Anthropic models) when this is enabled. | \ No newline at end of file diff --git a/docs/models/functioncallconfigurationoutput.md b/docs/models/functioncallconfigurationoutput.md index 5fa418d..dac7c47 100644 --- a/docs/models/functioncallconfigurationoutput.md +++ b/docs/models/functioncallconfigurationoutput.md @@ -3,10 +3,10 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `invocation_few_shot_count` | *Optional[int]* | :heavy_minus_sign: | The number of few-shot examples to use for the call. The examples are selected using nearest neighbor search of the function's dataset for items that are similar to the input. | -| `invocation_structured_generation_max_attempts` | *Optional[int]* | :heavy_minus_sign: | The maximum number of attempts to make when generating a response matching the output schema if provided. | -| `invocation_cache_ttl` | *Optional[int]* | :heavy_minus_sign: | The time to live for the cache in seconds. If 0, the cache is disabled. | -| `beta_invocation_input_validation_enabled` | *Optional[bool]* | :heavy_minus_sign: | Whether to enable input validation against the input schema. This is a beta feature and is disabled by default. 
| -| `beta_invocation_xml_mode_enabled` | *Optional[bool]* | :heavy_minus_sign: | Experimental: enable XML structured output. The model receives an XML schema and its response is converted back to JSON. We have observed better adherence to multi-paragraph text fields (especially with Anthropic models) when this is enabled. | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| ~~`invocation_few_shot_count`~~ | *Optional[int]* | :heavy_minus_sign: | : warning: ** DEPRECATED **: This will be removed in a future release, please migrate away from it as soon as possible.

[DEPRECATED via /call] This field is ignored when passed via /call endpoint. The system enforces a default of 3. Configure via PATCH /v2/functions/{uuid} or the Platform UI instead. The number of few-shot examples to use for the call, selected using nearest neighbor search of the function's dataset. | +| `invocation_structured_generation_max_attempts` | *Optional[int]* | :heavy_minus_sign: | The maximum number of attempts to make when generating a response matching the output schema if provided. | +| `invocation_cache_ttl` | *Optional[int]* | :heavy_minus_sign: | The time to live for the cache in seconds. If 0, the cache is disabled. | +| `beta_invocation_input_validation_enabled` | *Optional[bool]* | :heavy_minus_sign: | Whether to enable input validation against the input schema. This is a beta feature and is disabled by default. | +| `beta_invocation_xml_mode_enabled` | *Optional[bool]* | :heavy_minus_sign: | Experimental: enable XML structured output. The model receives an XML schema and its response is converted back to JSON. We have observed better adherence to multi-paragraph text fields (especially with Anthropic models) when this is enabled. | \ No newline at end of file diff --git a/docs/models/getfunctionresponse.md b/docs/models/getfunctionresponse.md index 7ea1732..2139f14 100644 --- a/docs/models/getfunctionresponse.md +++ b/docs/models/getfunctionresponse.md @@ -11,7 +11,7 @@ | `input_schema` | Dict[str, *Any*] | :heavy_minus_sign: | Optional input schema for the function. Can preferably include field descriptions to allow the model to reason about the input variables. Schema is validated against the input data and issues an error if it does not match. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. For schemas with definitions, prefer using '$defs' and '#/$defs/...' references. | {
"properties": {
"x": {
"title": "X",
"type": "integer"
},
"y": {
"title": "Y",
"type": "integer"
}
},
"required": [
"x",
"y"
],
"title": "OpperInputExample",
"type": "object"
} | | `output_schema` | Dict[str, *Any*] | :heavy_minus_sign: | Optional output schema for the function. Can preferably include field descriptions to allow the model to reason about the output variables. Schema is validated against the output data and issues an error if it does not match. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. For schemas with definitions, prefer using '$defs' and '#/$defs/...' references. | {
"properties": {
"sum": {
"title": "Sum",
"type": "integer"
}
},
"required": [
"sum"
],
"title": "OpperOutputExample",
"type": "object"
} | | `model` | [Optional[models.TModel]](../models/tmodel.md) | :heavy_minus_sign: | N/A | | -| `configuration` | [OptionalNullable[models.FunctionCallConfigurationOutput]](../models/functioncallconfigurationoutput.md) | :heavy_minus_sign: | Optional configuration for the function.Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function. | {
"beta.invocation.input_validation.enabled": false,
"beta.invocation.xml_mode.enabled": false,
"invocation.cache.ttl": 0,
"invocation.few_shot.count": 0,
"invocation.structured_generation.max_attempts": 5
} | +| `configuration` | [OptionalNullable[models.FunctionCallConfigurationOutput]](../models/functioncallconfigurationoutput.md) | :heavy_minus_sign: | Optional configuration for the function.Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function. | {
"beta.invocation.input_validation.enabled": false,
"beta.invocation.xml_mode.enabled": false,
"invocation.cache.ttl": 0,
"invocation.few_shot.count": 3,
"invocation.structured_generation.max_attempts": 5
} | | `id` | *str* | :heavy_check_mark: | The ID of the function | | | `dataset_id` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the dataset associated with the function | | | `revision_id` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the latest revision of the function | | diff --git a/docs/models/getspanresponse.md b/docs/models/getspanresponse.md index ba2432f..f3117c5 100644 --- a/docs/models/getspanresponse.md +++ b/docs/models/getspanresponse.md @@ -16,4 +16,5 @@ | `output` | *OptionalNullable[Any]* | :heavy_minus_sign: | The output of the span | Hello, world! | | `error` | *OptionalNullable[str]* | :heavy_minus_sign: | In case of an error, the error message | Exception: This is an error message | | `meta` | Dict[str, *Any*] | :heavy_minus_sign: | The metadata of the span | {
"key": "value"
} | -| `score` | *OptionalNullable[int]* | :heavy_minus_sign: | The score of the span | 10 | \ No newline at end of file +| `score` | *OptionalNullable[int]* | :heavy_minus_sign: | The score of the span | 10 | +| `feedback` | [OptionalNullable[models.FeedbackInfo]](../models/feedbackinfo.md) | :heavy_minus_sign: | Human feedback if submitted | | \ No newline at end of file diff --git a/docs/models/streamingchunk.md b/docs/models/streamingchunk.md index f9a2345..dc272ae 100644 --- a/docs/models/streamingchunk.md +++ b/docs/models/streamingchunk.md @@ -27,4 +27,6 @@ The fields present depend on the streaming mode: | `delta` | [OptionalNullable[models.Delta]](../models/delta.md) | :heavy_minus_sign: | Incremental content for streaming. Used for both unstructured text streaming (when no output_schema) and structured streaming (when output_schema is provided). For structured streaming, contains actual field values being streamed to the json_path location. Supports all JSON types: strings, numbers, booleans. | Hello | | `json_path` | *OptionalNullable[str]* | :heavy_minus_sign: | Dot-notation path showing exactly which field in your output_schema is being populated. Enables precise UI updates by routing content to specific components. Format: field[index].nested_field | summary | | `span_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Unique identifier for the execution span, included in the first streaming chunk for tracing | 123e4567-e89b-12d3-a456-426614174000 | -| `chunk_type` | *OptionalNullable[str]* | :heavy_minus_sign: | Indicates the streaming mode: 'text' for unstructured streaming, 'json' for structured streaming with output_schema. Only present when delta content is included. | text | \ No newline at end of file +| `chunk_type` | *OptionalNullable[str]* | :heavy_minus_sign: | Indicates the streaming mode: 'text' for unstructured streaming, 'json' for structured streaming with output_schema, 'error' for error events. Only present when delta content is included. 
| text | +| `error_type` | *OptionalNullable[str]* | :heavy_minus_sign: | Error type when chunk_type is 'error' | RateLimitError | +| `error_message` | *OptionalNullable[str]* | :heavy_minus_sign: | Error message when chunk_type is 'error' | | \ No newline at end of file diff --git a/docs/models/submitfeedbackrequest.md b/docs/models/submitfeedbackrequest.md new file mode 100644 index 0000000..61824cd --- /dev/null +++ b/docs/models/submitfeedbackrequest.md @@ -0,0 +1,10 @@ +# SubmitFeedbackRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | +| `score` | *float* | :heavy_check_mark: | Feedback score (0.0=negative, 1.0=positive) | 1 | +| `comment` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional comment explaining the feedback | Great output, exactly what I needed | +| `save_to_dataset` | *OptionalNullable[bool]* | :heavy_minus_sign: | Force save to dataset (True=force save, False=never save, None=use auto-save config) | | \ No newline at end of file diff --git a/docs/models/submitfeedbackresponse.md b/docs/models/submitfeedbackresponse.md new file mode 100644 index 0000000..98f7d4a --- /dev/null +++ b/docs/models/submitfeedbackresponse.md @@ -0,0 +1,10 @@ +# SubmitFeedbackResponse + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------- | -------------------------------------------- | -------------------------------------------- | -------------------------------------------- | +| `span_id` | *str* | :heavy_check_mark: | The ID of the span | +| `score` | *float* | :heavy_check_mark: | The feedback score that was submitted | +| `example_saved` | *bool* | :heavy_check_mark: | Whether the example was saved to the dataset | \ No newline at end of file diff --git a/docs/models/submitspanfeedbackspansspanidfeedbackpostrequest.md b/docs/models/submitspanfeedbackspansspanidfeedbackpostrequest.md new file mode 100644 index 0000000..619b627 --- /dev/null +++ b/docs/models/submitspanfeedbackspansspanidfeedbackpostrequest.md @@ -0,0 +1,9 @@ +# SubmitSpanFeedbackSpansSpanIDFeedbackPostRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | +| `span_id` | *str* | :heavy_check_mark: | The ID of the span to provide feedback on | +| `submit_feedback_request` | [models.SubmitFeedbackRequest](../models/submitfeedbackrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/updatefunctionrequest.md b/docs/models/updatefunctionrequest.md index c2efb38..b3ef3dc 100644 --- a/docs/models/updatefunctionrequest.md +++ b/docs/models/updatefunctionrequest.md @@ -11,4 +11,4 @@ | `input_schema` | Dict[str, *Any*] | :heavy_minus_sign: | Optional input schema for the function. Can preferably include field descriptions to allow the model to reason about the input variables. 
Schema is validated against the input data and issues an error if it does not match. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. For schemas with definitions, prefer using '$defs' and '#/$defs/...' references. | {
"properties": {
"x": {
"title": "X",
"type": "integer"
},
"y": {
"title": "Y",
"type": "integer"
}
},
"required": [
"x",
"y"
],
"title": "OpperInputExample",
"type": "object"
} | | `output_schema` | Dict[str, *Any*] | :heavy_minus_sign: | Optional output schema for the function. Can preferably include field descriptions to allow the model to reason about the output variables. Schema is validated against the output data and issues an error if it does not match. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. For schemas with definitions, prefer using '$defs' and '#/$defs/...' references. | {
"properties": {
"sum": {
"title": "Sum",
"type": "integer"
}
},
"required": [
"sum"
],
"title": "OpperOutputExample",
"type": "object"
} | | `model` | [OptionalNullable[models.TModel]](../models/tmodel.md) | :heavy_minus_sign: | Optionally provide a model to use for completing the task.
If not provided, a default model will be used. Currently the default model is `azure/gpt-4o-eu`

To specify options for the model, use a dictionary of key-value pairs. The options are passed to the model on invocation.
An example of passing temperature to `gpt-4o-mini` hosted on OpenAI is shown below.

```json
{
"model": "openai/gpt-4o-mini", # the model name
"options": {
"temperature": 0.5 # the options for the model
}
}
```

To specify a fallback model, use a list of models. The models will then be tried in order.
The second model will be used if the first model is not available, and so on.

```json
[
"openai/gpt-4o-mini", # first model to try
"openai/gpt-4.1-nano", # second model to try
]
```
| | -| `configuration` | Dict[str, *Any*] | :heavy_minus_sign: | Optional configuration for the function.Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function. | {
"beta.invocation.input_validation.enabled": false,
"beta.invocation.xml_mode.enabled": false,
"invocation.cache.ttl": 0,
"invocation.few_shot.count": 0,
"invocation.structured_generation.max_attempts": 5
} | \ No newline at end of file +| `configuration` | Dict[str, *Any*] | :heavy_minus_sign: | Optional configuration for the function.Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function. | {
"beta.invocation.input_validation.enabled": false,
"beta.invocation.xml_mode.enabled": false,
"invocation.cache.ttl": 0,
"invocation.few_shot.count": 3,
"invocation.structured_generation.max_attempts": 5
} | \ No newline at end of file diff --git a/docs/models/updatefunctionresponse.md b/docs/models/updatefunctionresponse.md index 5b93412..37521e4 100644 --- a/docs/models/updatefunctionresponse.md +++ b/docs/models/updatefunctionresponse.md @@ -11,7 +11,7 @@ | `input_schema` | Dict[str, *Any*] | :heavy_minus_sign: | Optional input schema for the function. Can preferably include field descriptions to allow the model to reason about the input variables. Schema is validated against the input data and issues an error if it does not match. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. For schemas with definitions, prefer using '$defs' and '#/$defs/...' references. | {
"properties": {
"x": {
"title": "X",
"type": "integer"
},
"y": {
"title": "Y",
"type": "integer"
}
},
"required": [
"x",
"y"
],
"title": "OpperInputExample",
"type": "object"
} | | `output_schema` | Dict[str, *Any*] | :heavy_minus_sign: | Optional output schema for the function. Can preferably include field descriptions to allow the model to reason about the output variables. Schema is validated against the output data and issues an error if it does not match. With the Opper SDKs you can define these schemas through libraries like Pydantic and Zod. For schemas with definitions, prefer using '$defs' and '#/$defs/...' references. | {
"properties": {
"sum": {
"title": "Sum",
"type": "integer"
}
},
"required": [
"sum"
],
"title": "OpperOutputExample",
"type": "object"
} | | `model` | [Optional[models.TModel]](../models/tmodel.md) | :heavy_minus_sign: | N/A | | -| `configuration` | [OptionalNullable[models.FunctionCallConfigurationOutput]](../models/functioncallconfigurationoutput.md) | :heavy_minus_sign: | Optional configuration for the function.Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function. | {
"beta.invocation.input_validation.enabled": false,
"beta.invocation.xml_mode.enabled": false,
"invocation.cache.ttl": 0,
"invocation.few_shot.count": 0,
"invocation.structured_generation.max_attempts": 5
} | +| `configuration` | [OptionalNullable[models.FunctionCallConfigurationOutput]](../models/functioncallconfigurationoutput.md) | :heavy_minus_sign: | Optional configuration for the function.Configuration is a dictionary of key-value pairs that can be used to override the default configuration for the function. | {
"beta.invocation.input_validation.enabled": false,
"beta.invocation.xml_mode.enabled": false,
"invocation.cache.ttl": 0,
"invocation.few_shot.count": 3,
"invocation.structured_generation.max_attempts": 5
} | | `id` | *str* | :heavy_check_mark: | The ID of the function | | | `dataset_id` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the dataset associated with the function | | | `revision_id` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the latest revision of the function | | diff --git a/docs/models/updatespanrequest.md b/docs/models/updatespanrequest.md index d3dea27..259d8fa 100644 --- a/docs/models/updatespanrequest.md +++ b/docs/models/updatespanrequest.md @@ -6,9 +6,9 @@ | Field | Type | Required | Description | Example | | --------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------- | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | The name of the span, something descriptive about the span that will be used to identify it when querying | my span | -| `start_time` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The start time of the span in UTC | 2025-11-28T13:52:31.359105Z | +| `start_time` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The start time of the span in UTC | 2025-12-09T10:00:56.939134Z | | `type` | *OptionalNullable[str]* | :heavy_minus_sign: | The type of the span | email_tool | -| `end_time` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The end time of the span in UTC | 2025-11-28T13:52:31.359201Z | +| `end_time` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The end time of the span in UTC | 2025-12-09T10:00:56.939193Z | | `input` | *OptionalNullable[str]* | :heavy_minus_sign: | The input of the span | Hello, world! | | `output` | *OptionalNullable[str]* | :heavy_minus_sign: | The output of the span | Hello, world! 
| | `error` | *OptionalNullable[str]* | :heavy_minus_sign: | In case of an error, the error message | Exception: This is an error message | diff --git a/docs/models/uploadfileknowledgeknowledgebaseiduploadpostrequest.md b/docs/models/uploadfileknowledgeknowledgebaseiduploadpostrequest.md new file mode 100644 index 0000000..d426be2 --- /dev/null +++ b/docs/models/uploadfileknowledgeknowledgebaseiduploadpostrequest.md @@ -0,0 +1,9 @@ +# UploadFileKnowledgeKnowledgeBaseIDUploadPostRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | +| `knowledge_base_id` | *str* | :heavy_check_mark: | The id of the knowledge base to upload the file to | +| `body_upload_file_knowledge_knowledge_base_id_upload_post` | [models.BodyUploadFileKnowledgeKnowledgeBaseIDUploadPost](../models/bodyuploadfileknowledgeknowledgebaseiduploadpost.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/uploadfileresponse.md b/docs/models/uploadfileresponse.md new file mode 100644 index 0000000..cdff9d1 --- /dev/null +++ b/docs/models/uploadfileresponse.md @@ -0,0 +1,12 @@ +# UploadFileResponse + + +## Fields + +| Field | Type | Required | Description | +| ------------------- | ------------------- | ------------------- | ------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `key` | *str* | :heavy_check_mark: | N/A | +| `original_filename` | *str* | :heavy_check_mark: | N/A | +| `document_id` | *int* | :heavy_check_mark: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 1648503..448493d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,7 +1,7 @@ [project] name = "opperai" -version = "1.6.5" +version = "1.7.1" description = "Python Client SDK Generated by Speakeasy." 
authors = [{ name = "Speakeasy" },] readme = "README.md" diff --git a/src/opperai/_version.py b/src/opperai/_version.py index 8ada501..bc661c8 100644 --- a/src/opperai/_version.py +++ b/src/opperai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "opperai" -__version__: str = "1.6.5" +__version__: str = "1.7.1" __openapi_doc_version__: str = "2.0.0" -__gen_version__: str = "2.766.1" -__user_agent__: str = "speakeasy-sdk/python 1.6.5 2.766.1 2.0.0 opperai" +__gen_version__: str = "2.776.1" +__user_agent__: str = "speakeasy-sdk/python 1.7.1 2.776.1 2.0.0 opperai" try: if __package__ is not None: diff --git a/src/opperai/knowledge.py b/src/opperai/knowledge.py index 38dfb74..0eda2fe 100644 --- a/src/opperai/knowledge.py +++ b/src/opperai/knowledge.py @@ -1572,6 +1572,276 @@ async def register_file_upload_async( raise errors.APIError("Unexpected response received", http_res) + def upload_file( + self, + *, + knowledge_base_id: str, + file: Union[ + models.BodyUploadFileKnowledgeKnowledgeBaseIDUploadPostFile, + models.BodyUploadFileKnowledgeKnowledgeBaseIDUploadPostFileTypedDict, + ], + chunk_size: Optional[int] = 2000, + chunk_overlap: Optional[int] = 200, + metadata: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.UploadFileResponse: + r"""Upload File + + Upload a file directly to a knowledge base. + + This is a simplified alternative to the three-step upload process + (get_upload_url -> upload to S3 -> register_file). Use this endpoint + for smaller files or when you prefer a simpler API. + + The file will be uploaded to S3 and queued for processing automatically. + + :param knowledge_base_id: The id of the knowledge base to upload the file to + :param file: The file to upload + :param chunk_size: The chunk size to use for the document (number of characters) + :param chunk_overlap: The chunk overlap to use for the document (number of characters) + :param metadata: Optional JSON object metadata to attach to the file + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.UploadFileKnowledgeKnowledgeBaseIDUploadPostRequest( + knowledge_base_id=knowledge_base_id, + body_upload_file_knowledge_knowledge_base_id_upload_post=models.BodyUploadFileKnowledgeKnowledgeBaseIDUploadPost( + file=utils.get_pydantic_model( + file, models.BodyUploadFileKnowledgeKnowledgeBaseIDUploadPostFile + ), + chunk_size=chunk_size, + chunk_overlap=chunk_overlap, + metadata=metadata, + ), + ) + + req = self._build_request( + method="POST", + path="/knowledge/{knowledge_base_id}/upload", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.body_upload_file_knowledge_knowledge_base_id_upload_post, + False, + False, + "multipart", + models.BodyUploadFileKnowledgeKnowledgeBaseIDUploadPost, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="upload_file_knowledge__knowledge_base_id__upload_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "401", "404", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "201", "application/json"): + return unmarshal_json_response(models.UploadFileResponse, http_res) + if utils.match_response(http_res, "400", "application/json"): + response_data = unmarshal_json_response( + errors.BadRequestErrorData, http_res + ) + raise errors.BadRequestError(response_data, http_res) + if utils.match_response(http_res, "401", "application/json"): + response_data = unmarshal_json_response( + errors.UnauthorizedErrorData, http_res + ) + raise errors.UnauthorizedError(response_data, http_res) + if utils.match_response(http_res, "404", "application/json"): + response_data = unmarshal_json_response(errors.NotFoundErrorData, http_res) + raise errors.NotFoundError(response_data, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.RequestValidationErrorData, http_res + ) + raise errors.RequestValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.APIError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.APIError("API error occurred", http_res, http_res_text) + + raise errors.APIError("Unexpected response received", http_res) + + async def upload_file_async( + self, + *, + knowledge_base_id: str, + file: Union[ + 
models.BodyUploadFileKnowledgeKnowledgeBaseIDUploadPostFile, + models.BodyUploadFileKnowledgeKnowledgeBaseIDUploadPostFileTypedDict, + ], + chunk_size: Optional[int] = 2000, + chunk_overlap: Optional[int] = 200, + metadata: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.UploadFileResponse: + r"""Upload File + + Upload a file directly to a knowledge base. + + This is a simplified alternative to the three-step upload process + (get_upload_url -> upload to S3 -> register_file). Use this endpoint + for smaller files or when you prefer a simpler API. + + The file will be uploaded to S3 and queued for processing automatically. + + :param knowledge_base_id: The id of the knowledge base to upload the file to + :param file: The file to upload + :param chunk_size: The chunk size to use for the document (number of characters) + :param chunk_overlap: The chunk overlap to use for the document (number of characters) + :param metadata: Optional JSON object metadata to attach to the file + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.UploadFileKnowledgeKnowledgeBaseIDUploadPostRequest( + knowledge_base_id=knowledge_base_id, + body_upload_file_knowledge_knowledge_base_id_upload_post=models.BodyUploadFileKnowledgeKnowledgeBaseIDUploadPost( + file=utils.get_pydantic_model( + file, models.BodyUploadFileKnowledgeKnowledgeBaseIDUploadPostFile + ), + chunk_size=chunk_size, + chunk_overlap=chunk_overlap, + metadata=metadata, + ), + ) + + req = self._build_request_async( + method="POST", + path="/knowledge/{knowledge_base_id}/upload", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.body_upload_file_knowledge_knowledge_base_id_upload_post, + False, + False, + "multipart", + models.BodyUploadFileKnowledgeKnowledgeBaseIDUploadPost, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="upload_file_knowledge__knowledge_base_id__upload_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "401", "404", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + 
response_data: Any = None + if utils.match_response(http_res, "201", "application/json"): + return unmarshal_json_response(models.UploadFileResponse, http_res) + if utils.match_response(http_res, "400", "application/json"): + response_data = unmarshal_json_response( + errors.BadRequestErrorData, http_res + ) + raise errors.BadRequestError(response_data, http_res) + if utils.match_response(http_res, "401", "application/json"): + response_data = unmarshal_json_response( + errors.UnauthorizedErrorData, http_res + ) + raise errors.UnauthorizedError(response_data, http_res) + if utils.match_response(http_res, "404", "application/json"): + response_data = unmarshal_json_response(errors.NotFoundErrorData, http_res) + raise errors.NotFoundError(response_data, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.RequestValidationErrorData, http_res + ) + raise errors.RequestValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.APIError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.APIError("API error occurred", http_res, http_res_text) + + raise errors.APIError("Unexpected response received", http_res) + def delete_file( self, *, diff --git a/src/opperai/models/__init__.py b/src/opperai/models/__init__.py index 8438010..a190a30 100644 --- a/src/opperai/models/__init__.py +++ b/src/opperai/models/__init__.py @@ -35,6 +35,12 @@ AppAPIPublicV2FunctionsCallFunctionResponseTypedDict, ) from .audio import Audio, AudioTypedDict + from .body_upload_file_knowledge_knowledge_base_id_upload_post import ( + BodyUploadFileKnowledgeKnowledgeBaseIDUploadPost, + BodyUploadFileKnowledgeKnowledgeBaseIDUploadPostFile, + BodyUploadFileKnowledgeKnowledgeBaseIDUploadPostFileTypedDict, + BodyUploadFileKnowledgeKnowledgeBaseIDUploadPostTypedDict, + ) from .call_function_functions_function_id_call_postop import ( CallFunctionFunctionsFunctionIDCallPostRequest, CallFunctionFunctionsFunctionIDCallPostRequestTypedDict, @@ -347,6 +353,7 @@ ) from .example import Example, ExampleTypedDict from .examplein import ExampleIn, ExampleInTypedDict + from .feedbackinfo import FeedbackInfo, FeedbackInfoTypedDict from .file import File, FileTypedDict from .filedownloadurlresponse import ( FileDownloadURLResponse, @@ -713,6 +720,18 @@ StreamFunctionRevisionFunctionsFunctionIDCallStreamRevisionIDPostResponseBodyTypedDict, StreamFunctionRevisionFunctionsFunctionIDCallStreamRevisionIDPostResponseTypedDict, ) + from .submit_span_feedback_spans_span_id_feedback_postop import ( + SubmitSpanFeedbackSpansSpanIDFeedbackPostRequest, + SubmitSpanFeedbackSpansSpanIDFeedbackPostRequestTypedDict, + ) + from .submitfeedbackrequest import ( + SubmitFeedbackRequest, + SubmitFeedbackRequestTypedDict, + ) + from .submitfeedbackresponse import ( + SubmitFeedbackResponse, + SubmitFeedbackResponseTypedDict, + ) from .textprocessingconfiguration import ( TextProcessingConfiguration, TextProcessingConfigurationTypedDict, @@ -785,6 +804,11 @@ ) from .updatespanrequest import UpdateSpanRequest, UpdateSpanRequestTypedDict from .updatespanresponse import UpdateSpanResponse, UpdateSpanResponseTypedDict + from .upload_file_knowledge_knowledge_base_id_upload_postop import ( + UploadFileKnowledgeKnowledgeBaseIDUploadPostRequest, + 
UploadFileKnowledgeKnowledgeBaseIDUploadPostRequestTypedDict, + ) + from .uploadfileresponse import UploadFileResponse, UploadFileResponseTypedDict from .usage_analytics_usage_getop import ( UsageAnalyticsUsageGetRequest, UsageAnalyticsUsageGetRequestTypedDict, @@ -822,6 +846,10 @@ "AppAPIPublicV2FunctionsCallFunctionResponseTypedDict", "Audio", "AudioTypedDict", + "BodyUploadFileKnowledgeKnowledgeBaseIDUploadPost", + "BodyUploadFileKnowledgeKnowledgeBaseIDUploadPostFile", + "BodyUploadFileKnowledgeKnowledgeBaseIDUploadPostFileTypedDict", + "BodyUploadFileKnowledgeKnowledgeBaseIDUploadPostTypedDict", "CallFunctionFunctionsFunctionIDCallPostRequest", "CallFunctionFunctionsFunctionIDCallPostRequestTypedDict", "CallFunctionRevisionFunctionsFunctionIDCallRevisionIDPostRequest", @@ -1021,6 +1049,8 @@ "ExampleIn", "ExampleInTypedDict", "ExampleTypedDict", + "FeedbackInfo", + "FeedbackInfoTypedDict", "File", "FileDownloadURLResponse", "FileDownloadURLResponseTypedDict", @@ -1263,6 +1293,12 @@ "StreamFunctionRevisionFunctionsFunctionIDCallStreamRevisionIDPostResponseTypedDict", "StreamingChunk", "StreamingChunkTypedDict", + "SubmitFeedbackRequest", + "SubmitFeedbackRequestTypedDict", + "SubmitFeedbackResponse", + "SubmitFeedbackResponseTypedDict", + "SubmitSpanFeedbackSpansSpanIDFeedbackPostRequest", + "SubmitSpanFeedbackSpansSpanIDFeedbackPostRequestTypedDict", "Syntax", "TModel", "TModel1", @@ -1308,6 +1344,10 @@ "UpdateSpanResponseTypedDict", "UpdateSpanSpansSpanIDPatchRequest", "UpdateSpanSpansSpanIDPatchRequestTypedDict", + "UploadFileKnowledgeKnowledgeBaseIDUploadPostRequest", + "UploadFileKnowledgeKnowledgeBaseIDUploadPostRequestTypedDict", + "UploadFileResponse", + "UploadFileResponseTypedDict", "UsageAnalyticsUsageGetRequest", "UsageAnalyticsUsageGetRequestTypedDict", "Value1", @@ -1346,6 +1386,10 @@ "AppAPIPublicV2FunctionsCallFunctionResponseTypedDict": ".app_api_public_v2_functions_callfunctionresponse", "Audio": ".audio", "AudioTypedDict": ".audio", + "BodyUploadFileKnowledgeKnowledgeBaseIDUploadPost": ".body_upload_file_knowledge_knowledge_base_id_upload_post", + "BodyUploadFileKnowledgeKnowledgeBaseIDUploadPostFile": ".body_upload_file_knowledge_knowledge_base_id_upload_post", + "BodyUploadFileKnowledgeKnowledgeBaseIDUploadPostFileTypedDict": ".body_upload_file_knowledge_knowledge_base_id_upload_post", + "BodyUploadFileKnowledgeKnowledgeBaseIDUploadPostTypedDict": ".body_upload_file_knowledge_knowledge_base_id_upload_post", "CallFunctionFunctionsFunctionIDCallPostRequest": ".call_function_functions_function_id_call_postop", "CallFunctionFunctionsFunctionIDCallPostRequestTypedDict": ".call_function_functions_function_id_call_postop", "CallFunctionRevisionFunctionsFunctionIDCallRevisionIDPostRequest": ".call_function_revision_functions_function_id_call_revision_id_postop", @@ -1552,6 +1596,8 @@ "ExampleTypedDict": ".example", "ExampleIn": ".examplein", "ExampleInTypedDict": ".examplein", + "FeedbackInfo": ".feedbackinfo", + "FeedbackInfoTypedDict": ".feedbackinfo", "File": ".file", "FileTypedDict": ".file", "FileDownloadURLResponse": ".filedownloadurlresponse", @@ -1792,6 +1838,12 @@ "StreamFunctionRevisionFunctionsFunctionIDCallStreamRevisionIDPostResponseBody": ".stream_function_revision_functions_function_id_call_stream_revision_id_postop", "StreamFunctionRevisionFunctionsFunctionIDCallStreamRevisionIDPostResponseBodyTypedDict": ".stream_function_revision_functions_function_id_call_stream_revision_id_postop", 
"StreamFunctionRevisionFunctionsFunctionIDCallStreamRevisionIDPostResponseTypedDict": ".stream_function_revision_functions_function_id_call_stream_revision_id_postop", + "SubmitSpanFeedbackSpansSpanIDFeedbackPostRequest": ".submit_span_feedback_spans_span_id_feedback_postop", + "SubmitSpanFeedbackSpansSpanIDFeedbackPostRequestTypedDict": ".submit_span_feedback_spans_span_id_feedback_postop", + "SubmitFeedbackRequest": ".submitfeedbackrequest", + "SubmitFeedbackRequestTypedDict": ".submitfeedbackrequest", + "SubmitFeedbackResponse": ".submitfeedbackresponse", + "SubmitFeedbackResponseTypedDict": ".submitfeedbackresponse", "TextProcessingConfiguration": ".textprocessingconfiguration", "TextProcessingConfigurationTypedDict": ".textprocessingconfiguration", "TModel": ".tmodel", @@ -1836,6 +1888,10 @@ "UpdateSpanRequestTypedDict": ".updatespanrequest", "UpdateSpanResponse": ".updatespanresponse", "UpdateSpanResponseTypedDict": ".updatespanresponse", + "UploadFileKnowledgeKnowledgeBaseIDUploadPostRequest": ".upload_file_knowledge_knowledge_base_id_upload_postop", + "UploadFileKnowledgeKnowledgeBaseIDUploadPostRequestTypedDict": ".upload_file_knowledge_knowledge_base_id_upload_postop", + "UploadFileResponse": ".uploadfileresponse", + "UploadFileResponseTypedDict": ".uploadfileresponse", "UsageAnalyticsUsageGetRequest": ".usage_analytics_usage_getop", "UsageAnalyticsUsageGetRequestTypedDict": ".usage_analytics_usage_getop", "SearchContextSize": ".websearchoptions", diff --git a/src/opperai/models/body_upload_file_knowledge_knowledge_base_id_upload_post.py b/src/opperai/models/body_upload_file_knowledge_knowledge_base_id_upload_post.py new file mode 100644 index 0000000..b045539 --- /dev/null +++ b/src/opperai/models/body_upload_file_knowledge_knowledge_base_id_upload_post.py @@ -0,0 +1,92 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +import io +from opperai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from opperai.utils import FieldMetadata, MultipartFormMetadata +import pydantic +from pydantic import model_serializer +from typing import IO, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict + + +class BodyUploadFileKnowledgeKnowledgeBaseIDUploadPostFileTypedDict(TypedDict): + file_name: str + content: Union[bytes, IO[bytes], io.BufferedReader] + content_type: NotRequired[str] + + +class BodyUploadFileKnowledgeKnowledgeBaseIDUploadPostFile(BaseModel): + file_name: Annotated[ + str, pydantic.Field(alias="fileName"), FieldMetadata(multipart=True) + ] + + content: Annotated[ + Union[bytes, IO[bytes], io.BufferedReader], + pydantic.Field(alias=""), + FieldMetadata(multipart=MultipartFormMetadata(content=True)), + ] + + content_type: Annotated[ + Optional[str], + pydantic.Field(alias="Content-Type"), + FieldMetadata(multipart=True), + ] = None + + +class BodyUploadFileKnowledgeKnowledgeBaseIDUploadPostTypedDict(TypedDict): + file: BodyUploadFileKnowledgeKnowledgeBaseIDUploadPostFileTypedDict + r"""The file to upload""" + chunk_size: NotRequired[int] + r"""The chunk size to use for the document (number of characters)""" + chunk_overlap: NotRequired[int] + r"""The chunk overlap to use for the document (number of characters)""" + metadata: NotRequired[Nullable[str]] + r"""Optional JSON object metadata to attach to the file""" + + +class BodyUploadFileKnowledgeKnowledgeBaseIDUploadPost(BaseModel): + file: Annotated[ + BodyUploadFileKnowledgeKnowledgeBaseIDUploadPostFile, + FieldMetadata(multipart=MultipartFormMetadata(file=True)), + ] + r"""The file to upload""" + + chunk_size: Annotated[Optional[int], FieldMetadata(multipart=True)] = 2000 + r"""The chunk size to use for the document (number of characters)""" + + chunk_overlap: Annotated[Optional[int], FieldMetadata(multipart=True)] = 200 + r"""The chunk overlap to use for the document (number of characters)""" + + metadata: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET + r"""Optional JSON object metadata to attach to the file""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["chunk_size", "chunk_overlap", "metadata"] + nullable_fields = ["metadata"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/opperai/models/chatcompletionassistantmessageparam.py b/src/opperai/models/chatcompletionassistantmessageparam.py index 1241759..feb0681 100644 --- a/src/opperai/models/chatcompletionassistantmessageparam.py +++ b/src/opperai/models/chatcompletionassistantmessageparam.py @@ -22,7 +22,7 @@ from opperai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from opperai.utils import get_discriminator, validate_const import pydantic -from pydantic import Discriminator, Tag, model_serializer +from pydantic import Discriminator, Field, Tag, 
model_serializer from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -69,10 +69,10 @@ ChatCompletionAssistantMessageParamToolCall = Annotated[ Union[ - Annotated[ChatCompletionMessageFunctionToolCallParam, Tag("function")], - Annotated[ChatCompletionMessageCustomToolCallParam, Tag("custom")], + ChatCompletionMessageFunctionToolCallParam, + ChatCompletionMessageCustomToolCallParam, ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), + Field(discriminator="TYPE"), ] diff --git a/src/opperai/models/chatcompletionmessage.py b/src/opperai/models/chatcompletionmessage.py index 5c68540..8359041 100644 --- a/src/opperai/models/chatcompletionmessage.py +++ b/src/opperai/models/chatcompletionmessage.py @@ -13,9 +13,9 @@ ) from .functioncall_output import FunctionCallOutput, FunctionCallOutputTypedDict from opperai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from opperai.utils import get_discriminator, validate_const +from opperai.utils import validate_const import pydantic -from pydantic import ConfigDict, Discriminator, Tag, model_serializer +from pydantic import ConfigDict, Field, model_serializer from pydantic.functional_validators import AfterValidator from typing import Any, Dict, List, Literal, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -31,11 +31,8 @@ ChatCompletionMessageToolCall = Annotated[ - Union[ - Annotated[ChatCompletionMessageFunctionToolCall, Tag("function")], - Annotated[ChatCompletionMessageCustomToolCall, Tag("custom")], - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), + Union[ChatCompletionMessageFunctionToolCall, ChatCompletionMessageCustomToolCall], + Field(discriminator="TYPE"), ] diff --git a/src/opperai/models/chatcompletionnonstreaming.py b/src/opperai/models/chatcompletionnonstreaming.py index 0cdd9c1..1bd293a 100644 --- a/src/opperai/models/chatcompletionnonstreaming.py +++ b/src/opperai/models/chatcompletionnonstreaming.py @@ -78,9 +78,9 @@ from .websearchoptions import WebSearchOptions, WebSearchOptionsTypedDict from enum import Enum from opperai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from opperai.utils import get_discriminator, validate_const +from opperai.utils import validate_const import pydantic -from pydantic import Discriminator, Tag, model_serializer +from pydantic import Field, model_serializer from pydantic.functional_validators import AfterValidator from typing import Any, Dict, List, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -101,14 +101,14 @@ ChatCompletionNonStreamingMessage = Annotated[ Union[ - Annotated[ChatCompletionDeveloperMessageParam, Tag("developer")], - Annotated[ChatCompletionSystemMessageParam, Tag("system")], - Annotated[ChatCompletionUserMessageParam, Tag("user")], - Annotated[ChatCompletionAssistantMessageParam, Tag("assistant")], - Annotated[ChatCompletionToolMessageParam, Tag("tool")], - Annotated[ChatCompletionFunctionMessageParam, Tag("function")], + ChatCompletionDeveloperMessageParam, + ChatCompletionSystemMessageParam, + ChatCompletionUserMessageParam, + ChatCompletionAssistantMessageParam, + ChatCompletionToolMessageParam, + ChatCompletionFunctionMessageParam, ], - Discriminator(lambda m: get_discriminator(m, "role", "role")), + Field(discriminator="ROLE"), ] @@ -158,12 +158,8 @@ class 
ChatCompletionNonStreamingReasoningEffort(str, Enum): ChatCompletionNonStreamingResponseFormat = Annotated[ - Union[ - Annotated[ResponseFormatText, Tag("text")], - Annotated[ResponseFormatJSONSchema, Tag("json_schema")], - Annotated[ResponseFormatJSONObject, Tag("json_object")], - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), + Union[ResponseFormatText, ResponseFormatJSONSchema, ResponseFormatJSONObject], + Field(discriminator="TYPE"), ] @@ -222,11 +218,8 @@ class ChatCompletionNonStreamingToolChoiceEnum(str, Enum): ChatCompletionNonStreamingTool = Annotated[ - Union[ - Annotated[ChatCompletionFunctionToolParam, Tag("function")], - Annotated[ChatCompletionCustomToolParam, Tag("custom")], - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), + Union[ChatCompletionFunctionToolParam, ChatCompletionCustomToolParam], + Field(discriminator="TYPE"), ] diff --git a/src/opperai/models/chatcompletionstreaming.py b/src/opperai/models/chatcompletionstreaming.py index 12deecf..5ad5d8d 100644 --- a/src/opperai/models/chatcompletionstreaming.py +++ b/src/opperai/models/chatcompletionstreaming.py @@ -78,9 +78,9 @@ from .websearchoptions import WebSearchOptions, WebSearchOptionsTypedDict from enum import Enum from opperai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from opperai.utils import get_discriminator, validate_const +from opperai.utils import validate_const import pydantic -from pydantic import Discriminator, Tag, model_serializer +from pydantic import Field, model_serializer from pydantic.functional_validators import AfterValidator from typing import Any, Dict, List, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -101,14 +101,14 @@ ChatCompletionStreamingMessage = Annotated[ Union[ - Annotated[ChatCompletionDeveloperMessageParam, Tag("developer")], - Annotated[ChatCompletionSystemMessageParam, Tag("system")], - Annotated[ChatCompletionUserMessageParam, Tag("user")], - Annotated[ChatCompletionAssistantMessageParam, Tag("assistant")], - Annotated[ChatCompletionToolMessageParam, Tag("tool")], - Annotated[ChatCompletionFunctionMessageParam, Tag("function")], + ChatCompletionDeveloperMessageParam, + ChatCompletionSystemMessageParam, + ChatCompletionUserMessageParam, + ChatCompletionAssistantMessageParam, + ChatCompletionToolMessageParam, + ChatCompletionFunctionMessageParam, ], - Discriminator(lambda m: get_discriminator(m, "role", "role")), + Field(discriminator="ROLE"), ] @@ -157,12 +157,8 @@ class ChatCompletionStreamingReasoningEffort(str, Enum): ChatCompletionStreamingResponseFormat = Annotated[ - Union[ - Annotated[ResponseFormatText, Tag("text")], - Annotated[ResponseFormatJSONSchema, Tag("json_schema")], - Annotated[ResponseFormatJSONObject, Tag("json_object")], - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), + Union[ResponseFormatText, ResponseFormatJSONSchema, ResponseFormatJSONObject], + Field(discriminator="TYPE"), ] @@ -221,11 +217,8 @@ class ChatCompletionStreamingToolChoiceEnum(str, Enum): ChatCompletionStreamingTool = Annotated[ - Union[ - Annotated[ChatCompletionFunctionToolParam, Tag("function")], - Annotated[ChatCompletionCustomToolParam, Tag("custom")], - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), + Union[ChatCompletionFunctionToolParam, ChatCompletionCustomToolParam], + Field(discriminator="TYPE"), ] diff --git a/src/opperai/models/feedbackinfo.py b/src/opperai/models/feedbackinfo.py new file mode 100644 
index 0000000..3ab88dd --- /dev/null +++ b/src/opperai/models/feedbackinfo.py @@ -0,0 +1,55 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from opperai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class FeedbackInfoTypedDict(TypedDict): + r"""Human feedback information for a span.""" + + score: float + r"""Feedback score (0.0-1.0)""" + comment: NotRequired[Nullable[str]] + r"""Optional feedback comment""" + + +class FeedbackInfo(BaseModel): + r"""Human feedback information for a span.""" + + score: float + r"""Feedback score (0.0-1.0)""" + + comment: OptionalNullable[str] = UNSET + r"""Optional feedback comment""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["comment"] + nullable_fields = ["comment"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/opperai/models/function_stream_call_stream_postop.py b/src/opperai/models/function_stream_call_stream_postop.py index c367941..ebb96a3 100644 --- a/src/opperai/models/function_stream_call_stream_postop.py +++ b/src/opperai/models/function_stream_call_stream_postop.py @@ -44,7 +44,11 @@ class StreamingChunkTypedDict(TypedDict): span_id: NotRequired[Nullable[str]] r"""Unique identifier for the execution span, included in the first streaming chunk for tracing""" chunk_type: NotRequired[Nullable[str]] - r"""Indicates the streaming mode: 'text' for unstructured streaming, 'json' for structured streaming with output_schema. Only present when delta content is included.""" + r"""Indicates the streaming mode: 'text' for unstructured streaming, 'json' for structured streaming with output_schema, 'error' for error events. Only present when delta content is included.""" + error_type: NotRequired[Nullable[str]] + r"""Error type when chunk_type is 'error'""" + error_message: NotRequired[Nullable[str]] + r"""Error message when chunk_type is 'error'""" class StreamingChunk(BaseModel): @@ -78,12 +82,32 @@ class StreamingChunk(BaseModel): r"""Unique identifier for the execution span, included in the first streaming chunk for tracing""" chunk_type: OptionalNullable[str] = UNSET - r"""Indicates the streaming mode: 'text' for unstructured streaming, 'json' for structured streaming with output_schema. Only present when delta content is included.""" + r"""Indicates the streaming mode: 'text' for unstructured streaming, 'json' for structured streaming with output_schema, 'error' for error events. 
Only present when delta content is included.""" + + error_type: OptionalNullable[str] = UNSET + r"""Error type when chunk_type is 'error'""" + + error_message: OptionalNullable[str] = UNSET + r"""Error message when chunk_type is 'error'""" @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["delta", "json_path", "span_id", "chunk_type"] - nullable_fields = ["delta", "json_path", "span_id", "chunk_type"] + optional_fields = [ + "delta", + "json_path", + "span_id", + "chunk_type", + "error_type", + "error_message", + ] + nullable_fields = [ + "delta", + "json_path", + "span_id", + "chunk_type", + "error_type", + "error_message", + ] null_default_fields = [] serialized = handler(self) diff --git a/src/opperai/models/functioncallconfiguration_input.py b/src/opperai/models/functioncallconfiguration_input.py index c3d10c1..db54d9e 100644 --- a/src/opperai/models/functioncallconfiguration_input.py +++ b/src/opperai/models/functioncallconfiguration_input.py @@ -10,7 +10,7 @@ class FunctionCallConfigurationInputTypedDict(TypedDict): invocation_few_shot_count: NotRequired[int] - r"""The number of few-shot examples to use for the call. The examples are selected using nearest neighbor search of the function's dataset for items that are similar to the input.""" + r"""[DEPRECATED via /call] This field is ignored when passed via /call endpoint. The system enforces a default of 3. Configure via PATCH /v2/functions/{uuid} or the Platform UI instead. The number of few-shot examples to use for the call, selected using nearest neighbor search of the function's dataset.""" beta_evaluation: NotRequired[Nullable[Any]] r"""[DEPRECATED] This field is ignored. Evaluation is controlled by observer_enabled.""" invocation_structured_generation_max_attempts: NotRequired[int] @@ -25,9 +25,13 @@ class FunctionCallConfigurationInputTypedDict(TypedDict): class FunctionCallConfigurationInput(BaseModel): invocation_few_shot_count: Annotated[ - Optional[int], pydantic.Field(alias="invocation.few_shot.count") - ] = 0 - r"""The number of few-shot examples to use for the call. The examples are selected using nearest neighbor search of the function's dataset for items that are similar to the input.""" + Optional[int], + pydantic.Field( + deprecated="warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible.", + alias="invocation.few_shot.count", + ), + ] = 3 + r"""[DEPRECATED via /call] This field is ignored when passed via /call endpoint. The system enforces a default of 3. Configure via PATCH /v2/functions/{uuid} or the Platform UI instead. The number of few-shot examples to use for the call, selected using nearest neighbor search of the function's dataset.""" beta_evaluation: Annotated[ OptionalNullable[Any], pydantic.Field(alias="beta.evaluation") diff --git a/src/opperai/models/functioncallconfiguration_output.py b/src/opperai/models/functioncallconfiguration_output.py index 6ad1125..ceb1e2f 100644 --- a/src/opperai/models/functioncallconfiguration_output.py +++ b/src/opperai/models/functioncallconfiguration_output.py @@ -9,7 +9,7 @@ class FunctionCallConfigurationOutputTypedDict(TypedDict): invocation_few_shot_count: NotRequired[int] - r"""The number of few-shot examples to use for the call. The examples are selected using nearest neighbor search of the function's dataset for items that are similar to the input.""" + r"""[DEPRECATED via /call] This field is ignored when passed via /call endpoint. 
The system enforces a default of 3. Configure via PATCH /v2/functions/{uuid} or the Platform UI instead. The number of few-shot examples to use for the call, selected using nearest neighbor search of the function's dataset.""" invocation_structured_generation_max_attempts: NotRequired[int] r"""The maximum number of attempts to make when generating a response matching the output schema if provided.""" invocation_cache_ttl: NotRequired[int] @@ -22,9 +22,13 @@ class FunctionCallConfigurationOutputTypedDict(TypedDict): class FunctionCallConfigurationOutput(BaseModel): invocation_few_shot_count: Annotated[ - Optional[int], pydantic.Field(alias="invocation.few_shot.count") - ] = 0 - r"""The number of few-shot examples to use for the call. The examples are selected using nearest neighbor search of the function's dataset for items that are similar to the input.""" + Optional[int], + pydantic.Field( + deprecated="warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible.", + alias="invocation.few_shot.count", + ), + ] = 3 + r"""[DEPRECATED via /call] This field is ignored when passed via /call endpoint. The system enforces a default of 3. Configure via PATCH /v2/functions/{uuid} or the Platform UI instead. The number of few-shot examples to use for the call, selected using nearest neighbor search of the function's dataset.""" invocation_structured_generation_max_attempts: Annotated[ Optional[int], diff --git a/src/opperai/models/getspanresponse.py b/src/opperai/models/getspanresponse.py index c929f73..ed51d79 100644 --- a/src/opperai/models/getspanresponse.py +++ b/src/opperai/models/getspanresponse.py @@ -1,6 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations +from .feedbackinfo import FeedbackInfo, FeedbackInfoTypedDict from datetime import datetime from opperai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer @@ -32,6 +33,8 @@ class GetSpanResponseTypedDict(TypedDict): r"""The metadata of the span""" score: NotRequired[Nullable[int]] r"""The score of the span""" + feedback: NotRequired[Nullable[FeedbackInfoTypedDict]] + r"""Human feedback if submitted""" class GetSpanResponse(BaseModel): @@ -70,6 +73,9 @@ class GetSpanResponse(BaseModel): score: OptionalNullable[int] = UNSET r"""The score of the span""" + feedback: OptionalNullable[FeedbackInfo] = UNSET + r"""Human feedback if submitted""" + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -83,6 +89,7 @@ def serialize_model(self, handler): "error", "meta", "score", + "feedback", ] nullable_fields = [ "start_time", @@ -95,6 +102,7 @@ def serialize_model(self, handler): "error", "meta", "score", + "feedback", ] null_default_fields = [] diff --git a/src/opperai/models/openai_types_chat_chat_completion_custom_tool_param_custom.py b/src/opperai/models/openai_types_chat_chat_completion_custom_tool_param_custom.py index 9642be5..16acb0e 100644 --- a/src/opperai/models/openai_types_chat_chat_completion_custom_tool_param_custom.py +++ b/src/opperai/models/openai_types_chat_chat_completion_custom_tool_param_custom.py @@ -4,9 +4,8 @@ from .customformatgrammar import CustomFormatGrammar, CustomFormatGrammarTypedDict from .customformattext import CustomFormatText, CustomFormatTextTypedDict from opperai.types import BaseModel -from opperai.utils import get_discriminator import pydantic -from pydantic import Discriminator, Tag +from 
pydantic import Field from typing import Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -17,11 +16,7 @@ Format = Annotated[ - Union[ - Annotated[CustomFormatText, Tag("text")], - Annotated[CustomFormatGrammar, Tag("grammar")], - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), + Union[CustomFormatText, CustomFormatGrammar], Field(discriminator="TYPE") ] diff --git a/src/opperai/models/submit_span_feedback_spans_span_id_feedback_postop.py b/src/opperai/models/submit_span_feedback_spans_span_id_feedback_postop.py new file mode 100644 index 0000000..0396e6e --- /dev/null +++ b/src/opperai/models/submit_span_feedback_spans_span_id_feedback_postop.py @@ -0,0 +1,25 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .submitfeedbackrequest import SubmitFeedbackRequest, SubmitFeedbackRequestTypedDict +from opperai.types import BaseModel +from opperai.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class SubmitSpanFeedbackSpansSpanIDFeedbackPostRequestTypedDict(TypedDict): + span_id: str + r"""The ID of the span to provide feedback on""" + submit_feedback_request: SubmitFeedbackRequestTypedDict + + +class SubmitSpanFeedbackSpansSpanIDFeedbackPostRequest(BaseModel): + span_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""The ID of the span to provide feedback on""" + + submit_feedback_request: Annotated[ + SubmitFeedbackRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/opperai/models/submitfeedbackrequest.py b/src/opperai/models/submitfeedbackrequest.py new file mode 100644 index 0000000..71c61b2 --- /dev/null +++ b/src/opperai/models/submitfeedbackrequest.py @@ -0,0 +1,56 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from opperai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class SubmitFeedbackRequestTypedDict(TypedDict): + score: float + r"""Feedback score (0.0=negative, 1.0=positive)""" + comment: NotRequired[Nullable[str]] + r"""Optional comment explaining the feedback""" + save_to_dataset: NotRequired[Nullable[bool]] + r"""Force save to dataset (True=force save, False=never save, None=use auto-save config)""" + + +class SubmitFeedbackRequest(BaseModel): + score: float + r"""Feedback score (0.0=negative, 1.0=positive)""" + + comment: OptionalNullable[str] = UNSET + r"""Optional comment explaining the feedback""" + + save_to_dataset: OptionalNullable[bool] = UNSET + r"""Force save to dataset (True=force save, False=never save, None=use auto-save config)""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["comment", "save_to_dataset"] + nullable_fields = ["comment", "save_to_dataset"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/opperai/models/submitfeedbackresponse.py b/src/opperai/models/submitfeedbackresponse.py new file mode 100644 index 0000000..11ce9db --- /dev/null +++ b/src/opperai/models/submitfeedbackresponse.py @@ -0,0 +1,25 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from opperai.types import BaseModel +from typing_extensions import TypedDict + + +class SubmitFeedbackResponseTypedDict(TypedDict): + span_id: str + r"""The ID of the span""" + score: float + r"""The feedback score that was submitted""" + example_saved: bool + r"""Whether the example was saved to the dataset""" + + +class SubmitFeedbackResponse(BaseModel): + span_id: str + r"""The ID of the span""" + + score: float + r"""The feedback score that was submitted""" + + example_saved: bool + r"""Whether the example was saved to the dataset""" diff --git a/src/opperai/models/upload_file_knowledge_knowledge_base_id_upload_postop.py b/src/opperai/models/upload_file_knowledge_knowledge_base_id_upload_postop.py new file mode 100644 index 0000000..74c5027 --- /dev/null +++ b/src/opperai/models/upload_file_knowledge_knowledge_base_id_upload_postop.py @@ -0,0 +1,30 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .body_upload_file_knowledge_knowledge_base_id_upload_post import ( + BodyUploadFileKnowledgeKnowledgeBaseIDUploadPost, + BodyUploadFileKnowledgeKnowledgeBaseIDUploadPostTypedDict, +) +from opperai.types import BaseModel +from opperai.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class UploadFileKnowledgeKnowledgeBaseIDUploadPostRequestTypedDict(TypedDict): + knowledge_base_id: str + r"""The id of the knowledge base to upload the file to""" + body_upload_file_knowledge_knowledge_base_id_upload_post: ( + BodyUploadFileKnowledgeKnowledgeBaseIDUploadPostTypedDict + ) + + +class UploadFileKnowledgeKnowledgeBaseIDUploadPostRequest(BaseModel): + knowledge_base_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""The id of the knowledge base to upload the file to""" + + body_upload_file_knowledge_knowledge_base_id_upload_post: Annotated[ + BodyUploadFileKnowledgeKnowledgeBaseIDUploadPost, + FieldMetadata(request=RequestMetadata(media_type="multipart/form-data")), + ] diff --git a/src/opperai/models/uploadfileresponse.py b/src/opperai/models/uploadfileresponse.py new file mode 100644 index 0000000..2eb71fc --- /dev/null +++ b/src/opperai/models/uploadfileresponse.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from opperai.types import BaseModel +from typing import Any, Dict, Optional +from typing_extensions import NotRequired, TypedDict + + +class UploadFileResponseTypedDict(TypedDict): + id: str + key: str + original_filename: str + document_id: int + metadata: NotRequired[Dict[str, Any]] + + +class UploadFileResponse(BaseModel): + id: str + + key: str + + original_filename: str + + document_id: int + + metadata: Optional[Dict[str, Any]] = None diff --git a/src/opperai/spans.py b/src/opperai/spans.py index ec964d3..aa9e438 100644 --- a/src/opperai/spans.py +++ b/src/opperai/spans.py @@ -1184,3 +1184,261 @@ async def save_examples_async( raise errors.APIError("API error occurred", http_res, http_res_text) raise errors.APIError("Unexpected response received", http_res) + + def submit_feedback( + self, + *, + span_id: str, + score: float, + comment: OptionalNullable[str] = UNSET, + save_to_dataset: OptionalNullable[bool] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.SubmitFeedbackResponse: + r"""Submit Span Feedback + + Submit human feedback for a span. + + This endpoint allows you to provide feedback (thumbs up/down) on a span's output. + The feedback is stored on the associated generation and can trigger auto-save + to the function's dataset based on the observer's configuration. 
+ + - score=1.0: Positive feedback (thumbs up) + - score=0.0: Negative feedback (thumbs down) + - Intermediate values (e.g., 0.5) are supported for nuanced feedback + + :param span_id: The ID of the span to provide feedback on + :param score: Feedback score (0.0=negative, 1.0=positive) + :param comment: Optional comment explaining the feedback + :param save_to_dataset: Force save to dataset (True=force save, False=never save, None=use auto-save config) + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.SubmitSpanFeedbackSpansSpanIDFeedbackPostRequest( + span_id=span_id, + submit_feedback_request=models.SubmitFeedbackRequest( + score=score, + comment=comment, + save_to_dataset=save_to_dataset, + ), + ) + + req = self._build_request( + method="POST", + path="/spans/{span_id}/feedback", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.submit_feedback_request, + False, + False, + "json", + models.SubmitFeedbackRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="submit_span_feedback_spans__span_id__feedback_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "401", "404", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.SubmitFeedbackResponse, http_res) + if utils.match_response(http_res, "400", "application/json"): + response_data = unmarshal_json_response( + errors.BadRequestErrorData, http_res + ) + raise errors.BadRequestError(response_data, http_res) + if utils.match_response(http_res, "401", "application/json"): + response_data = unmarshal_json_response( + errors.UnauthorizedErrorData, http_res + ) + raise errors.UnauthorizedError(response_data, http_res) + if utils.match_response(http_res, "404", "application/json"): + response_data = unmarshal_json_response(errors.NotFoundErrorData, http_res) + raise errors.NotFoundError(response_data, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.RequestValidationErrorData, http_res + ) + raise errors.RequestValidationError(response_data, http_res) + if 
utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.APIError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.APIError("API error occurred", http_res, http_res_text) + + raise errors.APIError("Unexpected response received", http_res) + + async def submit_feedback_async( + self, + *, + span_id: str, + score: float, + comment: OptionalNullable[str] = UNSET, + save_to_dataset: OptionalNullable[bool] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.SubmitFeedbackResponse: + r"""Submit Span Feedback + + Submit human feedback for a span. + + This endpoint allows you to provide feedback (thumbs up/down) on a span's output. + The feedback is stored on the associated generation and can trigger auto-save + to the function's dataset based on the observer's configuration. + + - score=1.0: Positive feedback (thumbs up) + - score=0.0: Negative feedback (thumbs down) + - Intermediate values (e.g., 0.5) are supported for nuanced feedback + + :param span_id: The ID of the span to provide feedback on + :param score: Feedback score (0.0=negative, 1.0=positive) + :param comment: Optional comment explaining the feedback + :param save_to_dataset: Force save to dataset (True=force save, False=never save, None=use auto-save config) + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.SubmitSpanFeedbackSpansSpanIDFeedbackPostRequest( + span_id=span_id, + submit_feedback_request=models.SubmitFeedbackRequest( + score=score, + comment=comment, + save_to_dataset=save_to_dataset, + ), + ) + + req = self._build_request_async( + method="POST", + path="/spans/{span_id}/feedback", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.submit_feedback_request, + False, + False, + "json", + models.SubmitFeedbackRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="submit_span_feedback_spans__span_id__feedback_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "401", "404", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.SubmitFeedbackResponse, http_res) + if utils.match_response(http_res, "400", "application/json"): + response_data = unmarshal_json_response( + errors.BadRequestErrorData, http_res + ) + raise errors.BadRequestError(response_data, http_res) + if utils.match_response(http_res, "401", "application/json"): + response_data = unmarshal_json_response( + errors.UnauthorizedErrorData, http_res + ) + raise errors.UnauthorizedError(response_data, http_res) + if utils.match_response(http_res, "404", "application/json"): + response_data = unmarshal_json_response(errors.NotFoundErrorData, http_res) + raise errors.NotFoundError(response_data, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.RequestValidationErrorData, http_res + ) + raise errors.RequestValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.APIError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.APIError("API error occurred", http_res, http_res_text) + + raise errors.APIError("Unexpected response received", http_res)