diff --git a/node_modules/.bin/mime b/node_modules/.bin/mime
new file mode 120000
index 0000000..fbb7ee0
--- /dev/null
+++ b/node_modules/.bin/mime
@@ -0,0 +1 @@
+../mime/cli.js
\ No newline at end of file
diff --git a/node_modules/.bin/nodemon b/node_modules/.bin/nodemon
new file mode 120000
index 0000000..1056ddc
--- /dev/null
+++ b/node_modules/.bin/nodemon
@@ -0,0 +1 @@
+../nodemon/bin/nodemon.js
\ No newline at end of file
diff --git a/node_modules/.bin/nodetouch b/node_modules/.bin/nodetouch
new file mode 120000
index 0000000..3409fdb
--- /dev/null
+++ b/node_modules/.bin/nodetouch
@@ -0,0 +1 @@
+../touch/bin/nodetouch.js
\ No newline at end of file
diff --git a/node_modules/.bin/semver b/node_modules/.bin/semver
new file mode 120000
index 0000000..5aaadf4
--- /dev/null
+++ b/node_modules/.bin/semver
@@ -0,0 +1 @@
+../semver/bin/semver.js
\ No newline at end of file
diff --git a/node_modules/.bin/uuid b/node_modules/.bin/uuid
new file mode 100644
index 0000000..c3ec003
--- /dev/null
+++ b/node_modules/.bin/uuid
@@ -0,0 +1,12 @@
+#!/bin/sh
+basedir=$(dirname "$(echo "$0" | sed -e 's,\\,/,g')")
+
+case `uname` in
+ *CYGWIN*|*MINGW*|*MSYS*) basedir=`cygpath -w "$basedir"`;;
+esac
+
+if [ -x "$basedir/node" ]; then
+ exec "$basedir/node" "$basedir/../uuid/dist/bin/uuid" "$@"
+else
+ exec node "$basedir/../uuid/dist/bin/uuid" "$@"
+fi
diff --git a/node_modules/.bin/uuid.cmd b/node_modules/.bin/uuid.cmd
new file mode 100644
index 0000000..0f2376e
--- /dev/null
+++ b/node_modules/.bin/uuid.cmd
@@ -0,0 +1,17 @@
+@ECHO off
+GOTO start
+:find_dp0
+SET dp0=%~dp0
+EXIT /b
+:start
+SETLOCAL
+CALL :find_dp0
+
+IF EXIST "%dp0%\node.exe" (
+ SET "_prog=%dp0%\node.exe"
+) ELSE (
+ SET "_prog=node"
+ SET PATHEXT=%PATHEXT:;.JS;=;%
+)
+
+endLocal & goto #_undefined_# 2>NUL || title %COMSPEC% & "%_prog%" "%dp0%\..\uuid\dist\bin\uuid" %*
diff --git a/node_modules/.bin/uuid.ps1 b/node_modules/.bin/uuid.ps1
new file mode 100644
index 0000000..7804628
--- /dev/null
+++ b/node_modules/.bin/uuid.ps1
@@ -0,0 +1,28 @@
+#!/usr/bin/env pwsh
+$basedir=Split-Path $MyInvocation.MyCommand.Definition -Parent
+
+$exe=""
+if ($PSVersionTable.PSVersion -lt "6.0" -or $IsWindows) {
+ # Fix case when both the Windows and Linux builds of Node
+ # are installed in the same directory
+ $exe=".exe"
+}
+$ret=0
+if (Test-Path "$basedir/node$exe") {
+ # Support pipeline input
+ if ($MyInvocation.ExpectingInput) {
+ $input | & "$basedir/node$exe" "$basedir/../uuid/dist/bin/uuid" $args
+ } else {
+ & "$basedir/node$exe" "$basedir/../uuid/dist/bin/uuid" $args
+ }
+ $ret=$LASTEXITCODE
+} else {
+ # Support pipeline input
+ if ($MyInvocation.ExpectingInput) {
+ $input | & "node$exe" "$basedir/../uuid/dist/bin/uuid" $args
+ } else {
+ & "node$exe" "$basedir/../uuid/dist/bin/uuid" $args
+ }
+ $ret=$LASTEXITCODE
+}
+exit $ret
diff --git a/node_modules/.package-lock.json b/node_modules/.package-lock.json
new file mode 100644
index 0000000..f73335d
--- /dev/null
+++ b/node_modules/.package-lock.json
@@ -0,0 +1,1485 @@
+{
+ "name": "Business_Analyst_AI",
+ "lockfileVersion": 3,
+ "requires": true,
+ "packages": {
+ "node_modules/@google/genai": {
+ "version": "0.7.0",
+ "resolved": "https://registry.npmjs.org/@google/genai/-/genai-0.7.0.tgz",
+ "integrity": "sha512-r+Fwj/emnXZN5R+4JCxDXboY4AGTmTn7+Wnori5dgyJiStP0P82f9YYL0CVsCnDIumNY2i0UIcZ1zGZdtHJ34w==",
+ "dependencies": {
+ "google-auth-library": "^9.14.2",
+ "ws": "^8.18.0"
+ },
+ "engines": {
+ "node": ">=18.0.0"
+ }
+ },
+ "node_modules/accepts": {
+ "version": "1.3.8",
+ "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz",
+ "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==",
+ "license": "MIT",
+ "dependencies": {
+ "mime-types": "~2.1.34",
+ "negotiator": "0.6.3"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/agent-base": {
+ "version": "7.1.3",
+ "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.3.tgz",
+ "integrity": "sha512-jRR5wdylq8CkOe6hei19GGZnxM6rBGwFl3Bg0YItGDimvjGtAvdZk4Pu6Cl4u4Igsws4a1fd1Vq3ezrhn4KmFw==",
+ "engines": {
+ "node": ">= 14"
+ }
+ },
+ "node_modules/anymatch": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz",
+ "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==",
+ "license": "ISC",
+ "dependencies": {
+ "normalize-path": "^3.0.0",
+ "picomatch": "^2.0.4"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/array-flatten": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz",
+ "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==",
+ "license": "MIT"
+ },
+ "node_modules/balanced-match": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
+ "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==",
+ "license": "MIT"
+ },
+ "node_modules/base64-js": {
+ "version": "1.5.1",
+ "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz",
+ "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/feross"
+ },
+ {
+ "type": "patreon",
+ "url": "https://www.patreon.com/feross"
+ },
+ {
+ "type": "consulting",
+ "url": "https://feross.org/support"
+ }
+ ]
+ },
+ "node_modules/bignumber.js": {
+ "version": "9.1.2",
+ "resolved": "https://registry.npmjs.org/bignumber.js/-/bignumber.js-9.1.2.tgz",
+ "integrity": "sha512-2/mKyZH9K85bzOEfhXDBFZTGd1CTs+5IHpeFQo9luiBG7hghdC851Pj2WAhb6E3R6b9tZj/XKhbg4fum+Kepug==",
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/binary-extensions": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz",
+ "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/body-parser": {
+ "version": "1.20.3",
+ "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.3.tgz",
+ "integrity": "sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==",
+ "license": "MIT",
+ "dependencies": {
+ "bytes": "3.1.2",
+ "content-type": "~1.0.5",
+ "debug": "2.6.9",
+ "depd": "2.0.0",
+ "destroy": "1.2.0",
+ "http-errors": "2.0.0",
+ "iconv-lite": "0.4.24",
+ "on-finished": "2.4.1",
+ "qs": "6.13.0",
+ "raw-body": "2.5.2",
+ "type-is": "~1.6.18",
+ "unpipe": "1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.8",
+ "npm": "1.2.8000 || >= 1.4.16"
+ }
+ },
+ "node_modules/body-parser/node_modules/debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "license": "MIT",
+ "dependencies": {
+ "ms": "2.0.0"
+ }
+ },
+ "node_modules/body-parser/node_modules/ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==",
+ "license": "MIT"
+ },
+ "node_modules/brace-expansion": {
+ "version": "1.1.11",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
+ "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
+ "license": "MIT",
+ "dependencies": {
+ "balanced-match": "^1.0.0",
+ "concat-map": "0.0.1"
+ }
+ },
+ "node_modules/braces": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz",
+ "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==",
+ "license": "MIT",
+ "dependencies": {
+ "fill-range": "^7.1.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/buffer-equal-constant-time": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz",
+ "integrity": "sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA=="
+ },
+ "node_modules/bytes": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz",
+ "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/call-bind-apply-helpers": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz",
+ "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==",
+ "license": "MIT",
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "function-bind": "^1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/call-bound": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz",
+ "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==",
+ "license": "MIT",
+ "dependencies": {
+ "call-bind-apply-helpers": "^1.0.2",
+ "get-intrinsic": "^1.3.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/chokidar": {
+ "version": "3.6.0",
+ "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz",
+ "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==",
+ "license": "MIT",
+ "dependencies": {
+ "anymatch": "~3.1.2",
+ "braces": "~3.0.2",
+ "glob-parent": "~5.1.2",
+ "is-binary-path": "~2.1.0",
+ "is-glob": "~4.0.1",
+ "normalize-path": "~3.0.0",
+ "readdirp": "~3.6.0"
+ },
+ "engines": {
+ "node": ">= 8.10.0"
+ },
+ "funding": {
+ "url": "https://paulmillr.com/funding/"
+ },
+ "optionalDependencies": {
+ "fsevents": "~2.3.2"
+ }
+ },
+ "node_modules/concat-map": {
+ "version": "0.0.1",
+ "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
+ "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==",
+ "license": "MIT"
+ },
+ "node_modules/content-disposition": {
+ "version": "0.5.4",
+ "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz",
+ "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==",
+ "license": "MIT",
+ "dependencies": {
+ "safe-buffer": "5.2.1"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/content-type": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz",
+ "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/cookie": {
+ "version": "0.7.1",
+ "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.1.tgz",
+ "integrity": "sha512-6DnInpx7SJ2AK3+CTUE/ZM0vWTUboZCegxhC2xiIydHR9jNuTAASBrfEpHhiGOZw/nX51bHt6YQl8jsGo4y/0w==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/cookie-signature": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz",
+ "integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==",
+ "license": "MIT"
+ },
+ "node_modules/cors": {
+ "version": "2.8.5",
+ "resolved": "https://registry.npmjs.org/cors/-/cors-2.8.5.tgz",
+ "integrity": "sha512-KIHbLJqu73RGr/hnbrO9uBeixNGuvSQjul/jdFvS/KFSIH1hWVd1ng7zOHx+YrEfInLG7q4n6GHQ9cDtxv/P6g==",
+ "license": "MIT",
+ "dependencies": {
+ "object-assign": "^4",
+ "vary": "^1"
+ },
+ "engines": {
+ "node": ">= 0.10"
+ }
+ },
+ "node_modules/debug": {
+ "version": "4.4.0",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.0.tgz",
+ "integrity": "sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==",
+ "dependencies": {
+ "ms": "^2.1.3"
+ },
+ "engines": {
+ "node": ">=6.0"
+ },
+ "peerDependenciesMeta": {
+ "supports-color": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/depd": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz",
+ "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/destroy": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz",
+ "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.8",
+ "npm": "1.2.8000 || >= 1.4.16"
+ }
+ },
+ "node_modules/dotenv": {
+ "version": "16.4.7",
+ "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.4.7.tgz",
+ "integrity": "sha512-47qPchRCykZC03FhkYAhrvwU4xDBFIj1QPqaarj6mdM/hgUzfPHcpkHJOn3mJAufFeeAxAzeGsr5X0M4k6fLZQ==",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://dotenvx.com"
+ }
+ },
+ "node_modules/dunder-proto": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz",
+ "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==",
+ "license": "MIT",
+ "dependencies": {
+ "call-bind-apply-helpers": "^1.0.1",
+ "es-errors": "^1.3.0",
+ "gopd": "^1.2.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/ecdsa-sig-formatter": {
+ "version": "1.0.11",
+ "resolved": "https://registry.npmjs.org/ecdsa-sig-formatter/-/ecdsa-sig-formatter-1.0.11.tgz",
+ "integrity": "sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ==",
+ "dependencies": {
+ "safe-buffer": "^5.0.1"
+ }
+ },
+ "node_modules/ee-first": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz",
+ "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==",
+ "license": "MIT"
+ },
+ "node_modules/encodeurl": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz",
+ "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/es-define-property": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz",
+ "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-errors": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz",
+ "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-object-atoms": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz",
+ "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==",
+ "license": "MIT",
+ "dependencies": {
+ "es-errors": "^1.3.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/escape-html": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz",
+ "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==",
+ "license": "MIT"
+ },
+ "node_modules/etag": {
+ "version": "1.8.1",
+ "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz",
+ "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/express": {
+ "version": "4.21.2",
+ "resolved": "https://registry.npmjs.org/express/-/express-4.21.2.tgz",
+ "integrity": "sha512-28HqgMZAmih1Czt9ny7qr6ek2qddF4FclbMzwhCREB6OFfH+rXAnuNCwo1/wFvrtbgsQDb4kSbX9de9lFbrXnA==",
+ "license": "MIT",
+ "dependencies": {
+ "accepts": "~1.3.8",
+ "array-flatten": "1.1.1",
+ "body-parser": "1.20.3",
+ "content-disposition": "0.5.4",
+ "content-type": "~1.0.4",
+ "cookie": "0.7.1",
+ "cookie-signature": "1.0.6",
+ "debug": "2.6.9",
+ "depd": "2.0.0",
+ "encodeurl": "~2.0.0",
+ "escape-html": "~1.0.3",
+ "etag": "~1.8.1",
+ "finalhandler": "1.3.1",
+ "fresh": "0.5.2",
+ "http-errors": "2.0.0",
+ "merge-descriptors": "1.0.3",
+ "methods": "~1.1.2",
+ "on-finished": "2.4.1",
+ "parseurl": "~1.3.3",
+ "path-to-regexp": "0.1.12",
+ "proxy-addr": "~2.0.7",
+ "qs": "6.13.0",
+ "range-parser": "~1.2.1",
+ "safe-buffer": "5.2.1",
+ "send": "0.19.0",
+ "serve-static": "1.16.2",
+ "setprototypeof": "1.2.0",
+ "statuses": "2.0.1",
+ "type-is": "~1.6.18",
+ "utils-merge": "1.0.1",
+ "vary": "~1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.10.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/express"
+ }
+ },
+ "node_modules/express/node_modules/debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "license": "MIT",
+ "dependencies": {
+ "ms": "2.0.0"
+ }
+ },
+ "node_modules/express/node_modules/ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==",
+ "license": "MIT"
+ },
+ "node_modules/extend": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz",
+ "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g=="
+ },
+ "node_modules/fill-range": {
+ "version": "7.1.1",
+ "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz",
+ "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==",
+ "license": "MIT",
+ "dependencies": {
+ "to-regex-range": "^5.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/finalhandler": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.3.1.tgz",
+ "integrity": "sha512-6BN9trH7bp3qvnrRyzsBz+g3lZxTNZTbVO2EV1CS0WIcDbawYVdYvGflME/9QP0h0pYlCDBCTjYa9nZzMDpyxQ==",
+ "license": "MIT",
+ "dependencies": {
+ "debug": "2.6.9",
+ "encodeurl": "~2.0.0",
+ "escape-html": "~1.0.3",
+ "on-finished": "2.4.1",
+ "parseurl": "~1.3.3",
+ "statuses": "2.0.1",
+ "unpipe": "~1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/finalhandler/node_modules/debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "license": "MIT",
+ "dependencies": {
+ "ms": "2.0.0"
+ }
+ },
+ "node_modules/finalhandler/node_modules/ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==",
+ "license": "MIT"
+ },
+ "node_modules/forwarded": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz",
+ "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/fresh": {
+ "version": "0.5.2",
+ "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz",
+ "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/fsevents": {
+ "version": "2.3.3",
+ "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz",
+ "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==",
+ "hasInstallScript": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": "^8.16.0 || ^10.6.0 || >=11.0.0"
+ }
+ },
+ "node_modules/function-bind": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz",
+ "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==",
+ "license": "MIT",
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/gaxios": {
+ "version": "6.7.1",
+ "resolved": "https://registry.npmjs.org/gaxios/-/gaxios-6.7.1.tgz",
+ "integrity": "sha512-LDODD4TMYx7XXdpwxAVRAIAuB0bzv0s+ywFonY46k126qzQHT9ygyoa9tncmOiQmmDrik65UYsEkv3lbfqQ3yQ==",
+ "dependencies": {
+ "extend": "^3.0.2",
+ "https-proxy-agent": "^7.0.1",
+ "is-stream": "^2.0.0",
+ "node-fetch": "^2.6.9",
+ "uuid": "^9.0.1"
+ },
+ "engines": {
+ "node": ">=14"
+ }
+ },
+ "node_modules/gcp-metadata": {
+ "version": "6.1.1",
+ "resolved": "https://registry.npmjs.org/gcp-metadata/-/gcp-metadata-6.1.1.tgz",
+ "integrity": "sha512-a4tiq7E0/5fTjxPAaH4jpjkSv/uCaU2p5KC6HVGrvl0cDjA8iBZv4vv1gyzlmK0ZUKqwpOyQMKzZQe3lTit77A==",
+ "dependencies": {
+ "gaxios": "^6.1.1",
+ "google-logging-utils": "^0.0.2",
+ "json-bigint": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=14"
+ }
+ },
+ "node_modules/get-intrinsic": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz",
+ "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==",
+ "license": "MIT",
+ "dependencies": {
+ "call-bind-apply-helpers": "^1.0.2",
+ "es-define-property": "^1.0.1",
+ "es-errors": "^1.3.0",
+ "es-object-atoms": "^1.1.1",
+ "function-bind": "^1.1.2",
+ "get-proto": "^1.0.1",
+ "gopd": "^1.2.0",
+ "has-symbols": "^1.1.0",
+ "hasown": "^2.0.2",
+ "math-intrinsics": "^1.1.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/get-proto": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz",
+ "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==",
+ "license": "MIT",
+ "dependencies": {
+ "dunder-proto": "^1.0.1",
+ "es-object-atoms": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/glob-parent": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
+ "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
+ "license": "ISC",
+ "dependencies": {
+ "is-glob": "^4.0.1"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/google-auth-library": {
+ "version": "9.15.1",
+ "resolved": "https://registry.npmjs.org/google-auth-library/-/google-auth-library-9.15.1.tgz",
+ "integrity": "sha512-Jb6Z0+nvECVz+2lzSMt9u98UsoakXxA2HGHMCxh+so3n90XgYWkq5dur19JAJV7ONiJY22yBTyJB1TSkvPq9Ng==",
+ "dependencies": {
+ "base64-js": "^1.3.0",
+ "ecdsa-sig-formatter": "^1.0.11",
+ "gaxios": "^6.1.1",
+ "gcp-metadata": "^6.1.0",
+ "gtoken": "^7.0.0",
+ "jws": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=14"
+ }
+ },
+ "node_modules/google-logging-utils": {
+ "version": "0.0.2",
+ "resolved": "https://registry.npmjs.org/google-logging-utils/-/google-logging-utils-0.0.2.tgz",
+ "integrity": "sha512-NEgUnEcBiP5HrPzufUkBzJOD/Sxsco3rLNo1F1TNf7ieU8ryUzBhqba8r756CjLX7rn3fHl6iLEwPYuqpoKgQQ==",
+ "engines": {
+ "node": ">=14"
+ }
+ },
+ "node_modules/gopd": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz",
+ "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/gtoken": {
+ "version": "7.1.0",
+ "resolved": "https://registry.npmjs.org/gtoken/-/gtoken-7.1.0.tgz",
+ "integrity": "sha512-pCcEwRi+TKpMlxAQObHDQ56KawURgyAf6jtIY046fJ5tIv3zDe/LEIubckAO8fj6JnAxLdmWkUfNyulQ2iKdEw==",
+ "dependencies": {
+ "gaxios": "^6.0.0",
+ "jws": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=14.0.0"
+ }
+ },
+ "node_modules/has-flag": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz",
+ "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/has-symbols": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz",
+ "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/hasown": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz",
+ "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==",
+ "license": "MIT",
+ "dependencies": {
+ "function-bind": "^1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/http-errors": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz",
+ "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==",
+ "license": "MIT",
+ "dependencies": {
+ "depd": "2.0.0",
+ "inherits": "2.0.4",
+ "setprototypeof": "1.2.0",
+ "statuses": "2.0.1",
+ "toidentifier": "1.0.1"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/https-proxy-agent": {
+ "version": "7.0.6",
+ "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz",
+ "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==",
+ "dependencies": {
+ "agent-base": "^7.1.2",
+ "debug": "4"
+ },
+ "engines": {
+ "node": ">= 14"
+ }
+ },
+ "node_modules/iconv-lite": {
+ "version": "0.4.24",
+ "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz",
+ "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==",
+ "license": "MIT",
+ "dependencies": {
+ "safer-buffer": ">= 2.1.2 < 3"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/ignore-by-default": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/ignore-by-default/-/ignore-by-default-1.0.1.tgz",
+ "integrity": "sha512-Ius2VYcGNk7T90CppJqcIkS5ooHUZyIQK+ClZfMfMNFEF9VSE73Fq+906u/CWu92x4gzZMWOwfFYckPObzdEbA==",
+ "license": "ISC"
+ },
+ "node_modules/inherits": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
+ "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==",
+ "license": "ISC"
+ },
+ "node_modules/ipaddr.js": {
+ "version": "1.9.1",
+ "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz",
+ "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.10"
+ }
+ },
+ "node_modules/is-binary-path": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz",
+ "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==",
+ "license": "MIT",
+ "dependencies": {
+ "binary-extensions": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/is-extglob": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz",
+ "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-glob": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz",
+ "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==",
+ "license": "MIT",
+ "dependencies": {
+ "is-extglob": "^2.1.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-number": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
+ "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.12.0"
+ }
+ },
+ "node_modules/is-stream": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz",
+ "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==",
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/json-bigint": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/json-bigint/-/json-bigint-1.0.0.tgz",
+ "integrity": "sha512-SiPv/8VpZuWbvLSMtTDU8hEfrZWg/mH/nV/b4o0CYbSxu1UIQPLdwKOCIyLQX+VIPO5vrLX3i8qtqFyhdPSUSQ==",
+ "dependencies": {
+ "bignumber.js": "^9.0.0"
+ }
+ },
+ "node_modules/jwa": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/jwa/-/jwa-2.0.0.tgz",
+ "integrity": "sha512-jrZ2Qx916EA+fq9cEAeCROWPTfCwi1IVHqT2tapuqLEVVDKFDENFw1oL+MwrTvH6msKxsd1YTDVw6uKEcsrLEA==",
+ "dependencies": {
+ "buffer-equal-constant-time": "1.0.1",
+ "ecdsa-sig-formatter": "1.0.11",
+ "safe-buffer": "^5.0.1"
+ }
+ },
+ "node_modules/jws": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/jws/-/jws-4.0.0.tgz",
+ "integrity": "sha512-KDncfTmOZoOMTFG4mBlG0qUIOlc03fmzH+ru6RgYVZhPkyiy/92Owlt/8UEN+a4TXR1FQetfIpJE8ApdvdVxTg==",
+ "dependencies": {
+ "jwa": "^2.0.0",
+ "safe-buffer": "^5.0.1"
+ }
+ },
+ "node_modules/math-intrinsics": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz",
+ "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/media-typer": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz",
+ "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/merge-descriptors": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.3.tgz",
+ "integrity": "sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==",
+ "license": "MIT",
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/methods": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz",
+ "integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/mime": {
+ "version": "1.6.0",
+ "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz",
+ "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==",
+ "license": "MIT",
+ "bin": {
+ "mime": "cli.js"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/mime-db": {
+ "version": "1.52.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
+ "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/mime-types": {
+ "version": "2.1.35",
+ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
+ "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
+ "license": "MIT",
+ "dependencies": {
+ "mime-db": "1.52.0"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/minimatch": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
+ "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==",
+ "license": "ISC",
+ "dependencies": {
+ "brace-expansion": "^1.1.7"
+ },
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/ms": {
+ "version": "2.1.3",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
+ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="
+ },
+ "node_modules/negotiator": {
+ "version": "0.6.3",
+ "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz",
+ "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/node-fetch": {
+ "version": "2.7.0",
+ "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz",
+ "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==",
+ "dependencies": {
+ "whatwg-url": "^5.0.0"
+ },
+ "engines": {
+ "node": "4.x || >=6.0.0"
+ },
+ "peerDependencies": {
+ "encoding": "^0.1.0"
+ },
+ "peerDependenciesMeta": {
+ "encoding": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/nodemon": {
+ "version": "3.1.9",
+ "resolved": "https://registry.npmjs.org/nodemon/-/nodemon-3.1.9.tgz",
+ "integrity": "sha512-hdr1oIb2p6ZSxu3PB2JWWYS7ZQ0qvaZsc3hK8DR8f02kRzc8rjYmxAIvdz+aYC+8F2IjNaB7HMcSDg8nQpJxyg==",
+ "license": "MIT",
+ "dependencies": {
+ "chokidar": "^3.5.2",
+ "debug": "^4",
+ "ignore-by-default": "^1.0.1",
+ "minimatch": "^3.1.2",
+ "pstree.remy": "^1.1.8",
+ "semver": "^7.5.3",
+ "simple-update-notifier": "^2.0.0",
+ "supports-color": "^5.5.0",
+ "touch": "^3.1.0",
+ "undefsafe": "^2.0.5"
+ },
+ "bin": {
+ "nodemon": "bin/nodemon.js"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/nodemon"
+ }
+ },
+ "node_modules/normalize-path": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz",
+ "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/object-assign": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
+ "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/object-inspect": {
+ "version": "1.13.4",
+ "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz",
+ "integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/on-finished": {
+ "version": "2.4.1",
+ "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz",
+ "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==",
+ "license": "MIT",
+ "dependencies": {
+ "ee-first": "1.1.1"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/parseurl": {
+ "version": "1.3.3",
+ "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz",
+ "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/path-to-regexp": {
+ "version": "0.1.12",
+ "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.12.tgz",
+ "integrity": "sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==",
+ "license": "MIT"
+ },
+ "node_modules/picomatch": {
+ "version": "2.3.1",
+ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
+ "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=8.6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/jonschlinkert"
+ }
+ },
+ "node_modules/proxy-addr": {
+ "version": "2.0.7",
+ "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz",
+ "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==",
+ "license": "MIT",
+ "dependencies": {
+ "forwarded": "0.2.0",
+ "ipaddr.js": "1.9.1"
+ },
+ "engines": {
+ "node": ">= 0.10"
+ }
+ },
+ "node_modules/pstree.remy": {
+ "version": "1.1.8",
+ "resolved": "https://registry.npmjs.org/pstree.remy/-/pstree.remy-1.1.8.tgz",
+ "integrity": "sha512-77DZwxQmxKnu3aR542U+X8FypNzbfJ+C5XQDk3uWjWxn6151aIMGthWYRXTqT1E5oJvg+ljaa2OJi+VfvCOQ8w==",
+ "license": "MIT"
+ },
+ "node_modules/qs": {
+ "version": "6.13.0",
+ "resolved": "https://registry.npmjs.org/qs/-/qs-6.13.0.tgz",
+ "integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "side-channel": "^1.0.6"
+ },
+ "engines": {
+ "node": ">=0.6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/range-parser": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz",
+ "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/raw-body": {
+ "version": "2.5.2",
+ "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz",
+ "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==",
+ "license": "MIT",
+ "dependencies": {
+ "bytes": "3.1.2",
+ "http-errors": "2.0.0",
+ "iconv-lite": "0.4.24",
+ "unpipe": "1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/readdirp": {
+ "version": "3.6.0",
+ "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz",
+ "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==",
+ "license": "MIT",
+ "dependencies": {
+ "picomatch": "^2.2.1"
+ },
+ "engines": {
+ "node": ">=8.10.0"
+ }
+ },
+ "node_modules/safe-buffer": {
+ "version": "5.2.1",
+ "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
+ "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/feross"
+ },
+ {
+ "type": "patreon",
+ "url": "https://www.patreon.com/feross"
+ },
+ {
+ "type": "consulting",
+ "url": "https://feross.org/support"
+ }
+ ]
+ },
+ "node_modules/safer-buffer": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
+ "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==",
+ "license": "MIT"
+ },
+ "node_modules/semver": {
+ "version": "7.7.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.1.tgz",
+ "integrity": "sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA==",
+ "license": "ISC",
+ "bin": {
+ "semver": "bin/semver.js"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/send": {
+ "version": "0.19.0",
+ "resolved": "https://registry.npmjs.org/send/-/send-0.19.0.tgz",
+ "integrity": "sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==",
+ "license": "MIT",
+ "dependencies": {
+ "debug": "2.6.9",
+ "depd": "2.0.0",
+ "destroy": "1.2.0",
+ "encodeurl": "~1.0.2",
+ "escape-html": "~1.0.3",
+ "etag": "~1.8.1",
+ "fresh": "0.5.2",
+ "http-errors": "2.0.0",
+ "mime": "1.6.0",
+ "ms": "2.1.3",
+ "on-finished": "2.4.1",
+ "range-parser": "~1.2.1",
+ "statuses": "2.0.1"
+ },
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/send/node_modules/debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "license": "MIT",
+ "dependencies": {
+ "ms": "2.0.0"
+ }
+ },
+ "node_modules/send/node_modules/debug/node_modules/ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==",
+ "license": "MIT"
+ },
+ "node_modules/send/node_modules/encodeurl": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz",
+ "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/serve-static": {
+ "version": "1.16.2",
+ "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.16.2.tgz",
+ "integrity": "sha512-VqpjJZKadQB/PEbEwvFdO43Ax5dFBZ2UECszz8bQ7pi7wt//PWe1P6MN7eCnjsatYtBT6EuiClbjSWP2WrIoTw==",
+ "license": "MIT",
+ "dependencies": {
+ "encodeurl": "~2.0.0",
+ "escape-html": "~1.0.3",
+ "parseurl": "~1.3.3",
+ "send": "0.19.0"
+ },
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/setprototypeof": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz",
+ "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==",
+ "license": "ISC"
+ },
+ "node_modules/side-channel": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz",
+ "integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==",
+ "license": "MIT",
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "object-inspect": "^1.13.3",
+ "side-channel-list": "^1.0.0",
+ "side-channel-map": "^1.0.1",
+ "side-channel-weakmap": "^1.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/side-channel-list": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz",
+ "integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==",
+ "license": "MIT",
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "object-inspect": "^1.13.3"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/side-channel-map": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz",
+ "integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==",
+ "license": "MIT",
+ "dependencies": {
+ "call-bound": "^1.0.2",
+ "es-errors": "^1.3.0",
+ "get-intrinsic": "^1.2.5",
+ "object-inspect": "^1.13.3"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/side-channel-weakmap": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz",
+ "integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==",
+ "license": "MIT",
+ "dependencies": {
+ "call-bound": "^1.0.2",
+ "es-errors": "^1.3.0",
+ "get-intrinsic": "^1.2.5",
+ "object-inspect": "^1.13.3",
+ "side-channel-map": "^1.0.1"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/simple-update-notifier": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/simple-update-notifier/-/simple-update-notifier-2.0.0.tgz",
+ "integrity": "sha512-a2B9Y0KlNXl9u/vsW6sTIu9vGEpfKu2wRV6l1H3XEas/0gUIzGzBoP/IouTcUQbm9JWZLH3COxyn03TYlFax6w==",
+ "license": "MIT",
+ "dependencies": {
+ "semver": "^7.5.3"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/statuses": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz",
+ "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/supports-color": {
+ "version": "5.5.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz",
+ "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==",
+ "license": "MIT",
+ "dependencies": {
+ "has-flag": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/to-regex-range": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
+ "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
+ "license": "MIT",
+ "dependencies": {
+ "is-number": "^7.0.0"
+ },
+ "engines": {
+ "node": ">=8.0"
+ }
+ },
+ "node_modules/toidentifier": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz",
+ "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.6"
+ }
+ },
+ "node_modules/touch": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/touch/-/touch-3.1.1.tgz",
+ "integrity": "sha512-r0eojU4bI8MnHr8c5bNo7lJDdI2qXlWWJk6a9EAFG7vbhTjElYhBVS3/miuE0uOuoLdb8Mc/rVfsmm6eo5o9GA==",
+ "license": "ISC",
+ "bin": {
+ "nodetouch": "bin/nodetouch.js"
+ }
+ },
+ "node_modules/tr46": {
+ "version": "0.0.3",
+ "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz",
+ "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw=="
+ },
+ "node_modules/type-is": {
+ "version": "1.6.18",
+ "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz",
+ "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==",
+ "license": "MIT",
+ "dependencies": {
+ "media-typer": "0.3.0",
+ "mime-types": "~2.1.24"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/undefsafe": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/undefsafe/-/undefsafe-2.0.5.tgz",
+ "integrity": "sha512-WxONCrssBM8TSPRqN5EmsjVrsv4A8X12J4ArBiiayv3DyyG3ZlIg6yysuuSYdZsVz3TKcTg2fd//Ujd4CHV1iA==",
+ "license": "MIT"
+ },
+ "node_modules/unpipe": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz",
+ "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/utils-merge": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz",
+ "integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4.0"
+ }
+ },
+ "node_modules/uuid": {
+ "version": "9.0.1",
+ "resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.1.tgz",
+ "integrity": "sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==",
+ "funding": [
+ "https://github.com/sponsors/broofa",
+ "https://github.com/sponsors/ctavan"
+ ],
+ "bin": {
+ "uuid": "dist/bin/uuid"
+ }
+ },
+ "node_modules/vary": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz",
+ "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/webidl-conversions": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz",
+ "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ=="
+ },
+ "node_modules/whatwg-url": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz",
+ "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==",
+ "dependencies": {
+ "tr46": "~0.0.3",
+ "webidl-conversions": "^3.0.0"
+ }
+ },
+ "node_modules/ws": {
+ "version": "8.18.1",
+ "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.1.tgz",
+ "integrity": "sha512-RKW2aJZMXeMxVpnZ6bck+RswznaxmzdULiBr6KY7XkTnW8uvt0iT9H5DkHUChXrc+uurzwa0rVI16n/Xzjdz1w==",
+ "engines": {
+ "node": ">=10.0.0"
+ },
+ "peerDependencies": {
+ "bufferutil": "^4.0.1",
+ "utf-8-validate": ">=5.0.2"
+ },
+ "peerDependenciesMeta": {
+ "bufferutil": {
+ "optional": true
+ },
+ "utf-8-validate": {
+ "optional": true
+ }
+ }
+ }
+ }
+}
diff --git a/node_modules/@google/genai/LICENSE b/node_modules/@google/genai/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/node_modules/@google/genai/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/node_modules/@google/genai/README.md b/node_modules/@google/genai/README.md
new file mode 100644
index 0000000..7f0180f
--- /dev/null
+++ b/node_modules/@google/genai/README.md
@@ -0,0 +1,255 @@
+# Google Gen AI SDK for TypeScript and JavaScript
+
+[@google/genai on npm](https://www.npmjs.com/package/@google/genai)
+
+----------------------
+**Documentation:** https://googleapis.github.io/js-genai/
+
+----------------------
+
+The Google Gen AI JavaScript SDK is designed for
+TypeScript and JavaScript developers to build applications powered by Gemini. The SDK
+supports both the [Gemini Developer API](https://ai.google.dev/gemini-api/docs)
+and [Vertex AI](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/overview).
+
+The Google Gen AI SDK is designed to work with Gemini 2.0 features.
+
+> [!NOTE]
+> **SDK Preview:**
+> See: [Preview Launch](#preview-launch).
+
+> [!CAUTION]
+> **API Key Security:** Avoid exposing API keys in client-side code.
+> Use server-side implementations in production environments.
+
+
+## Prerequisites
+
+* Node.js version 18 or later
+
+## Installation
+
+To install the SDK, run the following command:
+
+```shell
+npm install @google/genai
+```
+
+## Quickstart
+
+The simplest way to get started is to use an API key from
+[Google AI Studio](https://aistudio.google.com/apikey):
+
+```typescript
+import {GoogleGenAI} from '@google/genai';
+const GEMINI_API_KEY = process.env.GEMINI_API_KEY;
+
+const ai = new GoogleGenAI({apiKey: GEMINI_API_KEY});
+
+async function main() {
+ const response = await ai.models.generateContent({
+ model: 'gemini-2.0-flash-001',
+ contents: 'Why is the sky blue?',
+ });
+ console.log(response.text);
+}
+
+main();
+```
+
+## Web quickstart
+
+The package contents are also available unzipped in the
+`package/` directory of the bucket, so an equivalent web example is:
+
+```html
+<!doctype html>
+<html lang="en">
+  <head>
+    <meta charset="utf-8" />
+    <title>Using My Package</title>
+  </head>
+  <body>
+    <!-- Reconstructed sketch: the original markup was lost. The import URL
+         below is an assumption; point it at wherever the unzipped package
+         contents are served from. -->
+    <script type="module">
+      import {GoogleGenAI} from './package/dist/web/index.mjs';
+
+      const ai = new GoogleGenAI({apiKey: 'GEMINI_API_KEY'});
+      const response = await ai.models.generateContent({
+        model: 'gemini-2.0-flash-001',
+        contents: 'Why is the sky blue?',
+      });
+      console.log(response.text);
+    </script>
+  </body>
+</html>
+```
+
+## Initialization
+
+The Google Gen AI SDK provides support for both the
+[Google AI Studio](https://ai.google.dev/gemini-api/docs) and
+[Vertex AI](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/overview)
+ implementations of the Gemini API.
+
+### Gemini Developer API
+
+For server-side applications, initialize using an API key, which can
+be acquired from [Google AI Studio](https://aistudio.google.com/apikey):
+
+```typescript
+import { GoogleGenAI } from '@google/genai';
+const ai = new GoogleGenAI({apiKey: 'GEMINI_API_KEY'});
+```
+
+#### Browser
+
+> [!CAUTION]
+> **API Key Security:** Avoid exposing API keys in client-side code.
+> Use server-side implementations in production environments.
+
+In the browser the initialization code is identical:
+
+
+```typescript
+import { GoogleGenAI } from '@google/genai';
+const ai = new GoogleGenAI({apiKey: 'GEMINI_API_KEY'});
+```
+
+### Vertex AI
+
+Sample code for Vertex AI initialization:
+
+```typescript
+import { GoogleGenAI } from '@google/genai';
+
+const ai = new GoogleGenAI({
+ vertexai: true,
+ project: 'your_project',
+ location: 'your_location',
+});
+```
+
+## GoogleGenAI overview
+
+All API features are accessed through an instance of the `GoogleGenAI` class.
+The submodules bundle together related API methods:
+
+- [`ai.models`](https://googleapis.github.io/js-genai/classes/models.Models.html):
+ Use `models` to query models (`generateContent`, `generateImages`, ...), or
+ examine their metadata.
+- [`ai.caches`](https://googleapis.github.io/js-genai/classes/caches.Caches.html):
+ Create and manage `caches` to reduce costs when repeatedly using the same
+ large prompt prefix.
+- [`ai.chats`](https://googleapis.github.io/js-genai/classes/chats.Chats.html):
+  Create local stateful `chat` objects to simplify multi-turn interactions
+  (see the sketch after this list).
+- [`ai.files`](https://googleapis.github.io/js-genai/classes/files.Files.html):
+ Upload `files` to the API and reference them in your prompts.
+ This reduces bandwidth if you use a file many times, and handles files too
+ large to fit inline with your prompt.
+- [`ai.live`](https://googleapis.github.io/js-genai/classes/live.Live.html):
+  Start a `live` session for real-time interaction; allows text + audio +
+  video input, and text or audio output.
+
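+As a quick illustration of how the submodules fit together, here is a minimal
+sketch of a multi-turn exchange through `ai.chats` (the model name is an
+assumption; use any model available to your account):
+
+```typescript
+import {GoogleGenAI} from '@google/genai';
+
+const ai = new GoogleGenAI({apiKey: process.env.GEMINI_API_KEY});
+
+async function main() {
+  // The chat object keeps the conversation history locally and resends it
+  // with every turn.
+  const chat = ai.chats.create({model: 'gemini-2.0-flash-001'});
+
+  const first = await chat.sendMessage({message: 'Why is the sky blue?'});
+  console.log(first.text);
+
+  // The follow-up relies on the history held by the chat object.
+  const second = await chat.sendMessage({
+    message: 'Summarize that in one sentence.',
+  });
+  console.log(second.text);
+}
+
+main();
+```
+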
+## Samples
+
+More samples can be found in the
+[github samples directory](https://github.com/googleapis/js-genai/tree/main/sdk-samples).
+
+
+### Streaming
+
+For quicker, more responsive API interactions, use the
+`generateContentStream` method, which yields chunks as they're generated:
+
+```typescript
+import {GoogleGenAI} from '@google/genai';
+const GEMINI_API_KEY = process.env.GEMINI_API_KEY;
+
+const ai = new GoogleGenAI({apiKey: GEMINI_API_KEY});
+
+async function main() {
+ const response = await ai.models.generateContentStream({
+ model: 'gemini-2.0-flash-001',
+ contents: 'Write a 100-word poem.',
+ });
+ for await (const chunk of response) {
+ console.log(chunk.text);
+ }
+}
+
+main();
+```
+
+### Function Calling
+
+To let Gemini interact with external systems, you can provide
+`functionDeclaration` objects as `tools`. Using these tools is a 4-step
+process:
+
+1. **Declare the function name, description, and parameters**
+2. **Call `generateContent` with function calling enabled**
+3. **Use the returned `FunctionCall` parameters to call your actual function**
+4. **Send the result back to the model (with history, easier in `ai.chat`)
+   as a `FunctionResponse`** (a sketch of this final round trip follows the
+   example below)
+
+```typescript
+import {GoogleGenAI, FunctionCallingConfigMode, FunctionDeclaration, Type} from '@google/genai';
+const GEMINI_API_KEY = process.env.GEMINI_API_KEY;
+
+async function main() {
+ const controlLightDeclaration: FunctionDeclaration = {
+ name: 'controlLight',
+ parameters: {
+ type: Type.OBJECT,
+ description: 'Set the brightness and color temperature of a room light.',
+ properties: {
+ brightness: {
+ type: Type.NUMBER,
+ description:
+ 'Light level from 0 to 100. Zero is off and 100 is full brightness.',
+ },
+ colorTemperature: {
+ type: Type.STRING,
+ description:
+ 'Color temperature of the light fixture which can be `daylight`, `cool`, or `warm`.',
+ },
+ },
+ required: ['brightness', 'colorTemperature'],
+ },
+ };
+
+ const ai = new GoogleGenAI({apiKey: GEMINI_API_KEY});
+ const response = await ai.models.generateContent({
+ model: 'gemini-2.0-flash-001',
+ contents: 'Dim the lights so the room feels cozy and warm.',
+ config: {
+ toolConfig: {
+ functionCallingConfig: {
+ // Force it to call any function
+ mode: FunctionCallingConfigMode.ANY,
+ allowedFunctionNames: ['controlLight'],
+ }
+ },
+ tools: [{functionDeclarations: [controlLightDeclaration]}]
+ }
+ });
+
+ console.log(response.functionCalls);
+}
+
+main();
+```
+
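+Step 4 is not shown above. A minimal sketch of the round trip, assuming a
+hypothetical `setLightValues` helper that actually talks to your light
+controller (everything else reuses names from the example):
+
+```typescript
+// Inside main(), continuing from the example above.
+const functionCall = response.functionCalls?.[0];
+if (functionCall) {
+  // Hypothetical application code; replace with your real implementation.
+  const result = await setLightValues(functionCall.args);
+
+  // Replay the conversation and append the function result so the model can
+  // produce a final natural-language answer.
+  const followUp = await ai.models.generateContent({
+    model: 'gemini-2.0-flash-001',
+    contents: [
+      {role: 'user', parts: [{text: 'Dim the lights so the room feels cozy and warm.'}]},
+      {role: 'model', parts: [{functionCall}]},
+      {role: 'user', parts: [{functionResponse: {
+        name: functionCall.name,
+        response: {output: result},
+      }}]},
+    ],
+    config: {tools: [{functionDeclarations: [controlLightDeclaration]}]},
+  });
+  console.log(followUp.text);
+}
+```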
+
+## Preview Launch
+
+The SDK is currently in a preview launch stage; per [Google's launch stages](https://cloud.google.com/products?hl=en#section-22), this means:
+
+> At Preview, products or features are ready for testing by customers. Preview offerings are often publicly announced, but are not necessarily feature-complete, and no SLAs or technical support commitments are provided for these. Unless stated otherwise by Google, Preview offerings are intended for use in test environments only. The average Preview stage lasts about six months.
+
diff --git a/node_modules/@google/genai/dist/genai.d.ts b/node_modules/@google/genai/dist/genai.d.ts
new file mode 100644
index 0000000..c2ced80
--- /dev/null
+++ b/node_modules/@google/genai/dist/genai.d.ts
@@ -0,0 +1,3658 @@
+import { GoogleAuthOptions } from 'google-auth-library';
+
+/**
+ * The ApiClient class is used to send requests to the Gemini API or Vertex AI
+ * endpoints.
+ */
+declare class ApiClient {
+ readonly clientOptions: ApiClientInitOptions;
+ constructor(opts: ApiClientInitOptions);
+ isVertexAI(): boolean;
+ getProject(): string | undefined;
+ getLocation(): string | undefined;
+ getApiVersion(): string;
+ getBaseUrl(): string;
+ getRequestUrl(): string;
+ getHeaders(): Record<string, string>;
+ private getRequestUrlInternal;
+ getBaseResourcePath(): string;
+ getApiKey(): string | undefined;
+ getWebsocketBaseUrl(): string;
+ setBaseUrl(url: string): void;
+ private constructUrl;
+ request(request: HttpRequest): Promise<HttpResponse>;
+ private patchHttpOptions;
+ requestStream(request: HttpRequest): Promise<AsyncGenerator<HttpResponse>>;
+ private includeExtraHttpOptionsToRequestInit;
+ private unaryApiCall;
+ private streamApiCall;
+ processStreamResponse(response: Response): AsyncGenerator;
+ private apiCall;
+ getDefaultHeaders(): Record<string, string>;
+ private getHeadersInternal;
+ /**
+ * Uploads a file asynchronously using Gemini API only, this is not supported
+ * in Vertex AI.
+ *
+ * @param file The string path to the file to be uploaded or a Blob object.
+ * @param config Optional parameters specified in the `UploadFileConfig`
+ * interface. @see {@link UploadFileConfig}
+ * @return A promise that resolves to a `File` object.
+ * @throws An error if called on a Vertex AI client.
+ * @throws An error if the `mimeType` is not provided and cannot be inferred.
+ */
+ uploadFile(file: string | Blob, config?: UploadFileConfig): Promise<File_2>;
+ private fetchUploadUrl;
+}
+
+/**
+ * Options for initializing the ApiClient. The ApiClient uses the parameters
+ * for authentication purposes as well as to infer whether the SDK should send
+ * the request to Vertex AI or the Gemini API.
+ */
+declare interface ApiClientInitOptions {
+ /**
+ * The object used for adding authentication headers to API requests.
+ */
+ auth: Auth;
+ /**
+ * The uploader to use for uploading files. This field is required for
+ * creating a client; it will be set through the Node_client or Web_client.
+ */
+ uploader: Uploader;
+ /**
+ * Optional. The Google Cloud project ID for Vertex AI users.
+ * It is not the numeric project name.
+ * If not provided, SDK will try to resolve it from runtime environment.
+ */
+ project?: string;
+ /**
+ * Optional. The Google Cloud project location for Vertex AI users.
+ * If not provided, SDK will try to resolve it from runtime environment.
+ */
+ location?: string;
+ /**
+ * The API Key. This is required for Gemini API users.
+ */
+ apiKey?: string;
+ /**
+ * Optional. Set to true if you intend to call Vertex AI endpoints.
+ * If unset, default SDK behavior is to call Gemini API.
+ */
+ vertexai?: boolean;
+ /**
+ * Optional. The API version for the endpoint.
+ * If unset, SDK will choose a default api version.
+ */
+ apiVersion?: string;
+ /**
+ * Optional. A set of customizable configuration for HTTP requests.
+ */
+ httpOptions?: HttpOptions;
+ /**
+ * Optional. An extra string to append at the end of the User-Agent header.
+ *
+ * This can be used to, e.g., specify the runtime and its version.
+ */
+ userAgentExtra?: string;
+}
+
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+/**
+ * The Auth interface is used to authenticate with the API service.
+ */
+declare interface Auth {
+ /**
+ * Sets the headers needed to authenticate with the API service.
+ *
+ * @param headers - The Headers object that will be updated with the authentication headers.
+ */
+ addAuthHeaders(headers: Headers): Promise<void>;
+}
+
+declare class BaseModule {
+}
+
+/** Content blob. */
+declare interface Blob_2 {
+ /** Required. Raw bytes. */
+ data?: string;
+ /** Required. The IANA standard MIME type of the source data. */
+ mimeType?: string;
+}
+export { Blob_2 as Blob }
+
+export declare enum BlockedReason {
+ BLOCKED_REASON_UNSPECIFIED = "BLOCKED_REASON_UNSPECIFIED",
+ SAFETY = "SAFETY",
+ OTHER = "OTHER",
+ BLOCKLIST = "BLOCKLIST",
+ PROHIBITED_CONTENT = "PROHIBITED_CONTENT"
+}
+
+/** A resource used in LLM queries for users to explicitly specify what to cache. */
+export declare interface CachedContent {
+ /** The server-generated resource name of the cached content. */
+ name?: string;
+ /** The user-generated meaningful display name of the cached content. */
+ displayName?: string;
+ /** The name of the publisher model to use for cached content. */
+ model?: string;
+ /** Creation time of the cache entry. */
+ createTime?: string;
+ /** When the cache entry was last updated in UTC time. */
+ updateTime?: string;
+ /** Expiration time of the cached content. */
+ expireTime?: string;
+ /** Metadata on the usage of the cached content. */
+ usageMetadata?: CachedContentUsageMetadata;
+}
+
+/** Metadata on the usage of the cached content. */
+export declare interface CachedContentUsageMetadata {
+ /** Duration of audio in seconds. */
+ audioDurationSeconds?: number;
+ /** Number of images. */
+ imageCount?: number;
+ /** Number of text characters. */
+ textCount?: number;
+ /** Total number of tokens that the cached content consumes. */
+ totalTokenCount?: number;
+ /** Duration of video in seconds. */
+ videoDurationSeconds?: number;
+}
+
+export declare class Caches extends BaseModule {
+ private readonly apiClient;
+ constructor(apiClient: ApiClient);
+ /**
+ * Lists cached content configurations.
+ *
+ * @param params - The parameters for the list request.
+ * @return The paginated results of the list of cached contents.
+ *
+ * @example
+ * ```ts
+ * const cachedContents = await ai.caches.list({config: {'pageSize': 2}});
+ * for (const cachedContent of cachedContents) {
+ * console.log(cachedContent);
+ * }
+ * ```
+ */
+ list: (params?: types.ListCachedContentsParameters) => Promise<Pager<types.CachedContent>>;
+ /**
+ * Creates a cached contents resource.
+ *
+ * @remarks
+ * Context caching is only supported for specific models. See [Gemini
+ * Developer API reference](https://ai.google.dev/gemini-api/docs/caching?lang=node/context-cac)
+ * and [Vertex AI reference](https://cloud.google.com/vertex-ai/generative-ai/docs/context-cache/context-cache-overview#supported_models)
+ * for more information.
+ *
+ * @param params - The parameters for the create request.
+ * @return The created cached content.
+ *
+ * @example
+ * ```ts
+ * const contents = ...; // Initialize the content to cache.
+ * const response = await ai.caches.create({
+ * model: 'gemini-1.5-flash',
+ * config: {
+ * 'contents': contents,
+ * 'displayName': 'test cache',
+ * 'systemInstruction': 'What is the sum of the two pdfs?',
+ * 'ttl': '86400s',
+ * }
+ * });
+ * ```
+ */
+ create(params: types.CreateCachedContentParameters): Promise<types.CachedContent>;
+ /**
+ * Gets cached content configurations.
+ *
+ * @param params - The parameters for the get request.
+ * @return The cached content.
+ *
+ * @example
+ * ```ts
+ * await ai.caches.get({name: 'gemini-1.5-flash'});
+ * ```
+ */
+ get(params: types.GetCachedContentParameters): Promise<types.CachedContent>;
+ /**
+ * Deletes cached content.
+ *
+ * @param params - The parameters for the delete request.
+ * @return The empty response returned by the API.
+ *
+ * @example
+ * ```ts
+ * await ai.caches.delete({name: 'gemini-1.5-flash'});
+ * ```
+ */
+ delete(params: types.DeleteCachedContentParameters): Promise<types.DeleteCachedContentResponse>;
+ /**
+ * Updates cached content configurations.
+ *
+ * @param params - The parameters for the update request.
+ * @return The updated cached content.
+ *
+ * @example
+ * ```ts
+ * const response = await ai.caches.update({
+ * name: 'gemini-1.5-flash',
+ * config: {'ttl': '7600s'}
+ * });
+ * ```
+ */
+ update(params: types.UpdateCachedContentParameters): Promise<types.CachedContent>;
+ private listInternal;
+}
+
+/** A response candidate generated from the model. */
+export declare interface Candidate {
+ /** Contains the multi-part content of the response.
+ */
+ content?: Content;
+ /** Source attribution of the generated content.
+ */
+ citationMetadata?: CitationMetadata;
+ /** Describes the reason the model stopped generating tokens.
+ */
+ finishMessage?: string;
+ /** Number of tokens for this candidate.
+ */
+ tokenCount?: number;
+ /** The reason why the model stopped generating tokens.
+ If empty, the model has not stopped generating the tokens.
+ */
+ finishReason?: FinishReason;
+ /** Output only. Average log probability score of the candidate. */
+ avgLogprobs?: number;
+ /** Output only. Metadata specifies sources used to ground generated content. */
+ groundingMetadata?: GroundingMetadata;
+ /** Output only. Index of the candidate. */
+ index?: number;
+ /** Output only. Log-likelihood scores for the response tokens and top tokens */
+ logprobsResult?: LogprobsResult;
+ /** Output only. List of ratings for the safety of a response candidate. There is at most one rating per category. */
+ safetyRatings?: SafetyRating[];
+}
+
+/**
+ * Chat session that enables sending messages to the model with previous
+ * conversation context.
+ *
+ * @remarks
+ * The session maintains all the turns between user and model.
+ */
+export declare class Chat {
+ private readonly apiClient;
+ private readonly modelsModule;
+ private readonly model;
+ private readonly config;
+ private history;
+ private sendPromise;
+ constructor(apiClient: ApiClient, modelsModule: Models, model: string, config?: types.GenerateContentConfig, history?: types.Content[]);
+ /**
+ * Sends a message to the model and returns the response.
+ *
+ * @remarks
+ * This method will wait for the previous message to be processed before
+ * sending the next message.
+ *
+ * @see {@link Chat#sendMessageStream} for streaming method.
+ * @param params - parameters for sending messages within a chat session.
+ * @returns The model's response.
+ *
+ * @example
+ * ```ts
+ * const chat = ai.chats.create({model: 'gemini-2.0-flash'});
+ * const response = await chat.sendMessage({
+ * message: 'Why is the sky blue?'
+ * });
+ * console.log(response.text);
+ * ```
+ */
+ sendMessage(params: types.SendMessageParameters): Promise<types.GenerateContentResponse>;
+ /**
+ * Sends a message to the model and returns the response in chunks.
+ *
+ * @remarks
+ * This method will wait for the previous message to be processed before
+ * sending the next message.
+ *
+ * @see {@link Chat#sendMessage} for non-streaming method.
+ * @param params - parameters for sending the message.
+ * @return The model's response.
+ *
+ * @example
+ * ```ts
+ * const chat = ai.chats.create({model: 'gemini-2.0-flash'});
+ * const response = await chat.sendMessageStream({
+ * message: 'Why is the sky blue?'
+ * });
+ * for await (const chunk of response) {
+ * console.log(chunk.text);
+ * }
+ * ```
+ */
+ sendMessageStream(params: types.SendMessageParameters): Promise<AsyncGenerator<types.GenerateContentResponse>>;
+ /**
+ * Returns the chat history.
+ *
+ * @remarks
+ * The history is a list of contents alternating between user and model.
+ *
+ * There are two types of history:
+ * - The `curated history` contains only the valid turns between user and
+ * model, which will be included in the subsequent requests sent to the model.
+ * - The `comprehensive history` contains all turns, including invalid or
+ * empty model outputs, providing a complete record of the history.
+ *
+ * The history is updated after receiving the response from the model; for
+ * streaming responses, this means after receiving the last chunk of the response.
+ *
+ * The `comprehensive history` is returned by default. To get the `curated
+ * history`, set the `curated` parameter to `true`.
+ *
+ * @param curated - whether to return the curated history or the comprehensive
+ * history.
+ * @return History contents alternating between user and model for the entire
+ * chat session.
+ */
+ getHistory(curated?: boolean): types.Content[];
+ private processStreamResponse;
+ private recordHistory;
+}
+
+/**
+ * A utility class to create a chat session.
+ */
+export declare class Chats {
+ private readonly modelsModule;
+ private readonly apiClient;
+ constructor(modelsModule: Models, apiClient: ApiClient);
+ /**
+ * Creates a new chat session.
+ *
+ * @remarks
+ * The config in the params will be used for all requests within the chat
+ * session unless overridden by a per-request `config` in
+ * @see {@link types.SendMessageParameters#config}.
+ *
+ * @param params - Parameters for creating a chat session.
+ * @returns A new chat session.
+ *
+ * @example
+ * ```ts
+ * const chat = ai.chats.create({
+ *   model: 'gemini-2.0-flash',
+ * config: {
+ * temperature: 0.5,
+ * maxOutputTokens: 1024,
+ * }
+ * });
+ * ```
+ */
+ create(params: types.CreateChatParameters): Chat;
+}
+
+/** Source attributions for content. */
+export declare interface Citation {
+ /** Output only. End index into the content. */
+ endIndex?: number;
+ /** Output only. License of the attribution. */
+ license?: string;
+ /** Output only. Publication date of the attribution. */
+ publicationDate?: GoogleTypeDate;
+ /** Output only. Start index into the content. */
+ startIndex?: number;
+ /** Output only. Title of the attribution. */
+ title?: string;
+ /** Output only. Url reference of the attribution. */
+ uri?: string;
+}
+
+/** Citation information when the model quotes another source. */
+export declare interface CitationMetadata {
+ /** Contains citation information when the model directly quotes, at
+ length, from another source. Can include traditional websites and code
+ repositories.
+ */
+ citations?: Citation[];
+}
+
+/** Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. */
+export declare interface CodeExecutionResult {
+ /** Required. Outcome of the code execution. */
+ outcome?: Outcome;
+ /** Optional. Contains stdout when code execution is successful, stderr or other description otherwise. */
+ output?: string;
+}
+
+declare namespace common {
+ export {
+ formatMap,
+ setValueByPath,
+ getValueByPath,
+ BaseModule,
+ UploadFileParameters
+ }
+}
+
+/** Optional parameters for computing tokens. */
+export declare interface ComputeTokensConfig {
+ /** Used to override HTTP request options. */
+ httpOptions?: HttpOptions;
+}
+
+/** Parameters for computing tokens. */
+export declare interface ComputeTokensParameters {
+ /** ID of the model to use. For a list of models, see Google models. */
+ model: string;
+ /** Input content. */
+ contents: ContentListUnion;
+ /** Optional parameters for the request.
+ */
+ config?: ComputeTokensConfig;
+}
+
+/** Response for computing tokens. */
+export declare class ComputeTokensResponse {
+ /** Lists of tokens info from the input. A ComputeTokensRequest could have multiple instances with a prompt in each instance. We also need to return lists of tokens info for the request with multiple instances. */
+ tokensInfo?: TokensInfo[];
+}
+
+/** Contains the multi-part content of a message. */
+export declare interface Content {
+ /** List of parts that constitute a single message. Each part may have
+ a different IANA MIME type. */
+ parts?: Part[];
+ /** Optional. The producer of the content. Must be either 'user' or
+ 'model'. Useful to set for multi-turn conversations, otherwise can be
+ left blank or unset. If role is not specified, SDK will determine the role. */
+ role?: string;
+}
+
+/** The embedding generated from an input content. */
+export declare interface ContentEmbedding {
+ /** A list of floats representing an embedding.
+ */
+ values?: number[];
+ /** Vertex API only. Statistics of the input text associated with this
+ embedding.
+ */
+ statistics?: ContentEmbeddingStatistics;
+}
+
+/** Statistics of the input text associated with the result of content embedding. */
+export declare interface ContentEmbeddingStatistics {
+ /** Vertex API only. If the input text was truncated due to having
+ a length longer than the allowed maximum input.
+ */
+ truncated?: boolean;
+ /** Vertex API only. Number of tokens of the input text.
+ */
+ tokenCount?: number;
+}
+
+export declare type ContentListUnion = ContentUnion[] | ContentUnion;
+
+export declare type ContentUnion = Content | PartUnion[] | PartUnion;
+
+/** Configuration for a Control reference image. */
+export declare interface ControlReferenceConfig {
+ /** The type of control reference image to use. */
+ controlType?: ControlReferenceType;
+ /** Defaults to False. When set to True, the control image will be
+ computed by the model based on the control type. When set to False,
+ the control image must be provided by the user. */
+ enableControlImageComputation?: boolean;
+}
+
+/** A control reference image.
+
+ The image of the control reference image is either a control image provided
+ by the user, or a regular image which the backend will use to generate a
+ control image of. In the case of the latter, the
+ enable_control_image_computation field in the config should be set to True.
+
+ A control image is an image that represents a sketch image of areas for the
+ model to fill in based on the prompt.
+ */
+export declare interface ControlReferenceImage {
+ /** The reference image for the editing operation. */
+ referenceImage?: Image_2;
+ /** The id of the reference image. */
+ referenceId?: number;
+ /** The type of the reference image. Only set by the SDK. */
+ referenceType?: string;
+ /** Configuration for the control reference image. */
+ config?: ControlReferenceConfig;
+}
+
+export declare enum ControlReferenceType {
+ CONTROL_TYPE_DEFAULT = "CONTROL_TYPE_DEFAULT",
+ CONTROL_TYPE_CANNY = "CONTROL_TYPE_CANNY",
+ CONTROL_TYPE_SCRIBBLE = "CONTROL_TYPE_SCRIBBLE",
+ CONTROL_TYPE_FACE_MESH = "CONTROL_TYPE_FACE_MESH"
+}
+
+/** Config for the count_tokens method. */
+export declare interface CountTokensConfig {
+ /** Used to override HTTP request options. */
+ httpOptions?: HttpOptions;
+ /** Instructions for the model to steer it toward better performance.
+ */
+ systemInstruction?: ContentUnion;
+ /** Code that enables the system to interact with external systems to
+ perform an action outside of the knowledge and scope of the model.
+ */
+ tools?: Tool[];
+ /** Configuration that the model uses to generate the response. Not
+ supported by the Gemini Developer API.
+ */
+ generationConfig?: GenerationConfig;
+}
+
+/** Parameters for counting tokens. */
+export declare interface CountTokensParameters {
+ /** ID of the model to use. For a list of models, see Google models. */
+ model: string;
+ /** Input content. */
+ contents: ContentListUnion;
+ /** Configuration for counting tokens. */
+ config?: CountTokensConfig;
+}
+
+/** Response for counting tokens. */
+export declare class CountTokensResponse {
+ /** Total number of tokens. */
+ totalTokens?: number;
+ /** Number of tokens in the cached part of the prompt (the cached content). */
+ cachedContentTokenCount?: number;
+}
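+
+// A minimal usage sketch for token counting (not part of the generated
+// declarations; the model name is an assumption):
+//
+//   const countResponse = await ai.models.countTokens({
+//     model: 'gemini-2.0-flash-001',
+//     contents: 'Why is the sky blue?',
+//   });
+//   console.log(countResponse.totalTokens);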
+
+/** Optional configuration for cached content creation. */
+export declare interface CreateCachedContentConfig {
+ /** Used to override HTTP request options. */
+ httpOptions?: HttpOptions;
+ /** The TTL for this resource. The expiration time is computed: now + TTL. It is a duration string, with up to nine fractional digits, terminated by 's'. Example: "3.5s". */
+ ttl?: string;
+ /** Timestamp of when this resource is considered expired. Uses RFC 3339 format, Example: 2014-10-02T15:01:23Z. */
+ expireTime?: string;
+ /** The user-generated meaningful display name of the cached content.
+ */
+ displayName?: string;
+ /** The content to cache.
+ */
+ contents?: ContentListUnion;
+ /** Developer set system instruction.
+ */
+ systemInstruction?: ContentUnion;
+ /** A list of `Tools` the model may use to generate the next response.
+ */
+ tools?: Tool[];
+ /** Configuration for the tools to use. This config is shared for all tools.
+ */
+ toolConfig?: ToolConfig;
+}
+
+/** Parameters for caches.create method. */
+export declare interface CreateCachedContentParameters {
+ /** ID of the model to use. Example: gemini-1.5-flash */
+ model: string;
+ /** Configuration that contains optional parameters.
+ */
+ config?: CreateCachedContentConfig;
+}
+
+/** Parameters for initializing a new chat session.
+
+ These parameters are used when creating a chat session with the
+ `chats.create()` method.
+ */
+export declare interface CreateChatParameters {
+ /** The name of the model to use for the chat session.
+
+ For example: 'gemini-2.0-flash', 'gemini-1.5-pro', etc. See the Gemini API
+ docs to find the available models.
+ */
+ model: string;
+ /** Config for the entire chat session.
+
+ This config applies to all requests within the session
+ unless overridden by a per-request `config` in `SendMessageParameters`.
+ */
+ config?: GenerateContentConfig;
+ /** The initial conversation history for the chat session.
+
+ This allows you to start the chat with a pre-existing history. The history
+ must be a list of `Content` alternating between 'user' and 'model' roles.
+ It should start with a 'user' message.
+ */
+ history?: Content[];
+}
+
+/** Used to override the default configuration. */
+export declare interface CreateFileConfig {
+ /** Used to override HTTP request options. */
+ httpOptions?: HttpOptions;
+}
+
+/** Generates the parameters for the private _create method. */
+export declare interface CreateFileParameters {
+ /** The file to be uploaded.
+ mime_type: (Required) The MIME type of the file. Must be provided.
+ name: (Optional) The name of the file in the destination (e.g.
+ 'files/sample-image').
+ display_name: (Optional) The display name of the file.
+ */
+ file: File_2;
+ /** Used to override the default configuration. */
+ config?: CreateFileConfig;
+}
+
+/** Response for the create file method. */
+export declare class CreateFileResponse {
+ /** Used to retain the full HTTP response. */
+ sdkHttpResponse?: HttpResponse;
+}
+
+/**
+ * Creates a `Content` object with a model role from a `PartListUnion` object or `string`.
+ */
+export declare function createModelContent(partOrString: PartListUnion | string): Content;
+
+/**
+ * Creates a `Part` object from a `base64` `string`.
+ */
+export declare function createPartFromBase64(data: string, mimeType: string): Part;
+
+/**
+ * Creates a `Part` object from the `outcome` and `output` of a `CodeExecutionResult` object.
+ */
+export declare function createPartFromCodeExecutionResult(outcome: Outcome, output: string): Part;
+
+/**
+ * Creates a `Part` object from the `code` and `language` of an `ExecutableCode` object.
+ */
+export declare function createPartFromExecutableCode(code: string, language: Language): Part;
+
+/**
+ * Creates a `Part` object from a `FunctionCall` object.
+ */
+export declare function createPartFromFunctionCall(name: string, args: Record<string, unknown>): Part;
+
+/**
+ * Creates a `Part` object from a `FunctionResponse` object.
+ */
+export declare function createPartFromFunctionResponse(id: string, name: string, response: Record<string, unknown>): Part;
+
+/**
+ * Creates a `Part` object from a `text` string.
+ */
+export declare function createPartFromText(text: string): Part;
+
+/**
+ * Creates a `Part` object from a `URI` string.
+ */
+export declare function createPartFromUri(uri: string, mimeType: string): Part;
+
+/**
+ * Creates a `Content` object with a user role from a `PartListUnion` object or `string`.
+ */
+export declare function createUserContent(partOrString: PartListUnion | string): Content;
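+
+// A minimal sketch combining the helpers above into a multimodal request
+// (not part of the generated declarations; `imageBase64` is assumed to hold
+// base64-encoded PNG data):
+//
+//   const content = createUserContent([
+//     createPartFromText('Describe this image:'),
+//     createPartFromBase64(imageBase64, 'image/png'),
+//   ]);
+//   const response = await ai.models.generateContent({
+//     model: 'gemini-2.0-flash-001',
+//     contents: content,
+//   });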
+
+/** Optional parameters for caches.delete method. */
+export declare interface DeleteCachedContentConfig {
+ /** Used to override HTTP request options. */
+ httpOptions?: HttpOptions;
+}
+
+/** Parameters for caches.delete method. */
+export declare interface DeleteCachedContentParameters {
+ /** The server-generated resource name of the cached content.
+ */
+ name: string;
+ /** Optional parameters for the request.
+ */
+ config?: DeleteCachedContentConfig;
+}
+
+/** Empty response for caches.delete method. */
+export declare class DeleteCachedContentResponse {
+}
+
+/** Used to override the default configuration. */
+export declare interface DeleteFileConfig {
+ /** Used to override HTTP request options. */
+ httpOptions?: HttpOptions;
+}
+
+/** Generates the parameters for the get method. */
+export declare interface DeleteFileParameters {
+ /** The name identifier for the file to be deleted. */
+ name: string;
+ /** Used to override the default configuration. */
+ config?: DeleteFileConfig;
+}
+
+/** Response for the delete file method. */
+export declare class DeleteFileResponse {
+}
+
+/** Used to override the default configuration. */
+export declare interface DownloadFileConfig {
+ /** Used to override HTTP request options. */
+ httpOptions?: HttpOptions;
+}
+
+/** Describes the options to customize dynamic retrieval. */
+export declare interface DynamicRetrievalConfig {
+ /** The mode of the predictor to be used in dynamic retrieval. */
+ mode?: DynamicRetrievalConfigMode;
+ /** Optional. The threshold to be used in dynamic retrieval. If not set, a system default value is used. */
+ dynamicThreshold?: number;
+}
+
+export declare enum DynamicRetrievalConfigMode {
+ MODE_UNSPECIFIED = "MODE_UNSPECIFIED",
+ MODE_DYNAMIC = "MODE_DYNAMIC"
+}
+
+export declare interface EmbedContentConfig {
+ /** Used to override HTTP request options. */
+ httpOptions?: HttpOptions;
+ /** Type of task for which the embedding will be used.
+ */
+ taskType?: string;
+ /** Title for the text. Only applicable when TaskType is
+ `RETRIEVAL_DOCUMENT`.
+ */
+ title?: string;
+ /** Reduced dimension for the output embedding. If set,
+ excessive values in the output embedding are truncated from the end.
+ Supported by newer models since 2024 only. You cannot set this value if
+ using the earlier model (`models/embedding-001`).
+ */
+ outputDimensionality?: number;
+ /** Vertex API only. The MIME type of the input.
+ */
+ mimeType?: string;
+ /** Vertex API only. Whether to silently truncate inputs longer than
+ the max sequence length. If this option is set to false, oversized inputs
+ will lead to an INVALID_ARGUMENT error, similar to other text APIs.
+ */
+ autoTruncate?: boolean;
+}
+
+/** Request-level metadata for the Vertex Embed Content API. */
+export declare interface EmbedContentMetadata {
+ /** Vertex API only. The total number of billable characters included
+ in the request.
+ */
+ billableCharacterCount?: number;
+}
+
+/** Parameters for the embed_content method. */
+export declare interface EmbedContentParameters {
+ /** ID of the model to use. For a list of models, see Google models. */
+ model: string;
+ /** The content to embed. Only the `parts.text` fields will be counted.
+ */
+ contents: ContentListUnion;
+ /** Configuration that contains optional parameters.
+ */
+ config?: EmbedContentConfig;
+}
+
+/** Response for the embed_content method. */
+export declare class EmbedContentResponse {
+ /** The embeddings for each request, in the same order as provided in
+ the batch request.
+ */
+ embeddings?: ContentEmbedding[];
+ /** Vertex API only. Metadata about the request.
+ */
+ metadata?: EmbedContentMetadata;
+}
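+
+// A minimal usage sketch for embeddings (not part of the generated
+// declarations; the model name and task type are assumptions):
+//
+//   const embedResponse = await ai.models.embedContent({
+//     model: 'text-embedding-004',
+//     contents: 'Why is the sky blue?',
+//     config: {taskType: 'RETRIEVAL_QUERY'},
+//   });
+//   console.log(embedResponse.embeddings?.[0]?.values?.length);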
+
+/** Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. */
+export declare interface ExecutableCode {
+ /** Required. The code to be executed. */
+ code?: string;
+ /** Required. Programming language of the `code`. */
+ language?: Language;
+}
+
+export declare interface FetchPredictOperationConfig {
+ /** Used to override HTTP request options. */
+ httpOptions?: HttpOptions;
+}
+
+/** Parameters for the fetchPredictOperation method. */
+export declare interface FetchPredictOperationParameters {
+ /** The server-assigned name for the operation. */
+ operationName: string;
+ resourceName: string;
+ /** Used to override the default configuration. */
+ config?: FetchPredictOperationConfig;
+}
+
+/** A file uploaded to the API. */
+declare interface File_2 {
+ /** The `File` resource name. The ID (name excluding the "files/" prefix) can contain up to 40 characters that are lowercase alphanumeric or dashes (-). The ID cannot start or end with a dash. If the name is empty on create, a unique name will be generated. Example: `files/123-456` */
+ name?: string;
+ /** Optional. The human-readable display name for the `File`. The display name must be no more than 512 characters in length, including spaces. Example: 'Welcome Image' */
+ displayName?: string;
+ /** Output only. MIME type of the file. */
+ mimeType?: string;
+ /** Output only. Size of the file in bytes. */
+ sizeBytes?: string;
+ /** Output only. The timestamp of when the `File` was created. */
+ createTime?: string;
+ /** Output only. The timestamp of when the `File` will be deleted. Only set if the `File` is scheduled to expire. */
+ expirationTime?: string;
+ /** Output only. The timestamp of when the `File` was last updated. */
+ updateTime?: string;
+ /** Output only. SHA-256 hash of the uploaded bytes. The hash value is encoded in base64 format. */
+ sha256Hash?: string;
+ /** Output only. The URI of the `File`. */
+ uri?: string;
+ /** Output only. The URI of the `File`, only set for downloadable (generated) files. */
+ downloadUri?: string;
+ /** Output only. Processing state of the File. */
+ state?: FileState;
+ /** Output only. The source of the `File`. */
+ source?: FileSource;
+ /** Output only. Metadata for a video. */
+ videoMetadata?: Record<string, unknown>;
+ /** Output only. Error status if File processing failed. */
+ error?: FileStatus;
+}
+export { File_2 as File }
+
+/** URI based data. */
+export declare interface FileData {
+ /** Required. URI. */
+ fileUri?: string;
+ /** Required. The IANA standard MIME type of the source data. */
+ mimeType?: string;
+}
+
+export declare class Files extends BaseModule {
+ private readonly apiClient;
+ constructor(apiClient: ApiClient);
+ /**
+ * Lists all current project files from the service.
+ *
+ * @param params - The parameters for the list request
+ * @return The paginated results of the list of files
+ *
+ * @example
+ * The following code prints the names of all files from the service, the
+ * size of each page is 10.
+ *
+ * ```ts
+ * const listResponse = await ai.files.list({config: {'pageSize': 10}});
+ * for await (const file of listResponse) {
+ * console.log(file.name);
+ * }
+ * ```
+ */
+ list: (params?: types.ListFilesParameters) => Promise<Pager<types.File>>;
+ /**
+ * Uploads a file asynchronously to the Gemini API.
+ * This method is not available in Vertex AI.
+ * Supported upload sources:
+ * - Node.js: File path (string) or Blob object.
+ * - Browser: Blob object (e.g., File).
+ *
+ * @remarks
+ * The `mimeType` can be specified in the `config` parameter. If omitted:
+ * - For file path (string) inputs, the `mimeType` will be inferred from the
+ * file extension.
+ * - For Blob object inputs, the `mimeType` will be set to the Blob's `type`
+ * property.
+ * Some examples of file extension to mimeType mapping:
+ * .txt -> text/plain
+ * .json -> application/json
+ * .jpg -> image/jpeg
+ * .png -> image/png
+ * .mp3 -> audio/mpeg
+ * .mp4 -> video/mp4
+ *
+ * @param params - Optional parameters specified in the
+ * `common.UploadFileParameters` interface.
+ * @return A promise that resolves to a `types.File` object.
+ * @throws An error if called on a Vertex AI client.
+ * @throws An error if the `mimeType` is not provided and cannot be inferred;
+ * the `mimeType` can be provided in the `params.config` parameter.
+ * @throws An error occurs if a suitable upload location cannot be established.
+ *
+ * @example
+ * The following code uploads a file to Gemini API.
+ *
+ * ```ts
+ * const file = await ai.files.upload({file: 'file.txt', config: {
+ * mimeType: 'text/plain',
+ * }});
+ * console.log(file.name);
+ * ```
+ */
+ upload(params: common.UploadFileParameters): Promise<types.File>;
+ private listInternal;
+ private createInternal;
+ /**
+ * Retrieves the file information from the service.
+ *
+ * @param params - The parameters for the get request
+ * @return The Promise that resolves to the types.File object requested.
+ *
+ * @example
+ * ```ts
+ * const config: GetFileParameters = {
+ * name: fileName,
+ * };
+ * file = await ai.files.get(config);
+ * console.log(file.name);
+ * ```
+ */
+ get(params: types.GetFileParameters): Promise<types.File>;
+ /**
+ * Deletes a remotely stored file.
+ *
+ * @param params - The parameters for the delete request.
+ * @return The DeleteFileResponse, the response for the delete method.
+ *
+ * @example
+ * The following code deletes an example file named "files/mehozpxf877d".
+ *
+ * ```ts
+ * await ai.files.delete({name: file.name});
+ * ```
+ */
+ delete(params: types.DeleteFileParameters): Promise<types.DeleteFileResponse>;
+}
+
+export declare enum FileSource {
+ SOURCE_UNSPECIFIED = "SOURCE_UNSPECIFIED",
+ UPLOADED = "UPLOADED",
+ GENERATED = "GENERATED"
+}
+
+/**
+ * Represents the size and mimeType of a file. The information is used to
+ * request the upload URL from the https://generativelanguage.googleapis.com/upload/v1beta/files endpoint.
+ */
+declare interface FileStat {
+ /**
+ * The size of the file in bytes.
+ */
+ size: number;
+ /**
+ * The MIME type of the file.
+ */
+ type: string | undefined;
+}
+
+export declare enum FileState {
+ STATE_UNSPECIFIED = "STATE_UNSPECIFIED",
+ PROCESSING = "PROCESSING",
+ ACTIVE = "ACTIVE",
+ FAILED = "FAILED"
+}
+
+/** Status of a File that uses a common error model. */
+export declare interface FileStatus {
+ /** A list of messages that carry the error details. There is a common set of message types for APIs to use. */
+ details?: Record<string, unknown>[];
+ /** A human-readable message with details about the error. */
+ message?: string;
+ /** The status code. 0 for OK, 1 for CANCELLED */
+ code?: number;
+}
+
+export declare enum FinishReason {
+ FINISH_REASON_UNSPECIFIED = "FINISH_REASON_UNSPECIFIED",
+ STOP = "STOP",
+ MAX_TOKENS = "MAX_TOKENS",
+ SAFETY = "SAFETY",
+ RECITATION = "RECITATION",
+ OTHER = "OTHER",
+ BLOCKLIST = "BLOCKLIST",
+ PROHIBITED_CONTENT = "PROHIBITED_CONTENT",
+ SPII = "SPII",
+ MALFORMED_FUNCTION_CALL = "MALFORMED_FUNCTION_CALL",
+ IMAGE_SAFETY = "IMAGE_SAFETY"
+}
+
+declare function formatMap(templateString: string, valueMap: Record<string, unknown>): string;
+
+/** A function call. */
+export declare interface FunctionCall {
+ /** The unique id of the function call. If populated, the client should
+ execute the `function_call` and return the response with the matching `id`. */
+ id?: string;
+ /** Optional. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details. */
+ args?: Record<string, unknown>;
+ /** Required. The name of the function to call. Matches [FunctionDeclaration.name]. */
+ name?: string;
+}
+
+/** Function calling config. */
+export declare interface FunctionCallingConfig {
+ /** Optional. Function calling mode. */
+ mode?: FunctionCallingConfigMode;
+ /** Optional. Function names to call. Only set when the Mode is ANY. Function names should match [FunctionDeclaration.name]. With mode set to ANY, model will predict a function call from the set of function names provided. */
+ allowedFunctionNames?: string[];
+}
+
+export declare enum FunctionCallingConfigMode {
+ MODE_UNSPECIFIED = "MODE_UNSPECIFIED",
+ AUTO = "AUTO",
+ ANY = "ANY",
+ NONE = "NONE"
+}
+
+/** Defines a function that the model can generate JSON inputs for.
+
+ The inputs are based on OpenAPI 3.0 specifications.
+ */
+export declare interface FunctionDeclaration {
+ /** Describes the output from the function in the OpenAPI JSON Schema
+ Object format. */
+ response?: Schema;
+ /** Optional. Description and purpose of the function. Model uses it to decide how and whether to call the function. */
+ description?: string;
+ /** Required. The name of the function to call. Must start with a letter or an underscore. Must be a-z, A-Z, 0-9, or contain underscores, dots and dashes, with a maximum length of 64. */
+ name?: string;
+ /** Optional. Describes the parameters to this function in JSON Schema Object format. Reflects the Open API 3.03 Parameter Object. string Key: the name of the parameter. Parameter names are case sensitive. Schema Value: the Schema defining the type used for the parameter. For function with no parameters, this can be left unset. Parameter names must start with a letter or an underscore and must only contain chars a-z, A-Z, 0-9, or underscores with a maximum length of 64. Example with 1 required and 1 optional parameter: type: OBJECT properties: param1: type: STRING param2: type: INTEGER required: - param1 */
+ parameters?: Schema;
+}
+
+/** A function response. */
+export declare class FunctionResponse {
+ /** The id of the function call this response is for. Populated by the client
+ to match the corresponding function call `id`. */
+ id?: string;
+ /** Required. The name of the function to call. Matches [FunctionDeclaration.name] and [FunctionCall.name]. */
+ name?: string;
+ /** Required. The function response in JSON object format. Use "output" key to specify function output and "error" key to specify error details (if any). If "output" and "error" keys are not specified, then whole "response" is treated as function output. */
+ response?: Record<string, unknown>;
+}
+
+/** Optional model configuration parameters.
+
+ For more information, see Content generation parameters.
+ */
+export declare interface GenerateContentConfig {
+ /** Used to override HTTP request options. */
+ httpOptions?: HttpOptions;
+ /** Instructions for the model to steer it toward better performance.
+ For example, "Answer as concisely as possible" or "Don't use technical
+ terms in your response".
+ */
+ systemInstruction?: ContentUnion;
+ /** Value that controls the degree of randomness in token selection.
+ Lower temperatures are good for prompts that require a less open-ended or
+ creative response, while higher temperatures can lead to more diverse or
+ creative results.
+ */
+ temperature?: number;
+ /** Tokens are selected from the most to least probable until the sum
+ of their probabilities equals this value. Use a lower value for less
+ random responses and a higher value for more random responses.
+ */
+ topP?: number;
+ /** For each token selection step, the ``top_k`` tokens with the
+ highest probabilities are sampled. Then tokens are further filtered based
+ on ``top_p`` with the final token selected using temperature sampling. Use
+ a lower number for less random responses and a higher number for more
+ random responses.
+ */
+ topK?: number;
+ /** Number of response variations to return.
+ */
+ candidateCount?: number;
+ /** Maximum number of tokens that can be generated in the response.
+ */
+ maxOutputTokens?: number;
+ /** List of strings that tells the model to stop generating text if one
+ of the strings is encountered in the response.
+ */
+ stopSequences?: string[];
+ /** Whether to return the log probabilities of the tokens that were
+ chosen by the model at each step.
+ */
+ responseLogprobs?: boolean;
+ /** Number of top candidate tokens to return the log probabilities for
+ at each generation step.
+ */
+ logprobs?: number;
+ /** Positive values penalize tokens that already appear in the
+ generated text, increasing the probability of generating more diverse
+ content.
+ */
+ presencePenalty?: number;
+ /** Positive values penalize tokens that repeatedly appear in the
+ generated text, increasing the probability of generating more diverse
+ content.
+ */
+ frequencyPenalty?: number;
+ /** When ``seed`` is fixed to a specific number, the model makes a best
+ effort to provide the same response for repeated requests. By default, a
+ random number is used.
+ */
+ seed?: number;
+ /** Output response media type of the generated candidate text.
+ */
+ responseMimeType?: string;
+ /** Schema that the generated candidate text must adhere to.
+ */
+ responseSchema?: SchemaUnion;
+ /** Configuration for model router requests.
+ */
+ routingConfig?: GenerationConfigRoutingConfig;
+ /** Safety settings in the request to block unsafe content in the
+ response.
+ */
+ safetySettings?: SafetySetting[];
+ /** Code that enables the system to interact with external systems to
+ perform an action outside of the knowledge and scope of the model.
+ */
+ tools?: ToolListUnion;
+ /** Associates model output to a specific function call.
+ */
+ toolConfig?: ToolConfig;
+ /** Labels with user-defined metadata to break down billed charges. */
+ labels?: Record<string, string>;
+ /** Resource name of a context cache that can be used in subsequent
+ requests.
+ */
+ cachedContent?: string;
+ /** The requested modalities of the response. Represents the set of
+ modalities that the model can return.
+ */
+ responseModalities?: string[];
+ /** If specified, the media resolution specified will be used.
+ */
+ mediaResolution?: MediaResolution;
+ /** The speech generation configuration.
+ */
+ speechConfig?: SpeechConfigUnion;
+ /** If enabled, audio timestamp will be included in the request to the
+ model.
+ */
+ audioTimestamp?: boolean;
+ /** The thinking features configuration.
+ */
+ thinkingConfig?: ThinkingConfig;
+}
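+
+// A minimal sketch showing a few of the config fields above used together
+// (not part of the generated declarations; values are illustrative):
+//
+//   const response = await ai.models.generateContent({
+//     model: 'gemini-2.0-flash-001',
+//     contents: 'List three facts about the Moon.',
+//     config: {
+//       systemInstruction: 'Answer as concisely as possible.',
+//       temperature: 0.2,
+//       maxOutputTokens: 256,
+//     },
+//   });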
+
+/** Config for models.generate_content parameters. */
+export declare interface GenerateContentParameters {
+ /** ID of the model to use. For a list of models, see Google models. */
+ model: string;
+ /** Content of the request.
+ */
+ contents: ContentListUnion;
+ /** Configuration that contains optional model parameters.
+ */
+ config?: GenerateContentConfig;
+}
+
+/** Response message for PredictionService.GenerateContent. */
+export declare class GenerateContentResponse {
+ /** Response variations returned by the model.
+ */
+ candidates?: Candidate[];
+ /** Timestamp when the request is made to the server.
+ */
+ createTime?: string;
+ /** Identifier for each response.
+ */
+ responseId?: string;
+ /** Output only. The model version used to generate the response. */
+ modelVersion?: string;
+ /** Output only. Content filter results for a prompt sent in the request. Note: Sent only in the first stream chunk. Only happens when no candidates were generated due to content violations. */
+ promptFeedback?: GenerateContentResponsePromptFeedback;
+ /** Usage metadata about the response(s). */
+ usageMetadata?: GenerateContentResponseUsageMetadata;
+ /**
+ * Returns the concatenation of all text parts from the first candidate in the response.
+ *
+ * @remarks
+ * If there are multiple candidates in the response, the text from the first
+ * one will be returned.
+ * If there are non-text parts in the response, the concatenation of all text
+ * parts will be returned, and a warning will be logged.
+ * If there are thought parts in the response, the concatenation of all text
+ * parts excluding the thought parts will be returned.
+ *
+ * @example
+ * ```ts
+ * const response = await ai.models.generateContent({
+ * model: 'gemini-2.0-flash',
+ * contents:
+ * 'Why is the sky blue?',
+ * });
+ *
+ * console.debug(response.text);
+ * ```
+ */
+ get text(): string | undefined;
+ /**
+ * Returns the function calls from the first candidate in the response.
+ *
+ * @remarks
+ * If there are multiple candidates in the response, the function calls from
+ * the first one will be returned.
+ * If there are no function calls in the response, undefined will be returned.
+ *
+ * @example
+ * ```ts
+ * const controlLightFunctionDeclaration: FunctionDeclaration = {
+ * name: 'controlLight',
+ * parameters: {
+ * type: Type.OBJECT,
+ * description: 'Set the brightness and color temperature of a room light.',
+ * properties: {
+ * brightness: {
+ * type: Type.NUMBER,
+ * description:
+ * 'Light level from 0 to 100. Zero is off and 100 is full brightness.',
+ * },
+ * colorTemperature: {
+ * type: Type.STRING,
+ * description:
+ * 'Color temperature of the light fixture which can be `daylight`, `cool` or `warm`.',
+ * },
+ * },
+ *     required: ['brightness', 'colorTemperature'],
+ *   },
+ * };
+ * const response = await ai.models.generateContent({
+ * model: 'gemini-2.0-flash',
+ * contents: 'Dim the lights so the room feels cozy and warm.',
+ * config: {
+ * tools: [{functionDeclarations: [controlLightFunctionDeclaration]}],
+ * toolConfig: {
+ * functionCallingConfig: {
+ * mode: FunctionCallingConfigMode.ANY,
+ * allowedFunctionNames: ['controlLight'],
+ * },
+ * },
+ * },
+ * });
+ * console.debug(JSON.stringify(response.functionCalls));
+ * ```
+ */
+ get functionCalls(): FunctionCall[] | undefined;
+ /**
+ * Returns the first executable code from the first candidate in the response.
+ *
+ * @remarks
+ * If there are multiple candidates in the response, the executable code from
+ * the first one will be returned.
+ * If there is no executable code in the response, undefined will be
+ * returned.
+ *
+ * @example
+ * ```ts
+ * const response = await ai.models.generateContent({
+ * model: 'gemini-2.0-flash',
+ * contents:
+ *     'What is the sum of the first 50 prime numbers? Generate and run code for the calculation, and make sure you get all 50.',
+ * config: {
+ * tools: [{codeExecution: {}}],
+ * },
+ * });
+ *
+ * console.debug(response.executableCode);
+ * ```
+ */
+ get executableCode(): string | undefined;
+ /**
+ * Returns the first code execution result from the first candidate in the response.
+ *
+ * @remarks
+ * If there are multiple candidates in the response, the code execution result from
+ * the first one will be returned.
+ * If there is no code execution result in the response, undefined will be returned.
+ *
+ * @example
+ * ```ts
+ * const response = await ai.models.generateContent({
+ * model: 'gemini-2.0-flash',
+ * contents:
+ *     'What is the sum of the first 50 prime numbers? Generate and run code for the calculation, and make sure you get all 50.',
+ * config: {
+ * tools: [{codeExecution: {}}],
+ * },
+ * });
+ *
+ * console.debug(response.codeExecutionResult);
+ * ```
+ */
+ get codeExecutionResult(): string | undefined;
+}
+
+/** Content filter results for a prompt sent in the request. */
+export declare class GenerateContentResponsePromptFeedback {
+ /** Output only. Blocked reason. */
+ blockReason?: BlockedReason;
+ /** Output only. A readable block reason message. */
+ blockReasonMessage?: string;
+ /** Output only. Safety ratings. */
+ safetyRatings?: SafetyRating[];
+}
+
+/** Usage metadata about response(s). */
+export declare class GenerateContentResponseUsageMetadata {
+ /** Output only. List of modalities of the cached content in the request input. */
+ cacheTokensDetails?: ModalityTokenCount[];
+ /** Output only. Number of tokens in the cached part in the input (the cached content). */
+ cachedContentTokenCount?: number;
+ /** Number of tokens in the response(s). */
+ candidatesTokenCount?: number;
+ /** Output only. List of modalities that were returned in the response. */
+ candidatesTokensDetails?: ModalityTokenCount[];
+ /** Number of tokens in the request. When `cached_content` is set, this is still the total effective prompt size, meaning it includes the number of tokens in the cached content. */
+ promptTokenCount?: number;
+ /** Output only. List of modalities that were processed in the request input. */
+ promptTokensDetails?: ModalityTokenCount[];
+ /** Output only. Number of tokens present in thoughts output. */
+ thoughtsTokenCount?: number;
+ /** Output only. Number of tokens present in tool-use prompt(s). */
+ toolUsePromptTokenCount?: number;
+ /** Output only. List of modalities that were processed for tool-use request inputs. */
+ toolUsePromptTokensDetails?: ModalityTokenCount[];
+ /** Total token count for prompt, response candidates, and tool-use prompts (if present). */
+ totalTokenCount?: number;
+}
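+
+/*
+ * A minimal usage sketch for the metadata above, assuming an `ai` client
+ * initialized as shown in the {@link GoogleGenAI} examples:
+ *
+ * ```ts
+ * const response = await ai.models.generateContent({
+ *   model: 'gemini-2.0-flash',
+ *   contents: 'Why is the sky blue?',
+ * });
+ * const usage = response.usageMetadata;
+ * console.log('prompt tokens:', usage?.promptTokenCount);
+ * console.log('response tokens:', usage?.candidatesTokenCount);
+ * console.log('total tokens:', usage?.totalTokenCount);
+ * ```
+ */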
+
+/** An output image. */
+export declare interface GeneratedImage {
+ /** The output image data.
+ */
+ image?: Image_2;
+ /** Responsible AI filter reason if the image is filtered out of the
+ response.
+ */
+ raiFilteredReason?: string;
+ /** Safety attributes of the image. Lists of RAI categories and their
+ scores of each content.
+ */
+ safetyAttributes?: SafetyAttributes;
+ /** The rewritten prompt used for the image generation if the prompt
+ enhancer is enabled.
+ */
+ enhancedPrompt?: string;
+}
+
+/** A generated video. */
+export declare interface GeneratedVideo {
+ /** The output video */
+ video?: Video;
+}
+
+/** The config for generating images. */
+export declare interface GenerateImagesConfig {
+ /** Used to override HTTP request options. */
+ httpOptions?: HttpOptions;
+ /** Cloud Storage URI used to store the generated images.
+ */
+ outputGcsUri?: string;
+ /** Description of what to discourage in the generated images.
+ */
+ negativePrompt?: string;
+ /** Number of images to generate.
+ */
+ numberOfImages?: number;
+ /** Aspect ratio of the generated images.
+ */
+ aspectRatio?: string;
+ /** Controls how much the model adheres to the text prompt. Large
+ values increase output and prompt alignment, but may compromise image
+ quality.
+ */
+ guidanceScale?: number;
+ /** Random seed for image generation. This is not available when
+ ``add_watermark`` is set to true.
+ */
+ seed?: number;
+ /** Filter level for safety filtering.
+ */
+ safetyFilterLevel?: SafetyFilterLevel;
+ /** Allows generation of people by the model.
+ */
+ personGeneration?: PersonGeneration;
+ /** Whether to report the safety scores of each generated image and
+ the positive prompt in the response.
+ */
+ includeSafetyAttributes?: boolean;
+ /** Whether to include the Responsible AI filter reason if the image
+ is filtered out of the response.
+ */
+ includeRaiReason?: boolean;
+ /** Language of the text in the prompt.
+ */
+ language?: ImagePromptLanguage;
+ /** MIME type of the generated image.
+ */
+ outputMimeType?: string;
+ /** Compression quality of the generated image (for ``image/jpeg``
+ only).
+ */
+ outputCompressionQuality?: number;
+ /** Whether to add a watermark to the generated images.
+ */
+ addWatermark?: boolean;
+ /** Whether to use the prompt rewriting logic.
+ */
+ enhancePrompt?: boolean;
+}
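+
+/*
+ * A sketch of a fuller image-generation config built from the fields above;
+ * the specific values are illustrative, not recommendations:
+ *
+ * ```ts
+ * const config: GenerateImagesConfig = {
+ *   numberOfImages: 2,
+ *   aspectRatio: '16:9',
+ *   negativePrompt: 'blurry, low quality',
+ *   outputMimeType: 'image/jpeg',
+ *   outputCompressionQuality: 85,
+ *   includeRaiReason: true,
+ * };
+ * ```
+ */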
+
+/** The parameters for generating images. */
+export declare interface GenerateImagesParameters {
+ /** ID of the model to use. For a list of models, see the Google models
+ documentation. */
+ model: string;
+ /** Text prompt that typically describes the images to output.
+ */
+ prompt: string;
+ /** Configuration for generating images.
+ */
+ config?: GenerateImagesConfig;
+}
+
+/** The output images response. */
+export declare class GenerateImagesResponse {
+ /** List of generated images.
+ */
+ generatedImages?: GeneratedImage[];
+ /** Safety attributes of the positive prompt. Only populated if
+ ``include_safety_attributes`` is set to True.
+ */
+ positivePromptSafetyAttributes?: SafetyAttributes;
+}
+
+/** Configuration for generating videos. */
+export declare interface GenerateVideosConfig {
+ /** Used to override HTTP request options. */
+ httpOptions?: HttpOptions;
+ /** Number of output videos. */
+ numberOfVideos?: number;
+ /** The Cloud Storage bucket where the generated videos are saved. */
+ outputGcsUri?: string;
+ /** Frames per second for video generation. */
+ fps?: number;
+ /** Duration of the clip for video generation in seconds. */
+ durationSeconds?: number;
+ /** The RNG seed. If RNG seed is exactly same for each request with unchanged inputs, the prediction results will be consistent. Otherwise, a random RNG seed will be used each time to produce a different result. */
+ seed?: number;
+ /** The aspect ratio for the generated video. 16:9 (landscape) and 9:16 (portrait) are supported. */
+ aspectRatio?: string;
+ /** The resolution for the generated video. 1280x720, 1920x1080 are supported. */
+ resolution?: string;
+ /** Whether to allow generating videos of people, and which ages to allow. Supported values are: dont_allow, allow_adult. */
+ personGeneration?: string;
+ /** The Pub/Sub topic where video generation progress is published. */
+ pubsubTopic?: string;
+ /** Optional field in addition to the text content. Negative prompts can be explicitly stated here to help generate the video. */
+ negativePrompt?: string;
+ /** Whether to use the prompt rewriting logic. */
+ enhancePrompt?: boolean;
+}
+
+/** A video generation operation. */
+export declare interface GenerateVideosOperation {
+ /** The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`. */
+ name?: string;
+ /** Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any. */
+ metadata?: Record<string, unknown>;
+ /** If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available. */
+ done?: boolean;
+ /** The error result of the operation in case of failure or cancellation. */
+ error?: Record<string, unknown>;
+ /** The normal response of the operation in case of success. */
+ response?: Record<string, unknown>;
+ /** The generated videos. */
+ result?: GenerateVideosResponse;
+}
+
+/** Class that represents the parameters for generating videos. */
+export declare interface GenerateVideosParameters {
+ /** ID of the model to use. For a list of models, see the Google models
+ documentation. */
+ model: string;
+ /** The text prompt for generating the videos. Optional for image to video use cases. */
+ prompt?: string;
+ /** The input image for generating the videos.
+ Optional if prompt is provided. */
+ image?: Image_2;
+ /** Configuration for generating videos. */
+ config?: GenerateVideosConfig;
+}
+
+/** Response with generated videos. */
+export declare class GenerateVideosResponse {
+ /** List of the generated videos */
+ generatedVideos?: GeneratedVideo[];
+ /** Returns if any videos were filtered due to RAI policies. */
+ raiMediaFilteredCount?: number;
+ /** Returns rai failure reasons if any. */
+ raiMediaFilteredReasons?: string[];
+}
+
+/** Generation config. */
+export declare interface GenerationConfig {
+ /** Optional. If enabled, audio timestamp will be included in the request to the model. */
+ audioTimestamp?: boolean;
+ /** Optional. Number of candidates to generate. */
+ candidateCount?: number;
+ /** Optional. Frequency penalties. */
+ frequencyPenalty?: number;
+ /** Optional. Logit probabilities. */
+ logprobs?: number;
+ /** Optional. The maximum number of output tokens to generate per message. */
+ maxOutputTokens?: number;
+ /** Optional. Positive penalties. */
+ presencePenalty?: number;
+ /** Optional. If true, export the logprobs results in response. */
+ responseLogprobs?: boolean;
+ /** Optional. Output response mimetype of the generated candidate text. Supported mimetype: - `text/plain`: (default) Text output. - `application/json`: JSON response in the candidates. The model needs to be prompted to output the appropriate response type, otherwise the behavior is undefined. This is a preview feature. */
+ responseMimeType?: string;
+ /** Optional. The `Schema` object allows the definition of input and output data types. These types can be objects, but also primitives and arrays. Represents a select subset of an [OpenAPI 3.0 schema object](https://spec.openapis.org/oas/v3.0.3#schema). If set, a compatible response_mime_type must also be set. Compatible mimetypes: `application/json`: Schema for JSON response. */
+ responseSchema?: Schema;
+ /** Optional. Routing configuration. */
+ routingConfig?: GenerationConfigRoutingConfig;
+ /** Optional. Seed. */
+ seed?: number;
+ /** Optional. Stop sequences. */
+ stopSequences?: string[];
+ /** Optional. Controls the randomness of predictions. */
+ temperature?: number;
+ /** Optional. If specified, top-k sampling will be used. */
+ topK?: number;
+ /** Optional. If specified, nucleus sampling will be used. */
+ topP?: number;
+}
+
+/** The configuration for routing the request to a specific model. */
+export declare interface GenerationConfigRoutingConfig {
+ /** Automated routing. */
+ autoMode?: GenerationConfigRoutingConfigAutoRoutingMode;
+ /** Manual routing. */
+ manualMode?: GenerationConfigRoutingConfigManualRoutingMode;
+}
+
+/** When automated routing is specified, the routing will be determined by the pretrained routing model and customer provided model routing preference. */
+export declare interface GenerationConfigRoutingConfigAutoRoutingMode {
+ /** The model routing preference. */
+ modelRoutingPreference?: 'UNKNOWN' | 'PRIORITIZE_QUALITY' | 'BALANCED' | 'PRIORITIZE_COST';
+}
+
+/** When manual routing is set, the specified model will be used directly. */
+export declare interface GenerationConfigRoutingConfigManualRoutingMode {
+ /** The model name to use. Only the public LLM models are accepted. e.g. 'gemini-1.5-pro-001'. */
+ modelName?: string;
+}
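+
+/*
+ * Sketches of the two routing modes above; a request would set either
+ * `autoMode` or `manualMode`, not both:
+ *
+ * ```ts
+ * const auto: GenerationConfigRoutingConfig = {
+ *   autoMode: {modelRoutingPreference: 'BALANCED'},
+ * };
+ * const manual: GenerationConfigRoutingConfig = {
+ *   manualMode: {modelName: 'gemini-1.5-pro-001'},
+ * };
+ * ```
+ */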
+
+/** Optional parameters for caches.get method. */
+export declare interface GetCachedContentConfig {
+ /** Used to override HTTP request options. */
+ httpOptions?: HttpOptions;
+}
+
+/** Parameters for caches.get method. */
+export declare interface GetCachedContentParameters {
+ /** The server-generated resource name of the cached content.
+ */
+ name: string;
+ /** Optional parameters for the request.
+ */
+ config?: GetCachedContentConfig;
+}
+
+/** Used to override the default configuration. */
+export declare interface GetFileConfig {
+ /** Used to override HTTP request options. */
+ httpOptions?: HttpOptions;
+}
+
+/** Generates the parameters for the get method. */
+export declare interface GetFileParameters {
+ /** The name identifier for the file to retrieve. */
+ name: string;
+ /** Used to override the default configuration. */
+ config?: GetFileConfig;
+}
+
+export declare interface GetOperationConfig {
+ /** Used to override HTTP request options. */
+ httpOptions?: HttpOptions;
+}
+
+/** Parameters for the GET method. */
+export declare interface GetOperationParameters {
+ /** The server-assigned name for the operation. */
+ operationName: string;
+ /** Used to override the default configuration. */
+ config?: GetOperationConfig;
+}
+
+declare function getValueByPath(data: unknown, keys: string[]): unknown;
+
+/**
+ * The Google GenAI SDK.
+ *
+ * @remarks
+ * Provides access to the GenAI features through either the {@link https://ai.google.dev/gemini-api/docs | Gemini API}
+ * or the {@link https://cloud.google.com/vertex-ai/docs/reference/rest | Vertex AI API}.
+ *
+ * The {@link GoogleGenAIOptions.vertexai} value determines which of the API services to use.
+ *
+ * When using the Gemini API, a {@link GoogleGenAIOptions.apiKey} must also be set,
+ * when using Vertex AI {@link GoogleGenAIOptions.project} and {@link GoogleGenAIOptions.location} must also be set.
+ *
+ * @example
+ * Initializing the SDK for using the Gemini API:
+ * ```ts
+ * import {GoogleGenAI} from '@google/genai';
+ * const ai = new GoogleGenAI({apiKey: 'GEMINI_API_KEY'});
+ * ```
+ *
+ * @example
+ * Initializing the SDK for using the Vertex AI API:
+ * ```ts
+ * import {GoogleGenAI} from '@google/genai';
+ * const ai = new GoogleGenAI({
+ * vertexai: true,
+ * project: 'PROJECT_ID',
+ * location: 'PROJECT_LOCATION'
+ * });
+ * ```
+ *
+ */
+export declare class GoogleGenAI {
+ protected readonly apiClient: ApiClient;
+ private readonly apiKey?;
+ readonly vertexai: boolean;
+ private readonly apiVersion?;
+ readonly models: Models;
+ readonly live: Live;
+ readonly chats: Chats;
+ readonly caches: Caches;
+ readonly files: Files;
+ readonly operations: Operations;
+ constructor(options: GoogleGenAIOptions);
+}
+
+/**
+ * Google Gen AI SDK's configuration options.
+ *
+ * See {@link GoogleGenAI} for usage samples.
+ */
+export declare interface GoogleGenAIOptions {
+ /**
+ * Optional. Determines whether to use the Vertex AI or the Gemini API.
+ *
+ * @remarks
+ * When true, the {@link https://cloud.google.com/vertex-ai/docs/reference/rest | Vertex AI API} will be used.
+ * When false, the {@link https://ai.google.dev/gemini-api/docs | Gemini API} will be used.
+ *
+ * If unset, default SDK behavior is to use the Gemini API service.
+ */
+ vertexai?: boolean;
+ /**
+ * Optional. The Google Cloud project ID for Vertex AI clients.
+ *
+ * @remarks
+ * Only supported on Node runtimes, ignored on browser runtimes.
+ */
+ project?: string;
+ /**
+ * Optional. The Google Cloud project region for Vertex AI clients.
+ *
+ * @remarks
+ * Only supported on Node runtimes, ignored on browser runtimes.
+ *
+ */
+ location?: string;
+ /**
+ * The API Key, required for Gemini API clients.
+ *
+ * @remarks
+ * Required on browser runtimes.
+ */
+ apiKey?: string;
+ /**
+ * Optional. The API version to use.
+ *
+ * @remarks
+ * If unset, the default API version will be used.
+ */
+ apiVersion?: string;
+ /**
+ * Optional. Authentication options defined by google-auth-library for Vertex AI clients.
+ *
+ * @remarks
+ * @see {@link https://github.com/googleapis/google-auth-library-nodejs/blob/v9.15.0/src/auth/googleauth.ts | GoogleAuthOptions interface in google-auth-library-nodejs}.
+ *
+ * Only supported on Node runtimes, ignored on browser runtimes.
+ *
+ */
+ googleAuthOptions?: GoogleAuthOptions;
+ /**
+ * Optional. A set of customizable configuration for HTTP requests.
+ */
+ httpOptions?: HttpOptions;
+}
+
+/** Tool to support Google Search in Model. Powered by Google. */
+export declare interface GoogleSearch {
+}
+
+/** Tool to retrieve public web data for grounding, powered by Google. */
+export declare interface GoogleSearchRetrieval {
+ /** Specifies the dynamic retrieval configuration for the given source. */
+ dynamicRetrievalConfig?: DynamicRetrievalConfig;
+}
+
+/** Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp */
+export declare interface GoogleTypeDate {
+ /** Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant. */
+ day?: number;
+ /** Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day. */
+ month?: number;
+ /** Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year. */
+ year?: number;
+}
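+
+/*
+ * Sketches of the partial-date forms described above: a full date, a credit
+ * card expiration (zero day), and a recurring anniversary (zero year):
+ *
+ * ```ts
+ * const fullDate: GoogleTypeDate = {year: 2025, month: 4, day: 1};
+ * const cardExpiry: GoogleTypeDate = {year: 2027, month: 9, day: 0};
+ * const anniversary: GoogleTypeDate = {year: 0, month: 6, day: 15};
+ * ```
+ */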
+
+/** Grounding chunk. */
+export declare interface GroundingChunk {
+ /** Grounding chunk from context retrieved by the retrieval tools. */
+ retrievedContext?: GroundingChunkRetrievedContext;
+ /** Grounding chunk from the web. */
+ web?: GroundingChunkWeb;
+}
+
+/** Chunk from context retrieved by the retrieval tools. */
+export declare interface GroundingChunkRetrievedContext {
+ /** Text of the attribution. */
+ text?: string;
+ /** Title of the attribution. */
+ title?: string;
+ /** URI reference of the attribution. */
+ uri?: string;
+}
+
+/** Chunk from the web. */
+export declare interface GroundingChunkWeb {
+ /** Title of the chunk. */
+ title?: string;
+ /** URI reference of the chunk. */
+ uri?: string;
+}
+
+/** Metadata returned to client when grounding is enabled. */
+export declare interface GroundingMetadata {
+ /** List of supporting references retrieved from specified grounding source. */
+ groundingChunks?: GroundingChunk[];
+ /** Optional. List of grounding support. */
+ groundingSupports?: GroundingSupport[];
+ /** Optional. Output only. Retrieval metadata. */
+ retrievalMetadata?: RetrievalMetadata;
+ /** Optional. Queries executed by the retrieval tools. */
+ retrievalQueries?: string[];
+ /** Optional. Google search entry for the following-up web searches. */
+ searchEntryPoint?: SearchEntryPoint;
+ /** Optional. Web search queries for the following-up web search. */
+ webSearchQueries?: string[];
+}
+
+/** Grounding support. */
+export declare interface GroundingSupport {
+ /** Confidence score of the support references. Ranges from 0 to 1. 1 is the most confident. This list must have the same size as the grounding_chunk_indices. */
+ confidenceScores?: number[];
+ /** A list of indices (into 'grounding_chunk') specifying the citations associated with the claim. For instance [1,3,4] means that grounding_chunk[1], grounding_chunk[3], grounding_chunk[4] are the retrieved content attributed to the claim. */
+ groundingChunkIndices?: number[];
+ /** Segment of the content this support belongs to. */
+ segment?: Segment;
+}
+
+export declare enum HarmBlockMethod {
+ HARM_BLOCK_METHOD_UNSPECIFIED = "HARM_BLOCK_METHOD_UNSPECIFIED",
+ SEVERITY = "SEVERITY",
+ PROBABILITY = "PROBABILITY"
+}
+
+export declare enum HarmBlockThreshold {
+ HARM_BLOCK_THRESHOLD_UNSPECIFIED = "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
+ BLOCK_LOW_AND_ABOVE = "BLOCK_LOW_AND_ABOVE",
+ BLOCK_MEDIUM_AND_ABOVE = "BLOCK_MEDIUM_AND_ABOVE",
+ BLOCK_ONLY_HIGH = "BLOCK_ONLY_HIGH",
+ BLOCK_NONE = "BLOCK_NONE",
+ OFF = "OFF"
+}
+
+export declare enum HarmCategory {
+ HARM_CATEGORY_UNSPECIFIED = "HARM_CATEGORY_UNSPECIFIED",
+ HARM_CATEGORY_HATE_SPEECH = "HARM_CATEGORY_HATE_SPEECH",
+ HARM_CATEGORY_DANGEROUS_CONTENT = "HARM_CATEGORY_DANGEROUS_CONTENT",
+ HARM_CATEGORY_HARASSMENT = "HARM_CATEGORY_HARASSMENT",
+ HARM_CATEGORY_SEXUALLY_EXPLICIT = "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+ HARM_CATEGORY_CIVIC_INTEGRITY = "HARM_CATEGORY_CIVIC_INTEGRITY"
+}
+
+export declare enum HarmProbability {
+ HARM_PROBABILITY_UNSPECIFIED = "HARM_PROBABILITY_UNSPECIFIED",
+ NEGLIGIBLE = "NEGLIGIBLE",
+ LOW = "LOW",
+ MEDIUM = "MEDIUM",
+ HIGH = "HIGH"
+}
+
+export declare enum HarmSeverity {
+ HARM_SEVERITY_UNSPECIFIED = "HARM_SEVERITY_UNSPECIFIED",
+ HARM_SEVERITY_NEGLIGIBLE = "HARM_SEVERITY_NEGLIGIBLE",
+ HARM_SEVERITY_LOW = "HARM_SEVERITY_LOW",
+ HARM_SEVERITY_MEDIUM = "HARM_SEVERITY_MEDIUM",
+ HARM_SEVERITY_HIGH = "HARM_SEVERITY_HIGH"
+}
+
+/** HTTP options to be used in each of the requests. */
+export declare interface HttpOptions {
+ /** The base URL for the AI platform service endpoint. */
+ baseUrl?: string;
+ /** Specifies the version of the API to use. */
+ apiVersion?: string;
+ /** Additional HTTP headers to be sent with the request. */
+ headers?: Record<string, string>;
+ /** Timeout for the request in milliseconds. */
+ timeout?: number;
+}
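+
+/*
+ * A minimal sketch of overriding HTTP options for a whole client; the base
+ * URL and header shown are placeholders, not real endpoints:
+ *
+ * ```ts
+ * const httpOptions: HttpOptions = {
+ *   baseUrl: 'https://my-proxy.example.com',
+ *   timeout: 30000,
+ *   headers: {'x-custom-header': 'value'},
+ * };
+ * const ai = new GoogleGenAI({apiKey: 'GEMINI_API_KEY', httpOptions});
+ * ```
+ */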
+
+/**
+ * Represents the necessary information to send a request to an API endpoint.
+ * This interface defines the structure for constructing and executing HTTP
+ * requests.
+ */
+declare interface HttpRequest {
+ /**
+ * URL path from the modules, this path is appended to the base API URL to
+ * form the complete request URL.
+ *
+ * If you wish to set the full URL, use httpOptions.baseUrl instead. Example
+ * of setting the full URL in the request:
+ *
+ * const request: HttpRequest = {
+ *   path: '',
+ *   httpOptions: {
+ *     baseUrl: 'https://<your-base-url>',
+ *     apiVersion: '',
+ *   },
+ *   httpMethod: 'GET',
+ * };
+ *
+ * The result URL will be: https://<your-base-url>
+ *
+ */
+ path: string;
+ /**
+ * Optional query parameters to be appended to the request URL.
+ */
+ queryParams?: Record<string, string>;
+ /**
+ * Optional request body in json string or Blob format, GET request doesn't
+ * need a request body.
+ */
+ body?: string | Blob;
+ /**
+ * The HTTP method to be used for the request.
+ */
+ httpMethod: 'GET' | 'POST' | 'PATCH' | 'DELETE';
+ /**
+ * Optional set of customizable configuration for HTTP requests.
+ */
+ httpOptions?: HttpOptions;
+}
+
+/** A wrapper class for the http response. */
+export declare class HttpResponse {
+ /** Used to retain the processed HTTP headers in the response. */
+ headers?: Record<string, string>;
+ /**
+ * The original http response.
+ */
+ responseInternal: Response;
+ constructor(response: Response);
+ json(): Promise<unknown>;
+}
+
+/** An image. */
+declare interface Image_2 {
+ /** The Cloud Storage URI of the image. ``Image`` can contain a value
+ for this field or the ``image_bytes`` field but not both.
+ */
+ gcsUri?: string;
+ /** The image bytes data. ``Image`` can contain a value for this field
+ or the ``gcs_uri`` field but not both.
+ */
+ imageBytes?: string;
+ /** The MIME type of the image. */
+ mimeType?: string;
+}
+export { Image_2 as Image }
+
+export declare enum ImagePromptLanguage {
+ auto = "auto",
+ en = "en",
+ ja = "ja",
+ ko = "ko",
+ hi = "hi"
+}
+
+export declare enum Language {
+ LANGUAGE_UNSPECIFIED = "LANGUAGE_UNSPECIFIED",
+ PYTHON = "PYTHON"
+}
+
+/** Config for caches.list method. */
+export declare interface ListCachedContentsConfig {
+ /** Used to override HTTP request options. */
+ httpOptions?: HttpOptions;
+ pageSize?: number;
+ pageToken?: string;
+}
+
+/** Parameters for caches.list method. */
+export declare interface ListCachedContentsParameters {
+ /** Configuration that contains optional parameters.
+ */
+ config?: ListCachedContentsConfig;
+}
+
+export declare class ListCachedContentsResponse {
+ nextPageToken?: string;
+ /** List of cached contents.
+ */
+ cachedContents?: CachedContent[];
+}
+
+/** Used to override the default configuration. */
+export declare interface ListFilesConfig {
+ /** Used to override HTTP request options. */
+ httpOptions?: HttpOptions;
+ pageSize?: number;
+ pageToken?: string;
+}
+
+/** Generates the parameters for the list method. */
+export declare interface ListFilesParameters {
+ /** Used to override the default configuration. */
+ config?: ListFilesConfig;
+}
+
+/** Response for the list files method. */
+export declare class ListFilesResponse {
+ /** A token to retrieve next page of results. */
+ nextPageToken?: string;
+ /** The list of files. */
+ files?: File_2[];
+}
+
+/**
+ Live class encapsulates the configuration for live interaction with the
+ Generative Language API. It embeds ApiClient for general API settings.
+
+ @experimental
+ */
+export declare class Live {
+ private readonly apiClient;
+ private readonly auth;
+ private readonly webSocketFactory;
+ constructor(apiClient: ApiClient, auth: Auth, webSocketFactory: WebSocketFactory);
+ /**
+ Establishes a connection to the specified model with the given
+ configuration and returns a Session object representing that connection.
+
+ @experimental
+
+ @remarks
+ If using the Gemini API, Live is currently only supported behind API
+ version `v1alpha`. Ensure that the API version is set to `v1alpha` when
+ initializing the SDK if relying on the Gemini API.
+
+ @param params - The parameters for establishing a connection to the model.
+ @return A live session.
+
+ @example
+ ```ts
+ const session = await ai.live.connect({
+ model: 'gemini-2.0-flash-exp',
+ config: {
+ responseModalities: [Modality.AUDIO],
+ },
+ callbacks: {
+ onopen: () => {
+ console.log('Connected to the socket.');
+ },
+ onmessage: (e: MessageEvent) => {
+ console.log('Received message from the server: %s\n', debug(e.data));
+ },
+ onerror: (e: ErrorEvent) => {
+ console.log('Error occurred: %s\n', debug(e.error));
+ },
+ onclose: (e: CloseEvent) => {
+ console.log('Connection closed.');
+ },
+ },
+ });
+ ```
+ */
+ connect(params: types.LiveConnectParameters): Promise<Session>;
+}
+
+/** Callbacks for the live API. */
+export declare interface LiveCallbacks {
+ onopen?: (() => void) | null;
+ onmessage: (e: LiveServerMessage) => void;
+ onerror?: ((e: ErrorEvent) => void) | null;
+ onclose?: ((e: CloseEvent) => void) | null;
+}
+
+/** Incremental update of the current conversation delivered from the client.
+
+ All the content here will unconditionally be appended to the conversation
+ history and used as part of the prompt to the model to generate content.
+
+ A message here will interrupt any current model generation.
+ */
+export declare interface LiveClientContent {
+ /** The content appended to the current conversation with the model.
+
+ For single-turn queries, this is a single instance. For multi-turn
+ queries, this is a repeated field that contains conversation history and
+ latest request.
+ */
+ turns?: Content[];
+ /** If true, indicates that the server content generation should start with
+ the currently accumulated prompt. Otherwise, the server will await
+ additional messages before starting generation. */
+ turnComplete?: boolean;
+}
+
+/** Messages sent by the client in the API call. */
+export declare interface LiveClientMessage {
+ /** Message to be sent by the system when connecting to the API. SDK users should not send this message. */
+ setup?: LiveClientSetup;
+ /** Incremental update of the current conversation delivered from the client. */
+ clientContent?: LiveClientContent;
+ /** User input that is sent in real time. */
+ realtimeInput?: LiveClientRealtimeInput;
+ /** Response to a `ToolCallMessage` received from the server. */
+ toolResponse?: LiveClientToolResponse;
+}
+
+/** User input that is sent in real time.
+
+ This is different from `ClientContentUpdate` in a few ways:
+
+ - Can be sent continuously without interruption to model generation.
+ - If there is a need to mix data interleaved across the
+ `ClientContentUpdate` and the `RealtimeUpdate`, server attempts to
+ optimize for best response, but there are no guarantees.
+ - End of turn is not explicitly specified, but is rather derived from user
+ activity (for example, end of speech).
+ - Even before the end of turn, the data is processed incrementally
+ to optimize for a fast start of the response from the model.
+ - Is always assumed to be the user's input (cannot be used to populate
+ conversation history).
+ */
+export declare interface LiveClientRealtimeInput {
+ /** Inlined bytes data for media input. */
+ mediaChunks?: Blob_2[];
+}
+
+/** Message contains configuration that will apply for the duration of the streaming session. */
+export declare interface LiveClientSetup {
+ /**
+ The fully qualified name of the publisher model or tuned model endpoint to
+ use.
+ */
+ model?: string;
+ /** The generation configuration for the session.
+
+ The following fields are supported:
+ - `response_logprobs`
+ - `response_mime_type`
+ - `logprobs`
+ - `response_schema`
+ - `stop_sequence`
+ - `routing_config`
+ - `audio_timestamp`
+ */
+ generationConfig?: GenerationConfig;
+ /** The user provided system instructions for the model.
+ Note: only text should be used in parts and content in each part will be
+ in a separate paragraph. */
+ systemInstruction?: Content;
+ /** A list of `Tools` the model may use to generate the next response.
+
+ A `Tool` is a piece of code that enables the system to interact with
+ external systems to perform an action, or set of actions, outside of
+ knowledge and scope of the model. */
+ tools?: ToolListUnion;
+}
+
+/** Client generated response to a `ToolCall` received from the server.
+
+ Individual `FunctionResponse` objects are matched to the respective
+ `FunctionCall` objects by the `id` field.
+
+ Note that in the unary and server-streaming GenerateContent APIs function
+ calling happens by exchanging the `Content` parts, while in the bidi
+ GenerateContent APIs function calling happens over this dedicated set of
+ messages.
+ */
+export declare class LiveClientToolResponse {
+ /** The response to the function calls. */
+ functionResponses?: FunctionResponse[];
+}
+
+/** Session config for the API connection. */
+export declare interface LiveConnectConfig {
+ /** The generation configuration for the session. */
+ generationConfig?: GenerationConfig;
+ /** The requested modalities of the response. Represents the set of
+ modalities that the model can return. Defaults to AUDIO if not specified.
+ */
+ responseModalities?: Modality[];
+ /** The speech generation configuration.
+ */
+ speechConfig?: SpeechConfig;
+ /** The user provided system instructions for the model.
+ Note: only text should be used in parts and content in each part will be
+ in a separate paragraph. */
+ systemInstruction?: Content;
+ /** A list of `Tools` the model may use to generate the next response.
+
+ A `Tool` is a piece of code that enables the system to interact with
+ external systems to perform an action, or set of actions, outside of
+ knowledge and scope of the model. */
+ tools?: ToolListUnion;
+}
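+
+/*
+ * A sketch of a live-session config using the fields above; it assumes the
+ * SDK's `Content` shape of `{role, parts}`, and the instruction text is
+ * illustrative:
+ *
+ * ```ts
+ * const config: LiveConnectConfig = {
+ *   responseModalities: [Modality.TEXT],
+ *   systemInstruction: {
+ *     role: 'user',
+ *     parts: [{text: 'You are a concise assistant.'}],
+ *   },
+ * };
+ * ```
+ */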
+
+/** Parameters for connecting to the live API. */
+export declare interface LiveConnectParameters {
+ /** ID of the model to use. For a list of models, see the Google models
+ documentation. */
+ model: string;
+ /** callbacks */
+ callbacks: LiveCallbacks;
+ /** Optional configuration parameters for the request.
+ */
+ config?: LiveConnectConfig;
+}
+
+/** Parameters for sending client content to the live API. */
+export declare interface LiveSendClientContentParameters {
+ /** Client content to send to the session. */
+ turns?: ContentListUnion;
+ /** If true, indicates that the server content generation should start with
+ the currently accumulated prompt. Otherwise, the server will await
+ additional messages before starting generation. */
+ turnComplete?: boolean;
+}
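+
+/*
+ * A minimal sketch of sending client content over an open session; it
+ * assumes a `session` returned by `ai.live.connect` and the
+ * `sendClientContent` method described in the {@link Session} remarks,
+ * which converts `turns` to `Content[]`:
+ *
+ * ```ts
+ * session.sendClientContent({
+ *   turns: 'Hello from the client.',
+ *   turnComplete: true,
+ * });
+ * ```
+ */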
+
+/** Parameters for sending realtime input to the live API. */
+export declare interface LiveSendRealtimeInputParameters {
+ /** Realtime input to send to the session. */
+ media: Blob_2;
+}
+
+/** Parameters for sending tool responses to the live API. */
+export declare class LiveSendToolResponseParameters {
+ /** Tool responses to send to the session. */
+ functionResponses: FunctionResponse[] | FunctionResponse;
+}
+
+/** Incremental server update generated by the model in response to client messages.
+
+ Content is generated as quickly as possible, and not in real time. Clients
+ may choose to buffer and play it out in real time.
+ */
+export declare interface LiveServerContent {
+ /** The content that the model has generated as part of the current conversation with the user. */
+ modelTurn?: Content;
+ /** If true, indicates that the model is done generating. Generation will only start in response to additional client messages. Can be set alongside `content`, indicating that the `content` is the last in the turn. */
+ turnComplete?: boolean;
+ /** If true, indicates that a client message has interrupted current model generation. If the client is playing out the content in realtime, this is a good signal to stop and empty the current queue. */
+ interrupted?: boolean;
+}
+
+/** Response message for API call. */
+export declare interface LiveServerMessage {
+ /** Sent in response to a `LiveClientSetup` message from the client. */
+ setupComplete?: LiveServerSetupComplete;
+ /** Content generated by the model in response to client messages. */
+ serverContent?: LiveServerContent;
+ /** Request for the client to execute the `function_calls` and return the responses with the matching `id`s. */
+ toolCall?: LiveServerToolCall;
+ /** Notification for the client that a previously issued `ToolCallMessage` with the specified `id`s should not have been executed and should be cancelled. */
+ toolCallCancellation?: LiveServerToolCallCancellation;
+}
+
+/** Sent in response to a `LiveGenerateContentSetup` message from the client. */
+export declare interface LiveServerSetupComplete {
+}
+
+/** Request for the client to execute the `function_calls` and return the responses with the matching `id`s. */
+export declare interface LiveServerToolCall {
+ /** The function call to be executed. */
+ functionCalls?: FunctionCall[];
+}
+
+/** Notification for the client that a previously issued `ToolCallMessage` with the specified `id`s should not have been executed and should be cancelled.
+
+ If there were side-effects to those tool calls, clients may attempt to undo
+ the tool calls. This message occurs only in cases where the clients interrupt
+ server turns.
+ */
+export declare interface LiveServerToolCallCancellation {
+ /** The ids of the tool calls to be cancelled. */
+ ids?: string[];
+}
+
+/** Logprobs Result */
+export declare interface LogprobsResult {
+ /** Length = total number of decoding steps. The chosen candidates may or may not be in top_candidates. */
+ chosenCandidates?: LogprobsResultCandidate[];
+ /** Length = total number of decoding steps. */
+ topCandidates?: LogprobsResultTopCandidates[];
+}
+
+/** Candidate for the logprobs token and score. */
+export declare interface LogprobsResultCandidate {
+ /** The candidate's log probability. */
+ logProbability?: number;
+ /** The candidate's token string value. */
+ token?: string;
+ /** The candidate's token id value. */
+ tokenId?: number;
+}
+
+/** Candidates with top log probabilities at each decoding step. */
+export declare interface LogprobsResultTopCandidates {
+ /** Sorted by log probability in descending order. */
+ candidates?: LogprobsResultCandidate[];
+}
+
+/** Configuration for a Mask reference image. */
+export declare interface MaskReferenceConfig {
+ /** Prompts the model to generate a mask instead of you needing to
+ provide one (unless MASK_MODE_USER_PROVIDED is used). */
+ maskMode?: MaskReferenceMode;
+ /** A list of up to 5 class ids to use for semantic segmentation.
+ Automatically creates an image mask based on specific objects. */
+ segmentationClasses?: number[];
+ /** Dilation percentage of the mask provided.
+ Float between 0 and 1. */
+ maskDilation?: number;
+}
+
+/** A mask reference image.
+
+ This encapsulates either a mask image provided by the user and configs for
+ the user provided mask, or only config parameters for the model to generate
+ a mask.
+
+ A mask image is an image whose non-zero values indicate where to edit the base
+ image. If the user provides a mask image, the mask must be in the same
+ dimensions as the raw image.
+ */
+export declare interface MaskReferenceImage {
+ /** The reference image for the editing operation. */
+ referenceImage?: Image_2;
+ /** The id of the reference image. */
+ referenceId?: number;
+ /** The type of the reference image. Only set by the SDK. */
+ referenceType?: string;
+ /** Configuration for the mask reference image. */
+ config?: MaskReferenceConfig;
+}
+
+export declare enum MaskReferenceMode {
+ MASK_MODE_DEFAULT = "MASK_MODE_DEFAULT",
+ MASK_MODE_USER_PROVIDED = "MASK_MODE_USER_PROVIDED",
+ MASK_MODE_BACKGROUND = "MASK_MODE_BACKGROUND",
+ MASK_MODE_FOREGROUND = "MASK_MODE_FOREGROUND",
+ MASK_MODE_SEMANTIC = "MASK_MODE_SEMANTIC"
+}
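+
+/*
+ * A sketch of a background-editing mask reference built from the two types
+ * above; per the config docs, no user-provided mask image is needed unless
+ * MASK_MODE_USER_PROVIDED is used:
+ *
+ * ```ts
+ * const maskRef: MaskReferenceImage = {
+ *   referenceId: 1,
+ *   config: {
+ *     maskMode: MaskReferenceMode.MASK_MODE_BACKGROUND,
+ *     maskDilation: 0.05,
+ *   },
+ * };
+ * ```
+ */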
+
+export declare enum MediaModality {
+ MODALITY_UNSPECIFIED = "MODALITY_UNSPECIFIED",
+ TEXT = "TEXT",
+ IMAGE = "IMAGE",
+ VIDEO = "VIDEO",
+ AUDIO = "AUDIO",
+ DOCUMENT = "DOCUMENT"
+}
+
+export declare enum MediaResolution {
+ MEDIA_RESOLUTION_UNSPECIFIED = "MEDIA_RESOLUTION_UNSPECIFIED",
+ MEDIA_RESOLUTION_LOW = "MEDIA_RESOLUTION_LOW",
+ MEDIA_RESOLUTION_MEDIUM = "MEDIA_RESOLUTION_MEDIUM",
+ MEDIA_RESOLUTION_HIGH = "MEDIA_RESOLUTION_HIGH"
+}
+
+export declare enum Modality {
+ MODALITY_UNSPECIFIED = "MODALITY_UNSPECIFIED",
+ TEXT = "TEXT",
+ IMAGE = "IMAGE",
+ AUDIO = "AUDIO"
+}
+
+/** Represents token counting info for a single modality. */
+export declare interface ModalityTokenCount {
+ /** The modality associated with this token count. */
+ modality?: MediaModality;
+ /** Number of tokens. */
+ tokenCount?: number;
+}
+
+export declare enum Mode {
+ MODE_UNSPECIFIED = "MODE_UNSPECIFIED",
+ MODE_DYNAMIC = "MODE_DYNAMIC"
+}
+
+export declare class Models extends BaseModule {
+ private readonly apiClient;
+ constructor(apiClient: ApiClient);
+ /**
+ * Makes an API request to generate content with a given model.
+ *
+ * For the `model` parameter, supported formats for Vertex AI API include:
+ * - The Gemini model ID, for example: 'gemini-2.0-flash'
+ * - The full resource name starts with 'projects/', for example:
+ * 'projects/my-project-id/locations/us-central1/publishers/google/models/gemini-2.0-flash'
+ * - The partial resource name with 'publishers/', for example:
+ * 'publishers/google/models/gemini-2.0-flash' or
+ * 'publishers/meta/models/llama-3.1-405b-instruct-maas'
+ * - `/` separated publisher and model name, for example:
+ * 'google/gemini-2.0-flash' or 'meta/llama-3.1-405b-instruct-maas'
+ *
+ * For the `model` parameter, supported formats for Gemini API include:
+ * - The Gemini model ID, for example: 'gemini-2.0-flash'
+ * - The model name starts with 'models/', for example:
+ * 'models/gemini-2.0-flash'
+ * - For tuned models, the model name starts with 'tunedModels/',
+ * for example:
+ * 'tunedModels/1234567890123456789'
+ *
+ * Some models support multimodal input and output.
+ *
+ * @param params - The parameters for generating content.
+ * @return The response from generating content.
+ *
+ * @example
+ * ```ts
+ * const response = await ai.models.generateContent({
+ * model: 'gemini-2.0-flash',
+ * contents: 'why is the sky blue?',
+ * config: {
+ * candidateCount: 2,
+ * }
+ * });
+ * console.log(response);
+ * ```
+ */
+ generateContent: (params: types.GenerateContentParameters) => Promise<types.GenerateContentResponse>;
+ /**
+ * Makes an API request to generate content with a given model and yields the
+ * response in chunks.
+ *
+ * For the `model` parameter, supported formats for Vertex AI API include:
+ * - The Gemini model ID, for example: 'gemini-2.0-flash'
+ * - The full resource name starts with 'projects/', for example:
+ * 'projects/my-project-id/locations/us-central1/publishers/google/models/gemini-2.0-flash'
+ * - The partial resource name with 'publishers/', for example:
+ * 'publishers/google/models/gemini-2.0-flash' or
+ * 'publishers/meta/models/llama-3.1-405b-instruct-maas'
+ * - `/` separated publisher and model name, for example:
+ * 'google/gemini-2.0-flash' or 'meta/llama-3.1-405b-instruct-maas'
+ *
+ * For the `model` parameter, supported formats for Gemini API include:
+ * - The Gemini model ID, for example: 'gemini-2.0-flash'
+ * - The model name starts with 'models/', for example:
+ * 'models/gemini-2.0-flash'
+ * - For tuned models, the model name starts with 'tunedModels/',
+ * for example:
+ * 'tunedModels/1234567890123456789'
+ *
+ * Some models support multimodal input and output.
+ *
+ * @param params - The parameters for generating content with streaming response.
+ * @return The response from generating content.
+ *
+ * @example
+ * ```ts
+ * const response = await ai.models.generateContentStream({
+ * model: 'gemini-2.0-flash',
+ * contents: 'why is the sky blue?',
+ * config: {
+ * maxOutputTokens: 200,
+ * }
+ * });
+ * for await (const chunk of response) {
+ * console.log(chunk);
+ * }
+ * ```
+ */
+ generateContentStream: (params: types.GenerateContentParameters) => Promise<AsyncGenerator<types.GenerateContentResponse>>;
+ /**
+ * Generates an image based on a text description and configuration.
+ *
+ * @param model - The model to use.
+ * @param prompt - A text description of the image to generate.
+ * @param [config] - The config for image generation.
+ * @return The response from the API.
+ *
+ * @example
+ * ```ts
+ * const response = await ai.models.generateImages({
+ * model: 'imagen-3.0-generate-002',
+ * prompt: 'Robot holding a red skateboard',
+ * config: {
+ * numberOfImages: 1,
+ * includeRaiReason: true,
+ * },
+ * });
+ * console.log(response?.generatedImages?.[0]?.image?.imageBytes);
+ * ```
+ */
+ generateImages: (params: types.GenerateImagesParameters) => Promise<types.GenerateImagesResponse>;
+ private generateContentInternal;
+ private generateContentStreamInternal;
+ /**
+ * Calculates embeddings for the given contents. Only text is supported.
+ *
+ * @param params - The parameters for embedding contents.
+ * @return The response from the API.
+ *
+ * @example
+ * ```ts
+ * const response = await ai.models.embedContent({
+ * model: 'text-embedding-004',
+ * contents: [
+ * 'What is your name?',
+ * 'What is your favorite color?',
+ * ],
+ * config: {
+ * outputDimensionality: 64,
+ * },
+ * });
+ * console.log(response);
+ * ```
+ */
+ embedContent(params: types.EmbedContentParameters): Promise<types.EmbedContentResponse>;
+ /**
+ * Generates an image based on a text description and configuration.
+ *
+ * @param params - The parameters for generating images.
+ * @return The response from the API.
+ *
+ * @example
+ * ```ts
+ * const response = await ai.models.generateImages({
+ * model: 'imagen-3.0-generate-002',
+ * prompt: 'Robot holding a red skateboard',
+ * config: {
+ * numberOfImages: 1,
+ * includeRaiReason: true,
+ * },
+ * });
+ * console.log(response?.generatedImages?.[0]?.image?.imageBytes);
+ * ```
+ */
+ private generateImagesInternal;
+ /**
+ * Counts the number of tokens in the given contents. Multimodal input is
+ * supported for Gemini models.
+ *
+ * @param params - The parameters for counting tokens.
+ * @return The response from the API.
+ *
+ * @example
+ * ```ts
+ * const response = await ai.models.countTokens({
+ * model: 'gemini-2.0-flash',
+ * contents: 'The quick brown fox jumps over the lazy dog.'
+ * });
+ * console.log(response);
+ * ```
+ */
+ countTokens(params: types.CountTokensParameters): Promise<types.CountTokensResponse>;
+ /**
+ * Given a list of contents, returns a corresponding TokensInfo containing
+ * the list of tokens and list of token ids.
+ *
+ * This method is not supported by the Gemini Developer API.
+ *
+ * @param params - The parameters for computing tokens.
+ * @return The response from the API.
+ *
+ * @example
+ * ```ts
+ * const response = await ai.models.computeTokens({
+ * model: 'gemini-2.0-flash',
+ * contents: 'What is your name?'
+ * });
+ * console.log(response);
+ * ```
+ */
+ computeTokens(params: types.ComputeTokensParameters): Promise<types.ComputeTokensResponse>;
+ /**
+ * Generates videos based on a text description and configuration.
+ *
+ * @param params - The parameters for generating videos.
+ * @return A Promise which allows you to track the progress and eventually retrieve the generated videos using the operations.get method.
+ *
+ * @example
+ * ```ts
+ * let operation = await ai.models.generateVideos({
+ *   model: 'veo-2.0-generate-001',
+ *   prompt: 'A neon hologram of a cat driving at top speed',
+ *   config: {
+ *     numberOfVideos: 1,
+ *   },
+ * });
+ *
+ * while (!operation.done) {
+ * await new Promise(resolve => setTimeout(resolve, 10000));
+ * operation = await ai.operations.get({operation: operation});
+ * }
+ *
+ * console.log(operation.result?.generatedVideos?.[0]?.video?.uri);
+ * ```
+ */
+ generateVideos(params: types.GenerateVideosParameters): Promise<types.GenerateVideosOperation>;
+}
+
+/** Parameters for the get method of the operations module. */
+export declare interface OperationGetParameters {
+ /** The operation to be retrieved. */
+ operation: GenerateVideosOperation;
+ /** Used to override the default configuration. */
+ config?: GetOperationConfig;
+}
+
+export declare class Operations extends BaseModule {
+ private readonly apiClient;
+ constructor(apiClient: ApiClient);
+ /**
+ * Gets the status of a long-running operation.
+ *
+ * @param operation The Operation object returned by a previous API call.
+ * @return The updated Operation object, with the latest status or result.
+ */
+ get(parameters: types.OperationGetParameters): Promise<types.GenerateVideosOperation>;
+ private getVideosOperationInternal;
+ private fetchPredictVideosOperationInternal;
+}
+
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+export declare enum Outcome {
+ OUTCOME_UNSPECIFIED = "OUTCOME_UNSPECIFIED",
+ OUTCOME_OK = "OUTCOME_OK",
+ OUTCOME_FAILED = "OUTCOME_FAILED",
+ OUTCOME_DEADLINE_EXCEEDED = "OUTCOME_DEADLINE_EXCEEDED"
+}
+
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+/**
+ * Pagers for the GenAI List APIs.
+ */
+export declare enum PagedItem {
+ PAGED_ITEM_BATCH_JOBS = "batchJobs",
+ PAGED_ITEM_MODELS = "models",
+ PAGED_ITEM_TUNING_JOBS = "tuningJobs",
+ PAGED_ITEM_FILES = "files",
+ PAGED_ITEM_CACHED_CONTENTS = "cachedContents"
+}
+
+declare interface PagedItemConfig {
+ config?: {
+ pageToken?: string;
+ pageSize?: number;
+ };
+}
+
+declare interface PagedItemResponse<T> {
+ nextPageToken?: string;
+ batchJobs?: T[];
+ models?: T[];
+ tuningJobs?: T[];
+ files?: T[];
+ cachedContents?: T[];
+}
+
+/**
+ * Pager class for iterating through paginated results.
+ */
+export declare class Pager<T> implements AsyncIterable<T> {
+ private nameInternal;
+ private pageInternal;
+ private paramsInternal;
+ private pageInternalSize;
+ protected requestInternal: (params: PagedItemConfig) => Promise<PagedItemResponse<T>>;
+ protected idxInternal: number;
+ constructor(name: PagedItem, request: (params: PagedItemConfig) => Promise<PagedItemResponse<T>>, response: PagedItemResponse<T>, params: PagedItemConfig);
+ private init;
+ private initNextPage;
+ /**
+ * Returns the current page, which is a list of items.
+ *
+ * @remarks
+ * The first page is retrieved when the pager is created. The returned list of
+ * items could be a subset of the entire list.
+ */
+ get page(): T[];
+ /**
+ * Returns the type of paged item (for example, ``batch_jobs``).
+ */
+ get name(): PagedItem;
+ /**
+ * Returns the length of the page fetched each time by this pager.
+ *
+ * @remarks
+ * The number of items in the page is less than or equal to the page length.
+ */
+ get pageSize(): number;
+ /**
+ * Returns the parameters when making the API request for the next page.
+ *
+ * @remarks
+ * Parameters contain a set of optional configs that can be
+ * used to customize the API request. For example, the `pageToken` parameter
+ * contains the token to request the next page.
+ */
+ get params(): PagedItemConfig;
+ /**
+ * Returns the total number of items in the current page.
+ */
+ get pageLength(): number;
+ /**
+ * Returns the item at the given index.
+ */
+ getItem(index: number): T;
+ /**
+ * Returns an async iterator that support iterating through all items
+ * retrieved from the API.
+ *
+ * @remarks
+ * The iterator will automatically fetch the next page if there are more items
+ * to fetch from the API.
+ *
+ * @example
+ *
+ * ```ts
+ * const pager = await ai.files.list({config: {pageSize: 10}});
+ * for await (const file of pager) {
+ * console.log(file.name);
+ * }
+ * ```
+ */
+ [Symbol.asyncIterator](): AsyncIterator<T>;
+ /**
+ * Fetches the next page of items. This makes a new API request.
+ *
+ * @throws {Error} If there are no more pages to fetch.
+ *
+ * @example
+ *
+ * ```ts
+ * const pager = await ai.files.list({config: {pageSize: 10}});
+ * let page = pager.page;
+ * while (true) {
+ * for (const file of page) {
+ * console.log(file.name);
+ * }
+ * if (!pager.hasNextPage()) {
+ * break;
+ * }
+ * page = await pager.nextPage();
+ * }
+ * ```
+ */
+ nextPage(): Promise<T[]>;
+ /**
+ * Returns true if there are more pages to fetch from the API.
+ */
+ hasNextPage(): boolean;
+}
+
+/** A datatype containing media content.
+
+ Exactly one field within a Part should be set, representing the specific type
+ of content being conveyed. Using multiple fields within the same `Part`
+ instance is considered invalid.
+ */
+export declare interface Part {
+ /** Metadata for a given video. */
+ videoMetadata?: VideoMetadata;
+ /** Indicates if the part is thought from the model. */
+ thought?: boolean;
+ /** Optional. Result of executing the [ExecutableCode]. */
+ codeExecutionResult?: CodeExecutionResult;
+ /** Optional. Code generated by the model that is meant to be executed. */
+ executableCode?: ExecutableCode;
+ /** Optional. URI based data. */
+ fileData?: FileData;
+ /** Optional. A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] with the parameters and their values. */
+ functionCall?: FunctionCall;
+ /** Optional. The result output of a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function call. It is used as context to the model. */
+ functionResponse?: FunctionResponse;
+ /** Optional. Inlined bytes data. */
+ inlineData?: Blob_2;
+ /** Optional. Text part (can be code). */
+ text?: string;
+}
+
+export declare type PartListUnion = PartUnion[] | PartUnion;
+
+export declare type PartUnion = Part | string;
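+
+/*
+ * Sketches of the `Part` shapes above; the inline-data example assumes the
+ * SDK's `Blob` shape of a base64 `data` string plus `mimeType`, and the
+ * base64 value is a placeholder:
+ *
+ * ```ts
+ * const textOnly: PartListUnion = 'Describe this image.';
+ * const mixed: PartListUnion = [
+ *   {text: 'Describe this image.'},
+ *   {inlineData: {mimeType: 'image/png', data: '<base64-bytes>'}},
+ * ];
+ * ```
+ */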
+
+export declare enum PersonGeneration {
+ DONT_ALLOW = "DONT_ALLOW",
+ ALLOW_ADULT = "ALLOW_ADULT",
+ ALLOW_ALL = "ALLOW_ALL"
+}
+
+/** The configuration for the prebuilt speaker to use. */
+export declare interface PrebuiltVoiceConfig {
+ /** The name of the prebuilt voice to use.
+ */
+ voiceName?: string;
+}
+
+/** A raw reference image.
+
+ A raw reference image represents the base image to edit, provided by the user.
+ It can optionally be provided in addition to a mask reference image or
+ a style reference image.
+ */
+export declare interface RawReferenceImage {
+ /** The reference image for the editing operation. */
+ referenceImage?: Image_2;
+ /** The id of the reference image. */
+ referenceId?: number;
+ /** The type of the reference image. Only set by the SDK. */
+ referenceType?: string;
+}
+
+/** Represents a recorded session. */
+export declare interface ReplayFile {
+ replayId?: string;
+ interactions?: ReplayInteraction[];
+}
+
+/** Represents a single interaction, request and response in a replay. */
+export declare interface ReplayInteraction {
+ request?: ReplayRequest;
+ response?: ReplayResponse;
+}
+
+/** Represents a single request in a replay. */
+export declare interface ReplayRequest {
+ method?: string;
+ url?: string;
+ headers?: Record<string, string>;
+ bodySegments?: Record<string, unknown>[];
+}
+
+/** Represents a single response in a replay. */
+export declare class ReplayResponse {
+ statusCode?: number;
+ headers?: Record<string, string>;
+ bodySegments?: Record<string, unknown>[];
+ sdkResponseSegments?: Record<string, unknown>[];
+}
+
+/** Defines a retrieval tool that model can call to access external knowledge. */
+export declare interface Retrieval {
+ /** Optional. Deprecated. This option is no longer supported. */
+ disableAttribution?: boolean;
+ /** Set to use data source powered by Vertex AI Search. */
+ vertexAiSearch?: VertexAISearch;
+ /** Set to use data source powered by Vertex RAG store. User data is uploaded via the VertexRagDataService. */
+ vertexRagStore?: VertexRagStore;
+}
+
+/** Metadata related to retrieval in the grounding flow. */
+export declare interface RetrievalMetadata {
+ /** Optional. Score indicating how likely information from Google Search could help answer the prompt. The score is in the range `[0, 1]`, where 0 is the least likely and 1 is the most likely. This score is only populated when Google Search grounding and dynamic retrieval is enabled. It will be compared to the threshold to determine whether to trigger Google Search. */
+ googleSearchDynamicRetrievalScore?: number;
+}
+
+/** Safety attributes of a GeneratedImage or the user-provided prompt. */
+export declare interface SafetyAttributes {
+ /** List of RAI categories.
+ */
+ categories?: string[];
+ /** List of scores for each category.
+ */
+ scores?: number[];
+ /** Internal use only.
+ */
+ contentType?: string;
+}
+
+export declare enum SafetyFilterLevel {
+ BLOCK_LOW_AND_ABOVE = "BLOCK_LOW_AND_ABOVE",
+ BLOCK_MEDIUM_AND_ABOVE = "BLOCK_MEDIUM_AND_ABOVE",
+ BLOCK_ONLY_HIGH = "BLOCK_ONLY_HIGH",
+ BLOCK_NONE = "BLOCK_NONE"
+}
+
+/** Safety rating corresponding to the generated content. */
+export declare interface SafetyRating {
+ /** Output only. Indicates whether the content was filtered out because of this rating. */
+ blocked?: boolean;
+ /** Output only. Harm category. */
+ category?: HarmCategory;
+ /** Output only. Harm probability levels in the content. */
+ probability?: HarmProbability;
+ /** Output only. Harm probability score. */
+ probabilityScore?: number;
+ /** Output only. Harm severity levels in the content. */
+ severity?: HarmSeverity;
+ /** Output only. Harm severity score. */
+ severityScore?: number;
+}
+
+/** Safety settings. */
+export declare interface SafetySetting {
+ /** Determines if the harm block method uses probability or probability
+ and severity scores. */
+ method?: HarmBlockMethod;
+ /** Required. Harm category. */
+ category?: HarmCategory;
+ /** Required. The harm block threshold. */
+ threshold?: HarmBlockThreshold;
+}
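+
+/*
+ * A sketch of per-request safety settings combining the enums above; the
+ * chosen categories and thresholds are illustrative:
+ *
+ * ```ts
+ * const safetySettings: SafetySetting[] = [
+ *   {
+ *     category: HarmCategory.HARM_CATEGORY_HARASSMENT,
+ *     threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
+ *   },
+ *   {
+ *     category: HarmCategory.HARM_CATEGORY_HATE_SPEECH,
+ *     threshold: HarmBlockThreshold.BLOCK_ONLY_HIGH,
+ *     method: HarmBlockMethod.SEVERITY,
+ *   },
+ * ];
+ * ```
+ */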
+
+/** Schema that defines the format of input and output data.
+
+ Represents a select subset of an OpenAPI 3.0 schema object.
+ */
+export declare interface Schema {
+ /** Optional. Example of the object. Will only be populated when the object is the root. */
+ example?: unknown;
+ /** Optional. Pattern of the Type.STRING to restrict a string to a regular expression. */
+ pattern?: string;
+ /** Optional. Default value of the data. */
+ default?: unknown;
+ /** Optional. Maximum length of the Type.STRING */
+ maxLength?: string;
+ /** Optional. SCHEMA FIELDS FOR TYPE STRING Minimum length of the Type.STRING */
+ minLength?: string;
+ /** Optional. Minimum number of the properties for Type.OBJECT. */
+ minProperties?: string;
+ /** Optional. Maximum number of the properties for Type.OBJECT. */
+ maxProperties?: string;
+ /** Optional. The value should be validated against any (one or more) of the subschemas in the list. */
+ anyOf?: Schema[];
+ /** Optional. The description of the data. */
+ description?: string;
+ /** Optional. Possible values of the element of primitive type with enum format. Examples: 1. We can define direction as : {type:STRING, format:enum, enum:["EAST", "NORTH", "SOUTH", "WEST"]} 2. We can define apartment number as : {type:INTEGER, format:enum, enum:["101", "201", "301"]} */
+ enum?: string[];
+ /** Optional. The format of the data. Supported formats: for NUMBER type: "float", "double" for INTEGER type: "int32", "int64" for STRING type: "email", "byte", etc */
+ format?: string;
+ /** Optional. SCHEMA FIELDS FOR TYPE ARRAY Schema of the elements of Type.ARRAY. */
+ items?: Schema;
+ /** Optional. Maximum number of the elements for Type.ARRAY. */
+ maxItems?: string;
+ /** Optional. Maximum value of the Type.INTEGER and Type.NUMBER */
+ maximum?: number;
+ /** Optional. Minimum number of the elements for Type.ARRAY. */
+ minItems?: string;
+ /** Optional. SCHEMA FIELDS FOR TYPE INTEGER and NUMBER Minimum value of the Type.INTEGER and Type.NUMBER */
+ minimum?: number;
+ /** Optional. Indicates if the value may be null. */
+ nullable?: boolean;
+ /** Optional. SCHEMA FIELDS FOR TYPE OBJECT Properties of Type.OBJECT. */
+ properties?: Record<string, Schema>;
+ /** Optional. The order of the properties. Not a standard field in open api spec. Only used to support the order of the properties. */
+ propertyOrdering?: string[];
+ /** Optional. Required properties of Type.OBJECT. */
+ required?: string[];
+ /** Optional. The title of the Schema. */
+ title?: string;
+ /** Optional. The type of the data. */
+ type?: Type;
+}
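+
+/*
+ A minimal sketch of a Schema value (field values are illustrative, not part
+ of the generated API surface): a JSON object with one required string
+ property and an optional nullable integer.
+
+ ```ts
+ const recipeSchema: Schema = {
+   type: Type.OBJECT,
+   properties: {
+     name: {type: Type.STRING, description: 'The recipe name.'},
+     servings: {type: Type.INTEGER, nullable: true},
+   },
+   required: ['name'],
+ };
+ ```
+*/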
+
+export declare type SchemaUnion = Schema;
+
+/** Google search entry point. */
+export declare interface SearchEntryPoint {
+ /** Optional. Web content snippet that can be embedded in a web page or an app webview. */
+ renderedContent?: string;
+ /** Optional. Base64-encoded JSON representing an array of tuples. */
+ sdkBlob?: string;
+}
+
+/** Segment of the content. */
+export declare interface Segment {
+ /** Output only. End index in the given Part, measured in bytes. Offset from the start of the Part, exclusive, starting at zero. */
+ endIndex?: number;
+ /** Output only. The index of a Part object within its parent Content object. */
+ partIndex?: number;
+ /** Output only. Start index in the given Part, measured in bytes. Offset from the start of the Part, inclusive, starting at zero. */
+ startIndex?: number;
+ /** Output only. The text corresponding to the segment from the response. */
+ text?: string;
+}
+
+/** Parameters for sending a message within a chat session.
+
+ These parameters are used with the `chat.sendMessage()` method.
+ */
+export declare interface SendMessageParameters {
+ /** The message to send to the model.
+
+ The SDK will combine all parts into a single 'user' content to send to
+ the model.
+ */
+ message: PartListUnion;
+ /** Config for this specific request.
+
+ Please note that the per-request config does not change the chat level
+ config, nor inherit from it. If you intend to use some values from the
+ chat's default config, you must explicitly copy them into this per-request
+ config.
+ */
+ config?: GenerateContentConfig;
+}
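+
+/*
+ A minimal usage sketch (the `chat` object and config values are illustrative
+ assumptions): because the per-request config neither changes nor inherits
+ from the chat-level config, re-specify any chat defaults you still want.
+
+ ```ts
+ const response = await chat.sendMessage({
+   message: 'Summarize the meeting notes.',
+   // Explicit copy of values from the chat's default config:
+   config: {temperature: 0.2, maxOutputTokens: 256},
+ });
+ ```
+*/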
+
+/**
+ Represents a connection to the API.
+
+ @experimental
+ */
+export declare class Session {
+ readonly conn: WebSocket_2;
+ private readonly apiClient;
+ constructor(conn: WebSocket_2, apiClient: ApiClient);
+ private tLiveClientContent;
+ private tLiveClientRealtimeInput;
+ private tLiveClienttToolResponse;
+ /**
+ Send a message over the established connection.
+
+ @param params - Contains two **optional** properties, `turns` and
+ `turnComplete`.
+
+ - `turns` will be converted to a `Content[]`
+ - `turnComplete: true` [default] indicates that you are done sending
+ content and expect a response. If `turnComplete: false`, the server
+ will wait for additional messages before starting generation.
+
+ @experimental
+
+ @remarks
+ There are two ways to send messages to the live API:
+ `sendClientContent` and `sendRealtimeInput`.
+
+ `sendClientContent` messages are added to the model context **in order**.
+ Having a conversation using `sendClientContent` messages is roughly
+ equivalent to using `Chat.sendMessageStream`, except that the state of
+ the `chat` history is stored on the API server instead of locally.
+
+ Because of `sendClientContent`'s order guarantee, the model cannot respond
+ as quickly to `sendClientContent` messages as to `sendRealtimeInput`
+ messages. This makes the biggest difference when sending objects that have
+ significant preprocessing time (typically images).
+
+ The `sendClientContent` message sends a `Content[]`
+ which has more options than the `Blob` sent by `sendRealtimeInput`.
+
+ So the main use-cases for `sendClientContent` over `sendRealtimeInput` are:
+
+ - Sending anything that can't be represented as a `Blob` (text,
+ `sendClientContent({turns: "Hello?"})`).
+ - Managing turns when not using audio input and voice activity detection.
+ (`sendClientContent({turnComplete:true})` or the short form
+ `sendClientContent()`)
+ - Prefilling a conversation context
+ ```
+ sendClientContent({
+ turns: [
+ Content({role:user, parts:...}),
+ Content({role:user, parts:...}),
+ ...
+ ]
+ })
+ ```
+ @experimental
+ */
+ sendClientContent(params: types.LiveSendClientContentParameters): void;
+ /**
+ Send a realtime message over the established connection.
+
+ @param params - Contains one property, `media`.
+
+ - `media` will be converted to a `Blob`
+
+ @experimental
+
+ @remarks
+ Use `sendRealtimeInput` for realtime audio chunks and video frames (images).
+
+ With `sendRealtimeInput` the API will respond to audio automatically
+ based on voice activity detection (VAD).
+
+ `sendRealtimeInput` is optimized for responsiveness at the expense of
+ deterministic ordering guarantees. Audio and video tokens are added to the
+ context as they become available.
+
+ Note: the call signature expects a `Blob` object, but only a subset
+ of audio and image MIME types is allowed.
+ */
+ sendRealtimeInput(params: types.LiveSendRealtimeInputParameters): void;
+ /**
+ Send a function response message over the established connection.
+
+ @param params - Contains property `functionResponses`.
+
+ - `functionResponses` will be converted to a `FunctionResponse[]`
+
+ @remarks
+ Use `sendToolResponse` to reply to a `LiveServerToolCall` from the server.
+
+ Use {@link types.LiveConnectConfig#tools} to configure the callable functions.
+
+ @experimental
+ */
+ sendToolResponse(params: types.LiveSendToolResponseParameters): void;
+ /**
+ Terminates the WebSocket connection.
+
+ @experimental
+
+ @example
+ ```ts
+ const session = await ai.live.connect({
+ model: 'gemini-2.0-flash-exp',
+ config: {
+ responseModalities: [Modality.AUDIO],
+ }
+ });
+
+ session.close();
+ ```
+ */
+ close(): void;
+}
+
+declare function setValueByPath(data: Record<string, unknown>, keys: string[], value: unknown): void;
+
+/** The speech generation configuration. */
+export declare interface SpeechConfig {
+ /** The configuration for the speaker to use.
+ */
+ voiceConfig?: VoiceConfig;
+}
+
+export declare type SpeechConfigUnion = SpeechConfig | string;
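+
+/*
+ Both forms of SpeechConfigUnion below are equivalent; the SDK expands a bare
+ string into a prebuilt voice config (the voice name is illustrative):
+
+ ```ts
+ const short: SpeechConfigUnion = 'Kore';
+ const full: SpeechConfigUnion = {
+   voiceConfig: {prebuiltVoiceConfig: {voiceName: 'Kore'}},
+ };
+ ```
+*/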
+
+export declare enum State {
+ STATE_UNSPECIFIED = "STATE_UNSPECIFIED",
+ ACTIVE = "ACTIVE",
+ ERROR = "ERROR"
+}
+
+/** Configuration for a Style reference image. */
+export declare interface StyleReferenceConfig {
+ /** A text description of the style to use for the generated image. */
+ styleDescription?: string;
+}
+
+/** A style reference image.
+
+ This encapsulates a style reference image provided by the user, and
+ additionally optional config parameters for the style reference image.
+
+ A raw reference image can also be provided as a destination for the style to
+ be applied to.
+ */
+export declare interface StyleReferenceImage {
+ /** The reference image for the editing operation. */
+ referenceImage?: Image_2;
+ /** The id of the reference image. */
+ referenceId?: number;
+ /** The type of the reference image. Only set by the SDK. */
+ referenceType?: string;
+ /** Configuration for the style reference image. */
+ config?: StyleReferenceConfig;
+}
+
+/** Configuration for a Subject reference image. */
+export declare interface SubjectReferenceConfig {
+ /** The subject type of a subject reference image. */
+ subjectType?: SubjectReferenceType;
+ /** Subject description for the image. */
+ subjectDescription?: string;
+}
+
+/** A subject reference image.
+
+ This encapsulates a subject reference image provided by the user, and
+ additionally optional config parameters for the subject reference image.
+
+ A raw reference image can also be provided as a destination for the subject to
+ be applied to.
+ */
+export declare interface SubjectReferenceImage {
+ /** The reference image for the editing operation. */
+ referenceImage?: Image_2;
+ /** The id of the reference image. */
+ referenceId?: number;
+ /** The type of the reference image. Only set by the SDK. */
+ referenceType?: string;
+ /** Configuration for the subject reference image. */
+ config?: SubjectReferenceConfig;
+}
+
+export declare enum SubjectReferenceType {
+ SUBJECT_TYPE_DEFAULT = "SUBJECT_TYPE_DEFAULT",
+ SUBJECT_TYPE_PERSON = "SUBJECT_TYPE_PERSON",
+ SUBJECT_TYPE_ANIMAL = "SUBJECT_TYPE_ANIMAL",
+ SUBJECT_TYPE_PRODUCT = "SUBJECT_TYPE_PRODUCT"
+}
+
+export declare interface TestTableFile {
+ comment?: string;
+ testMethod?: string;
+ parameterNames?: string[];
+ testTable?: TestTableItem[];
+}
+
+export declare interface TestTableItem {
+ /** The name of the test. This is used to derive the replay id. */
+ name?: string;
+ /** The parameters to the test. Use pydantic models. */
+ parameters?: Record<string, unknown>;
+ /** Expects an exception for MLDev matching the string. */
+ exceptionIfMldev?: string;
+ /** Expects an exception for Vertex matching the string. */
+ exceptionIfVertex?: string;
+ /** Use if you don't want to use the default replay id which is derived from the test name. */
+ overrideReplayId?: string;
+ /** True if the parameters contain an unsupported union type. This test will be skipped for languages that do not support the union type. */
+ hasUnion?: boolean;
+ /** When set to a reason string, this test will be skipped in the API mode. Use this flag for tests that can not be reproduced with the real API. E.g. a test that deletes a resource. */
+ skipInApiMode?: string;
+ /** Keys to ignore when comparing the request and response. This is useful for tests that are not deterministic. */
+ ignoreKeys?: string[];
+}
+
+/** The thinking features configuration. */
+export declare interface ThinkingConfig {
+ /** Indicates whether to include thoughts in the response. If true, thoughts are returned only if the model supports thought and thoughts are available.
+ */
+ includeThoughts?: boolean;
+}
+
+/** Tokens info with a list of tokens and the corresponding list of token ids. */
+export declare interface TokensInfo {
+ /** Optional. The role from the corresponding Content. */
+ role?: string;
+ /** A list of token ids from the input. */
+ tokenIds?: string[];
+ /** A list of tokens from the input. */
+ tokens?: string[];
+}
+
+/** Tool details of a tool that the model may use to generate a response. */
+export declare interface Tool {
+ /** List of function declarations that the tool supports. */
+ functionDeclarations?: FunctionDeclaration[];
+ /** Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. */
+ retrieval?: Retrieval;
+ /** Optional. Google Search tool type. Specialized retrieval tool
+ that is powered by Google Search. */
+ googleSearch?: GoogleSearch;
+ /** Optional. GoogleSearchRetrieval tool type. Specialized retrieval tool that is powered by Google search. */
+ googleSearchRetrieval?: GoogleSearchRetrieval;
+ /** Optional. CodeExecution tool type. Enables the model to execute code as part of generation. This field is only used by the Gemini Developer API services. */
+ codeExecution?: ToolCodeExecution;
+}
+
+/** Tool that executes code generated by the model, and automatically returns the result to the model. See also [ExecutableCode] and [CodeExecutionResult], which are the input and output of this tool. */
+export declare interface ToolCodeExecution {
+}
+
+/** Tool config.
+
+ This config is shared for all tools provided in the request.
+ */
+export declare interface ToolConfig {
+ /** Optional. Function calling config. */
+ functionCallingConfig?: FunctionCallingConfig;
+}
+
+export declare type ToolListUnion = Tool[];
+
+export declare enum Type {
+ TYPE_UNSPECIFIED = "TYPE_UNSPECIFIED",
+ STRING = "STRING",
+ NUMBER = "NUMBER",
+ INTEGER = "INTEGER",
+ BOOLEAN = "BOOLEAN",
+ ARRAY = "ARRAY",
+ OBJECT = "OBJECT"
+}
+
+declare namespace types {
+ export {
+ createPartFromUri,
+ createPartFromText,
+ createPartFromFunctionCall,
+ createPartFromFunctionResponse,
+ createPartFromBase64,
+ createPartFromCodeExecutionResult,
+ createPartFromExecutableCode,
+ createUserContent,
+ createModelContent,
+ Outcome,
+ Language,
+ Type,
+ HarmCategory,
+ HarmBlockMethod,
+ HarmBlockThreshold,
+ Mode,
+ FinishReason,
+ HarmProbability,
+ HarmSeverity,
+ BlockedReason,
+ Modality,
+ State,
+ DynamicRetrievalConfigMode,
+ FunctionCallingConfigMode,
+ MediaResolution,
+ SafetyFilterLevel,
+ PersonGeneration,
+ ImagePromptLanguage,
+ FileState,
+ FileSource,
+ MaskReferenceMode,
+ ControlReferenceType,
+ SubjectReferenceType,
+ MediaModality,
+ VideoMetadata,
+ CodeExecutionResult,
+ ExecutableCode,
+ FileData,
+ FunctionCall,
+ FunctionResponse,
+ Blob_2 as Blob,
+ Part,
+ Content,
+ HttpOptions,
+ Schema,
+ SafetySetting,
+ FunctionDeclaration,
+ GoogleSearch,
+ DynamicRetrievalConfig,
+ GoogleSearchRetrieval,
+ VertexAISearch,
+ VertexRagStoreRagResource,
+ VertexRagStore,
+ Retrieval,
+ ToolCodeExecution,
+ Tool,
+ FunctionCallingConfig,
+ ToolConfig,
+ PrebuiltVoiceConfig,
+ VoiceConfig,
+ SpeechConfig,
+ ThinkingConfig,
+ GenerationConfigRoutingConfigAutoRoutingMode,
+ GenerationConfigRoutingConfigManualRoutingMode,
+ GenerationConfigRoutingConfig,
+ GenerateContentConfig,
+ GenerateContentParameters,
+ GoogleTypeDate,
+ Citation,
+ CitationMetadata,
+ GroundingChunkRetrievedContext,
+ GroundingChunkWeb,
+ GroundingChunk,
+ Segment,
+ GroundingSupport,
+ RetrievalMetadata,
+ SearchEntryPoint,
+ GroundingMetadata,
+ LogprobsResultCandidate,
+ LogprobsResultTopCandidates,
+ LogprobsResult,
+ SafetyRating,
+ Candidate,
+ GenerateContentResponsePromptFeedback,
+ ModalityTokenCount,
+ GenerateContentResponseUsageMetadata,
+ GenerateContentResponse,
+ EmbedContentConfig,
+ EmbedContentParameters,
+ ContentEmbeddingStatistics,
+ ContentEmbedding,
+ EmbedContentMetadata,
+ EmbedContentResponse,
+ GenerateImagesConfig,
+ GenerateImagesParameters,
+ Image_2 as Image,
+ SafetyAttributes,
+ GeneratedImage,
+ GenerateImagesResponse,
+ GenerationConfig,
+ CountTokensConfig,
+ CountTokensParameters,
+ CountTokensResponse,
+ ComputeTokensConfig,
+ ComputeTokensParameters,
+ TokensInfo,
+ ComputeTokensResponse,
+ GenerateVideosConfig,
+ GenerateVideosParameters,
+ Video,
+ GeneratedVideo,
+ GenerateVideosResponse,
+ GenerateVideosOperation,
+ CreateCachedContentConfig,
+ CreateCachedContentParameters,
+ CachedContentUsageMetadata,
+ CachedContent,
+ GetCachedContentConfig,
+ GetCachedContentParameters,
+ DeleteCachedContentConfig,
+ DeleteCachedContentParameters,
+ DeleteCachedContentResponse,
+ UpdateCachedContentConfig,
+ UpdateCachedContentParameters,
+ ListCachedContentsConfig,
+ ListCachedContentsParameters,
+ ListCachedContentsResponse,
+ ListFilesConfig,
+ ListFilesParameters,
+ FileStatus,
+ File_2 as File,
+ ListFilesResponse,
+ CreateFileConfig,
+ CreateFileParameters,
+ HttpResponse,
+ LiveCallbacks,
+ CreateFileResponse,
+ GetFileConfig,
+ GetFileParameters,
+ DeleteFileConfig,
+ DeleteFileParameters,
+ DeleteFileResponse,
+ GetOperationConfig,
+ GetOperationParameters,
+ FetchPredictOperationConfig,
+ FetchPredictOperationParameters,
+ TestTableItem,
+ TestTableFile,
+ ReplayRequest,
+ ReplayResponse,
+ ReplayInteraction,
+ ReplayFile,
+ UploadFileConfig,
+ DownloadFileConfig,
+ UpscaleImageConfig,
+ UpscaleImageParameters,
+ RawReferenceImage,
+ MaskReferenceConfig,
+ MaskReferenceImage,
+ ControlReferenceConfig,
+ ControlReferenceImage,
+ StyleReferenceConfig,
+ StyleReferenceImage,
+ SubjectReferenceConfig,
+ SubjectReferenceImage,
+ LiveServerSetupComplete,
+ LiveServerContent,
+ LiveServerToolCall,
+ LiveServerToolCallCancellation,
+ LiveServerMessage,
+ LiveClientSetup,
+ LiveClientContent,
+ LiveClientRealtimeInput,
+ LiveClientToolResponse,
+ LiveClientMessage,
+ LiveConnectConfig,
+ LiveConnectParameters,
+ CreateChatParameters,
+ SendMessageParameters,
+ LiveSendClientContentParameters,
+ LiveSendRealtimeInputParameters,
+ LiveSendToolResponseParameters,
+ OperationGetParameters,
+ PartUnion,
+ PartListUnion,
+ ContentUnion,
+ ContentListUnion,
+ SchemaUnion,
+ SpeechConfigUnion,
+ ToolListUnion
+ }
+}
+
+/** Optional parameters for caches.update method. */
+export declare interface UpdateCachedContentConfig {
+ /** Used to override HTTP request options. */
+ httpOptions?: HttpOptions;
+ /** The TTL for this resource. The expiration time is computed: now + TTL. It is a duration string, with up to nine fractional digits, terminated by 's'. Example: "3.5s". */
+ ttl?: string;
+ /** Timestamp of when this resource is considered expired. Uses RFC 3339 format. Example: 2014-10-02T15:01:23Z. */
+ expireTime?: string;
+}
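+
+/*
+ A sketch of the two ways to set expiration (values are illustrative): a
+ relative TTL duration string, or an absolute RFC 3339 timestamp.
+
+ ```ts
+ const byTtl: UpdateCachedContentConfig = {ttl: '3600s'};
+ const byTime: UpdateCachedContentConfig = {expireTime: '2025-01-01T00:00:00Z'};
+ ```
+*/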
+
+export declare interface UpdateCachedContentParameters {
+ /** The server-generated resource name of the cached content.
+ */
+ name: string;
+ /** Configuration that contains optional parameters.
+ */
+ config?: UpdateCachedContentConfig;
+}
+
+declare interface Uploader {
+ /**
+ * Uploads a file to the given upload url.
+ *
+ * @param file The file to upload, as a string path or a Blob.
+ * @param uploadUrl The URL to upload the file to, as a string. It must be
+ * a URL returned by the
+ * https://generativelanguage.googleapis.com/upload/v1beta/files endpoint.
+ * @param apiClient The ApiClient to use for uploading.
+ * @return A Promise that resolves to types.File.
+ */
+ upload(file: string | Blob, uploadUrl: string, apiClient: ApiClient): Promise<File_2>;
+ /**
+ * Returns the file's mimeType and the size of a given file. If the file is a
+ * string path, the file type is determined by the file extension. If the
+ * file's type cannot be determined, the type will be set to undefined.
+ *
+ * @param file The file to get the stat for. Can be a string path or a Blob.
+ * @return A Promise that resolves to the file stat of the given file.
+ */
+ stat(file: string | Blob): Promise<FileStat>;
+}
+
+/** Used to override the default configuration. */
+export declare interface UploadFileConfig {
+ /** Used to override HTTP request options. */
+ httpOptions?: HttpOptions;
+ /** The name of the file in the destination (e.g., 'files/sample-image'). If not provided, one will be generated. */
+ name?: string;
+ /** The MIME type of the file. If not provided, it will be inferred from the file extension. */
+ mimeType?: string;
+ /** Optional display name of the file. */
+ displayName?: string;
+}
+
+/** Parameters for the upload file method. */
+declare interface UploadFileParameters {
+ /** The string path to the file to be uploaded or a Blob object. */
+ file: string | Blob;
+ /** Configuration that contains optional parameters. */
+ config?: UploadFileConfig;
+}
+
+/** Configuration for upscaling an image.
+
+ For more information on this configuration, refer to
+ the Imagen API reference documentation.
+ */
+export declare interface UpscaleImageConfig {
+ /** Used to override HTTP request options. */
+ httpOptions?: HttpOptions;
+ /** Whether to include a reason for filtered-out images in the
+ response. */
+ includeRaiReason?: boolean;
+ /** The image format that the output should be saved as. */
+ outputMimeType?: string;
+ /** The level of compression if the ``output_mime_type`` is
+ ``image/jpeg``. */
+ outputCompressionQuality?: number;
+}
+
+/** User-facing config UpscaleImageParameters. */
+export declare interface UpscaleImageParameters {
+ /** The model to use. */
+ model: string;
+ /** The input image to upscale. */
+ image: Image_2;
+ /** The factor to upscale the image (x2 or x4). */
+ upscaleFactor: string;
+ /** Configuration for upscaling. */
+ config?: UpscaleImageConfig;
+}
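+
+/*
+ A minimal parameters sketch (the model name is a hypothetical assumption,
+ for illustration only; `upscaleFactor` must be 'x2' or 'x4' per the field
+ doc above):
+
+ ```ts
+ declare const inputImage: Image; // e.g. from a previous generateImages call
+ const params: UpscaleImageParameters = {
+   model: 'imagen-3.0-capability-001', // hypothetical model id
+   image: inputImage,
+   upscaleFactor: 'x2',
+ };
+ ```
+*/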
+
+/** Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder */
+export declare interface VertexAISearch {
+ /** Optional. Fully-qualified Vertex AI Search data store resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` */
+ datastore?: string;
+ /** Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` */
+ engine?: string;
+}
+
+/** Retrieve from Vertex RAG Store for grounding. */
+export declare interface VertexRagStore {
+ /** Optional. Deprecated. Please use rag_resources instead. */
+ ragCorpora?: string[];
+ /** Optional. The representation of the RAG source. It can be used to specify a corpus only, or RAG files. Currently it supports only one corpus, or multiple files from one corpus. In the future we may open up support for multiple corpora. */
+ ragResources?: VertexRagStoreRagResource[];
+ /** Optional. Number of top k results to return from the selected corpora. */
+ similarityTopK?: number;
+ /** Optional. Only return results with vector distance smaller than the threshold. */
+ vectorDistanceThreshold?: number;
+}
+
+/** The definition of the Rag resource. */
+export declare interface VertexRagStoreRagResource {
+ /** Optional. RagCorpora resource name. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}` */
+ ragCorpus?: string;
+ /** Optional. RAG file IDs. The files must be in the corpus set in the ragCorpus field. */
+ ragFileIds?: string[];
+}
+
+/** A generated video. */
+export declare interface Video {
+ /** URI where the video is stored. */
+ uri?: string;
+ /** Video bytes. */
+ videoBytes?: string;
+ /** Video encoding, for example "video/mp4". */
+ mimeType?: string;
+}
+
+/** Metadata describing the input video content. */
+export declare interface VideoMetadata {
+ /** Optional. The end offset of the video. */
+ endOffset?: string;
+ /** Optional. The start offset of the video. */
+ startOffset?: string;
+}
+
+/** The configuration for the voice to use. */
+export declare interface VoiceConfig {
+ /** The configuration for the speaker to use.
+ */
+ prebuiltVoiceConfig?: PrebuiltVoiceConfig;
+}
+
+declare interface WebSocket_2 {
+ /**
+ * Connects the socket to the server.
+ */
+ connect(): void;
+ /**
+ * Sends a message to the server.
+ */
+ send(message: string): void;
+ /**
+ * Closes the socket connection.
+ */
+ close(): void;
+}
+
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+declare interface WebSocketCallbacks {
+ onopen: () => void;
+ onerror: (e: any) => void;
+ onmessage: (e: any) => void;
+ onclose: (e: any) => void;
+}
+
+declare interface WebSocketFactory {
+ /**
+ * Returns a new WebSocket instance.
+ */
+ create(url: string, headers: Record<string, string>, callbacks: WebSocketCallbacks): WebSocket_2;
+}
+
+export { }
diff --git a/node_modules/@google/genai/dist/index.js b/node_modules/@google/genai/dist/index.js
new file mode 100644
index 0000000..7f6e773
--- /dev/null
+++ b/node_modules/@google/genai/dist/index.js
@@ -0,0 +1,7890 @@
+'use strict';
+
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+class BaseModule {
+}
+function formatMap(templateString, valueMap) {
+ // Use a regular expression to find all placeholders in the template string
+ const regex = /\{([^}]+)\}/g;
+ // Replace each placeholder with its corresponding value from the valueMap
+ return templateString.replace(regex, (match, key) => {
+ if (Object.prototype.hasOwnProperty.call(valueMap, key)) {
+ const value = valueMap[key];
+ // Convert the value to a string if it's not a string already
+ return value !== undefined && value !== null ? String(value) : '';
+ }
+ else {
+ // Handle missing keys
+ throw new Error(`Key '${key}' not found in valueMap.`);
+ }
+ });
+}
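+// For illustration: formatMap('{model}:generateContent', {model: 'models/gemini-2.0-flash'})
+// returns 'models/gemini-2.0-flash:generateContent'. A placeholder whose key is
+// missing from valueMap throws; a null/undefined value renders as ''.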
+function setValueByPath(data, keys, value) {
+ for (let i = 0; i < keys.length - 1; i++) {
+ const key = keys[i];
+ if (key.endsWith('[]')) {
+ const keyName = key.slice(0, -2);
+ if (!(keyName in data)) {
+ if (Array.isArray(value)) {
+ data[keyName] = Array.from({ length: value.length }, () => ({}));
+ }
+ else {
+ throw new Error(`Value must be a list given an array path ${key}`);
+ }
+ }
+ if (Array.isArray(data[keyName])) {
+ const arrayData = data[keyName];
+ if (Array.isArray(value)) {
+ for (let j = 0; j < arrayData.length; j++) {
+ const entry = arrayData[j];
+ setValueByPath(entry, keys.slice(i + 1), value[j]);
+ }
+ }
+ else {
+ for (const d of arrayData) {
+ setValueByPath(d, keys.slice(i + 1), value);
+ }
+ }
+ }
+ return;
+ }
+ else if (key.endsWith('[0]')) {
+ const keyName = key.slice(0, -3);
+ if (!(keyName in data)) {
+ data[keyName] = [{}];
+ }
+ const arrayData = data[keyName];
+ setValueByPath(arrayData[0], keys.slice(i + 1), value);
+ return;
+ }
+ if (!data[key] || typeof data[key] !== 'object') {
+ data[key] = {};
+ }
+ data = data[key];
+ }
+ const keyToSet = keys[keys.length - 1];
+ const existingData = data[keyToSet];
+ if (existingData !== undefined) {
+ if (!value ||
+ (typeof value === 'object' && Object.keys(value).length === 0)) {
+ return;
+ }
+ if (value === existingData) {
+ return;
+ }
+ if (typeof existingData === 'object' &&
+ typeof value === 'object' &&
+ existingData !== null &&
+ value !== null) {
+ Object.assign(existingData, value);
+ }
+ else {
+ throw new Error(`Cannot set value for an existing key. Key: ${keyToSet}`);
+ }
+ }
+ else {
+ data[keyToSet] = value;
+ }
+}
+function getValueByPath(data, keys) {
+ try {
+ if (keys.length === 1 && keys[0] === '_self') {
+ return data;
+ }
+ for (let i = 0; i < keys.length; i++) {
+ if (typeof data !== 'object' || data === null) {
+ return undefined;
+ }
+ const key = keys[i];
+ if (key.endsWith('[]')) {
+ const keyName = key.slice(0, -2);
+ if (keyName in data) {
+ const arrayData = data[keyName];
+ if (!Array.isArray(arrayData)) {
+ return undefined;
+ }
+ return arrayData.map((d) => getValueByPath(d, keys.slice(i + 1)));
+ }
+ else {
+ return undefined;
+ }
+ }
+ else {
+ data = data[key];
+ }
+ }
+ return data;
+ }
+ catch (error) {
+ if (error instanceof TypeError) {
+ return undefined;
+ }
+ throw error;
+ }
+}
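+// Path semantics shared by setValueByPath/getValueByPath, for illustration:
+//   setValueByPath(d, ['a', 'b'], 1)            -> d.a.b === 1 (intermediate objects created)
+//   setValueByPath(d, ['items[]', 'x'], [1, 2]) -> d.items = [{x: 1}, {x: 2}]
+//   getValueByPath({a: {b: 1}}, ['a', 'b'])     -> 1 (missing paths return undefined)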
+
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+function tModel(apiClient, model) {
+ if (!model || typeof model !== 'string') {
+ throw new Error('model is required and must be a string');
+ }
+ if (apiClient.isVertexAI()) {
+ if (model.startsWith('publishers/') ||
+ model.startsWith('projects/') ||
+ model.startsWith('models/')) {
+ return model;
+ }
+ else if (model.indexOf('/') >= 0) {
+ const parts = model.split('/', 2);
+ return `publishers/${parts[0]}/models/${parts[1]}`;
+ }
+ else {
+ return `publishers/google/models/${model}`;
+ }
+ }
+ else {
+ if (model.startsWith('models/') || model.startsWith('tunedModels/')) {
+ return model;
+ }
+ else {
+ return `models/${model}`;
+ }
+ }
+}
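+// Illustrative tModel transforms:
+//   Gemini API: 'gemini-2.0-flash'     -> 'models/gemini-2.0-flash'
+//               'tunedModels/my-model' -> unchanged
+//   Vertex AI:  'gemini-2.0-flash'     -> 'publishers/google/models/gemini-2.0-flash'
+//               'meta/llama-3'         -> 'publishers/meta/models/llama-3'
+//               'projects/p/locations/l/publishers/google/models/m' -> unchanged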
+function tCachesModel(apiClient, model) {
+ const transformedModel = tModel(apiClient, model);
+ if (!transformedModel) {
+ return '';
+ }
+ if (transformedModel.startsWith('publishers/') && apiClient.isVertexAI()) {
+ // Vertex caches only support model names that start with 'projects/'.
+ return `projects/${apiClient.getProject()}/locations/${apiClient.getLocation()}/${transformedModel}`;
+ }
+ else if (transformedModel.startsWith('models/') && apiClient.isVertexAI()) {
+ return `projects/${apiClient.getProject()}/locations/${apiClient.getLocation()}/publishers/google/${transformedModel}`;
+ }
+ else {
+ return transformedModel;
+ }
+}
+function tPart(apiClient, origin) {
+ if (origin === null || origin === undefined) {
+ throw new Error('PartUnion is required');
+ }
+ if (typeof origin === 'object') {
+ return origin;
+ }
+ if (typeof origin === 'string') {
+ return { text: origin };
+ }
+ throw new Error(`Unsupported part type: ${typeof origin}`);
+}
+function tParts(apiClient, origin) {
+ if (origin === null ||
+ origin === undefined ||
+ (Array.isArray(origin) && origin.length === 0)) {
+ throw new Error('PartListUnion is required');
+ }
+ if (Array.isArray(origin)) {
+ return origin.map((item) => tPart(apiClient, item));
+ }
+ return [tPart(apiClient, origin)];
+}
+function _isContent(origin) {
+ return (origin !== null &&
+ origin !== undefined &&
+ typeof origin === 'object' &&
+ 'parts' in origin &&
+ Array.isArray(origin.parts));
+}
+function _isFunctionCallPart(origin) {
+ return (origin !== null &&
+ origin !== undefined &&
+ typeof origin === 'object' &&
+ 'functionCall' in origin);
+}
+function _isUserPart(origin) {
+ if (origin === null || origin === undefined) {
+ return false;
+ }
+ if (_isFunctionCallPart(origin)) {
+ return false;
+ }
+ return true;
+}
+function _areUserParts(origin) {
+ if (origin === null ||
+ origin === undefined ||
+ (Array.isArray(origin) && origin.length === 0)) {
+ return false;
+ }
+ return origin.every(_isUserPart);
+}
+function tContent(apiClient, origin) {
+ if (origin === null || origin === undefined) {
+ throw new Error('ContentUnion is required');
+ }
+ if (_isContent(origin)) {
+ // @ts-expect-error: _isContent is a utility function that checks if the
+ // origin is a Content.
+ return origin;
+ }
+ if (_isUserPart(origin)) {
+ return {
+ role: 'user',
+ parts: tParts(apiClient, origin),
+ };
+ }
+ else {
+ return {
+ role: 'model',
+ parts: tParts(apiClient, origin),
+ };
+ }
+}
+function tContentsForEmbed(apiClient, origin) {
+ if (!origin) {
+ return [];
+ }
+ if (apiClient.isVertexAI() && Array.isArray(origin)) {
+ return origin.flatMap((item) => {
+ const content = tContent(apiClient, item);
+ if (content.parts &&
+ content.parts.length > 0 &&
+ content.parts[0].text !== undefined) {
+ return [content.parts[0].text];
+ }
+ return [];
+ });
+ }
+ else if (apiClient.isVertexAI()) {
+ const content = tContent(apiClient, origin);
+ if (content.parts &&
+ content.parts.length > 0 &&
+ content.parts[0].text !== undefined) {
+ return [content.parts[0].text];
+ }
+ return [];
+ }
+ if (Array.isArray(origin)) {
+ return origin.map((item) => tContent(apiClient, item));
+ }
+ return [tContent(apiClient, origin)];
+}
+function _appendAccumulatedPartsAsContent(apiClient, result, accumulatedParts) {
+ if (accumulatedParts.length === 0) {
+ return;
+ }
+ if (_areUserParts(accumulatedParts)) {
+ result.push({
+ role: 'user',
+ parts: tParts(apiClient, accumulatedParts),
+ });
+ }
+ else {
+ result.push({
+ role: 'model',
+ parts: tParts(apiClient, accumulatedParts),
+ });
+ }
+ accumulatedParts.length = 0; // clear the array in place
+}
+function _handleCurrentPart(apiClient, result, accumulatedParts, currentPart) {
+ if (_isUserPart(currentPart) === _areUserParts(accumulatedParts)) {
+ accumulatedParts.push(currentPart);
+ }
+ else {
+ _appendAccumulatedPartsAsContent(apiClient, result, accumulatedParts);
+ accumulatedParts.length = 0;
+ accumulatedParts.push(currentPart);
+ }
+}
+function tContents(apiClient, origin) {
+ if (origin === null ||
+ origin === undefined ||
+ (Array.isArray(origin) && origin.length === 0)) {
+ throw new Error('contents are required');
+ }
+ if (!Array.isArray(origin)) {
+ return [tContent(apiClient, origin)];
+ }
+ const result = [];
+ const accumulatedParts = [];
+ for (const content of origin) {
+ if (_isContent(content)) {
+ _appendAccumulatedPartsAsContent(apiClient, result, accumulatedParts);
+ // @ts-expect-error: content is a Content here
+ result.push(content);
+ }
+ else if (typeof content === 'string' ||
+ (typeof content === 'object' && !Array.isArray(content))) {
+ // @ts-expect-error: content is a part here
+ _handleCurrentPart(apiClient, result, accumulatedParts, content);
+ }
+ else if (Array.isArray(content)) {
+ // If there are consecutive user parts before this list, convert
+ // them to a user Content and append it to the result.
+ _appendAccumulatedPartsAsContent(apiClient, result, accumulatedParts);
+ result.push({
+ role: 'user',
+ parts: tParts(apiClient, content),
+ });
+ }
+ else {
+ throw new Error(`Unsupported content type: ${typeof content}`);
+ }
+ }
+ _appendAccumulatedPartsAsContent(apiClient, result, accumulatedParts);
+ return result;
+}
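+// Illustrative grouping by tContents: consecutive user-style parts merge into
+// a single 'user' Content, while function-call parts form 'model' Content:
+//   tContents(c, ['hi', {text: 'there'}, {functionCall: {name: 'f'}}]) ->
+//     [{role: 'user', parts: [{text: 'hi'}, {text: 'there'}]},
+//      {role: 'model', parts: [{functionCall: {name: 'f'}}]}]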
+function processSchema(apiClient, schema) {
+ if (!apiClient.isVertexAI()) {
+ if ('default' in schema) {
+ throw new Error('Default value is not supported in the response schema for the Gemini API.');
+ }
+ }
+ if ('anyOf' in schema) {
+ if (schema['anyOf'] !== undefined) {
+ for (const subSchema of schema['anyOf']) {
+ processSchema(apiClient, subSchema);
+ }
+ }
+ }
+ if ('items' in schema) {
+ if (schema['items'] !== undefined) {
+ processSchema(apiClient, schema['items']);
+ }
+ }
+ if ('properties' in schema) {
+ if (schema['properties'] !== undefined) {
+ for (const subSchema of Object.values(schema['properties'])) {
+ processSchema(apiClient, subSchema);
+ }
+ }
+ }
+}
+function tSchema(apiClient, schema) {
+ processSchema(apiClient, schema);
+ return schema;
+}
+function tSpeechConfig(apiClient, speechConfig) {
+ if (typeof speechConfig === 'object' && 'voiceConfig' in speechConfig) {
+ return speechConfig;
+ }
+ else if (typeof speechConfig === 'string') {
+ return {
+ voiceConfig: {
+ prebuiltVoiceConfig: {
+ voiceName: speechConfig,
+ },
+ },
+ };
+ }
+ else {
+ throw new Error(`Unsupported speechConfig type: ${typeof speechConfig}`);
+ }
+}
+function tTool(apiClient, tool) {
+ return tool;
+}
+function tTools(apiClient, tool) {
+ if (!Array.isArray(tool)) {
+ throw new Error('tool is required and must be an array of Tools');
+ }
+ return tool;
+}
+/**
+ * Prepends resource name with project, location, resource_prefix if needed.
+ *
+ * @param client The API client.
+ * @param resourceName The resource name.
+ * @param resourcePrefix The resource prefix.
+ * @param splitsAfterPrefix The number of splits after the prefix.
+ * @returns The completed resource name.
+ *
+ * Examples:
+ *
+ * ```
+ * resource_name = '123'
+ * resource_prefix = 'cachedContents'
+ * splits_after_prefix = 1
+ * client.vertexai = True
+ * client.project = 'bar'
+ * client.location = 'us-west1'
+ * _resource_name(client, resource_name, resource_prefix, splits_after_prefix)
+ * returns: 'projects/bar/locations/us-west1/cachedContents/123'
+ * ```
+ *
+ * ```
+ * resource_name = 'projects/foo/locations/us-central1/cachedContents/123'
+ * resource_prefix = 'cachedContents'
+ * splits_after_prefix = 1
+ * client.vertexai = True
+ * client.project = 'bar'
+ * client.location = 'us-west1'
+ * _resource_name(client, resource_name, resource_prefix, splits_after_prefix)
+ * returns: 'projects/foo/locations/us-central1/cachedContents/123'
+ * ```
+ *
+ * ```
+ * resource_name = '123'
+ * resource_prefix = 'cachedContents'
+ * splits_after_prefix = 1
+ * client.vertexai = False
+ * _resource_name(client, resource_name, resource_prefix, splits_after_prefix)
+ * returns 'cachedContents/123'
+ * ```
+ *
+ * ```
+ * resource_name = 'some/wrong/cachedContents/resource/name/123'
+ * resource_prefix = 'cachedContents'
+ * splits_after_prefix = 1
+ * client.vertexai = False
+ * # client.vertexai = True
+ * _resource_name(client, resource_name, resource_prefix, splits_after_prefix)
+ * -> 'some/wrong/resource/name/123'
+ * ```
+ */
+function resourceName(client, resourceName, resourcePrefix, splitsAfterPrefix = 1) {
+ const shouldAppendPrefix = !resourceName.startsWith(`${resourcePrefix}/`) &&
+ resourceName.split('/').length === splitsAfterPrefix;
+ if (client.isVertexAI()) {
+ if (resourceName.startsWith('projects/')) {
+ return resourceName;
+ }
+ else if (resourceName.startsWith('locations/')) {
+ return `projects/${client.getProject()}/${resourceName}`;
+ }
+ else if (resourceName.startsWith(`${resourcePrefix}/`)) {
+ return `projects/${client.getProject()}/locations/${client.getLocation()}/${resourceName}`;
+ }
+ else if (shouldAppendPrefix) {
+ return `projects/${client.getProject()}/locations/${client.getLocation()}/${resourcePrefix}/${resourceName}`;
+ }
+ else {
+ return resourceName;
+ }
+ }
+ if (shouldAppendPrefix) {
+ return `${resourcePrefix}/${resourceName}`;
+ }
+ return resourceName;
+}
+function tCachedContentName(apiClient, name) {
+ if (typeof name !== 'string') {
+ throw new Error('name must be a string');
+ }
+ return resourceName(apiClient, name, 'cachedContents');
+}
+function tBytes(apiClient, fromImageBytes) {
+ if (typeof fromImageBytes !== 'string') {
+ throw new Error('fromImageBytes must be a string');
+ }
+ // TODO(b/389133914): Remove dummy bytes converter.
+ return fromImageBytes;
+}
+function tFileName(apiClient, fromName) {
+ if (typeof fromName !== 'string') {
+ throw new Error('fromName must be a string');
+ }
+ // Remove the 'files/' prefix from MLDev URLs to get the actual file name.
+ if (fromName.startsWith('files/')) {
+ return fromName.split('files/')[1];
+ }
+ return fromName;
+}
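+// For illustration: tFileName(c, 'files/abc-123') -> 'abc-123';
+// names without the 'files/' prefix pass through unchanged.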
+
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+function partToMldev$1(apiClient, fromObject) {
+ const toObject = {};
+ if (getValueByPath(fromObject, ['videoMetadata']) !== undefined) {
+ throw new Error('videoMetadata parameter is not supported in Gemini API.');
+ }
+ const fromThought = getValueByPath(fromObject, ['thought']);
+ if (fromThought != null) {
+ setValueByPath(toObject, ['thought'], fromThought);
+ }
+ const fromCodeExecutionResult = getValueByPath(fromObject, [
+ 'codeExecutionResult',
+ ]);
+ if (fromCodeExecutionResult != null) {
+ setValueByPath(toObject, ['codeExecutionResult'], fromCodeExecutionResult);
+ }
+ const fromExecutableCode = getValueByPath(fromObject, [
+ 'executableCode',
+ ]);
+ if (fromExecutableCode != null) {
+ setValueByPath(toObject, ['executableCode'], fromExecutableCode);
+ }
+ const fromFileData = getValueByPath(fromObject, ['fileData']);
+ if (fromFileData != null) {
+ setValueByPath(toObject, ['fileData'], fromFileData);
+ }
+ const fromFunctionCall = getValueByPath(fromObject, ['functionCall']);
+ if (fromFunctionCall != null) {
+ setValueByPath(toObject, ['functionCall'], fromFunctionCall);
+ }
+ const fromFunctionResponse = getValueByPath(fromObject, [
+ 'functionResponse',
+ ]);
+ if (fromFunctionResponse != null) {
+ setValueByPath(toObject, ['functionResponse'], fromFunctionResponse);
+ }
+ const fromInlineData = getValueByPath(fromObject, ['inlineData']);
+ if (fromInlineData != null) {
+ setValueByPath(toObject, ['inlineData'], fromInlineData);
+ }
+ const fromText = getValueByPath(fromObject, ['text']);
+ if (fromText != null) {
+ setValueByPath(toObject, ['text'], fromText);
+ }
+ return toObject;
+}
+function contentToMldev$1(apiClient, fromObject) {
+ const toObject = {};
+ const fromParts = getValueByPath(fromObject, ['parts']);
+ if (fromParts != null) {
+ if (Array.isArray(fromParts)) {
+ setValueByPath(toObject, ['parts'], fromParts.map((item) => {
+ return partToMldev$1(apiClient, item);
+ }));
+ }
+ else {
+ setValueByPath(toObject, ['parts'], fromParts);
+ }
+ }
+ const fromRole = getValueByPath(fromObject, ['role']);
+ if (fromRole != null) {
+ setValueByPath(toObject, ['role'], fromRole);
+ }
+ return toObject;
+}
+function functionDeclarationToMldev$1(apiClient, fromObject) {
+ const toObject = {};
+ if (getValueByPath(fromObject, ['response']) !== undefined) {
+ throw new Error('response parameter is not supported in Gemini API.');
+ }
+ const fromDescription = getValueByPath(fromObject, ['description']);
+ if (fromDescription != null) {
+ setValueByPath(toObject, ['description'], fromDescription);
+ }
+ const fromName = getValueByPath(fromObject, ['name']);
+ if (fromName != null) {
+ setValueByPath(toObject, ['name'], fromName);
+ }
+ const fromParameters = getValueByPath(fromObject, ['parameters']);
+ if (fromParameters != null) {
+ setValueByPath(toObject, ['parameters'], fromParameters);
+ }
+ return toObject;
+}
+function googleSearchToMldev$1() {
+ const toObject = {};
+ return toObject;
+}
+function dynamicRetrievalConfigToMldev$1(apiClient, fromObject) {
+ const toObject = {};
+ const fromMode = getValueByPath(fromObject, ['mode']);
+ if (fromMode != null) {
+ setValueByPath(toObject, ['mode'], fromMode);
+ }
+ const fromDynamicThreshold = getValueByPath(fromObject, [
+ 'dynamicThreshold',
+ ]);
+ if (fromDynamicThreshold != null) {
+ setValueByPath(toObject, ['dynamicThreshold'], fromDynamicThreshold);
+ }
+ return toObject;
+}
+function googleSearchRetrievalToMldev$1(apiClient, fromObject) {
+ const toObject = {};
+ const fromDynamicRetrievalConfig = getValueByPath(fromObject, [
+ 'dynamicRetrievalConfig',
+ ]);
+ if (fromDynamicRetrievalConfig != null) {
+ setValueByPath(toObject, ['dynamicRetrievalConfig'], dynamicRetrievalConfigToMldev$1(apiClient, fromDynamicRetrievalConfig));
+ }
+ return toObject;
+}
+function toolToMldev$1(apiClient, fromObject) {
+ const toObject = {};
+ const fromFunctionDeclarations = getValueByPath(fromObject, [
+ 'functionDeclarations',
+ ]);
+ if (fromFunctionDeclarations != null) {
+ if (Array.isArray(fromFunctionDeclarations)) {
+ setValueByPath(toObject, ['functionDeclarations'], fromFunctionDeclarations.map((item) => {
+ return functionDeclarationToMldev$1(apiClient, item);
+ }));
+ }
+ else {
+ setValueByPath(toObject, ['functionDeclarations'], fromFunctionDeclarations);
+ }
+ }
+ if (getValueByPath(fromObject, ['retrieval']) !== undefined) {
+ throw new Error('retrieval parameter is not supported in Gemini API.');
+ }
+ const fromGoogleSearch = getValueByPath(fromObject, ['googleSearch']);
+ if (fromGoogleSearch != null) {
+ setValueByPath(toObject, ['googleSearch'], googleSearchToMldev$1());
+ }
+ const fromGoogleSearchRetrieval = getValueByPath(fromObject, [
+ 'googleSearchRetrieval',
+ ]);
+ if (fromGoogleSearchRetrieval != null) {
+ setValueByPath(toObject, ['googleSearchRetrieval'], googleSearchRetrievalToMldev$1(apiClient, fromGoogleSearchRetrieval));
+ }
+ const fromCodeExecution = getValueByPath(fromObject, [
+ 'codeExecution',
+ ]);
+ if (fromCodeExecution != null) {
+ setValueByPath(toObject, ['codeExecution'], fromCodeExecution);
+ }
+ return toObject;
+}
+function functionCallingConfigToMldev$1(apiClient, fromObject) {
+ const toObject = {};
+ const fromMode = getValueByPath(fromObject, ['mode']);
+ if (fromMode != null) {
+ setValueByPath(toObject, ['mode'], fromMode);
+ }
+ const fromAllowedFunctionNames = getValueByPath(fromObject, [
+ 'allowedFunctionNames',
+ ]);
+ if (fromAllowedFunctionNames != null) {
+ setValueByPath(toObject, ['allowedFunctionNames'], fromAllowedFunctionNames);
+ }
+ return toObject;
+}
+function toolConfigToMldev$1(apiClient, fromObject) {
+ const toObject = {};
+ const fromFunctionCallingConfig = getValueByPath(fromObject, [
+ 'functionCallingConfig',
+ ]);
+ if (fromFunctionCallingConfig != null) {
+ setValueByPath(toObject, ['functionCallingConfig'], functionCallingConfigToMldev$1(apiClient, fromFunctionCallingConfig));
+ }
+ return toObject;
+}
+function createCachedContentConfigToMldev(apiClient, fromObject, parentObject) {
+ const toObject = {};
+ const fromTtl = getValueByPath(fromObject, ['ttl']);
+ if (parentObject !== undefined && fromTtl != null) {
+ setValueByPath(parentObject, ['ttl'], fromTtl);
+ }
+ const fromExpireTime = getValueByPath(fromObject, ['expireTime']);
+ if (parentObject !== undefined && fromExpireTime != null) {
+ setValueByPath(parentObject, ['expireTime'], fromExpireTime);
+ }
+ const fromDisplayName = getValueByPath(fromObject, ['displayName']);
+ if (parentObject !== undefined && fromDisplayName != null) {
+ setValueByPath(parentObject, ['displayName'], fromDisplayName);
+ }
+ const fromContents = getValueByPath(fromObject, ['contents']);
+ if (parentObject !== undefined && fromContents != null) {
+ if (Array.isArray(fromContents)) {
+ setValueByPath(parentObject, ['contents'], tContents(apiClient, tContents(apiClient, fromContents).map((item) => {
+ return contentToMldev$1(apiClient, item);
+ })));
+ }
+ else {
+ setValueByPath(parentObject, ['contents'], tContents(apiClient, fromContents));
+ }
+ }
+ const fromSystemInstruction = getValueByPath(fromObject, [
+ 'systemInstruction',
+ ]);
+ if (parentObject !== undefined && fromSystemInstruction != null) {
+ setValueByPath(parentObject, ['systemInstruction'], contentToMldev$1(apiClient, tContent(apiClient, fromSystemInstruction)));
+ }
+ const fromTools = getValueByPath(fromObject, ['tools']);
+ if (parentObject !== undefined && fromTools != null) {
+ if (Array.isArray(fromTools)) {
+ setValueByPath(parentObject, ['tools'], fromTools.map((item) => {
+ return toolToMldev$1(apiClient, item);
+ }));
+ }
+ else {
+ setValueByPath(parentObject, ['tools'], fromTools);
+ }
+ }
+ const fromToolConfig = getValueByPath(fromObject, ['toolConfig']);
+ if (parentObject !== undefined && fromToolConfig != null) {
+ setValueByPath(parentObject, ['toolConfig'], toolConfigToMldev$1(apiClient, fromToolConfig));
+ }
+ return toObject;
+}
+function createCachedContentParametersToMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromModel = getValueByPath(fromObject, ['model']);
+ if (fromModel != null) {
+ setValueByPath(toObject, ['model'], tCachesModel(apiClient, fromModel));
+ }
+ const fromConfig = getValueByPath(fromObject, ['config']);
+ if (fromConfig != null) {
+ setValueByPath(toObject, ['config'], createCachedContentConfigToMldev(apiClient, fromConfig, toObject));
+ }
+ return toObject;
+}
+function getCachedContentParametersToMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromName = getValueByPath(fromObject, ['name']);
+ if (fromName != null) {
+ setValueByPath(toObject, ['_url', 'name'], tCachedContentName(apiClient, fromName));
+ }
+ const fromConfig = getValueByPath(fromObject, ['config']);
+ if (fromConfig != null) {
+ setValueByPath(toObject, ['config'], fromConfig);
+ }
+ return toObject;
+}
+function deleteCachedContentParametersToMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromName = getValueByPath(fromObject, ['name']);
+ if (fromName != null) {
+ setValueByPath(toObject, ['_url', 'name'], tCachedContentName(apiClient, fromName));
+ }
+ const fromConfig = getValueByPath(fromObject, ['config']);
+ if (fromConfig != null) {
+ setValueByPath(toObject, ['config'], fromConfig);
+ }
+ return toObject;
+}
+function updateCachedContentConfigToMldev(apiClient, fromObject, parentObject) {
+ const toObject = {};
+ const fromTtl = getValueByPath(fromObject, ['ttl']);
+ if (parentObject !== undefined && fromTtl != null) {
+ setValueByPath(parentObject, ['ttl'], fromTtl);
+ }
+ const fromExpireTime = getValueByPath(fromObject, ['expireTime']);
+ if (parentObject !== undefined && fromExpireTime != null) {
+ setValueByPath(parentObject, ['expireTime'], fromExpireTime);
+ }
+ return toObject;
+}
+function updateCachedContentParametersToMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromName = getValueByPath(fromObject, ['name']);
+ if (fromName != null) {
+ setValueByPath(toObject, ['_url', 'name'], tCachedContentName(apiClient, fromName));
+ }
+ const fromConfig = getValueByPath(fromObject, ['config']);
+ if (fromConfig != null) {
+ setValueByPath(toObject, ['config'], updateCachedContentConfigToMldev(apiClient, fromConfig, toObject));
+ }
+ return toObject;
+}
+function listCachedContentsConfigToMldev(apiClient, fromObject, parentObject) {
+ const toObject = {};
+ const fromPageSize = getValueByPath(fromObject, ['pageSize']);
+ if (parentObject !== undefined && fromPageSize != null) {
+ setValueByPath(parentObject, ['_query', 'pageSize'], fromPageSize);
+ }
+ const fromPageToken = getValueByPath(fromObject, ['pageToken']);
+ if (parentObject !== undefined && fromPageToken != null) {
+ setValueByPath(parentObject, ['_query', 'pageToken'], fromPageToken);
+ }
+ return toObject;
+}
+function listCachedContentsParametersToMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromConfig = getValueByPath(fromObject, ['config']);
+ if (fromConfig != null) {
+ setValueByPath(toObject, ['config'], listCachedContentsConfigToMldev(apiClient, fromConfig, toObject));
+ }
+ return toObject;
+}
+function partToVertex$1(apiClient, fromObject) {
+ const toObject = {};
+ const fromVideoMetadata = getValueByPath(fromObject, [
+ 'videoMetadata',
+ ]);
+ if (fromVideoMetadata != null) {
+ setValueByPath(toObject, ['videoMetadata'], fromVideoMetadata);
+ }
+ const fromThought = getValueByPath(fromObject, ['thought']);
+ if (fromThought != null) {
+ setValueByPath(toObject, ['thought'], fromThought);
+ }
+ const fromCodeExecutionResult = getValueByPath(fromObject, [
+ 'codeExecutionResult',
+ ]);
+ if (fromCodeExecutionResult != null) {
+ setValueByPath(toObject, ['codeExecutionResult'], fromCodeExecutionResult);
+ }
+ const fromExecutableCode = getValueByPath(fromObject, [
+ 'executableCode',
+ ]);
+ if (fromExecutableCode != null) {
+ setValueByPath(toObject, ['executableCode'], fromExecutableCode);
+ }
+ const fromFileData = getValueByPath(fromObject, ['fileData']);
+ if (fromFileData != null) {
+ setValueByPath(toObject, ['fileData'], fromFileData);
+ }
+ const fromFunctionCall = getValueByPath(fromObject, ['functionCall']);
+ if (fromFunctionCall != null) {
+ setValueByPath(toObject, ['functionCall'], fromFunctionCall);
+ }
+ const fromFunctionResponse = getValueByPath(fromObject, [
+ 'functionResponse',
+ ]);
+ if (fromFunctionResponse != null) {
+ setValueByPath(toObject, ['functionResponse'], fromFunctionResponse);
+ }
+ const fromInlineData = getValueByPath(fromObject, ['inlineData']);
+ if (fromInlineData != null) {
+ setValueByPath(toObject, ['inlineData'], fromInlineData);
+ }
+ const fromText = getValueByPath(fromObject, ['text']);
+ if (fromText != null) {
+ setValueByPath(toObject, ['text'], fromText);
+ }
+ return toObject;
+}
+function contentToVertex$1(apiClient, fromObject) {
+ const toObject = {};
+ const fromParts = getValueByPath(fromObject, ['parts']);
+ if (fromParts != null) {
+ if (Array.isArray(fromParts)) {
+ setValueByPath(toObject, ['parts'], fromParts.map((item) => {
+ return partToVertex$1(apiClient, item);
+ }));
+ }
+ else {
+ setValueByPath(toObject, ['parts'], fromParts);
+ }
+ }
+ const fromRole = getValueByPath(fromObject, ['role']);
+ if (fromRole != null) {
+ setValueByPath(toObject, ['role'], fromRole);
+ }
+ return toObject;
+}
+function schemaToVertex$1(apiClient, fromObject) {
+ const toObject = {};
+ const fromExample = getValueByPath(fromObject, ['example']);
+ if (fromExample != null) {
+ setValueByPath(toObject, ['example'], fromExample);
+ }
+ const fromPattern = getValueByPath(fromObject, ['pattern']);
+ if (fromPattern != null) {
+ setValueByPath(toObject, ['pattern'], fromPattern);
+ }
+ const fromDefault = getValueByPath(fromObject, ['default']);
+ if (fromDefault != null) {
+ setValueByPath(toObject, ['default'], fromDefault);
+ }
+ const fromMaxLength = getValueByPath(fromObject, ['maxLength']);
+ if (fromMaxLength != null) {
+ setValueByPath(toObject, ['maxLength'], fromMaxLength);
+ }
+ const fromMinLength = getValueByPath(fromObject, ['minLength']);
+ if (fromMinLength != null) {
+ setValueByPath(toObject, ['minLength'], fromMinLength);
+ }
+ const fromMinProperties = getValueByPath(fromObject, [
+ 'minProperties',
+ ]);
+ if (fromMinProperties != null) {
+ setValueByPath(toObject, ['minProperties'], fromMinProperties);
+ }
+ const fromMaxProperties = getValueByPath(fromObject, [
+ 'maxProperties',
+ ]);
+ if (fromMaxProperties != null) {
+ setValueByPath(toObject, ['maxProperties'], fromMaxProperties);
+ }
+ const fromAnyOf = getValueByPath(fromObject, ['anyOf']);
+ if (fromAnyOf != null) {
+ setValueByPath(toObject, ['anyOf'], fromAnyOf);
+ }
+ const fromDescription = getValueByPath(fromObject, ['description']);
+ if (fromDescription != null) {
+ setValueByPath(toObject, ['description'], fromDescription);
+ }
+ const fromEnum = getValueByPath(fromObject, ['enum']);
+ if (fromEnum != null) {
+ setValueByPath(toObject, ['enum'], fromEnum);
+ }
+ const fromFormat = getValueByPath(fromObject, ['format']);
+ if (fromFormat != null) {
+ setValueByPath(toObject, ['format'], fromFormat);
+ }
+ const fromItems = getValueByPath(fromObject, ['items']);
+ if (fromItems != null) {
+ setValueByPath(toObject, ['items'], fromItems);
+ }
+ const fromMaxItems = getValueByPath(fromObject, ['maxItems']);
+ if (fromMaxItems != null) {
+ setValueByPath(toObject, ['maxItems'], fromMaxItems);
+ }
+ const fromMaximum = getValueByPath(fromObject, ['maximum']);
+ if (fromMaximum != null) {
+ setValueByPath(toObject, ['maximum'], fromMaximum);
+ }
+ const fromMinItems = getValueByPath(fromObject, ['minItems']);
+ if (fromMinItems != null) {
+ setValueByPath(toObject, ['minItems'], fromMinItems);
+ }
+ const fromMinimum = getValueByPath(fromObject, ['minimum']);
+ if (fromMinimum != null) {
+ setValueByPath(toObject, ['minimum'], fromMinimum);
+ }
+ const fromNullable = getValueByPath(fromObject, ['nullable']);
+ if (fromNullable != null) {
+ setValueByPath(toObject, ['nullable'], fromNullable);
+ }
+ const fromProperties = getValueByPath(fromObject, ['properties']);
+ if (fromProperties != null) {
+ setValueByPath(toObject, ['properties'], fromProperties);
+ }
+ const fromPropertyOrdering = getValueByPath(fromObject, [
+ 'propertyOrdering',
+ ]);
+ if (fromPropertyOrdering != null) {
+ setValueByPath(toObject, ['propertyOrdering'], fromPropertyOrdering);
+ }
+ const fromRequired = getValueByPath(fromObject, ['required']);
+ if (fromRequired != null) {
+ setValueByPath(toObject, ['required'], fromRequired);
+ }
+ const fromTitle = getValueByPath(fromObject, ['title']);
+ if (fromTitle != null) {
+ setValueByPath(toObject, ['title'], fromTitle);
+ }
+ const fromType = getValueByPath(fromObject, ['type']);
+ if (fromType != null) {
+ setValueByPath(toObject, ['type'], fromType);
+ }
+ return toObject;
+}
+function functionDeclarationToVertex$1(apiClient, fromObject) {
+ const toObject = {};
+ const fromResponse = getValueByPath(fromObject, ['response']);
+ if (fromResponse != null) {
+ setValueByPath(toObject, ['response'], schemaToVertex$1(apiClient, fromResponse));
+ }
+ const fromDescription = getValueByPath(fromObject, ['description']);
+ if (fromDescription != null) {
+ setValueByPath(toObject, ['description'], fromDescription);
+ }
+ const fromName = getValueByPath(fromObject, ['name']);
+ if (fromName != null) {
+ setValueByPath(toObject, ['name'], fromName);
+ }
+ const fromParameters = getValueByPath(fromObject, ['parameters']);
+ if (fromParameters != null) {
+ setValueByPath(toObject, ['parameters'], fromParameters);
+ }
+ return toObject;
+}
+function googleSearchToVertex$1() {
+ const toObject = {};
+ return toObject;
+}
+function dynamicRetrievalConfigToVertex$1(apiClient, fromObject) {
+ const toObject = {};
+ const fromMode = getValueByPath(fromObject, ['mode']);
+ if (fromMode != null) {
+ setValueByPath(toObject, ['mode'], fromMode);
+ }
+ const fromDynamicThreshold = getValueByPath(fromObject, [
+ 'dynamicThreshold',
+ ]);
+ if (fromDynamicThreshold != null) {
+ setValueByPath(toObject, ['dynamicThreshold'], fromDynamicThreshold);
+ }
+ return toObject;
+}
+function googleSearchRetrievalToVertex$1(apiClient, fromObject) {
+ const toObject = {};
+ const fromDynamicRetrievalConfig = getValueByPath(fromObject, [
+ 'dynamicRetrievalConfig',
+ ]);
+ if (fromDynamicRetrievalConfig != null) {
+ setValueByPath(toObject, ['dynamicRetrievalConfig'], dynamicRetrievalConfigToVertex$1(apiClient, fromDynamicRetrievalConfig));
+ }
+ return toObject;
+}
+function toolToVertex$1(apiClient, fromObject) {
+ const toObject = {};
+ const fromFunctionDeclarations = getValueByPath(fromObject, [
+ 'functionDeclarations',
+ ]);
+ if (fromFunctionDeclarations != null) {
+ if (Array.isArray(fromFunctionDeclarations)) {
+ setValueByPath(toObject, ['functionDeclarations'], fromFunctionDeclarations.map((item) => {
+ return functionDeclarationToVertex$1(apiClient, item);
+ }));
+ }
+ else {
+ setValueByPath(toObject, ['functionDeclarations'], fromFunctionDeclarations);
+ }
+ }
+ const fromRetrieval = getValueByPath(fromObject, ['retrieval']);
+ if (fromRetrieval != null) {
+ setValueByPath(toObject, ['retrieval'], fromRetrieval);
+ }
+ const fromGoogleSearch = getValueByPath(fromObject, ['googleSearch']);
+ if (fromGoogleSearch != null) {
+ setValueByPath(toObject, ['googleSearch'], googleSearchToVertex$1());
+ }
+ const fromGoogleSearchRetrieval = getValueByPath(fromObject, [
+ 'googleSearchRetrieval',
+ ]);
+ if (fromGoogleSearchRetrieval != null) {
+ setValueByPath(toObject, ['googleSearchRetrieval'], googleSearchRetrievalToVertex$1(apiClient, fromGoogleSearchRetrieval));
+ }
+ const fromCodeExecution = getValueByPath(fromObject, [
+ 'codeExecution',
+ ]);
+ if (fromCodeExecution != null) {
+ setValueByPath(toObject, ['codeExecution'], fromCodeExecution);
+ }
+ return toObject;
+}
+function functionCallingConfigToVertex$1(apiClient, fromObject) {
+ const toObject = {};
+ const fromMode = getValueByPath(fromObject, ['mode']);
+ if (fromMode != null) {
+ setValueByPath(toObject, ['mode'], fromMode);
+ }
+ const fromAllowedFunctionNames = getValueByPath(fromObject, [
+ 'allowedFunctionNames',
+ ]);
+ if (fromAllowedFunctionNames != null) {
+ setValueByPath(toObject, ['allowedFunctionNames'], fromAllowedFunctionNames);
+ }
+ return toObject;
+}
+function toolConfigToVertex$1(apiClient, fromObject) {
+ const toObject = {};
+ const fromFunctionCallingConfig = getValueByPath(fromObject, [
+ 'functionCallingConfig',
+ ]);
+ if (fromFunctionCallingConfig != null) {
+ setValueByPath(toObject, ['functionCallingConfig'], functionCallingConfigToVertex$1(apiClient, fromFunctionCallingConfig));
+ }
+ return toObject;
+}
+function createCachedContentConfigToVertex(apiClient, fromObject, parentObject) {
+ const toObject = {};
+ const fromTtl = getValueByPath(fromObject, ['ttl']);
+ if (parentObject !== undefined && fromTtl != null) {
+ setValueByPath(parentObject, ['ttl'], fromTtl);
+ }
+ const fromExpireTime = getValueByPath(fromObject, ['expireTime']);
+ if (parentObject !== undefined && fromExpireTime != null) {
+ setValueByPath(parentObject, ['expireTime'], fromExpireTime);
+ }
+ const fromDisplayName = getValueByPath(fromObject, ['displayName']);
+ if (parentObject !== undefined && fromDisplayName != null) {
+ setValueByPath(parentObject, ['displayName'], fromDisplayName);
+ }
+ const fromContents = getValueByPath(fromObject, ['contents']);
+ if (parentObject !== undefined && fromContents != null) {
+ if (Array.isArray(fromContents)) {
+ setValueByPath(parentObject, ['contents'], tContents(apiClient, tContents(apiClient, fromContents).map((item) => {
+ return contentToVertex$1(apiClient, item);
+ })));
+ }
+ else {
+ setValueByPath(parentObject, ['contents'], tContents(apiClient, fromContents));
+ }
+ }
+ const fromSystemInstruction = getValueByPath(fromObject, [
+ 'systemInstruction',
+ ]);
+ if (parentObject !== undefined && fromSystemInstruction != null) {
+ setValueByPath(parentObject, ['systemInstruction'], contentToVertex$1(apiClient, tContent(apiClient, fromSystemInstruction)));
+ }
+ const fromTools = getValueByPath(fromObject, ['tools']);
+ if (parentObject !== undefined && fromTools != null) {
+ if (Array.isArray(fromTools)) {
+ setValueByPath(parentObject, ['tools'], fromTools.map((item) => {
+ return toolToVertex$1(apiClient, item);
+ }));
+ }
+ else {
+ setValueByPath(parentObject, ['tools'], fromTools);
+ }
+ }
+ const fromToolConfig = getValueByPath(fromObject, ['toolConfig']);
+ if (parentObject !== undefined && fromToolConfig != null) {
+ setValueByPath(parentObject, ['toolConfig'], toolConfigToVertex$1(apiClient, fromToolConfig));
+ }
+ return toObject;
+}
+function createCachedContentParametersToVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromModel = getValueByPath(fromObject, ['model']);
+ if (fromModel != null) {
+ setValueByPath(toObject, ['model'], tCachesModel(apiClient, fromModel));
+ }
+ const fromConfig = getValueByPath(fromObject, ['config']);
+ if (fromConfig != null) {
+ setValueByPath(toObject, ['config'], createCachedContentConfigToVertex(apiClient, fromConfig, toObject));
+ }
+ return toObject;
+}
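+/*
+ * Illustrative sketch (hypothetical input): `createCachedContentConfigToVertex`
+ * writes config fields onto `parentObject` (the request body) rather than
+ * nesting them, so the wire format is flat; the caller later deletes the
+ * leftover empty `config` key before sending the request.
+ *
+ * ```ts
+ * const body = createCachedContentParametersToVertex(apiClient, {
+ *   model: 'gemini-1.5-flash',
+ *   config: {ttl: '3600s', displayName: 'my cache'},
+ * });
+ * // body is roughly:
+ * // {model: tCachesModel(...), config: {}, ttl: '3600s', displayName: 'my cache'}
+ * ```
+ */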
+function getCachedContentParametersToVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromName = getValueByPath(fromObject, ['name']);
+ if (fromName != null) {
+ setValueByPath(toObject, ['_url', 'name'], tCachedContentName(apiClient, fromName));
+ }
+ const fromConfig = getValueByPath(fromObject, ['config']);
+ if (fromConfig != null) {
+ setValueByPath(toObject, ['config'], fromConfig);
+ }
+ return toObject;
+}
+function deleteCachedContentParametersToVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromName = getValueByPath(fromObject, ['name']);
+ if (fromName != null) {
+ setValueByPath(toObject, ['_url', 'name'], tCachedContentName(apiClient, fromName));
+ }
+ const fromConfig = getValueByPath(fromObject, ['config']);
+ if (fromConfig != null) {
+ setValueByPath(toObject, ['config'], fromConfig);
+ }
+ return toObject;
+}
+function updateCachedContentConfigToVertex(apiClient, fromObject, parentObject) {
+ const toObject = {};
+ const fromTtl = getValueByPath(fromObject, ['ttl']);
+ if (parentObject !== undefined && fromTtl != null) {
+ setValueByPath(parentObject, ['ttl'], fromTtl);
+ }
+ const fromExpireTime = getValueByPath(fromObject, ['expireTime']);
+ if (parentObject !== undefined && fromExpireTime != null) {
+ setValueByPath(parentObject, ['expireTime'], fromExpireTime);
+ }
+ return toObject;
+}
+function updateCachedContentParametersToVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromName = getValueByPath(fromObject, ['name']);
+ if (fromName != null) {
+ setValueByPath(toObject, ['_url', 'name'], tCachedContentName(apiClient, fromName));
+ }
+ const fromConfig = getValueByPath(fromObject, ['config']);
+ if (fromConfig != null) {
+ setValueByPath(toObject, ['config'], updateCachedContentConfigToVertex(apiClient, fromConfig, toObject));
+ }
+ return toObject;
+}
+function listCachedContentsConfigToVertex(apiClient, fromObject, parentObject) {
+ const toObject = {};
+ const fromPageSize = getValueByPath(fromObject, ['pageSize']);
+ if (parentObject !== undefined && fromPageSize != null) {
+ setValueByPath(parentObject, ['_query', 'pageSize'], fromPageSize);
+ }
+ const fromPageToken = getValueByPath(fromObject, ['pageToken']);
+ if (parentObject !== undefined && fromPageToken != null) {
+ setValueByPath(parentObject, ['_query', 'pageToken'], fromPageToken);
+ }
+ return toObject;
+}
+function listCachedContentsParametersToVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromConfig = getValueByPath(fromObject, ['config']);
+ if (fromConfig != null) {
+ setValueByPath(toObject, ['config'], listCachedContentsConfigToVertex(apiClient, fromConfig, toObject));
+ }
+ return toObject;
+}
+function cachedContentFromMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromName = getValueByPath(fromObject, ['name']);
+ if (fromName != null) {
+ setValueByPath(toObject, ['name'], fromName);
+ }
+ const fromDisplayName = getValueByPath(fromObject, ['displayName']);
+ if (fromDisplayName != null) {
+ setValueByPath(toObject, ['displayName'], fromDisplayName);
+ }
+ const fromModel = getValueByPath(fromObject, ['model']);
+ if (fromModel != null) {
+ setValueByPath(toObject, ['model'], fromModel);
+ }
+ const fromCreateTime = getValueByPath(fromObject, ['createTime']);
+ if (fromCreateTime != null) {
+ setValueByPath(toObject, ['createTime'], fromCreateTime);
+ }
+ const fromUpdateTime = getValueByPath(fromObject, ['updateTime']);
+ if (fromUpdateTime != null) {
+ setValueByPath(toObject, ['updateTime'], fromUpdateTime);
+ }
+ const fromExpireTime = getValueByPath(fromObject, ['expireTime']);
+ if (fromExpireTime != null) {
+ setValueByPath(toObject, ['expireTime'], fromExpireTime);
+ }
+ const fromUsageMetadata = getValueByPath(fromObject, [
+ 'usageMetadata',
+ ]);
+ if (fromUsageMetadata != null) {
+ setValueByPath(toObject, ['usageMetadata'], fromUsageMetadata);
+ }
+ return toObject;
+}
+function deleteCachedContentResponseFromMldev() {
+ const toObject = {};
+ return toObject;
+}
+function listCachedContentsResponseFromMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromNextPageToken = getValueByPath(fromObject, [
+ 'nextPageToken',
+ ]);
+ if (fromNextPageToken != null) {
+ setValueByPath(toObject, ['nextPageToken'], fromNextPageToken);
+ }
+ const fromCachedContents = getValueByPath(fromObject, [
+ 'cachedContents',
+ ]);
+ if (fromCachedContents != null) {
+ if (Array.isArray(fromCachedContents)) {
+ setValueByPath(toObject, ['cachedContents'], fromCachedContents.map((item) => {
+ return cachedContentFromMldev(apiClient, item);
+ }));
+ }
+ else {
+ setValueByPath(toObject, ['cachedContents'], fromCachedContents);
+ }
+ }
+ return toObject;
+}
+function cachedContentFromVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromName = getValueByPath(fromObject, ['name']);
+ if (fromName != null) {
+ setValueByPath(toObject, ['name'], fromName);
+ }
+ const fromDisplayName = getValueByPath(fromObject, ['displayName']);
+ if (fromDisplayName != null) {
+ setValueByPath(toObject, ['displayName'], fromDisplayName);
+ }
+ const fromModel = getValueByPath(fromObject, ['model']);
+ if (fromModel != null) {
+ setValueByPath(toObject, ['model'], fromModel);
+ }
+ const fromCreateTime = getValueByPath(fromObject, ['createTime']);
+ if (fromCreateTime != null) {
+ setValueByPath(toObject, ['createTime'], fromCreateTime);
+ }
+ const fromUpdateTime = getValueByPath(fromObject, ['updateTime']);
+ if (fromUpdateTime != null) {
+ setValueByPath(toObject, ['updateTime'], fromUpdateTime);
+ }
+ const fromExpireTime = getValueByPath(fromObject, ['expireTime']);
+ if (fromExpireTime != null) {
+ setValueByPath(toObject, ['expireTime'], fromExpireTime);
+ }
+ const fromUsageMetadata = getValueByPath(fromObject, [
+ 'usageMetadata',
+ ]);
+ if (fromUsageMetadata != null) {
+ setValueByPath(toObject, ['usageMetadata'], fromUsageMetadata);
+ }
+ return toObject;
+}
+function deleteCachedContentResponseFromVertex() {
+ const toObject = {};
+ return toObject;
+}
+function listCachedContentsResponseFromVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromNextPageToken = getValueByPath(fromObject, [
+ 'nextPageToken',
+ ]);
+ if (fromNextPageToken != null) {
+ setValueByPath(toObject, ['nextPageToken'], fromNextPageToken);
+ }
+ const fromCachedContents = getValueByPath(fromObject, [
+ 'cachedContents',
+ ]);
+ if (fromCachedContents != null) {
+ if (Array.isArray(fromCachedContents)) {
+ setValueByPath(toObject, ['cachedContents'], fromCachedContents.map((item) => {
+ return cachedContentFromVertex(apiClient, item);
+ }));
+ }
+ else {
+ setValueByPath(toObject, ['cachedContents'], fromCachedContents);
+ }
+ }
+ return toObject;
+}
+
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+/**
+ * Pagers for the GenAI List APIs.
+ */
+exports.PagedItem = void 0;
+(function (PagedItem) {
+ PagedItem["PAGED_ITEM_BATCH_JOBS"] = "batchJobs";
+ PagedItem["PAGED_ITEM_MODELS"] = "models";
+ PagedItem["PAGED_ITEM_TUNING_JOBS"] = "tuningJobs";
+ PagedItem["PAGED_ITEM_FILES"] = "files";
+ PagedItem["PAGED_ITEM_CACHED_CONTENTS"] = "cachedContents";
+})(exports.PagedItem || (exports.PagedItem = {}));
+/**
+ * Pager class for iterating through paginated results.
+ */
+class Pager {
+ constructor(name, request, response, params) {
+ this.pageInternal = [];
+ this.paramsInternal = {};
+ this.requestInternal = request;
+ this.init(name, response, params);
+ }
+ init(name, response, params) {
+ var _a, _b;
+ this.nameInternal = name;
+ this.pageInternal = response[this.nameInternal] || [];
+ this.idxInternal = 0;
+ let requestParams = { config: {} };
+ if (!params) {
+ requestParams = { config: {} };
+ }
+ else if (typeof params === 'object') {
+ requestParams = Object.assign({}, params);
+ }
+ else {
+ requestParams = params;
+ }
+ if (requestParams['config']) {
+ requestParams['config']['pageToken'] = response['nextPageToken'];
+ }
+ this.paramsInternal = requestParams;
+ this.pageInternalSize =
+ (_b = (_a = requestParams['config']) === null || _a === void 0 ? void 0 : _a['pageSize']) !== null && _b !== void 0 ? _b : this.pageInternal.length;
+ }
+ initNextPage(response) {
+ this.init(this.nameInternal, response, this.paramsInternal);
+ }
+ /**
+ * Returns the current page, which is a list of items.
+ *
+ * @remarks
+ * The first page is retrieved when the pager is created. The returned list of
+ * items could be a subset of the entire list.
+ */
+ get page() {
+ return this.pageInternal;
+ }
+ /**
+     * Returns the type of paged item (for example, `batchJobs`).
+ */
+ get name() {
+ return this.nameInternal;
+ }
+ /**
+ * Returns the length of the page fetched each time by this pager.
+ *
+ * @remarks
+ * The number of items in the page is less than or equal to the page length.
+ */
+ get pageSize() {
+ return this.pageInternalSize;
+ }
+ /**
+ * Returns the parameters when making the API request for the next page.
+ *
+ * @remarks
+ * Parameters contain a set of optional configs that can be
+ * used to customize the API request. For example, the `pageToken` parameter
+ * contains the token to request the next page.
+ */
+ get params() {
+ return this.paramsInternal;
+ }
+ /**
+ * Returns the total number of items in the current page.
+ */
+ get pageLength() {
+ return this.pageInternal.length;
+ }
+ /**
+ * Returns the item at the given index.
+ */
+ getItem(index) {
+ return this.pageInternal[index];
+ }
+ /**
+     * Returns an async iterator that supports iterating through all items
+ * retrieved from the API.
+ *
+ * @remarks
+ * The iterator will automatically fetch the next page if there are more items
+ * to fetch from the API.
+ *
+ * @example
+ *
+ * ```ts
+ * const pager = await ai.files.list({config: {pageSize: 10}});
+ * for await (const file of pager) {
+ * console.log(file.name);
+ * }
+ * ```
+ */
+ [Symbol.asyncIterator]() {
+ return {
+ next: async () => {
+ if (this.idxInternal >= this.pageLength) {
+ if (this.hasNextPage()) {
+ await this.nextPage();
+ }
+ else {
+ return { value: undefined, done: true };
+ }
+ }
+ const item = this.getItem(this.idxInternal);
+ this.idxInternal += 1;
+ return { value: item, done: false };
+ },
+ return: async () => {
+ return { value: undefined, done: true };
+ },
+ };
+ }
+ /**
+ * Fetches the next page of items. This makes a new API request.
+ *
+ * @throws {Error} If there are no more pages to fetch.
+ *
+ * @example
+ *
+ * ```ts
+ * const pager = await ai.files.list({config: {pageSize: 10}});
+ * let page = pager.page;
+ * while (true) {
+ * for (const file of page) {
+ * console.log(file.name);
+ * }
+ * if (!pager.hasNextPage()) {
+ * break;
+ * }
+ * page = await pager.nextPage();
+ * }
+ * ```
+ */
+ async nextPage() {
+ if (!this.hasNextPage()) {
+ throw new Error('No more pages to fetch.');
+ }
+ const response = await this.requestInternal(this.params);
+ this.initNextPage(response);
+ return this.page;
+ }
+ /**
+ * Returns true if there are more pages to fetch from the API.
+ */
+ hasNextPage() {
+ var _a;
+ if (((_a = this.params['config']) === null || _a === void 0 ? void 0 : _a['pageToken']) !== undefined) {
+ return true;
+ }
+ return false;
+ }
+}
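+/*
+ * Usage sketch (assumes an initialized `ai` client): a pager can be drained
+ * with `for await` or walked page by page, as the method docs above show; the
+ * accessors expose the underlying paging state:
+ *
+ * ```ts
+ * const pager = await ai.caches.list({config: {pageSize: 5}});
+ * console.log(pager.name);     // 'cachedContents'
+ * console.log(pager.pageSize); // 5
+ * for await (const cachedContent of pager) {
+ *   console.log(cachedContent.name);
+ * }
+ * ```
+ */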
+
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+// Code generated by the Google Gen AI SDK generator DO NOT EDIT.
+exports.Outcome = void 0;
+(function (Outcome) {
+ Outcome["OUTCOME_UNSPECIFIED"] = "OUTCOME_UNSPECIFIED";
+ Outcome["OUTCOME_OK"] = "OUTCOME_OK";
+ Outcome["OUTCOME_FAILED"] = "OUTCOME_FAILED";
+ Outcome["OUTCOME_DEADLINE_EXCEEDED"] = "OUTCOME_DEADLINE_EXCEEDED";
+})(exports.Outcome || (exports.Outcome = {}));
+exports.Language = void 0;
+(function (Language) {
+ Language["LANGUAGE_UNSPECIFIED"] = "LANGUAGE_UNSPECIFIED";
+ Language["PYTHON"] = "PYTHON";
+})(exports.Language || (exports.Language = {}));
+exports.Type = void 0;
+(function (Type) {
+ Type["TYPE_UNSPECIFIED"] = "TYPE_UNSPECIFIED";
+ Type["STRING"] = "STRING";
+ Type["NUMBER"] = "NUMBER";
+ Type["INTEGER"] = "INTEGER";
+ Type["BOOLEAN"] = "BOOLEAN";
+ Type["ARRAY"] = "ARRAY";
+ Type["OBJECT"] = "OBJECT";
+})(exports.Type || (exports.Type = {}));
+exports.HarmCategory = void 0;
+(function (HarmCategory) {
+ HarmCategory["HARM_CATEGORY_UNSPECIFIED"] = "HARM_CATEGORY_UNSPECIFIED";
+ HarmCategory["HARM_CATEGORY_HATE_SPEECH"] = "HARM_CATEGORY_HATE_SPEECH";
+ HarmCategory["HARM_CATEGORY_DANGEROUS_CONTENT"] = "HARM_CATEGORY_DANGEROUS_CONTENT";
+ HarmCategory["HARM_CATEGORY_HARASSMENT"] = "HARM_CATEGORY_HARASSMENT";
+ HarmCategory["HARM_CATEGORY_SEXUALLY_EXPLICIT"] = "HARM_CATEGORY_SEXUALLY_EXPLICIT";
+ HarmCategory["HARM_CATEGORY_CIVIC_INTEGRITY"] = "HARM_CATEGORY_CIVIC_INTEGRITY";
+})(exports.HarmCategory || (exports.HarmCategory = {}));
+exports.HarmBlockMethod = void 0;
+(function (HarmBlockMethod) {
+ HarmBlockMethod["HARM_BLOCK_METHOD_UNSPECIFIED"] = "HARM_BLOCK_METHOD_UNSPECIFIED";
+ HarmBlockMethod["SEVERITY"] = "SEVERITY";
+ HarmBlockMethod["PROBABILITY"] = "PROBABILITY";
+})(exports.HarmBlockMethod || (exports.HarmBlockMethod = {}));
+exports.HarmBlockThreshold = void 0;
+(function (HarmBlockThreshold) {
+ HarmBlockThreshold["HARM_BLOCK_THRESHOLD_UNSPECIFIED"] = "HARM_BLOCK_THRESHOLD_UNSPECIFIED";
+ HarmBlockThreshold["BLOCK_LOW_AND_ABOVE"] = "BLOCK_LOW_AND_ABOVE";
+ HarmBlockThreshold["BLOCK_MEDIUM_AND_ABOVE"] = "BLOCK_MEDIUM_AND_ABOVE";
+ HarmBlockThreshold["BLOCK_ONLY_HIGH"] = "BLOCK_ONLY_HIGH";
+ HarmBlockThreshold["BLOCK_NONE"] = "BLOCK_NONE";
+ HarmBlockThreshold["OFF"] = "OFF";
+})(exports.HarmBlockThreshold || (exports.HarmBlockThreshold = {}));
+exports.Mode = void 0;
+(function (Mode) {
+ Mode["MODE_UNSPECIFIED"] = "MODE_UNSPECIFIED";
+ Mode["MODE_DYNAMIC"] = "MODE_DYNAMIC";
+})(exports.Mode || (exports.Mode = {}));
+exports.FinishReason = void 0;
+(function (FinishReason) {
+ FinishReason["FINISH_REASON_UNSPECIFIED"] = "FINISH_REASON_UNSPECIFIED";
+ FinishReason["STOP"] = "STOP";
+ FinishReason["MAX_TOKENS"] = "MAX_TOKENS";
+ FinishReason["SAFETY"] = "SAFETY";
+ FinishReason["RECITATION"] = "RECITATION";
+ FinishReason["OTHER"] = "OTHER";
+ FinishReason["BLOCKLIST"] = "BLOCKLIST";
+ FinishReason["PROHIBITED_CONTENT"] = "PROHIBITED_CONTENT";
+ FinishReason["SPII"] = "SPII";
+ FinishReason["MALFORMED_FUNCTION_CALL"] = "MALFORMED_FUNCTION_CALL";
+ FinishReason["IMAGE_SAFETY"] = "IMAGE_SAFETY";
+})(exports.FinishReason || (exports.FinishReason = {}));
+exports.HarmProbability = void 0;
+(function (HarmProbability) {
+ HarmProbability["HARM_PROBABILITY_UNSPECIFIED"] = "HARM_PROBABILITY_UNSPECIFIED";
+ HarmProbability["NEGLIGIBLE"] = "NEGLIGIBLE";
+ HarmProbability["LOW"] = "LOW";
+ HarmProbability["MEDIUM"] = "MEDIUM";
+ HarmProbability["HIGH"] = "HIGH";
+})(exports.HarmProbability || (exports.HarmProbability = {}));
+exports.HarmSeverity = void 0;
+(function (HarmSeverity) {
+ HarmSeverity["HARM_SEVERITY_UNSPECIFIED"] = "HARM_SEVERITY_UNSPECIFIED";
+ HarmSeverity["HARM_SEVERITY_NEGLIGIBLE"] = "HARM_SEVERITY_NEGLIGIBLE";
+ HarmSeverity["HARM_SEVERITY_LOW"] = "HARM_SEVERITY_LOW";
+ HarmSeverity["HARM_SEVERITY_MEDIUM"] = "HARM_SEVERITY_MEDIUM";
+ HarmSeverity["HARM_SEVERITY_HIGH"] = "HARM_SEVERITY_HIGH";
+})(exports.HarmSeverity || (exports.HarmSeverity = {}));
+exports.BlockedReason = void 0;
+(function (BlockedReason) {
+ BlockedReason["BLOCKED_REASON_UNSPECIFIED"] = "BLOCKED_REASON_UNSPECIFIED";
+ BlockedReason["SAFETY"] = "SAFETY";
+ BlockedReason["OTHER"] = "OTHER";
+ BlockedReason["BLOCKLIST"] = "BLOCKLIST";
+ BlockedReason["PROHIBITED_CONTENT"] = "PROHIBITED_CONTENT";
+})(exports.BlockedReason || (exports.BlockedReason = {}));
+exports.Modality = void 0;
+(function (Modality) {
+ Modality["MODALITY_UNSPECIFIED"] = "MODALITY_UNSPECIFIED";
+ Modality["TEXT"] = "TEXT";
+ Modality["IMAGE"] = "IMAGE";
+ Modality["AUDIO"] = "AUDIO";
+})(exports.Modality || (exports.Modality = {}));
+exports.State = void 0;
+(function (State) {
+ State["STATE_UNSPECIFIED"] = "STATE_UNSPECIFIED";
+ State["ACTIVE"] = "ACTIVE";
+ State["ERROR"] = "ERROR";
+})(exports.State || (exports.State = {}));
+exports.DynamicRetrievalConfigMode = void 0;
+(function (DynamicRetrievalConfigMode) {
+ DynamicRetrievalConfigMode["MODE_UNSPECIFIED"] = "MODE_UNSPECIFIED";
+ DynamicRetrievalConfigMode["MODE_DYNAMIC"] = "MODE_DYNAMIC";
+})(exports.DynamicRetrievalConfigMode || (exports.DynamicRetrievalConfigMode = {}));
+exports.FunctionCallingConfigMode = void 0;
+(function (FunctionCallingConfigMode) {
+ FunctionCallingConfigMode["MODE_UNSPECIFIED"] = "MODE_UNSPECIFIED";
+ FunctionCallingConfigMode["AUTO"] = "AUTO";
+ FunctionCallingConfigMode["ANY"] = "ANY";
+ FunctionCallingConfigMode["NONE"] = "NONE";
+})(exports.FunctionCallingConfigMode || (exports.FunctionCallingConfigMode = {}));
+exports.MediaResolution = void 0;
+(function (MediaResolution) {
+ MediaResolution["MEDIA_RESOLUTION_UNSPECIFIED"] = "MEDIA_RESOLUTION_UNSPECIFIED";
+ MediaResolution["MEDIA_RESOLUTION_LOW"] = "MEDIA_RESOLUTION_LOW";
+ MediaResolution["MEDIA_RESOLUTION_MEDIUM"] = "MEDIA_RESOLUTION_MEDIUM";
+ MediaResolution["MEDIA_RESOLUTION_HIGH"] = "MEDIA_RESOLUTION_HIGH";
+})(exports.MediaResolution || (exports.MediaResolution = {}));
+exports.SafetyFilterLevel = void 0;
+(function (SafetyFilterLevel) {
+ SafetyFilterLevel["BLOCK_LOW_AND_ABOVE"] = "BLOCK_LOW_AND_ABOVE";
+ SafetyFilterLevel["BLOCK_MEDIUM_AND_ABOVE"] = "BLOCK_MEDIUM_AND_ABOVE";
+ SafetyFilterLevel["BLOCK_ONLY_HIGH"] = "BLOCK_ONLY_HIGH";
+ SafetyFilterLevel["BLOCK_NONE"] = "BLOCK_NONE";
+})(exports.SafetyFilterLevel || (exports.SafetyFilterLevel = {}));
+exports.PersonGeneration = void 0;
+(function (PersonGeneration) {
+ PersonGeneration["DONT_ALLOW"] = "DONT_ALLOW";
+ PersonGeneration["ALLOW_ADULT"] = "ALLOW_ADULT";
+ PersonGeneration["ALLOW_ALL"] = "ALLOW_ALL";
+})(exports.PersonGeneration || (exports.PersonGeneration = {}));
+exports.ImagePromptLanguage = void 0;
+(function (ImagePromptLanguage) {
+ ImagePromptLanguage["auto"] = "auto";
+ ImagePromptLanguage["en"] = "en";
+ ImagePromptLanguage["ja"] = "ja";
+ ImagePromptLanguage["ko"] = "ko";
+ ImagePromptLanguage["hi"] = "hi";
+})(exports.ImagePromptLanguage || (exports.ImagePromptLanguage = {}));
+exports.FileState = void 0;
+(function (FileState) {
+ FileState["STATE_UNSPECIFIED"] = "STATE_UNSPECIFIED";
+ FileState["PROCESSING"] = "PROCESSING";
+ FileState["ACTIVE"] = "ACTIVE";
+ FileState["FAILED"] = "FAILED";
+})(exports.FileState || (exports.FileState = {}));
+exports.FileSource = void 0;
+(function (FileSource) {
+ FileSource["SOURCE_UNSPECIFIED"] = "SOURCE_UNSPECIFIED";
+ FileSource["UPLOADED"] = "UPLOADED";
+ FileSource["GENERATED"] = "GENERATED";
+})(exports.FileSource || (exports.FileSource = {}));
+exports.MaskReferenceMode = void 0;
+(function (MaskReferenceMode) {
+ MaskReferenceMode["MASK_MODE_DEFAULT"] = "MASK_MODE_DEFAULT";
+ MaskReferenceMode["MASK_MODE_USER_PROVIDED"] = "MASK_MODE_USER_PROVIDED";
+ MaskReferenceMode["MASK_MODE_BACKGROUND"] = "MASK_MODE_BACKGROUND";
+ MaskReferenceMode["MASK_MODE_FOREGROUND"] = "MASK_MODE_FOREGROUND";
+ MaskReferenceMode["MASK_MODE_SEMANTIC"] = "MASK_MODE_SEMANTIC";
+})(exports.MaskReferenceMode || (exports.MaskReferenceMode = {}));
+exports.ControlReferenceType = void 0;
+(function (ControlReferenceType) {
+ ControlReferenceType["CONTROL_TYPE_DEFAULT"] = "CONTROL_TYPE_DEFAULT";
+ ControlReferenceType["CONTROL_TYPE_CANNY"] = "CONTROL_TYPE_CANNY";
+ ControlReferenceType["CONTROL_TYPE_SCRIBBLE"] = "CONTROL_TYPE_SCRIBBLE";
+ ControlReferenceType["CONTROL_TYPE_FACE_MESH"] = "CONTROL_TYPE_FACE_MESH";
+})(exports.ControlReferenceType || (exports.ControlReferenceType = {}));
+exports.SubjectReferenceType = void 0;
+(function (SubjectReferenceType) {
+ SubjectReferenceType["SUBJECT_TYPE_DEFAULT"] = "SUBJECT_TYPE_DEFAULT";
+ SubjectReferenceType["SUBJECT_TYPE_PERSON"] = "SUBJECT_TYPE_PERSON";
+ SubjectReferenceType["SUBJECT_TYPE_ANIMAL"] = "SUBJECT_TYPE_ANIMAL";
+ SubjectReferenceType["SUBJECT_TYPE_PRODUCT"] = "SUBJECT_TYPE_PRODUCT";
+})(exports.SubjectReferenceType || (exports.SubjectReferenceType = {}));
+exports.MediaModality = void 0;
+(function (MediaModality) {
+ MediaModality["MODALITY_UNSPECIFIED"] = "MODALITY_UNSPECIFIED";
+ MediaModality["TEXT"] = "TEXT";
+ MediaModality["IMAGE"] = "IMAGE";
+ MediaModality["VIDEO"] = "VIDEO";
+ MediaModality["AUDIO"] = "AUDIO";
+ MediaModality["DOCUMENT"] = "DOCUMENT";
+})(exports.MediaModality || (exports.MediaModality = {}));
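+/*
+ * Usage sketch (assumes an initialized `ai` client): these string enums are
+ * typically passed inside request configs, for example as safety settings:
+ *
+ * ```ts
+ * const response = await ai.models.generateContent({
+ *   model: 'gemini-2.0-flash',
+ *   contents: 'Tell me a story.',
+ *   config: {
+ *     safetySettings: [{
+ *       category: HarmCategory.HARM_CATEGORY_HARASSMENT,
+ *       threshold: HarmBlockThreshold.BLOCK_ONLY_HIGH,
+ *     }],
+ *   },
+ * });
+ * ```
+ */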
+/** A function response. */
+class FunctionResponse {
+}
+/**
+ * Creates a `Part` object from a `URI` string.
+ */
+function createPartFromUri(uri, mimeType) {
+ return {
+ fileData: {
+ fileUri: uri,
+ mimeType: mimeType,
+ },
+ };
+}
+/**
+ * Creates a `Part` object from a `text` string.
+ */
+function createPartFromText(text) {
+ return {
+ text: text,
+ };
+}
+/**
+ * Creates a `Part` object from a `FunctionCall` object.
+ */
+function createPartFromFunctionCall(name, args) {
+ return {
+ functionCall: {
+ name: name,
+ args: args,
+ },
+ };
+}
+/**
+ * Creates a `Part` object from a `FunctionResponse` object.
+ */
+function createPartFromFunctionResponse(id, name, response) {
+ return {
+ functionResponse: {
+ id: id,
+ name: name,
+ response: response,
+ },
+ };
+}
+/**
+ * Creates a `Part` object from a `base64` `string`.
+ */
+function createPartFromBase64(data, mimeType) {
+ return {
+ inlineData: {
+ data: data,
+ mimeType: mimeType,
+ },
+ };
+}
+/**
+ * Creates a `Part` object from the `outcome` and `output` of a `CodeExecutionResult` object.
+ */
+function createPartFromCodeExecutionResult(outcome, output) {
+ return {
+ codeExecutionResult: {
+ outcome: outcome,
+ output: output,
+ },
+ };
+}
+/**
+ * Creates a `Part` object from the `code` and `language` of an `ExecutableCode` object.
+ */
+function createPartFromExecutableCode(code, language) {
+ return {
+ executableCode: {
+ code: code,
+ language: language,
+ },
+ };
+}
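+/*
+ * Usage sketch: each helper above wraps a single field of a `Part`; the URI
+ * and MIME type below are placeholders, not values required by the SDK:
+ *
+ * ```ts
+ * const textPart = createPartFromText('Describe this file:');
+ * const filePart = createPartFromUri('gs://bucket/report.pdf', 'application/pdf');
+ * // => {fileData: {fileUri: 'gs://bucket/report.pdf', mimeType: 'application/pdf'}}
+ * ```
+ */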
+function _isPart(obj) {
+ if (typeof obj === 'object' && obj !== null) {
+ return ('fileData' in obj ||
+ 'text' in obj ||
+ 'functionCall' in obj ||
+ 'functionResponse' in obj ||
+ 'inlineData' in obj ||
+ 'videoMetadata' in obj ||
+ 'codeExecutionResult' in obj ||
+ 'executableCode' in obj);
+ }
+ return false;
+}
+function _toParts(partOrString) {
+ const parts = [];
+ if (typeof partOrString === 'string') {
+ parts.push(createPartFromText(partOrString));
+ }
+ else if (_isPart(partOrString)) {
+ parts.push(partOrString);
+ }
+ else if (Array.isArray(partOrString)) {
+ if (partOrString.length === 0) {
+ throw new Error('partOrString cannot be an empty array');
+ }
+ for (const part of partOrString) {
+ if (typeof part === 'string') {
+ parts.push(createPartFromText(part));
+ }
+ else if (_isPart(part)) {
+ parts.push(part);
+ }
+ else {
+ throw new Error('element in PartUnion must be a Part object or string');
+ }
+ }
+ }
+ else {
+ throw new Error('partOrString must be a Part object, string, or array');
+ }
+ return parts;
+}
+/**
+ * Creates a `Content` object with a user role from a `PartListUnion` object or `string`.
+ */
+function createUserContent(partOrString) {
+ return {
+ role: 'user',
+ parts: _toParts(partOrString),
+ };
+}
+/**
+ * Creates a `Content` object with a model role from a `PartListUnion` object or `string`.
+ */
+function createModelContent(partOrString) {
+ return {
+ role: 'model',
+ parts: _toParts(partOrString),
+ };
+}
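+/*
+ * Usage sketch: `createUserContent` accepts a string, a single `Part`, or an
+ * array mixing both; everything is normalized through `_toParts`:
+ *
+ * ```ts
+ * const content = createUserContent([
+ *   'What is shown in this image?',
+ *   createPartFromUri('gs://bucket/image.png', 'image/png'), // placeholder URI
+ * ]);
+ * // => {role: 'user', parts: [{text: '...'}, {fileData: {...}}]}
+ * ```
+ */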
+/** Content filter results for a prompt sent in the request. */
+class GenerateContentResponsePromptFeedback {
+}
+/** Usage metadata about response(s). */
+class GenerateContentResponseUsageMetadata {
+}
+/** Response message for PredictionService.GenerateContent. */
+class GenerateContentResponse {
+ /**
+ * Returns the concatenation of all text parts from the first candidate in the response.
+ *
+ * @remarks
+ * If there are multiple candidates in the response, the text from the first
+ * one will be returned.
+ * If there are non-text parts in the response, the concatenation of all text
+ * parts will be returned, and a warning will be logged.
+ * If there are thought parts in the response, the concatenation of all text
+ * parts excluding the thought parts will be returned.
+ *
+ * @example
+ * ```ts
+ * const response = await ai.models.generateContent({
+ * model: 'gemini-2.0-flash',
+ * contents:
+ * 'Why is the sky blue?',
+ * });
+ *
+ * console.debug(response.text);
+ * ```
+ */
+ get text() {
+ var _a, _b, _c, _d, _e, _f, _g, _h;
+ if (((_d = (_c = (_b = (_a = this.candidates) === null || _a === void 0 ? void 0 : _a[0]) === null || _b === void 0 ? void 0 : _b.content) === null || _c === void 0 ? void 0 : _c.parts) === null || _d === void 0 ? void 0 : _d.length) === 0) {
+ return undefined;
+ }
+ if (this.candidates && this.candidates.length > 1) {
+ console.warn('there are multiple candidates in the response, returning text from the first one.');
+ }
+ let text = '';
+ let anyTextPartText = false;
+ const nonTextParts = [];
+ for (const part of (_h = (_g = (_f = (_e = this.candidates) === null || _e === void 0 ? void 0 : _e[0]) === null || _f === void 0 ? void 0 : _f.content) === null || _g === void 0 ? void 0 : _g.parts) !== null && _h !== void 0 ? _h : []) {
+ for (const [fieldName, fieldValue] of Object.entries(part)) {
+                // Track any non-text, non-thought field that actually holds a value.
+                if (fieldName !== 'text' &&
+                    fieldName !== 'thought' &&
+                    (fieldValue !== null && fieldValue !== undefined)) {
+ nonTextParts.push(fieldName);
+ }
+ }
+ if (typeof part.text === 'string') {
+ if (typeof part.thought === 'boolean' && part.thought) {
+ continue;
+ }
+ anyTextPartText = true;
+ text += part.text;
+ }
+ }
+ if (nonTextParts.length > 0) {
+ console.warn(`there are non-text parts ${nonTextParts} in the response, returning concatenation of all text parts. Please refer to the non text parts for a full response from model.`);
+ }
+ // part.text === '' is different from part.text is null
+ return anyTextPartText ? text : undefined;
+ }
+ /**
+ * Returns the function calls from the first candidate in the response.
+ *
+ * @remarks
+ * If there are multiple candidates in the response, the function calls from
+ * the first one will be returned.
+ * If there are no function calls in the response, undefined will be returned.
+ *
+ * @example
+ * ```ts
+ * const controlLightFunctionDeclaration: FunctionDeclaration = {
+ * name: 'controlLight',
+ * parameters: {
+ * type: Type.OBJECT,
+ * description: 'Set the brightness and color temperature of a room light.',
+ * properties: {
+ * brightness: {
+ * type: Type.NUMBER,
+ * description:
+ * 'Light level from 0 to 100. Zero is off and 100 is full brightness.',
+ * },
+ * colorTemperature: {
+ * type: Type.STRING,
+ * description:
+ * 'Color temperature of the light fixture which can be `daylight`, `cool` or `warm`.',
+ * },
+ * },
+     *     required: ['brightness', 'colorTemperature'],
+     *   },
+     * };
+ * const response = await ai.models.generateContent({
+ * model: 'gemini-2.0-flash',
+ * contents: 'Dim the lights so the room feels cozy and warm.',
+ * config: {
+ * tools: [{functionDeclarations: [controlLightFunctionDeclaration]}],
+ * toolConfig: {
+ * functionCallingConfig: {
+ * mode: FunctionCallingConfigMode.ANY,
+ * allowedFunctionNames: ['controlLight'],
+ * },
+ * },
+ * },
+ * });
+ * console.debug(JSON.stringify(response.functionCalls));
+ * ```
+ */
+ get functionCalls() {
+ var _a, _b, _c, _d, _e, _f, _g, _h;
+ if (((_d = (_c = (_b = (_a = this.candidates) === null || _a === void 0 ? void 0 : _a[0]) === null || _b === void 0 ? void 0 : _b.content) === null || _c === void 0 ? void 0 : _c.parts) === null || _d === void 0 ? void 0 : _d.length) === 0) {
+ return undefined;
+ }
+ if (this.candidates && this.candidates.length > 1) {
+ console.warn('there are multiple candidates in the response, returning function calls from the first one.');
+ }
+ const functionCalls = (_h = (_g = (_f = (_e = this.candidates) === null || _e === void 0 ? void 0 : _e[0]) === null || _f === void 0 ? void 0 : _f.content) === null || _g === void 0 ? void 0 : _g.parts) === null || _h === void 0 ? void 0 : _h.filter((part) => part.functionCall).map((part) => part.functionCall).filter((functionCall) => functionCall !== undefined);
+ if ((functionCalls === null || functionCalls === void 0 ? void 0 : functionCalls.length) === 0) {
+ return undefined;
+ }
+ return functionCalls;
+ }
+ /**
+ * Returns the first executable code from the first candidate in the response.
+ *
+ * @remarks
+ * If there are multiple candidates in the response, the executable code from
+ * the first one will be returned.
+     * If there is no executable code in the response, undefined will be
+     * returned.
+ *
+ * @example
+ * ```ts
+ * const response = await ai.models.generateContent({
+ * model: 'gemini-2.0-flash',
+ * contents:
+     *     'What is the sum of the first 50 prime numbers? Generate and run code for the calculation, and make sure you get all 50.',
+ * config: {
+ * tools: [{codeExecution: {}}],
+ * },
+ * });
+ *
+ * console.debug(response.executableCode);
+ * ```
+ */
+ get executableCode() {
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j;
+ if (((_d = (_c = (_b = (_a = this.candidates) === null || _a === void 0 ? void 0 : _a[0]) === null || _b === void 0 ? void 0 : _b.content) === null || _c === void 0 ? void 0 : _c.parts) === null || _d === void 0 ? void 0 : _d.length) === 0) {
+ return undefined;
+ }
+ if (this.candidates && this.candidates.length > 1) {
+ console.warn('there are multiple candidates in the response, returning executable code from the first one.');
+ }
+ const executableCode = (_h = (_g = (_f = (_e = this.candidates) === null || _e === void 0 ? void 0 : _e[0]) === null || _f === void 0 ? void 0 : _f.content) === null || _g === void 0 ? void 0 : _g.parts) === null || _h === void 0 ? void 0 : _h.filter((part) => part.executableCode).map((part) => part.executableCode).filter((executableCode) => executableCode !== undefined);
+ if ((executableCode === null || executableCode === void 0 ? void 0 : executableCode.length) === 0) {
+ return undefined;
+ }
+ return (_j = executableCode === null || executableCode === void 0 ? void 0 : executableCode[0]) === null || _j === void 0 ? void 0 : _j.code;
+ }
+ /**
+ * Returns the first code execution result from the first candidate in the response.
+ *
+ * @remarks
+ * If there are multiple candidates in the response, the code execution result from
+ * the first one will be returned.
+     * If there is no code execution result in the response, undefined will be returned.
+ *
+ * @example
+ * ```ts
+ * const response = await ai.models.generateContent({
+ * model: 'gemini-2.0-flash',
+ * contents:
+     *     'What is the sum of the first 50 prime numbers? Generate and run code for the calculation, and make sure you get all 50.',
+ * config: {
+ * tools: [{codeExecution: {}}],
+ * },
+ * });
+ *
+ * console.debug(response.codeExecutionResult);
+ * ```
+ */
+ get codeExecutionResult() {
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j;
+ if (((_d = (_c = (_b = (_a = this.candidates) === null || _a === void 0 ? void 0 : _a[0]) === null || _b === void 0 ? void 0 : _b.content) === null || _c === void 0 ? void 0 : _c.parts) === null || _d === void 0 ? void 0 : _d.length) === 0) {
+ return undefined;
+ }
+ if (this.candidates && this.candidates.length > 1) {
+ console.warn('there are multiple candidates in the response, returning code execution result from the first one.');
+ }
+ const codeExecutionResult = (_h = (_g = (_f = (_e = this.candidates) === null || _e === void 0 ? void 0 : _e[0]) === null || _f === void 0 ? void 0 : _f.content) === null || _g === void 0 ? void 0 : _g.parts) === null || _h === void 0 ? void 0 : _h.filter((part) => part.codeExecutionResult).map((part) => part.codeExecutionResult).filter((codeExecutionResult) => codeExecutionResult !== undefined);
+ if ((codeExecutionResult === null || codeExecutionResult === void 0 ? void 0 : codeExecutionResult.length) === 0) {
+ return undefined;
+ }
+ return (_j = codeExecutionResult === null || codeExecutionResult === void 0 ? void 0 : codeExecutionResult[0]) === null || _j === void 0 ? void 0 : _j.output;
+ }
+}
+/** Response for the embed_content method. */
+class EmbedContentResponse {
+}
+/** The output images response. */
+class GenerateImagesResponse {
+}
+/** Response for counting tokens. */
+class CountTokensResponse {
+}
+/** Response for computing tokens. */
+class ComputeTokensResponse {
+}
+/** Response with generated videos. */
+class GenerateVideosResponse {
+}
+/** Empty response for caches.delete method. */
+class DeleteCachedContentResponse {
+}
+class ListCachedContentsResponse {
+}
+/** Response for the list files method. */
+class ListFilesResponse {
+}
+/** A wrapper class for the http response. */
+class HttpResponse {
+ constructor(response) {
+ // Process the headers.
+ const headers = {};
+ for (const pair of response.headers.entries()) {
+ headers[pair[0]] = pair[1];
+ }
+ this.headers = headers;
+ // Keep the original response.
+ this.responseInternal = response;
+ }
+ json() {
+ return this.responseInternal.json();
+ }
+}
+/** Response for the create file method. */
+class CreateFileResponse {
+}
+/** Response for the delete file method. */
+class DeleteFileResponse {
+}
+/** Represents a single response in a replay. */
+class ReplayResponse {
+}
+/** Client generated response to a `ToolCall` received from the server.
+
+ Individual `FunctionResponse` objects are matched to the respective
+ `FunctionCall` objects by the `id` field.
+
+ Note that in the unary and server-streaming GenerateContent APIs function
+ calling happens by exchanging the `Content` parts, while in the bidi
+ GenerateContent APIs function calling happens over this dedicated set of
+ messages.
+ */
+class LiveClientToolResponse {
+}
+/** Parameters for sending tool responses to the live API. */
+class LiveSendToolResponseParameters {
+ constructor() {
+ /** Tool responses to send to the session. */
+ this.functionResponses = [];
+ }
+}
+
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+class Caches extends BaseModule {
+ constructor(apiClient) {
+ super();
+ this.apiClient = apiClient;
+ /**
+ * Lists cached content configurations.
+ *
+ * @param params - The parameters for the list request.
+ * @return The paginated results of the list of cached contents.
+ *
+ * @example
+ * ```ts
+ * const cachedContents = await ai.caches.list({config: {'pageSize': 2}});
+ * for (const cachedContent of cachedContents) {
+ * console.log(cachedContent);
+ * }
+ * ```
+ */
+ this.list = async (params = {}) => {
+ return new Pager(exports.PagedItem.PAGED_ITEM_CACHED_CONTENTS, (x) => this.listInternal(x), await this.listInternal(params), params);
+ };
+ }
+ /**
+ * Creates a cached contents resource.
+ *
+ * @remarks
+ * Context caching is only supported for specific models. See [Gemini
+     * Developer API reference](https://ai.google.dev/gemini-api/docs/caching?lang=node/context-cac)
+     * and [Vertex AI reference](https://cloud.google.com/vertex-ai/generative-ai/docs/context-cache/context-cache-overview#supported_models)
+ * for more information.
+ *
+ * @param params - The parameters for the create request.
+ * @return The created cached content.
+ *
+ * @example
+ * ```ts
+ * const contents = ...; // Initialize the content to cache.
+ * const response = await ai.caches.create({
+ * model: 'gemini-1.5-flash',
+ * config: {
+ * 'contents': contents,
+ * 'displayName': 'test cache',
+ * 'systemInstruction': 'What is the sum of the two pdfs?',
+ * 'ttl': '86400s',
+ * }
+ * });
+ * ```
+ */
+ async create(params) {
+ var _a, _b;
+ let response;
+ let path = '';
+ let queryParams = {};
+ if (this.apiClient.isVertexAI()) {
+ const body = createCachedContentParametersToVertex(this.apiClient, params);
+ path = formatMap('cachedContents', body['_url']);
+ queryParams = body['_query'];
+ delete body['config'];
+ delete body['_url'];
+ delete body['_query'];
+ response = this.apiClient
+ .request({
+ path: path,
+ queryParams: queryParams,
+ body: JSON.stringify(body),
+ httpMethod: 'POST',
+ httpOptions: (_a = params.config) === null || _a === void 0 ? void 0 : _a.httpOptions,
+ })
+ .then((httpResponse) => {
+ return httpResponse.json();
+ });
+ return response.then((apiResponse) => {
+ const resp = cachedContentFromVertex(this.apiClient, apiResponse);
+ return resp;
+ });
+ }
+ else {
+ const body = createCachedContentParametersToMldev(this.apiClient, params);
+ path = formatMap('cachedContents', body['_url']);
+ queryParams = body['_query'];
+ delete body['config'];
+ delete body['_url'];
+ delete body['_query'];
+ response = this.apiClient
+ .request({
+ path: path,
+ queryParams: queryParams,
+ body: JSON.stringify(body),
+ httpMethod: 'POST',
+ httpOptions: (_b = params.config) === null || _b === void 0 ? void 0 : _b.httpOptions,
+ })
+ .then((httpResponse) => {
+ return httpResponse.json();
+ });
+ return response.then((apiResponse) => {
+ const resp = cachedContentFromMldev(this.apiClient, apiResponse);
+ return resp;
+ });
+ }
+ }
+ /**
+ * Gets cached content configurations.
+ *
+ * @param params - The parameters for the get request.
+ * @return The cached content.
+ *
+ * @example
+ * ```ts
+     * await ai.caches.get({name: 'cachedContents/...'});
+ * ```
+ */
+ async get(params) {
+ var _a, _b;
+ let response;
+ let path = '';
+ let queryParams = {};
+ if (this.apiClient.isVertexAI()) {
+ const body = getCachedContentParametersToVertex(this.apiClient, params);
+ path = formatMap('{name}', body['_url']);
+ queryParams = body['_query'];
+ delete body['config'];
+ delete body['_url'];
+ delete body['_query'];
+ response = this.apiClient
+ .request({
+ path: path,
+ queryParams: queryParams,
+ body: JSON.stringify(body),
+ httpMethod: 'GET',
+ httpOptions: (_a = params.config) === null || _a === void 0 ? void 0 : _a.httpOptions,
+ })
+ .then((httpResponse) => {
+ return httpResponse.json();
+ });
+ return response.then((apiResponse) => {
+ const resp = cachedContentFromVertex(this.apiClient, apiResponse);
+ return resp;
+ });
+ }
+ else {
+ const body = getCachedContentParametersToMldev(this.apiClient, params);
+ path = formatMap('{name}', body['_url']);
+ queryParams = body['_query'];
+ delete body['config'];
+ delete body['_url'];
+ delete body['_query'];
+ response = this.apiClient
+ .request({
+ path: path,
+ queryParams: queryParams,
+ body: JSON.stringify(body),
+ httpMethod: 'GET',
+ httpOptions: (_b = params.config) === null || _b === void 0 ? void 0 : _b.httpOptions,
+ })
+ .then((httpResponse) => {
+ return httpResponse.json();
+ });
+ return response.then((apiResponse) => {
+ const resp = cachedContentFromMldev(this.apiClient, apiResponse);
+ return resp;
+ });
+ }
+ }
+ /**
+ * Deletes cached content.
+ *
+ * @param params - The parameters for the delete request.
+ * @return The empty response returned by the API.
+ *
+ * @example
+ * ```ts
+     * await ai.caches.delete({name: 'cachedContents/...'});
+ * ```
+ */
+ async delete(params) {
+ var _a, _b;
+ let response;
+ let path = '';
+ let queryParams = {};
+ if (this.apiClient.isVertexAI()) {
+ const body = deleteCachedContentParametersToVertex(this.apiClient, params);
+ path = formatMap('{name}', body['_url']);
+ queryParams = body['_query'];
+ delete body['config'];
+ delete body['_url'];
+ delete body['_query'];
+ response = this.apiClient
+ .request({
+ path: path,
+ queryParams: queryParams,
+ body: JSON.stringify(body),
+ httpMethod: 'DELETE',
+ httpOptions: (_a = params.config) === null || _a === void 0 ? void 0 : _a.httpOptions,
+ })
+ .then((httpResponse) => {
+ return httpResponse.json();
+ });
+ return response.then(() => {
+ const resp = deleteCachedContentResponseFromVertex();
+ const typedResp = new DeleteCachedContentResponse();
+ Object.assign(typedResp, resp);
+ return typedResp;
+ });
+ }
+ else {
+ const body = deleteCachedContentParametersToMldev(this.apiClient, params);
+ path = formatMap('{name}', body['_url']);
+ queryParams = body['_query'];
+ delete body['config'];
+ delete body['_url'];
+ delete body['_query'];
+ response = this.apiClient
+ .request({
+ path: path,
+ queryParams: queryParams,
+ body: JSON.stringify(body),
+ httpMethod: 'DELETE',
+ httpOptions: (_b = params.config) === null || _b === void 0 ? void 0 : _b.httpOptions,
+ })
+ .then((httpResponse) => {
+ return httpResponse.json();
+ });
+ return response.then(() => {
+ const resp = deleteCachedContentResponseFromMldev();
+ const typedResp = new DeleteCachedContentResponse();
+ Object.assign(typedResp, resp);
+ return typedResp;
+ });
+ }
+ }
+ /**
+ * Updates cached content configurations.
+ *
+ * @param params - The parameters for the update request.
+ * @return The updated cached content.
+ *
+ * @example
+ * ```ts
+ * const response = await ai.caches.update({
+     *   name: 'cachedContents/...',
+ * config: {'ttl': '7600s'}
+ * });
+ * ```
+ */
+ async update(params) {
+ var _a, _b;
+ let response;
+ let path = '';
+ let queryParams = {};
+ if (this.apiClient.isVertexAI()) {
+ const body = updateCachedContentParametersToVertex(this.apiClient, params);
+ path = formatMap('{name}', body['_url']);
+ queryParams = body['_query'];
+ delete body['config'];
+ delete body['_url'];
+ delete body['_query'];
+ response = this.apiClient
+ .request({
+ path: path,
+ queryParams: queryParams,
+ body: JSON.stringify(body),
+ httpMethod: 'PATCH',
+ httpOptions: (_a = params.config) === null || _a === void 0 ? void 0 : _a.httpOptions,
+ })
+ .then((httpResponse) => {
+ return httpResponse.json();
+ });
+ return response.then((apiResponse) => {
+ const resp = cachedContentFromVertex(this.apiClient, apiResponse);
+ return resp;
+ });
+ }
+ else {
+ const body = updateCachedContentParametersToMldev(this.apiClient, params);
+ path = formatMap('{name}', body['_url']);
+ queryParams = body['_query'];
+ delete body['config'];
+ delete body['_url'];
+ delete body['_query'];
+ response = this.apiClient
+ .request({
+ path: path,
+ queryParams: queryParams,
+ body: JSON.stringify(body),
+ httpMethod: 'PATCH',
+ httpOptions: (_b = params.config) === null || _b === void 0 ? void 0 : _b.httpOptions,
+ })
+ .then((httpResponse) => {
+ return httpResponse.json();
+ });
+ return response.then((apiResponse) => {
+ const resp = cachedContentFromMldev(this.apiClient, apiResponse);
+ return resp;
+ });
+ }
+ }
+ async listInternal(params) {
+ var _a, _b;
+ let response;
+ let path = '';
+ let queryParams = {};
+ if (this.apiClient.isVertexAI()) {
+ const body = listCachedContentsParametersToVertex(this.apiClient, params);
+ path = formatMap('cachedContents', body['_url']);
+ queryParams = body['_query'];
+ delete body['config'];
+ delete body['_url'];
+ delete body['_query'];
+ response = this.apiClient
+ .request({
+ path: path,
+ queryParams: queryParams,
+ body: JSON.stringify(body),
+ httpMethod: 'GET',
+ httpOptions: (_a = params.config) === null || _a === void 0 ? void 0 : _a.httpOptions,
+ })
+ .then((httpResponse) => {
+ return httpResponse.json();
+ });
+ return response.then((apiResponse) => {
+ const resp = listCachedContentsResponseFromVertex(this.apiClient, apiResponse);
+ const typedResp = new ListCachedContentsResponse();
+ Object.assign(typedResp, resp);
+ return typedResp;
+ });
+ }
+ else {
+ const body = listCachedContentsParametersToMldev(this.apiClient, params);
+ path = formatMap('cachedContents', body['_url']);
+ queryParams = body['_query'];
+ delete body['config'];
+ delete body['_url'];
+ delete body['_query'];
+ response = this.apiClient
+ .request({
+ path: path,
+ queryParams: queryParams,
+ body: JSON.stringify(body),
+ httpMethod: 'GET',
+ httpOptions: (_b = params.config) === null || _b === void 0 ? void 0 : _b.httpOptions,
+ })
+ .then((httpResponse) => {
+ return httpResponse.json();
+ });
+ return response.then((apiResponse) => {
+ const resp = listCachedContentsResponseFromMldev(this.apiClient, apiResponse);
+ const typedResp = new ListCachedContentsResponse();
+ Object.assign(typedResp, resp);
+ return typedResp;
+ });
+ }
+ }
+}
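+/*
+ * Lifecycle sketch (assumes an initialized `ai` client and a `contents` value
+ * prepared beforehand; the model name is only an example): the methods above
+ * compose into the usual create/update/get/delete flow, keyed by the `name`
+ * returned from `create`:
+ *
+ * ```ts
+ * const cache = await ai.caches.create({
+ *   model: 'gemini-1.5-flash',
+ *   config: {contents, displayName: 'demo cache', ttl: '3600s'},
+ * });
+ * await ai.caches.update({name: cache.name, config: {ttl: '7200s'}});
+ * const fetched = await ai.caches.get({name: cache.name});
+ * await ai.caches.delete({name: cache.name});
+ * ```
+ */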
+
+/******************************************************************************
+Copyright (c) Microsoft Corporation.
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+***************************************************************************** */
+/* global Reflect, Promise, SuppressedError, Symbol, Iterator */
+
+
+function __values(o) {
+ var s = typeof Symbol === "function" && Symbol.iterator, m = s && o[s], i = 0;
+ if (m) return m.call(o);
+ if (o && typeof o.length === "number") return {
+ next: function () {
+ if (o && i >= o.length) o = void 0;
+ return { value: o && o[i++], done: !o };
+ }
+ };
+ throw new TypeError(s ? "Object is not iterable." : "Symbol.iterator is not defined.");
+}
+
+function __await(v) {
+ return this instanceof __await ? (this.v = v, this) : new __await(v);
+}
+
+function __asyncGenerator(thisArg, _arguments, generator) {
+ if (!Symbol.asyncIterator) throw new TypeError("Symbol.asyncIterator is not defined.");
+ var g = generator.apply(thisArg, _arguments || []), i, q = [];
+ return i = Object.create((typeof AsyncIterator === "function" ? AsyncIterator : Object).prototype), verb("next"), verb("throw"), verb("return", awaitReturn), i[Symbol.asyncIterator] = function () { return this; }, i;
+ function awaitReturn(f) { return function (v) { return Promise.resolve(v).then(f, reject); }; }
+ function verb(n, f) { if (g[n]) { i[n] = function (v) { return new Promise(function (a, b) { q.push([n, v, a, b]) > 1 || resume(n, v); }); }; if (f) i[n] = f(i[n]); } }
+ function resume(n, v) { try { step(g[n](v)); } catch (e) { settle(q[0][3], e); } }
+ function step(r) { r.value instanceof __await ? Promise.resolve(r.value.v).then(fulfill, reject) : settle(q[0][2], r); }
+ function fulfill(value) { resume("next", value); }
+ function reject(value) { resume("throw", value); }
+ function settle(f, v) { if (f(v), q.shift(), q.length) resume(q[0][0], q[0][1]); }
+}
+
+function __asyncValues(o) {
+ if (!Symbol.asyncIterator) throw new TypeError("Symbol.asyncIterator is not defined.");
+ var m = o[Symbol.asyncIterator], i;
+ return m ? m.call(o) : (o = typeof __values === "function" ? __values(o) : o[Symbol.iterator](), i = {}, verb("next"), verb("throw"), verb("return"), i[Symbol.asyncIterator] = function () { return this; }, i);
+ function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }
+ function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }
+}
+
+typeof SuppressedError === "function" ? SuppressedError : function (error, suppressed, message) {
+ var e = new Error(message);
+ return e.name = "SuppressedError", e.error = error, e.suppressed = suppressed, e;
+};
+
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+/**
+ * Returns true if the response is valid, false otherwise.
+ */
+function isValidResponse(response) {
+ var _a;
+ if (response.candidates == undefined || response.candidates.length === 0) {
+ return false;
+ }
+ const content = (_a = response.candidates[0]) === null || _a === void 0 ? void 0 : _a.content;
+ if (content === undefined) {
+ return false;
+ }
+ return isValidContent(content);
+}
+function isValidContent(content) {
+ if (content.parts === undefined || content.parts.length === 0) {
+ return false;
+ }
+ for (const part of content.parts) {
+ if (part === undefined || Object.keys(part).length === 0) {
+ return false;
+ }
+ if (part.text !== undefined && part.text === '') {
+ return false;
+ }
+ }
+ return true;
+}
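+/*
+ * Illustrative sketch: `isValidContent` rejects empty parts and empty text,
+ * which is what lets the chat session below filter out blocked or truncated
+ * model turns:
+ *
+ * ```ts
+ * isValidContent({parts: [{text: 'hi'}]}); // true
+ * isValidContent({parts: [{}]});           // false: empty part object
+ * isValidContent({parts: [{text: ''}]});   // false: empty text
+ * ```
+ */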
+/**
+ * Validates the history contains the correct roles.
+ *
+ * @remarks
+ * Expects the history to start with a user turn and then alternate between
+ * user and model turns.
+ *
+ * @throws Error if the history does not start with a user turn.
+ * @throws Error if the history contains an invalid role.
+ */
+function validateHistory(history) {
+ // Empty history is valid.
+ if (history.length === 0) {
+ return;
+ }
+ if (history[0].role !== 'user') {
+ throw new Error('History must start with a user turn.');
+ }
+ for (const content of history) {
+ if (content.role !== 'user' && content.role !== 'model') {
+ throw new Error(`Role must be user or model, but got ${content.role}.`);
+ }
+ }
+}
+/**
+ * Extracts the curated (valid) history from a comprehensive history.
+ *
+ * @remarks
+ * The model may sometimes generate invalid or empty contents (e.g., due to
+ * safety filters or recitation). Extracting valid turns from the history
+ * ensures that subsequent requests can be accepted by the model.
+ */
+function extractCuratedHistory(comprehensiveHistory) {
+ if (comprehensiveHistory === undefined || comprehensiveHistory.length === 0) {
+ return [];
+ }
+ const curatedHistory = [];
+ const length = comprehensiveHistory.length;
+ let i = 0;
+ let userInput = comprehensiveHistory[0];
+ while (i < length) {
+ if (comprehensiveHistory[i].role === 'user') {
+ userInput = comprehensiveHistory[i];
+ i++;
+ }
+ else {
+ const modelOutput = [];
+ let isValid = true;
+ while (i < length && comprehensiveHistory[i].role === 'model') {
+ modelOutput.push(comprehensiveHistory[i]);
+ if (isValid && !isValidContent(comprehensiveHistory[i])) {
+ isValid = false;
+ }
+ i++;
+ }
+ if (isValid) {
+ curatedHistory.push(userInput);
+ curatedHistory.push(...modelOutput);
+ }
+ }
+ }
+ return curatedHistory;
+}
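+/*
+ * Illustrative sketch: an invalid model turn causes the whole user/model
+ * exchange to be dropped from the curated history:
+ *
+ * ```ts
+ * const history = [
+ *   {role: 'user', parts: [{text: 'Hi'}]},
+ *   {role: 'model', parts: []}, // invalid: empty output
+ *   {role: 'user', parts: [{text: 'Still there?'}]},
+ *   {role: 'model', parts: [{text: 'Yes.'}]},
+ * ];
+ * extractCuratedHistory(history);
+ * // => only the second user/model pair is kept
+ * ```
+ */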
+/**
+ * A utility class to create a chat session.
+ */
+class Chats {
+ constructor(modelsModule, apiClient) {
+ this.modelsModule = modelsModule;
+ this.apiClient = apiClient;
+ }
+ /**
+ * Creates a new chat session.
+ *
+ * @remarks
+ * The config in the params will be used for all requests within the chat
+ * session unless overridden by a per-request `config` in
+     * {@link types.SendMessageParameters#config}.
+ *
+ * @param params - Parameters for creating a chat session.
+ * @returns A new chat session.
+ *
+ * @example
+ * ```ts
+ * const chat = ai.chats.create({
+     *   model: 'gemini-2.0-flash',
+ * config: {
+ * temperature: 0.5,
+ * maxOutputTokens: 1024,
+ * }
+ * });
+ * ```
+ */
+ create(params) {
+ return new Chat(this.apiClient, this.modelsModule, params.model, params.config, params.history);
+ }
+}
+/**
+ * Chat session that enables sending messages to the model with previous
+ * conversation context.
+ *
+ * @remarks
+ * The session maintains all the turns between user and model.
+ */
+class Chat {
+ constructor(apiClient, modelsModule, model, config = {}, history = []) {
+ this.apiClient = apiClient;
+ this.modelsModule = modelsModule;
+ this.model = model;
+ this.config = config;
+ this.history = history;
+ // A promise to represent the current state of the message being sent to the
+ // model.
+ this.sendPromise = Promise.resolve();
+ validateHistory(history);
+ }
+ /**
+ * Sends a message to the model and returns the response.
+ *
+ * @remarks
+ * This method will wait for the previous message to be processed before
+ * sending the next message.
+ *
+     * @see {@link Chat#sendMessageStream} for the streaming method.
+ * @param params - parameters for sending messages within a chat session.
+ * @returns The model's response.
+ *
+ * @example
+ * ```ts
+ * const chat = ai.chats.create({model: 'gemini-2.0-flash'});
+ * const response = await chat.sendMessage({
+ * message: 'Why is the sky blue?'
+ * });
+ * console.log(response.text);
+ * ```
+ */
+ async sendMessage(params) {
+ var _a;
+ await this.sendPromise;
+ const inputContent = tContent(this.apiClient, params.message);
+ const responsePromise = this.modelsModule.generateContent({
+ model: this.model,
+ contents: this.getHistory(true).concat(inputContent),
+ config: (_a = params.config) !== null && _a !== void 0 ? _a : this.config,
+ });
+ this.sendPromise = (async () => {
+ var _a, _b;
+ const response = await responsePromise;
+ const outputContent = (_b = (_a = response.candidates) === null || _a === void 0 ? void 0 : _a[0]) === null || _b === void 0 ? void 0 : _b.content;
+ const modelOutput = outputContent ? [outputContent] : [];
+ this.recordHistory(inputContent, modelOutput);
+ return;
+ })();
+ await this.sendPromise;
+ return responsePromise;
+ }
+ /**
+ * Sends a message to the model and returns the response in chunks.
+ *
+ * @remarks
+ * This method will wait for the previous message to be processed before
+ * sending the next message.
+ *
+     * @see {@link Chat#sendMessage} for the non-streaming method.
+ * @param params - parameters for sending the message.
+ * @return The model's response.
+ *
+ * @example
+ * ```ts
+ * const chat = ai.chats.create({model: 'gemini-2.0-flash'});
+ * const response = await chat.sendMessageStream({
+ * message: 'Why is the sky blue?'
+ * });
+ * for await (const chunk of response) {
+ * console.log(chunk.text);
+ * }
+ * ```
+ */
+ async sendMessageStream(params) {
+ var _a;
+ await this.sendPromise;
+ const inputContent = tContent(this.apiClient, params.message);
+ const streamResponse = this.modelsModule.generateContentStream({
+ model: this.model,
+ contents: this.getHistory(true).concat(inputContent),
+ config: (_a = params.config) !== null && _a !== void 0 ? _a : this.config,
+ });
+ this.sendPromise = streamResponse.then(() => undefined);
+ const response = await streamResponse;
+ const result = this.processStreamResponse(response, inputContent);
+ return result;
+ }
+ /**
+ * Returns the chat history.
+ *
+ * @remarks
+ * The history is a list of contents alternating between user and model.
+ *
+ * There are two types of history:
+ * - The `curated history` contains only the valid turns between user and
+ * model, which will be included in the subsequent requests sent to the model.
+ * - The `comprehensive history` contains all turns, including invalid or
+ * empty model outputs, providing a complete record of the history.
+ *
+ * The history is updated after the response from the model is received; for
+ * a streaming response, that means after the last chunk has been received.
+ *
+ * The `comprehensive history` is returned by default. To get the `curated
+ * history`, set the `curated` parameter to `true`.
+ *
+ * @param curated - whether to return the curated history or the comprehensive
+ * history.
+ * @return History contents alternating between user and model for the entire
+ * chat session.
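+ *
+ * @example
+ * A minimal sketch contrasting the two views of the same session:
+ *
+ * ```ts
+ * const comprehensive = chat.getHistory();
+ * const curated = chat.getHistory(true);
+ * // The curated view never has more turns than the comprehensive one.
+ * console.log(curated.length <= comprehensive.length); // true
+ * ```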
+ */
+ getHistory(curated = false) {
+ return curated ? extractCuratedHistory(this.history) : this.history;
+ }
+ processStreamResponse(streamResponse, inputContent) {
+ var _a, _b;
+ return __asyncGenerator(this, arguments, function* processStreamResponse_1() {
+ var _c, e_1, _d, _e;
+ const outputContent = [];
+ try {
+ for (var _f = true, streamResponse_1 = __asyncValues(streamResponse), streamResponse_1_1; streamResponse_1_1 = yield __await(streamResponse_1.next()), _c = streamResponse_1_1.done, !_c; _f = true) {
+ _e = streamResponse_1_1.value;
+ _f = false;
+ const chunk = _e;
+ if (isValidResponse(chunk)) {
+ const content = (_b = (_a = chunk.candidates) === null || _a === void 0 ? void 0 : _a[0]) === null || _b === void 0 ? void 0 : _b.content;
+ if (content !== undefined) {
+ outputContent.push(content);
+ }
+ }
+ yield yield __await(chunk);
+ }
+ }
+ catch (e_1_1) { e_1 = { error: e_1_1 }; }
+ finally {
+ try {
+ if (!_f && !_c && (_d = streamResponse_1.return)) yield __await(_d.call(streamResponse_1));
+ }
+ finally { if (e_1) throw e_1.error; }
+ }
+ this.recordHistory(inputContent, outputContent);
+ });
+ }
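+ // Appends the latest user input and model output to the comprehensive
+ // history. When the model returned no usable output, an empty model content
+ // is appended instead so that turns keep alternating between user and model.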
+ recordHistory(userInput, modelOutput) {
+ let outputContents = [];
+ if (modelOutput.length > 0 &&
+ modelOutput.every((content) => content.role === 'model')) {
+ outputContents = modelOutput;
+ }
+ else {
+ // Appends an empty content when model returns empty response, so that the
+ // history is always alternating between user and model.
+ outputContents.push({
+ role: 'model',
+ parts: [],
+ });
+ }
+ this.history.push(userInput);
+ this.history.push(...outputContents);
+ }
+}
+
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+const CONTENT_TYPE_HEADER = 'Content-Type';
+const USER_AGENT_HEADER = 'User-Agent';
+const GOOGLE_API_CLIENT_HEADER = 'x-goog-api-client';
+const SDK_VERSION = '0.7.0'; // x-release-please-version
+const LIBRARY_LABEL = `google-genai-sdk/${SDK_VERSION}`;
+const VERTEX_AI_API_DEFAULT_VERSION = 'v1beta1';
+const GOOGLE_AI_API_DEFAULT_VERSION = 'v1beta';
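+// Matches one complete server-sent-events chunk: a `data: <payload>` line
+// terminated by a blank line (LF, CR, or CRLF line endings).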
+const responseLineRE = /^data: (.*)(?:\n\n|\r\r|\r\n\r\n)/;
+/**
+ * Client errors raised by the GenAI API.
+ */
+class ClientError extends Error {
+ constructor(message, stackTrace) {
+ if (stackTrace) {
+ super(message, { cause: stackTrace });
+ }
+ else {
+ super(message, { cause: new Error().stack });
+ }
+ this.message = message;
+ this.name = 'ClientError';
+ }
+}
+/**
+ * Server errors raised by the GenAI API.
+ */
+class ServerError extends Error {
+ constructor(message, stackTrace) {
+ if (stackTrace) {
+ super(message, { cause: stackTrace });
+ }
+ else {
+ super(message, { cause: new Error().stack });
+ }
+ this.message = message;
+ this.name = 'ServerError';
+ }
+}
+/**
+ * The ApiClient class is used to send requests to the Gemini API or Vertex AI
+ * endpoints.
+ */
+class ApiClient {
+ constructor(opts) {
+ var _a, _b;
+ this.clientOptions = Object.assign(Object.assign({}, opts), { project: opts.project, location: opts.location, apiKey: opts.apiKey, vertexai: opts.vertexai });
+ const initHttpOptions = {};
+ if (this.clientOptions.vertexai) {
+ initHttpOptions.apiVersion =
+ (_a = this.clientOptions.apiVersion) !== null && _a !== void 0 ? _a : VERTEX_AI_API_DEFAULT_VERSION;
+ // Assume that project/API key validation occurs before they are passed in.
+ if (this.getProject() || this.getLocation()) {
+ initHttpOptions.baseUrl = `https://${this.clientOptions.location}-aiplatform.googleapis.com/`;
+ this.clientOptions.apiKey = undefined; // unset API key.
+ }
+ else {
+ initHttpOptions.baseUrl = `https://aiplatform.googleapis.com/`;
+ this.clientOptions.project = undefined; // unset project.
+ this.clientOptions.location = undefined; // unset location.
+ }
+ }
+ else {
+ initHttpOptions.apiVersion =
+ (_b = this.clientOptions.apiVersion) !== null && _b !== void 0 ? _b : GOOGLE_AI_API_DEFAULT_VERSION;
+ initHttpOptions.baseUrl = `https://generativelanguage.googleapis.com/`;
+ }
+ initHttpOptions.headers = this.getDefaultHeaders();
+ this.clientOptions.httpOptions = initHttpOptions;
+ if (opts.httpOptions) {
+ this.clientOptions.httpOptions = this.patchHttpOptions(initHttpOptions, opts.httpOptions);
+ }
+ }
+ isVertexAI() {
+ var _a;
+ return (_a = this.clientOptions.vertexai) !== null && _a !== void 0 ? _a : false;
+ }
+ getProject() {
+ return this.clientOptions.project;
+ }
+ getLocation() {
+ return this.clientOptions.location;
+ }
+ getApiVersion() {
+ if (this.clientOptions.httpOptions &&
+ this.clientOptions.httpOptions.apiVersion !== undefined) {
+ return this.clientOptions.httpOptions.apiVersion;
+ }
+ throw new Error('API version is not set.');
+ }
+ getBaseUrl() {
+ if (this.clientOptions.httpOptions &&
+ this.clientOptions.httpOptions.baseUrl !== undefined) {
+ return this.clientOptions.httpOptions.baseUrl;
+ }
+ throw new Error('Base URL is not set.');
+ }
+ getRequestUrl() {
+ return this.getRequestUrlInternal(this.clientOptions.httpOptions);
+ }
+ getHeaders() {
+ if (this.clientOptions.httpOptions &&
+ this.clientOptions.httpOptions.headers !== undefined) {
+ return this.clientOptions.httpOptions.headers;
+ }
+ else {
+ throw new Error('Headers are not set.');
+ }
+ }
+ getRequestUrlInternal(httpOptions) {
+ if (!httpOptions ||
+ httpOptions.baseUrl === undefined ||
+ httpOptions.apiVersion === undefined) {
+ throw new Error('HTTP options are not correctly set.');
+ }
+ const baseUrl = httpOptions.baseUrl.endsWith('/')
+ ? httpOptions.baseUrl.slice(0, -1)
+ : httpOptions.baseUrl;
+ const urlElement = [baseUrl];
+ if (httpOptions.apiVersion && httpOptions.apiVersion !== '') {
+ urlElement.push(httpOptions.apiVersion);
+ }
+ return urlElement.join('/');
+ }
+ getBaseResourcePath() {
+ return `projects/${this.clientOptions.project}/locations/${this.clientOptions.location}`;
+ }
+ getApiKey() {
+ return this.clientOptions.apiKey;
+ }
+ getWebsocketBaseUrl() {
+ const baseUrl = this.getBaseUrl();
+ const urlParts = new URL(baseUrl);
+ urlParts.protocol = 'wss';
+ return urlParts.toString();
+ }
+ setBaseUrl(url) {
+ if (this.clientOptions.httpOptions) {
+ this.clientOptions.httpOptions.baseUrl = url;
+ }
+ else {
+ throw new Error('HTTP options are not correctly set.');
+ }
+ }
+ constructUrl(path, httpOptions) {
+ const urlElement = [this.getRequestUrlInternal(httpOptions)];
+ if (this.clientOptions.vertexai &&
+ !this.clientOptions.apiKey &&
+ !path.startsWith('projects/')) {
+ urlElement.push(this.getBaseResourcePath());
+ }
+ if (path !== '') {
+ urlElement.push(path);
+ }
+ const url = new URL(`${urlElement.join('/')}`);
+ return url;
+ }
+ async request(request) {
+ let patchedHttpOptions = this.clientOptions.httpOptions;
+ if (request.httpOptions) {
+ patchedHttpOptions = this.patchHttpOptions(this.clientOptions.httpOptions, request.httpOptions);
+ }
+ const url = this.constructUrl(request.path, patchedHttpOptions);
+ if (request.queryParams) {
+ for (const [key, value] of Object.entries(request.queryParams)) {
+ url.searchParams.append(key, String(value));
+ }
+ }
+ let requestInit = {};
+ if (request.httpMethod === 'GET') {
+ if (request.body && request.body !== '{}') {
+ throw new Error('Request body should be empty for GET request, but got a non-empty request body');
+ }
+ }
+ else {
+ requestInit.body = request.body;
+ }
+ requestInit = await this.includeExtraHttpOptionsToRequestInit(requestInit, patchedHttpOptions);
+ return this.unaryApiCall(url, requestInit, request.httpMethod);
+ }
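+ // Merges request-level HttpOptions over a deep copy of the base options;
+ // object-valued fields (such as `headers`) are merged key-by-key, while
+ // scalar fields simply overwrite the base value.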
+ patchHttpOptions(baseHttpOptions, requestHttpOptions) {
+ const patchedHttpOptions = JSON.parse(JSON.stringify(baseHttpOptions));
+ for (const [key, value] of Object.entries(requestHttpOptions)) {
+ // Records compile to objects.
+ if (typeof value === 'object') {
+ // @ts-expect-error TS2345TS7053: Element implicitly has an 'any' type
+ // because expression of type 'string' can't be used to index type
+ // 'HttpOptions'.
+ patchedHttpOptions[key] = Object.assign(Object.assign({}, patchedHttpOptions[key]), value);
+ }
+ else if (value !== undefined) {
+ // @ts-expect-error TS2345TS7053: Element implicitly has an 'any' type
+ // because expression of type 'string' can't be used to index type
+ // 'HttpOptions'.
+ patchedHttpOptions[key] = value;
+ }
+ }
+ return patchedHttpOptions;
+ }
+ async requestStream(request) {
+ let patchedHttpOptions = this.clientOptions.httpOptions;
+ if (request.httpOptions) {
+ patchedHttpOptions = this.patchHttpOptions(this.clientOptions.httpOptions, request.httpOptions);
+ }
+ const url = this.constructUrl(request.path, patchedHttpOptions);
+ if (!url.searchParams.has('alt') || url.searchParams.get('alt') !== 'sse') {
+ url.searchParams.set('alt', 'sse');
+ }
+ let requestInit = {};
+ requestInit.body = request.body;
+ requestInit = await this.includeExtraHttpOptionsToRequestInit(requestInit, patchedHttpOptions);
+ return this.streamApiCall(url, requestInit, request.httpMethod);
+ }
+ async includeExtraHttpOptionsToRequestInit(requestInit, httpOptions) {
+ if (httpOptions && httpOptions.timeout && httpOptions.timeout > 0) {
+ const abortController = new AbortController();
+ const signal = abortController.signal;
+ setTimeout(() => abortController.abort(), httpOptions.timeout);
+ requestInit.signal = signal;
+ }
+ requestInit.headers = await this.getHeadersInternal(httpOptions);
+ return requestInit;
+ }
+ async unaryApiCall(url, requestInit, httpMethod) {
+ return this.apiCall(url.toString(), Object.assign(Object.assign({}, requestInit), { method: httpMethod }))
+ .then(async (response) => {
+ await throwErrorIfNotOK(response);
+ return new HttpResponse(response);
+ })
+ .catch((e) => {
+ if (e instanceof Error) {
+ throw e;
+ }
+ else {
+ throw new Error(JSON.stringify(e));
+ }
+ });
+ }
+ async streamApiCall(url, requestInit, httpMethod) {
+ return this.apiCall(url.toString(), Object.assign(Object.assign({}, requestInit), { method: httpMethod }))
+ .then(async (response) => {
+ await throwErrorIfNotOK(response);
+ return this.processStreamResponse(response);
+ })
+ .catch((e) => {
+ if (e instanceof Error) {
+ throw e;
+ }
+ else {
+ throw new Error(JSON.stringify(e));
+ }
+ });
+ }
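+ // Incrementally decodes the response body and yields one parsed JSON object
+ // per complete SSE event matched by `responseLineRE`; partial events are
+ // buffered across reads, and leftover bytes at end-of-stream are an error.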
+ processStreamResponse(response) {
+ var _a;
+ return __asyncGenerator(this, arguments, function* processStreamResponse_1() {
+ const reader = (_a = response === null || response === void 0 ? void 0 : response.body) === null || _a === void 0 ? void 0 : _a.getReader();
+ const decoder = new TextDecoder('utf-8');
+ if (!reader) {
+ throw new Error('Response body is empty');
+ }
+ try {
+ let buffer = '';
+ while (true) {
+ const { done, value } = yield __await(reader.read());
+ if (done) {
+ if (buffer.trim().length > 0) {
+ throw new Error('Incomplete JSON segment at the end');
+ }
+ break;
+ }
+ const chunkString = decoder.decode(value);
+ buffer += chunkString;
+ let match = buffer.match(responseLineRE);
+ while (match) {
+ const processedChunkString = match[1];
+ try {
+ const chunkData = JSON.parse(processedChunkString);
+ yield yield __await(chunkData);
+ buffer = buffer.slice(match[0].length);
+ match = buffer.match(responseLineRE);
+ }
+ catch (e) {
+ throw new Error(`exception parsing stream chunk ${processedChunkString}. ${e}`);
+ }
+ }
+ }
+ }
+ finally {
+ reader.releaseLock();
+ }
+ });
+ }
+ async apiCall(url, requestInit) {
+ return fetch(url, requestInit).catch((e) => {
+ throw new Error(`exception ${e} sending request`);
+ });
+ }
+ getDefaultHeaders() {
+ const headers = {};
+ const versionHeaderValue = LIBRARY_LABEL + ' ' + this.clientOptions.userAgentExtra;
+ headers[USER_AGENT_HEADER] = versionHeaderValue;
+ headers[GOOGLE_API_CLIENT_HEADER] = versionHeaderValue;
+ headers[CONTENT_TYPE_HEADER] = 'application/json';
+ return headers;
+ }
+ async getHeadersInternal(httpOptions) {
+ const headers = new Headers();
+ if (httpOptions && httpOptions.headers) {
+ for (const [key, value] of Object.entries(httpOptions.headers)) {
+ headers.append(key, value);
+ }
+ }
+ await this.clientOptions.auth.addAuthHeaders(headers);
+ return headers;
+ }
+ /**
+ * Uploads a file asynchronously. This is supported by the Gemini API only;
+ * it is not supported in Vertex AI.
+ *
+ * @param file The string path to the file to be uploaded or a Blob object.
+ * @param config Optional parameters specified in the `UploadFileConfig`
+ * interface. @see {@link UploadFileConfig}
+ * @return A promise that resolves to a `File` object.
+ * @throws An error if called on a Vertex AI client.
+ * @throws An error if the `mimeType` is not provided and cannot be inferred.
+ */
+ async uploadFile(file, config) {
+ var _a;
+ const fileToUpload = {};
+ if (config != null) {
+ fileToUpload.mimeType = config.mimeType;
+ fileToUpload.name = config.name;
+ fileToUpload.displayName = config.displayName;
+ }
+ if (fileToUpload.name && !fileToUpload.name.startsWith('files/')) {
+ fileToUpload.name = `files/${fileToUpload.name}`;
+ }
+ const uploader = this.clientOptions.uploader;
+ const fileStat = await uploader.stat(file);
+ fileToUpload.sizeBytes = String(fileStat.size);
+ const mimeType = (_a = config === null || config === void 0 ? void 0 : config.mimeType) !== null && _a !== void 0 ? _a : fileStat.type;
+ if (mimeType === undefined || mimeType === '') {
+ throw new Error('Cannot determine mimeType. Please provide mimeType in the config.');
+ }
+ fileToUpload.mimeType = mimeType;
+ const uploadUrl = await this.fetchUploadUrl(fileToUpload, config);
+ return uploader.upload(file, uploadUrl, this);
+ }
+ async fetchUploadUrl(file, config) {
+ var _a;
+ let httpOptions = {};
+ if (config === null || config === void 0 ? void 0 : config.httpOptions) {
+ httpOptions = config.httpOptions;
+ }
+ else {
+ httpOptions = {
+ apiVersion: '',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'X-Goog-Upload-Protocol': 'resumable',
+ 'X-Goog-Upload-Command': 'start',
+ 'X-Goog-Upload-Header-Content-Length': `${file.sizeBytes}`,
+ 'X-Goog-Upload-Header-Content-Type': `${file.mimeType}`,
+ },
+ };
+ }
+ const body = {
+ 'file': file,
+ };
+ const httpResponse = await this.request({
+ path: formatMap('upload/v1beta/files', body['_url']),
+ body: JSON.stringify(body),
+ httpMethod: 'POST',
+ httpOptions,
+ });
+ if (!httpResponse || !(httpResponse === null || httpResponse === void 0 ? void 0 : httpResponse.headers)) {
+ throw new Error('Server did not return an HttpResponse or the returned HttpResponse did not have headers.');
+ }
+ const uploadUrl = (_a = httpResponse === null || httpResponse === void 0 ? void 0 : httpResponse.headers) === null || _a === void 0 ? void 0 : _a['x-goog-upload-url'];
+ if (uploadUrl === undefined) {
+ throw new Error('Failed to get upload url. Server did not return the x-goog-upload-url header');
+ }
+ return uploadUrl;
+ }
+}
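+// Maps non-OK fetch responses to typed errors: 4xx statuses raise ClientError,
+// 5xx statuses raise ServerError, and anything else becomes a plain Error. The
+// JSON error body, when present, is included in the message.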
+async function throwErrorIfNotOK(response) {
+ var _a;
+ if (response === undefined) {
+ throw new ServerError('response is undefined');
+ }
+ if (!response.ok) {
+ const status = response.status;
+ const statusText = response.statusText;
+ let errorBody;
+ if ((_a = response.headers.get('content-type')) === null || _a === void 0 ? void 0 : _a.includes('application/json')) {
+ errorBody = await response.json();
+ }
+ else {
+ errorBody = {
+ error: {
+ message: 'exception parsing response',
+ code: response.status,
+ status: response.statusText,
+ },
+ };
+ }
+ const errorMessage = `got status: ${status} ${statusText}. ${JSON.stringify(errorBody)}`;
+ if (status >= 400 && status < 500) {
+ const clientError = new ClientError(errorMessage);
+ throw clientError;
+ }
+ else if (status >= 500 && status < 600) {
+ const serverError = new ServerError(errorMessage);
+ throw serverError;
+ }
+ throw new Error(errorMessage);
+ }
+}
+
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+function crossError() {
+ // TODO(b/399934880): this message needs a link to a help page explaining how to enable conditional exports
+ return new Error(`This feature requires the web or Node specific @google/genai implementation; you can fix this by either:
+
+*Enabling conditional exports for your project [recommended]*
+
+*Using a platform specific import* - Make sure your code imports either '@google/genai/web' or '@google/genai/node' instead of '@google/genai'.
+`);
+}
+
+const MAX_CHUNK_SIZE = 1024 * 1024 * 8; // bytes
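+// Platform-neutral uploader: Blob inputs are handled inline via uploadBlob and
+// getBlobStat, while string file paths need the Node-specific implementation
+// and therefore raise crossError() here.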
+class CrossUploader {
+ async upload(file, uploadUrl, apiClient) {
+ if (typeof file === 'string') {
+ throw crossError();
+ }
+ else {
+ return uploadBlob(file, uploadUrl, apiClient);
+ }
+ }
+ async stat(file) {
+ if (typeof file === 'string') {
+ throw crossError();
+ }
+ else {
+ return getBlobStat(file);
+ }
+ }
+}
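+// Uploads a Blob in chunks of at most MAX_CHUNK_SIZE bytes using the resumable
+// upload protocol: every POST carries `X-Goog-Upload-Command: upload` (with
+// `, finalize` appended on the last chunk) plus the current byte offset, and
+// the server reports progress through the `x-goog-upload-status` header.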
+async function uploadBlob(file, uploadUrl, apiClient) {
+ var _a, _b;
+ let fileSize = 0;
+ let offset = 0;
+ let response = new HttpResponse(new Response());
+ let uploadCommand = 'upload';
+ fileSize = file.size;
+ while (offset < fileSize) {
+ const chunkSize = Math.min(MAX_CHUNK_SIZE, fileSize - offset);
+ const chunk = file.slice(offset, offset + chunkSize);
+ if (offset + chunkSize >= fileSize) {
+ uploadCommand += ', finalize';
+ }
+ response = await apiClient.request({
+ path: '',
+ body: chunk,
+ httpMethod: 'POST',
+ httpOptions: {
+ apiVersion: '',
+ baseUrl: uploadUrl,
+ headers: {
+ 'X-Goog-Upload-Command': uploadCommand,
+ 'X-Goog-Upload-Offset': String(offset),
+ 'Content-Length': String(chunkSize),
+ },
+ },
+ });
+ offset += chunkSize;
+ // The `x-goog-upload-status` header field in the response can be `active`,
+ // `final`, or `cancelled`.
+ if (((_a = response === null || response === void 0 ? void 0 : response.headers) === null || _a === void 0 ? void 0 : _a['x-goog-upload-status']) !== 'active') {
+ break;
+ }
+ // TODO(b/401391430): Investigate why the upload status is not finalized
+ // even though all content has been uploaded.
+ if (fileSize <= offset) {
+ throw new Error('All content has been uploaded, but the upload status is not finalized.');
+ }
+ }
+ const responseJson = (await (response === null || response === void 0 ? void 0 : response.json()));
+ if (((_b = response === null || response === void 0 ? void 0 : response.headers) === null || _b === void 0 ? void 0 : _b['x-goog-upload-status']) !== 'final') {
+ throw new Error('Failed to upload file: Upload status is not finalized.');
+ }
+ return responseJson['file'];
+}
+async function getBlobStat(file) {
+ const fileStat = { size: file.size, type: file.type };
+ return fileStat;
+}
+
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+// TODO(b/401271082): re-enable lint once CrossWebSocketFactory is implemented.
+/* eslint-disable @typescript-eslint/no-unused-vars */
+class CrossWebSocketFactory {
+ create(url, headers, callbacks) {
+ throw crossError();
+ }
+}
+
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
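+// The *ToMldev / *FromMldev converters below map SDK request and response
+// objects to and from the Gemini API wire format. By convention, values
+// written under the `_url` key are substituted into the request path via
+// `formatMap`, and values under `_query` become URL query parameters; both
+// keys are deleted from the body before the request is sent.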
+function listFilesConfigToMldev(apiClient, fromObject, parentObject) {
+ const toObject = {};
+ const fromPageSize = getValueByPath(fromObject, ['pageSize']);
+ if (parentObject !== undefined && fromPageSize != null) {
+ setValueByPath(parentObject, ['_query', 'pageSize'], fromPageSize);
+ }
+ const fromPageToken = getValueByPath(fromObject, ['pageToken']);
+ if (parentObject !== undefined && fromPageToken != null) {
+ setValueByPath(parentObject, ['_query', 'pageToken'], fromPageToken);
+ }
+ return toObject;
+}
+function listFilesParametersToMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromConfig = getValueByPath(fromObject, ['config']);
+ if (fromConfig != null) {
+ setValueByPath(toObject, ['config'], listFilesConfigToMldev(apiClient, fromConfig, toObject));
+ }
+ return toObject;
+}
+function fileStatusToMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromDetails = getValueByPath(fromObject, ['details']);
+ if (fromDetails != null) {
+ setValueByPath(toObject, ['details'], fromDetails);
+ }
+ const fromMessage = getValueByPath(fromObject, ['message']);
+ if (fromMessage != null) {
+ setValueByPath(toObject, ['message'], fromMessage);
+ }
+ const fromCode = getValueByPath(fromObject, ['code']);
+ if (fromCode != null) {
+ setValueByPath(toObject, ['code'], fromCode);
+ }
+ return toObject;
+}
+function fileToMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromName = getValueByPath(fromObject, ['name']);
+ if (fromName != null) {
+ setValueByPath(toObject, ['name'], fromName);
+ }
+ const fromDisplayName = getValueByPath(fromObject, ['displayName']);
+ if (fromDisplayName != null) {
+ setValueByPath(toObject, ['displayName'], fromDisplayName);
+ }
+ const fromMimeType = getValueByPath(fromObject, ['mimeType']);
+ if (fromMimeType != null) {
+ setValueByPath(toObject, ['mimeType'], fromMimeType);
+ }
+ const fromSizeBytes = getValueByPath(fromObject, ['sizeBytes']);
+ if (fromSizeBytes != null) {
+ setValueByPath(toObject, ['sizeBytes'], fromSizeBytes);
+ }
+ const fromCreateTime = getValueByPath(fromObject, ['createTime']);
+ if (fromCreateTime != null) {
+ setValueByPath(toObject, ['createTime'], fromCreateTime);
+ }
+ const fromExpirationTime = getValueByPath(fromObject, [
+ 'expirationTime',
+ ]);
+ if (fromExpirationTime != null) {
+ setValueByPath(toObject, ['expirationTime'], fromExpirationTime);
+ }
+ const fromUpdateTime = getValueByPath(fromObject, ['updateTime']);
+ if (fromUpdateTime != null) {
+ setValueByPath(toObject, ['updateTime'], fromUpdateTime);
+ }
+ const fromSha256Hash = getValueByPath(fromObject, ['sha256Hash']);
+ if (fromSha256Hash != null) {
+ setValueByPath(toObject, ['sha256Hash'], fromSha256Hash);
+ }
+ const fromUri = getValueByPath(fromObject, ['uri']);
+ if (fromUri != null) {
+ setValueByPath(toObject, ['uri'], fromUri);
+ }
+ const fromDownloadUri = getValueByPath(fromObject, ['downloadUri']);
+ if (fromDownloadUri != null) {
+ setValueByPath(toObject, ['downloadUri'], fromDownloadUri);
+ }
+ const fromState = getValueByPath(fromObject, ['state']);
+ if (fromState != null) {
+ setValueByPath(toObject, ['state'], fromState);
+ }
+ const fromSource = getValueByPath(fromObject, ['source']);
+ if (fromSource != null) {
+ setValueByPath(toObject, ['source'], fromSource);
+ }
+ const fromVideoMetadata = getValueByPath(fromObject, [
+ 'videoMetadata',
+ ]);
+ if (fromVideoMetadata != null) {
+ setValueByPath(toObject, ['videoMetadata'], fromVideoMetadata);
+ }
+ const fromError = getValueByPath(fromObject, ['error']);
+ if (fromError != null) {
+ setValueByPath(toObject, ['error'], fileStatusToMldev(apiClient, fromError));
+ }
+ return toObject;
+}
+function createFileParametersToMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromFile = getValueByPath(fromObject, ['file']);
+ if (fromFile != null) {
+ setValueByPath(toObject, ['file'], fileToMldev(apiClient, fromFile));
+ }
+ const fromConfig = getValueByPath(fromObject, ['config']);
+ if (fromConfig != null) {
+ setValueByPath(toObject, ['config'], fromConfig);
+ }
+ return toObject;
+}
+function getFileParametersToMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromName = getValueByPath(fromObject, ['name']);
+ if (fromName != null) {
+ setValueByPath(toObject, ['_url', 'file'], tFileName(apiClient, fromName));
+ }
+ const fromConfig = getValueByPath(fromObject, ['config']);
+ if (fromConfig != null) {
+ setValueByPath(toObject, ['config'], fromConfig);
+ }
+ return toObject;
+}
+function deleteFileParametersToMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromName = getValueByPath(fromObject, ['name']);
+ if (fromName != null) {
+ setValueByPath(toObject, ['_url', 'file'], tFileName(apiClient, fromName));
+ }
+ const fromConfig = getValueByPath(fromObject, ['config']);
+ if (fromConfig != null) {
+ setValueByPath(toObject, ['config'], fromConfig);
+ }
+ return toObject;
+}
+function fileStatusFromMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromDetails = getValueByPath(fromObject, ['details']);
+ if (fromDetails != null) {
+ setValueByPath(toObject, ['details'], fromDetails);
+ }
+ const fromMessage = getValueByPath(fromObject, ['message']);
+ if (fromMessage != null) {
+ setValueByPath(toObject, ['message'], fromMessage);
+ }
+ const fromCode = getValueByPath(fromObject, ['code']);
+ if (fromCode != null) {
+ setValueByPath(toObject, ['code'], fromCode);
+ }
+ return toObject;
+}
+function fileFromMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromName = getValueByPath(fromObject, ['name']);
+ if (fromName != null) {
+ setValueByPath(toObject, ['name'], fromName);
+ }
+ const fromDisplayName = getValueByPath(fromObject, ['displayName']);
+ if (fromDisplayName != null) {
+ setValueByPath(toObject, ['displayName'], fromDisplayName);
+ }
+ const fromMimeType = getValueByPath(fromObject, ['mimeType']);
+ if (fromMimeType != null) {
+ setValueByPath(toObject, ['mimeType'], fromMimeType);
+ }
+ const fromSizeBytes = getValueByPath(fromObject, ['sizeBytes']);
+ if (fromSizeBytes != null) {
+ setValueByPath(toObject, ['sizeBytes'], fromSizeBytes);
+ }
+ const fromCreateTime = getValueByPath(fromObject, ['createTime']);
+ if (fromCreateTime != null) {
+ setValueByPath(toObject, ['createTime'], fromCreateTime);
+ }
+ const fromExpirationTime = getValueByPath(fromObject, [
+ 'expirationTime',
+ ]);
+ if (fromExpirationTime != null) {
+ setValueByPath(toObject, ['expirationTime'], fromExpirationTime);
+ }
+ const fromUpdateTime = getValueByPath(fromObject, ['updateTime']);
+ if (fromUpdateTime != null) {
+ setValueByPath(toObject, ['updateTime'], fromUpdateTime);
+ }
+ const fromSha256Hash = getValueByPath(fromObject, ['sha256Hash']);
+ if (fromSha256Hash != null) {
+ setValueByPath(toObject, ['sha256Hash'], fromSha256Hash);
+ }
+ const fromUri = getValueByPath(fromObject, ['uri']);
+ if (fromUri != null) {
+ setValueByPath(toObject, ['uri'], fromUri);
+ }
+ const fromDownloadUri = getValueByPath(fromObject, ['downloadUri']);
+ if (fromDownloadUri != null) {
+ setValueByPath(toObject, ['downloadUri'], fromDownloadUri);
+ }
+ const fromState = getValueByPath(fromObject, ['state']);
+ if (fromState != null) {
+ setValueByPath(toObject, ['state'], fromState);
+ }
+ const fromSource = getValueByPath(fromObject, ['source']);
+ if (fromSource != null) {
+ setValueByPath(toObject, ['source'], fromSource);
+ }
+ const fromVideoMetadata = getValueByPath(fromObject, [
+ 'videoMetadata',
+ ]);
+ if (fromVideoMetadata != null) {
+ setValueByPath(toObject, ['videoMetadata'], fromVideoMetadata);
+ }
+ const fromError = getValueByPath(fromObject, ['error']);
+ if (fromError != null) {
+ setValueByPath(toObject, ['error'], fileStatusFromMldev(apiClient, fromError));
+ }
+ return toObject;
+}
+function listFilesResponseFromMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromNextPageToken = getValueByPath(fromObject, [
+ 'nextPageToken',
+ ]);
+ if (fromNextPageToken != null) {
+ setValueByPath(toObject, ['nextPageToken'], fromNextPageToken);
+ }
+ const fromFiles = getValueByPath(fromObject, ['files']);
+ if (fromFiles != null) {
+ if (Array.isArray(fromFiles)) {
+ setValueByPath(toObject, ['files'], fromFiles.map((item) => {
+ return fileFromMldev(apiClient, item);
+ }));
+ }
+ else {
+ setValueByPath(toObject, ['files'], fromFiles);
+ }
+ }
+ return toObject;
+}
+function createFileResponseFromMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromHttpHeaders = getValueByPath(fromObject, ['httpHeaders']);
+ if (fromHttpHeaders != null) {
+ setValueByPath(toObject, ['httpHeaders'], fromHttpHeaders);
+ }
+ return toObject;
+}
+function deleteFileResponseFromMldev() {
+ const toObject = {};
+ return toObject;
+}
+
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+class Files extends BaseModule {
+ constructor(apiClient) {
+ super();
+ this.apiClient = apiClient;
+ /**
+ * Lists all current project files from the service.
+ *
+ * @param params - The parameters for the list request
+ * @return The paginated results of the list of files
+ *
+ * @example
+ * The following code prints the names of all files from the service; the
+ * page size is 10.
+ *
+ * ```ts
+ * const listResponse = await ai.files.list({config: {'pageSize': 10}});
+ * for await (const file of listResponse) {
+ * console.log(file.name);
+ * }
+ * ```
+ */
+ this.list = async (params = {}) => {
+ return new Pager(exports.PagedItem.PAGED_ITEM_FILES, (x) => this.listInternal(x), await this.listInternal(params), params);
+ };
+ }
+ /**
+ * Uploads a file asynchronously to the Gemini API.
+ * This method is not available in Vertex AI.
+ * Supported upload sources:
+ * - Node.js: File path (string) or Blob object.
+ * - Browser: Blob object (e.g., File).
+ *
+ * @remarks
+ * The `mimeType` can be specified in the `config` parameter. If omitted:
+ * - For file path (string) inputs, the `mimeType` will be inferred from the
+ * file extension.
+ * - For Blob object inputs, the `mimeType` will be set to the Blob's `type`
+ * property.
+ * Some examples of file extension to mimeType mapping:
+ * .txt -> text/plain
+ * .json -> application/json
+ * .jpg -> image/jpeg
+ * .png -> image/png
+ * .mp3 -> audio/mpeg
+ * .mp4 -> video/mp4
+ *
+ * @param params - Optional parameters specified in the
+ * `common.UploadFileParameters` interface.
+ * @return A promise that resolves to a `types.File` object.
+ * @throws An error if called on a Vertex AI client.
+ * @throws An error if the `mimeType` is not provided and cannot be inferred;
+ * the `mimeType` can be provided in the `params.config` parameter.
+ * @throws An error if a suitable upload location cannot be established.
+ *
+ * @example
+ * The following code uploads a file to Gemini API.
+ *
+ * ```ts
+ * const file = await ai.files.upload({file: 'file.txt', config: {
+ * mimeType: 'text/plain',
+ * }});
+ * console.log(file.name);
+ * ```
+ */
+ async upload(params) {
+ if (this.apiClient.isVertexAI()) {
+ throw new Error('Vertex AI does not support uploading files. You can share files through a GCS bucket.');
+ }
+ return this.apiClient
+ .uploadFile(params.file, params.config)
+ .then((response) => {
+ const file = fileFromMldev(this.apiClient, response);
+ return file;
+ });
+ }
+ async listInternal(params) {
+ var _a;
+ let response;
+ let path = '';
+ let queryParams = {};
+ if (this.apiClient.isVertexAI()) {
+ throw new Error('This method is only supported by the Gemini Developer API.');
+ }
+ else {
+ const body = listFilesParametersToMldev(this.apiClient, params);
+ path = formatMap('files', body['_url']);
+ queryParams = body['_query'];
+ delete body['config'];
+ delete body['_url'];
+ delete body['_query'];
+ response = this.apiClient
+ .request({
+ path: path,
+ queryParams: queryParams,
+ body: JSON.stringify(body),
+ httpMethod: 'GET',
+ httpOptions: (_a = params.config) === null || _a === void 0 ? void 0 : _a.httpOptions,
+ })
+ .then((httpResponse) => {
+ return httpResponse.json();
+ });
+ return response.then((apiResponse) => {
+ const resp = listFilesResponseFromMldev(this.apiClient, apiResponse);
+ const typedResp = new ListFilesResponse();
+ Object.assign(typedResp, resp);
+ return typedResp;
+ });
+ }
+ }
+ async createInternal(params) {
+ var _a;
+ let response;
+ let path = '';
+ let queryParams = {};
+ if (this.apiClient.isVertexAI()) {
+ throw new Error('This method is only supported by the Gemini Developer API.');
+ }
+ else {
+ const body = createFileParametersToMldev(this.apiClient, params);
+ path = formatMap('upload/v1beta/files', body['_url']);
+ queryParams = body['_query'];
+ delete body['config'];
+ delete body['_url'];
+ delete body['_query'];
+ response = this.apiClient
+ .request({
+ path: path,
+ queryParams: queryParams,
+ body: JSON.stringify(body),
+ httpMethod: 'POST',
+ httpOptions: (_a = params.config) === null || _a === void 0 ? void 0 : _a.httpOptions,
+ })
+ .then((httpResponse) => {
+ return httpResponse.json();
+ });
+ return response.then((apiResponse) => {
+ const resp = createFileResponseFromMldev(this.apiClient, apiResponse);
+ const typedResp = new CreateFileResponse();
+ Object.assign(typedResp, resp);
+ return typedResp;
+ });
+ }
+ }
+ /**
+ * Retrieves the file information from the service.
+ *
+ * @param params - The parameters for the get request
+ * @return A Promise that resolves to the requested types.File object.
+ *
+ * @example
+ * ```ts
+ * const config: GetFileParameters = {
+ * name: fileName,
+ * };
+ * file = await ai.files.get(config);
+ * console.log(file.name);
+ * ```
+ */
+ async get(params) {
+ var _a;
+ let response;
+ let path = '';
+ let queryParams = {};
+ if (this.apiClient.isVertexAI()) {
+ throw new Error('This method is only supported by the Gemini Developer API.');
+ }
+ else {
+ const body = getFileParametersToMldev(this.apiClient, params);
+ path = formatMap('files/{file}', body['_url']);
+ queryParams = body['_query'];
+ delete body['config'];
+ delete body['_url'];
+ delete body['_query'];
+ response = this.apiClient
+ .request({
+ path: path,
+ queryParams: queryParams,
+ body: JSON.stringify(body),
+ httpMethod: 'GET',
+ httpOptions: (_a = params.config) === null || _a === void 0 ? void 0 : _a.httpOptions,
+ })
+ .then((httpResponse) => {
+ return httpResponse.json();
+ });
+ return response.then((apiResponse) => {
+ const resp = fileFromMldev(this.apiClient, apiResponse);
+ return resp;
+ });
+ }
+ }
+ /**
+ * Deletes a remotely stored file.
+ *
+ * @param params - The parameters for the delete request.
+ * @return The DeleteFileResponse, the response for the delete method.
+ *
+ * @example
+ * The following code deletes an example file named "files/mehozpxf877d".
+ *
+ * ```ts
+ * await ai.files.delete({name: file.name});
+ * ```
+ */
+ async delete(params) {
+ var _a;
+ let response;
+ let path = '';
+ let queryParams = {};
+ if (this.apiClient.isVertexAI()) {
+ throw new Error('This method is only supported by the Gemini Developer API.');
+ }
+ else {
+ const body = deleteFileParametersToMldev(this.apiClient, params);
+ path = formatMap('files/{file}', body['_url']);
+ queryParams = body['_query'];
+ delete body['config'];
+ delete body['_url'];
+ delete body['_query'];
+ response = this.apiClient
+ .request({
+ path: path,
+ queryParams: queryParams,
+ body: JSON.stringify(body),
+ httpMethod: 'DELETE',
+ httpOptions: (_a = params.config) === null || _a === void 0 ? void 0 : _a.httpOptions,
+ })
+ .then((httpResponse) => {
+ return httpResponse.json();
+ });
+ return response.then(() => {
+ const resp = deleteFileResponseFromMldev();
+ const typedResp = new DeleteFileResponse();
+ Object.assign(typedResp, resp);
+ return typedResp;
+ });
+ }
+ }
+}
+
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+function partToMldev(apiClient, fromObject) {
+ const toObject = {};
+ if (getValueByPath(fromObject, ['videoMetadata']) !== undefined) {
+ throw new Error('videoMetadata parameter is not supported in Gemini API.');
+ }
+ const fromThought = getValueByPath(fromObject, ['thought']);
+ if (fromThought != null) {
+ setValueByPath(toObject, ['thought'], fromThought);
+ }
+ const fromCodeExecutionResult = getValueByPath(fromObject, [
+ 'codeExecutionResult',
+ ]);
+ if (fromCodeExecutionResult != null) {
+ setValueByPath(toObject, ['codeExecutionResult'], fromCodeExecutionResult);
+ }
+ const fromExecutableCode = getValueByPath(fromObject, [
+ 'executableCode',
+ ]);
+ if (fromExecutableCode != null) {
+ setValueByPath(toObject, ['executableCode'], fromExecutableCode);
+ }
+ const fromFileData = getValueByPath(fromObject, ['fileData']);
+ if (fromFileData != null) {
+ setValueByPath(toObject, ['fileData'], fromFileData);
+ }
+ const fromFunctionCall = getValueByPath(fromObject, ['functionCall']);
+ if (fromFunctionCall != null) {
+ setValueByPath(toObject, ['functionCall'], fromFunctionCall);
+ }
+ const fromFunctionResponse = getValueByPath(fromObject, [
+ 'functionResponse',
+ ]);
+ if (fromFunctionResponse != null) {
+ setValueByPath(toObject, ['functionResponse'], fromFunctionResponse);
+ }
+ const fromInlineData = getValueByPath(fromObject, ['inlineData']);
+ if (fromInlineData != null) {
+ setValueByPath(toObject, ['inlineData'], fromInlineData);
+ }
+ const fromText = getValueByPath(fromObject, ['text']);
+ if (fromText != null) {
+ setValueByPath(toObject, ['text'], fromText);
+ }
+ return toObject;
+}
+function contentToMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromParts = getValueByPath(fromObject, ['parts']);
+ if (fromParts != null) {
+ if (Array.isArray(fromParts)) {
+ setValueByPath(toObject, ['parts'], fromParts.map((item) => {
+ return partToMldev(apiClient, item);
+ }));
+ }
+ else {
+ setValueByPath(toObject, ['parts'], fromParts);
+ }
+ }
+ const fromRole = getValueByPath(fromObject, ['role']);
+ if (fromRole != null) {
+ setValueByPath(toObject, ['role'], fromRole);
+ }
+ return toObject;
+}
+function schemaToMldev(apiClient, fromObject) {
+ const toObject = {};
+ if (getValueByPath(fromObject, ['example']) !== undefined) {
+ throw new Error('example parameter is not supported in Gemini API.');
+ }
+ if (getValueByPath(fromObject, ['pattern']) !== undefined) {
+ throw new Error('pattern parameter is not supported in Gemini API.');
+ }
+ if (getValueByPath(fromObject, ['default']) !== undefined) {
+ throw new Error('default parameter is not supported in Gemini API.');
+ }
+ if (getValueByPath(fromObject, ['maxLength']) !== undefined) {
+ throw new Error('maxLength parameter is not supported in Gemini API.');
+ }
+ if (getValueByPath(fromObject, ['minLength']) !== undefined) {
+ throw new Error('minLength parameter is not supported in Gemini API.');
+ }
+ if (getValueByPath(fromObject, ['minProperties']) !== undefined) {
+ throw new Error('minProperties parameter is not supported in Gemini API.');
+ }
+ if (getValueByPath(fromObject, ['maxProperties']) !== undefined) {
+ throw new Error('maxProperties parameter is not supported in Gemini API.');
+ }
+ const fromAnyOf = getValueByPath(fromObject, ['anyOf']);
+ if (fromAnyOf != null) {
+ setValueByPath(toObject, ['anyOf'], fromAnyOf);
+ }
+ const fromDescription = getValueByPath(fromObject, ['description']);
+ if (fromDescription != null) {
+ setValueByPath(toObject, ['description'], fromDescription);
+ }
+ const fromEnum = getValueByPath(fromObject, ['enum']);
+ if (fromEnum != null) {
+ setValueByPath(toObject, ['enum'], fromEnum);
+ }
+ const fromFormat = getValueByPath(fromObject, ['format']);
+ if (fromFormat != null) {
+ setValueByPath(toObject, ['format'], fromFormat);
+ }
+ const fromItems = getValueByPath(fromObject, ['items']);
+ if (fromItems != null) {
+ setValueByPath(toObject, ['items'], fromItems);
+ }
+ const fromMaxItems = getValueByPath(fromObject, ['maxItems']);
+ if (fromMaxItems != null) {
+ setValueByPath(toObject, ['maxItems'], fromMaxItems);
+ }
+ const fromMaximum = getValueByPath(fromObject, ['maximum']);
+ if (fromMaximum != null) {
+ setValueByPath(toObject, ['maximum'], fromMaximum);
+ }
+ const fromMinItems = getValueByPath(fromObject, ['minItems']);
+ if (fromMinItems != null) {
+ setValueByPath(toObject, ['minItems'], fromMinItems);
+ }
+ const fromMinimum = getValueByPath(fromObject, ['minimum']);
+ if (fromMinimum != null) {
+ setValueByPath(toObject, ['minimum'], fromMinimum);
+ }
+ const fromNullable = getValueByPath(fromObject, ['nullable']);
+ if (fromNullable != null) {
+ setValueByPath(toObject, ['nullable'], fromNullable);
+ }
+ const fromProperties = getValueByPath(fromObject, ['properties']);
+ if (fromProperties != null) {
+ setValueByPath(toObject, ['properties'], fromProperties);
+ }
+ const fromPropertyOrdering = getValueByPath(fromObject, [
+ 'propertyOrdering',
+ ]);
+ if (fromPropertyOrdering != null) {
+ setValueByPath(toObject, ['propertyOrdering'], fromPropertyOrdering);
+ }
+ const fromRequired = getValueByPath(fromObject, ['required']);
+ if (fromRequired != null) {
+ setValueByPath(toObject, ['required'], fromRequired);
+ }
+ const fromTitle = getValueByPath(fromObject, ['title']);
+ if (fromTitle != null) {
+ setValueByPath(toObject, ['title'], fromTitle);
+ }
+ const fromType = getValueByPath(fromObject, ['type']);
+ if (fromType != null) {
+ setValueByPath(toObject, ['type'], fromType);
+ }
+ return toObject;
+}
+function safetySettingToMldev(apiClient, fromObject) {
+ const toObject = {};
+ if (getValueByPath(fromObject, ['method']) !== undefined) {
+ throw new Error('method parameter is not supported in Gemini API.');
+ }
+ const fromCategory = getValueByPath(fromObject, ['category']);
+ if (fromCategory != null) {
+ setValueByPath(toObject, ['category'], fromCategory);
+ }
+ const fromThreshold = getValueByPath(fromObject, ['threshold']);
+ if (fromThreshold != null) {
+ setValueByPath(toObject, ['threshold'], fromThreshold);
+ }
+ return toObject;
+}
+function functionDeclarationToMldev(apiClient, fromObject) {
+ const toObject = {};
+ if (getValueByPath(fromObject, ['response']) !== undefined) {
+ throw new Error('response parameter is not supported in Gemini API.');
+ }
+ const fromDescription = getValueByPath(fromObject, ['description']);
+ if (fromDescription != null) {
+ setValueByPath(toObject, ['description'], fromDescription);
+ }
+ const fromName = getValueByPath(fromObject, ['name']);
+ if (fromName != null) {
+ setValueByPath(toObject, ['name'], fromName);
+ }
+ const fromParameters = getValueByPath(fromObject, ['parameters']);
+ if (fromParameters != null) {
+ setValueByPath(toObject, ['parameters'], fromParameters);
+ }
+ return toObject;
+}
+function googleSearchToMldev() {
+ const toObject = {};
+ return toObject;
+}
+function dynamicRetrievalConfigToMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromMode = getValueByPath(fromObject, ['mode']);
+ if (fromMode != null) {
+ setValueByPath(toObject, ['mode'], fromMode);
+ }
+ const fromDynamicThreshold = getValueByPath(fromObject, [
+ 'dynamicThreshold',
+ ]);
+ if (fromDynamicThreshold != null) {
+ setValueByPath(toObject, ['dynamicThreshold'], fromDynamicThreshold);
+ }
+ return toObject;
+}
+function googleSearchRetrievalToMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromDynamicRetrievalConfig = getValueByPath(fromObject, [
+ 'dynamicRetrievalConfig',
+ ]);
+ if (fromDynamicRetrievalConfig != null) {
+ setValueByPath(toObject, ['dynamicRetrievalConfig'], dynamicRetrievalConfigToMldev(apiClient, fromDynamicRetrievalConfig));
+ }
+ return toObject;
+}
+function toolToMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromFunctionDeclarations = getValueByPath(fromObject, [
+ 'functionDeclarations',
+ ]);
+ if (fromFunctionDeclarations != null) {
+ if (Array.isArray(fromFunctionDeclarations)) {
+ setValueByPath(toObject, ['functionDeclarations'], fromFunctionDeclarations.map((item) => {
+ return functionDeclarationToMldev(apiClient, item);
+ }));
+ }
+ else {
+ setValueByPath(toObject, ['functionDeclarations'], fromFunctionDeclarations);
+ }
+ }
+ if (getValueByPath(fromObject, ['retrieval']) !== undefined) {
+ throw new Error('retrieval parameter is not supported in Gemini API.');
+ }
+ const fromGoogleSearch = getValueByPath(fromObject, ['googleSearch']);
+ if (fromGoogleSearch != null) {
+ setValueByPath(toObject, ['googleSearch'], googleSearchToMldev());
+ }
+ const fromGoogleSearchRetrieval = getValueByPath(fromObject, [
+ 'googleSearchRetrieval',
+ ]);
+ if (fromGoogleSearchRetrieval != null) {
+ setValueByPath(toObject, ['googleSearchRetrieval'], googleSearchRetrievalToMldev(apiClient, fromGoogleSearchRetrieval));
+ }
+ const fromCodeExecution = getValueByPath(fromObject, [
+ 'codeExecution',
+ ]);
+ if (fromCodeExecution != null) {
+ setValueByPath(toObject, ['codeExecution'], fromCodeExecution);
+ }
+ return toObject;
+}
+function functionCallingConfigToMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromMode = getValueByPath(fromObject, ['mode']);
+ if (fromMode != null) {
+ setValueByPath(toObject, ['mode'], fromMode);
+ }
+ const fromAllowedFunctionNames = getValueByPath(fromObject, [
+ 'allowedFunctionNames',
+ ]);
+ if (fromAllowedFunctionNames != null) {
+ setValueByPath(toObject, ['allowedFunctionNames'], fromAllowedFunctionNames);
+ }
+ return toObject;
+}
+function toolConfigToMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromFunctionCallingConfig = getValueByPath(fromObject, [
+ 'functionCallingConfig',
+ ]);
+ if (fromFunctionCallingConfig != null) {
+ setValueByPath(toObject, ['functionCallingConfig'], functionCallingConfigToMldev(apiClient, fromFunctionCallingConfig));
+ }
+ return toObject;
+}
+function prebuiltVoiceConfigToMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromVoiceName = getValueByPath(fromObject, ['voiceName']);
+ if (fromVoiceName != null) {
+ setValueByPath(toObject, ['voiceName'], fromVoiceName);
+ }
+ return toObject;
+}
+function voiceConfigToMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromPrebuiltVoiceConfig = getValueByPath(fromObject, [
+ 'prebuiltVoiceConfig',
+ ]);
+ if (fromPrebuiltVoiceConfig != null) {
+ setValueByPath(toObject, ['prebuiltVoiceConfig'], prebuiltVoiceConfigToMldev(apiClient, fromPrebuiltVoiceConfig));
+ }
+ return toObject;
+}
+function speechConfigToMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromVoiceConfig = getValueByPath(fromObject, ['voiceConfig']);
+ if (fromVoiceConfig != null) {
+ setValueByPath(toObject, ['voiceConfig'], voiceConfigToMldev(apiClient, fromVoiceConfig));
+ }
+ return toObject;
+}
+function thinkingConfigToMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromIncludeThoughts = getValueByPath(fromObject, [
+ 'includeThoughts',
+ ]);
+ if (fromIncludeThoughts != null) {
+ setValueByPath(toObject, ['includeThoughts'], fromIncludeThoughts);
+ }
+ return toObject;
+}
+function generateContentConfigToMldev(apiClient, fromObject, parentObject) {
+ const toObject = {};
+ const fromSystemInstruction = getValueByPath(fromObject, [
+ 'systemInstruction',
+ ]);
+ if (parentObject !== undefined && fromSystemInstruction != null) {
+ setValueByPath(parentObject, ['systemInstruction'], contentToMldev(apiClient, tContent(apiClient, fromSystemInstruction)));
+ }
+ const fromTemperature = getValueByPath(fromObject, ['temperature']);
+ if (fromTemperature != null) {
+ setValueByPath(toObject, ['temperature'], fromTemperature);
+ }
+ const fromTopP = getValueByPath(fromObject, ['topP']);
+ if (fromTopP != null) {
+ setValueByPath(toObject, ['topP'], fromTopP);
+ }
+ const fromTopK = getValueByPath(fromObject, ['topK']);
+ if (fromTopK != null) {
+ setValueByPath(toObject, ['topK'], fromTopK);
+ }
+ const fromCandidateCount = getValueByPath(fromObject, [
+ 'candidateCount',
+ ]);
+ if (fromCandidateCount != null) {
+ setValueByPath(toObject, ['candidateCount'], fromCandidateCount);
+ }
+ const fromMaxOutputTokens = getValueByPath(fromObject, [
+ 'maxOutputTokens',
+ ]);
+ if (fromMaxOutputTokens != null) {
+ setValueByPath(toObject, ['maxOutputTokens'], fromMaxOutputTokens);
+ }
+ const fromStopSequences = getValueByPath(fromObject, [
+ 'stopSequences',
+ ]);
+ if (fromStopSequences != null) {
+ setValueByPath(toObject, ['stopSequences'], fromStopSequences);
+ }
+ const fromResponseLogprobs = getValueByPath(fromObject, [
+ 'responseLogprobs',
+ ]);
+ if (fromResponseLogprobs != null) {
+ setValueByPath(toObject, ['responseLogprobs'], fromResponseLogprobs);
+ }
+ const fromLogprobs = getValueByPath(fromObject, ['logprobs']);
+ if (fromLogprobs != null) {
+ setValueByPath(toObject, ['logprobs'], fromLogprobs);
+ }
+ const fromPresencePenalty = getValueByPath(fromObject, [
+ 'presencePenalty',
+ ]);
+ if (fromPresencePenalty != null) {
+ setValueByPath(toObject, ['presencePenalty'], fromPresencePenalty);
+ }
+ const fromFrequencyPenalty = getValueByPath(fromObject, [
+ 'frequencyPenalty',
+ ]);
+ if (fromFrequencyPenalty != null) {
+ setValueByPath(toObject, ['frequencyPenalty'], fromFrequencyPenalty);
+ }
+ const fromSeed = getValueByPath(fromObject, ['seed']);
+ if (fromSeed != null) {
+ setValueByPath(toObject, ['seed'], fromSeed);
+ }
+ const fromResponseMimeType = getValueByPath(fromObject, [
+ 'responseMimeType',
+ ]);
+ if (fromResponseMimeType != null) {
+ setValueByPath(toObject, ['responseMimeType'], fromResponseMimeType);
+ }
+ const fromResponseSchema = getValueByPath(fromObject, [
+ 'responseSchema',
+ ]);
+ if (fromResponseSchema != null) {
+ setValueByPath(toObject, ['responseSchema'], schemaToMldev(apiClient, tSchema(apiClient, fromResponseSchema)));
+ }
+ if (getValueByPath(fromObject, ['routingConfig']) !== undefined) {
+ throw new Error('routingConfig parameter is not supported in Gemini API.');
+ }
+ const fromSafetySettings = getValueByPath(fromObject, [
+ 'safetySettings',
+ ]);
+ if (parentObject !== undefined && fromSafetySettings != null) {
+ if (Array.isArray(fromSafetySettings)) {
+ setValueByPath(parentObject, ['safetySettings'], fromSafetySettings.map((item) => {
+ return safetySettingToMldev(apiClient, item);
+ }));
+ }
+ else {
+ setValueByPath(parentObject, ['safetySettings'], fromSafetySettings);
+ }
+ }
+ const fromTools = getValueByPath(fromObject, ['tools']);
+ if (parentObject !== undefined && fromTools != null) {
+ if (Array.isArray(fromTools)) {
+ setValueByPath(parentObject, ['tools'], tTools(apiClient, tTools(apiClient, fromTools).map((item) => {
+ return toolToMldev(apiClient, tTool(apiClient, item));
+ })));
+ }
+ else {
+ setValueByPath(parentObject, ['tools'], tTools(apiClient, fromTools));
+ }
+ }
+ const fromToolConfig = getValueByPath(fromObject, ['toolConfig']);
+ if (parentObject !== undefined && fromToolConfig != null) {
+ setValueByPath(parentObject, ['toolConfig'], toolConfigToMldev(apiClient, fromToolConfig));
+ }
+ if (getValueByPath(fromObject, ['labels']) !== undefined) {
+ throw new Error('labels parameter is not supported in Gemini API.');
+ }
+ const fromCachedContent = getValueByPath(fromObject, [
+ 'cachedContent',
+ ]);
+ if (parentObject !== undefined && fromCachedContent != null) {
+ setValueByPath(parentObject, ['cachedContent'], tCachedContentName(apiClient, fromCachedContent));
+ }
+ const fromResponseModalities = getValueByPath(fromObject, [
+ 'responseModalities',
+ ]);
+ if (fromResponseModalities != null) {
+ setValueByPath(toObject, ['responseModalities'], fromResponseModalities);
+ }
+ const fromMediaResolution = getValueByPath(fromObject, [
+ 'mediaResolution',
+ ]);
+ if (fromMediaResolution != null) {
+ setValueByPath(toObject, ['mediaResolution'], fromMediaResolution);
+ }
+ const fromSpeechConfig = getValueByPath(fromObject, ['speechConfig']);
+ if (fromSpeechConfig != null) {
+ setValueByPath(toObject, ['speechConfig'], speechConfigToMldev(apiClient, tSpeechConfig(apiClient, fromSpeechConfig)));
+ }
+ if (getValueByPath(fromObject, ['audioTimestamp']) !== undefined) {
+ throw new Error('audioTimestamp parameter is not supported in Gemini API.');
+ }
+ const fromThinkingConfig = getValueByPath(fromObject, [
+ 'thinkingConfig',
+ ]);
+ if (fromThinkingConfig != null) {
+ setValueByPath(toObject, ['thinkingConfig'], thinkingConfigToMldev(apiClient, fromThinkingConfig));
+ }
+ return toObject;
+}
+function generateContentParametersToMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromModel = getValueByPath(fromObject, ['model']);
+ if (fromModel != null) {
+ setValueByPath(toObject, ['_url', 'model'], tModel(apiClient, fromModel));
+ }
+ const fromContents = getValueByPath(fromObject, ['contents']);
+ if (fromContents != null) {
+ if (Array.isArray(fromContents)) {
+ setValueByPath(toObject, ['contents'], tContents(apiClient, tContents(apiClient, fromContents).map((item) => {
+ return contentToMldev(apiClient, item);
+ })));
+ }
+ else {
+ setValueByPath(toObject, ['contents'], tContents(apiClient, fromContents));
+ }
+ }
+ const fromConfig = getValueByPath(fromObject, ['config']);
+ if (fromConfig != null) {
+ setValueByPath(toObject, ['generationConfig'], generateContentConfigToMldev(apiClient, fromConfig, toObject));
+ }
+ return toObject;
+}
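+// Note on the batch embedding paths below: segments like 'requests[]' are,
+// judging from their usage here, array markers understood by setValueByPath
+// that fan a value out across each element of the `requests` array in the
+// batch request body.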
+function embedContentConfigToMldev(apiClient, fromObject, parentObject) {
+ const toObject = {};
+ const fromTaskType = getValueByPath(fromObject, ['taskType']);
+ if (parentObject !== undefined && fromTaskType != null) {
+ setValueByPath(parentObject, ['requests[]', 'taskType'], fromTaskType);
+ }
+ const fromTitle = getValueByPath(fromObject, ['title']);
+ if (parentObject !== undefined && fromTitle != null) {
+ setValueByPath(parentObject, ['requests[]', 'title'], fromTitle);
+ }
+ const fromOutputDimensionality = getValueByPath(fromObject, [
+ 'outputDimensionality',
+ ]);
+ if (parentObject !== undefined && fromOutputDimensionality != null) {
+ setValueByPath(parentObject, ['requests[]', 'outputDimensionality'], fromOutputDimensionality);
+ }
+ if (getValueByPath(fromObject, ['mimeType']) !== undefined) {
+ throw new Error('mimeType parameter is not supported in Gemini API.');
+ }
+ if (getValueByPath(fromObject, ['autoTruncate']) !== undefined) {
+ throw new Error('autoTruncate parameter is not supported in Gemini API.');
+ }
+ return toObject;
+}
+function embedContentParametersToMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromModel = getValueByPath(fromObject, ['model']);
+ if (fromModel != null) {
+ setValueByPath(toObject, ['_url', 'model'], tModel(apiClient, fromModel));
+ }
+ const fromContents = getValueByPath(fromObject, ['contents']);
+ if (fromContents != null) {
+ setValueByPath(toObject, ['requests[]', 'content'], tContentsForEmbed(apiClient, fromContents));
+ }
+ const fromConfig = getValueByPath(fromObject, ['config']);
+ if (fromConfig != null) {
+ setValueByPath(toObject, ['config'], embedContentConfigToMldev(apiClient, fromConfig, toObject));
+ }
+ const fromModelForEmbedContent = getValueByPath(fromObject, ['model']);
+ if (fromModelForEmbedContent !== undefined) {
+ setValueByPath(toObject, ['requests[]', 'model'], tModel(apiClient, fromModelForEmbedContent));
+ }
+ return toObject;
+}
+function generateImagesConfigToMldev(apiClient, fromObject, parentObject) {
+ const toObject = {};
+ if (getValueByPath(fromObject, ['outputGcsUri']) !== undefined) {
+ throw new Error('outputGcsUri parameter is not supported in Gemini API.');
+ }
+ if (getValueByPath(fromObject, ['negativePrompt']) !== undefined) {
+ throw new Error('negativePrompt parameter is not supported in Gemini API.');
+ }
+ const fromNumberOfImages = getValueByPath(fromObject, [
+ 'numberOfImages',
+ ]);
+ if (parentObject !== undefined && fromNumberOfImages != null) {
+ setValueByPath(parentObject, ['parameters', 'sampleCount'], fromNumberOfImages);
+ }
+ const fromAspectRatio = getValueByPath(fromObject, ['aspectRatio']);
+ if (parentObject !== undefined && fromAspectRatio != null) {
+ setValueByPath(parentObject, ['parameters', 'aspectRatio'], fromAspectRatio);
+ }
+ const fromGuidanceScale = getValueByPath(fromObject, [
+ 'guidanceScale',
+ ]);
+ if (parentObject !== undefined && fromGuidanceScale != null) {
+ setValueByPath(parentObject, ['parameters', 'guidanceScale'], fromGuidanceScale);
+ }
+ if (getValueByPath(fromObject, ['seed']) !== undefined) {
+ throw new Error('seed parameter is not supported in Gemini API.');
+ }
+ const fromSafetyFilterLevel = getValueByPath(fromObject, [
+ 'safetyFilterLevel',
+ ]);
+ if (parentObject !== undefined && fromSafetyFilterLevel != null) {
+ setValueByPath(parentObject, ['parameters', 'safetySetting'], fromSafetyFilterLevel);
+ }
+ const fromPersonGeneration = getValueByPath(fromObject, [
+ 'personGeneration',
+ ]);
+ if (parentObject !== undefined && fromPersonGeneration != null) {
+ setValueByPath(parentObject, ['parameters', 'personGeneration'], fromPersonGeneration);
+ }
+ const fromIncludeSafetyAttributes = getValueByPath(fromObject, [
+ 'includeSafetyAttributes',
+ ]);
+ if (parentObject !== undefined && fromIncludeSafetyAttributes != null) {
+ setValueByPath(parentObject, ['parameters', 'includeSafetyAttributes'], fromIncludeSafetyAttributes);
+ }
+ const fromIncludeRaiReason = getValueByPath(fromObject, [
+ 'includeRaiReason',
+ ]);
+ if (parentObject !== undefined && fromIncludeRaiReason != null) {
+ setValueByPath(parentObject, ['parameters', 'includeRaiReason'], fromIncludeRaiReason);
+ }
+ const fromLanguage = getValueByPath(fromObject, ['language']);
+ if (parentObject !== undefined && fromLanguage != null) {
+ setValueByPath(parentObject, ['parameters', 'language'], fromLanguage);
+ }
+ const fromOutputMimeType = getValueByPath(fromObject, [
+ 'outputMimeType',
+ ]);
+ if (parentObject !== undefined && fromOutputMimeType != null) {
+ setValueByPath(parentObject, ['parameters', 'outputOptions', 'mimeType'], fromOutputMimeType);
+ }
+ const fromOutputCompressionQuality = getValueByPath(fromObject, [
+ 'outputCompressionQuality',
+ ]);
+ if (parentObject !== undefined && fromOutputCompressionQuality != null) {
+ setValueByPath(parentObject, ['parameters', 'outputOptions', 'compressionQuality'], fromOutputCompressionQuality);
+ }
+ if (getValueByPath(fromObject, ['addWatermark']) !== undefined) {
+ throw new Error('addWatermark parameter is not supported in Gemini API.');
+ }
+ if (getValueByPath(fromObject, ['enhancePrompt']) !== undefined) {
+ throw new Error('enhancePrompt parameter is not supported in Gemini API.');
+ }
+ return toObject;
+}
+function generateImagesParametersToMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromModel = getValueByPath(fromObject, ['model']);
+ if (fromModel != null) {
+ setValueByPath(toObject, ['_url', 'model'], tModel(apiClient, fromModel));
+ }
+ const fromPrompt = getValueByPath(fromObject, ['prompt']);
+ if (fromPrompt != null) {
+ setValueByPath(toObject, ['instances[0]', 'prompt'], fromPrompt);
+ }
+ const fromConfig = getValueByPath(fromObject, ['config']);
+ if (fromConfig != null) {
+ setValueByPath(toObject, ['config'], generateImagesConfigToMldev(apiClient, fromConfig, toObject));
+ }
+ return toObject;
+}
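+// countTokens request converters (Gemini Developer API): only model and
+// contents are forwarded; systemInstruction, tools and generationConfig are
+// rejected on this surface.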
+function countTokensConfigToMldev(apiClient, fromObject) {
+ const toObject = {};
+ if (getValueByPath(fromObject, ['systemInstruction']) !== undefined) {
+ throw new Error('systemInstruction parameter is not supported in Gemini API.');
+ }
+ if (getValueByPath(fromObject, ['tools']) !== undefined) {
+ throw new Error('tools parameter is not supported in Gemini API.');
+ }
+ if (getValueByPath(fromObject, ['generationConfig']) !== undefined) {
+ throw new Error('generationConfig parameter is not supported in Gemini API.');
+ }
+ return toObject;
+}
+function countTokensParametersToMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromModel = getValueByPath(fromObject, ['model']);
+ if (fromModel != null) {
+ setValueByPath(toObject, ['_url', 'model'], tModel(apiClient, fromModel));
+ }
+ const fromContents = getValueByPath(fromObject, ['contents']);
+ if (fromContents != null) {
+ if (Array.isArray(fromContents)) {
+ setValueByPath(toObject, ['contents'], tContents(apiClient, tContents(apiClient, fromContents).map((item) => {
+ return contentToMldev(apiClient, item);
+ })));
+ }
+ else {
+ setValueByPath(toObject, ['contents'], tContents(apiClient, fromContents));
+ }
+ }
+ const fromConfig = getValueByPath(fromObject, ['config']);
+ if (fromConfig != null) {
+ setValueByPath(toObject, ['config'], countTokensConfigToMldev(apiClient, fromConfig));
+ }
+ return toObject;
+}
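+// generateVideos request converters (Gemini Developer API). Reference images
+// travel as base64 'bytesBase64Encoded' (gcsUri is rejected), and the
+// Vertex-only options outputGcsUri, fps, seed, resolution, pubsubTopic and
+// enhancePrompt all throw.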
+function imageToMldev(apiClient, fromObject) {
+ const toObject = {};
+ if (getValueByPath(fromObject, ['gcsUri']) !== undefined) {
+ throw new Error('gcsUri parameter is not supported in Gemini API.');
+ }
+ const fromImageBytes = getValueByPath(fromObject, ['imageBytes']);
+ if (fromImageBytes != null) {
+ setValueByPath(toObject, ['bytesBase64Encoded'], tBytes(apiClient, fromImageBytes));
+ }
+ const fromMimeType = getValueByPath(fromObject, ['mimeType']);
+ if (fromMimeType != null) {
+ setValueByPath(toObject, ['mimeType'], fromMimeType);
+ }
+ return toObject;
+}
+function generateVideosConfigToMldev(apiClient, fromObject, parentObject) {
+ const toObject = {};
+ const fromNumberOfVideos = getValueByPath(fromObject, [
+ 'numberOfVideos',
+ ]);
+ if (parentObject !== undefined && fromNumberOfVideos != null) {
+ setValueByPath(parentObject, ['parameters', 'sampleCount'], fromNumberOfVideos);
+ }
+ if (getValueByPath(fromObject, ['outputGcsUri']) !== undefined) {
+ throw new Error('outputGcsUri parameter is not supported in Gemini API.');
+ }
+ if (getValueByPath(fromObject, ['fps']) !== undefined) {
+ throw new Error('fps parameter is not supported in Gemini API.');
+ }
+ const fromDurationSeconds = getValueByPath(fromObject, [
+ 'durationSeconds',
+ ]);
+ if (parentObject !== undefined && fromDurationSeconds != null) {
+ setValueByPath(parentObject, ['parameters', 'durationSeconds'], fromDurationSeconds);
+ }
+ if (getValueByPath(fromObject, ['seed']) !== undefined) {
+ throw new Error('seed parameter is not supported in Gemini API.');
+ }
+ const fromAspectRatio = getValueByPath(fromObject, ['aspectRatio']);
+ if (parentObject !== undefined && fromAspectRatio != null) {
+ setValueByPath(parentObject, ['parameters', 'aspectRatio'], fromAspectRatio);
+ }
+ if (getValueByPath(fromObject, ['resolution']) !== undefined) {
+ throw new Error('resolution parameter is not supported in Gemini API.');
+ }
+ const fromPersonGeneration = getValueByPath(fromObject, [
+ 'personGeneration',
+ ]);
+ if (parentObject !== undefined && fromPersonGeneration != null) {
+ setValueByPath(parentObject, ['parameters', 'personGeneration'], fromPersonGeneration);
+ }
+ if (getValueByPath(fromObject, ['pubsubTopic']) !== undefined) {
+ throw new Error('pubsubTopic parameter is not supported in Gemini API.');
+ }
+ const fromNegativePrompt = getValueByPath(fromObject, [
+ 'negativePrompt',
+ ]);
+ if (parentObject !== undefined && fromNegativePrompt != null) {
+ setValueByPath(parentObject, ['parameters', 'negativePrompt'], fromNegativePrompt);
+ }
+ if (getValueByPath(fromObject, ['enhancePrompt']) !== undefined) {
+ throw new Error('enhancePrompt parameter is not supported in Gemini API.');
+ }
+ return toObject;
+}
+function generateVideosParametersToMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromModel = getValueByPath(fromObject, ['model']);
+ if (fromModel != null) {
+ setValueByPath(toObject, ['_url', 'model'], tModel(apiClient, fromModel));
+ }
+ const fromPrompt = getValueByPath(fromObject, ['prompt']);
+ if (fromPrompt != null) {
+ setValueByPath(toObject, ['instances[0]', 'prompt'], fromPrompt);
+ }
+ const fromImage = getValueByPath(fromObject, ['image']);
+ if (fromImage != null) {
+ setValueByPath(toObject, ['instances[0]', 'image'], imageToMldev(apiClient, fromImage));
+ }
+ const fromConfig = getValueByPath(fromObject, ['config']);
+ if (fromConfig != null) {
+ setValueByPath(toObject, ['config'], generateVideosConfigToMldev(apiClient, fromConfig, toObject));
+ }
+ return toObject;
+}
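+// Request converters for the Vertex AI surface begin here. They mirror the
+// Gemini API ("Mldev") converters above; note that partToVertex carries
+// videoMetadata through along with the standard part fields.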
+function partToVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromVideoMetadata = getValueByPath(fromObject, [
+ 'videoMetadata',
+ ]);
+ if (fromVideoMetadata != null) {
+ setValueByPath(toObject, ['videoMetadata'], fromVideoMetadata);
+ }
+ const fromThought = getValueByPath(fromObject, ['thought']);
+ if (fromThought != null) {
+ setValueByPath(toObject, ['thought'], fromThought);
+ }
+ const fromCodeExecutionResult = getValueByPath(fromObject, [
+ 'codeExecutionResult',
+ ]);
+ if (fromCodeExecutionResult != null) {
+ setValueByPath(toObject, ['codeExecutionResult'], fromCodeExecutionResult);
+ }
+ const fromExecutableCode = getValueByPath(fromObject, [
+ 'executableCode',
+ ]);
+ if (fromExecutableCode != null) {
+ setValueByPath(toObject, ['executableCode'], fromExecutableCode);
+ }
+ const fromFileData = getValueByPath(fromObject, ['fileData']);
+ if (fromFileData != null) {
+ setValueByPath(toObject, ['fileData'], fromFileData);
+ }
+ const fromFunctionCall = getValueByPath(fromObject, ['functionCall']);
+ if (fromFunctionCall != null) {
+ setValueByPath(toObject, ['functionCall'], fromFunctionCall);
+ }
+ const fromFunctionResponse = getValueByPath(fromObject, [
+ 'functionResponse',
+ ]);
+ if (fromFunctionResponse != null) {
+ setValueByPath(toObject, ['functionResponse'], fromFunctionResponse);
+ }
+ const fromInlineData = getValueByPath(fromObject, ['inlineData']);
+ if (fromInlineData != null) {
+ setValueByPath(toObject, ['inlineData'], fromInlineData);
+ }
+ const fromText = getValueByPath(fromObject, ['text']);
+ if (fromText != null) {
+ setValueByPath(toObject, ['text'], fromText);
+ }
+ return toObject;
+}
+function contentToVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromParts = getValueByPath(fromObject, ['parts']);
+ if (fromParts != null) {
+ if (Array.isArray(fromParts)) {
+ setValueByPath(toObject, ['parts'], fromParts.map((item) => {
+ return partToVertex(apiClient, item);
+ }));
+ }
+ else {
+ setValueByPath(toObject, ['parts'], fromParts);
+ }
+ }
+ const fromRole = getValueByPath(fromObject, ['role']);
+ if (fromRole != null) {
+ setValueByPath(toObject, ['role'], fromRole);
+ }
+ return toObject;
+}
+function schemaToVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromExample = getValueByPath(fromObject, ['example']);
+ if (fromExample != null) {
+ setValueByPath(toObject, ['example'], fromExample);
+ }
+ const fromPattern = getValueByPath(fromObject, ['pattern']);
+ if (fromPattern != null) {
+ setValueByPath(toObject, ['pattern'], fromPattern);
+ }
+ const fromDefault = getValueByPath(fromObject, ['default']);
+ if (fromDefault != null) {
+ setValueByPath(toObject, ['default'], fromDefault);
+ }
+ const fromMaxLength = getValueByPath(fromObject, ['maxLength']);
+ if (fromMaxLength != null) {
+ setValueByPath(toObject, ['maxLength'], fromMaxLength);
+ }
+ const fromMinLength = getValueByPath(fromObject, ['minLength']);
+ if (fromMinLength != null) {
+ setValueByPath(toObject, ['minLength'], fromMinLength);
+ }
+ const fromMinProperties = getValueByPath(fromObject, [
+ 'minProperties',
+ ]);
+ if (fromMinProperties != null) {
+ setValueByPath(toObject, ['minProperties'], fromMinProperties);
+ }
+ const fromMaxProperties = getValueByPath(fromObject, [
+ 'maxProperties',
+ ]);
+ if (fromMaxProperties != null) {
+ setValueByPath(toObject, ['maxProperties'], fromMaxProperties);
+ }
+ const fromAnyOf = getValueByPath(fromObject, ['anyOf']);
+ if (fromAnyOf != null) {
+ setValueByPath(toObject, ['anyOf'], fromAnyOf);
+ }
+ const fromDescription = getValueByPath(fromObject, ['description']);
+ if (fromDescription != null) {
+ setValueByPath(toObject, ['description'], fromDescription);
+ }
+ const fromEnum = getValueByPath(fromObject, ['enum']);
+ if (fromEnum != null) {
+ setValueByPath(toObject, ['enum'], fromEnum);
+ }
+ const fromFormat = getValueByPath(fromObject, ['format']);
+ if (fromFormat != null) {
+ setValueByPath(toObject, ['format'], fromFormat);
+ }
+ const fromItems = getValueByPath(fromObject, ['items']);
+ if (fromItems != null) {
+ setValueByPath(toObject, ['items'], fromItems);
+ }
+ const fromMaxItems = getValueByPath(fromObject, ['maxItems']);
+ if (fromMaxItems != null) {
+ setValueByPath(toObject, ['maxItems'], fromMaxItems);
+ }
+ const fromMaximum = getValueByPath(fromObject, ['maximum']);
+ if (fromMaximum != null) {
+ setValueByPath(toObject, ['maximum'], fromMaximum);
+ }
+ const fromMinItems = getValueByPath(fromObject, ['minItems']);
+ if (fromMinItems != null) {
+ setValueByPath(toObject, ['minItems'], fromMinItems);
+ }
+ const fromMinimum = getValueByPath(fromObject, ['minimum']);
+ if (fromMinimum != null) {
+ setValueByPath(toObject, ['minimum'], fromMinimum);
+ }
+ const fromNullable = getValueByPath(fromObject, ['nullable']);
+ if (fromNullable != null) {
+ setValueByPath(toObject, ['nullable'], fromNullable);
+ }
+ const fromProperties = getValueByPath(fromObject, ['properties']);
+ if (fromProperties != null) {
+ setValueByPath(toObject, ['properties'], fromProperties);
+ }
+ const fromPropertyOrdering = getValueByPath(fromObject, [
+ 'propertyOrdering',
+ ]);
+ if (fromPropertyOrdering != null) {
+ setValueByPath(toObject, ['propertyOrdering'], fromPropertyOrdering);
+ }
+ const fromRequired = getValueByPath(fromObject, ['required']);
+ if (fromRequired != null) {
+ setValueByPath(toObject, ['required'], fromRequired);
+ }
+ const fromTitle = getValueByPath(fromObject, ['title']);
+ if (fromTitle != null) {
+ setValueByPath(toObject, ['title'], fromTitle);
+ }
+ const fromType = getValueByPath(fromObject, ['type']);
+ if (fromType != null) {
+ setValueByPath(toObject, ['type'], fromType);
+ }
+ return toObject;
+}
+function safetySettingToVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromMethod = getValueByPath(fromObject, ['method']);
+ if (fromMethod != null) {
+ setValueByPath(toObject, ['method'], fromMethod);
+ }
+ const fromCategory = getValueByPath(fromObject, ['category']);
+ if (fromCategory != null) {
+ setValueByPath(toObject, ['category'], fromCategory);
+ }
+ const fromThreshold = getValueByPath(fromObject, ['threshold']);
+ if (fromThreshold != null) {
+ setValueByPath(toObject, ['threshold'], fromThreshold);
+ }
+ return toObject;
+}
+function functionDeclarationToVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromResponse = getValueByPath(fromObject, ['response']);
+ if (fromResponse != null) {
+ setValueByPath(toObject, ['response'], schemaToVertex(apiClient, fromResponse));
+ }
+ const fromDescription = getValueByPath(fromObject, ['description']);
+ if (fromDescription != null) {
+ setValueByPath(toObject, ['description'], fromDescription);
+ }
+ const fromName = getValueByPath(fromObject, ['name']);
+ if (fromName != null) {
+ setValueByPath(toObject, ['name'], fromName);
+ }
+ const fromParameters = getValueByPath(fromObject, ['parameters']);
+ if (fromParameters != null) {
+ setValueByPath(toObject, ['parameters'], fromParameters);
+ }
+ return toObject;
+}
+function googleSearchToVertex() {
+ const toObject = {};
+ return toObject;
+}
+function dynamicRetrievalConfigToVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromMode = getValueByPath(fromObject, ['mode']);
+ if (fromMode != null) {
+ setValueByPath(toObject, ['mode'], fromMode);
+ }
+ const fromDynamicThreshold = getValueByPath(fromObject, [
+ 'dynamicThreshold',
+ ]);
+ if (fromDynamicThreshold != null) {
+ setValueByPath(toObject, ['dynamicThreshold'], fromDynamicThreshold);
+ }
+ return toObject;
+}
+function googleSearchRetrievalToVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromDynamicRetrievalConfig = getValueByPath(fromObject, [
+ 'dynamicRetrievalConfig',
+ ]);
+ if (fromDynamicRetrievalConfig != null) {
+ setValueByPath(toObject, ['dynamicRetrievalConfig'], dynamicRetrievalConfigToVertex(apiClient, fromDynamicRetrievalConfig));
+ }
+ return toObject;
+}
+function toolToVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromFunctionDeclarations = getValueByPath(fromObject, [
+ 'functionDeclarations',
+ ]);
+ if (fromFunctionDeclarations != null) {
+ if (Array.isArray(fromFunctionDeclarations)) {
+ setValueByPath(toObject, ['functionDeclarations'], fromFunctionDeclarations.map((item) => {
+ return functionDeclarationToVertex(apiClient, item);
+ }));
+ }
+ else {
+ setValueByPath(toObject, ['functionDeclarations'], fromFunctionDeclarations);
+ }
+ }
+ const fromRetrieval = getValueByPath(fromObject, ['retrieval']);
+ if (fromRetrieval != null) {
+ setValueByPath(toObject, ['retrieval'], fromRetrieval);
+ }
+ const fromGoogleSearch = getValueByPath(fromObject, ['googleSearch']);
+ if (fromGoogleSearch != null) {
+ setValueByPath(toObject, ['googleSearch'], googleSearchToVertex());
+ }
+ const fromGoogleSearchRetrieval = getValueByPath(fromObject, [
+ 'googleSearchRetrieval',
+ ]);
+ if (fromGoogleSearchRetrieval != null) {
+ setValueByPath(toObject, ['googleSearchRetrieval'], googleSearchRetrievalToVertex(apiClient, fromGoogleSearchRetrieval));
+ }
+ const fromCodeExecution = getValueByPath(fromObject, [
+ 'codeExecution',
+ ]);
+ if (fromCodeExecution != null) {
+ setValueByPath(toObject, ['codeExecution'], fromCodeExecution);
+ }
+ return toObject;
+}
+function functionCallingConfigToVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromMode = getValueByPath(fromObject, ['mode']);
+ if (fromMode != null) {
+ setValueByPath(toObject, ['mode'], fromMode);
+ }
+ const fromAllowedFunctionNames = getValueByPath(fromObject, [
+ 'allowedFunctionNames',
+ ]);
+ if (fromAllowedFunctionNames != null) {
+ setValueByPath(toObject, ['allowedFunctionNames'], fromAllowedFunctionNames);
+ }
+ return toObject;
+}
+function toolConfigToVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromFunctionCallingConfig = getValueByPath(fromObject, [
+ 'functionCallingConfig',
+ ]);
+ if (fromFunctionCallingConfig != null) {
+ setValueByPath(toObject, ['functionCallingConfig'], functionCallingConfigToVertex(apiClient, fromFunctionCallingConfig));
+ }
+ return toObject;
+}
+function prebuiltVoiceConfigToVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromVoiceName = getValueByPath(fromObject, ['voiceName']);
+ if (fromVoiceName != null) {
+ setValueByPath(toObject, ['voiceName'], fromVoiceName);
+ }
+ return toObject;
+}
+function voiceConfigToVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromPrebuiltVoiceConfig = getValueByPath(fromObject, [
+ 'prebuiltVoiceConfig',
+ ]);
+ if (fromPrebuiltVoiceConfig != null) {
+ setValueByPath(toObject, ['prebuiltVoiceConfig'], prebuiltVoiceConfigToVertex(apiClient, fromPrebuiltVoiceConfig));
+ }
+ return toObject;
+}
+function speechConfigToVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromVoiceConfig = getValueByPath(fromObject, ['voiceConfig']);
+ if (fromVoiceConfig != null) {
+ setValueByPath(toObject, ['voiceConfig'], voiceConfigToVertex(apiClient, fromVoiceConfig));
+ }
+ return toObject;
+}
+function thinkingConfigToVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromIncludeThoughts = getValueByPath(fromObject, [
+ 'includeThoughts',
+ ]);
+ if (fromIncludeThoughts != null) {
+ setValueByPath(toObject, ['includeThoughts'], fromIncludeThoughts);
+ }
+ return toObject;
+}
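+// generateContent (Vertex AI): sampling and decoding fields stay on the
+// returned object, which the caller nests under 'generationConfig', while
+// systemInstruction, safetySettings, tools, toolConfig, labels and
+// cachedContent are hoisted onto the parent request object.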
+function generateContentConfigToVertex(apiClient, fromObject, parentObject) {
+ const toObject = {};
+ const fromSystemInstruction = getValueByPath(fromObject, [
+ 'systemInstruction',
+ ]);
+ if (parentObject !== undefined && fromSystemInstruction != null) {
+ setValueByPath(parentObject, ['systemInstruction'], contentToVertex(apiClient, tContent(apiClient, fromSystemInstruction)));
+ }
+ const fromTemperature = getValueByPath(fromObject, ['temperature']);
+ if (fromTemperature != null) {
+ setValueByPath(toObject, ['temperature'], fromTemperature);
+ }
+ const fromTopP = getValueByPath(fromObject, ['topP']);
+ if (fromTopP != null) {
+ setValueByPath(toObject, ['topP'], fromTopP);
+ }
+ const fromTopK = getValueByPath(fromObject, ['topK']);
+ if (fromTopK != null) {
+ setValueByPath(toObject, ['topK'], fromTopK);
+ }
+ const fromCandidateCount = getValueByPath(fromObject, [
+ 'candidateCount',
+ ]);
+ if (fromCandidateCount != null) {
+ setValueByPath(toObject, ['candidateCount'], fromCandidateCount);
+ }
+ const fromMaxOutputTokens = getValueByPath(fromObject, [
+ 'maxOutputTokens',
+ ]);
+ if (fromMaxOutputTokens != null) {
+ setValueByPath(toObject, ['maxOutputTokens'], fromMaxOutputTokens);
+ }
+ const fromStopSequences = getValueByPath(fromObject, [
+ 'stopSequences',
+ ]);
+ if (fromStopSequences != null) {
+ setValueByPath(toObject, ['stopSequences'], fromStopSequences);
+ }
+ const fromResponseLogprobs = getValueByPath(fromObject, [
+ 'responseLogprobs',
+ ]);
+ if (fromResponseLogprobs != null) {
+ setValueByPath(toObject, ['responseLogprobs'], fromResponseLogprobs);
+ }
+ const fromLogprobs = getValueByPath(fromObject, ['logprobs']);
+ if (fromLogprobs != null) {
+ setValueByPath(toObject, ['logprobs'], fromLogprobs);
+ }
+ const fromPresencePenalty = getValueByPath(fromObject, [
+ 'presencePenalty',
+ ]);
+ if (fromPresencePenalty != null) {
+ setValueByPath(toObject, ['presencePenalty'], fromPresencePenalty);
+ }
+ const fromFrequencyPenalty = getValueByPath(fromObject, [
+ 'frequencyPenalty',
+ ]);
+ if (fromFrequencyPenalty != null) {
+ setValueByPath(toObject, ['frequencyPenalty'], fromFrequencyPenalty);
+ }
+ const fromSeed = getValueByPath(fromObject, ['seed']);
+ if (fromSeed != null) {
+ setValueByPath(toObject, ['seed'], fromSeed);
+ }
+ const fromResponseMimeType = getValueByPath(fromObject, [
+ 'responseMimeType',
+ ]);
+ if (fromResponseMimeType != null) {
+ setValueByPath(toObject, ['responseMimeType'], fromResponseMimeType);
+ }
+ const fromResponseSchema = getValueByPath(fromObject, [
+ 'responseSchema',
+ ]);
+ if (fromResponseSchema != null) {
+ setValueByPath(toObject, ['responseSchema'], schemaToVertex(apiClient, tSchema(apiClient, fromResponseSchema)));
+ }
+ const fromRoutingConfig = getValueByPath(fromObject, [
+ 'routingConfig',
+ ]);
+ if (fromRoutingConfig != null) {
+ setValueByPath(toObject, ['routingConfig'], fromRoutingConfig);
+ }
+ const fromSafetySettings = getValueByPath(fromObject, [
+ 'safetySettings',
+ ]);
+ if (parentObject !== undefined && fromSafetySettings != null) {
+ if (Array.isArray(fromSafetySettings)) {
+ setValueByPath(parentObject, ['safetySettings'], fromSafetySettings.map((item) => {
+ return safetySettingToVertex(apiClient, item);
+ }));
+ }
+ else {
+ setValueByPath(parentObject, ['safetySettings'], fromSafetySettings);
+ }
+ }
+ const fromTools = getValueByPath(fromObject, ['tools']);
+ if (parentObject !== undefined && fromTools != null) {
+ if (Array.isArray(fromTools)) {
+ setValueByPath(parentObject, ['tools'], tTools(apiClient, tTools(apiClient, fromTools).map((item) => {
+ return toolToVertex(apiClient, tTool(apiClient, item));
+ })));
+ }
+ else {
+ setValueByPath(parentObject, ['tools'], tTools(apiClient, fromTools));
+ }
+ }
+ const fromToolConfig = getValueByPath(fromObject, ['toolConfig']);
+ if (parentObject !== undefined && fromToolConfig != null) {
+ setValueByPath(parentObject, ['toolConfig'], toolConfigToVertex(apiClient, fromToolConfig));
+ }
+ const fromLabels = getValueByPath(fromObject, ['labels']);
+ if (parentObject !== undefined && fromLabels != null) {
+ setValueByPath(parentObject, ['labels'], fromLabels);
+ }
+ const fromCachedContent = getValueByPath(fromObject, [
+ 'cachedContent',
+ ]);
+ if (parentObject !== undefined && fromCachedContent != null) {
+ setValueByPath(parentObject, ['cachedContent'], tCachedContentName(apiClient, fromCachedContent));
+ }
+ const fromResponseModalities = getValueByPath(fromObject, [
+ 'responseModalities',
+ ]);
+ if (fromResponseModalities != null) {
+ setValueByPath(toObject, ['responseModalities'], fromResponseModalities);
+ }
+ const fromMediaResolution = getValueByPath(fromObject, [
+ 'mediaResolution',
+ ]);
+ if (fromMediaResolution != null) {
+ setValueByPath(toObject, ['mediaResolution'], fromMediaResolution);
+ }
+ const fromSpeechConfig = getValueByPath(fromObject, ['speechConfig']);
+ if (fromSpeechConfig != null) {
+ setValueByPath(toObject, ['speechConfig'], speechConfigToVertex(apiClient, tSpeechConfig(apiClient, fromSpeechConfig)));
+ }
+ const fromAudioTimestamp = getValueByPath(fromObject, [
+ 'audioTimestamp',
+ ]);
+ if (fromAudioTimestamp != null) {
+ setValueByPath(toObject, ['audioTimestamp'], fromAudioTimestamp);
+ }
+ const fromThinkingConfig = getValueByPath(fromObject, [
+ 'thinkingConfig',
+ ]);
+ if (fromThinkingConfig != null) {
+ setValueByPath(toObject, ['thinkingConfig'], thinkingConfigToVertex(apiClient, fromThinkingConfig));
+ }
+ return toObject;
+}
+function generateContentParametersToVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromModel = getValueByPath(fromObject, ['model']);
+ if (fromModel != null) {
+ setValueByPath(toObject, ['_url', 'model'], tModel(apiClient, fromModel));
+ }
+ const fromContents = getValueByPath(fromObject, ['contents']);
+ if (fromContents != null) {
+ if (Array.isArray(fromContents)) {
+ setValueByPath(toObject, ['contents'], tContents(apiClient, tContents(apiClient, fromContents).map((item) => {
+ return contentToVertex(apiClient, item);
+ })));
+ }
+ else {
+ setValueByPath(toObject, ['contents'], tContents(apiClient, fromContents));
+ }
+ }
+ const fromConfig = getValueByPath(fromObject, ['config']);
+ if (fromConfig != null) {
+ setValueByPath(toObject, ['generationConfig'], generateContentConfigToVertex(apiClient, fromConfig, toObject));
+ }
+ return toObject;
+}
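+// embedContent (Vertex AI): requests are shaped as 'instances[]' with a
+// snake_case 'task_type' key, and mimeType/autoTruncate are supported here,
+// unlike in the Gemini API converter above.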
+function embedContentConfigToVertex(apiClient, fromObject, parentObject) {
+ const toObject = {};
+ const fromTaskType = getValueByPath(fromObject, ['taskType']);
+ if (parentObject !== undefined && fromTaskType != null) {
+ setValueByPath(parentObject, ['instances[]', 'task_type'], fromTaskType);
+ }
+ const fromTitle = getValueByPath(fromObject, ['title']);
+ if (parentObject !== undefined && fromTitle != null) {
+ setValueByPath(parentObject, ['instances[]', 'title'], fromTitle);
+ }
+ const fromOutputDimensionality = getValueByPath(fromObject, [
+ 'outputDimensionality',
+ ]);
+ if (parentObject !== undefined && fromOutputDimensionality != null) {
+ setValueByPath(parentObject, ['parameters', 'outputDimensionality'], fromOutputDimensionality);
+ }
+ const fromMimeType = getValueByPath(fromObject, ['mimeType']);
+ if (parentObject !== undefined && fromMimeType != null) {
+ setValueByPath(parentObject, ['instances[]', 'mimeType'], fromMimeType);
+ }
+ const fromAutoTruncate = getValueByPath(fromObject, ['autoTruncate']);
+ if (parentObject !== undefined && fromAutoTruncate != null) {
+ setValueByPath(parentObject, ['parameters', 'autoTruncate'], fromAutoTruncate);
+ }
+ return toObject;
+}
+function embedContentParametersToVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromModel = getValueByPath(fromObject, ['model']);
+ if (fromModel != null) {
+ setValueByPath(toObject, ['_url', 'model'], tModel(apiClient, fromModel));
+ }
+ const fromContents = getValueByPath(fromObject, ['contents']);
+ if (fromContents != null) {
+ setValueByPath(toObject, ['instances[]', 'content'], tContentsForEmbed(apiClient, fromContents));
+ }
+ const fromConfig = getValueByPath(fromObject, ['config']);
+ if (fromConfig != null) {
+ setValueByPath(toObject, ['config'], embedContentConfigToVertex(apiClient, fromConfig, toObject));
+ }
+ return toObject;
+}
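+// generateImages (Vertex AI): the fields the Gemini API converter rejects
+// (outputGcsUri, negativePrompt, seed, addWatermark, enhancePrompt) all map
+// to 'parameters' entries here; outputGcsUri becomes 'storageUri'.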
+function generateImagesConfigToVertex(apiClient, fromObject, parentObject) {
+ const toObject = {};
+ const fromOutputGcsUri = getValueByPath(fromObject, ['outputGcsUri']);
+ if (parentObject !== undefined && fromOutputGcsUri != null) {
+ setValueByPath(parentObject, ['parameters', 'storageUri'], fromOutputGcsUri);
+ }
+ const fromNegativePrompt = getValueByPath(fromObject, [
+ 'negativePrompt',
+ ]);
+ if (parentObject !== undefined && fromNegativePrompt != null) {
+ setValueByPath(parentObject, ['parameters', 'negativePrompt'], fromNegativePrompt);
+ }
+ const fromNumberOfImages = getValueByPath(fromObject, [
+ 'numberOfImages',
+ ]);
+ if (parentObject !== undefined && fromNumberOfImages != null) {
+ setValueByPath(parentObject, ['parameters', 'sampleCount'], fromNumberOfImages);
+ }
+ const fromAspectRatio = getValueByPath(fromObject, ['aspectRatio']);
+ if (parentObject !== undefined && fromAspectRatio != null) {
+ setValueByPath(parentObject, ['parameters', 'aspectRatio'], fromAspectRatio);
+ }
+ const fromGuidanceScale = getValueByPath(fromObject, [
+ 'guidanceScale',
+ ]);
+ if (parentObject !== undefined && fromGuidanceScale != null) {
+ setValueByPath(parentObject, ['parameters', 'guidanceScale'], fromGuidanceScale);
+ }
+ const fromSeed = getValueByPath(fromObject, ['seed']);
+ if (parentObject !== undefined && fromSeed != null) {
+ setValueByPath(parentObject, ['parameters', 'seed'], fromSeed);
+ }
+ const fromSafetyFilterLevel = getValueByPath(fromObject, [
+ 'safetyFilterLevel',
+ ]);
+ if (parentObject !== undefined && fromSafetyFilterLevel != null) {
+ setValueByPath(parentObject, ['parameters', 'safetySetting'], fromSafetyFilterLevel);
+ }
+ const fromPersonGeneration = getValueByPath(fromObject, [
+ 'personGeneration',
+ ]);
+ if (parentObject !== undefined && fromPersonGeneration != null) {
+ setValueByPath(parentObject, ['parameters', 'personGeneration'], fromPersonGeneration);
+ }
+ const fromIncludeSafetyAttributes = getValueByPath(fromObject, [
+ 'includeSafetyAttributes',
+ ]);
+ if (parentObject !== undefined && fromIncludeSafetyAttributes != null) {
+ setValueByPath(parentObject, ['parameters', 'includeSafetyAttributes'], fromIncludeSafetyAttributes);
+ }
+ const fromIncludeRaiReason = getValueByPath(fromObject, [
+ 'includeRaiReason',
+ ]);
+ if (parentObject !== undefined && fromIncludeRaiReason != null) {
+ setValueByPath(parentObject, ['parameters', 'includeRaiReason'], fromIncludeRaiReason);
+ }
+ const fromLanguage = getValueByPath(fromObject, ['language']);
+ if (parentObject !== undefined && fromLanguage != null) {
+ setValueByPath(parentObject, ['parameters', 'language'], fromLanguage);
+ }
+ const fromOutputMimeType = getValueByPath(fromObject, [
+ 'outputMimeType',
+ ]);
+ if (parentObject !== undefined && fromOutputMimeType != null) {
+ setValueByPath(parentObject, ['parameters', 'outputOptions', 'mimeType'], fromOutputMimeType);
+ }
+ const fromOutputCompressionQuality = getValueByPath(fromObject, [
+ 'outputCompressionQuality',
+ ]);
+ if (parentObject !== undefined && fromOutputCompressionQuality != null) {
+ setValueByPath(parentObject, ['parameters', 'outputOptions', 'compressionQuality'], fromOutputCompressionQuality);
+ }
+ const fromAddWatermark = getValueByPath(fromObject, ['addWatermark']);
+ if (parentObject !== undefined && fromAddWatermark != null) {
+ setValueByPath(parentObject, ['parameters', 'addWatermark'], fromAddWatermark);
+ }
+ const fromEnhancePrompt = getValueByPath(fromObject, [
+ 'enhancePrompt',
+ ]);
+ if (parentObject !== undefined && fromEnhancePrompt != null) {
+ setValueByPath(parentObject, ['parameters', 'enhancePrompt'], fromEnhancePrompt);
+ }
+ return toObject;
+}
+function generateImagesParametersToVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromModel = getValueByPath(fromObject, ['model']);
+ if (fromModel != null) {
+ setValueByPath(toObject, ['_url', 'model'], tModel(apiClient, fromModel));
+ }
+ const fromPrompt = getValueByPath(fromObject, ['prompt']);
+ if (fromPrompt != null) {
+ setValueByPath(toObject, ['instances[0]', 'prompt'], fromPrompt);
+ }
+ const fromConfig = getValueByPath(fromObject, ['config']);
+ if (fromConfig != null) {
+ setValueByPath(toObject, ['config'], generateImagesConfigToVertex(apiClient, fromConfig, toObject));
+ }
+ return toObject;
+}
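+// countTokens (Vertex AI): systemInstruction, tools and generationConfig are
+// accepted and hoisted onto the parent request rather than rejected.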
+function countTokensConfigToVertex(apiClient, fromObject, parentObject) {
+ const toObject = {};
+ const fromSystemInstruction = getValueByPath(fromObject, [
+ 'systemInstruction',
+ ]);
+ if (parentObject !== undefined && fromSystemInstruction != null) {
+ setValueByPath(parentObject, ['systemInstruction'], contentToVertex(apiClient, tContent(apiClient, fromSystemInstruction)));
+ }
+ const fromTools = getValueByPath(fromObject, ['tools']);
+ if (parentObject !== undefined && fromTools != null) {
+ if (Array.isArray(fromTools)) {
+ setValueByPath(parentObject, ['tools'], fromTools.map((item) => {
+ return toolToVertex(apiClient, item);
+ }));
+ }
+ else {
+ setValueByPath(parentObject, ['tools'], fromTools);
+ }
+ }
+ const fromGenerationConfig = getValueByPath(fromObject, [
+ 'generationConfig',
+ ]);
+ if (parentObject !== undefined && fromGenerationConfig != null) {
+ setValueByPath(parentObject, ['generationConfig'], fromGenerationConfig);
+ }
+ return toObject;
+}
+function countTokensParametersToVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromModel = getValueByPath(fromObject, ['model']);
+ if (fromModel != null) {
+ setValueByPath(toObject, ['_url', 'model'], tModel(apiClient, fromModel));
+ }
+ const fromContents = getValueByPath(fromObject, ['contents']);
+ if (fromContents != null) {
+ if (Array.isArray(fromContents)) {
+ setValueByPath(toObject, ['contents'], tContents(apiClient, tContents(apiClient, fromContents).map((item) => {
+ return contentToVertex(apiClient, item);
+ })));
+ }
+ else {
+ setValueByPath(toObject, ['contents'], tContents(apiClient, fromContents));
+ }
+ }
+ const fromConfig = getValueByPath(fromObject, ['config']);
+ if (fromConfig != null) {
+ setValueByPath(toObject, ['config'], countTokensConfigToVertex(apiClient, fromConfig, toObject));
+ }
+ return toObject;
+}
+function computeTokensParametersToVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromModel = getValueByPath(fromObject, ['model']);
+ if (fromModel != null) {
+ setValueByPath(toObject, ['_url', 'model'], tModel(apiClient, fromModel));
+ }
+ const fromContents = getValueByPath(fromObject, ['contents']);
+ if (fromContents != null) {
+ if (Array.isArray(fromContents)) {
+ setValueByPath(toObject, ['contents'], tContents(apiClient, tContents(apiClient, fromContents).map((item) => {
+ return contentToVertex(apiClient, item);
+ })));
+ }
+ else {
+ setValueByPath(toObject, ['contents'], tContents(apiClient, fromContents));
+ }
+ }
+ const fromConfig = getValueByPath(fromObject, ['config']);
+ if (fromConfig != null) {
+ setValueByPath(toObject, ['config'], fromConfig);
+ }
+ return toObject;
+}
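+// generateVideos (Vertex AI): image inputs may reference a gcsUri as well as
+// inline bytes, and the full option set (fps, seed, resolution, pubsubTopic,
+// negativePrompt, enhancePrompt) is forwarded under 'parameters'.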
+function imageToVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromGcsUri = getValueByPath(fromObject, ['gcsUri']);
+ if (fromGcsUri != null) {
+ setValueByPath(toObject, ['gcsUri'], fromGcsUri);
+ }
+ const fromImageBytes = getValueByPath(fromObject, ['imageBytes']);
+ if (fromImageBytes != null) {
+ setValueByPath(toObject, ['bytesBase64Encoded'], tBytes(apiClient, fromImageBytes));
+ }
+ const fromMimeType = getValueByPath(fromObject, ['mimeType']);
+ if (fromMimeType != null) {
+ setValueByPath(toObject, ['mimeType'], fromMimeType);
+ }
+ return toObject;
+}
+function generateVideosConfigToVertex(apiClient, fromObject, parentObject) {
+ const toObject = {};
+ const fromNumberOfVideos = getValueByPath(fromObject, [
+ 'numberOfVideos',
+ ]);
+ if (parentObject !== undefined && fromNumberOfVideos != null) {
+ setValueByPath(parentObject, ['parameters', 'sampleCount'], fromNumberOfVideos);
+ }
+ const fromOutputGcsUri = getValueByPath(fromObject, ['outputGcsUri']);
+ if (parentObject !== undefined && fromOutputGcsUri != null) {
+ setValueByPath(parentObject, ['parameters', 'storageUri'], fromOutputGcsUri);
+ }
+ const fromFps = getValueByPath(fromObject, ['fps']);
+ if (parentObject !== undefined && fromFps != null) {
+ setValueByPath(parentObject, ['parameters', 'fps'], fromFps);
+ }
+ const fromDurationSeconds = getValueByPath(fromObject, [
+ 'durationSeconds',
+ ]);
+ if (parentObject !== undefined && fromDurationSeconds != null) {
+ setValueByPath(parentObject, ['parameters', 'durationSeconds'], fromDurationSeconds);
+ }
+ const fromSeed = getValueByPath(fromObject, ['seed']);
+ if (parentObject !== undefined && fromSeed != null) {
+ setValueByPath(parentObject, ['parameters', 'seed'], fromSeed);
+ }
+ const fromAspectRatio = getValueByPath(fromObject, ['aspectRatio']);
+ if (parentObject !== undefined && fromAspectRatio != null) {
+ setValueByPath(parentObject, ['parameters', 'aspectRatio'], fromAspectRatio);
+ }
+ const fromResolution = getValueByPath(fromObject, ['resolution']);
+ if (parentObject !== undefined && fromResolution != null) {
+ setValueByPath(parentObject, ['parameters', 'resolution'], fromResolution);
+ }
+ const fromPersonGeneration = getValueByPath(fromObject, [
+ 'personGeneration',
+ ]);
+ if (parentObject !== undefined && fromPersonGeneration != null) {
+ setValueByPath(parentObject, ['parameters', 'personGeneration'], fromPersonGeneration);
+ }
+ const fromPubsubTopic = getValueByPath(fromObject, ['pubsubTopic']);
+ if (parentObject !== undefined && fromPubsubTopic != null) {
+ setValueByPath(parentObject, ['parameters', 'pubsubTopic'], fromPubsubTopic);
+ }
+ const fromNegativePrompt = getValueByPath(fromObject, [
+ 'negativePrompt',
+ ]);
+ if (parentObject !== undefined && fromNegativePrompt != null) {
+ setValueByPath(parentObject, ['parameters', 'negativePrompt'], fromNegativePrompt);
+ }
+ const fromEnhancePrompt = getValueByPath(fromObject, [
+ 'enhancePrompt',
+ ]);
+ if (parentObject !== undefined && fromEnhancePrompt != null) {
+ setValueByPath(parentObject, ['parameters', 'enhancePrompt'], fromEnhancePrompt);
+ }
+ return toObject;
+}
+function generateVideosParametersToVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromModel = getValueByPath(fromObject, ['model']);
+ if (fromModel != null) {
+ setValueByPath(toObject, ['_url', 'model'], tModel(apiClient, fromModel));
+ }
+ const fromPrompt = getValueByPath(fromObject, ['prompt']);
+ if (fromPrompt != null) {
+ setValueByPath(toObject, ['instances[0]', 'prompt'], fromPrompt);
+ }
+ const fromImage = getValueByPath(fromObject, ['image']);
+ if (fromImage != null) {
+ setValueByPath(toObject, ['instances[0]', 'image'], imageToVertex(apiClient, fromImage));
+ }
+ const fromConfig = getValueByPath(fromObject, ['config']);
+ if (fromConfig != null) {
+ setValueByPath(toObject, ['config'], generateVideosConfigToVertex(apiClient, fromConfig, toObject));
+ }
+ return toObject;
+}
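+// Response converters start here: these map Gemini Developer API wire
+// responses back into the SDK's camelCase types ("FromMldev"); the Vertex AI
+// equivalents ("FromVertex") follow further below.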
+function partFromMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromThought = getValueByPath(fromObject, ['thought']);
+ if (fromThought != null) {
+ setValueByPath(toObject, ['thought'], fromThought);
+ }
+ const fromCodeExecutionResult = getValueByPath(fromObject, [
+ 'codeExecutionResult',
+ ]);
+ if (fromCodeExecutionResult != null) {
+ setValueByPath(toObject, ['codeExecutionResult'], fromCodeExecutionResult);
+ }
+ const fromExecutableCode = getValueByPath(fromObject, [
+ 'executableCode',
+ ]);
+ if (fromExecutableCode != null) {
+ setValueByPath(toObject, ['executableCode'], fromExecutableCode);
+ }
+ const fromFileData = getValueByPath(fromObject, ['fileData']);
+ if (fromFileData != null) {
+ setValueByPath(toObject, ['fileData'], fromFileData);
+ }
+ const fromFunctionCall = getValueByPath(fromObject, ['functionCall']);
+ if (fromFunctionCall != null) {
+ setValueByPath(toObject, ['functionCall'], fromFunctionCall);
+ }
+ const fromFunctionResponse = getValueByPath(fromObject, [
+ 'functionResponse',
+ ]);
+ if (fromFunctionResponse != null) {
+ setValueByPath(toObject, ['functionResponse'], fromFunctionResponse);
+ }
+ const fromInlineData = getValueByPath(fromObject, ['inlineData']);
+ if (fromInlineData != null) {
+ setValueByPath(toObject, ['inlineData'], fromInlineData);
+ }
+ const fromText = getValueByPath(fromObject, ['text']);
+ if (fromText != null) {
+ setValueByPath(toObject, ['text'], fromText);
+ }
+ return toObject;
+}
+function contentFromMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromParts = getValueByPath(fromObject, ['parts']);
+ if (fromParts != null) {
+ if (Array.isArray(fromParts)) {
+ setValueByPath(toObject, ['parts'], fromParts.map((item) => {
+ return partFromMldev(apiClient, item);
+ }));
+ }
+ else {
+ setValueByPath(toObject, ['parts'], fromParts);
+ }
+ }
+ const fromRole = getValueByPath(fromObject, ['role']);
+ if (fromRole != null) {
+ setValueByPath(toObject, ['role'], fromRole);
+ }
+ return toObject;
+}
+function citationMetadataFromMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromCitations = getValueByPath(fromObject, ['citationSources']);
+ if (fromCitations != null) {
+ setValueByPath(toObject, ['citations'], fromCitations);
+ }
+ return toObject;
+}
+function candidateFromMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromContent = getValueByPath(fromObject, ['content']);
+ if (fromContent != null) {
+ setValueByPath(toObject, ['content'], contentFromMldev(apiClient, fromContent));
+ }
+ const fromCitationMetadata = getValueByPath(fromObject, [
+ 'citationMetadata',
+ ]);
+ if (fromCitationMetadata != null) {
+ setValueByPath(toObject, ['citationMetadata'], citationMetadataFromMldev(apiClient, fromCitationMetadata));
+ }
+ const fromTokenCount = getValueByPath(fromObject, ['tokenCount']);
+ if (fromTokenCount != null) {
+ setValueByPath(toObject, ['tokenCount'], fromTokenCount);
+ }
+ const fromFinishReason = getValueByPath(fromObject, ['finishReason']);
+ if (fromFinishReason != null) {
+ setValueByPath(toObject, ['finishReason'], fromFinishReason);
+ }
+ const fromAvgLogprobs = getValueByPath(fromObject, ['avgLogprobs']);
+ if (fromAvgLogprobs != null) {
+ setValueByPath(toObject, ['avgLogprobs'], fromAvgLogprobs);
+ }
+ const fromGroundingMetadata = getValueByPath(fromObject, [
+ 'groundingMetadata',
+ ]);
+ if (fromGroundingMetadata != null) {
+ setValueByPath(toObject, ['groundingMetadata'], fromGroundingMetadata);
+ }
+ const fromIndex = getValueByPath(fromObject, ['index']);
+ if (fromIndex != null) {
+ setValueByPath(toObject, ['index'], fromIndex);
+ }
+ const fromLogprobsResult = getValueByPath(fromObject, [
+ 'logprobsResult',
+ ]);
+ if (fromLogprobsResult != null) {
+ setValueByPath(toObject, ['logprobsResult'], fromLogprobsResult);
+ }
+ const fromSafetyRatings = getValueByPath(fromObject, [
+ 'safetyRatings',
+ ]);
+ if (fromSafetyRatings != null) {
+ setValueByPath(toObject, ['safetyRatings'], fromSafetyRatings);
+ }
+ return toObject;
+}
+function generateContentResponseFromMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromCandidates = getValueByPath(fromObject, ['candidates']);
+ if (fromCandidates != null) {
+ if (Array.isArray(fromCandidates)) {
+ setValueByPath(toObject, ['candidates'], fromCandidates.map((item) => {
+ return candidateFromMldev(apiClient, item);
+ }));
+ }
+ else {
+ setValueByPath(toObject, ['candidates'], fromCandidates);
+ }
+ }
+ const fromModelVersion = getValueByPath(fromObject, ['modelVersion']);
+ if (fromModelVersion != null) {
+ setValueByPath(toObject, ['modelVersion'], fromModelVersion);
+ }
+ const fromPromptFeedback = getValueByPath(fromObject, [
+ 'promptFeedback',
+ ]);
+ if (fromPromptFeedback != null) {
+ setValueByPath(toObject, ['promptFeedback'], fromPromptFeedback);
+ }
+ const fromUsageMetadata = getValueByPath(fromObject, [
+ 'usageMetadata',
+ ]);
+ if (fromUsageMetadata != null) {
+ setValueByPath(toObject, ['usageMetadata'], fromUsageMetadata);
+ }
+ return toObject;
+}
+function contentEmbeddingFromMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromValues = getValueByPath(fromObject, ['values']);
+ if (fromValues != null) {
+ setValueByPath(toObject, ['values'], fromValues);
+ }
+ return toObject;
+}
+function embedContentMetadataFromMldev() {
+ const toObject = {};
+ return toObject;
+}
+function embedContentResponseFromMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromEmbeddings = getValueByPath(fromObject, ['embeddings']);
+ if (fromEmbeddings != null) {
+ if (Array.isArray(fromEmbeddings)) {
+ setValueByPath(toObject, ['embeddings'], fromEmbeddings.map((item) => {
+ return contentEmbeddingFromMldev(apiClient, item);
+ }));
+ }
+ else {
+ setValueByPath(toObject, ['embeddings'], fromEmbeddings);
+ }
+ }
+ const fromMetadata = getValueByPath(fromObject, ['metadata']);
+ if (fromMetadata != null) {
+ setValueByPath(toObject, ['metadata'], embedContentMetadataFromMldev());
+ }
+ return toObject;
+}
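+// Imagen-style responses arrive under 'predictions'. The '_self' path used
+// below appears to resolve to the source object itself, letting a single
+// prediction populate both the image payload and its safety attributes.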
+function imageFromMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromImageBytes = getValueByPath(fromObject, [
+ 'bytesBase64Encoded',
+ ]);
+ if (fromImageBytes != null) {
+ setValueByPath(toObject, ['imageBytes'], tBytes(apiClient, fromImageBytes));
+ }
+ const fromMimeType = getValueByPath(fromObject, ['mimeType']);
+ if (fromMimeType != null) {
+ setValueByPath(toObject, ['mimeType'], fromMimeType);
+ }
+ return toObject;
+}
+function safetyAttributesFromMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromCategories = getValueByPath(fromObject, [
+ 'safetyAttributes',
+ 'categories',
+ ]);
+ if (fromCategories != null) {
+ setValueByPath(toObject, ['categories'], fromCategories);
+ }
+ const fromScores = getValueByPath(fromObject, [
+ 'safetyAttributes',
+ 'scores',
+ ]);
+ if (fromScores != null) {
+ setValueByPath(toObject, ['scores'], fromScores);
+ }
+ const fromContentType = getValueByPath(fromObject, ['contentType']);
+ if (fromContentType != null) {
+ setValueByPath(toObject, ['contentType'], fromContentType);
+ }
+ return toObject;
+}
+function generatedImageFromMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromImage = getValueByPath(fromObject, ['_self']);
+ if (fromImage != null) {
+ setValueByPath(toObject, ['image'], imageFromMldev(apiClient, fromImage));
+ }
+ const fromRaiFilteredReason = getValueByPath(fromObject, [
+ 'raiFilteredReason',
+ ]);
+ if (fromRaiFilteredReason != null) {
+ setValueByPath(toObject, ['raiFilteredReason'], fromRaiFilteredReason);
+ }
+ const fromSafetyAttributes = getValueByPath(fromObject, ['_self']);
+ if (fromSafetyAttributes != null) {
+ setValueByPath(toObject, ['safetyAttributes'], safetyAttributesFromMldev(apiClient, fromSafetyAttributes));
+ }
+ return toObject;
+}
+function generateImagesResponseFromMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromGeneratedImages = getValueByPath(fromObject, [
+ 'predictions',
+ ]);
+ if (fromGeneratedImages != null) {
+ if (Array.isArray(fromGeneratedImages)) {
+ setValueByPath(toObject, ['generatedImages'], fromGeneratedImages.map((item) => {
+ return generatedImageFromMldev(apiClient, item);
+ }));
+ }
+ else {
+ setValueByPath(toObject, ['generatedImages'], fromGeneratedImages);
+ }
+ }
+ const fromPositivePromptSafetyAttributes = getValueByPath(fromObject, [
+ 'positivePromptSafetyAttributes',
+ ]);
+ if (fromPositivePromptSafetyAttributes != null) {
+ setValueByPath(toObject, ['positivePromptSafetyAttributes'], safetyAttributesFromMldev(apiClient, fromPositivePromptSafetyAttributes));
+ }
+ return toObject;
+}
+function countTokensResponseFromMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromTotalTokens = getValueByPath(fromObject, ['totalTokens']);
+ if (fromTotalTokens != null) {
+ setValueByPath(toObject, ['totalTokens'], fromTotalTokens);
+ }
+ const fromCachedContentTokenCount = getValueByPath(fromObject, [
+ 'cachedContentTokenCount',
+ ]);
+ if (fromCachedContentTokenCount != null) {
+ setValueByPath(toObject, ['cachedContentTokenCount'], fromCachedContentTokenCount);
+ }
+ return toObject;
+}
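+// The '$1' suffixes on the video helpers look like bundler renames for
+// identifiers that collide with same-named functions elsewhere in the bundle.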
+function videoFromMldev$1(apiClient, fromObject) {
+ const toObject = {};
+ const fromUri = getValueByPath(fromObject, ['video', 'uri']);
+ if (fromUri != null) {
+ setValueByPath(toObject, ['uri'], fromUri);
+ }
+ const fromVideoBytes = getValueByPath(fromObject, [
+ 'video',
+ 'encodedVideo',
+ ]);
+ if (fromVideoBytes != null) {
+ setValueByPath(toObject, ['videoBytes'], tBytes(apiClient, fromVideoBytes));
+ }
+ const fromMimeType = getValueByPath(fromObject, ['encoding']);
+ if (fromMimeType != null) {
+ setValueByPath(toObject, ['mimeType'], fromMimeType);
+ }
+ return toObject;
+}
+function generatedVideoFromMldev$1(apiClient, fromObject) {
+ const toObject = {};
+ const fromVideo = getValueByPath(fromObject, ['_self']);
+ if (fromVideo != null) {
+ setValueByPath(toObject, ['video'], videoFromMldev$1(apiClient, fromVideo));
+ }
+ return toObject;
+}
+function generateVideosResponseFromMldev$1(apiClient, fromObject) {
+ const toObject = {};
+ const fromGeneratedVideos = getValueByPath(fromObject, [
+ 'generatedSamples',
+ ]);
+ if (fromGeneratedVideos != null) {
+ if (Array.isArray(fromGeneratedVideos)) {
+ setValueByPath(toObject, ['generatedVideos'], fromGeneratedVideos.map((item) => {
+ return generatedVideoFromMldev$1(apiClient, item);
+ }));
+ }
+ else {
+ setValueByPath(toObject, ['generatedVideos'], fromGeneratedVideos);
+ }
+ }
+ const fromRaiMediaFilteredCount = getValueByPath(fromObject, [
+ 'raiMediaFilteredCount',
+ ]);
+ if (fromRaiMediaFilteredCount != null) {
+ setValueByPath(toObject, ['raiMediaFilteredCount'], fromRaiMediaFilteredCount);
+ }
+ const fromRaiMediaFilteredReasons = getValueByPath(fromObject, [
+ 'raiMediaFilteredReasons',
+ ]);
+ if (fromRaiMediaFilteredReasons != null) {
+ setValueByPath(toObject, ['raiMediaFilteredReasons'], fromRaiMediaFilteredReasons);
+ }
+ return toObject;
+}
+function generateVideosOperationFromMldev$1(apiClient, fromObject) {
+ const toObject = {};
+ const fromName = getValueByPath(fromObject, ['name']);
+ if (fromName != null) {
+ setValueByPath(toObject, ['name'], fromName);
+ }
+ const fromMetadata = getValueByPath(fromObject, ['metadata']);
+ if (fromMetadata != null) {
+ setValueByPath(toObject, ['metadata'], fromMetadata);
+ }
+ const fromDone = getValueByPath(fromObject, ['done']);
+ if (fromDone != null) {
+ setValueByPath(toObject, ['done'], fromDone);
+ }
+ const fromError = getValueByPath(fromObject, ['error']);
+ if (fromError != null) {
+ setValueByPath(toObject, ['error'], fromError);
+ }
+ const fromResponse = getValueByPath(fromObject, ['response']);
+ if (fromResponse != null) {
+ setValueByPath(toObject, ['response'], fromResponse);
+ }
+ const fromResult = getValueByPath(fromObject, [
+ 'response',
+ 'generateVideoResponse',
+ ]);
+ if (fromResult != null) {
+ setValueByPath(toObject, ['result'], generateVideosResponseFromMldev$1(apiClient, fromResult));
+ }
+ return toObject;
+}
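+// Response converters for the Vertex AI surface. Unlike partFromMldev above,
+// partFromVertex also restores videoMetadata.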
+function partFromVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromVideoMetadata = getValueByPath(fromObject, [
+ 'videoMetadata',
+ ]);
+ if (fromVideoMetadata != null) {
+ setValueByPath(toObject, ['videoMetadata'], fromVideoMetadata);
+ }
+ const fromThought = getValueByPath(fromObject, ['thought']);
+ if (fromThought != null) {
+ setValueByPath(toObject, ['thought'], fromThought);
+ }
+ const fromCodeExecutionResult = getValueByPath(fromObject, [
+ 'codeExecutionResult',
+ ]);
+ if (fromCodeExecutionResult != null) {
+ setValueByPath(toObject, ['codeExecutionResult'], fromCodeExecutionResult);
+ }
+ const fromExecutableCode = getValueByPath(fromObject, [
+ 'executableCode',
+ ]);
+ if (fromExecutableCode != null) {
+ setValueByPath(toObject, ['executableCode'], fromExecutableCode);
+ }
+ const fromFileData = getValueByPath(fromObject, ['fileData']);
+ if (fromFileData != null) {
+ setValueByPath(toObject, ['fileData'], fromFileData);
+ }
+ const fromFunctionCall = getValueByPath(fromObject, ['functionCall']);
+ if (fromFunctionCall != null) {
+ setValueByPath(toObject, ['functionCall'], fromFunctionCall);
+ }
+ const fromFunctionResponse = getValueByPath(fromObject, [
+ 'functionResponse',
+ ]);
+ if (fromFunctionResponse != null) {
+ setValueByPath(toObject, ['functionResponse'], fromFunctionResponse);
+ }
+ const fromInlineData = getValueByPath(fromObject, ['inlineData']);
+ if (fromInlineData != null) {
+ setValueByPath(toObject, ['inlineData'], fromInlineData);
+ }
+ const fromText = getValueByPath(fromObject, ['text']);
+ if (fromText != null) {
+ setValueByPath(toObject, ['text'], fromText);
+ }
+ return toObject;
+}
+function contentFromVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromParts = getValueByPath(fromObject, ['parts']);
+ if (fromParts != null) {
+ if (Array.isArray(fromParts)) {
+ setValueByPath(toObject, ['parts'], fromParts.map((item) => {
+ return partFromVertex(apiClient, item);
+ }));
+ }
+ else {
+ setValueByPath(toObject, ['parts'], fromParts);
+ }
+ }
+ const fromRole = getValueByPath(fromObject, ['role']);
+ if (fromRole != null) {
+ setValueByPath(toObject, ['role'], fromRole);
+ }
+ return toObject;
+}
+function citationMetadataFromVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromCitations = getValueByPath(fromObject, ['citations']);
+ if (fromCitations != null) {
+ setValueByPath(toObject, ['citations'], fromCitations);
+ }
+ return toObject;
+}
+function candidateFromVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromContent = getValueByPath(fromObject, ['content']);
+ if (fromContent != null) {
+ setValueByPath(toObject, ['content'], contentFromVertex(apiClient, fromContent));
+ }
+ const fromCitationMetadata = getValueByPath(fromObject, [
+ 'citationMetadata',
+ ]);
+ if (fromCitationMetadata != null) {
+ setValueByPath(toObject, ['citationMetadata'], citationMetadataFromVertex(apiClient, fromCitationMetadata));
+ }
+ const fromFinishMessage = getValueByPath(fromObject, [
+ 'finishMessage',
+ ]);
+ if (fromFinishMessage != null) {
+ setValueByPath(toObject, ['finishMessage'], fromFinishMessage);
+ }
+ const fromFinishReason = getValueByPath(fromObject, ['finishReason']);
+ if (fromFinishReason != null) {
+ setValueByPath(toObject, ['finishReason'], fromFinishReason);
+ }
+ const fromAvgLogprobs = getValueByPath(fromObject, ['avgLogprobs']);
+ if (fromAvgLogprobs != null) {
+ setValueByPath(toObject, ['avgLogprobs'], fromAvgLogprobs);
+ }
+ const fromGroundingMetadata = getValueByPath(fromObject, [
+ 'groundingMetadata',
+ ]);
+ if (fromGroundingMetadata != null) {
+ setValueByPath(toObject, ['groundingMetadata'], fromGroundingMetadata);
+ }
+ const fromIndex = getValueByPath(fromObject, ['index']);
+ if (fromIndex != null) {
+ setValueByPath(toObject, ['index'], fromIndex);
+ }
+ const fromLogprobsResult = getValueByPath(fromObject, [
+ 'logprobsResult',
+ ]);
+ if (fromLogprobsResult != null) {
+ setValueByPath(toObject, ['logprobsResult'], fromLogprobsResult);
+ }
+ const fromSafetyRatings = getValueByPath(fromObject, [
+ 'safetyRatings',
+ ]);
+ if (fromSafetyRatings != null) {
+ setValueByPath(toObject, ['safetyRatings'], fromSafetyRatings);
+ }
+ return toObject;
+}
+function generateContentResponseFromVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromCandidates = getValueByPath(fromObject, ['candidates']);
+ if (fromCandidates != null) {
+ if (Array.isArray(fromCandidates)) {
+ setValueByPath(toObject, ['candidates'], fromCandidates.map((item) => {
+ return candidateFromVertex(apiClient, item);
+ }));
+ }
+ else {
+ setValueByPath(toObject, ['candidates'], fromCandidates);
+ }
+ }
+ const fromCreateTime = getValueByPath(fromObject, ['createTime']);
+ if (fromCreateTime != null) {
+ setValueByPath(toObject, ['createTime'], fromCreateTime);
+ }
+ const fromResponseId = getValueByPath(fromObject, ['responseId']);
+ if (fromResponseId != null) {
+ setValueByPath(toObject, ['responseId'], fromResponseId);
+ }
+ const fromModelVersion = getValueByPath(fromObject, ['modelVersion']);
+ if (fromModelVersion != null) {
+ setValueByPath(toObject, ['modelVersion'], fromModelVersion);
+ }
+ const fromPromptFeedback = getValueByPath(fromObject, [
+ 'promptFeedback',
+ ]);
+ if (fromPromptFeedback != null) {
+ setValueByPath(toObject, ['promptFeedback'], fromPromptFeedback);
+ }
+ const fromUsageMetadata = getValueByPath(fromObject, [
+ 'usageMetadata',
+ ]);
+ if (fromUsageMetadata != null) {
+ setValueByPath(toObject, ['usageMetadata'], fromUsageMetadata);
+ }
+ return toObject;
+}
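+// Vertex embeddings come back under 'predictions[]' -> 'embeddings'; note the
+// wire field 'token_count' is snake_case and is renamed to camelCase
+// 'tokenCount'.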
+function contentEmbeddingStatisticsFromVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromTruncated = getValueByPath(fromObject, ['truncated']);
+ if (fromTruncated != null) {
+ setValueByPath(toObject, ['truncated'], fromTruncated);
+ }
+ const fromTokenCount = getValueByPath(fromObject, ['token_count']);
+ if (fromTokenCount != null) {
+ setValueByPath(toObject, ['tokenCount'], fromTokenCount);
+ }
+ return toObject;
+}
+function contentEmbeddingFromVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromValues = getValueByPath(fromObject, ['values']);
+ if (fromValues != null) {
+ setValueByPath(toObject, ['values'], fromValues);
+ }
+ const fromStatistics = getValueByPath(fromObject, ['statistics']);
+ if (fromStatistics != null) {
+ setValueByPath(toObject, ['statistics'], contentEmbeddingStatisticsFromVertex(apiClient, fromStatistics));
+ }
+ return toObject;
+}
+function embedContentMetadataFromVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromBillableCharacterCount = getValueByPath(fromObject, [
+ 'billableCharacterCount',
+ ]);
+ if (fromBillableCharacterCount != null) {
+ setValueByPath(toObject, ['billableCharacterCount'], fromBillableCharacterCount);
+ }
+ return toObject;
+}
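+// Note for the converter below: the 'predictions[]' path segment fans out
+// over the predictions array, collecting each prediction's 'embeddings'
+// field into a single array.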
+function embedContentResponseFromVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromEmbeddings = getValueByPath(fromObject, [
+ 'predictions[]',
+ 'embeddings',
+ ]);
+ if (fromEmbeddings != null) {
+ if (Array.isArray(fromEmbeddings)) {
+ setValueByPath(toObject, ['embeddings'], fromEmbeddings.map((item) => {
+ return contentEmbeddingFromVertex(apiClient, item);
+ }));
+ }
+ else {
+ setValueByPath(toObject, ['embeddings'], fromEmbeddings);
+ }
+ }
+ const fromMetadata = getValueByPath(fromObject, ['metadata']);
+ if (fromMetadata != null) {
+ setValueByPath(toObject, ['metadata'], embedContentMetadataFromVertex(apiClient, fromMetadata));
+ }
+ return toObject;
+}
+function imageFromVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromGcsUri = getValueByPath(fromObject, ['gcsUri']);
+ if (fromGcsUri != null) {
+ setValueByPath(toObject, ['gcsUri'], fromGcsUri);
+ }
+ const fromImageBytes = getValueByPath(fromObject, [
+ 'bytesBase64Encoded',
+ ]);
+ if (fromImageBytes != null) {
+ setValueByPath(toObject, ['imageBytes'], tBytes(apiClient, fromImageBytes));
+ }
+ const fromMimeType = getValueByPath(fromObject, ['mimeType']);
+ if (fromMimeType != null) {
+ setValueByPath(toObject, ['mimeType'], fromMimeType);
+ }
+ return toObject;
+}
+function safetyAttributesFromVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromCategories = getValueByPath(fromObject, [
+ 'safetyAttributes',
+ 'categories',
+ ]);
+ if (fromCategories != null) {
+ setValueByPath(toObject, ['categories'], fromCategories);
+ }
+ const fromScores = getValueByPath(fromObject, [
+ 'safetyAttributes',
+ 'scores',
+ ]);
+ if (fromScores != null) {
+ setValueByPath(toObject, ['scores'], fromScores);
+ }
+ const fromContentType = getValueByPath(fromObject, ['contentType']);
+ if (fromContentType != null) {
+ setValueByPath(toObject, ['contentType'], fromContentType);
+ }
+ return toObject;
+}
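+// Vertex returns a generated image's payload and its safety attributes
+// inline on the same prediction object, so the '_self' paths in the
+// converter below resolve to the prediction itself before the field-level
+// converters are applied.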
+function generatedImageFromVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromImage = getValueByPath(fromObject, ['_self']);
+ if (fromImage != null) {
+ setValueByPath(toObject, ['image'], imageFromVertex(apiClient, fromImage));
+ }
+ const fromRaiFilteredReason = getValueByPath(fromObject, [
+ 'raiFilteredReason',
+ ]);
+ if (fromRaiFilteredReason != null) {
+ setValueByPath(toObject, ['raiFilteredReason'], fromRaiFilteredReason);
+ }
+ const fromSafetyAttributes = getValueByPath(fromObject, ['_self']);
+ if (fromSafetyAttributes != null) {
+ setValueByPath(toObject, ['safetyAttributes'], safetyAttributesFromVertex(apiClient, fromSafetyAttributes));
+ }
+ const fromEnhancedPrompt = getValueByPath(fromObject, ['prompt']);
+ if (fromEnhancedPrompt != null) {
+ setValueByPath(toObject, ['enhancedPrompt'], fromEnhancedPrompt);
+ }
+ return toObject;
+}
+function generateImagesResponseFromVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromGeneratedImages = getValueByPath(fromObject, [
+ 'predictions',
+ ]);
+ if (fromGeneratedImages != null) {
+ if (Array.isArray(fromGeneratedImages)) {
+ setValueByPath(toObject, ['generatedImages'], fromGeneratedImages.map((item) => {
+ return generatedImageFromVertex(apiClient, item);
+ }));
+ }
+ else {
+ setValueByPath(toObject, ['generatedImages'], fromGeneratedImages);
+ }
+ }
+ const fromPositivePromptSafetyAttributes = getValueByPath(fromObject, [
+ 'positivePromptSafetyAttributes',
+ ]);
+ if (fromPositivePromptSafetyAttributes != null) {
+ setValueByPath(toObject, ['positivePromptSafetyAttributes'], safetyAttributesFromVertex(apiClient, fromPositivePromptSafetyAttributes));
+ }
+ return toObject;
+}
+function countTokensResponseFromVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromTotalTokens = getValueByPath(fromObject, ['totalTokens']);
+ if (fromTotalTokens != null) {
+ setValueByPath(toObject, ['totalTokens'], fromTotalTokens);
+ }
+ return toObject;
+}
+function computeTokensResponseFromVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromTokensInfo = getValueByPath(fromObject, ['tokensInfo']);
+ if (fromTokensInfo != null) {
+ setValueByPath(toObject, ['tokensInfo'], fromTokensInfo);
+ }
+ return toObject;
+}
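+// The '$1' suffix on the following video converters is bundler name
+// mangling: identically named converters exist in another source module
+// (see the un-suffixed copies further down), and the bundle keeps the two
+// sets apart by suffixing one of them.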
+function videoFromVertex$1(apiClient, fromObject) {
+ const toObject = {};
+ const fromUri = getValueByPath(fromObject, ['gcsUri']);
+ if (fromUri != null) {
+ setValueByPath(toObject, ['uri'], fromUri);
+ }
+ const fromVideoBytes = getValueByPath(fromObject, [
+ 'bytesBase64Encoded',
+ ]);
+ if (fromVideoBytes != null) {
+ setValueByPath(toObject, ['videoBytes'], tBytes(apiClient, fromVideoBytes));
+ }
+ const fromMimeType = getValueByPath(fromObject, ['mimeType']);
+ if (fromMimeType != null) {
+ setValueByPath(toObject, ['mimeType'], fromMimeType);
+ }
+ return toObject;
+}
+function generatedVideoFromVertex$1(apiClient, fromObject) {
+ const toObject = {};
+ const fromVideo = getValueByPath(fromObject, ['_self']);
+ if (fromVideo != null) {
+ setValueByPath(toObject, ['video'], videoFromVertex$1(apiClient, fromVideo));
+ }
+ return toObject;
+}
+function generateVideosResponseFromVertex$1(apiClient, fromObject) {
+ const toObject = {};
+ const fromGeneratedVideos = getValueByPath(fromObject, ['videos']);
+ if (fromGeneratedVideos != null) {
+ if (Array.isArray(fromGeneratedVideos)) {
+ setValueByPath(toObject, ['generatedVideos'], fromGeneratedVideos.map((item) => {
+ return generatedVideoFromVertex$1(apiClient, item);
+ }));
+ }
+ else {
+ setValueByPath(toObject, ['generatedVideos'], fromGeneratedVideos);
+ }
+ }
+ const fromRaiMediaFilteredCount = getValueByPath(fromObject, [
+ 'raiMediaFilteredCount',
+ ]);
+ if (fromRaiMediaFilteredCount != null) {
+ setValueByPath(toObject, ['raiMediaFilteredCount'], fromRaiMediaFilteredCount);
+ }
+ const fromRaiMediaFilteredReasons = getValueByPath(fromObject, [
+ 'raiMediaFilteredReasons',
+ ]);
+ if (fromRaiMediaFilteredReasons != null) {
+ setValueByPath(toObject, ['raiMediaFilteredReasons'], fromRaiMediaFilteredReasons);
+ }
+ return toObject;
+}
+function generateVideosOperationFromVertex$1(apiClient, fromObject) {
+ const toObject = {};
+ const fromName = getValueByPath(fromObject, ['name']);
+ if (fromName != null) {
+ setValueByPath(toObject, ['name'], fromName);
+ }
+ const fromMetadata = getValueByPath(fromObject, ['metadata']);
+ if (fromMetadata != null) {
+ setValueByPath(toObject, ['metadata'], fromMetadata);
+ }
+ const fromDone = getValueByPath(fromObject, ['done']);
+ if (fromDone != null) {
+ setValueByPath(toObject, ['done'], fromDone);
+ }
+ const fromError = getValueByPath(fromObject, ['error']);
+ if (fromError != null) {
+ setValueByPath(toObject, ['error'], fromError);
+ }
+ const fromResponse = getValueByPath(fromObject, ['response']);
+ if (fromResponse != null) {
+ setValueByPath(toObject, ['response'], fromResponse);
+ }
+ const fromResult = getValueByPath(fromObject, ['response']);
+ if (fromResult != null) {
+ setValueByPath(toObject, ['result'], generateVideosResponseFromVertex$1(apiClient, fromResult));
+ }
+ return toObject;
+}
+
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+/**
+ * Converters for live client.
+ */
+function liveConnectParametersToMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromConfig = getValueByPath(fromObject, ['config']);
+ if (fromConfig !== undefined && fromConfig !== null) {
+ setValueByPath(toObject, ['setup'], liveConnectConfigToMldev(apiClient, fromConfig));
+ }
+ const fromModel = getValueByPath(fromObject, ['model']);
+ if (fromModel !== undefined) {
+ setValueByPath(toObject, ['setup', 'model'], fromModel);
+ }
+ return toObject;
+}
+function liveConnectParametersToVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromConfig = getValueByPath(fromObject, ['config']);
+ if (fromConfig !== undefined && fromConfig !== null) {
+ setValueByPath(toObject, ['setup'], liveConnectConfigToVertex(apiClient, fromConfig));
+ }
+ const fromModel = getValueByPath(fromObject, ['model']);
+ if (fromModel !== undefined) {
+ setValueByPath(toObject, ['setup', 'model'], fromModel);
+ }
+ return toObject;
+}
+function liveServerMessageFromMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromSetupComplete = getValueByPath(fromObject, [
+ 'setupComplete',
+ ]);
+ if (fromSetupComplete !== undefined) {
+ setValueByPath(toObject, ['setupComplete'], fromSetupComplete);
+ }
+ const fromServerContent = getValueByPath(fromObject, [
+ 'serverContent',
+ ]);
+ if (fromServerContent !== undefined && fromServerContent !== null) {
+ setValueByPath(toObject, ['serverContent'], liveServerContentFromMldev(apiClient, fromServerContent));
+ }
+ const fromToolCall = getValueByPath(fromObject, ['toolCall']);
+ if (fromToolCall !== undefined && fromToolCall !== null) {
+ setValueByPath(toObject, ['toolCall'], liveServerToolCallFromMldev(apiClient, fromToolCall));
+ }
+ const fromToolCallCancellation = getValueByPath(fromObject, [
+ 'toolCallCancellation',
+ ]);
+ if (fromToolCallCancellation !== undefined &&
+ fromToolCallCancellation !== null) {
+ setValueByPath(toObject, ['toolCallCancellation'], liveServerToolCallCancellationFromMldev(apiClient, fromToolCallCancellation));
+ }
+ return toObject;
+}
+function liveServerMessageFromVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromSetupComplete = getValueByPath(fromObject, [
+ 'setupComplete',
+ ]);
+ if (fromSetupComplete !== undefined) {
+ setValueByPath(toObject, ['setupComplete'], fromSetupComplete);
+ }
+ const fromServerContent = getValueByPath(fromObject, [
+ 'serverContent',
+ ]);
+ if (fromServerContent !== undefined && fromServerContent !== null) {
+ setValueByPath(toObject, ['serverContent'], liveServerContentFromVertex(apiClient, fromServerContent));
+ }
+ const fromToolCall = getValueByPath(fromObject, ['toolCall']);
+ if (fromToolCall !== undefined && fromToolCall !== null) {
+ setValueByPath(toObject, ['toolCall'], liveServerToolCallFromVertex(apiClient, fromToolCall));
+ }
+ const fromToolCallCancellation = getValueByPath(fromObject, [
+ 'toolCallCancellation',
+ ]);
+ if (fromToolCallCancellation !== undefined &&
+ fromToolCallCancellation !== null) {
+ setValueByPath(toObject, ['toolCallCancellation'], liveServerToolCallCancellationFromVertex(apiClient, fromToolCallCancellation));
+ }
+ return toObject;
+}
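+// The live connect config converters below hoist the top-level convenience
+// fields (responseModalities, speechConfig) into the nested generationConfig
+// object expected by the setup message.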
+function liveConnectConfigToMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromGenerationConfig = getValueByPath(fromObject, [
+ 'generationConfig',
+ ]);
+ if (fromGenerationConfig !== undefined) {
+ setValueByPath(toObject, ['generationConfig'], fromGenerationConfig);
+ }
+ const fromResponseModalities = getValueByPath(fromObject, [
+ 'responseModalities',
+ ]);
+ if (fromResponseModalities !== undefined) {
+ setValueByPath(toObject, ['generationConfig', 'responseModalities'], fromResponseModalities);
+ }
+ const fromSpeechConfig = getValueByPath(fromObject, ['speechConfig']);
+ if (fromSpeechConfig !== undefined) {
+ setValueByPath(toObject, ['generationConfig', 'speechConfig'], fromSpeechConfig);
+ }
+ const fromSystemInstruction = getValueByPath(fromObject, [
+ 'systemInstruction',
+ ]);
+ if (fromSystemInstruction !== undefined && fromSystemInstruction !== null) {
+ setValueByPath(toObject, ['systemInstruction'], contentToMldev(apiClient, fromSystemInstruction));
+ }
+ const fromTools = getValueByPath(fromObject, ['tools']);
+ if (fromTools !== undefined &&
+ fromTools !== null &&
+ Array.isArray(fromTools)) {
+ setValueByPath(toObject, ['tools'], fromTools.map((item) => {
+ return toolToMldev(apiClient, item);
+ }));
+ }
+ return toObject;
+}
+function liveConnectConfigToVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromGenerationConfig = getValueByPath(fromObject, [
+ 'generationConfig',
+ ]);
+ if (fromGenerationConfig !== undefined) {
+ setValueByPath(toObject, ['generationConfig'], fromGenerationConfig);
+ }
+ const fromResponseModalities = getValueByPath(fromObject, [
+ 'responseModalities',
+ ]);
+ if (fromResponseModalities !== undefined) {
+ setValueByPath(toObject, ['generationConfig', 'responseModalities'], fromResponseModalities);
+ }
+ else {
+ // Set default to AUDIO to align with MLDev API.
+ setValueByPath(toObject, ['generationConfig', 'responseModalities'], ['AUDIO']);
+ }
+ const fromSpeechConfig = getValueByPath(fromObject, ['speechConfig']);
+ if (fromSpeechConfig !== undefined) {
+ setValueByPath(toObject, ['generationConfig', 'speechConfig'], fromSpeechConfig);
+ }
+ const fromSystemInstruction = getValueByPath(fromObject, [
+ 'systemInstruction',
+ ]);
+ if (fromSystemInstruction !== undefined && fromSystemInstruction !== null) {
+ setValueByPath(toObject, ['systemInstruction'], contentToVertex(apiClient, fromSystemInstruction));
+ }
+ const fromTools = getValueByPath(fromObject, ['tools']);
+ if (fromTools !== undefined &&
+ fromTools !== null &&
+ Array.isArray(fromTools)) {
+ setValueByPath(toObject, ['tools'], fromTools.map((item) => {
+ return toolToVertex(apiClient, item);
+ }));
+ }
+ return toObject;
+}
+function liveServerContentFromMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromModelTurn = getValueByPath(fromObject, ['modelTurn']);
+ if (fromModelTurn !== undefined && fromModelTurn !== null) {
+ setValueByPath(toObject, ['modelTurn'], contentFromMldev(apiClient, fromModelTurn));
+ }
+ const fromTurnComplete = getValueByPath(fromObject, ['turnComplete']);
+ if (fromTurnComplete !== undefined) {
+ setValueByPath(toObject, ['turnComplete'], fromTurnComplete);
+ }
+ const fromInterrupted = getValueByPath(fromObject, ['interrupted']);
+ if (fromInterrupted !== undefined) {
+ setValueByPath(toObject, ['interrupted'], fromInterrupted);
+ }
+ return toObject;
+}
+function liveServerContentFromVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromModelTurn = getValueByPath(fromObject, ['modelTurn']);
+ if (fromModelTurn !== undefined && fromModelTurn !== null) {
+ setValueByPath(toObject, ['modelTurn'], contentFromVertex(apiClient, fromModelTurn));
+ }
+ const fromTurnComplete = getValueByPath(fromObject, ['turnComplete']);
+ if (fromTurnComplete !== undefined) {
+ setValueByPath(toObject, ['turnComplete'], fromTurnComplete);
+ }
+ const fromInterrupted = getValueByPath(fromObject, ['interrupted']);
+ if (fromInterrupted !== undefined) {
+ setValueByPath(toObject, ['interrupted'], fromInterrupted);
+ }
+ return toObject;
+}
+function functionCallFromMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromId = getValueByPath(fromObject, ['id']);
+ if (fromId !== undefined) {
+ setValueByPath(toObject, ['id'], fromId);
+ }
+ const fromArgs = getValueByPath(fromObject, ['args']);
+ if (fromArgs !== undefined) {
+ setValueByPath(toObject, ['args'], fromArgs);
+ }
+ const fromName = getValueByPath(fromObject, ['name']);
+ if (fromName !== undefined) {
+ setValueByPath(toObject, ['name'], fromName);
+ }
+ return toObject;
+}
+function functionCallFromVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromArgs = getValueByPath(fromObject, ['args']);
+ if (fromArgs !== undefined) {
+ setValueByPath(toObject, ['args'], fromArgs);
+ }
+ const fromName = getValueByPath(fromObject, ['name']);
+ if (fromName !== undefined) {
+ setValueByPath(toObject, ['name'], fromName);
+ }
+ return toObject;
+}
+function liveServerToolCallFromMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromFunctionCalls = getValueByPath(fromObject, [
+ 'functionCalls',
+ ]);
+ if (fromFunctionCalls !== undefined &&
+ fromFunctionCalls !== null &&
+ Array.isArray(fromFunctionCalls)) {
+ setValueByPath(toObject, ['functionCalls'], fromFunctionCalls.map((item) => {
+ return functionCallFromMldev(apiClient, item);
+ }));
+ }
+ return toObject;
+}
+function liveServerToolCallFromVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromFunctionCalls = getValueByPath(fromObject, [
+ 'functionCalls',
+ ]);
+ if (fromFunctionCalls !== undefined &&
+ fromFunctionCalls !== null &&
+ Array.isArray(fromFunctionCalls)) {
+ setValueByPath(toObject, ['functionCalls'], fromFunctionCalls.map((item) => {
+ return functionCallFromVertex(apiClient, item);
+ }));
+ }
+ return toObject;
+}
+function liveServerToolCallCancellationFromMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromIds = getValueByPath(fromObject, ['ids']);
+ if (fromIds !== undefined) {
+ setValueByPath(toObject, ['ids'], fromIds);
+ }
+ return toObject;
+}
+function liveServerToolCallCancellationFromVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromIds = getValueByPath(fromObject, ['ids']);
+ if (fromIds !== undefined) {
+ setValueByPath(toObject, ['ids'], fromIds);
+ }
+ return toObject;
+}
+
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+const FUNCTION_RESPONSE_REQUIRES_ID = 'FunctionResponse request must have an `id` field from the response of a ToolCall.functionCalls in Google AI.';
+/**
+ * Handles incoming messages from the WebSocket.
+ *
+ * @remarks
+ * This function is responsible for parsing incoming messages, transforming them
+ * into LiveServerMessages, and then calling the onmessage callback. Note that
+ * the first message received from the server is a setupComplete message.
+ *
+ * @param apiClient The ApiClient instance.
+ * @param onmessage The user-provided onmessage callback (if any).
+ * @param event The MessageEvent from the WebSocket.
+ */
+async function handleWebSocketMessage(apiClient, onmessage, event) {
+ let serverMessage;
+ let data;
+ if (event.data instanceof Blob) {
+ data = JSON.parse(await event.data.text());
+ }
+ else {
+ data = JSON.parse(event.data);
+ }
+ if (apiClient.isVertexAI()) {
+ serverMessage = liveServerMessageFromVertex(apiClient, data);
+ }
+ else {
+ serverMessage = liveServerMessageFromMldev(apiClient, data);
+ }
+ onmessage(serverMessage);
+}
+/**
+ Live class encapsulates the configuration for live interaction with the
+ Generative Language API. It embeds ApiClient for general API settings.
+
+ @experimental
+ */
+class Live {
+ constructor(apiClient, auth, webSocketFactory) {
+ this.apiClient = apiClient;
+ this.auth = auth;
+ this.webSocketFactory = webSocketFactory;
+ }
+ /**
+ Establishes a connection to the specified model with the given
+ configuration and returns a Session object representing that connection.
+
+ @experimental
+
+ @remarks
+ If using the Gemini API, Live is currently only supported behind API
+ version `v1alpha`. Ensure that the API version is set to `v1alpha` when
+ initializing the SDK if relying on the Gemini API.
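+
+    For example, a minimal sketch (assuming the constructor options accept an
+    `apiVersion` field alongside the API key):
+    ```ts
+    const ai = new GoogleGenAI({apiKey: 'YOUR_API_KEY', apiVersion: 'v1alpha'});
+    ```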
+
+ @param params - The parameters for establishing a connection to the model.
+ @return A live session.
+
+ @example
+ ```ts
+ const session = await ai.live.connect({
+ model: 'gemini-2.0-flash-exp',
+ config: {
+ responseModalities: [Modality.AUDIO],
+ },
+ callbacks: {
+ onopen: () => {
+ console.log('Connected to the socket.');
+ },
+ onmessage: (e: MessageEvent) => {
+ console.log('Received message from the server: %s\n', debug(e.data));
+ },
+ onerror: (e: ErrorEvent) => {
+ console.log('Error occurred: %s\n', debug(e.error));
+ },
+ onclose: (e: CloseEvent) => {
+ console.log('Connection closed.');
+ },
+ },
+ });
+ ```
+ */
+ async connect(params) {
+ var _a, _b;
+ const websocketBaseUrl = this.apiClient.getWebsocketBaseUrl();
+ const apiVersion = this.apiClient.getApiVersion();
+ let url;
+ const headers = mapToHeaders(this.apiClient.getDefaultHeaders());
+ if (this.apiClient.isVertexAI()) {
+ url = `${websocketBaseUrl}/ws/google.cloud.aiplatform.${apiVersion}.LlmBidiService/BidiGenerateContent`;
+ await this.auth.addAuthHeaders(headers);
+ }
+ else {
+ const apiKey = this.apiClient.getApiKey();
+ url = `${websocketBaseUrl}/ws/google.ai.generativelanguage.${apiVersion}.GenerativeService.BidiGenerateContent?key=${apiKey}`;
+ }
+ let onopenResolve = () => { };
+ const onopenPromise = new Promise((resolve) => {
+ onopenResolve = resolve;
+ });
+ const callbacks = params.callbacks;
+ const onopenAwaitedCallback = function () {
+ var _a;
+ (_a = callbacks === null || callbacks === void 0 ? void 0 : callbacks.onopen) === null || _a === void 0 ? void 0 : _a.call(callbacks);
+ onopenResolve({});
+ };
+ const apiClient = this.apiClient;
+ const websocketCallbacks = {
+ onopen: onopenAwaitedCallback,
+ onmessage: (event) => {
+ void handleWebSocketMessage(apiClient, callbacks.onmessage, event);
+ },
+ onerror: (_a = callbacks === null || callbacks === void 0 ? void 0 : callbacks.onerror) !== null && _a !== void 0 ? _a : function (e) {
+ },
+ onclose: (_b = callbacks === null || callbacks === void 0 ? void 0 : callbacks.onclose) !== null && _b !== void 0 ? _b : function (e) {
+ },
+ };
+ const conn = this.webSocketFactory.create(url, headersToMap(headers), websocketCallbacks);
+ conn.connect();
+ // Wait for the websocket to open before sending requests.
+ await onopenPromise;
+ let transformedModel = tModel(this.apiClient, params.model);
+ if (this.apiClient.isVertexAI() &&
+ transformedModel.startsWith('publishers/')) {
+ const project = this.apiClient.getProject();
+ const location = this.apiClient.getLocation();
+ transformedModel =
+ `projects/${project}/locations/${location}/` + transformedModel;
+ }
+ let clientMessage = {};
+ const liveConnectParameters = {
+ model: transformedModel,
+ config: params.config,
+ callbacks: params.callbacks,
+ };
+ if (this.apiClient.isVertexAI()) {
+ clientMessage = liveConnectParametersToVertex(this.apiClient, liveConnectParameters);
+ }
+ else {
+ clientMessage = liveConnectParametersToMldev(this.apiClient, liveConnectParameters);
+ }
+ conn.send(JSON.stringify(clientMessage));
+ return new Session(conn, this.apiClient);
+ }
+}
+const defaultLiveSendClientContentParameters = {
+ turnComplete: true,
+};
+/**
+ Represents a connection to the API.
+
+ @experimental
+ */
+class Session {
+ constructor(conn, apiClient) {
+ this.conn = conn;
+ this.apiClient = apiClient;
+ }
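+    // Builds the wire-format clientContent message from the user-facing
+    // params, converting `turns` into the API-specific Content encoding.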
+ tLiveClientContent(apiClient, params) {
+ if (params.turns !== null && params.turns !== undefined) {
+ let contents = [];
+ try {
+ contents = tContents(apiClient, params.turns);
+ if (apiClient.isVertexAI()) {
+ contents = contents.map((item) => contentToVertex(apiClient, item));
+ }
+ else {
+ contents = contents.map((item) => contentToMldev(apiClient, item));
+ }
+ }
+ catch (_a) {
+ throw new Error(`Failed to parse client content "turns", type: '${typeof params.turns}'`);
+ }
+ return {
+ clientContent: { turns: contents, turnComplete: params.turnComplete },
+ };
+ }
+ return {
+ clientContent: { turnComplete: params.turnComplete },
+ };
+ }
+ tLiveClientRealtimeInput(apiClient, params) {
+ let clientMessage = {};
+ if (!('media' in params) || !params.media) {
+ throw new Error(`Failed to convert realtime input "media", type: '${typeof params.media}'`);
+ }
+ // LiveClientRealtimeInput
+ clientMessage = { realtimeInput: { mediaChunks: [params.media] } };
+ return clientMessage;
+ }
+    tLiveClientToolResponse(apiClient, params) {
+ let functionResponses = [];
+ if (params.functionResponses == null) {
+ throw new Error('functionResponses is required.');
+ }
+ if (!Array.isArray(params.functionResponses)) {
+ functionResponses = [params.functionResponses];
+ }
+ else {
+ functionResponses = params.functionResponses;
+ }
+ if (functionResponses.length === 0) {
+ throw new Error('functionResponses is required.');
+ }
+ for (const functionResponse of functionResponses) {
+ if (typeof functionResponse !== 'object' ||
+ functionResponse === null ||
+ !('name' in functionResponse) ||
+ !('response' in functionResponse)) {
+ throw new Error(`Could not parse function response, type '${typeof functionResponse}'.`);
+ }
+ if (!apiClient.isVertexAI() && !('id' in functionResponse)) {
+ throw new Error(FUNCTION_RESPONSE_REQUIRES_ID);
+ }
+ }
+ const clientMessage = {
+ toolResponse: { functionResponses: functionResponses },
+ };
+ return clientMessage;
+ }
+ /**
+ Send a message over the established connection.
+
+ @param params - Contains two **optional** properties, `turns` and
+ `turnComplete`.
+
+ - `turns` will be converted to a `Content[]`
+ - `turnComplete: true` [default] indicates that you are done sending
+ content and expect a response. If `turnComplete: false`, the server
+ will wait for additional messages before starting generation.
+
+ @experimental
+
+ @remarks
+ There are two ways to send messages to the live API:
+ `sendClientContent` and `sendRealtimeInput`.
+
+ `sendClientContent` messages are added to the model context **in order**.
+    Having a conversation using `sendClientContent` messages is roughly
+    equivalent to using `Chat.sendMessageStream`, except that the state of
+    the chat history is stored on the API server instead of locally.
+
+    Because of `sendClientContent`'s order guarantee, the model cannot respond
+    as quickly to `sendClientContent` messages as to `sendRealtimeInput`
+ messages. This makes the biggest difference when sending objects that have
+ significant preprocessing time (typically images).
+
+ The `sendClientContent` message sends a `Content[]`
+ which has more options than the `Blob` sent by `sendRealtimeInput`.
+
+ So the main use-cases for `sendClientContent` over `sendRealtimeInput` are:
+
+ - Sending anything that can't be represented as a `Blob` (text,
+    `sendClientContent({turns: "Hello?"})`).
+ - Managing turns when not using audio input and voice activity detection.
+ (`sendClientContent({turnComplete:true})` or the short form
+ `sendClientContent()`)
+ - Prefilling a conversation context
+ ```
+ sendClientContent({
+ turns: [
+ Content({role:user, parts:...}),
+ Content({role:user, parts:...}),
+ ...
+ ]
+ })
+ ```
+ @experimental
+ */
+ sendClientContent(params) {
+        params = Object.assign(Object.assign({}, defaultLiveSendClientContentParameters), params);
+ const clientMessage = this.tLiveClientContent(this.apiClient, params);
+ this.conn.send(JSON.stringify(clientMessage));
+ }
+ /**
+ Send a realtime message over the established connection.
+
+ @param params - Contains one property, `media`.
+
+ - `media` will be converted to a `Blob`
+
+ @experimental
+
+ @remarks
+ Use `sendRealtimeInput` for realtime audio chunks and video frames (images).
+
+    With `sendRealtimeInput` the API will respond to audio automatically
+    based on voice activity detection (VAD).
+
+    `sendRealtimeInput` is optimized for responsiveness at the expense of
+    deterministic ordering guarantees. Audio and video tokens are added to
+    the context as they become available.
+
+    Note: The call signature expects a `Blob` object, but only a subset
+    of audio and image MIME types are allowed.
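+
+    @example
+    A minimal sketch (the audio chunk variable and MIME type below are
+    illustrative; `data` carries base64-encoded bytes):
+    ```ts
+    session.sendRealtimeInput({
+      media: {data: base64AudioChunk, mimeType: 'audio/pcm;rate=16000'},
+    });
+    ```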
+ */
+ sendRealtimeInput(params) {
+ if (params.media == null) {
+ throw new Error('Media is required.');
+ }
+ const clientMessage = this.tLiveClientRealtimeInput(this.apiClient, params);
+ this.conn.send(JSON.stringify(clientMessage));
+ }
+ /**
+ Send a function response message over the established connection.
+
+ @param params - Contains property `functionResponses`.
+
+    - `functionResponses` will be converted to a `FunctionResponse[]`
+
+ @remarks
+    Use `sendToolResponse` to reply to a `LiveServerToolCall` from the server.
+
+ Use {@link types.LiveConnectConfig#tools} to configure the callable functions.
+
+ @experimental
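+
+    @example
+    A minimal sketch replying to a tool call (the id, name, and payload are
+    illustrative; `id` must echo the id of the server's function call when
+    using the Gemini API):
+    ```ts
+    session.sendToolResponse({
+      functionResponses: [{
+        id: 'function-call-1',
+        name: 'getWeather',
+        response: {temperature: 20},
+      }],
+    });
+    ```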
+ */
+ sendToolResponse(params) {
+ if (params.functionResponses == null) {
+ throw new Error('Tool response parameters are required.');
+ }
+        const clientMessage = this.tLiveClientToolResponse(this.apiClient, params);
+ this.conn.send(JSON.stringify(clientMessage));
+ }
+ /**
+ Terminates the WebSocket connection.
+
+ @experimental
+
+ @example
+ ```ts
+ const session = await ai.live.connect({
+ model: 'gemini-2.0-flash-exp',
+ config: {
+ responseModalities: [Modality.AUDIO],
+ }
+ });
+
+ session.close();
+ ```
+ */
+ close() {
+ this.conn.close();
+ }
+}
+// Converts an headers object to a "map" object as expected by the WebSocket
+// constructor. We use this as the Auth interface works with Headers objects
+// while the WebSocket constructor takes a map.
+function headersToMap(headers) {
+ const headerMap = {};
+ headers.forEach((value, key) => {
+ headerMap[key] = value;
+ });
+ return headerMap;
+}
+// Converts a "map" object to a headers object. We use this as the Auth
+// interface works with Headers objects while the API client default headers
+// returns a map.
+function mapToHeaders(map) {
+ const headers = new Headers();
+ for (const [key, value] of Object.entries(map)) {
+ headers.append(key, value);
+ }
+ return headers;
+}
+
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+class Models extends BaseModule {
+ constructor(apiClient) {
+ super();
+ this.apiClient = apiClient;
+ /**
+ * Makes an API request to generate content with a given model.
+ *
+ * For the `model` parameter, supported formats for Vertex AI API include:
+ * - The Gemini model ID, for example: 'gemini-2.0-flash'
+ * - The full resource name starts with 'projects/', for example:
+ * 'projects/my-project-id/locations/us-central1/publishers/google/models/gemini-2.0-flash'
+ * - The partial resource name with 'publishers/', for example:
+ * 'publishers/google/models/gemini-2.0-flash' or
+ * 'publishers/meta/models/llama-3.1-405b-instruct-maas'
+ * - `/` separated publisher and model name, for example:
+ * 'google/gemini-2.0-flash' or 'meta/llama-3.1-405b-instruct-maas'
+ *
+ * For the `model` parameter, supported formats for Gemini API include:
+ * - The Gemini model ID, for example: 'gemini-2.0-flash'
+ * - The model name starts with 'models/', for example:
+ * 'models/gemini-2.0-flash'
+ * - For tuned models, the model name starts with 'tunedModels/',
+ * for example:
+ * 'tunedModels/1234567890123456789'
+ *
+ * Some models support multimodal input and output.
+ *
+ * @param params - The parameters for generating content.
+ * @return The response from generating content.
+ *
+ * @example
+ * ```ts
+ * const response = await ai.models.generateContent({
+ * model: 'gemini-2.0-flash',
+ * contents: 'why is the sky blue?',
+ * config: {
+ * candidateCount: 2,
+ * }
+ * });
+ * console.log(response);
+ * ```
+ */
+ this.generateContent = async (params) => {
+ return await this.generateContentInternal(params);
+ };
+ /**
+ * Makes an API request to generate content with a given model and yields the
+ * response in chunks.
+ *
+ * For the `model` parameter, supported formats for Vertex AI API include:
+ * - The Gemini model ID, for example: 'gemini-2.0-flash'
+ * - The full resource name starts with 'projects/', for example:
+ * 'projects/my-project-id/locations/us-central1/publishers/google/models/gemini-2.0-flash'
+ * - The partial resource name with 'publishers/', for example:
+ * 'publishers/google/models/gemini-2.0-flash' or
+ * 'publishers/meta/models/llama-3.1-405b-instruct-maas'
+ * - `/` separated publisher and model name, for example:
+ * 'google/gemini-2.0-flash' or 'meta/llama-3.1-405b-instruct-maas'
+ *
+ * For the `model` parameter, supported formats for Gemini API include:
+ * - The Gemini model ID, for example: 'gemini-2.0-flash'
+ * - The model name starts with 'models/', for example:
+ * 'models/gemini-2.0-flash'
+ * - For tuned models, the model name starts with 'tunedModels/',
+ * for example:
+ * 'tunedModels/1234567890123456789'
+ *
+ * Some models support multimodal input and output.
+ *
+ * @param params - The parameters for generating content with streaming response.
+ * @return The response from generating content.
+ *
+ * @example
+ * ```ts
+ * const response = await ai.models.generateContentStream({
+ * model: 'gemini-2.0-flash',
+ * contents: 'why is the sky blue?',
+ * config: {
+ * maxOutputTokens: 200,
+ * }
+ * });
+ * for await (const chunk of response) {
+ * console.log(chunk);
+ * }
+ * ```
+ */
+ this.generateContentStream = async (params) => {
+ return await this.generateContentStreamInternal(params);
+ };
+ /**
+ * Generates an image based on a text description and configuration.
+ *
+ * @param model - The model to use.
+ * @param prompt - A text description of the image to generate.
+ * @param [config] - The config for image generation.
+ * @return The response from the API.
+ *
+ * @example
+ * ```ts
+ * const response = await client.models.generateImages({
+ * model: 'imagen-3.0-generate-002',
+ * prompt: 'Robot holding a red skateboard',
+ * config: {
+ * numberOfImages: 1,
+ * includeRaiReason: true,
+ * },
+ * });
+ * console.log(response?.generatedImages?.[0]?.image?.imageBytes);
+ * ```
+ */
+ this.generateImages = async (params) => {
+ return await this.generateImagesInternal(params).then((apiResponse) => {
+ var _a;
+ let positivePromptSafetyAttributes;
+ const generatedImages = [];
+ if (apiResponse === null || apiResponse === void 0 ? void 0 : apiResponse.generatedImages) {
+ for (const generatedImage of apiResponse.generatedImages) {
+ if (generatedImage &&
+ (generatedImage === null || generatedImage === void 0 ? void 0 : generatedImage.safetyAttributes) &&
+ ((_a = generatedImage === null || generatedImage === void 0 ? void 0 : generatedImage.safetyAttributes) === null || _a === void 0 ? void 0 : _a.contentType) === 'Positive Prompt') {
+ positivePromptSafetyAttributes = generatedImage === null || generatedImage === void 0 ? void 0 : generatedImage.safetyAttributes;
+ }
+ else {
+ generatedImages.push(generatedImage);
+ }
+ }
+ }
+ let response;
+ if (positivePromptSafetyAttributes) {
+ response = {
+ generatedImages: generatedImages,
+ positivePromptSafetyAttributes: positivePromptSafetyAttributes,
+ };
+ }
+ else {
+ response = {
+ generatedImages: generatedImages,
+ };
+ }
+ return response;
+ });
+ };
+ }
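+    // The parameter converters stash URL path variables under '_url' and
+    // query parameters under '_query'; the request methods below consume
+    // both and strip them (along with 'config') from the JSON body before
+    // the request is sent.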
+ async generateContentInternal(params) {
+ var _a, _b;
+ let response;
+ let path = '';
+ let queryParams = {};
+ if (this.apiClient.isVertexAI()) {
+ const body = generateContentParametersToVertex(this.apiClient, params);
+ path = formatMap('{model}:generateContent', body['_url']);
+ queryParams = body['_query'];
+ delete body['config'];
+ delete body['_url'];
+ delete body['_query'];
+ response = this.apiClient
+ .request({
+ path: path,
+ queryParams: queryParams,
+ body: JSON.stringify(body),
+ httpMethod: 'POST',
+ httpOptions: (_a = params.config) === null || _a === void 0 ? void 0 : _a.httpOptions,
+ })
+ .then((httpResponse) => {
+ return httpResponse.json();
+ });
+ return response.then((apiResponse) => {
+ const resp = generateContentResponseFromVertex(this.apiClient, apiResponse);
+ const typedResp = new GenerateContentResponse();
+ Object.assign(typedResp, resp);
+ return typedResp;
+ });
+ }
+ else {
+ const body = generateContentParametersToMldev(this.apiClient, params);
+ path = formatMap('{model}:generateContent', body['_url']);
+ queryParams = body['_query'];
+ delete body['config'];
+ delete body['_url'];
+ delete body['_query'];
+ response = this.apiClient
+ .request({
+ path: path,
+ queryParams: queryParams,
+ body: JSON.stringify(body),
+ httpMethod: 'POST',
+ httpOptions: (_b = params.config) === null || _b === void 0 ? void 0 : _b.httpOptions,
+ })
+ .then((httpResponse) => {
+ return httpResponse.json();
+ });
+ return response.then((apiResponse) => {
+ const resp = generateContentResponseFromMldev(this.apiClient, apiResponse);
+ const typedResp = new GenerateContentResponse();
+ Object.assign(typedResp, resp);
+ return typedResp;
+ });
+ }
+ }
+ async generateContentStreamInternal(params) {
+ var _a, _b;
+ let response;
+ let path = '';
+ let queryParams = {};
+ if (this.apiClient.isVertexAI()) {
+ const body = generateContentParametersToVertex(this.apiClient, params);
+ path = formatMap('{model}:streamGenerateContent?alt=sse', body['_url']);
+ queryParams = body['_query'];
+ delete body['config'];
+ delete body['_url'];
+ delete body['_query'];
+ const apiClient = this.apiClient;
+ response = apiClient.requestStream({
+ path: path,
+ queryParams: queryParams,
+ body: JSON.stringify(body),
+ httpMethod: 'POST',
+ httpOptions: (_a = params.config) === null || _a === void 0 ? void 0 : _a.httpOptions,
+ });
+ return response.then(function (apiResponse) {
+ return __asyncGenerator(this, arguments, function* () {
+ var _a, e_1, _b, _c;
+ try {
+ for (var _d = true, apiResponse_1 = __asyncValues(apiResponse), apiResponse_1_1; apiResponse_1_1 = yield __await(apiResponse_1.next()), _a = apiResponse_1_1.done, !_a; _d = true) {
+ _c = apiResponse_1_1.value;
+ _d = false;
+ const chunk = _c;
+ const resp = generateContentResponseFromVertex(apiClient, chunk);
+ const typedResp = new GenerateContentResponse();
+ Object.assign(typedResp, resp);
+ yield yield __await(typedResp);
+ }
+ }
+ catch (e_1_1) { e_1 = { error: e_1_1 }; }
+ finally {
+ try {
+ if (!_d && !_a && (_b = apiResponse_1.return)) yield __await(_b.call(apiResponse_1));
+ }
+ finally { if (e_1) throw e_1.error; }
+ }
+ });
+ });
+ }
+ else {
+ const body = generateContentParametersToMldev(this.apiClient, params);
+ path = formatMap('{model}:streamGenerateContent?alt=sse', body['_url']);
+ queryParams = body['_query'];
+ delete body['config'];
+ delete body['_url'];
+ delete body['_query'];
+ const apiClient = this.apiClient;
+ response = apiClient.requestStream({
+ path: path,
+ queryParams: queryParams,
+ body: JSON.stringify(body),
+ httpMethod: 'POST',
+ httpOptions: (_b = params.config) === null || _b === void 0 ? void 0 : _b.httpOptions,
+ });
+ return response.then(function (apiResponse) {
+ return __asyncGenerator(this, arguments, function* () {
+ var _a, e_2, _b, _c;
+ try {
+ for (var _d = true, apiResponse_2 = __asyncValues(apiResponse), apiResponse_2_1; apiResponse_2_1 = yield __await(apiResponse_2.next()), _a = apiResponse_2_1.done, !_a; _d = true) {
+ _c = apiResponse_2_1.value;
+ _d = false;
+ const chunk = _c;
+ const resp = generateContentResponseFromMldev(apiClient, chunk);
+ const typedResp = new GenerateContentResponse();
+ Object.assign(typedResp, resp);
+ yield yield __await(typedResp);
+ }
+ }
+ catch (e_2_1) { e_2 = { error: e_2_1 }; }
+ finally {
+ try {
+ if (!_d && !_a && (_b = apiResponse_2.return)) yield __await(_b.call(apiResponse_2));
+ }
+ finally { if (e_2) throw e_2.error; }
+ }
+ });
+ });
+ }
+ }
+ /**
+ * Calculates embeddings for the given contents. Only text is supported.
+ *
+ * @param params - The parameters for embedding contents.
+ * @return The response from the API.
+ *
+ * @example
+ * ```ts
+ * const response = await ai.models.embedContent({
+ * model: 'text-embedding-004',
+ * contents: [
+ * 'What is your name?',
+ * 'What is your favorite color?',
+ * ],
+ * config: {
+ * outputDimensionality: 64,
+ * },
+ * });
+ * console.log(response);
+ * ```
+ */
+ async embedContent(params) {
+ var _a, _b;
+ let response;
+ let path = '';
+ let queryParams = {};
+ if (this.apiClient.isVertexAI()) {
+ const body = embedContentParametersToVertex(this.apiClient, params);
+ path = formatMap('{model}:predict', body['_url']);
+ queryParams = body['_query'];
+ delete body['config'];
+ delete body['_url'];
+ delete body['_query'];
+ response = this.apiClient
+ .request({
+ path: path,
+ queryParams: queryParams,
+ body: JSON.stringify(body),
+ httpMethod: 'POST',
+ httpOptions: (_a = params.config) === null || _a === void 0 ? void 0 : _a.httpOptions,
+ })
+ .then((httpResponse) => {
+ return httpResponse.json();
+ });
+ return response.then((apiResponse) => {
+ const resp = embedContentResponseFromVertex(this.apiClient, apiResponse);
+ const typedResp = new EmbedContentResponse();
+ Object.assign(typedResp, resp);
+ return typedResp;
+ });
+ }
+ else {
+ const body = embedContentParametersToMldev(this.apiClient, params);
+ path = formatMap('{model}:batchEmbedContents', body['_url']);
+ queryParams = body['_query'];
+ delete body['config'];
+ delete body['_url'];
+ delete body['_query'];
+ response = this.apiClient
+ .request({
+ path: path,
+ queryParams: queryParams,
+ body: JSON.stringify(body),
+ httpMethod: 'POST',
+ httpOptions: (_b = params.config) === null || _b === void 0 ? void 0 : _b.httpOptions,
+ })
+ .then((httpResponse) => {
+ return httpResponse.json();
+ });
+ return response.then((apiResponse) => {
+ const resp = embedContentResponseFromMldev(this.apiClient, apiResponse);
+ const typedResp = new EmbedContentResponse();
+ Object.assign(typedResp, resp);
+ return typedResp;
+ });
+ }
+ }
+ /**
+ * Generates an image based on a text description and configuration.
+ *
+ * @param params - The parameters for generating images.
+ * @return The response from the API.
+ *
+ * @example
+ * ```ts
+ * const response = await ai.models.generateImages({
+ * model: 'imagen-3.0-generate-002',
+ * prompt: 'Robot holding a red skateboard',
+ * config: {
+ * numberOfImages: 1,
+ * includeRaiReason: true,
+ * },
+ * });
+ * console.log(response?.generatedImages?.[0]?.image?.imageBytes);
+ * ```
+ */
+ async generateImagesInternal(params) {
+ var _a, _b;
+ let response;
+ let path = '';
+ let queryParams = {};
+ if (this.apiClient.isVertexAI()) {
+ const body = generateImagesParametersToVertex(this.apiClient, params);
+ path = formatMap('{model}:predict', body['_url']);
+ queryParams = body['_query'];
+ delete body['config'];
+ delete body['_url'];
+ delete body['_query'];
+ response = this.apiClient
+ .request({
+ path: path,
+ queryParams: queryParams,
+ body: JSON.stringify(body),
+ httpMethod: 'POST',
+ httpOptions: (_a = params.config) === null || _a === void 0 ? void 0 : _a.httpOptions,
+ })
+ .then((httpResponse) => {
+ return httpResponse.json();
+ });
+ return response.then((apiResponse) => {
+ const resp = generateImagesResponseFromVertex(this.apiClient, apiResponse);
+ const typedResp = new GenerateImagesResponse();
+ Object.assign(typedResp, resp);
+ return typedResp;
+ });
+ }
+ else {
+ const body = generateImagesParametersToMldev(this.apiClient, params);
+ path = formatMap('{model}:predict', body['_url']);
+ queryParams = body['_query'];
+ delete body['config'];
+ delete body['_url'];
+ delete body['_query'];
+ response = this.apiClient
+ .request({
+ path: path,
+ queryParams: queryParams,
+ body: JSON.stringify(body),
+ httpMethod: 'POST',
+ httpOptions: (_b = params.config) === null || _b === void 0 ? void 0 : _b.httpOptions,
+ })
+ .then((httpResponse) => {
+ return httpResponse.json();
+ });
+ return response.then((apiResponse) => {
+ const resp = generateImagesResponseFromMldev(this.apiClient, apiResponse);
+ const typedResp = new GenerateImagesResponse();
+ Object.assign(typedResp, resp);
+ return typedResp;
+ });
+ }
+ }
+ /**
+ * Counts the number of tokens in the given contents. Multimodal input is
+ * supported for Gemini models.
+ *
+ * @param params - The parameters for counting tokens.
+ * @return The response from the API.
+ *
+ * @example
+ * ```ts
+ * const response = await ai.models.countTokens({
+ * model: 'gemini-2.0-flash',
+ * contents: 'The quick brown fox jumps over the lazy dog.'
+ * });
+ * console.log(response);
+ * ```
+ */
+ async countTokens(params) {
+ var _a, _b;
+ let response;
+ let path = '';
+ let queryParams = {};
+ if (this.apiClient.isVertexAI()) {
+ const body = countTokensParametersToVertex(this.apiClient, params);
+ path = formatMap('{model}:countTokens', body['_url']);
+ queryParams = body['_query'];
+ delete body['config'];
+ delete body['_url'];
+ delete body['_query'];
+ response = this.apiClient
+ .request({
+ path: path,
+ queryParams: queryParams,
+ body: JSON.stringify(body),
+ httpMethod: 'POST',
+ httpOptions: (_a = params.config) === null || _a === void 0 ? void 0 : _a.httpOptions,
+ })
+ .then((httpResponse) => {
+ return httpResponse.json();
+ });
+ return response.then((apiResponse) => {
+ const resp = countTokensResponseFromVertex(this.apiClient, apiResponse);
+ const typedResp = new CountTokensResponse();
+ Object.assign(typedResp, resp);
+ return typedResp;
+ });
+ }
+ else {
+ const body = countTokensParametersToMldev(this.apiClient, params);
+ path = formatMap('{model}:countTokens', body['_url']);
+ queryParams = body['_query'];
+ delete body['config'];
+ delete body['_url'];
+ delete body['_query'];
+ response = this.apiClient
+ .request({
+ path: path,
+ queryParams: queryParams,
+ body: JSON.stringify(body),
+ httpMethod: 'POST',
+ httpOptions: (_b = params.config) === null || _b === void 0 ? void 0 : _b.httpOptions,
+ })
+ .then((httpResponse) => {
+ return httpResponse.json();
+ });
+ return response.then((apiResponse) => {
+ const resp = countTokensResponseFromMldev(this.apiClient, apiResponse);
+ const typedResp = new CountTokensResponse();
+ Object.assign(typedResp, resp);
+ return typedResp;
+ });
+ }
+ }
+ /**
+ * Given a list of contents, returns a corresponding TokensInfo containing
+ * the list of tokens and list of token ids.
+ *
+ * This method is not supported by the Gemini Developer API.
+ *
+ * @param params - The parameters for computing tokens.
+ * @return The response from the API.
+ *
+ * @example
+ * ```ts
+ * const response = await ai.models.computeTokens({
+ * model: 'gemini-2.0-flash',
+ * contents: 'What is your name?'
+ * });
+ * console.log(response);
+ * ```
+ */
+ async computeTokens(params) {
+ var _a;
+ let response;
+ let path = '';
+ let queryParams = {};
+ if (this.apiClient.isVertexAI()) {
+ const body = computeTokensParametersToVertex(this.apiClient, params);
+ path = formatMap('{model}:computeTokens', body['_url']);
+ queryParams = body['_query'];
+ delete body['config'];
+ delete body['_url'];
+ delete body['_query'];
+ response = this.apiClient
+ .request({
+ path: path,
+ queryParams: queryParams,
+ body: JSON.stringify(body),
+ httpMethod: 'POST',
+ httpOptions: (_a = params.config) === null || _a === void 0 ? void 0 : _a.httpOptions,
+ })
+ .then((httpResponse) => {
+ return httpResponse.json();
+ });
+ return response.then((apiResponse) => {
+ const resp = computeTokensResponseFromVertex(this.apiClient, apiResponse);
+ const typedResp = new ComputeTokensResponse();
+ Object.assign(typedResp, resp);
+ return typedResp;
+ });
+ }
+ else {
+            throw new Error('This method is only supported by Vertex AI.');
+ }
+ }
+ /**
+ * Generates videos based on a text description and configuration.
+ *
+ * @param params - The parameters for generating videos.
+ * @return A Promise which allows you to track the progress and eventually retrieve the generated videos using the operations.get method.
+ *
+ * @example
+ * ```ts
+     * let operation = await ai.models.generateVideos({
+     *   model: 'veo-2.0-generate-001',
+     *   prompt: 'A neon hologram of a cat driving at top speed',
+     *   config: {
+     *     numberOfVideos: 1
+     *   }
+     * });
+ *
+ * while (!operation.done) {
+ * await new Promise(resolve => setTimeout(resolve, 10000));
+ * operation = await ai.operations.get({operation: operation});
+ * }
+ *
+ * console.log(operation.result?.generatedVideos?.[0]?.video?.uri);
+ * ```
+ */
+ async generateVideos(params) {
+ var _a, _b;
+ let response;
+ let path = '';
+ let queryParams = {};
+ if (this.apiClient.isVertexAI()) {
+ const body = generateVideosParametersToVertex(this.apiClient, params);
+ path = formatMap('{model}:predictLongRunning', body['_url']);
+ queryParams = body['_query'];
+ delete body['config'];
+ delete body['_url'];
+ delete body['_query'];
+ response = this.apiClient
+ .request({
+ path: path,
+ queryParams: queryParams,
+ body: JSON.stringify(body),
+ httpMethod: 'POST',
+ httpOptions: (_a = params.config) === null || _a === void 0 ? void 0 : _a.httpOptions,
+ })
+ .then((httpResponse) => {
+ return httpResponse.json();
+ });
+ return response.then((apiResponse) => {
+ const resp = generateVideosOperationFromVertex$1(this.apiClient, apiResponse);
+ return resp;
+ });
+ }
+ else {
+ const body = generateVideosParametersToMldev(this.apiClient, params);
+ path = formatMap('{model}:predictLongRunning', body['_url']);
+ queryParams = body['_query'];
+ delete body['config'];
+ delete body['_url'];
+ delete body['_query'];
+ response = this.apiClient
+ .request({
+ path: path,
+ queryParams: queryParams,
+ body: JSON.stringify(body),
+ httpMethod: 'POST',
+ httpOptions: (_b = params.config) === null || _b === void 0 ? void 0 : _b.httpOptions,
+ })
+ .then((httpResponse) => {
+ return httpResponse.json();
+ });
+ return response.then((apiResponse) => {
+ const resp = generateVideosOperationFromMldev$1(this.apiClient, apiResponse);
+ return resp;
+ });
+ }
+ }
+}
+
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+function getOperationParametersToMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromOperationName = getValueByPath(fromObject, [
+ 'operationName',
+ ]);
+ if (fromOperationName != null) {
+ setValueByPath(toObject, ['_url', 'operationName'], fromOperationName);
+ }
+ const fromConfig = getValueByPath(fromObject, ['config']);
+ if (fromConfig != null) {
+ setValueByPath(toObject, ['config'], fromConfig);
+ }
+ return toObject;
+}
+function getOperationParametersToVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromOperationName = getValueByPath(fromObject, [
+ 'operationName',
+ ]);
+ if (fromOperationName != null) {
+ setValueByPath(toObject, ['_url', 'operationName'], fromOperationName);
+ }
+ const fromConfig = getValueByPath(fromObject, ['config']);
+ if (fromConfig != null) {
+ setValueByPath(toObject, ['config'], fromConfig);
+ }
+ return toObject;
+}
+function fetchPredictOperationParametersToVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromOperationName = getValueByPath(fromObject, [
+ 'operationName',
+ ]);
+ if (fromOperationName != null) {
+ setValueByPath(toObject, ['operationName'], fromOperationName);
+ }
+ const fromResourceName = getValueByPath(fromObject, ['resourceName']);
+ if (fromResourceName != null) {
+ setValueByPath(toObject, ['_url', 'resourceName'], fromResourceName);
+ }
+ const fromConfig = getValueByPath(fromObject, ['config']);
+ if (fromConfig != null) {
+ setValueByPath(toObject, ['config'], fromConfig);
+ }
+ return toObject;
+}
+function videoFromMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromUri = getValueByPath(fromObject, ['video', 'uri']);
+ if (fromUri != null) {
+ setValueByPath(toObject, ['uri'], fromUri);
+ }
+ const fromVideoBytes = getValueByPath(fromObject, [
+ 'video',
+ 'encodedVideo',
+ ]);
+ if (fromVideoBytes != null) {
+ setValueByPath(toObject, ['videoBytes'], tBytes(apiClient, fromVideoBytes));
+ }
+ const fromMimeType = getValueByPath(fromObject, ['encoding']);
+ if (fromMimeType != null) {
+ setValueByPath(toObject, ['mimeType'], fromMimeType);
+ }
+ return toObject;
+}
+function generatedVideoFromMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromVideo = getValueByPath(fromObject, ['_self']);
+ if (fromVideo != null) {
+ setValueByPath(toObject, ['video'], videoFromMldev(apiClient, fromVideo));
+ }
+ return toObject;
+}
+function generateVideosResponseFromMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromGeneratedVideos = getValueByPath(fromObject, [
+ 'generatedSamples',
+ ]);
+ if (fromGeneratedVideos != null) {
+ if (Array.isArray(fromGeneratedVideos)) {
+ setValueByPath(toObject, ['generatedVideos'], fromGeneratedVideos.map((item) => {
+ return generatedVideoFromMldev(apiClient, item);
+ }));
+ }
+ else {
+ setValueByPath(toObject, ['generatedVideos'], fromGeneratedVideos);
+ }
+ }
+ const fromRaiMediaFilteredCount = getValueByPath(fromObject, [
+ 'raiMediaFilteredCount',
+ ]);
+ if (fromRaiMediaFilteredCount != null) {
+ setValueByPath(toObject, ['raiMediaFilteredCount'], fromRaiMediaFilteredCount);
+ }
+ const fromRaiMediaFilteredReasons = getValueByPath(fromObject, [
+ 'raiMediaFilteredReasons',
+ ]);
+ if (fromRaiMediaFilteredReasons != null) {
+ setValueByPath(toObject, ['raiMediaFilteredReasons'], fromRaiMediaFilteredReasons);
+ }
+ return toObject;
+}
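+// MLDev nests the video payload under response.generateVideoResponse,
+// whereas the Vertex converter further below reads the operation response
+// directly.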
+function generateVideosOperationFromMldev(apiClient, fromObject) {
+ const toObject = {};
+ const fromName = getValueByPath(fromObject, ['name']);
+ if (fromName != null) {
+ setValueByPath(toObject, ['name'], fromName);
+ }
+ const fromMetadata = getValueByPath(fromObject, ['metadata']);
+ if (fromMetadata != null) {
+ setValueByPath(toObject, ['metadata'], fromMetadata);
+ }
+ const fromDone = getValueByPath(fromObject, ['done']);
+ if (fromDone != null) {
+ setValueByPath(toObject, ['done'], fromDone);
+ }
+ const fromError = getValueByPath(fromObject, ['error']);
+ if (fromError != null) {
+ setValueByPath(toObject, ['error'], fromError);
+ }
+ const fromResponse = getValueByPath(fromObject, ['response']);
+ if (fromResponse != null) {
+ setValueByPath(toObject, ['response'], fromResponse);
+ }
+ const fromResult = getValueByPath(fromObject, [
+ 'response',
+ 'generateVideoResponse',
+ ]);
+ if (fromResult != null) {
+ setValueByPath(toObject, ['result'], generateVideosResponseFromMldev(apiClient, fromResult));
+ }
+ return toObject;
+}
+function videoFromVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromUri = getValueByPath(fromObject, ['gcsUri']);
+ if (fromUri != null) {
+ setValueByPath(toObject, ['uri'], fromUri);
+ }
+ const fromVideoBytes = getValueByPath(fromObject, [
+ 'bytesBase64Encoded',
+ ]);
+ if (fromVideoBytes != null) {
+ setValueByPath(toObject, ['videoBytes'], tBytes(apiClient, fromVideoBytes));
+ }
+ const fromMimeType = getValueByPath(fromObject, ['mimeType']);
+ if (fromMimeType != null) {
+ setValueByPath(toObject, ['mimeType'], fromMimeType);
+ }
+ return toObject;
+}
+function generatedVideoFromVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromVideo = getValueByPath(fromObject, ['_self']);
+ if (fromVideo != null) {
+ setValueByPath(toObject, ['video'], videoFromVertex(apiClient, fromVideo));
+ }
+ return toObject;
+}
+function generateVideosResponseFromVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromGeneratedVideos = getValueByPath(fromObject, ['videos']);
+ if (fromGeneratedVideos != null) {
+ if (Array.isArray(fromGeneratedVideos)) {
+ setValueByPath(toObject, ['generatedVideos'], fromGeneratedVideos.map((item) => {
+ return generatedVideoFromVertex(apiClient, item);
+ }));
+ }
+ else {
+ setValueByPath(toObject, ['generatedVideos'], fromGeneratedVideos);
+ }
+ }
+ const fromRaiMediaFilteredCount = getValueByPath(fromObject, [
+ 'raiMediaFilteredCount',
+ ]);
+ if (fromRaiMediaFilteredCount != null) {
+ setValueByPath(toObject, ['raiMediaFilteredCount'], fromRaiMediaFilteredCount);
+ }
+ const fromRaiMediaFilteredReasons = getValueByPath(fromObject, [
+ 'raiMediaFilteredReasons',
+ ]);
+ if (fromRaiMediaFilteredReasons != null) {
+ setValueByPath(toObject, ['raiMediaFilteredReasons'], fromRaiMediaFilteredReasons);
+ }
+ return toObject;
+}
+function generateVideosOperationFromVertex(apiClient, fromObject) {
+ const toObject = {};
+ const fromName = getValueByPath(fromObject, ['name']);
+ if (fromName != null) {
+ setValueByPath(toObject, ['name'], fromName);
+ }
+ const fromMetadata = getValueByPath(fromObject, ['metadata']);
+ if (fromMetadata != null) {
+ setValueByPath(toObject, ['metadata'], fromMetadata);
+ }
+ const fromDone = getValueByPath(fromObject, ['done']);
+ if (fromDone != null) {
+ setValueByPath(toObject, ['done'], fromDone);
+ }
+ const fromError = getValueByPath(fromObject, ['error']);
+ if (fromError != null) {
+ setValueByPath(toObject, ['error'], fromError);
+ }
+ const fromResponse = getValueByPath(fromObject, ['response']);
+ if (fromResponse != null) {
+ setValueByPath(toObject, ['response'], fromResponse);
+ }
+ const fromResult = getValueByPath(fromObject, ['response']);
+ if (fromResult != null) {
+ setValueByPath(toObject, ['result'], generateVideosResponseFromVertex(apiClient, fromResult));
+ }
+ return toObject;
+}
+
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+class Operations extends BaseModule {
+ constructor(apiClient) {
+ super();
+ this.apiClient = apiClient;
+ }
+ /**
+ * Gets the status of a long-running operation.
+ *
+ * @param operation The Operation object returned by a previous API call.
+ * @return The updated Operation object, with the latest status or result.
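+     *
+     * @example
+     * ```ts
+     * // A sketch: poll a long-running generateVideos operation to completion.
+     * while (!operation.done) {
+     *   await new Promise((resolve) => setTimeout(resolve, 10000));
+     *   operation = await ai.operations.get({operation: operation});
+     * }
+     * ```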
+ */
+ async get(parameters) {
+ const operation = parameters.operation;
+ const config = parameters.config;
+ if (operation.name === undefined || operation.name === '') {
+ throw new Error('Operation name is required.');
+ }
+ if (this.apiClient.isVertexAI()) {
+ const resourceName = operation.name.split('/operations/')[0];
+            let httpOptions = undefined;
+ if (config && 'httpOptions' in config) {
+ httpOptions = config.httpOptions;
+ }
+ return this.fetchPredictVideosOperationInternal({
+ operationName: operation.name,
+ resourceName: resourceName,
+ config: { httpOptions: httpOptions },
+ });
+ }
+ else {
+ return this.getVideosOperationInternal({
+ operationName: operation.name,
+ config: config,
+ });
+ }
+ }
+ async getVideosOperationInternal(params) {
+ var _a, _b;
+ let response;
+ let path = '';
+ let queryParams = {};
+ if (this.apiClient.isVertexAI()) {
+ const body = getOperationParametersToVertex(this.apiClient, params);
+ path = formatMap('{operationName}', body['_url']);
+ queryParams = body['_query'];
+ delete body['config'];
+ delete body['_url'];
+ delete body['_query'];
+ response = this.apiClient
+ .request({
+ path: path,
+ queryParams: queryParams,
+ body: JSON.stringify(body),
+ httpMethod: 'GET',
+ httpOptions: (_a = params.config) === null || _a === void 0 ? void 0 : _a.httpOptions,
+ })
+ .then((httpResponse) => {
+ return httpResponse.json();
+ });
+ return response.then((apiResponse) => {
+ const resp = generateVideosOperationFromVertex(this.apiClient, apiResponse);
+ return resp;
+ });
+ }
+ else {
+ const body = getOperationParametersToMldev(this.apiClient, params);
+ path = formatMap('{operationName}', body['_url']);
+ queryParams = body['_query'];
+ delete body['config'];
+ delete body['_url'];
+ delete body['_query'];
+ response = this.apiClient
+ .request({
+ path: path,
+ queryParams: queryParams,
+ body: JSON.stringify(body),
+ httpMethod: 'GET',
+ httpOptions: (_b = params.config) === null || _b === void 0 ? void 0 : _b.httpOptions,
+ })
+ .then((httpResponse) => {
+ return httpResponse.json();
+ });
+ return response.then((apiResponse) => {
+ const resp = generateVideosOperationFromMldev(this.apiClient, apiResponse);
+ return resp;
+ });
+ }
+ }
+ async fetchPredictVideosOperationInternal(params) {
+ var _a;
+ let response;
+ let path = '';
+ let queryParams = {};
+ if (this.apiClient.isVertexAI()) {
+ const body = fetchPredictOperationParametersToVertex(this.apiClient, params);
+ path = formatMap('{resourceName}:fetchPredictOperation', body['_url']);
+ queryParams = body['_query'];
+ delete body['config'];
+ delete body['_url'];
+ delete body['_query'];
+ response = this.apiClient
+ .request({
+ path: path,
+ queryParams: queryParams,
+ body: JSON.stringify(body),
+ httpMethod: 'POST',
+ httpOptions: (_a = params.config) === null || _a === void 0 ? void 0 : _a.httpOptions,
+ })
+ .then((httpResponse) => {
+ return httpResponse.json();
+ });
+ return response.then((apiResponse) => {
+ const resp = generateVideosOperationFromVertex(this.apiClient, apiResponse);
+ return resp;
+ });
+ }
+ else {
+ throw new Error('This method is only supported by the Vertex AI API.');
+ }
+ }
+}
+
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+const GOOGLE_API_KEY_HEADER = 'x-goog-api-key';
+// TODO(b/395122533): We need a secure client side authentication mechanism.
+class WebAuth {
+ constructor(apiKey) {
+ this.apiKey = apiKey;
+ }
+ async addAuthHeaders(headers) {
+ if (headers.get(GOOGLE_API_KEY_HEADER) !== null) {
+ return;
+ }
+ headers.append(GOOGLE_API_KEY_HEADER, this.apiKey);
+ }
+}
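+// Illustrative usage (editorial sketch, not part of the shipped bundle):
+//   const headers = new Headers();
+//   await new WebAuth('MY_API_KEY').addAuthHeaders(headers);
+//   headers.get('x-goog-api-key'); // => 'MY_API_KEY'
+// A pre-existing x-goog-api-key header is left untouched.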
+
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+const LANGUAGE_LABEL_PREFIX = 'gl-node/';
+/**
+ * The Google GenAI SDK.
+ *
+ * @remarks
+ * Provides access to the GenAI features through either the {@link https://ai.google.dev/gemini-api/docs | Gemini API}
+ * or the {@link https://cloud.google.com/vertex-ai/docs/reference/rest | Vertex AI API}.
+ *
+ * The {@link GoogleGenAIOptions.vertexai} value determines which of the API services to use.
+ *
+ * When using the Gemini API, a {@link GoogleGenAIOptions.apiKey} must also be set;
+ * when using Vertex AI, {@link GoogleGenAIOptions.project} and {@link GoogleGenAIOptions.location} must also be set.
+ *
+ * @example
+ * Initializing the SDK for using the Gemini API:
+ * ```ts
+ * import {GoogleGenAI} from '@google/genai';
+ * const ai = new GoogleGenAI({apiKey: 'GEMINI_API_KEY'});
+ * ```
+ *
+ * @example
+ * Initializing the SDK for using the Vertex AI API:
+ * ```ts
+ * import {GoogleGenAI} from '@google/genai';
+ * const ai = new GoogleGenAI({
+ * vertexai: true,
+ * project: 'PROJECT_ID',
+ * location: 'PROJECT_LOCATION'
+ * });
+ * ```
+ *
+ */
+class GoogleGenAI {
+ constructor(options) {
+ var _a;
+ if (options.apiKey == null) {
+ throw new Error(`An API Key must be set when running in an unspecified environment.\n${crossError().message}`);
+ }
+ this.vertexai = (_a = options.vertexai) !== null && _a !== void 0 ? _a : false;
+ this.apiKey = options.apiKey;
+ this.apiVersion = options.apiVersion;
+ const auth = new WebAuth(this.apiKey);
+ this.apiClient = new ApiClient({
+ auth: auth,
+ apiVersion: this.apiVersion,
+ apiKey: this.apiKey,
+ vertexai: this.vertexai,
+ httpOptions: options.httpOptions,
+ userAgentExtra: LANGUAGE_LABEL_PREFIX + 'cross',
+ uploader: new CrossUploader(),
+ });
+ this.models = new Models(this.apiClient);
+ this.live = new Live(this.apiClient, auth, new CrossWebSocketFactory());
+ this.chats = new Chats(this.models, this.apiClient);
+ this.caches = new Caches(this.apiClient);
+ this.files = new Files(this.apiClient);
+ this.operations = new Operations(this.apiClient);
+ }
+}
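+// Illustrative usage of the submodules wired up above (editorial sketch;
+// the model name is a placeholder, run inside an async function):
+//   const ai = new GoogleGenAI({apiKey: 'GEMINI_API_KEY'});
+//   const response = await ai.models.generateContent({
+//     model: 'gemini-2.0-flash',
+//     contents: 'Why is the sky blue?',
+//   });
+//   console.log(response.text);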
+
+exports.Caches = Caches;
+exports.Chat = Chat;
+exports.Chats = Chats;
+exports.ComputeTokensResponse = ComputeTokensResponse;
+exports.CountTokensResponse = CountTokensResponse;
+exports.CreateFileResponse = CreateFileResponse;
+exports.DeleteCachedContentResponse = DeleteCachedContentResponse;
+exports.DeleteFileResponse = DeleteFileResponse;
+exports.EmbedContentResponse = EmbedContentResponse;
+exports.Files = Files;
+exports.FunctionResponse = FunctionResponse;
+exports.GenerateContentResponse = GenerateContentResponse;
+exports.GenerateContentResponsePromptFeedback = GenerateContentResponsePromptFeedback;
+exports.GenerateContentResponseUsageMetadata = GenerateContentResponseUsageMetadata;
+exports.GenerateImagesResponse = GenerateImagesResponse;
+exports.GenerateVideosResponse = GenerateVideosResponse;
+exports.GoogleGenAI = GoogleGenAI;
+exports.HttpResponse = HttpResponse;
+exports.ListCachedContentsResponse = ListCachedContentsResponse;
+exports.ListFilesResponse = ListFilesResponse;
+exports.Live = Live;
+exports.LiveClientToolResponse = LiveClientToolResponse;
+exports.LiveSendToolResponseParameters = LiveSendToolResponseParameters;
+exports.Models = Models;
+exports.Operations = Operations;
+exports.Pager = Pager;
+exports.ReplayResponse = ReplayResponse;
+exports.Session = Session;
+exports.createModelContent = createModelContent;
+exports.createPartFromBase64 = createPartFromBase64;
+exports.createPartFromCodeExecutionResult = createPartFromCodeExecutionResult;
+exports.createPartFromExecutableCode = createPartFromExecutableCode;
+exports.createPartFromFunctionCall = createPartFromFunctionCall;
+exports.createPartFromFunctionResponse = createPartFromFunctionResponse;
+exports.createPartFromText = createPartFromText;
+exports.createPartFromUri = createPartFromUri;
+exports.createUserContent = createUserContent;
+//# sourceMappingURL=index.js.map
diff --git a/node_modules/@google/genai/dist/index.js.map b/node_modules/@google/genai/dist/index.js.map
new file mode 100644
index 0000000..3668b26
--- /dev/null
+++ b/node_modules/@google/genai/dist/index.js.map
@@ -0,0 +1 @@
+{"version":3,"file":"index.js","sources":["../src/_common.ts","../src/_transformers.ts","../src/converters/_caches_converters.ts","../src/pagers.ts","../src/types.ts","../src/caches.ts","../src/chats.ts","../src/_api_client.ts","../src/cross/_cross_error.ts","../src/cross/_cross_uploader.ts","../src/cross/_cross_websocket.ts","../src/converters/_files_converters.ts","../src/files.ts","../src/converters/_models_converters.ts","../src/converters/_live_converters.ts","../src/live.ts","../src/models.ts","../src/converters/_operations_converters.ts","../src/operations.ts","../src/web/_web_auth.ts","../src/client.ts"],"sourcesContent":["/**\n * @license\n * Copyright 2025 Google LLC\n * SPDX-License-Identifier: Apache-2.0\n */\n\nimport {UploadFileConfig} from './types';\n\nexport class BaseModule {}\n\n// TODO (b/406332263): Move this to a place where it can be linked for doc.\n/** Parameters for the upload file method. */\nexport interface UploadFileParameters {\n /** The string path to the file to be uploaded or a Blob object. */\n file: string | Blob;\n /** Configuration that contains optional parameters. */\n config?: UploadFileConfig;\n}\n\nexport function formatMap(\n templateString: string,\n valueMap: Record,\n): string {\n // Use a regular expression to find all placeholders in the template string\n const regex = /\\{([^}]+)\\}/g;\n\n // Replace each placeholder with its corresponding value from the valueMap\n return templateString.replace(regex, (match, key) => {\n if (Object.prototype.hasOwnProperty.call(valueMap, key)) {\n const value = valueMap[key];\n // Convert the value to a string if it's not a string already\n return value !== undefined && value !== null ? String(value) : '';\n } else {\n // Handle missing keys\n throw new Error(`Key '${key}' not found in valueMap.`);\n }\n });\n}\n\nexport function setValueByPath(\n data: Record,\n keys: string[],\n value: unknown,\n): void {\n for (let i = 0; i < keys.length - 1; i++) {\n const key = keys[i];\n\n if (key.endsWith('[]')) {\n const keyName = key.slice(0, -2);\n if (!(keyName in data)) {\n if (Array.isArray(value)) {\n data[keyName] = Array.from({length: value.length}, () => ({}));\n } else {\n throw new Error(`Value must be a list given an array path ${key}`);\n }\n }\n\n if (Array.isArray(data[keyName])) {\n const arrayData = data[keyName] as Array;\n\n if (Array.isArray(value)) {\n for (let j = 0; j < arrayData.length; j++) {\n const entry = arrayData[j] as Record;\n setValueByPath(entry, keys.slice(i + 1), value[j]);\n }\n } else {\n for (const d of arrayData) {\n setValueByPath(\n d as Record,\n keys.slice(i + 1),\n value,\n );\n }\n }\n }\n return;\n } else if (key.endsWith('[0]')) {\n const keyName = key.slice(0, -3);\n if (!(keyName in data)) {\n data[keyName] = [{}];\n }\n const arrayData = (data as Record)[keyName];\n setValueByPath(\n (arrayData as Array>)[0],\n keys.slice(i + 1),\n value,\n );\n return;\n }\n\n if (!data[key] || typeof data[key] !== 'object') {\n data[key] = {};\n }\n\n data = data[key] as Record;\n }\n\n const keyToSet = keys[keys.length - 1];\n const existingData = data[keyToSet];\n\n if (existingData !== undefined) {\n if (\n !value ||\n (typeof value === 'object' && Object.keys(value).length === 0)\n ) {\n return;\n }\n\n if (value === existingData) {\n return;\n }\n\n if (\n typeof existingData === 'object' &&\n typeof value === 'object' &&\n existingData !== null &&\n value !== null\n ) {\n Object.assign(existingData, value);\n } else {\n throw new Error(`Cannot set value for an existing key. 
Key: ${keyToSet}`);\n }\n } else {\n data[keyToSet] = value;\n }\n}\n\nexport function getValueByPath(data: unknown, keys: string[]): unknown {\n try {\n if (keys.length === 1 && keys[0] === '_self') {\n return data;\n }\n\n for (let i = 0; i < keys.length; i++) {\n if (typeof data !== 'object' || data === null) {\n return undefined;\n }\n\n const key = keys[i];\n if (key.endsWith('[]')) {\n const keyName = key.slice(0, -2);\n if (keyName in data) {\n const arrayData = (data as Record)[keyName];\n if (!Array.isArray(arrayData)) {\n return undefined;\n }\n return arrayData.map((d) => getValueByPath(d, keys.slice(i + 1)));\n } else {\n return undefined;\n }\n } else {\n data = (data as Record)[key];\n }\n }\n\n return data;\n } catch (error) {\n if (error instanceof TypeError) {\n return undefined;\n }\n throw error;\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n * SPDX-License-Identifier: Apache-2.0\n */\n\nimport {ApiClient} from './_api_client';\nimport * as types from './types';\n\nexport function tModel(apiClient: ApiClient, model: string | unknown): string {\n if (!model || typeof model !== 'string') {\n throw new Error('model is required and must be a string');\n }\n\n if (apiClient.isVertexAI()) {\n if (\n model.startsWith('publishers/') ||\n model.startsWith('projects/') ||\n model.startsWith('models/')\n ) {\n return model;\n } else if (model.indexOf('/') >= 0) {\n const parts = model.split('/', 2);\n return `publishers/${parts[0]}/models/${parts[1]}`;\n } else {\n return `publishers/google/models/${model}`;\n }\n } else {\n if (model.startsWith('models/') || model.startsWith('tunedModels/')) {\n return model;\n } else {\n return `models/${model}`;\n }\n }\n}\n\nexport function tCachesModel(\n apiClient: ApiClient,\n model: string | unknown,\n): string {\n const transformedModel = tModel(apiClient, model as string);\n if (!transformedModel) {\n return '';\n }\n\n if (transformedModel.startsWith('publishers/') && apiClient.isVertexAI()) {\n // vertex caches only support model name start with projects.\n return `projects/${apiClient.getProject()}/locations/${apiClient.getLocation()}/${transformedModel}`;\n } else if (transformedModel.startsWith('models/') && apiClient.isVertexAI()) {\n return `projects/${apiClient.getProject()}/locations/${apiClient.getLocation()}/publishers/google/${transformedModel}`;\n } else {\n return transformedModel;\n }\n}\n\nexport function tPart(\n apiClient: ApiClient,\n origin?: types.PartUnion | null,\n): types.Part {\n if (origin === null || origin === undefined) {\n throw new Error('PartUnion is required');\n }\n if (typeof origin === 'object') {\n return origin;\n }\n if (typeof origin === 'string') {\n return {text: origin};\n }\n throw new Error(`Unsupported part type: ${typeof origin}`);\n}\n\nexport function tParts(\n apiClient: ApiClient,\n origin?: types.PartListUnion | null,\n): types.Part[] {\n if (\n origin === null ||\n origin === undefined ||\n (Array.isArray(origin) && origin.length === 0)\n ) {\n throw new Error('PartListUnion is required');\n }\n if (Array.isArray(origin)) {\n return origin.map((item) => tPart(apiClient, item as types.PartUnion)!);\n }\n return [tPart(apiClient, origin)!];\n}\n\nfunction _isContent(origin: unknown): boolean {\n return (\n origin !== null &&\n origin !== undefined &&\n typeof origin === 'object' &&\n 'parts' in origin &&\n Array.isArray(origin.parts)\n );\n}\n\nfunction _isFunctionCallPart(origin: unknown): boolean {\n return (\n origin !== null &&\n origin !== undefined &&\n typeof origin 
=== 'object' &&\n 'functionCall' in origin\n );\n}\n\nfunction _isUserPart(origin: unknown): boolean {\n if (origin === null || origin === undefined) {\n return false;\n }\n if (_isFunctionCallPart(origin)) {\n return false;\n }\n return true;\n}\n\nfunction _areUserParts(origin: types.PartListUnion[]): boolean {\n if (\n origin === null ||\n origin === undefined ||\n (Array.isArray(origin) && origin.length === 0)\n ) {\n return false;\n }\n return origin.every(_isUserPart);\n}\n\nexport function tContent(\n apiClient: ApiClient,\n origin?: types.ContentUnion,\n): types.Content {\n if (origin === null || origin === undefined) {\n throw new Error('ContentUnion is required');\n }\n if (_isContent(origin)) {\n // @ts-expect-error: _isContent is a utility function that checks if the\n // origin is a Content.\n return origin;\n }\n\n if (_isUserPart(origin)) {\n return {\n role: 'user',\n parts: tParts(apiClient, origin as types.PartListUnion)!,\n };\n } else {\n return {\n role: 'model',\n parts: tParts(apiClient, origin as types.PartListUnion)!,\n };\n }\n}\n\nexport function tContentsForEmbed(\n apiClient: ApiClient,\n origin: types.ContentListUnion,\n): types.ContentUnion[] {\n if (!origin) {\n return [];\n }\n if (apiClient.isVertexAI() && Array.isArray(origin)) {\n return origin.flatMap((item) => {\n const content = tContent(apiClient, item as types.ContentUnion);\n if (\n content.parts &&\n content.parts.length > 0 &&\n content.parts[0].text !== undefined\n ) {\n return [content.parts[0].text];\n }\n return [];\n });\n } else if (apiClient.isVertexAI()) {\n const content = tContent(apiClient, origin as types.ContentUnion);\n if (\n content.parts &&\n content.parts.length > 0 &&\n content.parts[0].text !== undefined\n ) {\n return [content.parts[0].text];\n }\n return [];\n }\n if (Array.isArray(origin)) {\n return origin.map(\n (item) => tContent(apiClient, item as types.ContentUnion)!,\n );\n }\n return [tContent(apiClient, origin as types.ContentUnion)!];\n}\n\nfunction _appendAccumulatedPartsAsContent(\n apiClient: ApiClient,\n result: types.Content[],\n accumulatedParts: types.PartUnion[],\n) {\n if (accumulatedParts.length === 0) {\n return;\n }\n if (_areUserParts(accumulatedParts)) {\n result.push({\n role: 'user',\n parts: tParts(apiClient, accumulatedParts),\n });\n } else {\n result.push({\n role: 'model',\n parts: tParts(apiClient, accumulatedParts),\n });\n }\n accumulatedParts.length = 0; // clear the array inplace\n}\n\nfunction _handleCurrentPart(\n apiClient: ApiClient,\n result: types.Content[],\n accumulatedParts: types.PartUnion[],\n currentPart: types.PartUnion,\n) {\n if (_isUserPart(currentPart) === _areUserParts(accumulatedParts)) {\n accumulatedParts.push(currentPart);\n } else {\n _appendAccumulatedPartsAsContent(apiClient, result, accumulatedParts);\n accumulatedParts.length = 0;\n accumulatedParts.push(currentPart);\n }\n}\n\nexport function tContents(\n apiClient: ApiClient,\n origin?: types.ContentListUnion,\n): types.Content[] {\n if (\n origin === null ||\n origin === undefined ||\n (Array.isArray(origin) && origin.length === 0)\n ) {\n throw new Error('contents are required');\n }\n if (!Array.isArray(origin)) {\n return [tContent(apiClient, origin)];\n }\n\n const result: types.Content[] = [];\n const accumulatedParts: types.PartUnion[] = [];\n\n for (const content of origin) {\n if (_isContent(content)) {\n _appendAccumulatedPartsAsContent(apiClient, result, accumulatedParts);\n // @ts-expect-error: content is a Content here\n result.push(content);\n } 
else if (\n typeof content === 'string' ||\n (typeof content === 'object' && !Array.isArray(content))\n ) {\n // @ts-expect-error: content is a part here\n _handleCurrentPart(apiClient, result, accumulatedParts, content);\n } else if (Array.isArray(content)) {\n // if there're consecutive user parts before the list,\n // convert to UserContent and append to result\n _appendAccumulatedPartsAsContent(apiClient, result, accumulatedParts);\n result.push({\n role: 'user',\n parts: tParts(apiClient, content),\n });\n } else {\n throw new Error(`Unsupported content type: ${typeof content}`);\n }\n }\n _appendAccumulatedPartsAsContent(apiClient, result, accumulatedParts);\n\n return result;\n}\n\nexport function processSchema(apiClient: ApiClient, schema: types.Schema) {\n if (!apiClient.isVertexAI()) {\n if ('default' in schema) {\n throw new Error(\n 'Default value is not supported in the response schema for the Gemini API.',\n );\n }\n }\n\n if ('anyOf' in schema) {\n if (schema['anyOf'] !== undefined) {\n for (const subSchema of schema['anyOf']) {\n processSchema(apiClient, subSchema);\n }\n }\n }\n\n if ('items' in schema) {\n if (schema['items'] !== undefined) {\n processSchema(apiClient, schema['items']);\n }\n }\n\n if ('properties' in schema) {\n if (schema['properties'] !== undefined) {\n for (const subSchema of Object.values(schema['properties'])) {\n processSchema(apiClient, subSchema);\n }\n }\n }\n}\n\nexport function tSchema(\n apiClient: ApiClient,\n schema: types.Schema,\n): types.Schema {\n processSchema(apiClient, schema);\n return schema;\n}\n\nexport function tSpeechConfig(\n apiClient: ApiClient,\n speechConfig: types.SpeechConfigUnion,\n): types.SpeechConfig {\n if (typeof speechConfig === 'object' && 'voiceConfig' in speechConfig) {\n return speechConfig;\n } else if (typeof speechConfig === 'string') {\n return {\n voiceConfig: {\n prebuiltVoiceConfig: {\n voiceName: speechConfig,\n },\n },\n };\n } else {\n throw new Error(`Unsupported speechConfig type: ${typeof speechConfig}`);\n }\n}\n\nexport function tTool(apiClient: ApiClient, tool: types.Tool): types.Tool {\n return tool;\n}\n\nexport function tTools(\n apiClient: ApiClient,\n tool: types.Tool[] | unknown,\n): types.Tool[] {\n if (!Array.isArray(tool)) {\n throw new Error('tool is required and must be an array of Tools');\n }\n return tool;\n}\n\n/**\n * Prepends resource name with project, location, resource_prefix if needed.\n *\n * @param client The API client.\n * @param resourceName The resource name.\n * @param resourcePrefix The resource prefix.\n * @param splitsAfterPrefix The number of splits after the prefix.\n * @returns The completed resource name.\n *\n * Examples:\n *\n * ```\n * resource_name = '123'\n * resource_prefix = 'cachedContents'\n * splits_after_prefix = 1\n * client.vertexai = True\n * client.project = 'bar'\n * client.location = 'us-west1'\n * _resource_name(client, resource_name, resource_prefix, splits_after_prefix)\n * returns: 'projects/bar/locations/us-west1/cachedContents/123'\n * ```\n *\n * ```\n * resource_name = 'projects/foo/locations/us-central1/cachedContents/123'\n * resource_prefix = 'cachedContents'\n * splits_after_prefix = 1\n * client.vertexai = True\n * client.project = 'bar'\n * client.location = 'us-west1'\n * _resource_name(client, resource_name, resource_prefix, splits_after_prefix)\n * returns: 'projects/foo/locations/us-central1/cachedContents/123'\n * ```\n *\n * ```\n * resource_name = '123'\n * resource_prefix = 'cachedContents'\n * splits_after_prefix = 1\n * 
client.vertexai = False\n * _resource_name(client, resource_name, resource_prefix, splits_after_prefix)\n * returns 'cachedContents/123'\n * ```\n *\n * ```\n * resource_name = 'some/wrong/cachedContents/resource/name/123'\n * resource_prefix = 'cachedContents'\n * splits_after_prefix = 1\n * client.vertexai = False\n * # client.vertexai = True\n * _resource_name(client, resource_name, resource_prefix, splits_after_prefix)\n * -> 'some/wrong/resource/name/123'\n * ```\n */\nfunction resourceName(\n client: ApiClient,\n resourceName: string,\n resourcePrefix: string,\n splitsAfterPrefix: number = 1,\n): string {\n const shouldAppendPrefix =\n !resourceName.startsWith(`${resourcePrefix}/`) &&\n resourceName.split('/').length === splitsAfterPrefix;\n if (client.isVertexAI()) {\n if (resourceName.startsWith('projects/')) {\n return resourceName;\n } else if (resourceName.startsWith('locations/')) {\n return `projects/${client.getProject()}/${resourceName}`;\n } else if (resourceName.startsWith(`${resourcePrefix}/`)) {\n return `projects/${client.getProject()}/locations/${client.getLocation()}/${resourceName}`;\n } else if (shouldAppendPrefix) {\n return `projects/${client.getProject()}/locations/${client.getLocation()}/${resourcePrefix}/${resourceName}`;\n } else {\n return resourceName;\n }\n }\n if (shouldAppendPrefix) {\n return `${resourcePrefix}/${resourceName}`;\n }\n return resourceName;\n}\n\nexport function tCachedContentName(\n apiClient: ApiClient,\n name: string | unknown,\n): string {\n if (typeof name !== 'string') {\n throw new Error('name must be a string');\n }\n return resourceName(apiClient, name, 'cachedContents');\n}\n\nexport function tTuningJobStatus(\n apiClient: ApiClient,\n status: string | unknown,\n): string {\n switch (status) {\n case 'STATE_UNSPECIFIED':\n return 'JOB_STATE_UNSPECIFIED';\n case 'CREATING':\n return 'JOB_STATE_RUNNING';\n case 'ACTIVE':\n return 'JOB_STATE_SUCCEEDED';\n case 'FAILED':\n return 'JOB_STATE_FAILED';\n default:\n return status as string;\n }\n}\n\nexport function tBytes(\n apiClient: ApiClient,\n fromImageBytes: string | unknown,\n): string {\n if (typeof fromImageBytes !== 'string') {\n throw new Error('fromImageBytes must be a string');\n }\n // TODO(b/389133914): Remove dummy bytes converter.\n return fromImageBytes;\n}\nexport function tFileName(\n apiClient: ApiClient,\n fromName: string | unknown,\n): string {\n if (typeof fromName !== 'string') {\n throw new Error('fromName must be a string');\n }\n // Remove the files/ prefx for MLdev urls to get the actual name of the file.\n if (fromName.startsWith('files/')) {\n return fromName.split('files/')[1];\n }\n return fromName;\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n * SPDX-License-Identifier: Apache-2.0\n */\n\n// Code generated by the Google Gen AI SDK generator DO NOT EDIT.\n\nimport {ApiClient} from '../_api_client';\nimport * as common from '../_common';\nimport * as t from '../_transformers';\nimport * as types from '../types';\n\nexport function partToMldev(\n apiClient: ApiClient,\n fromObject: types.Part,\n): Record {\n const toObject: Record = {};\n\n if (common.getValueByPath(fromObject, ['videoMetadata']) !== undefined) {\n throw new Error('videoMetadata parameter is not supported in Gemini API.');\n }\n\n const fromThought = common.getValueByPath(fromObject, ['thought']);\n if (fromThought != null) {\n common.setValueByPath(toObject, ['thought'], fromThought);\n }\n\n const fromCodeExecutionResult = common.getValueByPath(fromObject, [\n 
'codeExecutionResult',\n ]);\n if (fromCodeExecutionResult != null) {\n common.setValueByPath(\n toObject,\n ['codeExecutionResult'],\n fromCodeExecutionResult,\n );\n }\n\n const fromExecutableCode = common.getValueByPath(fromObject, [\n 'executableCode',\n ]);\n if (fromExecutableCode != null) {\n common.setValueByPath(toObject, ['executableCode'], fromExecutableCode);\n }\n\n const fromFileData = common.getValueByPath(fromObject, ['fileData']);\n if (fromFileData != null) {\n common.setValueByPath(toObject, ['fileData'], fromFileData);\n }\n\n const fromFunctionCall = common.getValueByPath(fromObject, ['functionCall']);\n if (fromFunctionCall != null) {\n common.setValueByPath(toObject, ['functionCall'], fromFunctionCall);\n }\n\n const fromFunctionResponse = common.getValueByPath(fromObject, [\n 'functionResponse',\n ]);\n if (fromFunctionResponse != null) {\n common.setValueByPath(toObject, ['functionResponse'], fromFunctionResponse);\n }\n\n const fromInlineData = common.getValueByPath(fromObject, ['inlineData']);\n if (fromInlineData != null) {\n common.setValueByPath(toObject, ['inlineData'], fromInlineData);\n }\n\n const fromText = common.getValueByPath(fromObject, ['text']);\n if (fromText != null) {\n common.setValueByPath(toObject, ['text'], fromText);\n }\n\n return toObject;\n}\n\nexport function contentToMldev(\n apiClient: ApiClient,\n fromObject: types.Content,\n): Record {\n const toObject: Record = {};\n\n const fromParts = common.getValueByPath(fromObject, ['parts']);\n if (fromParts != null) {\n if (Array.isArray(fromParts)) {\n common.setValueByPath(\n toObject,\n ['parts'],\n fromParts.map((item) => {\n return partToMldev(apiClient, item);\n }),\n );\n } else {\n common.setValueByPath(toObject, ['parts'], fromParts);\n }\n }\n\n const fromRole = common.getValueByPath(fromObject, ['role']);\n if (fromRole != null) {\n common.setValueByPath(toObject, ['role'], fromRole);\n }\n\n return toObject;\n}\n\nexport function schemaToMldev(\n apiClient: ApiClient,\n fromObject: types.Schema,\n): Record {\n const toObject: Record = {};\n\n if (common.getValueByPath(fromObject, ['example']) !== undefined) {\n throw new Error('example parameter is not supported in Gemini API.');\n }\n\n if (common.getValueByPath(fromObject, ['pattern']) !== undefined) {\n throw new Error('pattern parameter is not supported in Gemini API.');\n }\n\n if (common.getValueByPath(fromObject, ['default']) !== undefined) {\n throw new Error('default parameter is not supported in Gemini API.');\n }\n\n if (common.getValueByPath(fromObject, ['maxLength']) !== undefined) {\n throw new Error('maxLength parameter is not supported in Gemini API.');\n }\n\n if (common.getValueByPath(fromObject, ['minLength']) !== undefined) {\n throw new Error('minLength parameter is not supported in Gemini API.');\n }\n\n if (common.getValueByPath(fromObject, ['minProperties']) !== undefined) {\n throw new Error('minProperties parameter is not supported in Gemini API.');\n }\n\n if (common.getValueByPath(fromObject, ['maxProperties']) !== undefined) {\n throw new Error('maxProperties parameter is not supported in Gemini API.');\n }\n\n const fromAnyOf = common.getValueByPath(fromObject, ['anyOf']);\n if (fromAnyOf != null) {\n common.setValueByPath(toObject, ['anyOf'], fromAnyOf);\n }\n\n const fromDescription = common.getValueByPath(fromObject, ['description']);\n if (fromDescription != null) {\n common.setValueByPath(toObject, ['description'], fromDescription);\n }\n\n const fromEnum = common.getValueByPath(fromObject, 
['enum']);\n if (fromEnum != null) {\n common.setValueByPath(toObject, ['enum'], fromEnum);\n }\n\n const fromFormat = common.getValueByPath(fromObject, ['format']);\n if (fromFormat != null) {\n common.setValueByPath(toObject, ['format'], fromFormat);\n }\n\n const fromItems = common.getValueByPath(fromObject, ['items']);\n if (fromItems != null) {\n common.setValueByPath(toObject, ['items'], fromItems);\n }\n\n const fromMaxItems = common.getValueByPath(fromObject, ['maxItems']);\n if (fromMaxItems != null) {\n common.setValueByPath(toObject, ['maxItems'], fromMaxItems);\n }\n\n const fromMaximum = common.getValueByPath(fromObject, ['maximum']);\n if (fromMaximum != null) {\n common.setValueByPath(toObject, ['maximum'], fromMaximum);\n }\n\n const fromMinItems = common.getValueByPath(fromObject, ['minItems']);\n if (fromMinItems != null) {\n common.setValueByPath(toObject, ['minItems'], fromMinItems);\n }\n\n const fromMinimum = common.getValueByPath(fromObject, ['minimum']);\n if (fromMinimum != null) {\n common.setValueByPath(toObject, ['minimum'], fromMinimum);\n }\n\n const fromNullable = common.getValueByPath(fromObject, ['nullable']);\n if (fromNullable != null) {\n common.setValueByPath(toObject, ['nullable'], fromNullable);\n }\n\n const fromProperties = common.getValueByPath(fromObject, ['properties']);\n if (fromProperties != null) {\n common.setValueByPath(toObject, ['properties'], fromProperties);\n }\n\n const fromPropertyOrdering = common.getValueByPath(fromObject, [\n 'propertyOrdering',\n ]);\n if (fromPropertyOrdering != null) {\n common.setValueByPath(toObject, ['propertyOrdering'], fromPropertyOrdering);\n }\n\n const fromRequired = common.getValueByPath(fromObject, ['required']);\n if (fromRequired != null) {\n common.setValueByPath(toObject, ['required'], fromRequired);\n }\n\n const fromTitle = common.getValueByPath(fromObject, ['title']);\n if (fromTitle != null) {\n common.setValueByPath(toObject, ['title'], fromTitle);\n }\n\n const fromType = common.getValueByPath(fromObject, ['type']);\n if (fromType != null) {\n common.setValueByPath(toObject, ['type'], fromType);\n }\n\n return toObject;\n}\n\nexport function functionDeclarationToMldev(\n apiClient: ApiClient,\n fromObject: types.FunctionDeclaration,\n): Record {\n const toObject: Record = {};\n\n if (common.getValueByPath(fromObject, ['response']) !== undefined) {\n throw new Error('response parameter is not supported in Gemini API.');\n }\n\n const fromDescription = common.getValueByPath(fromObject, ['description']);\n if (fromDescription != null) {\n common.setValueByPath(toObject, ['description'], fromDescription);\n }\n\n const fromName = common.getValueByPath(fromObject, ['name']);\n if (fromName != null) {\n common.setValueByPath(toObject, ['name'], fromName);\n }\n\n const fromParameters = common.getValueByPath(fromObject, ['parameters']);\n if (fromParameters != null) {\n common.setValueByPath(toObject, ['parameters'], fromParameters);\n }\n\n return toObject;\n}\n\nexport function googleSearchToMldev(): Record {\n const toObject: Record = {};\n\n return toObject;\n}\n\nexport function dynamicRetrievalConfigToMldev(\n apiClient: ApiClient,\n fromObject: types.DynamicRetrievalConfig,\n): Record {\n const toObject: Record = {};\n\n const fromMode = common.getValueByPath(fromObject, ['mode']);\n if (fromMode != null) {\n common.setValueByPath(toObject, ['mode'], fromMode);\n }\n\n const fromDynamicThreshold = common.getValueByPath(fromObject, [\n 'dynamicThreshold',\n ]);\n if (fromDynamicThreshold != 
null) {\n common.setValueByPath(toObject, ['dynamicThreshold'], fromDynamicThreshold);\n }\n\n return toObject;\n}\n\nexport function googleSearchRetrievalToMldev(\n apiClient: ApiClient,\n fromObject: types.GoogleSearchRetrieval,\n): Record {\n const toObject: Record = {};\n\n const fromDynamicRetrievalConfig = common.getValueByPath(fromObject, [\n 'dynamicRetrievalConfig',\n ]);\n if (fromDynamicRetrievalConfig != null) {\n common.setValueByPath(\n toObject,\n ['dynamicRetrievalConfig'],\n dynamicRetrievalConfigToMldev(apiClient, fromDynamicRetrievalConfig),\n );\n }\n\n return toObject;\n}\n\nexport function toolToMldev(\n apiClient: ApiClient,\n fromObject: types.Tool,\n): Record {\n const toObject: Record = {};\n\n const fromFunctionDeclarations = common.getValueByPath(fromObject, [\n 'functionDeclarations',\n ]);\n if (fromFunctionDeclarations != null) {\n if (Array.isArray(fromFunctionDeclarations)) {\n common.setValueByPath(\n toObject,\n ['functionDeclarations'],\n fromFunctionDeclarations.map((item) => {\n return functionDeclarationToMldev(apiClient, item);\n }),\n );\n } else {\n common.setValueByPath(\n toObject,\n ['functionDeclarations'],\n fromFunctionDeclarations,\n );\n }\n }\n\n if (common.getValueByPath(fromObject, ['retrieval']) !== undefined) {\n throw new Error('retrieval parameter is not supported in Gemini API.');\n }\n\n const fromGoogleSearch = common.getValueByPath(fromObject, ['googleSearch']);\n if (fromGoogleSearch != null) {\n common.setValueByPath(toObject, ['googleSearch'], googleSearchToMldev());\n }\n\n const fromGoogleSearchRetrieval = common.getValueByPath(fromObject, [\n 'googleSearchRetrieval',\n ]);\n if (fromGoogleSearchRetrieval != null) {\n common.setValueByPath(\n toObject,\n ['googleSearchRetrieval'],\n googleSearchRetrievalToMldev(apiClient, fromGoogleSearchRetrieval),\n );\n }\n\n const fromCodeExecution = common.getValueByPath(fromObject, [\n 'codeExecution',\n ]);\n if (fromCodeExecution != null) {\n common.setValueByPath(toObject, ['codeExecution'], fromCodeExecution);\n }\n\n return toObject;\n}\n\nexport function functionCallingConfigToMldev(\n apiClient: ApiClient,\n fromObject: types.FunctionCallingConfig,\n): Record {\n const toObject: Record = {};\n\n const fromMode = common.getValueByPath(fromObject, ['mode']);\n if (fromMode != null) {\n common.setValueByPath(toObject, ['mode'], fromMode);\n }\n\n const fromAllowedFunctionNames = common.getValueByPath(fromObject, [\n 'allowedFunctionNames',\n ]);\n if (fromAllowedFunctionNames != null) {\n common.setValueByPath(\n toObject,\n ['allowedFunctionNames'],\n fromAllowedFunctionNames,\n );\n }\n\n return toObject;\n}\n\nexport function toolConfigToMldev(\n apiClient: ApiClient,\n fromObject: types.ToolConfig,\n): Record {\n const toObject: Record = {};\n\n const fromFunctionCallingConfig = common.getValueByPath(fromObject, [\n 'functionCallingConfig',\n ]);\n if (fromFunctionCallingConfig != null) {\n common.setValueByPath(\n toObject,\n ['functionCallingConfig'],\n functionCallingConfigToMldev(apiClient, fromFunctionCallingConfig),\n );\n }\n\n return toObject;\n}\n\nexport function createCachedContentConfigToMldev(\n apiClient: ApiClient,\n fromObject: types.CreateCachedContentConfig,\n parentObject: Record,\n): Record {\n const toObject: Record = {};\n\n const fromTtl = common.getValueByPath(fromObject, ['ttl']);\n if (parentObject !== undefined && fromTtl != null) {\n common.setValueByPath(parentObject, ['ttl'], fromTtl);\n }\n\n const fromExpireTime = 
common.getValueByPath(fromObject, ['expireTime']);\n if (parentObject !== undefined && fromExpireTime != null) {\n common.setValueByPath(parentObject, ['expireTime'], fromExpireTime);\n }\n\n const fromDisplayName = common.getValueByPath(fromObject, ['displayName']);\n if (parentObject !== undefined && fromDisplayName != null) {\n common.setValueByPath(parentObject, ['displayName'], fromDisplayName);\n }\n\n const fromContents = common.getValueByPath(fromObject, ['contents']);\n if (parentObject !== undefined && fromContents != null) {\n if (Array.isArray(fromContents)) {\n common.setValueByPath(\n parentObject,\n ['contents'],\n t.tContents(\n apiClient,\n t.tContents(apiClient, fromContents).map((item) => {\n return contentToMldev(apiClient, item);\n }),\n ),\n );\n } else {\n common.setValueByPath(\n parentObject,\n ['contents'],\n t.tContents(apiClient, fromContents),\n );\n }\n }\n\n const fromSystemInstruction = common.getValueByPath(fromObject, [\n 'systemInstruction',\n ]);\n if (parentObject !== undefined && fromSystemInstruction != null) {\n common.setValueByPath(\n parentObject,\n ['systemInstruction'],\n contentToMldev(apiClient, t.tContent(apiClient, fromSystemInstruction)),\n );\n }\n\n const fromTools = common.getValueByPath(fromObject, ['tools']);\n if (parentObject !== undefined && fromTools != null) {\n if (Array.isArray(fromTools)) {\n common.setValueByPath(\n parentObject,\n ['tools'],\n fromTools.map((item) => {\n return toolToMldev(apiClient, item);\n }),\n );\n } else {\n common.setValueByPath(parentObject, ['tools'], fromTools);\n }\n }\n\n const fromToolConfig = common.getValueByPath(fromObject, ['toolConfig']);\n if (parentObject !== undefined && fromToolConfig != null) {\n common.setValueByPath(\n parentObject,\n ['toolConfig'],\n toolConfigToMldev(apiClient, fromToolConfig),\n );\n }\n\n return toObject;\n}\n\nexport function createCachedContentParametersToMldev(\n apiClient: ApiClient,\n fromObject: types.CreateCachedContentParameters,\n): Record {\n const toObject: Record = {};\n\n const fromModel = common.getValueByPath(fromObject, ['model']);\n if (fromModel != null) {\n common.setValueByPath(\n toObject,\n ['model'],\n t.tCachesModel(apiClient, fromModel),\n );\n }\n\n const fromConfig = common.getValueByPath(fromObject, ['config']);\n if (fromConfig != null) {\n common.setValueByPath(\n toObject,\n ['config'],\n createCachedContentConfigToMldev(apiClient, fromConfig, toObject),\n );\n }\n\n return toObject;\n}\n\nexport function getCachedContentParametersToMldev(\n apiClient: ApiClient,\n fromObject: types.GetCachedContentParameters,\n): Record {\n const toObject: Record = {};\n\n const fromName = common.getValueByPath(fromObject, ['name']);\n if (fromName != null) {\n common.setValueByPath(\n toObject,\n ['_url', 'name'],\n t.tCachedContentName(apiClient, fromName),\n );\n }\n\n const fromConfig = common.getValueByPath(fromObject, ['config']);\n if (fromConfig != null) {\n common.setValueByPath(toObject, ['config'], fromConfig);\n }\n\n return toObject;\n}\n\nexport function deleteCachedContentParametersToMldev(\n apiClient: ApiClient,\n fromObject: types.DeleteCachedContentParameters,\n): Record {\n const toObject: Record = {};\n\n const fromName = common.getValueByPath(fromObject, ['name']);\n if (fromName != null) {\n common.setValueByPath(\n toObject,\n ['_url', 'name'],\n t.tCachedContentName(apiClient, fromName),\n );\n }\n\n const fromConfig = common.getValueByPath(fromObject, ['config']);\n if (fromConfig != null) {\n 
common.setValueByPath(toObject, ['config'], fromConfig);\n }\n\n return toObject;\n}\n\nexport function updateCachedContentConfigToMldev(\n apiClient: ApiClient,\n fromObject: types.UpdateCachedContentConfig,\n parentObject: Record,\n): Record {\n const toObject: Record = {};\n\n const fromTtl = common.getValueByPath(fromObject, ['ttl']);\n if (parentObject !== undefined && fromTtl != null) {\n common.setValueByPath(parentObject, ['ttl'], fromTtl);\n }\n\n const fromExpireTime = common.getValueByPath(fromObject, ['expireTime']);\n if (parentObject !== undefined && fromExpireTime != null) {\n common.setValueByPath(parentObject, ['expireTime'], fromExpireTime);\n }\n\n return toObject;\n}\n\nexport function updateCachedContentParametersToMldev(\n apiClient: ApiClient,\n fromObject: types.UpdateCachedContentParameters,\n): Record {\n const toObject: Record = {};\n\n const fromName = common.getValueByPath(fromObject, ['name']);\n if (fromName != null) {\n common.setValueByPath(\n toObject,\n ['_url', 'name'],\n t.tCachedContentName(apiClient, fromName),\n );\n }\n\n const fromConfig = common.getValueByPath(fromObject, ['config']);\n if (fromConfig != null) {\n common.setValueByPath(\n toObject,\n ['config'],\n updateCachedContentConfigToMldev(apiClient, fromConfig, toObject),\n );\n }\n\n return toObject;\n}\n\nexport function listCachedContentsConfigToMldev(\n apiClient: ApiClient,\n fromObject: types.ListCachedContentsConfig,\n parentObject: Record,\n): Record {\n const toObject: Record = {};\n\n const fromPageSize = common.getValueByPath(fromObject, ['pageSize']);\n if (parentObject !== undefined && fromPageSize != null) {\n common.setValueByPath(parentObject, ['_query', 'pageSize'], fromPageSize);\n }\n\n const fromPageToken = common.getValueByPath(fromObject, ['pageToken']);\n if (parentObject !== undefined && fromPageToken != null) {\n common.setValueByPath(parentObject, ['_query', 'pageToken'], fromPageToken);\n }\n\n return toObject;\n}\n\nexport function listCachedContentsParametersToMldev(\n apiClient: ApiClient,\n fromObject: types.ListCachedContentsParameters,\n): Record {\n const toObject: Record = {};\n\n const fromConfig = common.getValueByPath(fromObject, ['config']);\n if (fromConfig != null) {\n common.setValueByPath(\n toObject,\n ['config'],\n listCachedContentsConfigToMldev(apiClient, fromConfig, toObject),\n );\n }\n\n return toObject;\n}\n\nexport function partToVertex(\n apiClient: ApiClient,\n fromObject: types.Part,\n): Record {\n const toObject: Record = {};\n\n const fromVideoMetadata = common.getValueByPath(fromObject, [\n 'videoMetadata',\n ]);\n if (fromVideoMetadata != null) {\n common.setValueByPath(toObject, ['videoMetadata'], fromVideoMetadata);\n }\n\n const fromThought = common.getValueByPath(fromObject, ['thought']);\n if (fromThought != null) {\n common.setValueByPath(toObject, ['thought'], fromThought);\n }\n\n const fromCodeExecutionResult = common.getValueByPath(fromObject, [\n 'codeExecutionResult',\n ]);\n if (fromCodeExecutionResult != null) {\n common.setValueByPath(\n toObject,\n ['codeExecutionResult'],\n fromCodeExecutionResult,\n );\n }\n\n const fromExecutableCode = common.getValueByPath(fromObject, [\n 'executableCode',\n ]);\n if (fromExecutableCode != null) {\n common.setValueByPath(toObject, ['executableCode'], fromExecutableCode);\n }\n\n const fromFileData = common.getValueByPath(fromObject, ['fileData']);\n if (fromFileData != null) {\n common.setValueByPath(toObject, ['fileData'], fromFileData);\n }\n\n const fromFunctionCall = 
common.getValueByPath(fromObject, ['functionCall']);\n if (fromFunctionCall != null) {\n common.setValueByPath(toObject, ['functionCall'], fromFunctionCall);\n }\n\n const fromFunctionResponse = common.getValueByPath(fromObject, [\n 'functionResponse',\n ]);\n if (fromFunctionResponse != null) {\n common.setValueByPath(toObject, ['functionResponse'], fromFunctionResponse);\n }\n\n const fromInlineData = common.getValueByPath(fromObject, ['inlineData']);\n if (fromInlineData != null) {\n common.setValueByPath(toObject, ['inlineData'], fromInlineData);\n }\n\n const fromText = common.getValueByPath(fromObject, ['text']);\n if (fromText != null) {\n common.setValueByPath(toObject, ['text'], fromText);\n }\n\n return toObject;\n}\n\nexport function contentToVertex(\n apiClient: ApiClient,\n fromObject: types.Content,\n): Record {\n const toObject: Record = {};\n\n const fromParts = common.getValueByPath(fromObject, ['parts']);\n if (fromParts != null) {\n if (Array.isArray(fromParts)) {\n common.setValueByPath(\n toObject,\n ['parts'],\n fromParts.map((item) => {\n return partToVertex(apiClient, item);\n }),\n );\n } else {\n common.setValueByPath(toObject, ['parts'], fromParts);\n }\n }\n\n const fromRole = common.getValueByPath(fromObject, ['role']);\n if (fromRole != null) {\n common.setValueByPath(toObject, ['role'], fromRole);\n }\n\n return toObject;\n}\n\nexport function schemaToVertex(\n apiClient: ApiClient,\n fromObject: types.Schema,\n): Record {\n const toObject: Record = {};\n\n const fromExample = common.getValueByPath(fromObject, ['example']);\n if (fromExample != null) {\n common.setValueByPath(toObject, ['example'], fromExample);\n }\n\n const fromPattern = common.getValueByPath(fromObject, ['pattern']);\n if (fromPattern != null) {\n common.setValueByPath(toObject, ['pattern'], fromPattern);\n }\n\n const fromDefault = common.getValueByPath(fromObject, ['default']);\n if (fromDefault != null) {\n common.setValueByPath(toObject, ['default'], fromDefault);\n }\n\n const fromMaxLength = common.getValueByPath(fromObject, ['maxLength']);\n if (fromMaxLength != null) {\n common.setValueByPath(toObject, ['maxLength'], fromMaxLength);\n }\n\n const fromMinLength = common.getValueByPath(fromObject, ['minLength']);\n if (fromMinLength != null) {\n common.setValueByPath(toObject, ['minLength'], fromMinLength);\n }\n\n const fromMinProperties = common.getValueByPath(fromObject, [\n 'minProperties',\n ]);\n if (fromMinProperties != null) {\n common.setValueByPath(toObject, ['minProperties'], fromMinProperties);\n }\n\n const fromMaxProperties = common.getValueByPath(fromObject, [\n 'maxProperties',\n ]);\n if (fromMaxProperties != null) {\n common.setValueByPath(toObject, ['maxProperties'], fromMaxProperties);\n }\n\n const fromAnyOf = common.getValueByPath(fromObject, ['anyOf']);\n if (fromAnyOf != null) {\n common.setValueByPath(toObject, ['anyOf'], fromAnyOf);\n }\n\n const fromDescription = common.getValueByPath(fromObject, ['description']);\n if (fromDescription != null) {\n common.setValueByPath(toObject, ['description'], fromDescription);\n }\n\n const fromEnum = common.getValueByPath(fromObject, ['enum']);\n if (fromEnum != null) {\n common.setValueByPath(toObject, ['enum'], fromEnum);\n }\n\n const fromFormat = common.getValueByPath(fromObject, ['format']);\n if (fromFormat != null) {\n common.setValueByPath(toObject, ['format'], fromFormat);\n }\n\n const fromItems = common.getValueByPath(fromObject, ['items']);\n if (fromItems != null) {\n common.setValueByPath(toObject, 
['items'], fromItems);\n }\n\n const fromMaxItems = common.getValueByPath(fromObject, ['maxItems']);\n if (fromMaxItems != null) {\n common.setValueByPath(toObject, ['maxItems'], fromMaxItems);\n }\n\n const fromMaximum = common.getValueByPath(fromObject, ['maximum']);\n if (fromMaximum != null) {\n common.setValueByPath(toObject, ['maximum'], fromMaximum);\n }\n\n const fromMinItems = common.getValueByPath(fromObject, ['minItems']);\n if (fromMinItems != null) {\n common.setValueByPath(toObject, ['minItems'], fromMinItems);\n }\n\n const fromMinimum = common.getValueByPath(fromObject, ['minimum']);\n if (fromMinimum != null) {\n common.setValueByPath(toObject, ['minimum'], fromMinimum);\n }\n\n const fromNullable = common.getValueByPath(fromObject, ['nullable']);\n if (fromNullable != null) {\n common.setValueByPath(toObject, ['nullable'], fromNullable);\n }\n\n const fromProperties = common.getValueByPath(fromObject, ['properties']);\n if (fromProperties != null) {\n common.setValueByPath(toObject, ['properties'], fromProperties);\n }\n\n const fromPropertyOrdering = common.getValueByPath(fromObject, [\n 'propertyOrdering',\n ]);\n if (fromPropertyOrdering != null) {\n common.setValueByPath(toObject, ['propertyOrdering'], fromPropertyOrdering);\n }\n\n const fromRequired = common.getValueByPath(fromObject, ['required']);\n if (fromRequired != null) {\n common.setValueByPath(toObject, ['required'], fromRequired);\n }\n\n const fromTitle = common.getValueByPath(fromObject, ['title']);\n if (fromTitle != null) {\n common.setValueByPath(toObject, ['title'], fromTitle);\n }\n\n const fromType = common.getValueByPath(fromObject, ['type']);\n if (fromType != null) {\n common.setValueByPath(toObject, ['type'], fromType);\n }\n\n return toObject;\n}\n\nexport function functionDeclarationToVertex(\n apiClient: ApiClient,\n fromObject: types.FunctionDeclaration,\n): Record {\n const toObject: Record = {};\n\n const fromResponse = common.getValueByPath(fromObject, ['response']);\n if (fromResponse != null) {\n common.setValueByPath(\n toObject,\n ['response'],\n schemaToVertex(apiClient, fromResponse),\n );\n }\n\n const fromDescription = common.getValueByPath(fromObject, ['description']);\n if (fromDescription != null) {\n common.setValueByPath(toObject, ['description'], fromDescription);\n }\n\n const fromName = common.getValueByPath(fromObject, ['name']);\n if (fromName != null) {\n common.setValueByPath(toObject, ['name'], fromName);\n }\n\n const fromParameters = common.getValueByPath(fromObject, ['parameters']);\n if (fromParameters != null) {\n common.setValueByPath(toObject, ['parameters'], fromParameters);\n }\n\n return toObject;\n}\n\nexport function googleSearchToVertex(): Record {\n const toObject: Record = {};\n\n return toObject;\n}\n\nexport function dynamicRetrievalConfigToVertex(\n apiClient: ApiClient,\n fromObject: types.DynamicRetrievalConfig,\n): Record {\n const toObject: Record = {};\n\n const fromMode = common.getValueByPath(fromObject, ['mode']);\n if (fromMode != null) {\n common.setValueByPath(toObject, ['mode'], fromMode);\n }\n\n const fromDynamicThreshold = common.getValueByPath(fromObject, [\n 'dynamicThreshold',\n ]);\n if (fromDynamicThreshold != null) {\n common.setValueByPath(toObject, ['dynamicThreshold'], fromDynamicThreshold);\n }\n\n return toObject;\n}\n\nexport function googleSearchRetrievalToVertex(\n apiClient: ApiClient,\n fromObject: types.GoogleSearchRetrieval,\n): Record {\n const toObject: Record = {};\n\n const fromDynamicRetrievalConfig = 
common.getValueByPath(fromObject, [\n 'dynamicRetrievalConfig',\n ]);\n if (fromDynamicRetrievalConfig != null) {\n common.setValueByPath(\n toObject,\n ['dynamicRetrievalConfig'],\n dynamicRetrievalConfigToVertex(apiClient, fromDynamicRetrievalConfig),\n );\n }\n\n return toObject;\n}\n\nexport function toolToVertex(\n apiClient: ApiClient,\n fromObject: types.Tool,\n): Record {\n const toObject: Record = {};\n\n const fromFunctionDeclarations = common.getValueByPath(fromObject, [\n 'functionDeclarations',\n ]);\n if (fromFunctionDeclarations != null) {\n if (Array.isArray(fromFunctionDeclarations)) {\n common.setValueByPath(\n toObject,\n ['functionDeclarations'],\n fromFunctionDeclarations.map((item) => {\n return functionDeclarationToVertex(apiClient, item);\n }),\n );\n } else {\n common.setValueByPath(\n toObject,\n ['functionDeclarations'],\n fromFunctionDeclarations,\n );\n }\n }\n\n const fromRetrieval = common.getValueByPath(fromObject, ['retrieval']);\n if (fromRetrieval != null) {\n common.setValueByPath(toObject, ['retrieval'], fromRetrieval);\n }\n\n const fromGoogleSearch = common.getValueByPath(fromObject, ['googleSearch']);\n if (fromGoogleSearch != null) {\n common.setValueByPath(toObject, ['googleSearch'], googleSearchToVertex());\n }\n\n const fromGoogleSearchRetrieval = common.getValueByPath(fromObject, [\n 'googleSearchRetrieval',\n ]);\n if (fromGoogleSearchRetrieval != null) {\n common.setValueByPath(\n toObject,\n ['googleSearchRetrieval'],\n googleSearchRetrievalToVertex(apiClient, fromGoogleSearchRetrieval),\n );\n }\n\n const fromCodeExecution = common.getValueByPath(fromObject, [\n 'codeExecution',\n ]);\n if (fromCodeExecution != null) {\n common.setValueByPath(toObject, ['codeExecution'], fromCodeExecution);\n }\n\n return toObject;\n}\n\nexport function functionCallingConfigToVertex(\n apiClient: ApiClient,\n fromObject: types.FunctionCallingConfig,\n): Record {\n const toObject: Record = {};\n\n const fromMode = common.getValueByPath(fromObject, ['mode']);\n if (fromMode != null) {\n common.setValueByPath(toObject, ['mode'], fromMode);\n }\n\n const fromAllowedFunctionNames = common.getValueByPath(fromObject, [\n 'allowedFunctionNames',\n ]);\n if (fromAllowedFunctionNames != null) {\n common.setValueByPath(\n toObject,\n ['allowedFunctionNames'],\n fromAllowedFunctionNames,\n );\n }\n\n return toObject;\n}\n\nexport function toolConfigToVertex(\n apiClient: ApiClient,\n fromObject: types.ToolConfig,\n): Record {\n const toObject: Record = {};\n\n const fromFunctionCallingConfig = common.getValueByPath(fromObject, [\n 'functionCallingConfig',\n ]);\n if (fromFunctionCallingConfig != null) {\n common.setValueByPath(\n toObject,\n ['functionCallingConfig'],\n functionCallingConfigToVertex(apiClient, fromFunctionCallingConfig),\n );\n }\n\n return toObject;\n}\n\nexport function createCachedContentConfigToVertex(\n apiClient: ApiClient,\n fromObject: types.CreateCachedContentConfig,\n parentObject: Record,\n): Record {\n const toObject: Record = {};\n\n const fromTtl = common.getValueByPath(fromObject, ['ttl']);\n if (parentObject !== undefined && fromTtl != null) {\n common.setValueByPath(parentObject, ['ttl'], fromTtl);\n }\n\n const fromExpireTime = common.getValueByPath(fromObject, ['expireTime']);\n if (parentObject !== undefined && fromExpireTime != null) {\n common.setValueByPath(parentObject, ['expireTime'], fromExpireTime);\n }\n\n const fromDisplayName = common.getValueByPath(fromObject, ['displayName']);\n if (parentObject !== undefined && 
fromDisplayName != null) {\n common.setValueByPath(parentObject, ['displayName'], fromDisplayName);\n }\n\n const fromContents = common.getValueByPath(fromObject, ['contents']);\n if (parentObject !== undefined && fromContents != null) {\n if (Array.isArray(fromContents)) {\n common.setValueByPath(\n parentObject,\n ['contents'],\n t.tContents(\n apiClient,\n t.tContents(apiClient, fromContents).map((item) => {\n return contentToVertex(apiClient, item);\n }),\n ),\n );\n } else {\n common.setValueByPath(\n parentObject,\n ['contents'],\n t.tContents(apiClient, fromContents),\n );\n }\n }\n\n const fromSystemInstruction = common.getValueByPath(fromObject, [\n 'systemInstruction',\n ]);\n if (parentObject !== undefined && fromSystemInstruction != null) {\n common.setValueByPath(\n parentObject,\n ['systemInstruction'],\n contentToVertex(apiClient, t.tContent(apiClient, fromSystemInstruction)),\n );\n }\n\n const fromTools = common.getValueByPath(fromObject, ['tools']);\n if (parentObject !== undefined && fromTools != null) {\n if (Array.isArray(fromTools)) {\n common.setValueByPath(\n parentObject,\n ['tools'],\n fromTools.map((item) => {\n return toolToVertex(apiClient, item);\n }),\n );\n } else {\n common.setValueByPath(parentObject, ['tools'], fromTools);\n }\n }\n\n const fromToolConfig = common.getValueByPath(fromObject, ['toolConfig']);\n if (parentObject !== undefined && fromToolConfig != null) {\n common.setValueByPath(\n parentObject,\n ['toolConfig'],\n toolConfigToVertex(apiClient, fromToolConfig),\n );\n }\n\n return toObject;\n}\n\nexport function createCachedContentParametersToVertex(\n apiClient: ApiClient,\n fromObject: types.CreateCachedContentParameters,\n): Record {\n const toObject: Record = {};\n\n const fromModel = common.getValueByPath(fromObject, ['model']);\n if (fromModel != null) {\n common.setValueByPath(\n toObject,\n ['model'],\n t.tCachesModel(apiClient, fromModel),\n );\n }\n\n const fromConfig = common.getValueByPath(fromObject, ['config']);\n if (fromConfig != null) {\n common.setValueByPath(\n toObject,\n ['config'],\n createCachedContentConfigToVertex(apiClient, fromConfig, toObject),\n );\n }\n\n return toObject;\n}\n\nexport function getCachedContentParametersToVertex(\n apiClient: ApiClient,\n fromObject: types.GetCachedContentParameters,\n): Record {\n const toObject: Record = {};\n\n const fromName = common.getValueByPath(fromObject, ['name']);\n if (fromName != null) {\n common.setValueByPath(\n toObject,\n ['_url', 'name'],\n t.tCachedContentName(apiClient, fromName),\n );\n }\n\n const fromConfig = common.getValueByPath(fromObject, ['config']);\n if (fromConfig != null) {\n common.setValueByPath(toObject, ['config'], fromConfig);\n }\n\n return toObject;\n}\n\nexport function deleteCachedContentParametersToVertex(\n apiClient: ApiClient,\n fromObject: types.DeleteCachedContentParameters,\n): Record {\n const toObject: Record = {};\n\n const fromName = common.getValueByPath(fromObject, ['name']);\n if (fromName != null) {\n common.setValueByPath(\n toObject,\n ['_url', 'name'],\n t.tCachedContentName(apiClient, fromName),\n );\n }\n\n const fromConfig = common.getValueByPath(fromObject, ['config']);\n if (fromConfig != null) {\n common.setValueByPath(toObject, ['config'], fromConfig);\n }\n\n return toObject;\n}\n\nexport function updateCachedContentConfigToVertex(\n apiClient: ApiClient,\n fromObject: types.UpdateCachedContentConfig,\n parentObject: Record,\n): Record {\n const toObject: Record = {};\n\n const fromTtl = 
common.getValueByPath(fromObject, ['ttl']);\n if (parentObject !== undefined && fromTtl != null) {\n common.setValueByPath(parentObject, ['ttl'], fromTtl);\n }\n\n const fromExpireTime = common.getValueByPath(fromObject, ['expireTime']);\n if (parentObject !== undefined && fromExpireTime != null) {\n common.setValueByPath(parentObject, ['expireTime'], fromExpireTime);\n }\n\n return toObject;\n}\n\nexport function updateCachedContentParametersToVertex(\n apiClient: ApiClient,\n fromObject: types.UpdateCachedContentParameters,\n): Record {\n const toObject: Record = {};\n\n const fromName = common.getValueByPath(fromObject, ['name']);\n if (fromName != null) {\n common.setValueByPath(\n toObject,\n ['_url', 'name'],\n t.tCachedContentName(apiClient, fromName),\n );\n }\n\n const fromConfig = common.getValueByPath(fromObject, ['config']);\n if (fromConfig != null) {\n common.setValueByPath(\n toObject,\n ['config'],\n updateCachedContentConfigToVertex(apiClient, fromConfig, toObject),\n );\n }\n\n return toObject;\n}\n\nexport function listCachedContentsConfigToVertex(\n apiClient: ApiClient,\n fromObject: types.ListCachedContentsConfig,\n parentObject: Record,\n): Record {\n const toObject: Record = {};\n\n const fromPageSize = common.getValueByPath(fromObject, ['pageSize']);\n if (parentObject !== undefined && fromPageSize != null) {\n common.setValueByPath(parentObject, ['_query', 'pageSize'], fromPageSize);\n }\n\n const fromPageToken = common.getValueByPath(fromObject, ['pageToken']);\n if (parentObject !== undefined && fromPageToken != null) {\n common.setValueByPath(parentObject, ['_query', 'pageToken'], fromPageToken);\n }\n\n return toObject;\n}\n\nexport function listCachedContentsParametersToVertex(\n apiClient: ApiClient,\n fromObject: types.ListCachedContentsParameters,\n): Record {\n const toObject: Record = {};\n\n const fromConfig = common.getValueByPath(fromObject, ['config']);\n if (fromConfig != null) {\n common.setValueByPath(\n toObject,\n ['config'],\n listCachedContentsConfigToVertex(apiClient, fromConfig, toObject),\n );\n }\n\n return toObject;\n}\n\nexport function cachedContentFromMldev(\n apiClient: ApiClient,\n fromObject: types.CachedContent,\n): Record {\n const toObject: Record = {};\n\n const fromName = common.getValueByPath(fromObject, ['name']);\n if (fromName != null) {\n common.setValueByPath(toObject, ['name'], fromName);\n }\n\n const fromDisplayName = common.getValueByPath(fromObject, ['displayName']);\n if (fromDisplayName != null) {\n common.setValueByPath(toObject, ['displayName'], fromDisplayName);\n }\n\n const fromModel = common.getValueByPath(fromObject, ['model']);\n if (fromModel != null) {\n common.setValueByPath(toObject, ['model'], fromModel);\n }\n\n const fromCreateTime = common.getValueByPath(fromObject, ['createTime']);\n if (fromCreateTime != null) {\n common.setValueByPath(toObject, ['createTime'], fromCreateTime);\n }\n\n const fromUpdateTime = common.getValueByPath(fromObject, ['updateTime']);\n if (fromUpdateTime != null) {\n common.setValueByPath(toObject, ['updateTime'], fromUpdateTime);\n }\n\n const fromExpireTime = common.getValueByPath(fromObject, ['expireTime']);\n if (fromExpireTime != null) {\n common.setValueByPath(toObject, ['expireTime'], fromExpireTime);\n }\n\n const fromUsageMetadata = common.getValueByPath(fromObject, [\n 'usageMetadata',\n ]);\n if (fromUsageMetadata != null) {\n common.setValueByPath(toObject, ['usageMetadata'], fromUsageMetadata);\n }\n\n return toObject;\n}\n\nexport function 
deleteCachedContentResponseFromMldev(): Record<\n string,\n unknown\n> {\n const toObject: Record = {};\n\n return toObject;\n}\n\nexport function listCachedContentsResponseFromMldev(\n apiClient: ApiClient,\n fromObject: types.ListCachedContentsResponse,\n): Record {\n const toObject: Record = {};\n\n const fromNextPageToken = common.getValueByPath(fromObject, [\n 'nextPageToken',\n ]);\n if (fromNextPageToken != null) {\n common.setValueByPath(toObject, ['nextPageToken'], fromNextPageToken);\n }\n\n const fromCachedContents = common.getValueByPath(fromObject, [\n 'cachedContents',\n ]);\n if (fromCachedContents != null) {\n if (Array.isArray(fromCachedContents)) {\n common.setValueByPath(\n toObject,\n ['cachedContents'],\n fromCachedContents.map((item) => {\n return cachedContentFromMldev(apiClient, item);\n }),\n );\n } else {\n common.setValueByPath(toObject, ['cachedContents'], fromCachedContents);\n }\n }\n\n return toObject;\n}\n\nexport function cachedContentFromVertex(\n apiClient: ApiClient,\n fromObject: types.CachedContent,\n): Record {\n const toObject: Record = {};\n\n const fromName = common.getValueByPath(fromObject, ['name']);\n if (fromName != null) {\n common.setValueByPath(toObject, ['name'], fromName);\n }\n\n const fromDisplayName = common.getValueByPath(fromObject, ['displayName']);\n if (fromDisplayName != null) {\n common.setValueByPath(toObject, ['displayName'], fromDisplayName);\n }\n\n const fromModel = common.getValueByPath(fromObject, ['model']);\n if (fromModel != null) {\n common.setValueByPath(toObject, ['model'], fromModel);\n }\n\n const fromCreateTime = common.getValueByPath(fromObject, ['createTime']);\n if (fromCreateTime != null) {\n common.setValueByPath(toObject, ['createTime'], fromCreateTime);\n }\n\n const fromUpdateTime = common.getValueByPath(fromObject, ['updateTime']);\n if (fromUpdateTime != null) {\n common.setValueByPath(toObject, ['updateTime'], fromUpdateTime);\n }\n\n const fromExpireTime = common.getValueByPath(fromObject, ['expireTime']);\n if (fromExpireTime != null) {\n common.setValueByPath(toObject, ['expireTime'], fromExpireTime);\n }\n\n const fromUsageMetadata = common.getValueByPath(fromObject, [\n 'usageMetadata',\n ]);\n if (fromUsageMetadata != null) {\n common.setValueByPath(toObject, ['usageMetadata'], fromUsageMetadata);\n }\n\n return toObject;\n}\n\nexport function deleteCachedContentResponseFromVertex(): Record<\n string,\n unknown\n> {\n const toObject: Record = {};\n\n return toObject;\n}\n\nexport function listCachedContentsResponseFromVertex(\n apiClient: ApiClient,\n fromObject: types.ListCachedContentsResponse,\n): Record {\n const toObject: Record = {};\n\n const fromNextPageToken = common.getValueByPath(fromObject, [\n 'nextPageToken',\n ]);\n if (fromNextPageToken != null) {\n common.setValueByPath(toObject, ['nextPageToken'], fromNextPageToken);\n }\n\n const fromCachedContents = common.getValueByPath(fromObject, [\n 'cachedContents',\n ]);\n if (fromCachedContents != null) {\n if (Array.isArray(fromCachedContents)) {\n common.setValueByPath(\n toObject,\n ['cachedContents'],\n fromCachedContents.map((item) => {\n return cachedContentFromVertex(apiClient, item);\n }),\n );\n } else {\n common.setValueByPath(toObject, ['cachedContents'], fromCachedContents);\n }\n }\n\n return toObject;\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n * SPDX-License-Identifier: Apache-2.0\n */\n\n/**\n * Pagers for the GenAI List APIs.\n */\n\nexport enum PagedItem {\n PAGED_ITEM_BATCH_JOBS = 'batchJobs',\n 
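All of the generated `…ToVertex` / `…FromMldev` converters above reduce to the same move: read a value out of one nested record by path, and write it into another under a (possibly renamed) path. A minimal stand-in for the path helpers they rely on — an illustrative sketch, not the SDK's actual `common` module — might look like this:

```ts
// Hypothetical re-implementation of the path-helper semantics the generated
// converters assume; field names in the example mirror the cache converters.
type Rec = Record<string, unknown>;

function getValueByPath(obj: Rec, path: string[]): unknown {
  let cur: unknown = obj;
  for (const key of path) {
    if (typeof cur !== 'object' || cur === null) return undefined;
    cur = (cur as Rec)[key];
  }
  return cur;
}

function setValueByPath(obj: Rec, path: string[], value: unknown): void {
  let cur = obj;
  for (const key of path.slice(0, -1)) {
    if (typeof cur[key] !== 'object' || cur[key] === null) cur[key] = {};
    cur = cur[key] as Rec;
  }
  cur[path[path.length - 1]] = value;
}

// Converters copy one field at a time, renaming or re-nesting as they go:
const fromObject = {name: 'cachedContents/123', config: {ttl: '300s'}};
const toObject: Rec = {};
const ttl = getValueByPath(fromObject, ['config', 'ttl']);
if (ttl != null) setValueByPath(toObject, ['ttl'], ttl);
console.log(toObject); // {ttl: '300s'}
```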
PAGED_ITEM_MODELS = 'models',\n PAGED_ITEM_TUNING_JOBS = 'tuningJobs',\n PAGED_ITEM_FILES = 'files',\n PAGED_ITEM_CACHED_CONTENTS = 'cachedContents',\n}\n\ninterface PagedItemConfig {\n config?: {\n pageToken?: string;\n pageSize?: number;\n };\n}\n\ninterface PagedItemResponse {\n nextPageToken?: string;\n batchJobs?: T[];\n models?: T[];\n tuningJobs?: T[];\n files?: T[];\n cachedContents?: T[];\n}\n\n/**\n * Pager class for iterating through paginated results.\n */\nexport class Pager implements AsyncIterable {\n private nameInternal!: PagedItem;\n private pageInternal: T[] = [];\n private paramsInternal: PagedItemConfig = {};\n private pageInternalSize!: number;\n protected requestInternal!: (\n params: PagedItemConfig,\n ) => Promise>;\n protected idxInternal!: number;\n\n constructor(\n name: PagedItem,\n request: (params: PagedItemConfig) => Promise>,\n response: PagedItemResponse,\n params: PagedItemConfig,\n ) {\n this.requestInternal = request;\n this.init(name, response, params);\n }\n\n private init(\n name: PagedItem,\n response: PagedItemResponse,\n params: PagedItemConfig,\n ) {\n this.nameInternal = name;\n this.pageInternal = response[this.nameInternal] || [];\n this.idxInternal = 0;\n let requestParams: PagedItemConfig = {config: {}};\n if (!params) {\n requestParams = {config: {}};\n } else if (typeof params === 'object') {\n requestParams = {...params};\n } else {\n requestParams = params;\n }\n if (requestParams['config']) {\n requestParams['config']['pageToken'] = response['nextPageToken'];\n }\n this.paramsInternal = requestParams;\n this.pageInternalSize =\n requestParams['config']?.['pageSize'] ?? this.pageInternal.length;\n }\n\n private initNextPage(response: PagedItemResponse): void {\n this.init(this.nameInternal, response, this.paramsInternal);\n }\n\n /**\n * Returns the current page, which is a list of items.\n *\n * @remarks\n * The first page is retrieved when the pager is created. The returned list of\n * items could be a subset of the entire list.\n */\n get page(): T[] {\n return this.pageInternal;\n }\n\n /**\n * Returns the type of paged item (for example, ``batch_jobs``).\n */\n get name(): PagedItem {\n return this.nameInternal;\n }\n\n /**\n * Returns the length of the page fetched each time by this pager.\n *\n * @remarks\n * The number of items in the page is less than or equal to the page length.\n */\n get pageSize(): number {\n return this.pageInternalSize;\n }\n\n /**\n * Returns the parameters when making the API request for the next page.\n *\n * @remarks\n * Parameters contain a set of optional configs that can be\n * used to customize the API request. 
For example, the `pageToken` parameter\n * contains the token to request the next page.\n */\n get params(): PagedItemConfig {\n return this.paramsInternal;\n }\n\n /**\n * Returns the total number of items in the current page.\n */\n get pageLength(): number {\n return this.pageInternal.length;\n }\n\n /**\n * Returns the item at the given index.\n */\n getItem(index: number): T {\n return this.pageInternal[index];\n }\n\n /**\n * Returns an async iterator that support iterating through all items\n * retrieved from the API.\n *\n * @remarks\n * The iterator will automatically fetch the next page if there are more items\n * to fetch from the API.\n *\n * @example\n *\n * ```ts\n * const pager = await ai.files.list({config: {pageSize: 10}});\n * for await (const file of pager) {\n * console.log(file.name);\n * }\n * ```\n */\n [Symbol.asyncIterator](): AsyncIterator {\n return {\n next: async () => {\n if (this.idxInternal >= this.pageLength) {\n if (this.hasNextPage()) {\n await this.nextPage();\n } else {\n return {value: undefined, done: true};\n }\n }\n const item = this.getItem(this.idxInternal);\n this.idxInternal += 1;\n return {value: item, done: false};\n },\n return: async () => {\n return {value: undefined, done: true};\n },\n };\n }\n\n /**\n * Fetches the next page of items. This makes a new API request.\n *\n * @throws {Error} If there are no more pages to fetch.\n *\n * @example\n *\n * ```ts\n * const pager = await ai.files.list({config: {pageSize: 10}});\n * let page = pager.page;\n * while (true) {\n * for (const file of page) {\n * console.log(file.name);\n * }\n * if (!pager.hasNextPage()) {\n * break;\n * }\n * page = await pager.nextPage();\n * }\n * ```\n */\n async nextPage(): Promise {\n if (!this.hasNextPage()) {\n throw new Error('No more pages to fetch.');\n }\n const response = await this.requestInternal(this.params);\n this.initNextPage(response);\n return this.page;\n }\n\n /**\n * Returns true if there are more pages to fetch from the API.\n */\n hasNextPage(): boolean {\n if (this.params['config']?.['pageToken'] !== undefined) {\n return true;\n }\n return false;\n }\n}\n","/**\n * @license\n * Copyright 2025 Google LLC\n * SPDX-License-Identifier: Apache-2.0\n */\n\n// Code generated by the Google Gen AI SDK generator DO NOT EDIT.\n\nexport enum Outcome {\n OUTCOME_UNSPECIFIED = 'OUTCOME_UNSPECIFIED',\n OUTCOME_OK = 'OUTCOME_OK',\n OUTCOME_FAILED = 'OUTCOME_FAILED',\n OUTCOME_DEADLINE_EXCEEDED = 'OUTCOME_DEADLINE_EXCEEDED',\n}\n\nexport enum Language {\n LANGUAGE_UNSPECIFIED = 'LANGUAGE_UNSPECIFIED',\n PYTHON = 'PYTHON',\n}\n\nexport enum Type {\n TYPE_UNSPECIFIED = 'TYPE_UNSPECIFIED',\n STRING = 'STRING',\n NUMBER = 'NUMBER',\n INTEGER = 'INTEGER',\n BOOLEAN = 'BOOLEAN',\n ARRAY = 'ARRAY',\n OBJECT = 'OBJECT',\n}\n\nexport enum HarmCategory {\n HARM_CATEGORY_UNSPECIFIED = 'HARM_CATEGORY_UNSPECIFIED',\n HARM_CATEGORY_HATE_SPEECH = 'HARM_CATEGORY_HATE_SPEECH',\n HARM_CATEGORY_DANGEROUS_CONTENT = 'HARM_CATEGORY_DANGEROUS_CONTENT',\n HARM_CATEGORY_HARASSMENT = 'HARM_CATEGORY_HARASSMENT',\n HARM_CATEGORY_SEXUALLY_EXPLICIT = 'HARM_CATEGORY_SEXUALLY_EXPLICIT',\n HARM_CATEGORY_CIVIC_INTEGRITY = 'HARM_CATEGORY_CIVIC_INTEGRITY',\n}\n\nexport enum HarmBlockMethod {\n HARM_BLOCK_METHOD_UNSPECIFIED = 'HARM_BLOCK_METHOD_UNSPECIFIED',\n SEVERITY = 'SEVERITY',\n PROBABILITY = 'PROBABILITY',\n}\n\nexport enum HarmBlockThreshold {\n HARM_BLOCK_THRESHOLD_UNSPECIFIED = 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',\n BLOCK_LOW_AND_ABOVE = 'BLOCK_LOW_AND_ABOVE',\n BLOCK_MEDIUM_AND_ABOVE 
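The paging contract above (`page`, `hasNextPage()`, `nextPage()`, async iteration that fetches lazily) can be exercised end to end with a fake two-page request function. Note the assumptions: that `Pager` and `PagedItem` are importable from the package root and that constructing a `Pager` directly is acceptable — in normal use you would receive one from a list call such as `ai.files.list()`, as the JSDoc examples show.

```ts
// Sketch: driving the Pager contract with a fake two-page API (assumed imports).
import {Pager, PagedItem} from '@google/genai';

interface Model { name: string }

const pages = [
  {models: [{name: 'a'}, {name: 'b'}], nextPageToken: 'tok-2'},
  {models: [{name: 'c'}]}, // no nextPageToken: this is the last page
];
let call = 0;
const request = async () => pages[call++];

const first = await request();
const pager = new Pager<Model>(
  PagedItem.PAGED_ITEM_MODELS, first && request, first, {config: {pageSize: 2}},
);

for await (const model of pager) {
  console.log(model.name); // a, b, c — the second page is fetched automatically
}
```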
= 'BLOCK_MEDIUM_AND_ABOVE',\n BLOCK_ONLY_HIGH = 'BLOCK_ONLY_HIGH',\n BLOCK_NONE = 'BLOCK_NONE',\n OFF = 'OFF',\n}\n\nexport enum Mode {\n MODE_UNSPECIFIED = 'MODE_UNSPECIFIED',\n MODE_DYNAMIC = 'MODE_DYNAMIC',\n}\n\nexport enum FinishReason {\n FINISH_REASON_UNSPECIFIED = 'FINISH_REASON_UNSPECIFIED',\n STOP = 'STOP',\n MAX_TOKENS = 'MAX_TOKENS',\n SAFETY = 'SAFETY',\n RECITATION = 'RECITATION',\n OTHER = 'OTHER',\n BLOCKLIST = 'BLOCKLIST',\n PROHIBITED_CONTENT = 'PROHIBITED_CONTENT',\n SPII = 'SPII',\n MALFORMED_FUNCTION_CALL = 'MALFORMED_FUNCTION_CALL',\n IMAGE_SAFETY = 'IMAGE_SAFETY',\n}\n\nexport enum HarmProbability {\n HARM_PROBABILITY_UNSPECIFIED = 'HARM_PROBABILITY_UNSPECIFIED',\n NEGLIGIBLE = 'NEGLIGIBLE',\n LOW = 'LOW',\n MEDIUM = 'MEDIUM',\n HIGH = 'HIGH',\n}\n\nexport enum HarmSeverity {\n HARM_SEVERITY_UNSPECIFIED = 'HARM_SEVERITY_UNSPECIFIED',\n HARM_SEVERITY_NEGLIGIBLE = 'HARM_SEVERITY_NEGLIGIBLE',\n HARM_SEVERITY_LOW = 'HARM_SEVERITY_LOW',\n HARM_SEVERITY_MEDIUM = 'HARM_SEVERITY_MEDIUM',\n HARM_SEVERITY_HIGH = 'HARM_SEVERITY_HIGH',\n}\n\nexport enum BlockedReason {\n BLOCKED_REASON_UNSPECIFIED = 'BLOCKED_REASON_UNSPECIFIED',\n SAFETY = 'SAFETY',\n OTHER = 'OTHER',\n BLOCKLIST = 'BLOCKLIST',\n PROHIBITED_CONTENT = 'PROHIBITED_CONTENT',\n}\n\nexport enum Modality {\n MODALITY_UNSPECIFIED = 'MODALITY_UNSPECIFIED',\n TEXT = 'TEXT',\n IMAGE = 'IMAGE',\n AUDIO = 'AUDIO',\n}\n\nexport enum State {\n STATE_UNSPECIFIED = 'STATE_UNSPECIFIED',\n ACTIVE = 'ACTIVE',\n ERROR = 'ERROR',\n}\n\nexport enum DynamicRetrievalConfigMode {\n MODE_UNSPECIFIED = 'MODE_UNSPECIFIED',\n MODE_DYNAMIC = 'MODE_DYNAMIC',\n}\n\nexport enum FunctionCallingConfigMode {\n MODE_UNSPECIFIED = 'MODE_UNSPECIFIED',\n AUTO = 'AUTO',\n ANY = 'ANY',\n NONE = 'NONE',\n}\n\nexport enum MediaResolution {\n MEDIA_RESOLUTION_UNSPECIFIED = 'MEDIA_RESOLUTION_UNSPECIFIED',\n MEDIA_RESOLUTION_LOW = 'MEDIA_RESOLUTION_LOW',\n MEDIA_RESOLUTION_MEDIUM = 'MEDIA_RESOLUTION_MEDIUM',\n MEDIA_RESOLUTION_HIGH = 'MEDIA_RESOLUTION_HIGH',\n}\n\nexport enum SafetyFilterLevel {\n BLOCK_LOW_AND_ABOVE = 'BLOCK_LOW_AND_ABOVE',\n BLOCK_MEDIUM_AND_ABOVE = 'BLOCK_MEDIUM_AND_ABOVE',\n BLOCK_ONLY_HIGH = 'BLOCK_ONLY_HIGH',\n BLOCK_NONE = 'BLOCK_NONE',\n}\n\nexport enum PersonGeneration {\n DONT_ALLOW = 'DONT_ALLOW',\n ALLOW_ADULT = 'ALLOW_ADULT',\n ALLOW_ALL = 'ALLOW_ALL',\n}\n\nexport enum ImagePromptLanguage {\n auto = 'auto',\n en = 'en',\n ja = 'ja',\n ko = 'ko',\n hi = 'hi',\n}\n\nexport enum FileState {\n STATE_UNSPECIFIED = 'STATE_UNSPECIFIED',\n PROCESSING = 'PROCESSING',\n ACTIVE = 'ACTIVE',\n FAILED = 'FAILED',\n}\n\nexport enum FileSource {\n SOURCE_UNSPECIFIED = 'SOURCE_UNSPECIFIED',\n UPLOADED = 'UPLOADED',\n GENERATED = 'GENERATED',\n}\n\nexport enum MaskReferenceMode {\n MASK_MODE_DEFAULT = 'MASK_MODE_DEFAULT',\n MASK_MODE_USER_PROVIDED = 'MASK_MODE_USER_PROVIDED',\n MASK_MODE_BACKGROUND = 'MASK_MODE_BACKGROUND',\n MASK_MODE_FOREGROUND = 'MASK_MODE_FOREGROUND',\n MASK_MODE_SEMANTIC = 'MASK_MODE_SEMANTIC',\n}\n\nexport enum ControlReferenceType {\n CONTROL_TYPE_DEFAULT = 'CONTROL_TYPE_DEFAULT',\n CONTROL_TYPE_CANNY = 'CONTROL_TYPE_CANNY',\n CONTROL_TYPE_SCRIBBLE = 'CONTROL_TYPE_SCRIBBLE',\n CONTROL_TYPE_FACE_MESH = 'CONTROL_TYPE_FACE_MESH',\n}\n\nexport enum SubjectReferenceType {\n SUBJECT_TYPE_DEFAULT = 'SUBJECT_TYPE_DEFAULT',\n SUBJECT_TYPE_PERSON = 'SUBJECT_TYPE_PERSON',\n SUBJECT_TYPE_ANIMAL = 'SUBJECT_TYPE_ANIMAL',\n SUBJECT_TYPE_PRODUCT = 'SUBJECT_TYPE_PRODUCT',\n}\n\nexport enum MediaModality {\n MODALITY_UNSPECIFIED = 
'MODALITY_UNSPECIFIED',\n TEXT = 'TEXT',\n IMAGE = 'IMAGE',\n VIDEO = 'VIDEO',\n AUDIO = 'AUDIO',\n DOCUMENT = 'DOCUMENT',\n}\n\n/** Metadata describes the input video content. */\nexport declare interface VideoMetadata {\n /** Optional. The end offset of the video. */\n endOffset?: string;\n /** Optional. The start offset of the video. */\n startOffset?: string;\n}\n\n/** Result of executing the [ExecutableCode]. Always follows a `part` containing the [ExecutableCode]. */\nexport declare interface CodeExecutionResult {\n /** Required. Outcome of the code execution. */\n outcome?: Outcome;\n /** Optional. Contains stdout when code execution is successful, stderr or other description otherwise. */\n output?: string;\n}\n\n/** Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [FunctionDeclaration] tool and [FunctionCallingConfig] mode is set to [Mode.CODE]. */\nexport declare interface ExecutableCode {\n /** Required. The code to be executed. */\n code?: string;\n /** Required. Programming language of the `code`. */\n language?: Language;\n}\n\n/** URI based data. */\nexport declare interface FileData {\n /** Required. URI. */\n fileUri?: string;\n /** Required. The IANA standard MIME type of the source data. */\n mimeType?: string;\n}\n\n/** A function call. */\nexport declare interface FunctionCall {\n /** The unique id of the function call. If populated, the client to execute the\n `function_call` and return the response with the matching `id`. */\n id?: string;\n /** Optional. Required. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details. */\n args?: Record;\n /** Required. The name of the function to call. Matches [FunctionDeclaration.name]. */\n name?: string;\n}\n\n/** A function response. */\nexport class FunctionResponse {\n /** The id of the function call this response is for. Populated by the client\n to match the corresponding function call `id`. */\n id?: string;\n /** Required. The name of the function to call. Matches [FunctionDeclaration.name] and [FunctionCall.name]. */\n name?: string;\n /** Required. The function response in JSON object format. Use \"output\" key to specify function output and \"error\" key to specify error details (if any). If \"output\" and \"error\" keys are not specified, then whole \"response\" is treated as function output. */\n response?: Record;\n}\n\n/** Content blob. */\nexport declare interface Blob {\n /** Required. Raw bytes. */\n data?: string;\n /** Required. The IANA standard MIME type of the source data. */\n mimeType?: string;\n}\n\n/** A datatype containing media content.\n\n Exactly one field within a Part should be set, representing the specific type\n of content being conveyed. Using multiple fields within the same `Part`\n instance is considered invalid.\n */\nexport declare interface Part {\n /** Metadata for a given video. */\n videoMetadata?: VideoMetadata;\n /** Indicates if the part is thought from the model. */\n thought?: boolean;\n /** Optional. Result of executing the [ExecutableCode]. */\n codeExecutionResult?: CodeExecutionResult;\n /** Optional. Code generated by the model that is meant to be executed. */\n executableCode?: ExecutableCode;\n /** Optional. URI based data. */\n fileData?: FileData;\n /** Optional. A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] with the parameters and their values. 
*/\n functionCall?: FunctionCall;\n /** Optional. The result output of a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function call. It is used as context to the model. */\n functionResponse?: FunctionResponse;\n /** Optional. Inlined bytes data. */\n inlineData?: Blob;\n /** Optional. Text part (can be code). */\n text?: string;\n}\n/**\n * Creates a `Part` object from a `URI` string.\n */\nexport function createPartFromUri(uri: string, mimeType: string): Part {\n return {\n fileData: {\n fileUri: uri,\n mimeType: mimeType,\n },\n };\n}\n/**\n * Creates a `Part` object from a `text` string.\n */\nexport function createPartFromText(text: string): Part {\n return {\n text: text,\n };\n}\n/**\n * Creates a `Part` object from a `FunctionCall` object.\n */\nexport function createPartFromFunctionCall(\n name: string,\n args: Record,\n): Part {\n return {\n functionCall: {\n name: name,\n args: args,\n },\n };\n}\n/**\n * Creates a `Part` object from a `FunctionResponse` object.\n */\nexport function createPartFromFunctionResponse(\n id: string,\n name: string,\n response: Record,\n): Part {\n return {\n functionResponse: {\n id: id,\n name: name,\n response: response,\n },\n };\n}\n/**\n * Creates a `Part` object from a `base64` `string`.\n */\nexport function createPartFromBase64(data: string, mimeType: string): Part {\n return {\n inlineData: {\n data: data,\n mimeType: mimeType,\n },\n };\n}\n/**\n * Creates a `Part` object from the `outcome` and `output` of a `CodeExecutionResult` object.\n */\nexport function createPartFromCodeExecutionResult(\n outcome: Outcome,\n output: string,\n): Part {\n return {\n codeExecutionResult: {\n outcome: outcome,\n output: output,\n },\n };\n}\n/**\n * Creates a `Part` object from the `code` and `language` of an `ExecutableCode` object.\n */\nexport function createPartFromExecutableCode(\n code: string,\n language: Language,\n): Part {\n return {\n executableCode: {\n code: code,\n language: language,\n },\n };\n}\n\n/** Contains the multi-part content of a message. */\nexport declare interface Content {\n /** List of parts that constitute a single message. Each part may have\n a different IANA MIME type. */\n parts?: Part[];\n /** Optional. The producer of the content. Must be either 'user' or\n 'model'. Useful to set for multi-turn conversations, otherwise can be\n left blank or unset. If role is not specified, SDK will determine the role. 
*/\n role?: string;\n}\nfunction _isPart(obj: unknown): obj is Part {\n if (typeof obj === 'object' && obj !== null) {\n return (\n 'fileData' in obj ||\n 'text' in obj ||\n 'functionCall' in obj ||\n 'functionResponse' in obj ||\n 'inlineData' in obj ||\n 'videoMetadata' in obj ||\n 'codeExecutionResult' in obj ||\n 'executableCode' in obj\n );\n }\n return false;\n}\nfunction _toParts(partOrString: PartListUnion | string): Part[] {\n const parts: Part[] = [];\n if (typeof partOrString === 'string') {\n parts.push(createPartFromText(partOrString));\n } else if (_isPart(partOrString)) {\n parts.push(partOrString);\n } else if (Array.isArray(partOrString)) {\n if (partOrString.length === 0) {\n throw new Error('partOrString cannot be an empty array');\n }\n for (const part of partOrString) {\n if (typeof part === 'string') {\n parts.push(createPartFromText(part));\n } else if (_isPart(part)) {\n parts.push(part);\n } else {\n throw new Error('element in PartUnion must be a Part object or string');\n }\n }\n } else {\n throw new Error('partOrString must be a Part object, string, or array');\n }\n return parts;\n}\n/**\n * Creates a `Content` object with a user role from a `PartListUnion` object or `string`.\n */\nexport function createUserContent(\n partOrString: PartListUnion | string,\n): Content {\n return {\n role: 'user',\n parts: _toParts(partOrString),\n };\n}\n\n/**\n * Creates a `Content` object with a model role from a `PartListUnion` object or `string`.\n */\nexport function createModelContent(\n partOrString: PartListUnion | string,\n): Content {\n return {\n role: 'model',\n parts: _toParts(partOrString),\n };\n}\n/** HTTP options to be used in each of the requests. */\nexport declare interface HttpOptions {\n /** The base URL for the AI platform service endpoint. */\n baseUrl?: string;\n /** Specifies the version of the API to use. */\n apiVersion?: string;\n /** Additional HTTP headers to be sent with the request. */\n headers?: Record;\n /** Timeout for the request in milliseconds. */\n timeout?: number;\n}\n\n/** Schema that defines the format of input and output data.\n\n Represents a select subset of an OpenAPI 3.0 schema object.\n */\nexport declare interface Schema {\n /** Optional. Example of the object. Will only populated when the object is the root. */\n example?: unknown;\n /** Optional. Pattern of the Type.STRING to restrict a string to a regular expression. */\n pattern?: string;\n /** Optional. Default value of the data. */\n default?: unknown;\n /** Optional. Maximum length of the Type.STRING */\n maxLength?: string;\n /** Optional. SCHEMA FIELDS FOR TYPE STRING Minimum length of the Type.STRING */\n minLength?: string;\n /** Optional. Minimum number of the properties for Type.OBJECT. */\n minProperties?: string;\n /** Optional. Maximum number of the properties for Type.OBJECT. */\n maxProperties?: string;\n /** Optional. The value should be validated against any (one or more) of the subschemas in the list. */\n anyOf?: Schema[];\n /** Optional. The description of the data. */\n description?: string;\n /** Optional. Possible values of the element of primitive type with enum format. Examples: 1. We can define direction as : {type:STRING, format:enum, enum:[\"EAST\", NORTH\", \"SOUTH\", \"WEST\"]} 2. We can define apartment number as : {type:INTEGER, format:enum, enum:[\"101\", \"201\", \"301\"]} */\n enum?: string[];\n /** Optional. The format of the data. 
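Taken together, the `createPartFrom*` helpers and `createUserContent` make it easy to assemble a multimodal message without writing `Part` literals by hand. A short sketch using only the exports defined above (the bucket URI is hypothetical):

```ts
import {
  createPartFromText,
  createPartFromUri,
  createUserContent,
} from '@google/genai';

// A bare string becomes a single text Part with role 'user':
const simple = createUserContent('Describe this image.');

// Strings and Parts can be freely mixed in one message:
const multimodal = createUserContent([
  createPartFromText('Describe this image:'),
  createPartFromUri('gs://my-bucket/cat.png', 'image/png'), // hypothetical URI
]);

console.log(JSON.stringify(multimodal, null, 2));
// => {role: 'user', parts: [{text: ...}, {fileData: {fileUri: ..., mimeType: ...}}]}
```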
Supported formats: for NUMBER type: \"float\", \"double\" for INTEGER type: \"int32\", \"int64\" for STRING type: \"email\", \"byte\", etc */\n format?: string;\n /** Optional. SCHEMA FIELDS FOR TYPE ARRAY Schema of the elements of Type.ARRAY. */\n items?: Schema;\n /** Optional. Maximum number of the elements for Type.ARRAY. */\n maxItems?: string;\n /** Optional. Maximum value of the Type.INTEGER and Type.NUMBER */\n maximum?: number;\n /** Optional. Minimum number of the elements for Type.ARRAY. */\n minItems?: string;\n /** Optional. SCHEMA FIELDS FOR TYPE INTEGER and NUMBER Minimum value of the Type.INTEGER and Type.NUMBER */\n minimum?: number;\n /** Optional. Indicates if the value may be null. */\n nullable?: boolean;\n /** Optional. SCHEMA FIELDS FOR TYPE OBJECT Properties of Type.OBJECT. */\n properties?: Record;\n /** Optional. The order of the properties. Not a standard field in open api spec. Only used to support the order of the properties. */\n propertyOrdering?: string[];\n /** Optional. Required properties of Type.OBJECT. */\n required?: string[];\n /** Optional. The title of the Schema. */\n title?: string;\n /** Optional. The type of the data. */\n type?: Type;\n}\n\n/** Safety settings. */\nexport declare interface SafetySetting {\n /** Determines if the harm block method uses probability or probability\n and severity scores. */\n method?: HarmBlockMethod;\n /** Required. Harm category. */\n category?: HarmCategory;\n /** Required. The harm block threshold. */\n threshold?: HarmBlockThreshold;\n}\n\n/** Defines a function that the model can generate JSON inputs for.\n\n The inputs are based on `OpenAPI 3.0 specifications\n `_.\n */\nexport declare interface FunctionDeclaration {\n /** Describes the output from the function in the OpenAPI JSON Schema\n Object format. */\n response?: Schema;\n /** Optional. Description and purpose of the function. Model uses it to decide how and whether to call the function. */\n description?: string;\n /** Required. The name of the function to call. Must start with a letter or an underscore. Must be a-z, A-Z, 0-9, or contain underscores, dots and dashes, with a maximum length of 64. */\n name?: string;\n /** Optional. Describes the parameters to this function in JSON Schema Object format. Reflects the Open API 3.03 Parameter Object. string Key: the name of the parameter. Parameter names are case sensitive. Schema Value: the Schema defining the type used for the parameter. For function with no parameters, this can be left unset. Parameter names must start with a letter or an underscore and must only contain chars a-z, A-Z, 0-9, or underscores with a maximum length of 64. Example with 1 required and 1 optional parameter: type: OBJECT properties: param1: type: STRING param2: type: INTEGER required: - param1 */\n parameters?: Schema;\n}\n\n/** Tool to support Google Search in Model. Powered by Google. */\nexport declare interface GoogleSearch {}\n\n/** Describes the options to customize dynamic retrieval. */\nexport declare interface DynamicRetrievalConfig {\n /** The mode of the predictor to be used in dynamic retrieval. */\n mode?: DynamicRetrievalConfigMode;\n /** Optional. The threshold to be used in dynamic retrieval. If not set, a system default value is used. */\n dynamicThreshold?: number;\n}\n\n/** Tool to retrieve public web data for grounding, powered by Google. */\nexport declare interface GoogleSearchRetrieval {\n /** Specifies the dynamic retrieval configuration for the given source. 
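A concrete `FunctionDeclaration` shows how the `Schema` subset above is used in practice: `parameters` is an OBJECT schema whose `properties` map parameter names to their own schemas, with enum values expressed via `format: 'enum'` as the `Schema` docs describe. The weather function itself is illustrative.

```ts
import {FunctionDeclaration, Type} from '@google/genai';

const getWeather: FunctionDeclaration = {
  name: 'getWeather', // must match the name the model emits in FunctionCall.name
  description: 'Look up the current weather for a city.',
  parameters: {
    type: Type.OBJECT,
    properties: {
      city: {type: Type.STRING, description: 'City name, e.g. "Paris"'},
      unit: {
        type: Type.STRING,
        format: 'enum',
        enum: ['CELSIUS', 'FAHRENHEIT'],
      },
    },
    required: ['city'],
  },
};
```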
*/\n dynamicRetrievalConfig?: DynamicRetrievalConfig;\n}\n\n/** Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder */\nexport declare interface VertexAISearch {\n /** Optional. Fully-qualified Vertex AI Search data store resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` */\n datastore?: string;\n /** Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` */\n engine?: string;\n}\n\n/** The definition of the Rag resource. */\nexport declare interface VertexRagStoreRagResource {\n /** Optional. RagCorpora resource name. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}` */\n ragCorpus?: string;\n /** Optional. rag_file_id. The files should be in the same rag_corpus set in rag_corpus field. */\n ragFileIds?: string[];\n}\n\n/** Retrieve from Vertex RAG Store for grounding. */\nexport declare interface VertexRagStore {\n /** Optional. Deprecated. Please use rag_resources instead. */\n ragCorpora?: string[];\n /** Optional. The representation of the rag source. It can be used to specify corpus only or ragfiles. Currently only support one corpus or multiple files from one corpus. In the future we may open up multiple corpora support. */\n ragResources?: VertexRagStoreRagResource[];\n /** Optional. Number of top k results to return from the selected corpora. */\n similarityTopK?: number;\n /** Optional. Only return results with vector distance smaller than the threshold. */\n vectorDistanceThreshold?: number;\n}\n\n/** Defines a retrieval tool that model can call to access external knowledge. */\nexport declare interface Retrieval {\n /** Optional. Deprecated. This option is no longer supported. */\n disableAttribution?: boolean;\n /** Set to use data source powered by Vertex AI Search. */\n vertexAiSearch?: VertexAISearch;\n /** Set to use data source powered by Vertex RAG store. User data is uploaded via the VertexRagDataService. */\n vertexRagStore?: VertexRagStore;\n}\n\n/** Tool that executes code generated by the model, and automatically returns the result to the model. See also [ExecutableCode]and [CodeExecutionResult] which are input and output to this tool. */\nexport declare interface ToolCodeExecution {}\n\n/** Tool details of a tool that the model may use to generate a response. */\nexport declare interface Tool {\n /** List of function declarations that the tool supports. */\n functionDeclarations?: FunctionDeclaration[];\n /** Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation. */\n retrieval?: Retrieval;\n /** Optional. Google Search tool type. Specialized retrieval tool\n that is powered by Google Search. */\n googleSearch?: GoogleSearch;\n /** Optional. GoogleSearchRetrieval tool type. Specialized retrieval tool that is powered by Google search. */\n googleSearchRetrieval?: GoogleSearchRetrieval;\n /** Optional. CodeExecution tool type. Enables the model to execute code as part of generation. This field is only used by the Gemini Developer API services. */\n codeExecution?: ToolCodeExecution;\n}\n\n/** Function calling config. */\nexport declare interface FunctionCallingConfig {\n /** Optional. Function calling mode. 
*/\n mode?: FunctionCallingConfigMode;\n /** Optional. Function names to call. Only set when the Mode is ANY. Function names should match [FunctionDeclaration.name]. With mode set to ANY, model will predict a function call from the set of function names provided. */\n allowedFunctionNames?: string[];\n}\n\n/** Tool config.\n\n This config is shared for all tools provided in the request.\n */\nexport declare interface ToolConfig {\n /** Optional. Function calling config. */\n functionCallingConfig?: FunctionCallingConfig;\n}\n\n/** The configuration for the prebuilt speaker to use. */\nexport declare interface PrebuiltVoiceConfig {\n /** The name of the prebuilt voice to use.\n */\n voiceName?: string;\n}\n\n/** The configuration for the voice to use. */\nexport declare interface VoiceConfig {\n /** The configuration for the speaker to use.\n */\n prebuiltVoiceConfig?: PrebuiltVoiceConfig;\n}\n\n/** The speech generation configuration. */\nexport declare interface SpeechConfig {\n /** The configuration for the speaker to use.\n */\n voiceConfig?: VoiceConfig;\n}\n\n/** The thinking features configuration. */\nexport declare interface ThinkingConfig {\n /** Indicates whether to include thoughts in the response. If true, thoughts are returned only if the model supports thought and thoughts are available.\n */\n includeThoughts?: boolean;\n}\n\n/** When automated routing is specified, the routing will be determined by the pretrained routing model and customer provided model routing preference. */\nexport declare interface GenerationConfigRoutingConfigAutoRoutingMode {\n /** The model routing preference. */\n modelRoutingPreference?:\n | 'UNKNOWN'\n | 'PRIORITIZE_QUALITY'\n | 'BALANCED'\n | 'PRIORITIZE_COST';\n}\n\n/** When manual routing is set, the specified model will be used directly. */\nexport declare interface GenerationConfigRoutingConfigManualRoutingMode {\n /** The model name to use. Only the public LLM models are accepted. e.g. 'gemini-1.5-pro-001'. */\n modelName?: string;\n}\n\n/** The configuration for routing the request to a specific model. */\nexport declare interface GenerationConfigRoutingConfig {\n /** Automated routing. */\n autoMode?: GenerationConfigRoutingConfigAutoRoutingMode;\n /** Manual routing. */\n manualMode?: GenerationConfigRoutingConfigManualRoutingMode;\n}\n\n/** Optional model configuration parameters.\n\n For more information, see `Content generation parameters\n `_.\n */\nexport declare interface GenerateContentConfig {\n /** Used to override HTTP request options. */\n httpOptions?: HttpOptions;\n /** Instructions for the model to steer it toward better performance.\n For example, \"Answer as concisely as possible\" or \"Don't use technical\n terms in your response\".\n */\n systemInstruction?: ContentUnion;\n /** Value that controls the degree of randomness in token selection.\n Lower temperatures are good for prompts that require a less open-ended or\n creative response, while higher temperatures can lead to more diverse or\n creative results.\n */\n temperature?: number;\n /** Tokens are selected from the most to least probable until the sum\n of their probabilities equals this value. Use a lower value for less\n random responses and a higher value for more random responses.\n */\n topP?: number;\n /** For each token selection step, the ``top_k`` tokens with the\n highest probabilities are sampled. Then tokens are further filtered based\n on ``top_p`` with the final token selected using temperature sampling. 
Use\n a lower number for less random responses and a higher number for more\n random responses.\n */\n topK?: number;\n /** Number of response variations to return.\n */\n candidateCount?: number;\n /** Maximum number of tokens that can be generated in the response.\n */\n maxOutputTokens?: number;\n /** List of strings that tells the model to stop generating text if one\n of the strings is encountered in the response.\n */\n stopSequences?: string[];\n /** Whether to return the log probabilities of the tokens that were\n chosen by the model at each step.\n */\n responseLogprobs?: boolean;\n /** Number of top candidate tokens to return the log probabilities for\n at each generation step.\n */\n logprobs?: number;\n /** Positive values penalize tokens that already appear in the\n generated text, increasing the probability of generating more diverse\n content.\n */\n presencePenalty?: number;\n /** Positive values penalize tokens that repeatedly appear in the\n generated text, increasing the probability of generating more diverse\n content.\n */\n frequencyPenalty?: number;\n /** When ``seed`` is fixed to a specific number, the model makes a best\n effort to provide the same response for repeated requests. By default, a\n random number is used.\n */\n seed?: number;\n /** Output response media type of the generated candidate text.\n */\n responseMimeType?: string;\n /** Schema that the generated candidate text must adhere to.\n */\n responseSchema?: SchemaUnion;\n /** Configuration for model router requests.\n */\n routingConfig?: GenerationConfigRoutingConfig;\n /** Safety settings in the request to block unsafe content in the\n response.\n */\n safetySettings?: SafetySetting[];\n /** Code that enables the system to interact with external systems to\n perform an action outside of the knowledge and scope of the model.\n */\n tools?: ToolListUnion;\n /** Associates model output to a specific function call.\n */\n toolConfig?: ToolConfig;\n /** Labels with user-defined metadata to break down billed charges. */\n labels?: Record;\n /** Resource name of a context cache that can be used in subsequent\n requests.\n */\n cachedContent?: string;\n /** The requested modalities of the response. Represents the set of\n modalities that the model can return.\n */\n responseModalities?: string[];\n /** If specified, the media resolution specified will be used.\n */\n mediaResolution?: MediaResolution;\n /** The speech generation configuration.\n */\n speechConfig?: SpeechConfigUnion;\n /** If enabled, audio timestamp will be included in the request to the\n model.\n */\n audioTimestamp?: boolean;\n /** The thinking features configuration.\n */\n thinkingConfig?: ThinkingConfig;\n}\n\n/** Config for models.generate_content parameters. */\nexport declare interface GenerateContentParameters {\n /** ID of the model to use. For a list of models, see `Google models\n `_. */\n model: string;\n /** Content of the request.\n */\n contents: ContentListUnion;\n /** Configuration that contains optional model parameters.\n */\n config?: GenerateContentConfig;\n}\n\n/** Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. 
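A `GenerateContentParameters` object ties several of the config fields above together: sampling knobs, a structured-output schema via `responseMimeType` + `responseSchema`, and `safetySettings`. The call style matches this file's own JSDoc examples; the `GoogleGenAI` constructor and API-key setup are assumed from the package's public surface, and the model id is illustrative.

```ts
import {
  GoogleGenAI,
  HarmBlockThreshold,
  HarmCategory,
  Type,
} from '@google/genai';

const ai = new GoogleGenAI({apiKey: 'YOUR_API_KEY'}); // assumed auth setup

const response = await ai.models.generateContent({
  model: 'gemini-2.0-flash',
  contents: 'List three sci-fi books as JSON.',
  config: {
    temperature: 0.2,
    maxOutputTokens: 512,
    responseMimeType: 'application/json',
    responseSchema: {
      type: Type.ARRAY,
      items: {
        type: Type.OBJECT,
        properties: {
          title: {type: Type.STRING},
          author: {type: Type.STRING},
        },
        required: ['title', 'author'],
      },
    },
    safetySettings: [
      {
        category: HarmCategory.HARM_CATEGORY_HARASSMENT,
        threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
      },
    ],
  },
});
console.log(response.text);
```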
* A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp */\nexport declare interface GoogleTypeDate {\n /** Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant. */\n day?: number;\n /** Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day. */\n month?: number;\n /** Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year. */\n year?: number;\n}\n\n/** Source attributions for content. */\nexport declare interface Citation {\n /** Output only. End index into the content. */\n endIndex?: number;\n /** Output only. License of the attribution. */\n license?: string;\n /** Output only. Publication date of the attribution. */\n publicationDate?: GoogleTypeDate;\n /** Output only. Start index into the content. */\n startIndex?: number;\n /** Output only. Title of the attribution. */\n title?: string;\n /** Output only. Url reference of the attribution. */\n uri?: string;\n}\n\n/** Citation information when the model quotes another source. */\nexport declare interface CitationMetadata {\n /** Contains citation information when the model directly quotes, at\n length, from another source. Can include traditional websites and code\n repositories.\n */\n citations?: Citation[];\n}\n\n/** Chunk from context retrieved by the retrieval tools. */\nexport declare interface GroundingChunkRetrievedContext {\n /** Text of the attribution. */\n text?: string;\n /** Title of the attribution. */\n title?: string;\n /** URI reference of the attribution. */\n uri?: string;\n}\n\n/** Chunk from the web. */\nexport declare interface GroundingChunkWeb {\n /** Title of the chunk. */\n title?: string;\n /** URI reference of the chunk. */\n uri?: string;\n}\n\n/** Grounding chunk. */\nexport declare interface GroundingChunk {\n /** Grounding chunk from context retrieved by the retrieval tools. */\n retrievedContext?: GroundingChunkRetrievedContext;\n /** Grounding chunk from the web. */\n web?: GroundingChunkWeb;\n}\n\n/** Segment of the content. */\nexport declare interface Segment {\n /** Output only. End index in the given Part, measured in bytes. Offset from the start of the Part, exclusive, starting at zero. */\n endIndex?: number;\n /** Output only. The index of a Part object within its parent Content object. */\n partIndex?: number;\n /** Output only. Start index in the given Part, measured in bytes. Offset from the start of the Part, inclusive, starting at zero. */\n startIndex?: number;\n /** Output only. The text corresponding to the segment from the response. */\n text?: string;\n}\n\n/** Grounding support. */\nexport declare interface GroundingSupport {\n /** Confidence score of the support references. Ranges from 0 to 1. 1 is the most confident. This list must have the same size as the grounding_chunk_indices. */\n confidenceScores?: number[];\n /** A list of indices (into 'grounding_chunk') specifying the citations associated with the claim. For instance [1,3,4] means that grounding_chunk[1], grounding_chunk[3], grounding_chunk[4] are the retrieved content attributed to the claim. */\n groundingChunkIndices?: number[];\n /** Segment of the content this support belongs to. */\n segment?: Segment;\n}\n\n/** Metadata related to retrieval in the grounding flow. */\nexport declare interface RetrievalMetadata {\n /** Optional. 
Score indicating how likely information from Google Search could help answer the prompt. The score is in the range `[0, 1]`, where 0 is the least likely and 1 is the most likely. This score is only populated when Google Search grounding and dynamic retrieval is enabled. It will be compared to the threshold to determine whether to trigger Google Search. */\n googleSearchDynamicRetrievalScore?: number;\n}\n\n/** Google search entry point. */\nexport declare interface SearchEntryPoint {\n /** Optional. Web content snippet that can be embedded in a web page or an app webview. */\n renderedContent?: string;\n /** Optional. Base64 encoded JSON representing array of tuple. */\n sdkBlob?: string;\n}\n\n/** Metadata returned to client when grounding is enabled. */\nexport declare interface GroundingMetadata {\n /** List of supporting references retrieved from specified grounding source. */\n groundingChunks?: GroundingChunk[];\n /** Optional. List of grounding support. */\n groundingSupports?: GroundingSupport[];\n /** Optional. Output only. Retrieval metadata. */\n retrievalMetadata?: RetrievalMetadata;\n /** Optional. Queries executed by the retrieval tools. */\n retrievalQueries?: string[];\n /** Optional. Google search entry for the following-up web searches. */\n searchEntryPoint?: SearchEntryPoint;\n /** Optional. Web search queries for the following-up web search. */\n webSearchQueries?: string[];\n}\n\n/** Candidate for the logprobs token and score. */\nexport declare interface LogprobsResultCandidate {\n /** The candidate's log probability. */\n logProbability?: number;\n /** The candidate's token string value. */\n token?: string;\n /** The candidate's token id value. */\n tokenId?: number;\n}\n\n/** Candidates with top log probabilities at each decoding step. */\nexport declare interface LogprobsResultTopCandidates {\n /** Sorted by log probability in descending order. */\n candidates?: LogprobsResultCandidate[];\n}\n\n/** Logprobs Result */\nexport declare interface LogprobsResult {\n /** Length = total number of decoding steps. The chosen candidates may or may not be in top_candidates. */\n chosenCandidates?: LogprobsResultCandidate[];\n /** Length = total number of decoding steps. */\n topCandidates?: LogprobsResultTopCandidates[];\n}\n\n/** Safety rating corresponding to the generated content. */\nexport declare interface SafetyRating {\n /** Output only. Indicates whether the content was filtered out because of this rating. */\n blocked?: boolean;\n /** Output only. Harm category. */\n category?: HarmCategory;\n /** Output only. Harm probability levels in the content. */\n probability?: HarmProbability;\n /** Output only. Harm probability score. */\n probabilityScore?: number;\n /** Output only. Harm severity levels in the content. */\n severity?: HarmSeverity;\n /** Output only. Harm severity score. */\n severityScore?: number;\n}\n\n/** A response candidate generated from the model. */\nexport declare interface Candidate {\n /** Contains the multi-part content of the response.\n */\n content?: Content;\n /** Source attribution of the generated content.\n */\n citationMetadata?: CitationMetadata;\n /** Describes the reason the model stopped generating tokens.\n */\n finishMessage?: string;\n /** Number of tokens for this candidate.\n */\n tokenCount?: number;\n /** The reason why the model stopped generating tokens.\n If empty, the model has not stopped generating the tokens.\n */\n finishReason?: FinishReason;\n /** Output only. Average log probability score of the candidate. 
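The grounding types above form a small index structure: each `GroundingSupport` points into `groundingChunks` via `groundingChunkIndices`. A sketch that resolves those indices back to source URIs (a helper of my own, built only from the declared interfaces; `metadata` would come from `candidate.groundingMetadata` on a grounded response):

```ts
import {GroundingMetadata} from '@google/genai';

function listCitedUris(metadata: GroundingMetadata): string[] {
  const uris: string[] = [];
  for (const support of metadata.groundingSupports ?? []) {
    for (const i of support.groundingChunkIndices ?? []) {
      const chunk = metadata.groundingChunks?.[i];
      // A chunk is either web content or retrieved context; take whichever URI exists.
      const uri = chunk?.web?.uri ?? chunk?.retrievedContext?.uri;
      if (uri) uris.push(uri);
    }
  }
  return [...new Set(uris)]; // de-duplicate repeated sources
}
```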
*/\n avgLogprobs?: number;\n /** Output only. Metadata specifies sources used to ground generated content. */\n groundingMetadata?: GroundingMetadata;\n /** Output only. Index of the candidate. */\n index?: number;\n /** Output only. Log-likelihood scores for the response tokens and top tokens */\n logprobsResult?: LogprobsResult;\n /** Output only. List of ratings for the safety of a response candidate. There is at most one rating per category. */\n safetyRatings?: SafetyRating[];\n}\n\n/** Content filter results for a prompt sent in the request. */\nexport class GenerateContentResponsePromptFeedback {\n /** Output only. Blocked reason. */\n blockReason?: BlockedReason;\n /** Output only. A readable block reason message. */\n blockReasonMessage?: string;\n /** Output only. Safety ratings. */\n safetyRatings?: SafetyRating[];\n}\n\n/** Represents token counting info for a single modality. */\nexport declare interface ModalityTokenCount {\n /** The modality associated with this token count. */\n modality?: MediaModality;\n /** Number of tokens. */\n tokenCount?: number;\n}\n\n/** Usage metadata about response(s). */\nexport class GenerateContentResponseUsageMetadata {\n /** Output only. List of modalities of the cached content in the request input. */\n cacheTokensDetails?: ModalityTokenCount[];\n /** Output only. Number of tokens in the cached part in the input (the cached content). */\n cachedContentTokenCount?: number;\n /** Number of tokens in the response(s). */\n candidatesTokenCount?: number;\n /** Output only. List of modalities that were returned in the response. */\n candidatesTokensDetails?: ModalityTokenCount[];\n /** Number of tokens in the request. When `cached_content` is set, this is still the total effective prompt size meaning this includes the number of tokens in the cached content. */\n promptTokenCount?: number;\n /** Output only. List of modalities that were processed in the request input. */\n promptTokensDetails?: ModalityTokenCount[];\n /** Output only. Number of tokens present in thoughts output. */\n thoughtsTokenCount?: number;\n /** Output only. Number of tokens present in tool-use prompt(s). */\n toolUsePromptTokenCount?: number;\n /** Output only. List of modalities that were processed for tool-use request inputs. */\n toolUsePromptTokensDetails?: ModalityTokenCount[];\n /** Total token count for prompt, response candidates, and tool-use prompts (if present). */\n totalTokenCount?: number;\n}\n\n/** Response message for PredictionService.GenerateContent. */\nexport class GenerateContentResponse {\n /** Response variations returned by the model.\n */\n candidates?: Candidate[];\n /** Timestamp when the request is made to the server.\n */\n createTime?: string;\n /** Identifier for each response.\n */\n responseId?: string;\n /** Output only. The model version used to generate the response. */\n modelVersion?: string;\n /** Output only. Content filter results for a prompt sent in the request. Note: Sent only in the first stream chunk. Only happens when no candidates were generated due to content violations. */\n promptFeedback?: GenerateContentResponsePromptFeedback;\n /** Usage metadata about the response(s). 
*/\n usageMetadata?: GenerateContentResponseUsageMetadata;\n /**\n * Returns the concatenation of all text parts from the first candidate in the response.\n *\n * @remarks\n * If there are multiple candidates in the response, the text from the first\n * one will be returned.\n * If there are non-text parts in the response, the concatenation of all text\n * parts will be returned, and a warning will be logged.\n * If there are thought parts in the response, the concatenation of all text\n * parts excluding the thought parts will be returned.\n *\n * @example\n * ```ts\n * const response = await ai.models.generateContent({\n * model: 'gemini-2.0-flash',\n * contents:\n * 'Why is the sky blue?',\n * });\n *\n * console.debug(response.text);\n * ```\n */\n get text(): string | undefined {\n if (this.candidates?.[0]?.content?.parts?.length === 0) {\n return undefined;\n }\n if (this.candidates && this.candidates.length > 1) {\n console.warn(\n 'there are multiple candidates in the response, returning text from the first one.',\n );\n }\n let text = '';\n let anyTextPartText = false;\n const nonTextParts = [];\n for (const part of this.candidates?.[0]?.content?.parts ?? []) {\n for (const [fieldName, fieldValue] of Object.entries(part)) {\n if (\n fieldName !== 'text' &&\n fieldName !== 'thought' &&\n (fieldValue !== null || fieldValue !== undefined)\n ) {\n nonTextParts.push(fieldName);\n }\n }\n if (typeof part.text === 'string') {\n if (typeof part.thought === 'boolean' && part.thought) {\n continue;\n }\n anyTextPartText = true;\n text += part.text;\n }\n }\n if (nonTextParts.length > 0) {\n console.warn(\n `there are non-text parts ${nonTextParts} in the response, returning concatenation of all text parts. Please refer to the non text parts for a full response from model.`,\n );\n }\n // part.text === '' is different from part.text is null\n return anyTextPartText ? text : undefined;\n }\n\n /**\n * Returns the function calls from the first candidate in the response.\n *\n * @remarks\n * If there are multiple candidates in the response, the function calls from\n * the first one will be returned.\n * If there are no function calls in the response, undefined will be returned.\n *\n * @example\n * ```ts\n * const controlLightFunctionDeclaration: FunctionDeclaration = {\n * name: 'controlLight',\n * parameters: {\n * type: Type.OBJECT,\n * description: 'Set the brightness and color temperature of a room light.',\n * properties: {\n * brightness: {\n * type: Type.NUMBER,\n * description:\n * 'Light level from 0 to 100. 
Zero is off and 100 is full brightness.',\n * },\n * colorTemperature: {\n * type: Type.STRING,\n * description:\n * 'Color temperature of the light fixture which can be `daylight`, `cool` or `warm`.',\n * },\n * },\n * required: ['brightness', 'colorTemperature'],\n * };\n * const response = await ai.models.generateContent({\n * model: 'gemini-2.0-flash',\n * contents: 'Dim the lights so the room feels cozy and warm.',\n * config: {\n * tools: [{functionDeclarations: [controlLightFunctionDeclaration]}],\n * toolConfig: {\n * functionCallingConfig: {\n * mode: FunctionCallingConfigMode.ANY,\n * allowedFunctionNames: ['controlLight'],\n * },\n * },\n * },\n * });\n * console.debug(JSON.stringify(response.functionCalls));\n * ```\n */\n get functionCalls(): FunctionCall[] | undefined {\n if (this.candidates?.[0]?.content?.parts?.length === 0) {\n return undefined;\n }\n if (this.candidates && this.candidates.length > 1) {\n console.warn(\n 'there are multiple candidates in the response, returning function calls from the first one.',\n );\n }\n const functionCalls = this.candidates?.[0]?.content?.parts\n ?.filter((part) => part.functionCall)\n .map((part) => part.functionCall)\n .filter(\n (functionCall): functionCall is FunctionCall =>\n functionCall !== undefined,\n );\n if (functionCalls?.length === 0) {\n return undefined;\n }\n return functionCalls;\n }\n /**\n * Returns the first executable code from the first candidate in the response.\n *\n * @remarks\n * If there are multiple candidates in the response, the executable code from\n * the first one will be returned.\n * If there are no executable code in the response, undefined will be\n * returned.\n *\n * @example\n * ```ts\n * const response = await ai.models.generateContent({\n * model: 'gemini-2.0-flash',\n * contents:\n * 'What is the sum of the first 50 prime numbers? Generate and run code for the calculation, and make sure you get all 50.'\n * config: {\n * tools: [{codeExecution: {}}],\n * },\n * });\n *\n * console.debug(response.executableCode);\n * ```\n */\n get executableCode(): string | undefined {\n if (this.candidates?.[0]?.content?.parts?.length === 0) {\n return undefined;\n }\n if (this.candidates && this.candidates.length > 1) {\n console.warn(\n 'there are multiple candidates in the response, returning executable code from the first one.',\n );\n }\n const executableCode = this.candidates?.[0]?.content?.parts\n ?.filter((part) => part.executableCode)\n .map((part) => part.executableCode)\n .filter(\n (executableCode): executableCode is ExecutableCode =>\n executableCode !== undefined,\n );\n if (executableCode?.length === 0) {\n return undefined;\n }\n\n return executableCode?.[0]?.code;\n }\n /**\n * Returns the first code execution result from the first candidate in the response.\n *\n * @remarks\n * If there are multiple candidates in the response, the code execution result from\n * the first one will be returned.\n * If there are no code execution result in the response, undefined will be returned.\n *\n * @example\n * ```ts\n * const response = await ai.models.generateContent({\n * model: 'gemini-2.0-flash',\n * contents:\n * 'What is the sum of the first 50 prime numbers? 
Generate and run code for the calculation, and make sure you get all 50.'\n * config: {\n * tools: [{codeExecution: {}}],\n * },\n * });\n *\n * console.debug(response.codeExecutionResult);\n * ```\n */\n get codeExecutionResult(): string | undefined {\n if (this.candidates?.[0]?.content?.parts?.length === 0) {\n return undefined;\n }\n if (this.candidates && this.candidates.length > 1) {\n console.warn(\n 'there are multiple candidates in the response, returning code execution result from the first one.',\n );\n }\n const codeExecutionResult = this.candidates?.[0]?.content?.parts\n ?.filter((part) => part.codeExecutionResult)\n .map((part) => part.codeExecutionResult)\n .filter(\n (codeExecutionResult): codeExecutionResult is CodeExecutionResult =>\n codeExecutionResult !== undefined,\n );\n if (codeExecutionResult?.length === 0) {\n return undefined;\n }\n return codeExecutionResult?.[0]?.output;\n }\n}\n\nexport /** Optional parameters for the embed_content method. */\ndeclare interface EmbedContentConfig {\n /** Used to override HTTP request options. */\n httpOptions?: HttpOptions;\n /** Type of task for which the embedding will be used.\n */\n taskType?: string;\n /** Title for the text. Only applicable when TaskType is\n `RETRIEVAL_DOCUMENT`.\n */\n title?: string;\n /** Reduced dimension for the output embedding. If set,\n excessive values in the output embedding are truncated from the end.\n Supported by newer models since 2024 only. You cannot set this value if\n using the earlier model (`models/embedding-001`).\n */\n outputDimensionality?: number;\n /** Vertex API only. The MIME type of the input.\n */\n mimeType?: string;\n /** Vertex API only. Whether to silently truncate inputs longer than\n the max sequence length. If this option is set to false, oversized inputs\n will lead to an INVALID_ARGUMENT error, similar to other text APIs.\n */\n autoTruncate?: boolean;\n}\n\n/** Parameters for the embed_content method. */\nexport declare interface EmbedContentParameters {\n /** ID of the model to use. For a list of models, see `Google models\n `_. */\n model: string;\n /** The content to embed. Only the `parts.text` fields will be counted.\n */\n contents: ContentListUnion;\n /** Configuration that contains optional parameters.\n */\n config?: EmbedContentConfig;\n}\n\n/** Statistics of the input text associated with the result of content embedding. */\nexport declare interface ContentEmbeddingStatistics {\n /** Vertex API only. If the input text was truncated due to having\n a length longer than the allowed maximum input.\n */\n truncated?: boolean;\n /** Vertex API only. Number of tokens of the input text.\n */\n tokenCount?: number;\n}\n\n/** The embedding generated from an input content. */\nexport declare interface ContentEmbedding {\n /** A list of floats representing an embedding.\n */\n values?: number[];\n /** Vertex API only. Statistics of the input text associated with this\n embedding.\n */\n statistics?: ContentEmbeddingStatistics;\n}\n\n/** Request-level metadata for the Vertex Embed Content API. */\nexport declare interface EmbedContentMetadata {\n /** Vertex API only. The total number of billable characters included\n in the request.\n */\n billableCharacterCount?: number;\n}\n\n/** Response for the embed_content method. */\nexport class EmbedContentResponse {\n /** The embeddings for each request, in the same order as provided in\n the batch request.\n */\n embeddings?: ContentEmbedding[];\n /** Vertex API only. 
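The getters above cover the read side of function calling; the write side is turning each `FunctionCall` back into a `functionResponse` Part for the next turn. A sketch using only exports defined in this file — `dispatch` is a hypothetical app-side lookup from function name to handler, and per the `FunctionResponse` docs the result goes under an `"output"` key:

```ts
import {
  createPartFromFunctionResponse,
  FunctionCall,
  Part,
} from '@google/genai';

function answerFunctionCalls(
  calls: FunctionCall[],
  dispatch: (name: string, args: Record<string, unknown>) => unknown,
): Part[] {
  return calls.map((call) => {
    const output = dispatch(call.name ?? '', call.args ?? {});
    // Echo back the call id so the model can match response to call.
    return createPartFromFunctionResponse(call.id ?? '', call.name ?? '', {
      output,
    });
  });
}

// Usage with a trivial dispatcher:
const parts = answerFunctionCalls(
  [{id: '1', name: 'getWeather', args: {city: 'Paris'}}],
  () => ({tempC: 21}),
);
console.log(JSON.stringify(parts));
// parts would be sent back to the model as the next user turn.
```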
Metadata about the request.\n */\n metadata?: EmbedContentMetadata;\n}\n\n/** The config for generating an images. */\nexport declare interface GenerateImagesConfig {\n /** Used to override HTTP request options. */\n httpOptions?: HttpOptions;\n /** Cloud Storage URI used to store the generated images.\n */\n outputGcsUri?: string;\n /** Description of what to discourage in the generated images.\n */\n negativePrompt?: string;\n /** Number of images to generate.\n */\n numberOfImages?: number;\n /** Aspect ratio of the generated images.\n */\n aspectRatio?: string;\n /** Controls how much the model adheres to the text prompt. Large\n values increase output and prompt alignment, but may compromise image\n quality.\n */\n guidanceScale?: number;\n /** Random seed for image generation. This is not available when\n ``add_watermark`` is set to true.\n */\n seed?: number;\n /** Filter level for safety filtering.\n */\n safetyFilterLevel?: SafetyFilterLevel;\n /** Allows generation of people by the model.\n */\n personGeneration?: PersonGeneration;\n /** Whether to report the safety scores of each generated image and\n the positive prompt in the response.\n */\n includeSafetyAttributes?: boolean;\n /** Whether to include the Responsible AI filter reason if the image\n is filtered out of the response.\n */\n includeRaiReason?: boolean;\n /** Language of the text in the prompt.\n */\n language?: ImagePromptLanguage;\n /** MIME type of the generated image.\n */\n outputMimeType?: string;\n /** Compression quality of the generated image (for ``image/jpeg``\n only).\n */\n outputCompressionQuality?: number;\n /** Whether to add a watermark to the generated images.\n */\n addWatermark?: boolean;\n /** Whether to use the prompt rewriting logic.\n */\n enhancePrompt?: boolean;\n}\n\n/** The parameters for generating images. */\nexport declare interface GenerateImagesParameters {\n /** ID of the model to use. For a list of models, see `Google models\n `_. */\n model: string;\n /** Text prompt that typically describes the images to output.\n */\n prompt: string;\n /** Configuration for generating images.\n */\n config?: GenerateImagesConfig;\n}\n\n/** An image. */\nexport declare interface Image {\n /** The Cloud Storage URI of the image. ``Image`` can contain a value\n for this field or the ``image_bytes`` field but not both.\n */\n gcsUri?: string;\n /** The image bytes data. ``Image`` can contain a value for this field\n or the ``gcs_uri`` field but not both.\n */\n imageBytes?: string;\n /** The MIME type of the image. */\n mimeType?: string;\n}\n\n/** Safety attributes of a GeneratedImage or the user-provided prompt. */\nexport declare interface SafetyAttributes {\n /** List of RAI categories.\n */\n categories?: string[];\n /** List of scores of each categories.\n */\n scores?: number[];\n /** Internal use only.\n */\n contentType?: string;\n}\n\n/** An output image. */\nexport declare interface GeneratedImage {\n /** The output image data.\n */\n image?: Image;\n /** Responsible AI filter reason if the image is filtered out of the\n response.\n */\n raiFilteredReason?: string;\n /** Safety attributes of the image. Lists of RAI categories and their\n scores of each content.\n */\n safetyAttributes?: SafetyAttributes;\n /** The rewritten prompt used for the image generation if the prompt\n enhancer is enabled.\n */\n enhancedPrompt?: string;\n}\n\n/** The output images response. 
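An image-generation request assembles the `GenerateImagesConfig` fields above into the same parameter shape as the other methods. The `ai.models.generateImages` call style mirrors this file's `generateContent` examples and is assumed rather than shown in this excerpt; the Imagen model id is illustrative.

```ts
import {GoogleGenAI, PersonGeneration, SafetyFilterLevel} from '@google/genai';

const ai = new GoogleGenAI({apiKey: 'YOUR_API_KEY'}); // assumed auth setup

const res = await ai.models.generateImages({
  model: 'imagen-3.0-generate-002', // illustrative model id
  prompt: 'A watercolor lighthouse at dusk',
  config: {
    numberOfImages: 2,
    aspectRatio: '16:9',
    safetyFilterLevel: SafetyFilterLevel.BLOCK_MEDIUM_AND_ABOVE,
    personGeneration: PersonGeneration.DONT_ALLOW,
    includeRaiReason: true,
  },
});
for (const img of res.generatedImages ?? []) {
  console.log(img.image?.mimeType, img.raiFilteredReason ?? 'ok');
}
```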
*/\nexport class GenerateImagesResponse {\n /** List of generated images.\n */\n generatedImages?: GeneratedImage[];\n /** Safety attributes of the positive prompt. Only populated if\n ``include_safety_attributes`` is set to True.\n */\n positivePromptSafetyAttributes?: SafetyAttributes;\n}\n\n/** Generation config. */\nexport declare interface GenerationConfig {\n /** Optional. If enabled, audio timestamp will be included in the request to the model. */\n audioTimestamp?: boolean;\n /** Optional. Number of candidates to generate. */\n candidateCount?: number;\n /** Optional. Frequency penalties. */\n frequencyPenalty?: number;\n /** Optional. Logit probabilities. */\n logprobs?: number;\n /** Optional. The maximum number of output tokens to generate per message. */\n maxOutputTokens?: number;\n /** Optional. Positive penalties. */\n presencePenalty?: number;\n /** Optional. If true, export the logprobs results in response. */\n responseLogprobs?: boolean;\n /** Optional. Output response mimetype of the generated candidate text. Supported mimetype: - `text/plain`: (default) Text output. - `application/json`: JSON response in the candidates. The model needs to be prompted to output the appropriate response type, otherwise the behavior is undefined. This is a preview feature. */\n responseMimeType?: string;\n /** Optional. The `Schema` object allows the definition of input and output data types. These types can be objects, but also primitives and arrays. Represents a select subset of an [OpenAPI 3.0 schema object](https://spec.openapis.org/oas/v3.0.3#schema). If set, a compatible response_mime_type must also be set. Compatible mimetypes: `application/json`: Schema for JSON response. */\n responseSchema?: Schema;\n /** Optional. Routing configuration. */\n routingConfig?: GenerationConfigRoutingConfig;\n /** Optional. Seed. */\n seed?: number;\n /** Optional. Stop sequences. */\n stopSequences?: string[];\n /** Optional. Controls the randomness of predictions. */\n temperature?: number;\n /** Optional. If specified, top-k sampling will be used. */\n topK?: number;\n /** Optional. If specified, nucleus sampling will be used. */\n topP?: number;\n}\n\n/** Config for the count_tokens method. */\nexport declare interface CountTokensConfig {\n /** Used to override HTTP request options. */\n httpOptions?: HttpOptions;\n /** Instructions for the model to steer it toward better performance.\n */\n systemInstruction?: ContentUnion;\n /** Code that enables the system to interact with external systems to\n perform an action outside of the knowledge and scope of the model.\n */\n tools?: Tool[];\n /** Configuration that the model uses to generate the response. Not\n supported by the Gemini Developer API.\n */\n generationConfig?: GenerationConfig;\n}\n\n/** Parameters for counting tokens. */\nexport declare interface CountTokensParameters {\n /** ID of the model to use. For a list of models, see `Google models\n `_. */\n model: string;\n /** Input content. */\n contents: ContentListUnion;\n /** Configuration for counting tokens. */\n config?: CountTokensConfig;\n}\n\n/** Response for counting tokens. */\nexport class CountTokensResponse {\n /** Total number of tokens. */\n totalTokens?: number;\n /** Number of tokens in the cached part of the prompt (the cached content). */\n cachedContentTokenCount?: number;\n}\n\n/** Optional parameters for computing tokens. */\nexport declare interface ComputeTokensConfig {\n /** Used to override HTTP request options. 
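// ---- Editor's example (illustrative) ----
// Sketch of the count_tokens method declared above; the parameter shape
// follows CountTokensParameters, while the `ai.models.countTokens` entry
// point is an assumption.
import {GoogleGenAI} from '@google/genai';

async function tokenBudget(ai: GoogleGenAI): Promise<void> {
  const {totalTokens, cachedContentTokenCount} = await ai.models.countTokens({
    model: 'gemini-2.0-flash', // placeholder model ID
    contents: 'Summarize the attached quarterly report in three bullets.',
  });
  // cachedContentTokenCount is only set when part of the prompt is cached.
  console.log(`prompt: ${totalTokens} tokens, cached: ${cachedContentTokenCount ?? 0}`);
}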
*/\n httpOptions?: HttpOptions;\n}\n\n/** Parameters for computing tokens. */\nexport declare interface ComputeTokensParameters {\n /** ID of the model to use. For a list of models, see `Google models\n `_. */\n model: string;\n /** Input content. */\n contents: ContentListUnion;\n /** Optional parameters for the request.\n */\n config?: ComputeTokensConfig;\n}\n\n/** Tokens info with a list of tokens and the corresponding list of token ids. */\nexport declare interface TokensInfo {\n /** Optional. Optional fields for the role from the corresponding Content. */\n role?: string;\n /** A list of token ids from the input. */\n tokenIds?: string[];\n /** A list of tokens from the input. */\n tokens?: string[];\n}\n\n/** Response for computing tokens. */\nexport class ComputeTokensResponse {\n /** Lists of tokens info from the input. A ComputeTokensRequest could have multiple instances with a prompt in each instance. We also need to return lists of tokens info for the request with multiple instances. */\n tokensInfo?: TokensInfo[];\n}\n\n/** Configuration for generating videos. */\nexport declare interface GenerateVideosConfig {\n /** Used to override HTTP request options. */\n httpOptions?: HttpOptions;\n /** Number of output videos. */\n numberOfVideos?: number;\n /** The gcs bucket where to save the generated videos. */\n outputGcsUri?: string;\n /** Frames per second for video generation. */\n fps?: number;\n /** Duration of the clip for video generation in seconds. */\n durationSeconds?: number;\n /** The RNG seed. If RNG seed is exactly same for each request with unchanged inputs, the prediction results will be consistent. Otherwise, a random RNG seed will be used each time to produce a different result. */\n seed?: number;\n /** The aspect ratio for the generated video. 16:9 (landscape) and 9:16 (portrait) are supported. */\n aspectRatio?: string;\n /** The resolution for the generated video. 1280x720, 1920x1080 are supported. */\n resolution?: string;\n /** Whether allow to generate person videos, and restrict to specific ages. Supported values are: dont_allow, allow_adult. */\n personGeneration?: string;\n /** The pubsub topic where to publish the video generation progress. */\n pubsubTopic?: string;\n /** Optional field in addition to the text content. Negative prompts can be explicitly stated here to help generate the video. */\n negativePrompt?: string;\n /** Whether to use the prompt rewriting logic. */\n enhancePrompt?: boolean;\n}\n\n/** Class that represents the parameters for generating an image. */\nexport declare interface GenerateVideosParameters {\n /** ID of the model to use. For a list of models, see `Google models\n `_. */\n model: string;\n /** The text prompt for generating the videos. Optional for image to video use cases. */\n prompt?: string;\n /** The input image for generating the videos.\n Optional if prompt is provided. */\n image?: Image;\n /** Configuration for generating videos. */\n config?: GenerateVideosConfig;\n}\n\n/** A generated video. */\nexport declare interface Video {\n /** Path to another storage. */\n uri?: string;\n /** Video bytes. */\n videoBytes?: string;\n /** Video encoding, for example \"video/mp4\". */\n mimeType?: string;\n}\n\n/** A generated video. */\nexport declare interface GeneratedVideo {\n /** The output video */\n video?: Video;\n}\n\n/** Response with generated videos. 
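// ---- Editor's example (illustrative) ----
// Hedged sketch of GenerateVideosParameters above. `ai.models.generateVideos`
// and the model ID are assumptions; note that the call yields a long-running
// GenerateVideosOperation rather than finished videos.
import {GoogleGenAI} from '@google/genai';

async function startVideo(ai: GoogleGenAI) {
  const operation = await ai.models.generateVideos({
    model: 'veo-2.0-generate-001', // placeholder model ID
    prompt: 'Aerial shot of a rocky coastline at sunrise',
    config: {
      numberOfVideos: 1,
      aspectRatio: '16:9', // 16:9 and 9:16 are the supported ratios
      durationSeconds: 5,
      personGeneration: 'dont_allow', // or 'allow_adult'
    },
  });
  console.log('operation:', operation.name, 'done:', operation.done);
  return operation;
}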
*/\nexport class GenerateVideosResponse {\n /** List of the generated videos */\n generatedVideos?: GeneratedVideo[];\n /** Returns if any videos were filtered due to RAI policies. */\n raiMediaFilteredCount?: number;\n /** Returns rai failure reasons if any. */\n raiMediaFilteredReasons?: string[];\n}\n\n/** A video generation operation. */\nexport declare interface GenerateVideosOperation {\n /** The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`. */\n name?: string;\n /** Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any. */\n metadata?: Record;\n /** If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available. */\n done?: boolean;\n /** The error result of the operation in case of failure or cancellation. */\n error?: Record;\n /** The normal response of the operation in case of success. */\n response?: Record;\n /** The generated videos. */\n result?: GenerateVideosResponse;\n}\n\n/** Optional configuration for cached content creation. */\nexport declare interface CreateCachedContentConfig {\n /** Used to override HTTP request options. */\n httpOptions?: HttpOptions;\n /** The TTL for this resource. The expiration time is computed: now + TTL. It is a duration string, with up to nine fractional digits, terminated by 's'. Example: \"3.5s\". */\n ttl?: string;\n /** Timestamp of when this resource is considered expired. Uses RFC 3339 format, Example: 2014-10-02T15:01:23Z. */\n expireTime?: string;\n /** The user-generated meaningful display name of the cached content.\n */\n displayName?: string;\n /** The content to cache.\n */\n contents?: ContentListUnion;\n /** Developer set system instruction.\n */\n systemInstruction?: ContentUnion;\n /** A list of `Tools` the model may use to generate the next response.\n */\n tools?: Tool[];\n /** Configuration for the tools to use. This config is shared for all tools.\n */\n toolConfig?: ToolConfig;\n}\n\n/** Parameters for caches.create method. */\nexport declare interface CreateCachedContentParameters {\n /** ID of the model to use. Example: gemini-1.5-flash */\n model: string;\n /** Configuration that contains optional parameters.\n */\n config?: CreateCachedContentConfig;\n}\n\n/** Metadata on the usage of the cached content. */\nexport declare interface CachedContentUsageMetadata {\n /** Duration of audio in seconds. */\n audioDurationSeconds?: number;\n /** Number of images. */\n imageCount?: number;\n /** Number of text characters. */\n textCount?: number;\n /** Total number of tokens that the cached content consumes. */\n totalTokenCount?: number;\n /** Duration of video in seconds. */\n videoDurationSeconds?: number;\n}\n\n/** A resource used in LLM queries for users to explicitly specify what to cache. */\nexport declare interface CachedContent {\n /** The server-generated resource name of the cached content. */\n name?: string;\n /** The user-generated meaningful display name of the cached content. */\n displayName?: string;\n /** The name of the publisher model to use for cached content. */\n model?: string;\n /** Creation time of the cache entry. 
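// ---- Editor's example (illustrative) ----
// Polling sketch for the GenerateVideosOperation declared above. The
// `ai.operations.getVideosOperation` method name is an assumption inferred
// from the OperationGetParameters type later in this file.
import {GoogleGenAI, GenerateVideosOperation} from '@google/genai';

async function awaitVideos(ai: GoogleGenAI, op: GenerateVideosOperation) {
  // `done` stays false while in progress; once true, `result` or `error` is set.
  while (!op.done) {
    await new Promise((resolve) => setTimeout(resolve, 10_000));
    op = await ai.operations.getVideosOperation({operation: op});
  }
  console.log('RAI-filtered videos:', op.result?.raiMediaFilteredCount ?? 0);
  return op.result?.generatedVideos ?? [];
}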
*/\n createTime?: string;\n /** When the cache entry was last updated in UTC time. */\n updateTime?: string;\n /** Expiration time of the cached content. */\n expireTime?: string;\n /** Metadata on the usage of the cached content. */\n usageMetadata?: CachedContentUsageMetadata;\n}\n\n/** Optional parameters for caches.get method. */\nexport declare interface GetCachedContentConfig {\n /** Used to override HTTP request options. */\n httpOptions?: HttpOptions;\n}\n\n/** Parameters for caches.get method. */\nexport declare interface GetCachedContentParameters {\n /** The server-generated resource name of the cached content.\n */\n name: string;\n /** Optional parameters for the request.\n */\n config?: GetCachedContentConfig;\n}\n\n/** Optional parameters for caches.delete method. */\nexport declare interface DeleteCachedContentConfig {\n /** Used to override HTTP request options. */\n httpOptions?: HttpOptions;\n}\n\n/** Parameters for caches.delete method. */\nexport declare interface DeleteCachedContentParameters {\n /** The server-generated resource name of the cached content.\n */\n name: string;\n /** Optional parameters for the request.\n */\n config?: DeleteCachedContentConfig;\n}\n\n/** Empty response for caches.delete method. */\nexport class DeleteCachedContentResponse {}\n\n/** Optional parameters for caches.update method. */\nexport declare interface UpdateCachedContentConfig {\n /** Used to override HTTP request options. */\n httpOptions?: HttpOptions;\n /** The TTL for this resource. The expiration time is computed: now + TTL. It is a duration string, with up to nine fractional digits, terminated by 's'. Example: \"3.5s\". */\n ttl?: string;\n /** Timestamp of when this resource is considered expired. Uses RFC 3339 format, Example: 2014-10-02T15:01:23Z. */\n expireTime?: string;\n}\n\nexport declare interface UpdateCachedContentParameters {\n /** The server-generated resource name of the cached content.\n */\n name: string;\n /** Configuration that contains optional parameters.\n */\n config?: UpdateCachedContentConfig;\n}\n\n/** Config for caches.list method. */\nexport declare interface ListCachedContentsConfig {\n /** Used to override HTTP request options. */\n httpOptions?: HttpOptions;\n pageSize?: number;\n pageToken?: string;\n}\n\n/** Parameters for caches.list method. */\nexport declare interface ListCachedContentsParameters {\n /** Configuration that contains optional parameters.\n */\n config?: ListCachedContentsConfig;\n}\n\nexport class ListCachedContentsResponse {\n nextPageToken?: string;\n /** List of cached contents.\n */\n cachedContents?: CachedContent[];\n}\n\n/** Used to override the default configuration. */\nexport declare interface ListFilesConfig {\n /** Used to override HTTP request options. */\n httpOptions?: HttpOptions;\n pageSize?: number;\n pageToken?: string;\n}\n\n/** Generates the parameters for the list method. */\nexport declare interface ListFilesParameters {\n /** Used to override the default configuration. */\n config?: ListFilesConfig;\n}\n\n/** Status of a File that uses a common error model. */\nexport declare interface FileStatus {\n /** A list of messages that carry the error details. There is a common set of message types for APIs to use. */\n details?: Record[];\n /** A list of messages that carry the error details. There is a common set of message types for APIs to use. */\n message?: string;\n /** The status code. 0 for OK, 1 for CANCELLED */\n code?: number;\n}\n\n/** A file uploaded to the API. 
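// ---- Editor's example (illustrative) ----
// Combines the caches.update and caches.list declarations above, following
// the @example blocks later in this file; the cache resource name is a
// placeholder, and iterating the pager with `for await` is an assumption.
import {GoogleGenAI} from '@google/genai';

async function refreshCaches(ai: GoogleGenAI): Promise<void> {
  // ttl is relative ("now + TTL"); expireTime would be an absolute RFC 3339 stamp.
  await ai.caches.update({
    name: 'cachedContents/example-id', // placeholder resource name
    config: {ttl: '7200s'},
  });
  const pager = await ai.caches.list({config: {pageSize: 10}});
  for await (const cached of pager) {
    console.log(cached.name, cached.expireTime, cached.usageMetadata?.totalTokenCount);
  }
}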
*/\nexport declare interface File {\n /** The `File` resource name. The ID (name excluding the \"files/\" prefix) can contain up to 40 characters that are lowercase alphanumeric or dashes (-). The ID cannot start or end with a dash. If the name is empty on create, a unique name will be generated. Example: `files/123-456` */\n name?: string;\n /** Optional. The human-readable display name for the `File`. The display name must be no more than 512 characters in length, including spaces. Example: 'Welcome Image' */\n displayName?: string;\n /** Output only. MIME type of the file. */\n mimeType?: string;\n /** Output only. Size of the file in bytes. */\n sizeBytes?: string;\n /** Output only. The timestamp of when the `File` was created. */\n createTime?: string;\n /** Output only. The timestamp of when the `File` will be deleted. Only set if the `File` is scheduled to expire. */\n expirationTime?: string;\n /** Output only. The timestamp of when the `File` was last updated. */\n updateTime?: string;\n /** Output only. SHA-256 hash of the uploaded bytes. The hash value is encoded in base64 format. */\n sha256Hash?: string;\n /** Output only. The URI of the `File`. */\n uri?: string;\n /** Output only. The URI of the `File`, only set for downloadable (generated) files. */\n downloadUri?: string;\n /** Output only. Processing state of the File. */\n state?: FileState;\n /** Output only. The source of the `File`. */\n source?: FileSource;\n /** Output only. Metadata for a video. */\n videoMetadata?: Record;\n /** Output only. Error status if File processing failed. */\n error?: FileStatus;\n}\n\n/** Response for the list files method. */\nexport class ListFilesResponse {\n /** A token to retrieve next page of results. */\n nextPageToken?: string;\n /** The list of files. */\n files?: File[];\n}\n\n/** Used to override the default configuration. */\nexport declare interface CreateFileConfig {\n /** Used to override HTTP request options. */\n httpOptions?: HttpOptions;\n}\n\n/** Generates the parameters for the private _create method. */\nexport declare interface CreateFileParameters {\n /** The file to be uploaded.\n mime_type: (Required) The MIME type of the file. Must be provided.\n name: (Optional) The name of the file in the destination (e.g.\n 'files/sample-image').\n display_name: (Optional) The display name of the file.\n */\n file: File;\n /** Used to override the default configuration. */\n config?: CreateFileConfig;\n}\n\n/** A wrapper class for the http response. */\nexport class HttpResponse {\n /** Used to retain the processed HTTP headers in the response. */\n headers?: Record;\n /**\n * The original http response.\n */\n responseInternal: Response;\n\n constructor(response: Response) {\n // Process the headers.\n const headers: Record = {};\n for (const pair of response.headers.entries()) {\n headers[pair[0]] = pair[1];\n }\n this.headers = headers;\n\n // Keep the original response.\n this.responseInternal = response;\n }\n\n json(): Promise {\n return this.responseInternal.json();\n }\n}\n\n/** Callbacks for the live API. */\nexport interface LiveCallbacks {\n onopen?: (() => void) | null;\n onmessage: (e: LiveServerMessage) => void;\n onerror?: ((e: ErrorEvent) => void) | null;\n onclose?: ((e: CloseEvent) => void) | null;\n}\n/** Response for the create file method. */\nexport class CreateFileResponse {\n /** Used to retain the full HTTP response. */\n sdkHttpResponse?: HttpResponse;\n}\n\n/** Used to override the default configuration. 
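// ---- Editor's example (illustrative) ----
// Sketch of the list-files method (ListFilesParameters / ListFilesResponse
// above); the `ai.files.list` entry point is an assumption consistent with
// this SDK's module layout.
import {GoogleGenAI} from '@google/genai';

async function auditFiles(ai: GoogleGenAI): Promise<void> {
  const pager = await ai.files.list({config: {pageSize: 20}});
  for await (const file of pager) {
    // `state` tracks processing; `error` carries a FileStatus on failure.
    console.log(file.name, file.mimeType, file.sizeBytes, file.state);
  }
}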
*/\nexport declare interface GetFileConfig {\n /** Used to override HTTP request options. */\n httpOptions?: HttpOptions;\n}\n\n/** Generates the parameters for the get method. */\nexport declare interface GetFileParameters {\n /** The name identifier for the file to retrieve. */\n name: string;\n /** Used to override the default configuration. */\n config?: GetFileConfig;\n}\n\n/** Used to override the default configuration. */\nexport declare interface DeleteFileConfig {\n /** Used to override HTTP request options. */\n httpOptions?: HttpOptions;\n}\n\n/** Generates the parameters for the get method. */\nexport declare interface DeleteFileParameters {\n /** The name identifier for the file to be deleted. */\n name: string;\n /** Used to override the default configuration. */\n config?: DeleteFileConfig;\n}\n\n/** Response for the delete file method. */\nexport class DeleteFileResponse {}\n\nexport declare interface GetOperationConfig {\n /** Used to override HTTP request options. */\n httpOptions?: HttpOptions;\n}\n\n/** Parameters for the GET method. */\nexport declare interface GetOperationParameters {\n /** The server-assigned name for the operation. */\n operationName: string;\n /** Used to override the default configuration. */\n config?: GetOperationConfig;\n}\n\nexport declare interface FetchPredictOperationConfig {\n /** Used to override HTTP request options. */\n httpOptions?: HttpOptions;\n}\n\n/** Parameters for the fetchPredictOperation method. */\nexport declare interface FetchPredictOperationParameters {\n /** The server-assigned name for the operation. */\n operationName: string;\n resourceName: string;\n /** Used to override the default configuration. */\n config?: FetchPredictOperationConfig;\n}\n\nexport declare interface TestTableItem {\n /** The name of the test. This is used to derive the replay id. */\n name?: string;\n /** The parameters to the test. Use pydantic models. */\n parameters?: Record;\n /** Expects an exception for MLDev matching the string. */\n exceptionIfMldev?: string;\n /** Expects an exception for Vertex matching the string. */\n exceptionIfVertex?: string;\n /** Use if you don't want to use the default replay id which is derived from the test name. */\n overrideReplayId?: string;\n /** True if the parameters contain an unsupported union type. This test will be skipped for languages that do not support the union type. */\n hasUnion?: boolean;\n /** When set to a reason string, this test will be skipped in the API mode. Use this flag for tests that can not be reproduced with the real API. E.g. a test that deletes a resource. */\n skipInApiMode?: string;\n /** Keys to ignore when comparing the request and response. This is useful for tests that are not deterministic. */\n ignoreKeys?: string[];\n}\n\nexport declare interface TestTableFile {\n comment?: string;\n testMethod?: string;\n parameterNames?: string[];\n testTable?: TestTableItem[];\n}\n\n/** Represents a single request in a replay. */\nexport declare interface ReplayRequest {\n method?: string;\n url?: string;\n headers?: Record;\n bodySegments?: Record[];\n}\n\n/** Represents a single response in a replay. */\nexport class ReplayResponse {\n statusCode?: number;\n headers?: Record;\n bodySegments?: Record[];\n sdkResponseSegments?: Record[];\n}\n\n/** Represents a single interaction, request and response in a replay. */\nexport declare interface ReplayInteraction {\n request?: ReplayRequest;\n response?: ReplayResponse;\n}\n\n/** Represents a recorded session. 
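// ---- Editor's example (illustrative) ----
// Hedged example of GetFileParameters / DeleteFileParameters above; the file
// name is a placeholder in the documented `files/...` format.
import {GoogleGenAI} from '@google/genai';

async function inspectAndRemove(ai: GoogleGenAI): Promise<void> {
  const file = await ai.files.get({name: 'files/123-456'}); // placeholder ID
  if (file.error) {
    console.warn('processing failed:', file.error.message);
  }
  await ai.files.delete({name: 'files/123-456'});
}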
*/\nexport declare interface ReplayFile {\n replayId?: string;\n interactions?: ReplayInteraction[];\n}\n\n/** Used to override the default configuration. */\nexport declare interface UploadFileConfig {\n /** Used to override HTTP request options. */\n httpOptions?: HttpOptions;\n /** The name of the file in the destination (e.g., 'files/sample-image'. If not provided one will be generated. */\n name?: string;\n /** mime_type: The MIME type of the file. If not provided, it will be inferred from the file extension. */\n mimeType?: string;\n /** Optional display name of the file. */\n displayName?: string;\n}\n\n/** Used to override the default configuration. */\nexport declare interface DownloadFileConfig {\n /** Used to override HTTP request options. */\n httpOptions?: HttpOptions;\n}\n\n/** Configuration for upscaling an image.\n\n For more information on this configuration, refer to\n the `Imagen API reference documentation\n `_.\n */\nexport declare interface UpscaleImageConfig {\n /** Used to override HTTP request options. */\n httpOptions?: HttpOptions;\n /** Whether to include a reason for filtered-out images in the\n response. */\n includeRaiReason?: boolean;\n /** The image format that the output should be saved as. */\n outputMimeType?: string;\n /** The level of compression if the ``output_mime_type`` is\n ``image/jpeg``. */\n outputCompressionQuality?: number;\n}\n\n/** User-facing config UpscaleImageParameters. */\nexport declare interface UpscaleImageParameters {\n /** The model to use. */\n model: string;\n /** The input image to upscale. */\n image: Image;\n /** The factor to upscale the image (x2 or x4). */\n upscaleFactor: string;\n /** Configuration for upscaling. */\n config?: UpscaleImageConfig;\n}\n\n/** A raw reference image.\n\n A raw reference image represents the base image to edit, provided by the user.\n It can optionally be provided in addition to a mask reference image or\n a style reference image.\n */\nexport declare interface RawReferenceImage {\n /** The reference image for the editing operation. */\n referenceImage?: Image;\n /** The id of the reference image. */\n referenceId?: number;\n /** The type of the reference image. Only set by the SDK. */\n referenceType?: string;\n}\n\n/** Configuration for a Mask reference image. */\nexport declare interface MaskReferenceConfig {\n /** Prompts the model to generate a mask instead of you needing to\n provide one (unless MASK_MODE_USER_PROVIDED is used). */\n maskMode?: MaskReferenceMode;\n /** A list of up to 5 class ids to use for semantic segmentation.\n Automatically creates an image mask based on specific objects. */\n segmentationClasses?: number[];\n /** Dilation percentage of the mask provided.\n Float between 0 and 1. */\n maskDilation?: number;\n}\n\n/** A mask reference image.\n\n This encapsulates either a mask image provided by the user and configs for\n the user provided mask, or only config parameters for the model to generate\n a mask.\n\n A mask image is an image whose non-zero values indicate where to edit the base\n image. If the user provides a mask image, the mask must be in the same\n dimensions as the raw image.\n */\nexport declare interface MaskReferenceImage {\n /** The reference image for the editing operation. */\n referenceImage?: Image;\n /** The id of the reference image. */\n referenceId?: number;\n /** The type of the reference image. Only set by the SDK. */\n referenceType?: string;\n /** Configuration for the mask reference image. 
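// ---- Editor's example (illustrative) ----
// Sketch of UploadFileConfig above. An `ai.files.upload` method accepting a
// local path plus this config is an assumption; per the config docs, the
// MIME type is inferred from the file extension when omitted.
import {GoogleGenAI} from '@google/genai';

async function uploadExample(ai: GoogleGenAI): Promise<void> {
  const file = await ai.files.upload({
    file: 'sample-image.png', // local path; a Blob should also be accepted
    config: {
      name: 'files/sample-image', // optional destination name
      displayName: 'Sample image',
      mimeType: 'image/png',
    },
  });
  console.log('uploaded as', file.name, 'expires at', file.expirationTime);
}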
*/\n config?: MaskReferenceConfig;\n}\n\n/** Configuration for a Control reference image. */\nexport declare interface ControlReferenceConfig {\n /** The type of control reference image to use. */\n controlType?: ControlReferenceType;\n /** Defaults to False. When set to True, the control image will be\n computed by the model based on the control type. When set to False,\n the control image must be provided by the user. */\n enableControlImageComputation?: boolean;\n}\n\n/** A control reference image.\n\n The image of the control reference image is either a control image provided\n by the user, or a regular image which the backend will use to generate a\n control image of. In the case of the latter, the\n enable_control_image_computation field in the config should be set to True.\n\n A control image is an image that represents a sketch image of areas for the\n model to fill in based on the prompt.\n */\nexport declare interface ControlReferenceImage {\n /** The reference image for the editing operation. */\n referenceImage?: Image;\n /** The id of the reference image. */\n referenceId?: number;\n /** The type of the reference image. Only set by the SDK. */\n referenceType?: string;\n /** Configuration for the control reference image. */\n config?: ControlReferenceConfig;\n}\n\n/** Configuration for a Style reference image. */\nexport declare interface StyleReferenceConfig {\n /** A text description of the style to use for the generated image. */\n styleDescription?: string;\n}\n\n/** A style reference image.\n\n This encapsulates a style reference image provided by the user, and\n additionally optional config parameters for the style reference image.\n\n A raw reference image can also be provided as a destination for the style to\n be applied to.\n */\nexport declare interface StyleReferenceImage {\n /** The reference image for the editing operation. */\n referenceImage?: Image;\n /** The id of the reference image. */\n referenceId?: number;\n /** The type of the reference image. Only set by the SDK. */\n referenceType?: string;\n /** Configuration for the style reference image. */\n config?: StyleReferenceConfig;\n}\n\n/** Configuration for a Subject reference image. */\nexport declare interface SubjectReferenceConfig {\n /** The subject type of a subject reference image. */\n subjectType?: SubjectReferenceType;\n /** Subject description for the image. */\n subjectDescription?: string;\n}\n\n/** A subject reference image.\n\n This encapsulates a subject reference image provided by the user, and\n additionally optional config parameters for the subject reference image.\n\n A raw reference image can also be provided as a destination for the subject to\n be applied to.\n */\nexport declare interface SubjectReferenceImage {\n /** The reference image for the editing operation. */\n referenceImage?: Image;\n /** The id of the reference image. */\n referenceId?: number;\n /** The type of the reference image. Only set by the SDK. */\n referenceType?: string;\n /** Configuration for the subject reference image. */\n config?: SubjectReferenceConfig;\n}\n\n/** Sent in response to a `LiveGenerateContentSetup` message from the client. */\nexport declare interface LiveServerSetupComplete {}\n\n/** Incremental server update generated by the model in response to client messages.\n\n Content is generated as quickly as possible, and not in real time. 
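// ---- Editor's example (illustrative) ----
// Constructing the reference-image types above for a masked edit. No edit
// entry point is declared in this excerpt, so only the request objects are
// shown; MASK_MODE_BACKGROUND is an assumed enum member, and referenceType
// is set by the SDK rather than by callers.
import {RawReferenceImage, MaskReferenceImage, MaskReferenceMode} from '@google/genai';

const baseImage: RawReferenceImage = {
  referenceImage: {gcsUri: 'gs://my-bucket/base.png'}, // placeholder URI
  referenceId: 0,
};

const mask: MaskReferenceImage = {
  referenceId: 1,
  config: {
    maskMode: MaskReferenceMode.MASK_MODE_BACKGROUND, // model computes the mask
    maskDilation: 0.05, // dilate the generated mask by 5%
  },
};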
Clients\n may choose to buffer and play it out in real time.\n */\nexport declare interface LiveServerContent {\n /** The content that the model has generated as part of the current conversation with the user. */\n modelTurn?: Content;\n /** If true, indicates that the model is done generating. Generation will only start in response to additional client messages. Can be set alongside `content`, indicating that the `content` is the last in the turn. */\n turnComplete?: boolean;\n /** If true, indicates that a client message has interrupted current model generation. If the client is playing out the content in realtime, this is a good signal to stop and empty the current queue. */\n interrupted?: boolean;\n}\n\n/** Request for the client to execute the `function_calls` and return the responses with the matching `id`s. */\nexport declare interface LiveServerToolCall {\n /** The function call to be executed. */\n functionCalls?: FunctionCall[];\n}\n\n/** Notification for the client that a previously issued `ToolCallMessage` with the specified `id`s should have been not executed and should be cancelled.\n\n If there were side-effects to those tool calls, clients may attempt to undo\n the tool calls. This message occurs only in cases where the clients interrupt\n server turns.\n */\nexport declare interface LiveServerToolCallCancellation {\n /** The ids of the tool calls to be cancelled. */\n ids?: string[];\n}\n\n/** Response message for API call. */\nexport declare interface LiveServerMessage {\n /** Sent in response to a `LiveClientSetup` message from the client. */\n setupComplete?: LiveServerSetupComplete;\n /** Content generated by the model in response to client messages. */\n serverContent?: LiveServerContent;\n /** Request for the client to execute the `function_calls` and return the responses with the matching `id`s. */\n toolCall?: LiveServerToolCall;\n /** Notification for the client that a previously issued `ToolCallMessage` with the specified `id`s should have been not executed and should be cancelled. */\n toolCallCancellation?: LiveServerToolCallCancellation;\n}\n\n/** Message contains configuration that will apply for the duration of the streaming session. */\nexport declare interface LiveClientSetup {\n /** \n The fully qualified name of the publisher model or tuned model endpoint to\n use.\n */\n model?: string;\n /** The generation configuration for the session.\n\nThe following fields are supported:\n- `response_logprobs`\n- `response_mime_type`\n- `logprobs`\n- `response_schema`\n- `stop_sequence`\n- `routing_config`\n- `audio_timestamp`\n */\n generationConfig?: GenerationConfig;\n /** The user provided system instructions for the model.\n Note: only text should be used in parts and content in each part will be\n in a separate paragraph. */\n systemInstruction?: Content;\n /** A list of `Tools` the model may use to generate the next response.\n\n A `Tool` is a piece of code that enables the system to interact with\n external systems to perform an action, or set of actions, outside of\n knowledge and scope of the model. 
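// ---- Editor's example (illustrative) ----
// Consuming the LiveServerMessage variants described above inside a
// LiveCallbacks.onmessage handler; the stopPlayback / render / runTools /
// cancelTools helpers are hypothetical application hooks.
import {Content, FunctionCall, LiveServerMessage} from '@google/genai';

declare function stopPlayback(): void; // hypothetical app hook
declare function render(turn: Content): void; // hypothetical app hook
declare function runTools(calls: FunctionCall[]): void; // hypothetical app hook
declare function cancelTools(ids: string[]): void; // hypothetical: undo side effects

function handleServerMessage(message: LiveServerMessage): void {
  if (message.serverContent?.interrupted) {
    // Client input interrupted generation: stop playback and empty the queue.
    stopPlayback();
  } else if (message.serverContent?.modelTurn) {
    render(message.serverContent.modelTurn);
  } else if (message.toolCall?.functionCalls) {
    // Execute, then reply with a LiveClientToolResponse matched by `id`.
    runTools(message.toolCall.functionCalls);
  } else if (message.toolCallCancellation?.ids) {
    cancelTools(message.toolCallCancellation.ids);
  }
}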
*/\n tools?: ToolListUnion;\n}\n\n/** Incremental update of the current conversation delivered from the client.\n\n All the content here will unconditionally be appended to the conversation\n history and used as part of the prompt to the model to generate content.\n\n A message here will interrupt any current model generation.\n */\nexport declare interface LiveClientContent {\n /** The content appended to the current conversation with the model.\n\n For single-turn queries, this is a single instance. For multi-turn\n queries, this is a repeated field that contains conversation history and\n latest request.\n */\n turns?: Content[];\n /** If true, indicates that the server content generation should start with\n the currently accumulated prompt. Otherwise, the server will await\n additional messages before starting generation. */\n turnComplete?: boolean;\n}\n\n/** User input that is sent in real time.\n\n This is different from `ClientContentUpdate` in a few ways:\n\n - Can be sent continuously without interruption to model generation.\n - If there is a need to mix data interleaved across the\n `ClientContentUpdate` and the `RealtimeUpdate`, server attempts to\n optimize for best response, but there are no guarantees.\n - End of turn is not explicitly specified, but is rather derived from user\n activity (for example, end of speech).\n - Even before the end of turn, the data is processed incrementally\n to optimize for a fast start of the response from the model.\n - Is always assumed to be the user's input (cannot be used to populate\n conversation history).\n */\nexport declare interface LiveClientRealtimeInput {\n /** Inlined bytes data for media input. */\n mediaChunks?: Blob[];\n}\n\n/** Client generated response to a `ToolCall` received from the server.\n\n Individual `FunctionResponse` objects are matched to the respective\n `FunctionCall` objects by the `id` field.\n\n Note that in the unary and server-streaming GenerateContent APIs function\n calling happens by exchanging the `Content` parts, while in the bidi\n GenerateContent APIs function calling happens over this dedicated set of\n messages.\n */\nexport class LiveClientToolResponse {\n /** The response to the function calls. */\n functionResponses?: FunctionResponse[];\n}\n\n/** Messages sent by the client in the API call. */\nexport declare interface LiveClientMessage {\n /** Message to be sent by the system when connecting to the API. SDK users should not send this message. */\n setup?: LiveClientSetup;\n /** Incremental update of the current conversation delivered from the client. */\n clientContent?: LiveClientContent;\n /** User input that is sent in real time. */\n realtimeInput?: LiveClientRealtimeInput;\n /** Response to a `ToolCallMessage` received from the server. */\n toolResponse?: LiveClientToolResponse;\n}\n\n/** Session config for the API connection. */\nexport declare interface LiveConnectConfig {\n /** The generation configuration for the session. */\n generationConfig?: GenerationConfig;\n /** The requested modalities of the response. Represents the set of\n modalities that the model can return. Defaults to AUDIO if not specified.\n */\n responseModalities?: Modality[];\n /** The speech generation configuration.\n */\n speechConfig?: SpeechConfig;\n /** The user provided system instructions for the model.\n Note: only text should be used in parts and content in each part will be\n in a separate paragraph. 
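// ---- Editor's example (illustrative) ----
// Hedged sketch of LiveConnectParameters / LiveCallbacks above.
// `ai.live.connect`, the Modality enum member, and the model ID are
// assumptions; per the config docs, responseModalities defaults to AUDIO.
import {GoogleGenAI, Modality} from '@google/genai';

async function liveExample(ai: GoogleGenAI) {
  const session = await ai.live.connect({
    model: 'gemini-2.0-flash-exp', // placeholder model ID
    callbacks: {
      onopen: () => console.log('session open'),
      onmessage: (message) => handleServerMessage(message), // see sketch above
      onerror: (e) => console.error('live error:', e),
      onclose: () => console.log('session closed'),
    },
    config: {responseModalities: [Modality.TEXT]},
  });
  return session;
}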
*/\n systemInstruction?: Content;\n /** A list of `Tools` the model may use to generate the next response.\n\n A `Tool` is a piece of code that enables the system to interact with\n external systems to perform an action, or set of actions, outside of\n knowledge and scope of the model. */\n tools?: ToolListUnion;\n}\n\n/** Parameters for connecting to the live API. */\nexport declare interface LiveConnectParameters {\n /** ID of the model to use. For a list of models, see `Google models\n `_. */\n model: string;\n /** callbacks */\n callbacks: LiveCallbacks;\n /** Optional configuration parameters for the request.\n */\n config?: LiveConnectConfig;\n}\n\n/** Parameters for initializing a new chat session.\n\n These parameters are used when creating a chat session with the\n `chats.create()` method.\n */\nexport declare interface CreateChatParameters {\n /** The name of the model to use for the chat session.\n\n For example: 'gemini-2.0-flash', 'gemini-1.5-pro', etc. See gemini API\n docs to find the available models.\n */\n model: string;\n /** Config for the entire chat session.\n\n This config applies to all requests within the session\n unless overridden by a per-request `config` in `SendMessageParameters`.\n */\n config?: GenerateContentConfig;\n /** The initial conversation history for the chat session.\n\n This allows you to start the chat with a pre-existing history. The history\n must be a list of `Content` alternating between 'user' and 'model' roles.\n It should start with a 'user' message.\n */\n history?: Content[];\n}\n\n/** Parameters for sending a message within a chat session.\n\n These parameters are used with the `chat.sendMessage()` method.\n */\nexport declare interface SendMessageParameters {\n /** The message to send to the model.\n\n The SDK will combine all parts into a single 'user' content to send to\n the model.\n */\n message: PartListUnion;\n /** Config for this specific request.\n\n Please note that the per-request config does not change the chat level\n config, nor inherit from it. If you intend to use some values from the\n chat's default config, you must explicitly copy them into this per-request\n config.\n */\n config?: GenerateContentConfig;\n}\n\n/** Parameters for sending client content to the live API. */\nexport declare interface LiveSendClientContentParameters {\n /** Client content to send to the session. */\n turns?: ContentListUnion;\n /** If true, indicates that the server content generation should start with\n the currently accumulated prompt. Otherwise, the server will await\n additional messages before starting generation. */\n turnComplete?: boolean;\n}\n\n/** Parameters for sending realtime input to the live API. */\nexport declare interface LiveSendRealtimeInputParameters {\n /** Realtime input to send to the session. */\n media: Blob;\n}\n\n/** Parameters for sending tool responses to the live API. */\nexport class LiveSendToolResponseParameters {\n /** Tool responses to send to the session. */\n functionResponses: FunctionResponse[] | FunctionResponse = [];\n}\n\n/** Parameters for the get method of the operations module. */\nexport declare interface OperationGetParameters {\n /** The operation to be retrieved. */\n operation: GenerateVideosOperation;\n /** Used to override the default configuration. 
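// ---- Editor's example (illustrative) ----
// Sketch of CreateChatParameters / SendMessageParameters above, using the
// `chats.create()` and `chat.sendMessage()` methods the docs reference. Note
// the documented caveat: the per-request config neither changes nor inherits
// from the chat-level config, so shared values must be copied explicitly.
import {GoogleGenAI} from '@google/genai';

async function chatExample(ai: GoogleGenAI): Promise<void> {
  const chat = ai.chats.create({
    model: 'gemini-2.0-flash', // placeholder model ID
    history: [
      // History must alternate user/model roles and start with 'user'.
      {role: 'user', parts: [{text: 'Hello'}]},
      {role: 'model', parts: [{text: 'Hi! How can I help?'}]},
    ],
    config: {temperature: 0.2},
  });
  const reply = await chat.sendMessage({
    message: 'Draft a one-line status update.',
    config: {temperature: 0.2}, // copied, since per-request config does not inherit
  });
  console.log(reply.text);
}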
*/\n config?: GetOperationConfig;\n}\n\nexport type PartUnion = Part | string;\n\nexport type PartListUnion = PartUnion[] | PartUnion;\n\nexport type ContentUnion = Content | PartUnion[] | PartUnion;\n\nexport type ContentListUnion = ContentUnion[] | ContentUnion;\n\nexport type SchemaUnion = Schema;\n\nexport type SpeechConfigUnion = SpeechConfig | string;\n\nexport type ToolListUnion = Tool[];\n","/**\n * @license\n * Copyright 2025 Google LLC\n * SPDX-License-Identifier: Apache-2.0\n */\n\n// Code generated by the Google Gen AI SDK generator DO NOT EDIT.\n\nimport {ApiClient} from './_api_client';\nimport * as common from './_common';\nimport {BaseModule} from './_common';\nimport * as converters from './converters/_caches_converters';\nimport {PagedItem, Pager} from './pagers';\nimport * as types from './types';\n\nexport class Caches extends BaseModule {\n constructor(private readonly apiClient: ApiClient) {\n super();\n }\n\n /**\n * Lists cached content configurations.\n *\n * @param params - The parameters for the list request.\n * @return The paginated results of the list of cached contents.\n *\n * @example\n * ```ts\n * const cachedContents = await ai.caches.list({config: {'pageSize': 2}});\n * for (const cachedContent of cachedContents) {\n * console.log(cachedContent);\n * }\n * ```\n */\n list = async (\n params: types.ListCachedContentsParameters = {},\n ): Promise> => {\n return new Pager(\n PagedItem.PAGED_ITEM_CACHED_CONTENTS,\n (x: types.ListCachedContentsParameters) => this.listInternal(x),\n await this.listInternal(params),\n params,\n );\n };\n\n /**\n * Creates a cached contents resource.\n *\n * @remarks\n * Context caching is only supported for specific models. See [Gemini\n * Developer API reference] (https://ai.google.dev/gemini-api/docs/caching?lang=node/context-cac)\n * and [Vertex AI reference] (https://cloud.google.com/vertex-ai/generative-ai/docs/context-cache/context-cache-overview#supported_models)\n * for more information.\n *\n * @param params - The parameters for the create request.\n * @return The created cached content.\n *\n * @example\n * ```ts\n * const contents = ...; // Initialize the content to cache.\n * const response = await ai.caches.create({\n * model: 'gemini-1.5-flash',\n * config: {\n * 'contents': contents,\n * 'displayName': 'test cache',\n * 'systemInstruction': 'What is the sum of the two pdfs?',\n * 'ttl': '86400s',\n * }\n * });\n * ```\n */\n async create(\n params: types.CreateCachedContentParameters,\n ): Promise {\n let response: Promise;\n let path: string = '';\n let queryParams: Record = {};\n if (this.apiClient.isVertexAI()) {\n const body = converters.createCachedContentParametersToVertex(\n this.apiClient,\n params,\n );\n path = common.formatMap(\n 'cachedContents',\n body['_url'] as Record,\n );\n queryParams = body['_query'] as Record;\n delete body['config'];\n delete body['_url'];\n delete body['_query'];\n\n response = this.apiClient\n .request({\n path: path,\n queryParams: queryParams,\n body: JSON.stringify(body),\n httpMethod: 'POST',\n httpOptions: params.config?.httpOptions,\n })\n .then((httpResponse) => {\n return httpResponse.json();\n }) as Promise;\n\n return response.then((apiResponse) => {\n const resp = converters.cachedContentFromVertex(\n this.apiClient,\n apiResponse,\n );\n\n return resp as types.CachedContent;\n });\n } else {\n const body = converters.createCachedContentParametersToMldev(\n this.apiClient,\n params,\n );\n path = common.formatMap(\n 'cachedContents',\n body['_url'] as Record,\n 
);\n queryParams = body['_query'] as Record;\n delete body['config'];\n delete body['_url'];\n delete body['_query'];\n\n response = this.apiClient\n .request({\n path: path,\n queryParams: queryParams,\n body: JSON.stringify(body),\n httpMethod: 'POST',\n httpOptions: params.config?.httpOptions,\n })\n .then((httpResponse) => {\n return httpResponse.json();\n }) as Promise;\n\n return response.then((apiResponse) => {\n const resp = converters.cachedContentFromMldev(\n this.apiClient,\n apiResponse,\n );\n\n return resp as types.CachedContent;\n });\n }\n }\n\n /**\n * Gets cached content configurations.\n *\n * @param params - The parameters for the get request.\n * @return The cached content.\n *\n * @example\n * ```ts\n * await ai.caches.get({name: 'gemini-1.5-flash'});\n * ```\n */\n async get(\n params: types.GetCachedContentParameters,\n ): Promise {\n let response: Promise;\n let path: string = '';\n let queryParams: Record = {};\n if (this.apiClient.isVertexAI()) {\n const body = converters.getCachedContentParametersToVertex(\n this.apiClient,\n params,\n );\n path = common.formatMap(\n '{name}',\n body['_url'] as Record,\n );\n queryParams = body['_query'] as Record;\n delete body['config'];\n delete body['_url'];\n delete body['_query'];\n\n response = this.apiClient\n .request({\n path: path,\n queryParams: queryParams,\n body: JSON.stringify(body),\n httpMethod: 'GET',\n httpOptions: params.config?.httpOptions,\n })\n .then((httpResponse) => {\n return httpResponse.json();\n }) as Promise;\n\n return response.then((apiResponse) => {\n const resp = converters.cachedContentFromVertex(\n this.apiClient,\n apiResponse,\n );\n\n return resp as types.CachedContent;\n });\n } else {\n const body = converters.getCachedContentParametersToMldev(\n this.apiClient,\n params,\n );\n path = common.formatMap(\n '{name}',\n body['_url'] as Record,\n );\n queryParams = body['_query'] as Record;\n delete body['config'];\n delete body['_url'];\n delete body['_query'];\n\n response = this.apiClient\n .request({\n path: path,\n queryParams: queryParams,\n body: JSON.stringify(body),\n httpMethod: 'GET',\n httpOptions: params.config?.httpOptions,\n })\n .then((httpResponse) => {\n return httpResponse.json();\n }) as Promise;\n\n return response.then((apiResponse) => {\n const resp = converters.cachedContentFromMldev(\n this.apiClient,\n apiResponse,\n );\n\n return resp as types.CachedContent;\n });\n }\n }\n\n /**\n * Deletes cached content.\n *\n * @param params - The parameters for the delete request.\n * @return The empty response returned by the API.\n *\n * @example\n * ```ts\n * await ai.caches.delete({name: 'gemini-1.5-flash'});\n * ```\n */\n async delete(\n params: types.DeleteCachedContentParameters,\n ): Promise {\n let response: Promise;\n let path: string = '';\n let queryParams: Record = {};\n if (this.apiClient.isVertexAI()) {\n const body = converters.deleteCachedContentParametersToVertex(\n this.apiClient,\n params,\n );\n path = common.formatMap(\n '{name}',\n body['_url'] as Record,\n );\n queryParams = body['_query'] as Record;\n delete body['config'];\n delete body['_url'];\n delete body['_query'];\n\n response = this.apiClient\n .request({\n path: path,\n queryParams: queryParams,\n body: JSON.stringify(body),\n httpMethod: 'DELETE',\n httpOptions: params.config?.httpOptions,\n })\n .then((httpResponse) => {\n return httpResponse.json();\n }) as Promise;\n\n return response.then(() => {\n const resp = converters.deleteCachedContentResponseFromVertex();\n const typedResp = new 
types.DeleteCachedContentResponse();\n Object.assign(typedResp, resp);\n return typedResp;\n });\n } else {\n const body = converters.deleteCachedContentParametersToMldev(\n this.apiClient,\n params,\n );\n path = common.formatMap(\n '{name}',\n body['_url'] as Record,\n );\n queryParams = body['_query'] as Record;\n delete body['config'];\n delete body['_url'];\n delete body['_query'];\n\n response = this.apiClient\n .request({\n path: path,\n queryParams: queryParams,\n body: JSON.stringify(body),\n httpMethod: 'DELETE',\n httpOptions: params.config?.httpOptions,\n })\n .then((httpResponse) => {\n return httpResponse.json();\n }) as Promise;\n\n return response.then(() => {\n const resp = converters.deleteCachedContentResponseFromMldev();\n const typedResp = new types.DeleteCachedContentResponse();\n Object.assign(typedResp, resp);\n return typedResp;\n });\n }\n }\n\n /**\n * Updates cached content configurations.\n *\n * @param params - The parameters for the update request.\n * @return The updated cached content.\n *\n * @example\n * ```ts\n * const response = await ai.caches.update({\n * name: 'gemini-1.5-flash',\n * config: {'ttl': '7600s'}\n * });\n * ```\n */\n async update(\n params: types.UpdateCachedContentParameters,\n ): Promise {\n let response: Promise;\n let path: string = '';\n let queryParams: Record = {};\n if (this.apiClient.isVertexAI()) {\n const body = converters.updateCachedContentParametersToVertex(\n this.apiClient,\n params,\n );\n path = common.formatMap(\n '{name}',\n body['_url'] as Record,\n );\n queryParams = body['_query'] as Record;\n delete body['config'];\n delete body['_url'];\n delete body['_query'];\n\n response = this.apiClient\n .request({\n path: path,\n queryParams: queryParams,\n body: JSON.stringify(body),\n httpMethod: 'PATCH',\n httpOptions: params.config?.httpOptions,\n })\n .then((httpResponse) => {\n return httpResponse.json();\n }) as Promise;\n\n return response.then((apiResponse) => {\n const resp = converters.cachedContentFromVertex(\n this.apiClient,\n apiResponse,\n );\n\n return resp as types.CachedContent;\n });\n } else {\n const body = converters.updateCachedContentParametersToMldev(\n this.apiClient,\n params,\n );\n path = common.formatMap(\n '{name}',\n body['_url'] as Record,\n );\n queryParams = body['_query'] as Record;\n delete body['config'];\n delete body['_url'];\n delete body['_query'];\n\n response = this.apiClient\n .request({\n path: path,\n queryParams: queryParams,\n body: JSON.stringify(body),\n httpMethod: 'PATCH',\n httpOptions: params.config?.httpOptions,\n })\n .then((httpResponse) => {\n return httpResponse.json();\n }) as Promise;\n\n return response.then((apiResponse) => {\n const resp = converters.cachedContentFromMldev(\n this.apiClient,\n apiResponse,\n );\n\n return resp as types.CachedContent;\n });\n }\n }\n\n private async listInternal(\n params: types.ListCachedContentsParameters,\n ): Promise {\n let response: Promise