From b70150fb84c92488da0c16f1737e4226d076abea Mon Sep 17 00:00:00 2001 From: Ben Batha Date: Fri, 3 Oct 2025 17:17:48 -0400 Subject: [PATCH 1/4] Include images From 37bf67af6097a6396e8f96a64d9224312355ff0f Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 3 Oct 2025 21:18:44 +0000 Subject: [PATCH 2/4] feat(api): Images generations --- src/gradient/resources/images/generations.py | 690 +++++++++++++++++++ 1 file changed, 690 insertions(+) diff --git a/src/gradient/resources/images/generations.py b/src/gradient/resources/images/generations.py index 8a5cfdb0..994b9d6e 100644 --- a/src/gradient/resources/images/generations.py +++ b/src/gradient/resources/images/generations.py @@ -1,3 +1,4 @@ +<<<<<<< HEAD # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. from __future__ import annotations @@ -704,3 +705,692 @@ def __init__(self, generations: AsyncGenerationsResource) -> None: self.create = async_to_streamed_response_wrapper( generations.create, ) +||||||| parent of 6327cdf (feat(api): Images generations) +======= +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional +from typing_extensions import Literal, overload + +import httpx + +from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given +from ..._utils import required_args, maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._streaming import Stream, AsyncStream +from ..._base_client import make_request_options +from ...types.images import generation_create_params +from ...types.shared.image_gen_stream_event import ImageGenStreamEvent +from ...types.images.generation_create_response import GenerationCreateResponse + +__all__ = ["GenerationsResource", "AsyncGenerationsResource"] + + +class GenerationsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> GenerationsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers + """ + return GenerationsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> GenerationsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response + """ + return GenerationsResourceWithStreamingResponse(self) + + @overload + def create( + self, + *, + prompt: str, + background: Optional[str] | Omit = omit, + model: str | Omit = omit, + moderation: Optional[str] | Omit = omit, + n: Optional[int] | Omit = omit, + output_compression: Optional[int] | Omit = omit, + output_format: Optional[str] | Omit = omit, + partial_images: Optional[int] | Omit = omit, + quality: Optional[str] | Omit = omit, + size: Optional[str] | Omit = omit, + stream: Optional[Literal[False]] | Omit = omit, + user: Optional[str] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> GenerationCreateResponse: + """ + Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest + image generation model with automatic prompt optimization and enhanced visual + capabilities. + + Args: + prompt: A text description of the desired image(s). GPT-IMAGE-1 supports up to 32,000 + characters and provides automatic prompt optimization for best results. + + background: + The background setting for the image generation. GPT-IMAGE-1 supports: + transparent, opaque, auto. + + model: The model to use for image generation. GPT-IMAGE-1 is the latest model offering + the best quality with automatic optimization and enhanced capabilities. + + moderation: The moderation setting for the image generation. GPT-IMAGE-1 supports: low, + auto. + + n: The number of images to generate. GPT-IMAGE-1 only supports n=1. + + output_compression: The output compression for the image generation. GPT-IMAGE-1 supports: 0-100. + + output_format: The output format for the image generation. GPT-IMAGE-1 supports: png, webp, + jpeg. + + partial_images: The number of partial image chunks to return during streaming generation. This + parameter is optional with a default of 0. When stream=true, this must be + greater than 0 to receive progressive updates of the image as it's being + generated. Higher values provide more frequent updates but may increase response + overhead. + + quality: The quality of the image that will be generated. GPT-IMAGE-1 supports: auto + (automatically select best quality), high, medium, low. + + size: The size of the generated images. GPT-IMAGE-1 supports: auto (automatically + select best size), 1536x1024 (landscape), 1024x1536 (portrait). + + stream: If set to true, partial image data will be streamed as the image is being + generated. When streaming, the response will be sent as server-sent events with + partial image chunks. When stream is true, partial_images must be greater + than 0. + + user: A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
+ + @overload + def create( + self, + *, + prompt: str, + stream: Literal[True], + background: Optional[str] | Omit = omit, + model: str | Omit = omit, + moderation: Optional[str] | Omit = omit, + n: Optional[int] | Omit = omit, + output_compression: Optional[int] | Omit = omit, + output_format: Optional[str] | Omit = omit, + partial_images: Optional[int] | Omit = omit, + quality: Optional[str] | Omit = omit, + size: Optional[str] | Omit = omit, + user: Optional[str] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> Stream[ImageGenStreamEvent]: + """ + Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest + image generation model with automatic prompt optimization and enhanced visual + capabilities. + + Args: + prompt: A text description of the desired image(s). GPT-IMAGE-1 supports up to 32,000 + characters and provides automatic prompt optimization for best results. + + stream: If set to true, partial image data will be streamed as the image is being + generated. When streaming, the response will be sent as server-sent events with + partial image chunks. When stream is true, partial_images must be greater + than 0. + + background: + The background setting for the image generation. GPT-IMAGE-1 supports: + transparent, opaque, auto. + + model: The model to use for image generation. GPT-IMAGE-1 is the latest model offering + the best quality with automatic optimization and enhanced capabilities. + + moderation: The moderation setting for the image generation. GPT-IMAGE-1 supports: low, + auto. + + n: The number of images to generate. GPT-IMAGE-1 only supports n=1. + + output_compression: The output compression for the image generation. GPT-IMAGE-1 supports: 0-100. + + output_format: The output format for the image generation. GPT-IMAGE-1 supports: png, webp, + jpeg. + + partial_images: The number of partial image chunks to return during streaming generation. This + parameter is optional with a default of 0. When stream=true, this must be + greater than 0 to receive progressive updates of the image as it's being + generated. Higher values provide more frequent updates but may increase response + overhead. + + quality: The quality of the image that will be generated. GPT-IMAGE-1 supports: auto + (automatically select best quality), high, medium, low. + + size: The size of the generated images. GPT-IMAGE-1 supports: auto (automatically + select best size), 1536x1024 (landscape), 1024x1536 (portrait). + + user: A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
+ + @overload + def create( + self, + *, + prompt: str, + stream: bool, + background: Optional[str] | Omit = omit, + model: str | Omit = omit, + moderation: Optional[str] | Omit = omit, + n: Optional[int] | Omit = omit, + output_compression: Optional[int] | Omit = omit, + output_format: Optional[str] | Omit = omit, + partial_images: Optional[int] | Omit = omit, + quality: Optional[str] | Omit = omit, + size: Optional[str] | Omit = omit, + user: Optional[str] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> GenerationCreateResponse | Stream[ImageGenStreamEvent]: + """ + Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest + image generation model with automatic prompt optimization and enhanced visual + capabilities. + + Args: + prompt: A text description of the desired image(s). GPT-IMAGE-1 supports up to 32,000 + characters and provides automatic prompt optimization for best results. + + stream: If set to true, partial image data will be streamed as the image is being + generated. When streaming, the response will be sent as server-sent events with + partial image chunks. When stream is true, partial_images must be greater + than 0. + + background: + The background setting for the image generation. GPT-IMAGE-1 supports: + transparent, opaque, auto. + + model: The model to use for image generation. GPT-IMAGE-1 is the latest model offering + the best quality with automatic optimization and enhanced capabilities. + + moderation: The moderation setting for the image generation. GPT-IMAGE-1 supports: low, + auto. + + n: The number of images to generate. GPT-IMAGE-1 only supports n=1. + + output_compression: The output compression for the image generation. GPT-IMAGE-1 supports: 0-100. + + output_format: The output format for the image generation. GPT-IMAGE-1 supports: png, webp, + jpeg. + + partial_images: The number of partial image chunks to return during streaming generation. This + parameter is optional with a default of 0. When stream=true, this must be + greater than 0 to receive progressive updates of the image as it's being + generated. Higher values provide more frequent updates but may increase response + overhead. + + quality: The quality of the image that will be generated. GPT-IMAGE-1 supports: auto + (automatically select best quality), high, medium, low. + + size: The size of the generated images. GPT-IMAGE-1 supports: auto (automatically + select best size), 1536x1024 (landscape), 1024x1536 (portrait). + + user: A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
+ + @required_args(["prompt"], ["prompt", "stream"]) + def create( + self, + *, + prompt: str, + background: Optional[str] | Omit = omit, + model: str | Omit = omit, + moderation: Optional[str] | Omit = omit, + n: Optional[int] | Omit = omit, + output_compression: Optional[int] | Omit = omit, + output_format: Optional[str] | Omit = omit, + partial_images: Optional[int] | Omit = omit, + quality: Optional[str] | Omit = omit, + size: Optional[str] | Omit = omit, + stream: Optional[Literal[False]] | Literal[True] | Omit = omit, + user: Optional[str] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> GenerationCreateResponse | Stream[ImageGenStreamEvent]: + return self._post( + "/images/generations" + if self._client._base_url_overridden + else f"{self._client.inference_endpoint}/v1/images/generations", + body=maybe_transform( + { + "prompt": prompt, + "background": background, + "model": model, + "moderation": moderation, + "n": n, + "output_compression": output_compression, + "output_format": output_format, + "partial_images": partial_images, + "quality": quality, + "size": size, + "stream": stream, + "user": user, + }, + generation_create_params.GenerationCreateParamsStreaming + if stream + else generation_create_params.GenerationCreateParamsNonStreaming, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=GenerationCreateResponse, + stream=stream or False, + stream_cls=Stream[ImageGenStreamEvent], + ) + + +class AsyncGenerationsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncGenerationsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers + """ + return AsyncGenerationsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncGenerationsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response + """ + return AsyncGenerationsResourceWithStreamingResponse(self) + + @overload + async def create( + self, + *, + prompt: str, + background: Optional[str] | Omit = omit, + model: str | Omit = omit, + moderation: Optional[str] | Omit = omit, + n: Optional[int] | Omit = omit, + output_compression: Optional[int] | Omit = omit, + output_format: Optional[str] | Omit = omit, + partial_images: Optional[int] | Omit = omit, + quality: Optional[str] | Omit = omit, + size: Optional[str] | Omit = omit, + stream: Optional[Literal[False]] | Omit = omit, + user: Optional[str] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> GenerationCreateResponse: + """ + Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest + image generation model with automatic prompt optimization and enhanced visual + capabilities. + + Args: + prompt: A text description of the desired image(s). GPT-IMAGE-1 supports up to 32,000 + characters and provides automatic prompt optimization for best results. + + background: + The background setting for the image generation. GPT-IMAGE-1 supports: + transparent, opaque, auto. + + model: The model to use for image generation. GPT-IMAGE-1 is the latest model offering + the best quality with automatic optimization and enhanced capabilities. + + moderation: The moderation setting for the image generation. GPT-IMAGE-1 supports: low, + auto. + + n: The number of images to generate. GPT-IMAGE-1 only supports n=1. + + output_compression: The output compression for the image generation. GPT-IMAGE-1 supports: 0-100. + + output_format: The output format for the image generation. GPT-IMAGE-1 supports: png, webp, + jpeg. + + partial_images: The number of partial image chunks to return during streaming generation. This + parameter is optional with a default of 0. When stream=true, this must be + greater than 0 to receive progressive updates of the image as it's being + generated. Higher values provide more frequent updates but may increase response + overhead. + + quality: The quality of the image that will be generated. GPT-IMAGE-1 supports: auto + (automatically select best quality), high, medium, low. + + size: The size of the generated images. GPT-IMAGE-1 supports: auto (automatically + select best size), 1536x1024 (landscape), 1024x1536 (portrait). + + stream: If set to true, partial image data will be streamed as the image is being + generated. When streaming, the response will be sent as server-sent events with + partial image chunks. When stream is true, partial_images must be greater + than 0. + + user: A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def create( + self, + *, + prompt: str, + stream: Literal[True], + background: Optional[str] | Omit = omit, + model: str | Omit = omit, + moderation: Optional[str] | Omit = omit, + n: Optional[int] | Omit = omit, + output_compression: Optional[int] | Omit = omit, + output_format: Optional[str] | Omit = omit, + partial_images: Optional[int] | Omit = omit, + quality: Optional[str] | Omit = omit, + size: Optional[str] | Omit = omit, + user: Optional[str] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> AsyncStream[ImageGenStreamEvent]: + """ + Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest + image generation model with automatic prompt optimization and enhanced visual + capabilities. + + Args: + prompt: A text description of the desired image(s). GPT-IMAGE-1 supports up to 32,000 + characters and provides automatic prompt optimization for best results. + + stream: If set to true, partial image data will be streamed as the image is being + generated. When streaming, the response will be sent as server-sent events with + partial image chunks. When stream is true, partial_images must be greater + than 0. + + background: + The background setting for the image generation. GPT-IMAGE-1 supports: + transparent, opaque, auto. + + model: The model to use for image generation. GPT-IMAGE-1 is the latest model offering + the best quality with automatic optimization and enhanced capabilities. + + moderation: The moderation setting for the image generation. GPT-IMAGE-1 supports: low, + auto. + + n: The number of images to generate. GPT-IMAGE-1 only supports n=1. + + output_compression: The output compression for the image generation. GPT-IMAGE-1 supports: 0-100. + + output_format: The output format for the image generation. GPT-IMAGE-1 supports: png, webp, + jpeg. + + partial_images: The number of partial image chunks to return during streaming generation. This + parameter is optional with a default of 0. When stream=true, this must be + greater than 0 to receive progressive updates of the image as it's being + generated. Higher values provide more frequent updates but may increase response + overhead. + + quality: The quality of the image that will be generated. GPT-IMAGE-1 supports: auto + (automatically select best quality), high, medium, low. + + size: The size of the generated images. GPT-IMAGE-1 supports: auto (automatically + select best size), 1536x1024 (landscape), 1024x1536 (portrait). + + user: A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def create( + self, + *, + prompt: str, + stream: bool, + background: Optional[str] | Omit = omit, + model: str | Omit = omit, + moderation: Optional[str] | Omit = omit, + n: Optional[int] | Omit = omit, + output_compression: Optional[int] | Omit = omit, + output_format: Optional[str] | Omit = omit, + partial_images: Optional[int] | Omit = omit, + quality: Optional[str] | Omit = omit, + size: Optional[str] | Omit = omit, + user: Optional[str] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> GenerationCreateResponse | AsyncStream[ImageGenStreamEvent]: + """ + Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest + image generation model with automatic prompt optimization and enhanced visual + capabilities. + + Args: + prompt: A text description of the desired image(s). GPT-IMAGE-1 supports up to 32,000 + characters and provides automatic prompt optimization for best results. + + stream: If set to true, partial image data will be streamed as the image is being + generated. When streaming, the response will be sent as server-sent events with + partial image chunks. When stream is true, partial_images must be greater + than 0. + + background: + The background setting for the image generation. GPT-IMAGE-1 supports: + transparent, opaque, auto. + + model: The model to use for image generation. GPT-IMAGE-1 is the latest model offering + the best quality with automatic optimization and enhanced capabilities. + + moderation: The moderation setting for the image generation. GPT-IMAGE-1 supports: low, + auto. + + n: The number of images to generate. GPT-IMAGE-1 only supports n=1. + + output_compression: The output compression for the image generation. GPT-IMAGE-1 supports: 0-100. + + output_format: The output format for the image generation. GPT-IMAGE-1 supports: png, webp, + jpeg. + + partial_images: The number of partial image chunks to return during streaming generation. This + parameter is optional with a default of 0. When stream=true, this must be + greater than 0 to receive progressive updates of the image as it's being + generated. Higher values provide more frequent updates but may increase response + overhead. + + quality: The quality of the image that will be generated. GPT-IMAGE-1 supports: auto + (automatically select best quality), high, medium, low. + + size: The size of the generated images. GPT-IMAGE-1 supports: auto (automatically + select best size), 1536x1024 (landscape), 1024x1536 (portrait). + + user: A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @required_args(["prompt"], ["prompt", "stream"]) + async def create( + self, + *, + prompt: str, + background: Optional[str] | Omit = omit, + model: str | Omit = omit, + moderation: Optional[str] | Omit = omit, + n: Optional[int] | Omit = omit, + output_compression: Optional[int] | Omit = omit, + output_format: Optional[str] | Omit = omit, + partial_images: Optional[int] | Omit = omit, + quality: Optional[str] | Omit = omit, + size: Optional[str] | Omit = omit, + stream: Optional[Literal[False]] | Literal[True] | Omit = omit, + user: Optional[str] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> GenerationCreateResponse | AsyncStream[ImageGenStreamEvent]: + return await self._post( + "/images/generations" + if self._client._base_url_overridden + else f"{self._client.inference_endpoint}/v1/images/generations", + body=await async_maybe_transform( + { + "prompt": prompt, + "background": background, + "model": model, + "moderation": moderation, + "n": n, + "output_compression": output_compression, + "output_format": output_format, + "partial_images": partial_images, + "quality": quality, + "size": size, + "stream": stream, + "user": user, + }, + generation_create_params.GenerationCreateParamsStreaming + if stream + else generation_create_params.GenerationCreateParamsNonStreaming, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=GenerationCreateResponse, + stream=stream or False, + stream_cls=AsyncStream[ImageGenStreamEvent], + ) + + +class GenerationsResourceWithRawResponse: + def __init__(self, generations: GenerationsResource) -> None: + self._generations = generations + + self.create = to_raw_response_wrapper( + generations.create, + ) + + +class AsyncGenerationsResourceWithRawResponse: + def __init__(self, generations: AsyncGenerationsResource) -> None: + self._generations = generations + + self.create = async_to_raw_response_wrapper( + generations.create, + ) + + +class GenerationsResourceWithStreamingResponse: + def __init__(self, generations: GenerationsResource) -> None: + self._generations = generations + + self.create = to_streamed_response_wrapper( + generations.create, + ) + + +class AsyncGenerationsResourceWithStreamingResponse: + def __init__(self, generations: AsyncGenerationsResource) -> None: + self._generations = generations + + self.create = async_to_streamed_response_wrapper( + generations.create, + ) +>>>>>>> 6327cdf (feat(api): Images generations) From ccde5834b38644bb5f523a7edbbec543bbbd3237 Mon Sep 17 00:00:00 2001 From: bkannappan Date: Mon, 6 Oct 2025 15:25:42 -0500 Subject: [PATCH 3/4] fix next --- src/gradient/resources/images/generations.py | 690 ------------------- 1 file changed, 690 deletions(-) diff --git a/src/gradient/resources/images/generations.py b/src/gradient/resources/images/generations.py index 994b9d6e..8a5cfdb0 100644 --- a/src/gradient/resources/images/generations.py +++ b/src/gradient/resources/images/generations.py @@ -1,4 +1,3 @@ -<<<<<<< HEAD # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. from __future__ import annotations @@ -705,692 +704,3 @@ def __init__(self, generations: AsyncGenerationsResource) -> None: self.create = async_to_streamed_response_wrapper( generations.create, ) -||||||| parent of 6327cdf (feat(api): Images generations) -======= -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import Optional -from typing_extensions import Literal, overload - -import httpx - -from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given -from ..._utils import required_args, maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._streaming import Stream, AsyncStream -from ..._base_client import make_request_options -from ...types.images import generation_create_params -from ...types.shared.image_gen_stream_event import ImageGenStreamEvent -from ...types.images.generation_create_response import GenerationCreateResponse - -__all__ = ["GenerationsResource", "AsyncGenerationsResource"] - - -class GenerationsResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> GenerationsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers - """ - return GenerationsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> GenerationsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response - """ - return GenerationsResourceWithStreamingResponse(self) - - @overload - def create( - self, - *, - prompt: str, - background: Optional[str] | Omit = omit, - model: str | Omit = omit, - moderation: Optional[str] | Omit = omit, - n: Optional[int] | Omit = omit, - output_compression: Optional[int] | Omit = omit, - output_format: Optional[str] | Omit = omit, - partial_images: Optional[int] | Omit = omit, - quality: Optional[str] | Omit = omit, - size: Optional[str] | Omit = omit, - stream: Optional[Literal[False]] | Omit = omit, - user: Optional[str] | Omit = omit, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = not_given, - ) -> GenerationCreateResponse: - """ - Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest - image generation model with automatic prompt optimization and enhanced visual - capabilities. - - Args: - prompt: A text description of the desired image(s). GPT-IMAGE-1 supports up to 32,000 - characters and provides automatic prompt optimization for best results. - - background: - The background setting for the image generation. GPT-IMAGE-1 supports: - transparent, opaque, auto. - - model: The model to use for image generation. GPT-IMAGE-1 is the latest model offering - the best quality with automatic optimization and enhanced capabilities. - - moderation: The moderation setting for the image generation. GPT-IMAGE-1 supports: low, - auto. - - n: The number of images to generate. GPT-IMAGE-1 only supports n=1. 
- - output_compression: The output compression for the image generation. GPT-IMAGE-1 supports: 0-100. - - output_format: The output format for the image generation. GPT-IMAGE-1 supports: png, webp, - jpeg. - - partial_images: The number of partial image chunks to return during streaming generation. This - parameter is optional with a default of 0. When stream=true, this must be - greater than 0 to receive progressive updates of the image as it's being - generated. Higher values provide more frequent updates but may increase response - overhead. - - quality: The quality of the image that will be generated. GPT-IMAGE-1 supports: auto - (automatically select best quality), high, medium, low. - - size: The size of the generated images. GPT-IMAGE-1 supports: auto (automatically - select best size), 1536x1024 (landscape), 1024x1536 (portrait). - - stream: If set to true, partial image data will be streamed as the image is being - generated. When streaming, the response will be sent as server-sent events with - partial image chunks. When stream is true, partial_images must be greater - than 0. - - user: A unique identifier representing your end-user, which can help DigitalOcean to - monitor and detect abuse. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @overload - def create( - self, - *, - prompt: str, - stream: Literal[True], - background: Optional[str] | Omit = omit, - model: str | Omit = omit, - moderation: Optional[str] | Omit = omit, - n: Optional[int] | Omit = omit, - output_compression: Optional[int] | Omit = omit, - output_format: Optional[str] | Omit = omit, - partial_images: Optional[int] | Omit = omit, - quality: Optional[str] | Omit = omit, - size: Optional[str] | Omit = omit, - user: Optional[str] | Omit = omit, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = not_given, - ) -> Stream[ImageGenStreamEvent]: - """ - Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest - image generation model with automatic prompt optimization and enhanced visual - capabilities. - - Args: - prompt: A text description of the desired image(s). GPT-IMAGE-1 supports up to 32,000 - characters and provides automatic prompt optimization for best results. - - stream: If set to true, partial image data will be streamed as the image is being - generated. When streaming, the response will be sent as server-sent events with - partial image chunks. When stream is true, partial_images must be greater - than 0. - - background: - The background setting for the image generation. GPT-IMAGE-1 supports: - transparent, opaque, auto. - - model: The model to use for image generation. GPT-IMAGE-1 is the latest model offering - the best quality with automatic optimization and enhanced capabilities. - - moderation: The moderation setting for the image generation. GPT-IMAGE-1 supports: low, - auto. - - n: The number of images to generate. GPT-IMAGE-1 only supports n=1. - - output_compression: The output compression for the image generation. 
GPT-IMAGE-1 supports: 0-100. - - output_format: The output format for the image generation. GPT-IMAGE-1 supports: png, webp, - jpeg. - - partial_images: The number of partial image chunks to return during streaming generation. This - parameter is optional with a default of 0. When stream=true, this must be - greater than 0 to receive progressive updates of the image as it's being - generated. Higher values provide more frequent updates but may increase response - overhead. - - quality: The quality of the image that will be generated. GPT-IMAGE-1 supports: auto - (automatically select best quality), high, medium, low. - - size: The size of the generated images. GPT-IMAGE-1 supports: auto (automatically - select best size), 1536x1024 (landscape), 1024x1536 (portrait). - - user: A unique identifier representing your end-user, which can help DigitalOcean to - monitor and detect abuse. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @overload - def create( - self, - *, - prompt: str, - stream: bool, - background: Optional[str] | Omit = omit, - model: str | Omit = omit, - moderation: Optional[str] | Omit = omit, - n: Optional[int] | Omit = omit, - output_compression: Optional[int] | Omit = omit, - output_format: Optional[str] | Omit = omit, - partial_images: Optional[int] | Omit = omit, - quality: Optional[str] | Omit = omit, - size: Optional[str] | Omit = omit, - user: Optional[str] | Omit = omit, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = not_given, - ) -> GenerationCreateResponse | Stream[ImageGenStreamEvent]: - """ - Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest - image generation model with automatic prompt optimization and enhanced visual - capabilities. - - Args: - prompt: A text description of the desired image(s). GPT-IMAGE-1 supports up to 32,000 - characters and provides automatic prompt optimization for best results. - - stream: If set to true, partial image data will be streamed as the image is being - generated. When streaming, the response will be sent as server-sent events with - partial image chunks. When stream is true, partial_images must be greater - than 0. - - background: - The background setting for the image generation. GPT-IMAGE-1 supports: - transparent, opaque, auto. - - model: The model to use for image generation. GPT-IMAGE-1 is the latest model offering - the best quality with automatic optimization and enhanced capabilities. - - moderation: The moderation setting for the image generation. GPT-IMAGE-1 supports: low, - auto. - - n: The number of images to generate. GPT-IMAGE-1 only supports n=1. - - output_compression: The output compression for the image generation. GPT-IMAGE-1 supports: 0-100. - - output_format: The output format for the image generation. GPT-IMAGE-1 supports: png, webp, - jpeg. - - partial_images: The number of partial image chunks to return during streaming generation. This - parameter is optional with a default of 0. 
When stream=true, this must be - greater than 0 to receive progressive updates of the image as it's being - generated. Higher values provide more frequent updates but may increase response - overhead. - - quality: The quality of the image that will be generated. GPT-IMAGE-1 supports: auto - (automatically select best quality), high, medium, low. - - size: The size of the generated images. GPT-IMAGE-1 supports: auto (automatically - select best size), 1536x1024 (landscape), 1024x1536 (portrait). - - user: A unique identifier representing your end-user, which can help DigitalOcean to - monitor and detect abuse. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @required_args(["prompt"], ["prompt", "stream"]) - def create( - self, - *, - prompt: str, - background: Optional[str] | Omit = omit, - model: str | Omit = omit, - moderation: Optional[str] | Omit = omit, - n: Optional[int] | Omit = omit, - output_compression: Optional[int] | Omit = omit, - output_format: Optional[str] | Omit = omit, - partial_images: Optional[int] | Omit = omit, - quality: Optional[str] | Omit = omit, - size: Optional[str] | Omit = omit, - stream: Optional[Literal[False]] | Literal[True] | Omit = omit, - user: Optional[str] | Omit = omit, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = not_given, - ) -> GenerationCreateResponse | Stream[ImageGenStreamEvent]: - return self._post( - "/images/generations" - if self._client._base_url_overridden - else f"{self._client.inference_endpoint}/v1/images/generations", - body=maybe_transform( - { - "prompt": prompt, - "background": background, - "model": model, - "moderation": moderation, - "n": n, - "output_compression": output_compression, - "output_format": output_format, - "partial_images": partial_images, - "quality": quality, - "size": size, - "stream": stream, - "user": user, - }, - generation_create_params.GenerationCreateParamsStreaming - if stream - else generation_create_params.GenerationCreateParamsNonStreaming, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=GenerationCreateResponse, - stream=stream or False, - stream_cls=Stream[ImageGenStreamEvent], - ) - - -class AsyncGenerationsResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncGenerationsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers - """ - return AsyncGenerationsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncGenerationsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response - """ - return AsyncGenerationsResourceWithStreamingResponse(self) - - @overload - async def create( - self, - *, - prompt: str, - background: Optional[str] | Omit = omit, - model: str | Omit = omit, - moderation: Optional[str] | Omit = omit, - n: Optional[int] | Omit = omit, - output_compression: Optional[int] | Omit = omit, - output_format: Optional[str] | Omit = omit, - partial_images: Optional[int] | Omit = omit, - quality: Optional[str] | Omit = omit, - size: Optional[str] | Omit = omit, - stream: Optional[Literal[False]] | Omit = omit, - user: Optional[str] | Omit = omit, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = not_given, - ) -> GenerationCreateResponse: - """ - Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest - image generation model with automatic prompt optimization and enhanced visual - capabilities. - - Args: - prompt: A text description of the desired image(s). GPT-IMAGE-1 supports up to 32,000 - characters and provides automatic prompt optimization for best results. - - background: - The background setting for the image generation. GPT-IMAGE-1 supports: - transparent, opaque, auto. - - model: The model to use for image generation. GPT-IMAGE-1 is the latest model offering - the best quality with automatic optimization and enhanced capabilities. - - moderation: The moderation setting for the image generation. GPT-IMAGE-1 supports: low, - auto. - - n: The number of images to generate. GPT-IMAGE-1 only supports n=1. - - output_compression: The output compression for the image generation. GPT-IMAGE-1 supports: 0-100. - - output_format: The output format for the image generation. GPT-IMAGE-1 supports: png, webp, - jpeg. - - partial_images: The number of partial image chunks to return during streaming generation. This - parameter is optional with a default of 0. When stream=true, this must be - greater than 0 to receive progressive updates of the image as it's being - generated. Higher values provide more frequent updates but may increase response - overhead. - - quality: The quality of the image that will be generated. GPT-IMAGE-1 supports: auto - (automatically select best quality), high, medium, low. - - size: The size of the generated images. GPT-IMAGE-1 supports: auto (automatically - select best size), 1536x1024 (landscape), 1024x1536 (portrait). - - stream: If set to true, partial image data will be streamed as the image is being - generated. When streaming, the response will be sent as server-sent events with - partial image chunks. When stream is true, partial_images must be greater - than 0. - - user: A unique identifier representing your end-user, which can help DigitalOcean to - monitor and detect abuse. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... 
- - @overload - async def create( - self, - *, - prompt: str, - stream: Literal[True], - background: Optional[str] | Omit = omit, - model: str | Omit = omit, - moderation: Optional[str] | Omit = omit, - n: Optional[int] | Omit = omit, - output_compression: Optional[int] | Omit = omit, - output_format: Optional[str] | Omit = omit, - partial_images: Optional[int] | Omit = omit, - quality: Optional[str] | Omit = omit, - size: Optional[str] | Omit = omit, - user: Optional[str] | Omit = omit, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = not_given, - ) -> AsyncStream[ImageGenStreamEvent]: - """ - Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest - image generation model with automatic prompt optimization and enhanced visual - capabilities. - - Args: - prompt: A text description of the desired image(s). GPT-IMAGE-1 supports up to 32,000 - characters and provides automatic prompt optimization for best results. - - stream: If set to true, partial image data will be streamed as the image is being - generated. When streaming, the response will be sent as server-sent events with - partial image chunks. When stream is true, partial_images must be greater - than 0. - - background: - The background setting for the image generation. GPT-IMAGE-1 supports: - transparent, opaque, auto. - - model: The model to use for image generation. GPT-IMAGE-1 is the latest model offering - the best quality with automatic optimization and enhanced capabilities. - - moderation: The moderation setting for the image generation. GPT-IMAGE-1 supports: low, - auto. - - n: The number of images to generate. GPT-IMAGE-1 only supports n=1. - - output_compression: The output compression for the image generation. GPT-IMAGE-1 supports: 0-100. - - output_format: The output format for the image generation. GPT-IMAGE-1 supports: png, webp, - jpeg. - - partial_images: The number of partial image chunks to return during streaming generation. This - parameter is optional with a default of 0. When stream=true, this must be - greater than 0 to receive progressive updates of the image as it's being - generated. Higher values provide more frequent updates but may increase response - overhead. - - quality: The quality of the image that will be generated. GPT-IMAGE-1 supports: auto - (automatically select best quality), high, medium, low. - - size: The size of the generated images. GPT-IMAGE-1 supports: auto (automatically - select best size), 1536x1024 (landscape), 1024x1536 (portrait). - - user: A unique identifier representing your end-user, which can help DigitalOcean to - monitor and detect abuse. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... 
- - @overload - async def create( - self, - *, - prompt: str, - stream: bool, - background: Optional[str] | Omit = omit, - model: str | Omit = omit, - moderation: Optional[str] | Omit = omit, - n: Optional[int] | Omit = omit, - output_compression: Optional[int] | Omit = omit, - output_format: Optional[str] | Omit = omit, - partial_images: Optional[int] | Omit = omit, - quality: Optional[str] | Omit = omit, - size: Optional[str] | Omit = omit, - user: Optional[str] | Omit = omit, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = not_given, - ) -> GenerationCreateResponse | AsyncStream[ImageGenStreamEvent]: - """ - Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest - image generation model with automatic prompt optimization and enhanced visual - capabilities. - - Args: - prompt: A text description of the desired image(s). GPT-IMAGE-1 supports up to 32,000 - characters and provides automatic prompt optimization for best results. - - stream: If set to true, partial image data will be streamed as the image is being - generated. When streaming, the response will be sent as server-sent events with - partial image chunks. When stream is true, partial_images must be greater - than 0. - - background: - The background setting for the image generation. GPT-IMAGE-1 supports: - transparent, opaque, auto. - - model: The model to use for image generation. GPT-IMAGE-1 is the latest model offering - the best quality with automatic optimization and enhanced capabilities. - - moderation: The moderation setting for the image generation. GPT-IMAGE-1 supports: low, - auto. - - n: The number of images to generate. GPT-IMAGE-1 only supports n=1. - - output_compression: The output compression for the image generation. GPT-IMAGE-1 supports: 0-100. - - output_format: The output format for the image generation. GPT-IMAGE-1 supports: png, webp, - jpeg. - - partial_images: The number of partial image chunks to return during streaming generation. This - parameter is optional with a default of 0. When stream=true, this must be - greater than 0 to receive progressive updates of the image as it's being - generated. Higher values provide more frequent updates but may increase response - overhead. - - quality: The quality of the image that will be generated. GPT-IMAGE-1 supports: auto - (automatically select best quality), high, medium, low. - - size: The size of the generated images. GPT-IMAGE-1 supports: auto (automatically - select best size), 1536x1024 (landscape), 1024x1536 (portrait). - - user: A unique identifier representing your end-user, which can help DigitalOcean to - monitor and detect abuse. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... 
- - @required_args(["prompt"], ["prompt", "stream"]) - async def create( - self, - *, - prompt: str, - background: Optional[str] | Omit = omit, - model: str | Omit = omit, - moderation: Optional[str] | Omit = omit, - n: Optional[int] | Omit = omit, - output_compression: Optional[int] | Omit = omit, - output_format: Optional[str] | Omit = omit, - partial_images: Optional[int] | Omit = omit, - quality: Optional[str] | Omit = omit, - size: Optional[str] | Omit = omit, - stream: Optional[Literal[False]] | Literal[True] | Omit = omit, - user: Optional[str] | Omit = omit, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = not_given, - ) -> GenerationCreateResponse | AsyncStream[ImageGenStreamEvent]: - return await self._post( - "/images/generations" - if self._client._base_url_overridden - else f"{self._client.inference_endpoint}/v1/images/generations", - body=await async_maybe_transform( - { - "prompt": prompt, - "background": background, - "model": model, - "moderation": moderation, - "n": n, - "output_compression": output_compression, - "output_format": output_format, - "partial_images": partial_images, - "quality": quality, - "size": size, - "stream": stream, - "user": user, - }, - generation_create_params.GenerationCreateParamsStreaming - if stream - else generation_create_params.GenerationCreateParamsNonStreaming, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=GenerationCreateResponse, - stream=stream or False, - stream_cls=AsyncStream[ImageGenStreamEvent], - ) - - -class GenerationsResourceWithRawResponse: - def __init__(self, generations: GenerationsResource) -> None: - self._generations = generations - - self.create = to_raw_response_wrapper( - generations.create, - ) - - -class AsyncGenerationsResourceWithRawResponse: - def __init__(self, generations: AsyncGenerationsResource) -> None: - self._generations = generations - - self.create = async_to_raw_response_wrapper( - generations.create, - ) - - -class GenerationsResourceWithStreamingResponse: - def __init__(self, generations: GenerationsResource) -> None: - self._generations = generations - - self.create = to_streamed_response_wrapper( - generations.create, - ) - - -class AsyncGenerationsResourceWithStreamingResponse: - def __init__(self, generations: AsyncGenerationsResource) -> None: - self._generations = generations - - self.create = async_to_streamed_response_wrapper( - generations.create, - ) ->>>>>>> 6327cdf (feat(api): Images generations) From 550910becec30feba33083b7dfccc35f26e32c34 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 6 Oct 2025 21:10:09 +0000 Subject: [PATCH 4/4] release: 3.2.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ pyproject.toml | 2 +- src/gradient/_version.py | 2 +- 4 files changed, 11 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index e0dc5001..1f73031b 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "3.1.0" + ".": "3.2.0" } \ No newline at end of file diff --git 
a/CHANGELOG.md b/CHANGELOG.md index 0fed586c..ed2eb2db 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 3.2.0 (2025-10-06) + +Full Changelog: [v3.1.0...v3.2.0](https://github.com/digitalocean/gradient-python/compare/v3.1.0...v3.2.0) + +### Features + +* **api:** Images generations ([37bf67a](https://github.com/digitalocean/gradient-python/commit/37bf67af6097a6396e8f96a64d9224312355ff0f)) + ## 3.1.0 (2025-10-03) Full Changelog: [v3.0.2...v3.1.0](https://github.com/digitalocean/gradient-python/compare/v3.0.2...v3.1.0) diff --git a/pyproject.toml b/pyproject.toml index e2ea2e5c..dade45c8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "gradient" -version = "3.1.0" +version = "3.2.0" description = "The official Python library for the Gradient API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/gradient/_version.py b/src/gradient/_version.py index 69cb2fcb..6607400d 100644 --- a/src/gradient/_version.py +++ b/src/gradient/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "gradient" -__version__ = "3.1.0" # x-release-please-version +__version__ = "3.2.0" # x-release-please-version
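
Reviewer note (not part of the patches above): PATCH 2/4 and 3/4 only show the generated resource code, so a minimal usage sketch may help when reviewing the new endpoint. Everything below is inferred from the `create` signatures and docstrings in the diff; the `Gradient` client class name, how credentials are supplied, and the shape of `GenerationCreateResponse` (a `data` list with `b64_json` entries) are assumptions, not confirmed by this patch series.

```python
# Hedged sketch of calling the new images generations endpoint (gradient 3.2.0).
# Assumptions (not in the diff): the client class is `Gradient`, it reads
# credentials from the environment, and the response carries base64 image data
# in `response.data[0].b64_json`. The parameter names and the streaming rule
# (stream=True requires partial_images > 0) come from the docstrings in the diff.
import base64

from gradient import Gradient  # assumed import path

client = Gradient()  # assumed to pick up credentials from the environment

# Non-streaming call: returns a GenerationCreateResponse.
response = client.images.generations.create(
    prompt="A watercolor lighthouse at dawn",
    size="1536x1024",      # landscape, per the docstring
    output_format="png",   # png, webp, or jpeg
)
image_bytes = base64.b64decode(response.data[0].b64_json)  # assumed response shape
with open("lighthouse.png", "wb") as fh:
    fh.write(image_bytes)

# Streaming call: returns Stream[ImageGenStreamEvent]; partial_images must be > 0.
events = client.images.generations.create(
    prompt="A watercolor lighthouse at dawn",
    stream=True,
    partial_images=2,
)
for event in events:
    # The event model's fields are not shown in this diff; inspect as needed.
    print(type(event).__name__)
```

As the implementation in the diff shows, requests go to `{inference_endpoint}/v1/images/generations` unless the client's base URL is overridden, and the request body is validated against `GenerationCreateParamsStreaming` or `GenerationCreateParamsNonStreaming` depending on the `stream` flag.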