diff --git a/.gitignore b/.gitignore
index 487b51b9..e7e1da61 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,3 +4,67 @@ node_modules
 # IDE
 .vscode
 *.code-workspace
+
+# V2
+# ------------------------------------
+# Node
+# ------------------------------------
+# node_modules/
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+pnpm-debug.log*
+package-lock.json
+yarn.lock
+pnpm-lock.yaml
+
+# Optional: ignore local `.env` files
+.env
+.env.*local
+
+# ------------------------------------
+# Logs
+# ------------------------------------
+logs/
+*.log
+*.log.*
+debug.log
+
+# ------------------------------------
+# OS / Editor files
+# ------------------------------------
+# .DS_Store
+Thumbs.db
+.idea/
+
+# VSCode — allow settings.json, ignore rest
+# .vscode/*
+# !.vscode/settings.json
+# !.vscode/extensions.json
+
+# ------------------------------------
+# Mintlify / Build Stuff
+# ------------------------------------
+.mintlify/
+.mintlify-cache/
+.out/
+dist/
+build/
+
+# ------------------------------------
+# Temporary files
+# ------------------------------------
+*.tmp
+*.temp
+*.swp
+*.swo
+
+# ------------------------------------
+# TypeScript
+# ------------------------------------
+*.tsbuildinfo
+
+# ------------------------------------
+# External docs (fetched at build time)
+# ------------------------------------
+snippets/external/
diff --git a/README.md b/README.md
index 3238db29..6d2b0d8b 100644
--- a/README.md
+++ b/README.md
@@ -12,5 +12,5 @@ npm i -g mintlify
 Run the following command at the root of your documentation (where mint.json is)
 
 ```bash
-mintlify dev
+mint dev
 ```
diff --git a/README_V2.md b/README_V2.md
new file mode 100644
index 00000000..c6a61728
--- /dev/null
+++ b/README_V2.md
@@ -0,0 +1,165 @@
+V2 docs are being ported into this repo.
+
+I will work on a branch called docs-v2, merge it into main when it is fully
+ready, and deprecate the old docs as v1 inside the new docs.
+
+Add to all pages: [SEO](https://www.mintlify.com/docs/optimize/seo) frontmatter, e.g.
+
+- "twitter:image": "/images/social-preview.jpg"
+- search keywords, e.g. keywords: ['configuration', 'setup', 'getting started']
+
+(A frontmatter sketch covering both is included after the About notes below.)
+
+TODO:
+
+- Remove/Change Navbar in V2 (Global Setting)
+- Add redirects (Global Setting)
+- Add Analytics (Global Setting)
+- Add Footer (Global Setting)
+- Add SEO (Global Setting)
+- Add Custom Domain (Global Setting)
+- Add Custom 404 (Global Setting)?
+- "description":
+  "![Rick Roll](https://media0.giphy.com/media/v1.Y2lkPTc5MGI3NjExN2FteDJ4bno5MHU5Y3QxdGx3eWR2emdhejRhc2c1Y2d3ejY5ajlxMSZlcD12MV9pbnRlcm5hbF9naWZfYnlfaWQmY3Q9Zw/Ju7l5y9osyymQ/giphy.gif)
+  \n Sorry About That."
+- "description":
+  "![404 Robot](https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/9b86454e-e7d0-46f5-8f77-fcfd2309c760/original=true,quality=90/F0DB1F6D051016659028C1570BD9F3F89FF00EC59E1A35319969E6DF05EEB4CF.jpeg)"
+
+Notes from stakeholders/feedback
+
+- “The gateways section should definitely include… technical documentation on
+  how to run and operate a gateway node because that’s missing.”
+-
+
+Notes on layout
+
+- Consider moving the resource and help anchors to right-hand tabs on the menu
+  (styling). Navbar buttons would be preferable, but only external links are
+  allowed there. :/
+
+- Consider having an Index & FAQ/Glossary page in each tab. Possibly use AI to
+  generate one per page (an LLM initially, then an n8n integration to keep it
+  updated).
+
+About:
+
+- Protocol: Should participants be called Protocol Actors or Network
+  Participants? Both?
+- I am not convinced about the sidebar sections.
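For the SEO/keywords items noted at the top of these notes, a minimal per-page
frontmatter sketch might look like the following. The title and description are
placeholder values; the meta-tag and keywords syntax follows the Mintlify SEO
docs linked above:

```yaml
---
title: "Gateways" # placeholder page title
description: "What a Livepeer gateway does and why you might run one." # placeholder
"twitter:image": "/images/social-preview.jpg"
keywords: ['configuration', 'setup', 'getting started']
---
```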
+ +Removing: "v2/pages/01_about/livepeer-protocol/livepeer-actors/gateways", +"v2/pages/01_about/livepeer-protocol/livepeer-actors/orchestrators", +"v2/pages/01_about/livepeer-protocol/livepeer-actors/delegators", +"v2/pages/01_about/livepeer-protocol/livepeer-actors/end-users" + +Community + +- move HUBS to appropriate tabs +- Hate the naming of all connect items. + +Developer + +Gateways + +#### Direct Usage & Platform Integration + +| Category | Reason | Business Explanation | +| ------------------ | ---------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | +| Direct Usage / Ops | Run your own workloads | Content providers run gateways to process their own video/AI workloads end-to-end, controlling ingestion, routing, retries, and delivery. | + +#### Reliability, Performance & QoS + +| Category | Reason | Business Explanation | +| ----------- | ---------------------------------- | ----------------------------------------------------------------------------------------------------- | +| Reliability | Enforce SLAs on orchestrators | Gateways select orchestrators, apply retries/failover, and enforce latency and uptime guarantees. | +| Reliability | QoS enforcement & workload shaping | Gateways control routing, retries, failover, and latency-vs-cost trade-offs beyond protocol defaults. | + +#### Platform + +| Category | Reason | Business Explanation | +| -------- | ------------------------- | ------------------------------------------------------------------------------- | +| Platform | Embed in a larger product | Gateways act as internal infrastructure powering broader media or AI platforms. | + +#### Economics + +| Category | Reason | Business Explanation | +| --------- | ------------------------------ | ------------------------------------------------------------------------------------------------------- | +| Economics | Service-layer monetization | Service providers charge end users above orchestrator cost for reliability, compliance, or convenience. | +| Economics | Avoid third-party gateway fees | Running your own gateway avoids routing fees, pricing risk, and policy constraints imposed by others. | + +#### Demand Control & Traffic Ownership + +| Category | Reason | Business Explanation | +| -------------- | -------------------------------------- | -------------------------------------------------------------------------------------------------------------- | +| Demand Control | Demand aggregation & traffic ownership | Gateways own ingress, customer relationships, usage data, and traffic predictability across apps or customers. | +| Demand Control | Workload normalization | Gateways smooth bursty demand into predictable, orchestrator-friendly workloads. | + +#### Performance + +| Category | Reason | Business Explanation | +| ----------- | --------------------------- | --------------------------------------------------------------------------------------------------- | +| Performance | Geographic request steering | Gateways route users to regionally optimal orchestrators to reduce latency and improve reliability. | + +#### Security & Compliance + +| Category | Reason | Business Explanation | +| -------- | --------------------------------- | ------------------------------------------------------------------------------------------ | +| Security | Enterprise policy enforcement | Gateways enforce IP allowlists, auth, rate limits, audit logs, and deterministic behavior. 
| Security | Cost-explosion & abuse protection | Gateways block buggy or malicious clients before they generate runaway compute costs. |
+
+#### Product Differentiation & UX
+
+| Category | Reason | Business Explanation |
+| -------- | -------------------------------------- | --------------------------------------------------------------------------------------------------------- |
+| Product | Product differentiation above protocol | Custom APIs, SDKs, dashboards, billing abstractions, and AI workflow presets live at the gateway layer. |
+| Product | Stable API surface | Gateways shield customers from protocol or orchestrator churn via versioning and controlled change. |
+
+#### Observability & Feedback Loops
+
+| Category | Reason | Business Explanation |
+| ------------- | -------------------------- | -------------------------------------------------------------------------------------------------------- |
+| Observability | Analytics & feedback loops | Gateways see end-to-end request patterns, failures, latency, model performance, and customer behavior. |
+
+#### Strategy, Optionality & Ecosystem Power
+
+| Category | Reason | Business Explanation |
+| -------- | ---------------------- | ------------------------------------------------------------------------------------------------------------ |
+| Strategy | Strategic independence | Running your own gateway avoids pricing, roadmap, availability, and censorship risk from other gateways. |
+| Strategy | Future optionality | Early gateway operators gain leverage if incentives or network economics evolve. |
+
+#### Ecosystem Influence
+
+| Category | Reason | Business Explanation |
+| --------- | ------------------- | ------------------------------------------------------------------------------------------------------------------------ |
+| Ecosystem | Ecosystem influence | Gateways sit at a coordination choke-point that shapes standards, surfaces protocol gaps, and influences real usage. |
+
+## NOTES ON SOME FETCHED DATA
+
+Since useState, useEffect, and fetch work in Mintlify JSX components, you can
+pull:
+
+- Release info - versions, release notes, assets, dates
+- Repo stats - stars, forks, open issues count
+- File contents - README, config files, code examples (via
+  raw.githubusercontent.com)
+- Contributors - list of contributors, avatars
+- Commit history - recent commits, changelog-style updates
+- Issues/PRs - open issues count, specific issue details
+
+**EXAMPLE**
+
+I'm fetching the latest release of livepeer dynamically in some places, e.g.
+gateways/linux-install, with the `LatestRelease` component (a sketch follows at
+the end of these notes).
+
+### !!! Caveats:
+
+- Rate limits - the GitHub API allows 60 requests/hour for unauthenticated
+  requests. If many users load the page, we could hit the limit.
+- Client-side loading - shows "loading..." briefly before content appears.
+- No SSR - content won't be in the initial HTML (affects SEO if that matters).
+
+### Future Recommendation:
+
+For high-traffic pages, we might want a build-time approach instead (fetch once
+during deploy, not on every page load).
+
+Then we can use an n8n hook or a GitHub Action to redeploy the docs when a new
+release is published.
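For reference, a minimal sketch of what a `LatestRelease`-style snippet
component could look like. This is an illustration, not the shipped component:
the `livepeer/go-livepeer` repo, the fallback strings, and the markup are
assumptions; only the hooks-plus-fetch pattern described above is the point.

```jsx
// Hypothetical sketch of a LatestRelease-style snippet component.
// Assumes Mintlify's MDX runtime exposes useState/useEffect (as noted above)
// and that livepeer/go-livepeer is the repo whose releases we want.
export const LatestRelease = () => {
  const [tag, setTag] = useState(null);
  const [failed, setFailed] = useState(false);

  useEffect(() => {
    // Unauthenticated GitHub API call: counts against the 60 req/hour limit.
    fetch("https://api.github.com/repos/livepeer/go-livepeer/releases/latest")
      .then((res) => (res.ok ? res.json() : Promise.reject(res.status)))
      .then((release) => setTag(release.tag_name))
      .catch(() => setFailed(true));
  }, []);

  if (failed) return <code>latest</code>; // degrade gracefully if rate-limited
  if (!tag) return <span>loading...</span>; // brief client-side loading state
  return <code>{tag}</code>;
};
```

For the redeploy-on-release idea, a GitHub Actions sketch along these lines
could live in the release repo. The workflow name and the `DEPLOY_HOOK_URL`
secret are placeholders, and it assumes the docs host exposes a deploy hook:

```yaml
# Hypothetical workflow: redeploy the docs when a release is published.
name: redeploy-docs-on-release
on:
  release:
    types: [published]
jobs:
  redeploy:
    runs-on: ubuntu-latest
    steps:
      - name: Trigger the docs deploy hook
        env:
          DEPLOY_HOOK_URL: ${{ secrets.DEPLOY_HOOK_URL }} # placeholder secret
        run: curl -fsS -X POST "$DEPLOY_HOOK_URL"
```

diff --git a/ai/worker/api/gateway.openapi.yaml b/ai/worker/api/gateway.openapi.yaml
new file mode 100644
index 00000000..77ab9abf
--- /dev/null
+++ b/ai/worker/api/gateway.openapi.yaml
@@ -0,0 +1,1246 @@
+# !!Auto-generated by 'gen_openapi.py'. DO NOT EDIT!!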
+openapi: 3.1.0 +info: + title: Livepeer AI Runner + description: An application to run AI pipelines + version: 0.0.0 +servers: +- url: https://dream-gateway.livepeer.cloud + description: Livepeer Cloud Community Gateway +- url: https://livepeer.studio/api/beta/generate + description: Livepeer Studio Gateway +paths: + /text-to-image: + post: + tags: + - generate + summary: Text To Image + description: Generate images from text prompts. + operationId: genTextToImage + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/TextToImageParams' + required: true + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/ImageResponse' + x-speakeasy-name-override: data + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '500': + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '422': + description: Validation Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPValidationError' + security: + - HTTPBearer: [] + x-speakeasy-name-override: textToImage + /image-to-image: + post: + tags: + - generate + summary: Image To Image + description: Apply image transformations to a provided image. + operationId: genImageToImage + requestBody: + content: + multipart/form-data: + schema: + $ref: '#/components/schemas/Body_genImageToImage' + required: true + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/ImageResponse' + x-speakeasy-name-override: data + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '500': + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '422': + description: Validation Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPValidationError' + security: + - HTTPBearer: [] + x-speakeasy-name-override: imageToImage + /image-to-video: + post: + tags: + - generate + summary: Image To Video + description: Generate a video from a provided image. + operationId: genImageToVideo + requestBody: + content: + multipart/form-data: + schema: + $ref: '#/components/schemas/Body_genImageToVideo' + required: true + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/VideoResponse' + x-speakeasy-name-override: data + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '500': + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '422': + description: Validation Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPValidationError' + security: + - HTTPBearer: [] + x-speakeasy-name-override: imageToVideo + /upscale: + post: + tags: + - generate + summary: Upscale + description: Upscale an image by increasing its resolution. 
+ operationId: genUpscale + requestBody: + content: + multipart/form-data: + schema: + $ref: '#/components/schemas/Body_genUpscale' + required: true + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/ImageResponse' + x-speakeasy-name-override: data + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '500': + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '422': + description: Validation Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPValidationError' + security: + - HTTPBearer: [] + x-speakeasy-name-override: upscale + /audio-to-text: + post: + tags: + - generate + summary: Audio To Text + description: Transcribe audio files to text. + operationId: genAudioToText + requestBody: + content: + multipart/form-data: + schema: + $ref: '#/components/schemas/Body_genAudioToText' + required: true + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/TextResponse' + x-speakeasy-name-override: data + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '413': + description: Request Entity Too Large + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '415': + description: Unsupported Media Type + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '500': + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '422': + description: Validation Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPValidationError' + security: + - HTTPBearer: [] + x-speakeasy-name-override: audioToText + /segment-anything-2: + post: + tags: + - generate + summary: Segment Anything 2 + description: Segment objects in an image. + operationId: genSegmentAnything2 + requestBody: + content: + multipart/form-data: + schema: + $ref: '#/components/schemas/Body_genSegmentAnything2' + required: true + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/MasksResponse' + x-speakeasy-name-override: data + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '500': + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '422': + description: Validation Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPValidationError' + security: + - HTTPBearer: [] + x-speakeasy-name-override: segmentAnything2 + /llm: + post: + tags: + - generate + summary: LLM + description: Generate text using a language model. 
+ operationId: genLLM + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/LLMRequest' + required: true + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/LLMResponse' + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '500': + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '422': + description: Validation Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPValidationError' + security: + - HTTPBearer: [] + x-speakeasy-name-override: llm + /image-to-text: + post: + tags: + - generate + summary: Image To Text + description: Transform image files to text. + operationId: genImageToText + requestBody: + content: + multipart/form-data: + schema: + $ref: '#/components/schemas/Body_genImageToText' + required: true + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/ImageToTextResponse' + x-speakeasy-name-override: data + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '413': + description: Request Entity Too Large + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '500': + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '422': + description: Validation Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPValidationError' + security: + - HTTPBearer: [] + x-speakeasy-name-override: imageToText + /live-video-to-video: + post: + tags: + - generate + summary: Live Video To Video + description: Apply transformations to a live video streamed to the returned + endpoints. + operationId: genLiveVideoToVideo + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/LiveVideoToVideoParams' + required: true + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/LiveVideoToVideoResponse' + x-speakeasy-name-override: data + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '500': + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '422': + description: Validation Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPValidationError' + security: + - HTTPBearer: [] + x-speakeasy-name-override: liveVideoToVideo + /text-to-speech: + post: + tags: + - generate + summary: Text To Speech + description: Generate a text-to-speech audio file based on the provided text + input and speaker description. 
+ operationId: genTextToSpeech + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/TextToSpeechParams' + required: true + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/AudioResponse' + x-speakeasy-name-override: data + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '500': + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '422': + description: Validation Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPValidationError' + security: + - HTTPBearer: [] + x-speakeasy-name-override: textToSpeech +components: + schemas: + APIError: + properties: + msg: + type: string + title: Msg + description: The error message. + type: object + required: + - msg + title: APIError + description: API error response model. + AudioResponse: + properties: + audio: + allOf: + - $ref: '#/components/schemas/MediaURL' + description: The generated audio. + type: object + required: + - audio + title: AudioResponse + description: Response model for audio generation. + Body_genAudioToText: + properties: + audio: + type: string + format: binary + title: Audio + description: Uploaded audio file to be transcribed. + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for transcription. + default: '' + return_timestamps: + type: string + title: Return Timestamps + description: 'Return timestamps for the transcribed text. Supported values: + ''sentence'', ''word'', or a string boolean (''true'' or ''false''). Default + is ''true'' (''sentence''). ''false'' means no timestamps. ''word'' means + word-based timestamps.' + default: 'true' + type: object + required: + - audio + - model_id + title: Body_genAudioToText + Body_genImageToImage: + properties: + prompt: + type: string + title: Prompt + description: Text prompt(s) to guide image generation. + image: + type: string + format: binary + title: Image + description: Uploaded image to modify with the pipeline. + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for image generation. + default: '' + loras: + type: string + title: Loras + description: 'A LoRA (Low-Rank Adaptation) model and its corresponding weight + for image generation. Example: { "latent-consistency/lcm-lora-sdxl": 1.0, + "nerijs/pixel-art-xl": 1.2}.' + default: '' + strength: + type: number + title: Strength + description: Degree of transformation applied to the reference image (0 + to 1). + default: 0.8 + guidance_scale: + type: number + title: Guidance Scale + description: Encourages model to generate images closely linked to the text + prompt (higher values may reduce image quality). + default: 7.5 + image_guidance_scale: + type: number + title: Image Guidance Scale + description: Degree to which the generated image is pushed towards the initial + image. + default: 1.5 + negative_prompt: + type: string + title: Negative Prompt + description: Text prompt(s) to guide what to exclude from image generation. + Ignored if guidance_scale < 1. + default: '' + safety_check: + type: boolean + title: Safety Check + description: Perform a safety check to estimate if generated images could + be offensive or harmful. 
+ default: true + seed: + type: integer + title: Seed + description: Seed for random number generation. + num_inference_steps: + type: integer + title: Num Inference Steps + description: Number of denoising steps. More steps usually lead to higher + quality images but slower inference. Modulated by strength. + default: 100 + num_images_per_prompt: + type: integer + title: Num Images Per Prompt + description: Number of images to generate per prompt. + default: 1 + type: object + required: + - prompt + - image + - model_id + title: Body_genImageToImage + Body_genImageToText: + properties: + image: + type: string + format: binary + title: Image + description: Uploaded image to transform with the pipeline. + prompt: + type: string + title: Prompt + description: Text prompt(s) to guide transformation. + default: '' + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for transformation. + default: '' + type: object + required: + - image + - model_id + title: Body_genImageToText + Body_genImageToVideo: + properties: + image: + type: string + format: binary + title: Image + description: Uploaded image to generate a video from. + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for video generation. + default: '' + height: + type: integer + title: Height + description: The height in pixels of the generated video. + default: 576 + width: + type: integer + title: Width + description: The width in pixels of the generated video. + default: 1024 + fps: + type: integer + title: Fps + description: The frames per second of the generated video. + default: 6 + motion_bucket_id: + type: integer + title: Motion Bucket Id + description: Used for conditioning the amount of motion for the generation. + The higher the number the more motion will be in the video. + default: 127 + noise_aug_strength: + type: number + title: Noise Aug Strength + description: Amount of noise added to the conditioning image. Higher values + reduce resemblance to the conditioning image and increase motion. + default: 0.02 + safety_check: + type: boolean + title: Safety Check + description: Perform a safety check to estimate if generated images could + be offensive or harmful. + default: true + seed: + type: integer + title: Seed + description: Seed for random number generation. + num_inference_steps: + type: integer + title: Num Inference Steps + description: Number of denoising steps. More steps usually lead to higher + quality images but slower inference. Modulated by strength. + default: 25 + type: object + required: + - image + - model_id + title: Body_genImageToVideo + Body_genSegmentAnything2: + properties: + image: + type: string + format: binary + title: Image + description: Image to segment. + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for image generation. + default: '' + point_coords: + type: string + title: Point Coords + description: Nx2 array of point prompts to the model, where each point is + in (X,Y) in pixels. + point_labels: + type: string + title: Point Labels + description: Labels for the point prompts, where 1 indicates a foreground + point and 0 indicates a background point. + box: + type: string + title: Box + description: A length 4 array given as a box prompt to the model, in XYXY + format. + mask_input: + type: string + title: Mask Input + description: A low-resolution mask input to the model, typically from a + previous prediction iteration, with the form 1xHxW (H=W=256 for SAM). 
+ multimask_output: + type: boolean + title: Multimask Output + description: If true, the model will return three masks for ambiguous input + prompts, often producing better masks than a single prediction. + default: true + return_logits: + type: boolean + title: Return Logits + description: If true, returns un-thresholded mask logits instead of a binary + mask. + default: true + normalize_coords: + type: boolean + title: Normalize Coords + description: If true, the point coordinates will be normalized to the range + [0,1], with point_coords expected to be with respect to image dimensions. + default: true + type: object + required: + - image + - model_id + title: Body_genSegmentAnything2 + Body_genUpscale: + properties: + prompt: + type: string + title: Prompt + description: Text prompt(s) to guide upscaled image generation. + image: + type: string + format: binary + title: Image + description: Uploaded image to modify with the pipeline. + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for upscaled image generation. + default: '' + safety_check: + type: boolean + title: Safety Check + description: Perform a safety check to estimate if generated images could + be offensive or harmful. + default: true + seed: + type: integer + title: Seed + description: Seed for random number generation. + num_inference_steps: + type: integer + title: Num Inference Steps + description: Number of denoising steps. More steps usually lead to higher + quality images but slower inference. Modulated by strength. + default: 75 + type: object + required: + - prompt + - image + - model_id + title: Body_genUpscale + Chunk: + properties: + timestamp: + items: {} + type: array + title: Timestamp + description: The timestamp of the chunk. + text: + type: string + title: Text + description: The text of the chunk. + type: object + required: + - timestamp + - text + title: Chunk + description: A chunk of text with a timestamp. + HTTPError: + properties: + detail: + allOf: + - $ref: '#/components/schemas/APIError' + description: Detailed error information. + type: object + required: + - detail + title: HTTPError + description: HTTP error response model. + HTTPValidationError: + properties: + detail: + items: + $ref: '#/components/schemas/ValidationError' + type: array + title: Detail + type: object + title: HTTPValidationError + ImageResponse: + properties: + images: + items: + $ref: '#/components/schemas/Media' + type: array + title: Images + description: The generated images. + type: object + required: + - images + title: ImageResponse + description: Response model for image generation. + ImageToTextResponse: + properties: + text: + type: string + title: Text + description: The generated text. + type: object + required: + - text + title: ImageToTextResponse + description: Response model for text generation. 
+ LLMChoice: + properties: + index: + type: integer + title: Index + finish_reason: + type: string + title: Finish Reason + default: '' + delta: + allOf: + - $ref: '#/components/schemas/LLMMessage' + message: + allOf: + - $ref: '#/components/schemas/LLMMessage' + type: object + required: + - index + title: LLMChoice + LLMMessage: + properties: + role: + type: string + title: Role + content: + type: string + title: Content + type: object + required: + - role + - content + title: LLMMessage + LLMRequest: + properties: + messages: + items: + $ref: '#/components/schemas/LLMMessage' + type: array + title: Messages + model: + type: string + title: Model + default: '' + temperature: + type: number + title: Temperature + default: 0.7 + max_tokens: + type: integer + title: Max Tokens + default: 256 + top_p: + type: number + title: Top P + default: 1.0 + top_k: + type: integer + title: Top K + default: -1 + stream: + type: boolean + title: Stream + default: false + type: object + required: + - messages + title: LLMRequest + LLMResponse: + properties: + id: + type: string + title: Id + model: + type: string + title: Model + created: + type: integer + title: Created + usage: + $ref: '#/components/schemas/LLMTokenUsage' + choices: + items: + $ref: '#/components/schemas/LLMChoice' + type: array + title: Choices + type: object + required: + - id + - model + - created + - usage + - choices + title: LLMResponse + LLMTokenUsage: + properties: + prompt_tokens: + type: integer + title: Prompt Tokens + completion_tokens: + type: integer + title: Completion Tokens + total_tokens: + type: integer + title: Total Tokens + type: object + required: + - prompt_tokens + - completion_tokens + - total_tokens + title: LLMTokenUsage + LiveVideoToVideoParams: + properties: + subscribe_url: + type: string + title: Subscribe Url + description: Source URL of the incoming stream to subscribe to. + publish_url: + type: string + title: Publish Url + description: Destination URL of the outgoing stream to publish. + control_url: + type: string + title: Control Url + description: URL for subscribing via Trickle protocol for updates in the + live video-to-video generation params. + default: '' + events_url: + type: string + title: Events Url + description: URL for publishing events via Trickle protocol for pipeline + status and logs. + default: '' + model_id: + type: string + title: Model Id + description: Name of the pipeline to run in the live video to video job. + Notice that this is named model_id for consistency with other routes, + but it does not refer to a Hugging Face model ID. The exact model(s) depends + on the pipeline implementation and might be configurable via the `params` + argument. + default: '' + params: + type: object + title: Params + description: Initial parameters for the pipeline. 
+ default: {} + type: object + required: + - subscribe_url + - publish_url + - model_id + title: LiveVideoToVideoParams + LiveVideoToVideoResponse: + properties: + subscribe_url: + type: string + title: Subscribe Url + description: Source URL of the incoming stream to subscribe to + publish_url: + type: string + title: Publish Url + description: Destination URL of the outgoing stream to publish to + control_url: + type: string + title: Control Url + description: URL for updating the live video-to-video generation + default: '' + events_url: + type: string + title: Events Url + description: URL for subscribing to events for pipeline status and logs + default: '' + type: object + required: + - subscribe_url + - publish_url + title: LiveVideoToVideoResponse + description: Response model for live video-to-video generation. + MasksResponse: + properties: + masks: + type: string + title: Masks + description: The generated masks. + scores: + type: string + title: Scores + description: The model's confidence scores for each generated mask. + logits: + type: string + title: Logits + description: The raw, unnormalized predictions (logits) for the masks. + type: object + required: + - masks + - scores + - logits + title: MasksResponse + description: Response model for object segmentation. + Media: + properties: + url: + type: string + title: Url + description: The URL where the media can be accessed. + seed: + type: integer + title: Seed + description: The seed used to generate the media. + nsfw: + type: boolean + title: Nsfw + description: Whether the media was flagged as NSFW. + type: object + required: + - url + - seed + - nsfw + title: Media + description: A media object containing information about the generated media. + MediaURL: + properties: + url: + type: string + title: Url + description: The URL where the media can be accessed. + type: object + required: + - url + title: MediaURL + description: A URL from which media can be accessed. + TextResponse: + properties: + text: + type: string + title: Text + description: The generated text. + chunks: + items: + $ref: '#/components/schemas/Chunk' + type: array + title: Chunks + description: The generated text chunks. + type: object + required: + - text + - chunks + title: TextResponse + description: Response model for text generation. + TextToImageParams: + properties: + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for image generation. + default: '' + loras: + type: string + title: Loras + description: 'A LoRA (Low-Rank Adaptation) model and its corresponding weight + for image generation. Example: { "latent-consistency/lcm-lora-sdxl": 1.0, + "nerijs/pixel-art-xl": 1.2}.' + default: '' + prompt: + type: string + title: Prompt + description: Text prompt(s) to guide image generation. Separate multiple + prompts with '|' if supported by the model. + height: + type: integer + title: Height + description: The height in pixels of the generated image. + default: 576 + width: + type: integer + title: Width + description: The width in pixels of the generated image. + default: 1024 + guidance_scale: + type: number + title: Guidance Scale + description: Encourages model to generate images closely linked to the text + prompt (higher values may reduce image quality). + default: 7.5 + negative_prompt: + type: string + title: Negative Prompt + description: Text prompt(s) to guide what to exclude from image generation. + Ignored if guidance_scale < 1. 
+ default: '' + safety_check: + type: boolean + title: Safety Check + description: Perform a safety check to estimate if generated images could + be offensive or harmful. + default: true + seed: + type: integer + title: Seed + description: Seed for random number generation. + num_inference_steps: + type: integer + title: Num Inference Steps + description: Number of denoising steps. More steps usually lead to higher + quality images but slower inference. Modulated by strength. + default: 50 + num_images_per_prompt: + type: integer + title: Num Images Per Prompt + description: Number of images to generate per prompt. + default: 1 + type: object + required: + - prompt + - model_id + title: TextToImageParams + TextToSpeechParams: + properties: + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for text to speech generation. + default: '' + text: + type: string + title: Text + description: Text input for speech generation. + default: '' + description: + type: string + title: Description + description: Description of speaker to steer text to speech generation. + default: A male speaker delivers a slightly expressive and animated speech + with a moderate speed and pitch. + type: object + title: TextToSpeechParams + required: + - model_id + ValidationError: + properties: + loc: + items: + anyOf: + - type: string + - type: integer + type: array + title: Location + msg: + type: string + title: Message + type: + type: string + title: Error Type + type: object + required: + - loc + - msg + - type + title: ValidationError + VideoResponse: + properties: + images: + items: + $ref: '#/components/schemas/Media' + type: array + title: Images + description: The generated images. + type: object + required: + - images + title: VideoResponse + description: Response model for image generation. 
+ securitySchemes: + HTTPBearer: + type: http + scheme: bearer diff --git a/ai/worker/api/openapi.json b/ai/worker/api/openapi.json new file mode 100644 index 00000000..074a0e47 --- /dev/null +++ b/ai/worker/api/openapi.json @@ -0,0 +1,1959 @@ +{ + "openapi": "3.1.0", + "info": { + "title": "Livepeer AI Runner", + "description": "An application to run AI pipelines", + "version": "0.0.0" + }, + "servers": [ + { + "url": "https://dream-gateway.livepeer.cloud", + "description": "Livepeer Cloud Community Gateway" + }, + { + "url": "https://livepeer.studio/api/beta/generate", + "description": "Livepeer Studio Gateway" + } + ], + "paths": { + "/text-to-image": { + "post": { + "tags": [ + "generate" + ], + "summary": "Text To Image", + "description": "Generate images from text prompts.", + "operationId": "genTextToImage", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TextToImageParams" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageResponse", + "x-speakeasy-name-override": "data" + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "401": { + "description": "Unauthorized", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + }, + "500": { + "description": "Internal Server Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + } + }, + "security": [ + { + "HTTPBearer": [] + } + ], + "x-speakeasy-name-override": "textToImage" + } + }, + "/image-to-image": { + "post": { + "tags": [ + "generate" + ], + "summary": "Image To Image", + "description": "Apply image transformations to a provided image.", + "operationId": "genImageToImage", + "requestBody": { + "content": { + "multipart/form-data": { + "schema": { + "$ref": "#/components/schemas/Body_genImageToImage" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageResponse", + "x-speakeasy-name-override": "data" + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "401": { + "description": "Unauthorized", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + }, + "500": { + "description": "Internal Server Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + } + }, + "security": [ + { + "HTTPBearer": [] + } + ], + "x-speakeasy-name-override": "imageToImage" + } + }, + "/image-to-video": { + "post": { + "tags": [ + "generate" + ], + "summary": "Image To Video", + "description": "Generate a video from a provided image.", + "operationId": "genImageToVideo", + "requestBody": { + "content": { + 
"multipart/form-data": { + "schema": { + "$ref": "#/components/schemas/Body_genImageToVideo" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VideoResponse", + "x-speakeasy-name-override": "data" + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "401": { + "description": "Unauthorized", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + }, + "500": { + "description": "Internal Server Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + } + }, + "security": [ + { + "HTTPBearer": [] + } + ], + "x-speakeasy-name-override": "imageToVideo" + } + }, + "/upscale": { + "post": { + "tags": [ + "generate" + ], + "summary": "Upscale", + "description": "Upscale an image by increasing its resolution.", + "operationId": "genUpscale", + "requestBody": { + "content": { + "multipart/form-data": { + "schema": { + "$ref": "#/components/schemas/Body_genUpscale" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageResponse", + "x-speakeasy-name-override": "data" + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "401": { + "description": "Unauthorized", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + }, + "500": { + "description": "Internal Server Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + } + }, + "security": [ + { + "HTTPBearer": [] + } + ], + "x-speakeasy-name-override": "upscale" + } + }, + "/audio-to-text": { + "post": { + "tags": [ + "generate" + ], + "summary": "Audio To Text", + "description": "Transcribe audio files to text.", + "operationId": "genAudioToText", + "requestBody": { + "content": { + "multipart/form-data": { + "schema": { + "$ref": "#/components/schemas/Body_genAudioToText" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TextResponse", + "x-speakeasy-name-override": "data" + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "401": { + "description": "Unauthorized", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "413": { + "description": "Request Entity Too Large", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "415": { + "description": "Unsupported Media Type", + "content": { + 
"application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + }, + "500": { + "description": "Internal Server Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + } + }, + "security": [ + { + "HTTPBearer": [] + } + ], + "x-speakeasy-name-override": "audioToText" + } + }, + "/segment-anything-2": { + "post": { + "tags": [ + "generate" + ], + "summary": "Segment Anything 2", + "description": "Segment objects in an image.", + "operationId": "genSegmentAnything2", + "requestBody": { + "content": { + "multipart/form-data": { + "schema": { + "$ref": "#/components/schemas/Body_genSegmentAnything2" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MasksResponse", + "x-speakeasy-name-override": "data" + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "401": { + "description": "Unauthorized", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + }, + "500": { + "description": "Internal Server Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + } + }, + "security": [ + { + "HTTPBearer": [] + } + ], + "x-speakeasy-name-override": "segmentAnything2" + } + }, + "/llm": { + "post": { + "tags": [ + "generate" + ], + "summary": "LLM", + "description": "Generate text using a language model.", + "operationId": "genLLM", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LLMRequest" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LLMResponse" + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "401": { + "description": "Unauthorized", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + }, + "500": { + "description": "Internal Server Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + } + }, + "security": [ + { + "HTTPBearer": [] + } + ], + "x-speakeasy-name-override": "llm" + } + }, + "/image-to-text": { + "post": { + "tags": [ + "generate" + ], + "summary": "Image To Text", + "description": "Transform image files to text.", + "operationId": "genImageToText", + "requestBody": { + "content": { + "multipart/form-data": { + "schema": { + "$ref": "#/components/schemas/Body_genImageToText" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + 
"content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageToTextResponse", + "x-speakeasy-name-override": "data" + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "401": { + "description": "Unauthorized", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "413": { + "description": "Request Entity Too Large", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + }, + "500": { + "description": "Internal Server Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + } + }, + "security": [ + { + "HTTPBearer": [] + } + ], + "x-speakeasy-name-override": "imageToText" + } + }, + "/live-video-to-video": { + "post": { + "tags": [ + "generate" + ], + "summary": "Live Video To Video", + "description": "Apply transformations to a live video streamed to the returned endpoints.", + "operationId": "genLiveVideoToVideo", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LiveVideoToVideoParams" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LiveVideoToVideoResponse", + "x-speakeasy-name-override": "data" + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "401": { + "description": "Unauthorized", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + }, + "500": { + "description": "Internal Server Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + } + }, + "security": [ + { + "HTTPBearer": [] + } + ], + "x-speakeasy-name-override": "liveVideoToVideo" + } + }, + "/text-to-speech": { + "post": { + "tags": [ + "generate" + ], + "summary": "Text To Speech", + "description": "Generate a text-to-speech audio file based on the provided text input and speaker description.", + "operationId": "genTextToSpeech", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TextToSpeechParams" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AudioResponse", + "x-speakeasy-name-override": "data" + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "401": { + "description": "Unauthorized", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": 
"#/components/schemas/HTTPValidationError" + } + } + } + }, + "500": { + "description": "Internal Server Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + } + }, + "security": [ + { + "HTTPBearer": [] + } + ], + "x-speakeasy-name-override": "textToSpeech" + } + }, + "/health": { + "get": { + "summary": "Health", + "operationId": "health", + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HealthCheck" + } + } + } + } + } + } + }, + "/hardware/info": { + "get": { + "summary": "Hardware Info", + "operationId": "hardware_info", + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HardwareInformation" + } + } + } + } + } + } + }, + "/hardware/stats": { + "get": { + "summary": "Hardware Stats", + "operationId": "hardware_stats", + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HardwareStats" + } + } + } + } + } + } + } + }, + "components": { + "schemas": { + "APIError": { + "properties": { + "msg": { + "type": "string", + "title": "Msg", + "description": "The error message." + } + }, + "type": "object", + "required": [ + "msg" + ], + "title": "APIError", + "description": "API error response model." + }, + "AudioResponse": { + "properties": { + "audio": { + "allOf": [ + { + "$ref": "#/components/schemas/MediaURL" + } + ], + "description": "The generated audio." + } + }, + "type": "object", + "required": [ + "audio" + ], + "title": "AudioResponse", + "description": "Response model for audio generation." + }, + "Body_genAudioToText": { + "properties": { + "audio": { + "type": "string", + "format": "binary", + "title": "Audio", + "description": "Uploaded audio file to be transcribed." + }, + "model_id": { + "type": "string", + "title": "Model Id", + "description": "Hugging Face model ID used for transcription.", + "default": "" + }, + "return_timestamps": { + "type": "string", + "title": "Return Timestamps", + "description": "Return timestamps for the transcribed text. Supported values: 'sentence', 'word', or a string boolean ('true' or 'false'). Default is 'true' ('sentence'). 'false' means no timestamps. 'word' means word-based timestamps.", + "default": "true" + }, + "metadata": { + "type": "string", + "title": "Metadata", + "description": "Additional job information to be passed to the pipeline.", + "default": "{}" + } + }, + "type": "object", + "required": [ + "audio" + ], + "title": "Body_genAudioToText" + }, + "Body_genImageToImage": { + "properties": { + "prompt": { + "type": "string", + "title": "Prompt", + "description": "Text prompt(s) to guide image generation." + }, + "image": { + "type": "string", + "format": "binary", + "title": "Image", + "description": "Uploaded image to modify with the pipeline." + }, + "model_id": { + "type": "string", + "title": "Model Id", + "description": "Hugging Face model ID used for image generation.", + "default": "" + }, + "loras": { + "type": "string", + "title": "Loras", + "description": "A LoRA (Low-Rank Adaptation) model and its corresponding weight for image generation. 
Example: { \"latent-consistency/lcm-lora-sdxl\": 1.0, \"nerijs/pixel-art-xl\": 1.2}.", + "default": "" + }, + "strength": { + "type": "number", + "title": "Strength", + "description": "Degree of transformation applied to the reference image (0 to 1).", + "default": 0.8 + }, + "guidance_scale": { + "type": "number", + "title": "Guidance Scale", + "description": "Encourages model to generate images closely linked to the text prompt (higher values may reduce image quality).", + "default": 7.5 + }, + "image_guidance_scale": { + "type": "number", + "title": "Image Guidance Scale", + "description": "Degree to which the generated image is pushed towards the initial image.", + "default": 1.5 + }, + "negative_prompt": { + "type": "string", + "title": "Negative Prompt", + "description": "Text prompt(s) to guide what to exclude from image generation. Ignored if guidance_scale < 1.", + "default": "" + }, + "safety_check": { + "type": "boolean", + "title": "Safety Check", + "description": "Perform a safety check to estimate if generated images could be offensive or harmful.", + "default": true + }, + "seed": { + "type": "integer", + "title": "Seed", + "description": "Seed for random number generation." + }, + "num_inference_steps": { + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of denoising steps. More steps usually lead to higher quality images but slower inference. Modulated by strength.", + "default": 100 + }, + "num_images_per_prompt": { + "type": "integer", + "title": "Num Images Per Prompt", + "description": "Number of images to generate per prompt.", + "default": 1 + } + }, + "type": "object", + "required": [ + "prompt", + "image" + ], + "title": "Body_genImageToImage" + }, + "Body_genImageToText": { + "properties": { + "image": { + "type": "string", + "format": "binary", + "title": "Image", + "description": "Uploaded image to transform with the pipeline." + }, + "prompt": { + "type": "string", + "title": "Prompt", + "description": "Text prompt(s) to guide transformation.", + "default": "" + }, + "model_id": { + "type": "string", + "title": "Model Id", + "description": "Hugging Face model ID used for transformation.", + "default": "" + } + }, + "type": "object", + "required": [ + "image" + ], + "title": "Body_genImageToText" + }, + "Body_genImageToVideo": { + "properties": { + "image": { + "type": "string", + "format": "binary", + "title": "Image", + "description": "Uploaded image to generate a video from." + }, + "model_id": { + "type": "string", + "title": "Model Id", + "description": "Hugging Face model ID used for video generation.", + "default": "" + }, + "height": { + "type": "integer", + "title": "Height", + "description": "The height in pixels of the generated video.", + "default": 576 + }, + "width": { + "type": "integer", + "title": "Width", + "description": "The width in pixels of the generated video.", + "default": 1024 + }, + "fps": { + "type": "integer", + "title": "Fps", + "description": "The frames per second of the generated video.", + "default": 6 + }, + "motion_bucket_id": { + "type": "integer", + "title": "Motion Bucket Id", + "description": "Used for conditioning the amount of motion for the generation. The higher the number the more motion will be in the video.", + "default": 127 + }, + "noise_aug_strength": { + "type": "number", + "title": "Noise Aug Strength", + "description": "Amount of noise added to the conditioning image. 
Higher values reduce resemblance to the conditioning image and increase motion.", + "default": 0.02 + }, + "safety_check": { + "type": "boolean", + "title": "Safety Check", + "description": "Perform a safety check to estimate if generated images could be offensive or harmful.", + "default": true + }, + "seed": { + "type": "integer", + "title": "Seed", + "description": "Seed for random number generation." + }, + "num_inference_steps": { + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of denoising steps. More steps usually lead to higher quality images but slower inference. Modulated by strength.", + "default": 25 + } + }, + "type": "object", + "required": [ + "image" + ], + "title": "Body_genImageToVideo" + }, + "Body_genSegmentAnything2": { + "properties": { + "image": { + "type": "string", + "format": "binary", + "title": "Image", + "description": "Image to segment." + }, + "model_id": { + "type": "string", + "title": "Model Id", + "description": "Hugging Face model ID used for image generation.", + "default": "" + }, + "point_coords": { + "type": "string", + "title": "Point Coords", + "description": "Nx2 array of point prompts to the model, where each point is in (X,Y) in pixels." + }, + "point_labels": { + "type": "string", + "title": "Point Labels", + "description": "Labels for the point prompts, where 1 indicates a foreground point and 0 indicates a background point." + }, + "box": { + "type": "string", + "title": "Box", + "description": "A length 4 array given as a box prompt to the model, in XYXY format." + }, + "mask_input": { + "type": "string", + "title": "Mask Input", + "description": "A low-resolution mask input to the model, typically from a previous prediction iteration, with the form 1xHxW (H=W=256 for SAM)." + }, + "multimask_output": { + "type": "boolean", + "title": "Multimask Output", + "description": "If true, the model will return three masks for ambiguous input prompts, often producing better masks than a single prediction.", + "default": true + }, + "return_logits": { + "type": "boolean", + "title": "Return Logits", + "description": "If true, returns un-thresholded mask logits instead of a binary mask.", + "default": true + }, + "normalize_coords": { + "type": "boolean", + "title": "Normalize Coords", + "description": "If true, the point coordinates will be normalized to the range [0,1], with point_coords expected to be with respect to image dimensions.", + "default": true + } + }, + "type": "object", + "required": [ + "image" + ], + "title": "Body_genSegmentAnything2" + }, + "Body_genUpscale": { + "properties": { + "prompt": { + "type": "string", + "title": "Prompt", + "description": "Text prompt(s) to guide upscaled image generation." + }, + "image": { + "type": "string", + "format": "binary", + "title": "Image", + "description": "Uploaded image to modify with the pipeline." + }, + "model_id": { + "type": "string", + "title": "Model Id", + "description": "Hugging Face model ID used for upscaled image generation.", + "default": "" + }, + "safety_check": { + "type": "boolean", + "title": "Safety Check", + "description": "Perform a safety check to estimate if generated images could be offensive or harmful.", + "default": true + }, + "seed": { + "type": "integer", + "title": "Seed", + "description": "Seed for random number generation." + }, + "num_inference_steps": { + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of denoising steps. More steps usually lead to higher quality images but slower inference. 
Modulated by strength.", + "default": 75 + } + }, + "type": "object", + "required": [ + "prompt", + "image" + ], + "title": "Body_genUpscale" + }, + "Chunk": { + "properties": { + "timestamp": { + "items": {}, + "type": "array", + "title": "Timestamp", + "description": "The timestamp of the chunk." + }, + "text": { + "type": "string", + "title": "Text", + "description": "The text of the chunk." + } + }, + "type": "object", + "required": [ + "timestamp", + "text" + ], + "title": "Chunk", + "description": "A chunk of text with a timestamp." + }, + "GPUComputeInfo": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "name": { + "type": "string", + "title": "Name" + }, + "memory_total": { + "type": "integer", + "title": "Memory Total" + }, + "memory_free": { + "type": "integer", + "title": "Memory Free" + }, + "major": { + "type": "integer", + "title": "Major" + }, + "minor": { + "type": "integer", + "title": "Minor" + } + }, + "type": "object", + "required": [ + "id", + "name", + "memory_total", + "memory_free", + "major", + "minor" + ], + "title": "GPUComputeInfo", + "description": "Model for detailed GPU compute information." + }, + "GPUUtilizationInfo": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "name": { + "type": "string", + "title": "Name" + }, + "memory_total": { + "type": "integer", + "title": "Memory Total" + }, + "memory_free": { + "type": "integer", + "title": "Memory Free" + }, + "utilization_compute": { + "type": "integer", + "title": "Utilization Compute" + }, + "utilization_memory": { + "type": "integer", + "title": "Utilization Memory" + } + }, + "type": "object", + "required": [ + "id", + "name", + "memory_total", + "memory_free", + "utilization_compute", + "utilization_memory" + ], + "title": "GPUUtilizationInfo", + "description": "Model for GPU utilization statistics." + }, + "HTTPError": { + "properties": { + "detail": { + "allOf": [ + { + "$ref": "#/components/schemas/APIError" + } + ], + "description": "Detailed error information." + } + }, + "type": "object", + "required": [ + "detail" + ], + "title": "HTTPError", + "description": "HTTP error response model." + }, + "HTTPValidationError": { + "properties": { + "detail": { + "items": { + "$ref": "#/components/schemas/ValidationError" + }, + "type": "array", + "title": "Detail" + } + }, + "type": "object", + "title": "HTTPValidationError" + }, + "HardwareInformation": { + "properties": { + "pipeline": { + "type": "string", + "title": "Pipeline" + }, + "model_id": { + "type": "string", + "title": "Model Id" + }, + "gpu_info": { + "additionalProperties": { + "$ref": "#/components/schemas/GPUComputeInfo" + }, + "type": "object", + "title": "Gpu Info" + } + }, + "type": "object", + "required": [ + "pipeline", + "model_id", + "gpu_info" + ], + "title": "HardwareInformation", + "description": "Response model for GPU information." + }, + "HardwareStats": { + "properties": { + "pipeline": { + "type": "string", + "title": "Pipeline" + }, + "model_id": { + "type": "string", + "title": "Model Id" + }, + "gpu_stats": { + "additionalProperties": { + "$ref": "#/components/schemas/GPUUtilizationInfo" + }, + "type": "object", + "title": "Gpu Stats" + } + }, + "type": "object", + "required": [ + "pipeline", + "model_id", + "gpu_stats" + ], + "title": "HardwareStats", + "description": "Response model for real-time GPU statistics." 
+ }, + "HealthCheck": { + "properties": { + "status": { + "type": "string", + "enum": [ + "OK", + "ERROR", + "IDLE" + ], + "title": "Status", + "description": "The health status of the pipeline" + } + }, + "type": "object", + "required": [ + "status" + ], + "title": "HealthCheck" + }, + "ImageResponse": { + "properties": { + "images": { + "items": { + "$ref": "#/components/schemas/Media" + }, + "type": "array", + "title": "Images", + "description": "The generated images." + } + }, + "type": "object", + "required": [ + "images" + ], + "title": "ImageResponse", + "description": "Response model for image generation." + }, + "ImageToTextResponse": { + "properties": { + "text": { + "type": "string", + "title": "Text", + "description": "The generated text." + } + }, + "type": "object", + "required": [ + "text" + ], + "title": "ImageToTextResponse", + "description": "Response model for text generation." + }, + "LLMChoice": { + "properties": { + "index": { + "type": "integer", + "title": "Index" + }, + "finish_reason": { + "type": "string", + "title": "Finish Reason", + "default": "" + }, + "delta": { + "allOf": [ + { + "$ref": "#/components/schemas/LLMMessage" + } + ] + }, + "message": { + "allOf": [ + { + "$ref": "#/components/schemas/LLMMessage" + } + ] + } + }, + "type": "object", + "required": [ + "index" + ], + "title": "LLMChoice" + }, + "LLMMessage": { + "properties": { + "role": { + "type": "string", + "title": "Role" + }, + "content": { + "type": "string", + "title": "Content" + } + }, + "type": "object", + "required": [ + "role", + "content" + ], + "title": "LLMMessage" + }, + "LLMRequest": { + "properties": { + "messages": { + "items": { + "$ref": "#/components/schemas/LLMMessage" + }, + "type": "array", + "title": "Messages" + }, + "model": { + "type": "string", + "title": "Model", + "default": "" + }, + "temperature": { + "type": "number", + "title": "Temperature", + "default": 0.7 + }, + "max_tokens": { + "type": "integer", + "title": "Max Tokens", + "default": 256 + }, + "top_p": { + "type": "number", + "title": "Top P", + "default": 1 + }, + "top_k": { + "type": "integer", + "title": "Top K", + "default": -1 + }, + "stream": { + "type": "boolean", + "title": "Stream", + "default": false + } + }, + "type": "object", + "required": [ + "messages" + ], + "title": "LLMRequest" + }, + "LLMResponse": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "model": { + "type": "string", + "title": "Model" + }, + "created": { + "type": "integer", + "title": "Created" + }, + "usage": { + "$ref": "#/components/schemas/LLMTokenUsage" + }, + "choices": { + "items": { + "$ref": "#/components/schemas/LLMChoice" + }, + "type": "array", + "title": "Choices" + } + }, + "type": "object", + "required": [ + "id", + "model", + "created", + "usage", + "choices" + ], + "title": "LLMResponse" + }, + "LLMTokenUsage": { + "properties": { + "prompt_tokens": { + "type": "integer", + "title": "Prompt Tokens" + }, + "completion_tokens": { + "type": "integer", + "title": "Completion Tokens" + }, + "total_tokens": { + "type": "integer", + "title": "Total Tokens" + } + }, + "type": "object", + "required": [ + "prompt_tokens", + "completion_tokens", + "total_tokens" + ], + "title": "LLMTokenUsage" + }, + "LiveVideoToVideoParams": { + "properties": { + "subscribe_url": { + "type": "string", + "title": "Subscribe Url", + "description": "Source URL of the incoming stream to subscribe to." 
+ }, + "publish_url": { + "type": "string", + "title": "Publish Url", + "description": "Destination URL of the outgoing stream to publish." + }, + "control_url": { + "type": "string", + "title": "Control Url", + "description": "URL for subscribing via Trickle protocol for updates in the live video-to-video generation params.", + "default": "" + }, + "events_url": { + "type": "string", + "title": "Events Url", + "description": "URL for publishing events via Trickle protocol for pipeline status and logs.", + "default": "" + }, + "model_id": { + "type": "string", + "title": "Model Id", + "description": "Name of the pipeline to run in the live video to video job. Notice that this is named model_id for consistency with other routes, but it does not refer to a Hugging Face model ID. The exact model(s) depends on the pipeline implementation and might be configurable via the `params` argument.", + "default": "" + }, + "params": { + "type": "object", + "title": "Params", + "description": "Initial parameters for the pipeline.", + "default": {} + } + }, + "type": "object", + "required": [ + "subscribe_url", + "publish_url" + ], + "title": "LiveVideoToVideoParams" + }, + "LiveVideoToVideoResponse": { + "properties": { + "subscribe_url": { + "type": "string", + "title": "Subscribe Url", + "description": "Source URL of the incoming stream to subscribe to" + }, + "publish_url": { + "type": "string", + "title": "Publish Url", + "description": "Destination URL of the outgoing stream to publish to" + }, + "control_url": { + "type": "string", + "title": "Control Url", + "description": "URL for updating the live video-to-video generation", + "default": "" + }, + "events_url": { + "type": "string", + "title": "Events Url", + "description": "URL for subscribing to events for pipeline status and logs", + "default": "" + } + }, + "type": "object", + "required": [ + "subscribe_url", + "publish_url" + ], + "title": "LiveVideoToVideoResponse", + "description": "Response model for live video-to-video generation." + }, + "MasksResponse": { + "properties": { + "masks": { + "type": "string", + "title": "Masks", + "description": "The generated masks." + }, + "scores": { + "type": "string", + "title": "Scores", + "description": "The model's confidence scores for each generated mask." + }, + "logits": { + "type": "string", + "title": "Logits", + "description": "The raw, unnormalized predictions (logits) for the masks." + } + }, + "type": "object", + "required": [ + "masks", + "scores", + "logits" + ], + "title": "MasksResponse", + "description": "Response model for object segmentation." + }, + "Media": { + "properties": { + "url": { + "type": "string", + "title": "Url", + "description": "The URL where the media can be accessed." + }, + "seed": { + "type": "integer", + "title": "Seed", + "description": "The seed used to generate the media." + }, + "nsfw": { + "type": "boolean", + "title": "Nsfw", + "description": "Whether the media was flagged as NSFW." + } + }, + "type": "object", + "required": [ + "url", + "seed", + "nsfw" + ], + "title": "Media", + "description": "A media object containing information about the generated media." + }, + "MediaURL": { + "properties": { + "url": { + "type": "string", + "title": "Url", + "description": "The URL where the media can be accessed." + } + }, + "type": "object", + "required": [ + "url" + ], + "title": "MediaURL", + "description": "A URL from which media can be accessed." 
+ }, + "TextResponse": { + "properties": { + "text": { + "type": "string", + "title": "Text", + "description": "The generated text." + }, + "chunks": { + "items": { + "$ref": "#/components/schemas/Chunk" + }, + "type": "array", + "title": "Chunks", + "description": "The generated text chunks." + } + }, + "type": "object", + "required": [ + "text", + "chunks" + ], + "title": "TextResponse", + "description": "Response model for text generation." + }, + "TextToImageParams": { + "properties": { + "model_id": { + "type": "string", + "title": "Model Id", + "description": "Hugging Face model ID used for image generation.", + "default": "" + }, + "loras": { + "type": "string", + "title": "Loras", + "description": "A LoRA (Low-Rank Adaptation) model and its corresponding weight for image generation. Example: { \"latent-consistency/lcm-lora-sdxl\": 1.0, \"nerijs/pixel-art-xl\": 1.2}.", + "default": "" + }, + "prompt": { + "type": "string", + "title": "Prompt", + "description": "Text prompt(s) to guide image generation. Separate multiple prompts with '|' if supported by the model." + }, + "height": { + "type": "integer", + "title": "Height", + "description": "The height in pixels of the generated image.", + "default": 576 + }, + "width": { + "type": "integer", + "title": "Width", + "description": "The width in pixels of the generated image.", + "default": 1024 + }, + "guidance_scale": { + "type": "number", + "title": "Guidance Scale", + "description": "Encourages model to generate images closely linked to the text prompt (higher values may reduce image quality).", + "default": 7.5 + }, + "negative_prompt": { + "type": "string", + "title": "Negative Prompt", + "description": "Text prompt(s) to guide what to exclude from image generation. Ignored if guidance_scale < 1.", + "default": "" + }, + "safety_check": { + "type": "boolean", + "title": "Safety Check", + "description": "Perform a safety check to estimate if generated images could be offensive or harmful.", + "default": true + }, + "seed": { + "type": "integer", + "title": "Seed", + "description": "Seed for random number generation." + }, + "num_inference_steps": { + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of denoising steps. More steps usually lead to higher quality images but slower inference. Modulated by strength.", + "default": 50 + }, + "num_images_per_prompt": { + "type": "integer", + "title": "Num Images Per Prompt", + "description": "Number of images to generate per prompt.", + "default": 1 + } + }, + "type": "object", + "required": [ + "prompt" + ], + "title": "TextToImageParams" + }, + "TextToSpeechParams": { + "properties": { + "model_id": { + "type": "string", + "title": "Model Id", + "description": "Hugging Face model ID used for text to speech generation.", + "default": "" + }, + "text": { + "type": "string", + "title": "Text", + "description": "Text input for speech generation.", + "default": "" + }, + "description": { + "type": "string", + "title": "Description", + "description": "Description of speaker to steer text to speech generation.", + "default": "A male speaker delivers a slightly expressive and animated speech with a moderate speed and pitch." 
+ } + }, + "type": "object", + "title": "TextToSpeechParams" + }, + "ValidationError": { + "properties": { + "loc": { + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + } + ] + }, + "type": "array", + "title": "Location" + }, + "msg": { + "type": "string", + "title": "Message" + }, + "type": { + "type": "string", + "title": "Error Type" + } + }, + "type": "object", + "required": [ + "loc", + "msg", + "type" + ], + "title": "ValidationError" + }, + "VideoResponse": { + "properties": { + "frames": { + "items": { + "items": { + "$ref": "#/components/schemas/Media" + }, + "type": "array" + }, + "type": "array", + "title": "Frames", + "description": "The generated video frames." + } + }, + "type": "object", + "required": [ + "frames" + ], + "title": "VideoResponse", + "description": "Response model for video generation." + } + }, + "securitySchemes": { + "HTTPBearer": { + "type": "http", + "scheme": "bearer" + } + } + } +} diff --git a/ai/worker/api/openapi.yaml b/ai/worker/api/openapi.yaml new file mode 100644 index 00000000..94758142 --- /dev/null +++ b/ai/worker/api/openapi.yaml @@ -0,0 +1,1390 @@ +# !!Auto-generated by 'gen_openapi.py'. DO NOT EDIT!! +openapi: 3.1.0 +info: + title: Livepeer AI Runner + description: An application to run AI pipelines + version: 0.0.0 +servers: +- url: https://dream-gateway.livepeer.cloud + description: Livepeer Cloud Community Gateway +- url: https://livepeer.studio/api/beta/generate + description: Livepeer Studio Gateway +paths: + /text-to-image: + post: + tags: + - generate + summary: Text To Image + description: Generate images from text prompts. + operationId: genTextToImage + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/TextToImageParams' + required: true + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/ImageResponse' + x-speakeasy-name-override: data + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '500': + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '422': + description: Validation Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPValidationError' + security: + - HTTPBearer: [] + x-speakeasy-name-override: textToImage + /image-to-image: + post: + tags: + - generate + summary: Image To Image + description: Apply image transformations to a provided image. 
+ operationId: genImageToImage + requestBody: + content: + multipart/form-data: + schema: + $ref: '#/components/schemas/Body_genImageToImage' + required: true + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/ImageResponse' + x-speakeasy-name-override: data + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '500': + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '422': + description: Validation Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPValidationError' + security: + - HTTPBearer: [] + x-speakeasy-name-override: imageToImage + /image-to-video: + post: + tags: + - generate + summary: Image To Video + description: Generate a video from a provided image. + operationId: genImageToVideo + requestBody: + content: + multipart/form-data: + schema: + $ref: '#/components/schemas/Body_genImageToVideo' + required: true + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/VideoResponse' + x-speakeasy-name-override: data + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '500': + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '422': + description: Validation Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPValidationError' + security: + - HTTPBearer: [] + x-speakeasy-name-override: imageToVideo + /upscale: + post: + tags: + - generate + summary: Upscale + description: Upscale an image by increasing its resolution. + operationId: genUpscale + requestBody: + content: + multipart/form-data: + schema: + $ref: '#/components/schemas/Body_genUpscale' + required: true + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/ImageResponse' + x-speakeasy-name-override: data + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '500': + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '422': + description: Validation Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPValidationError' + security: + - HTTPBearer: [] + x-speakeasy-name-override: upscale + /audio-to-text: + post: + tags: + - generate + summary: Audio To Text + description: Transcribe audio files to text. 
+ operationId: genAudioToText + requestBody: + content: + multipart/form-data: + schema: + $ref: '#/components/schemas/Body_genAudioToText' + required: true + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/TextResponse' + x-speakeasy-name-override: data + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '413': + description: Request Entity Too Large + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '415': + description: Unsupported Media Type + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '500': + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '422': + description: Validation Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPValidationError' + security: + - HTTPBearer: [] + x-speakeasy-name-override: audioToText + /segment-anything-2: + post: + tags: + - generate + summary: Segment Anything 2 + description: Segment objects in an image. + operationId: genSegmentAnything2 + requestBody: + content: + multipart/form-data: + schema: + $ref: '#/components/schemas/Body_genSegmentAnything2' + required: true + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/MasksResponse' + x-speakeasy-name-override: data + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '500': + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '422': + description: Validation Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPValidationError' + security: + - HTTPBearer: [] + x-speakeasy-name-override: segmentAnything2 + /llm: + post: + tags: + - generate + summary: LLM + description: Generate text using a language model. + operationId: genLLM + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/LLMRequest' + required: true + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/LLMResponse' + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '500': + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '422': + description: Validation Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPValidationError' + security: + - HTTPBearer: [] + x-speakeasy-name-override: llm + /image-to-text: + post: + tags: + - generate + summary: Image To Text + description: Transform image files to text. 
+ operationId: genImageToText + requestBody: + content: + multipart/form-data: + schema: + $ref: '#/components/schemas/Body_genImageToText' + required: true + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/ImageToTextResponse' + x-speakeasy-name-override: data + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '413': + description: Request Entity Too Large + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '500': + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '422': + description: Validation Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPValidationError' + security: + - HTTPBearer: [] + x-speakeasy-name-override: imageToText + /live-video-to-video: + post: + tags: + - generate + summary: Live Video To Video + description: Apply transformations to a live video streamed to the returned + endpoints. + operationId: genLiveVideoToVideo + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/LiveVideoToVideoParams' + required: true + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/LiveVideoToVideoResponse' + x-speakeasy-name-override: data + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '500': + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '422': + description: Validation Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPValidationError' + security: + - HTTPBearer: [] + x-speakeasy-name-override: liveVideoToVideo + /text-to-speech: + post: + tags: + - generate + summary: Text To Speech + description: Generate a text-to-speech audio file based on the provided text + input and speaker description. 
+ operationId: genTextToSpeech + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/TextToSpeechParams' + required: true + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/AudioResponse' + x-speakeasy-name-override: data + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '500': + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '422': + description: Validation Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPValidationError' + security: + - HTTPBearer: [] + x-speakeasy-name-override: textToSpeech + /health: + get: + summary: Health + operationId: health + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/HealthCheck' + /hardware/info: + get: + summary: Hardware Info + operationId: hardware_info + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/HardwareInformation' + /hardware/stats: + get: + summary: Hardware Stats + operationId: hardware_stats + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/HardwareStats' +components: + schemas: + APIError: + properties: + msg: + type: string + title: Msg + description: The error message. + type: object + required: + - msg + title: APIError + description: API error response model. + AudioResponse: + properties: + audio: + allOf: + - $ref: '#/components/schemas/MediaURL' + description: The generated audio. + type: object + required: + - audio + title: AudioResponse + description: Response model for audio generation. + Body_genAudioToText: + properties: + audio: + type: string + format: binary + title: Audio + description: Uploaded audio file to be transcribed. + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for transcription. + default: '' + return_timestamps: + type: string + title: Return Timestamps + description: 'Return timestamps for the transcribed text. Supported values: + ''sentence'', ''word'', or a string boolean (''true'' or ''false''). Default + is ''true'' (''sentence''). ''false'' means no timestamps. ''word'' means + word-based timestamps.' + default: 'true' + metadata: + type: string + title: Metadata + description: Additional job information to be passed to the pipeline. + default: '{}' + type: object + required: + - audio + title: Body_genAudioToText + Body_genImageToImage: + properties: + prompt: + type: string + title: Prompt + description: Text prompt(s) to guide image generation. + image: + type: string + format: binary + title: Image + description: Uploaded image to modify with the pipeline. + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for image generation. + default: '' + loras: + type: string + title: Loras + description: 'A LoRA (Low-Rank Adaptation) model and its corresponding weight + for image generation. Example: { "latent-consistency/lcm-lora-sdxl": 1.0, + "nerijs/pixel-art-xl": 1.2}.' + default: '' + strength: + type: number + title: Strength + description: Degree of transformation applied to the reference image (0 + to 1). 
+ default: 0.8 + guidance_scale: + type: number + title: Guidance Scale + description: Encourages model to generate images closely linked to the text + prompt (higher values may reduce image quality). + default: 7.5 + image_guidance_scale: + type: number + title: Image Guidance Scale + description: Degree to which the generated image is pushed towards the initial + image. + default: 1.5 + negative_prompt: + type: string + title: Negative Prompt + description: Text prompt(s) to guide what to exclude from image generation. + Ignored if guidance_scale < 1. + default: '' + safety_check: + type: boolean + title: Safety Check + description: Perform a safety check to estimate if generated images could + be offensive or harmful. + default: true + seed: + type: integer + title: Seed + description: Seed for random number generation. + num_inference_steps: + type: integer + title: Num Inference Steps + description: Number of denoising steps. More steps usually lead to higher + quality images but slower inference. Modulated by strength. + default: 100 + num_images_per_prompt: + type: integer + title: Num Images Per Prompt + description: Number of images to generate per prompt. + default: 1 + type: object + required: + - prompt + - image + title: Body_genImageToImage + Body_genImageToText: + properties: + image: + type: string + format: binary + title: Image + description: Uploaded image to transform with the pipeline. + prompt: + type: string + title: Prompt + description: Text prompt(s) to guide transformation. + default: '' + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for transformation. + default: '' + type: object + required: + - image + title: Body_genImageToText + Body_genImageToVideo: + properties: + image: + type: string + format: binary + title: Image + description: Uploaded image to generate a video from. + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for video generation. + default: '' + height: + type: integer + title: Height + description: The height in pixels of the generated video. + default: 576 + width: + type: integer + title: Width + description: The width in pixels of the generated video. + default: 1024 + fps: + type: integer + title: Fps + description: The frames per second of the generated video. + default: 6 + motion_bucket_id: + type: integer + title: Motion Bucket Id + description: Used for conditioning the amount of motion for the generation. + The higher the number the more motion will be in the video. + default: 127 + noise_aug_strength: + type: number + title: Noise Aug Strength + description: Amount of noise added to the conditioning image. Higher values + reduce resemblance to the conditioning image and increase motion. + default: 0.02 + safety_check: + type: boolean + title: Safety Check + description: Perform a safety check to estimate if generated images could + be offensive or harmful. + default: true + seed: + type: integer + title: Seed + description: Seed for random number generation. + num_inference_steps: + type: integer + title: Num Inference Steps + description: Number of denoising steps. More steps usually lead to higher + quality images but slower inference. Modulated by strength. + default: 25 + type: object + required: + - image + title: Body_genImageToVideo + Body_genSegmentAnything2: + properties: + image: + type: string + format: binary + title: Image + description: Image to segment. 
+ model_id: + type: string + title: Model Id + description: Hugging Face model ID used for image generation. + default: '' + point_coords: + type: string + title: Point Coords + description: Nx2 array of point prompts to the model, where each point is + in (X,Y) in pixels. + point_labels: + type: string + title: Point Labels + description: Labels for the point prompts, where 1 indicates a foreground + point and 0 indicates a background point. + box: + type: string + title: Box + description: A length 4 array given as a box prompt to the model, in XYXY + format. + mask_input: + type: string + title: Mask Input + description: A low-resolution mask input to the model, typically from a + previous prediction iteration, with the form 1xHxW (H=W=256 for SAM). + multimask_output: + type: boolean + title: Multimask Output + description: If true, the model will return three masks for ambiguous input + prompts, often producing better masks than a single prediction. + default: true + return_logits: + type: boolean + title: Return Logits + description: If true, returns un-thresholded mask logits instead of a binary + mask. + default: true + normalize_coords: + type: boolean + title: Normalize Coords + description: If true, the point coordinates will be normalized to the range + [0,1], with point_coords expected to be with respect to image dimensions. + default: true + type: object + required: + - image + title: Body_genSegmentAnything2 + Body_genUpscale: + properties: + prompt: + type: string + title: Prompt + description: Text prompt(s) to guide upscaled image generation. + image: + type: string + format: binary + title: Image + description: Uploaded image to modify with the pipeline. + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for upscaled image generation. + default: '' + safety_check: + type: boolean + title: Safety Check + description: Perform a safety check to estimate if generated images could + be offensive or harmful. + default: true + seed: + type: integer + title: Seed + description: Seed for random number generation. + num_inference_steps: + type: integer + title: Num Inference Steps + description: Number of denoising steps. More steps usually lead to higher + quality images but slower inference. Modulated by strength. + default: 75 + type: object + required: + - prompt + - image + title: Body_genUpscale + Chunk: + properties: + timestamp: + items: {} + type: array + title: Timestamp + description: The timestamp of the chunk. + text: + type: string + title: Text + description: The text of the chunk. + type: object + required: + - timestamp + - text + title: Chunk + description: A chunk of text with a timestamp. + GPUComputeInfo: + properties: + id: + type: string + title: Id + name: + type: string + title: Name + memory_total: + type: integer + title: Memory Total + memory_free: + type: integer + title: Memory Free + major: + type: integer + title: Major + minor: + type: integer + title: Minor + type: object + required: + - id + - name + - memory_total + - memory_free + - major + - minor + title: GPUComputeInfo + description: Model for detailed GPU compute information. 
+ GPUUtilizationInfo: + properties: + id: + type: string + title: Id + name: + type: string + title: Name + memory_total: + type: integer + title: Memory Total + memory_free: + type: integer + title: Memory Free + utilization_compute: + type: integer + title: Utilization Compute + utilization_memory: + type: integer + title: Utilization Memory + type: object + required: + - id + - name + - memory_total + - memory_free + - utilization_compute + - utilization_memory + title: GPUUtilizationInfo + description: Model for GPU utilization statistics. + HTTPError: + properties: + detail: + allOf: + - $ref: '#/components/schemas/APIError' + description: Detailed error information. + type: object + required: + - detail + title: HTTPError + description: HTTP error response model. + HTTPValidationError: + properties: + detail: + items: + $ref: '#/components/schemas/ValidationError' + type: array + title: Detail + type: object + title: HTTPValidationError + HardwareInformation: + properties: + pipeline: + type: string + title: Pipeline + model_id: + type: string + title: Model Id + gpu_info: + additionalProperties: + $ref: '#/components/schemas/GPUComputeInfo' + type: object + title: Gpu Info + type: object + required: + - pipeline + - model_id + - gpu_info + title: HardwareInformation + description: Response model for GPU information. + HardwareStats: + properties: + pipeline: + type: string + title: Pipeline + model_id: + type: string + title: Model Id + gpu_stats: + additionalProperties: + $ref: '#/components/schemas/GPUUtilizationInfo' + type: object + title: Gpu Stats + type: object + required: + - pipeline + - model_id + - gpu_stats + title: HardwareStats + description: Response model for real-time GPU statistics. + HealthCheck: + properties: + status: + type: string + enum: + - OK + - ERROR + - IDLE + title: Status + description: The health status of the pipeline + type: object + required: + - status + title: HealthCheck + ImageResponse: + properties: + images: + items: + $ref: '#/components/schemas/Media' + type: array + title: Images + description: The generated images. + type: object + required: + - images + title: ImageResponse + description: Response model for image generation. + ImageToTextResponse: + properties: + text: + type: string + title: Text + description: The generated text. + type: object + required: + - text + title: ImageToTextResponse + description: Response model for text generation. 
+ LLMChoice: + properties: + index: + type: integer + title: Index + finish_reason: + type: string + title: Finish Reason + default: '' + delta: + allOf: + - $ref: '#/components/schemas/LLMMessage' + message: + allOf: + - $ref: '#/components/schemas/LLMMessage' + type: object + required: + - index + title: LLMChoice + LLMMessage: + properties: + role: + type: string + title: Role + content: + type: string + title: Content + type: object + required: + - role + - content + title: LLMMessage + LLMRequest: + properties: + messages: + items: + $ref: '#/components/schemas/LLMMessage' + type: array + title: Messages + model: + type: string + title: Model + default: '' + temperature: + type: number + title: Temperature + default: 0.7 + max_tokens: + type: integer + title: Max Tokens + default: 256 + top_p: + type: number + title: Top P + default: 1.0 + top_k: + type: integer + title: Top K + default: -1 + stream: + type: boolean + title: Stream + default: false + type: object + required: + - messages + title: LLMRequest + LLMResponse: + properties: + id: + type: string + title: Id + model: + type: string + title: Model + created: + type: integer + title: Created + usage: + $ref: '#/components/schemas/LLMTokenUsage' + choices: + items: + $ref: '#/components/schemas/LLMChoice' + type: array + title: Choices + type: object + required: + - id + - model + - created + - usage + - choices + title: LLMResponse + LLMTokenUsage: + properties: + prompt_tokens: + type: integer + title: Prompt Tokens + completion_tokens: + type: integer + title: Completion Tokens + total_tokens: + type: integer + title: Total Tokens + type: object + required: + - prompt_tokens + - completion_tokens + - total_tokens + title: LLMTokenUsage + LiveVideoToVideoParams: + properties: + subscribe_url: + type: string + title: Subscribe Url + description: Source URL of the incoming stream to subscribe to. + publish_url: + type: string + title: Publish Url + description: Destination URL of the outgoing stream to publish. + control_url: + type: string + title: Control Url + description: URL for subscribing via Trickle protocol for updates in the + live video-to-video generation params. + default: '' + events_url: + type: string + title: Events Url + description: URL for publishing events via Trickle protocol for pipeline + status and logs. + default: '' + model_id: + type: string + title: Model Id + description: Name of the pipeline to run in the live video to video job. + Notice that this is named model_id for consistency with other routes, + but it does not refer to a Hugging Face model ID. The exact model(s) depends + on the pipeline implementation and might be configurable via the `params` + argument. + default: '' + params: + type: object + title: Params + description: Initial parameters for the pipeline. 
+ default: {} + type: object + required: + - subscribe_url + - publish_url + title: LiveVideoToVideoParams + LiveVideoToVideoResponse: + properties: + subscribe_url: + type: string + title: Subscribe Url + description: Source URL of the incoming stream to subscribe to + publish_url: + type: string + title: Publish Url + description: Destination URL of the outgoing stream to publish to + control_url: + type: string + title: Control Url + description: URL for updating the live video-to-video generation + default: '' + events_url: + type: string + title: Events Url + description: URL for subscribing to events for pipeline status and logs + default: '' + type: object + required: + - subscribe_url + - publish_url + title: LiveVideoToVideoResponse + description: Response model for live video-to-video generation. + MasksResponse: + properties: + masks: + type: string + title: Masks + description: The generated masks. + scores: + type: string + title: Scores + description: The model's confidence scores for each generated mask. + logits: + type: string + title: Logits + description: The raw, unnormalized predictions (logits) for the masks. + type: object + required: + - masks + - scores + - logits + title: MasksResponse + description: Response model for object segmentation. + Media: + properties: + url: + type: string + title: Url + description: The URL where the media can be accessed. + seed: + type: integer + title: Seed + description: The seed used to generate the media. + nsfw: + type: boolean + title: Nsfw + description: Whether the media was flagged as NSFW. + type: object + required: + - url + - seed + - nsfw + title: Media + description: A media object containing information about the generated media. + MediaURL: + properties: + url: + type: string + title: Url + description: The URL where the media can be accessed. + type: object + required: + - url + title: MediaURL + description: A URL from which media can be accessed. + TextResponse: + properties: + text: + type: string + title: Text + description: The generated text. + chunks: + items: + $ref: '#/components/schemas/Chunk' + type: array + title: Chunks + description: The generated text chunks. + type: object + required: + - text + - chunks + title: TextResponse + description: Response model for text generation. + TextToImageParams: + properties: + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for image generation. + default: '' + loras: + type: string + title: Loras + description: 'A LoRA (Low-Rank Adaptation) model and its corresponding weight + for image generation. Example: { "latent-consistency/lcm-lora-sdxl": 1.0, + "nerijs/pixel-art-xl": 1.2}.' + default: '' + prompt: + type: string + title: Prompt + description: Text prompt(s) to guide image generation. Separate multiple + prompts with '|' if supported by the model. + height: + type: integer + title: Height + description: The height in pixels of the generated image. + default: 576 + width: + type: integer + title: Width + description: The width in pixels of the generated image. + default: 1024 + guidance_scale: + type: number + title: Guidance Scale + description: Encourages model to generate images closely linked to the text + prompt (higher values may reduce image quality). + default: 7.5 + negative_prompt: + type: string + title: Negative Prompt + description: Text prompt(s) to guide what to exclude from image generation. + Ignored if guidance_scale < 1. 
+ default: ''
+ safety_check:
+ type: boolean
+ title: Safety Check
+ description: Perform a safety check to estimate if generated images could
+ be offensive or harmful.
+ default: true
+ seed:
+ type: integer
+ title: Seed
+ description: Seed for random number generation.
+ num_inference_steps:
+ type: integer
+ title: Num Inference Steps
+ description: Number of denoising steps. More steps usually lead to higher
+ quality images but slower inference. Modulated by strength.
+ default: 50
+ num_images_per_prompt:
+ type: integer
+ title: Num Images Per Prompt
+ description: Number of images to generate per prompt.
+ default: 1
+ type: object
+ required:
+ - prompt
+ title: TextToImageParams
+ TextToSpeechParams:
+ properties:
+ model_id:
+ type: string
+ title: Model Id
+ description: Hugging Face model ID used for text to speech generation.
+ default: ''
+ text:
+ type: string
+ title: Text
+ description: Text input for speech generation.
+ default: ''
+ description:
+ type: string
+ title: Description
+ description: Description of speaker to steer text to speech generation.
+ default: A male speaker delivers a slightly expressive and animated speech
+ with a moderate speed and pitch.
+ type: object
+ title: TextToSpeechParams
+ ValidationError:
+ properties:
+ loc:
+ items:
+ anyOf:
+ - type: string
+ - type: integer
+ type: array
+ title: Location
+ msg:
+ type: string
+ title: Message
+ type:
+ type: string
+ title: Error Type
+ type: object
+ required:
+ - loc
+ - msg
+ - type
+ title: ValidationError
+ VideoResponse:
+ properties:
+ frames:
+ items:
+ items:
+ $ref: '#/components/schemas/Media'
+ type: array
+ type: array
+ title: Frames
+ description: The generated video frames.
+ type: object
+ required:
+ - frames
+ title: VideoResponse
+ description: Response model for video generation.
+ securitySchemes:
+ HTTPBearer:
+ type: http
+ scheme: bearer
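The routes defined above can be exercised with any HTTP client. Below is a minimal sketch of a `POST /text-to-image` call in Python, pointed at the community gateway listed under `servers`; the API key is a placeholder (keys come from the gateway operator), and the response handling simply walks the `ImageResponse` schema:

```python
import requests

GATEWAY = "https://dream-gateway.livepeer.cloud"  # first server entry in the spec
API_KEY = "YOUR_API_KEY"  # placeholder; issued by the gateway operator

resp = requests.post(
    f"{GATEWAY}/text-to-image",
    headers={"Authorization": f"Bearer {API_KEY}"},  # HTTPBearer security scheme
    json={
        "prompt": "a watercolor fox in a snowy forest",
        "width": 1024,   # spec default
        "height": 576,   # spec default
        "guidance_scale": 7.5,
        "safety_check": True,
    },
    timeout=120,
)
resp.raise_for_status()

# ImageResponse: {"images": [{"url": ..., "seed": ..., "nsfw": ...}]}
for image in resp.json()["images"]:
    print(image["seed"], image["nsfw"], image["url"])
```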
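The file-upload routes use `multipart/form-data` instead of JSON. A sketch for `/audio-to-text`, where the local file path is an assumption and `return_timestamps` takes the string values documented in `Body_genAudioToText`:

```python
import requests

GATEWAY = "https://dream-gateway.livepeer.cloud"
API_KEY = "YOUR_API_KEY"  # placeholder

with open("speech.wav", "rb") as audio:  # any local audio file (path is illustrative)
    resp = requests.post(
        f"{GATEWAY}/audio-to-text",
        headers={"Authorization": f"Bearer {API_KEY}"},
        files={"audio": audio},
        data={"return_timestamps": "word"},  # 'sentence', 'word', 'true', or 'false'
        timeout=300,
    )
resp.raise_for_status()

# TextResponse: {"text": ..., "chunks": [{"timestamp": [...], "text": ...}]}
result = resp.json()
print(result["text"])
for chunk in result["chunks"]:
    print(chunk["timestamp"], chunk["text"])
```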
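The `/llm` route follows the familiar chat-completion shape (`LLMRequest` in, `LLMResponse` out). A non-streaming sketch; set `stream` to true to receive incremental `delta` messages instead of a final `message`:

```python
import requests

GATEWAY = "https://dream-gateway.livepeer.cloud"
API_KEY = "YOUR_API_KEY"  # placeholder

resp = requests.post(
    f"{GATEWAY}/llm",
    headers={"Authorization": f"Bearer {API_KEY}"},
    json={
        "messages": [
            {"role": "system", "content": "You are a concise assistant."},
            {"role": "user", "content": "Explain what a Livepeer gateway does."},
        ],
        "max_tokens": 256,   # spec default
        "temperature": 0.7,  # spec default
        "stream": False,
    },
    timeout=120,
)
resp.raise_for_status()

body = resp.json()
print(body["choices"][0]["message"]["content"])
print("tokens used:", body["usage"]["total_tokens"])
```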
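`/image-to-video` is another multipart route; per `VideoResponse`, it returns batches of generated frames rather than an encoded video file. A sketch, with the input path again an assumption:

```python
import requests

GATEWAY = "https://dream-gateway.livepeer.cloud"
API_KEY = "YOUR_API_KEY"  # placeholder

with open("still.png", "rb") as image:  # source image (path is illustrative)
    resp = requests.post(
        f"{GATEWAY}/image-to-video",
        headers={"Authorization": f"Bearer {API_KEY}"},
        files={"image": image},
        data={"fps": 6, "motion_bucket_id": 127},  # spec defaults, shown explicitly
        timeout=600,
    )
resp.raise_for_status()

# VideoResponse: {"frames": [[{"url": ..., "seed": ..., "nsfw": ...}, ...], ...]}
for batch in resp.json()["frames"]:
    for frame in batch:
        print(frame["url"])
```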
+ operationId: genTextToSpeech + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/TextToSpeechParams' + required: true + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/AudioResponse' + x-speakeasy-name-override: data + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '500': + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '422': + description: Validation Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPValidationError' + security: + - HTTPBearer: [] + x-speakeasy-name-override: textToSpeech + /health: + get: + summary: Health + operationId: health + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/HealthCheck' + /hardware/info: + get: + summary: Hardware Info + operationId: hardware_info + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/HardwareInformation' + /hardware/stats: + get: + summary: Hardware Stats + operationId: hardware_stats + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/HardwareStats' +components: + schemas: + APIError: + properties: + msg: + type: string + title: Msg + description: The error message. + type: object + required: + - msg + title: APIError + description: API error response model. + AudioResponse: + properties: + audio: + allOf: + - $ref: '#/components/schemas/MediaURL' + description: The generated audio. + type: object + required: + - audio + title: AudioResponse + description: Response model for audio generation. + Body_genAudioToText: + properties: + audio: + type: string + format: binary + title: Audio + description: Uploaded audio file to be transcribed. + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for transcription. + default: '' + return_timestamps: + type: string + title: Return Timestamps + description: 'Return timestamps for the transcribed text. Supported values: + ''sentence'', ''word'', or a string boolean (''true'' or ''false''). Default + is ''true'' (''sentence''). ''false'' means no timestamps. ''word'' means + word-based timestamps.' + default: 'true' + metadata: + type: string + title: Metadata + description: Additional job information to be passed to the pipeline. + default: '{}' + type: object + required: + - audio + title: Body_genAudioToText + Body_genImageToImage: + properties: + prompt: + type: string + title: Prompt + description: Text prompt(s) to guide image generation. + image: + type: string + format: binary + title: Image + description: Uploaded image to modify with the pipeline. + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for image generation. + default: '' + loras: + type: string + title: Loras + description: 'A LoRA (Low-Rank Adaptation) model and its corresponding weight + for image generation. Example: { "latent-consistency/lcm-lora-sdxl": 1.0, + "nerijs/pixel-art-xl": 1.2}.' + default: '' + strength: + type: number + title: Strength + description: Degree of transformation applied to the reference image (0 + to 1). 
+ default: 0.8 + guidance_scale: + type: number + title: Guidance Scale + description: Encourages model to generate images closely linked to the text + prompt (higher values may reduce image quality). + default: 7.5 + image_guidance_scale: + type: number + title: Image Guidance Scale + description: Degree to which the generated image is pushed towards the initial + image. + default: 1.5 + negative_prompt: + type: string + title: Negative Prompt + description: Text prompt(s) to guide what to exclude from image generation. + Ignored if guidance_scale < 1. + default: '' + safety_check: + type: boolean + title: Safety Check + description: Perform a safety check to estimate if generated images could + be offensive or harmful. + default: true + seed: + type: integer + title: Seed + description: Seed for random number generation. + num_inference_steps: + type: integer + title: Num Inference Steps + description: Number of denoising steps. More steps usually lead to higher + quality images but slower inference. Modulated by strength. + default: 100 + num_images_per_prompt: + type: integer + title: Num Images Per Prompt + description: Number of images to generate per prompt. + default: 1 + type: object + required: + - prompt + - image + title: Body_genImageToImage + Body_genImageToText: + properties: + image: + type: string + format: binary + title: Image + description: Uploaded image to transform with the pipeline. + prompt: + type: string + title: Prompt + description: Text prompt(s) to guide transformation. + default: '' + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for transformation. + default: '' + type: object + required: + - image + title: Body_genImageToText + Body_genImageToVideo: + properties: + image: + type: string + format: binary + title: Image + description: Uploaded image to generate a video from. + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for video generation. + default: '' + height: + type: integer + title: Height + description: The height in pixels of the generated video. + default: 576 + width: + type: integer + title: Width + description: The width in pixels of the generated video. + default: 1024 + fps: + type: integer + title: Fps + description: The frames per second of the generated video. + default: 6 + motion_bucket_id: + type: integer + title: Motion Bucket Id + description: Used for conditioning the amount of motion for the generation. + The higher the number the more motion will be in the video. + default: 127 + noise_aug_strength: + type: number + title: Noise Aug Strength + description: Amount of noise added to the conditioning image. Higher values + reduce resemblance to the conditioning image and increase motion. + default: 0.02 + safety_check: + type: boolean + title: Safety Check + description: Perform a safety check to estimate if generated images could + be offensive or harmful. + default: true + seed: + type: integer + title: Seed + description: Seed for random number generation. + num_inference_steps: + type: integer + title: Num Inference Steps + description: Number of denoising steps. More steps usually lead to higher + quality images but slower inference. Modulated by strength. + default: 25 + type: object + required: + - image + title: Body_genImageToVideo + Body_genSegmentAnything2: + properties: + image: + type: string + format: binary + title: Image + description: Image to segment. 
+        model_id:
+          type: string
+          title: Model Id
+          description: Hugging Face model ID used for image segmentation.
+          default: ''
+        point_coords:
+          type: string
+          title: Point Coords
+          description: Nx2 array of point prompts to the model, where each point is
+            in (X,Y) in pixels.
+        point_labels:
+          type: string
+          title: Point Labels
+          description: Labels for the point prompts, where 1 indicates a foreground
+            point and 0 indicates a background point.
+        box:
+          type: string
+          title: Box
+          description: A length 4 array given as a box prompt to the model, in XYXY
+            format.
+        mask_input:
+          type: string
+          title: Mask Input
+          description: A low-resolution mask input to the model, typically from a
+            previous prediction iteration, with the form 1xHxW (H=W=256 for SAM).
+        multimask_output:
+          type: boolean
+          title: Multimask Output
+          description: If true, the model will return three masks for ambiguous input
+            prompts, often producing better masks than a single prediction.
+          default: true
+        return_logits:
+          type: boolean
+          title: Return Logits
+          description: If true, returns un-thresholded mask logits instead of a binary
+            mask.
+          default: true
+        normalize_coords:
+          type: boolean
+          title: Normalize Coords
+          description: If true, the point coordinates will be normalized to the range
+            [0,1], with point_coords expected to be with respect to image dimensions.
+          default: true
+      type: object
+      required:
+        - image
+      title: Body_genSegmentAnything2
+    Body_genUpscale:
+      properties:
+        prompt:
+          type: string
+          title: Prompt
+          description: Text prompt(s) to guide upscaled image generation.
+        image:
+          type: string
+          format: binary
+          title: Image
+          description: Uploaded image to modify with the pipeline.
+        model_id:
+          type: string
+          title: Model Id
+          description: Hugging Face model ID used for upscaled image generation.
+          default: ''
+        safety_check:
+          type: boolean
+          title: Safety Check
+          description: Perform a safety check to estimate if generated images could
+            be offensive or harmful.
+          default: true
+        seed:
+          type: integer
+          title: Seed
+          description: Seed for random number generation.
+        num_inference_steps:
+          type: integer
+          title: Num Inference Steps
+          description: Number of denoising steps. More steps usually lead to higher
+            quality images but slower inference. Modulated by strength.
+          default: 75
+      type: object
+      required:
+        - prompt
+        - image
+      title: Body_genUpscale
+    Chunk:
+      properties:
+        timestamp:
+          items: {}
+          type: array
+          title: Timestamp
+          description: The timestamp of the chunk.
+        text:
+          type: string
+          title: Text
+          description: The text of the chunk.
+      type: object
+      required:
+        - timestamp
+        - text
+      title: Chunk
+      description: A chunk of text with a timestamp.
+    GPUComputeInfo:
+      properties:
+        id:
+          type: string
+          title: Id
+        name:
+          type: string
+          title: Name
+        memory_total:
+          type: integer
+          title: Memory Total
+        memory_free:
+          type: integer
+          title: Memory Free
+        major:
+          type: integer
+          title: Major
+        minor:
+          type: integer
+          title: Minor
+      type: object
+      required:
+        - id
+        - name
+        - memory_total
+        - memory_free
+        - major
+        - minor
+      title: GPUComputeInfo
+      description: Model for detailed GPU compute information.
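+    # Illustrative example (not part of the generated schema; all values are
+    # placeholders): /hardware/info returns a HardwareInformation object whose
+    # gpu_info map holds one GPUComputeInfo entry per device, keyed by a GPU
+    # identifier (commonly the device index). major/minor are assumed here to
+    # be the CUDA compute capability of the device:
+    #   {
+    #     "pipeline": "text-to-image",
+    #     "model_id": "org/example-model",
+    #     "gpu_info": {
+    #       "0": {
+    #         "id": "GPU-0",
+    #         "name": "Example GPU",
+    #         "memory_total": 25769803776,
+    #         "memory_free": 24000000000,
+    #         "major": 8,
+    #         "minor": 9
+    #       }
+    #     }
+    #   }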
+ GPUUtilizationInfo: + properties: + id: + type: string + title: Id + name: + type: string + title: Name + memory_total: + type: integer + title: Memory Total + memory_free: + type: integer + title: Memory Free + utilization_compute: + type: integer + title: Utilization Compute + utilization_memory: + type: integer + title: Utilization Memory + type: object + required: + - id + - name + - memory_total + - memory_free + - utilization_compute + - utilization_memory + title: GPUUtilizationInfo + description: Model for GPU utilization statistics. + HTTPError: + properties: + detail: + allOf: + - $ref: '#/components/schemas/APIError' + description: Detailed error information. + type: object + required: + - detail + title: HTTPError + description: HTTP error response model. + HTTPValidationError: + properties: + detail: + items: + $ref: '#/components/schemas/ValidationError' + type: array + title: Detail + type: object + title: HTTPValidationError + HardwareInformation: + properties: + pipeline: + type: string + title: Pipeline + model_id: + type: string + title: Model Id + gpu_info: + additionalProperties: + $ref: '#/components/schemas/GPUComputeInfo' + type: object + title: Gpu Info + type: object + required: + - pipeline + - model_id + - gpu_info + title: HardwareInformation + description: Response model for GPU information. + HardwareStats: + properties: + pipeline: + type: string + title: Pipeline + model_id: + type: string + title: Model Id + gpu_stats: + additionalProperties: + $ref: '#/components/schemas/GPUUtilizationInfo' + type: object + title: Gpu Stats + type: object + required: + - pipeline + - model_id + - gpu_stats + title: HardwareStats + description: Response model for real-time GPU statistics. + HealthCheck: + properties: + status: + type: string + enum: + - OK + - ERROR + - IDLE + title: Status + description: The health status of the pipeline + type: object + required: + - status + title: HealthCheck + ImageResponse: + properties: + images: + items: + $ref: '#/components/schemas/Media' + type: array + title: Images + description: The generated images. + type: object + required: + - images + title: ImageResponse + description: Response model for image generation. + ImageToTextResponse: + properties: + text: + type: string + title: Text + description: The generated text. + type: object + required: + - text + title: ImageToTextResponse + description: Response model for text generation. 
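+    # Illustrative example (not part of the generated schema; values are
+    # placeholders): the 400/401/413/415/500 responses above all share the
+    # HTTPError envelope, i.e. an APIError nested under "detail":
+    #   { "detail": { "msg": "error message" } }
+    # whereas 422 responses use HTTPValidationError, a list of ValidationError
+    # items:
+    #   { "detail": [ { "loc": ["body", "prompt"], "msg": "field required", "type": "missing" } ] }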
+ LLMChoice: + properties: + index: + type: integer + title: Index + finish_reason: + type: string + title: Finish Reason + default: '' + delta: + allOf: + - $ref: '#/components/schemas/LLMMessage' + message: + allOf: + - $ref: '#/components/schemas/LLMMessage' + type: object + required: + - index + title: LLMChoice + LLMMessage: + properties: + role: + type: string + title: Role + content: + type: string + title: Content + type: object + required: + - role + - content + title: LLMMessage + LLMRequest: + properties: + messages: + items: + $ref: '#/components/schemas/LLMMessage' + type: array + title: Messages + model: + type: string + title: Model + default: '' + temperature: + type: number + title: Temperature + default: 0.7 + max_tokens: + type: integer + title: Max Tokens + default: 256 + top_p: + type: number + title: Top P + default: 1.0 + top_k: + type: integer + title: Top K + default: -1 + stream: + type: boolean + title: Stream + default: false + type: object + required: + - messages + title: LLMRequest + LLMResponse: + properties: + id: + type: string + title: Id + model: + type: string + title: Model + created: + type: integer + title: Created + usage: + $ref: '#/components/schemas/LLMTokenUsage' + choices: + items: + $ref: '#/components/schemas/LLMChoice' + type: array + title: Choices + type: object + required: + - id + - model + - created + - usage + - choices + title: LLMResponse + LLMTokenUsage: + properties: + prompt_tokens: + type: integer + title: Prompt Tokens + completion_tokens: + type: integer + title: Completion Tokens + total_tokens: + type: integer + title: Total Tokens + type: object + required: + - prompt_tokens + - completion_tokens + - total_tokens + title: LLMTokenUsage + LiveVideoToVideoParams: + properties: + subscribe_url: + type: string + title: Subscribe Url + description: Source URL of the incoming stream to subscribe to. + publish_url: + type: string + title: Publish Url + description: Destination URL of the outgoing stream to publish. + control_url: + type: string + title: Control Url + description: URL for subscribing via Trickle protocol for updates in the + live video-to-video generation params. + default: '' + events_url: + type: string + title: Events Url + description: URL for publishing events via Trickle protocol for pipeline + status and logs. + default: '' + model_id: + type: string + title: Model Id + description: Name of the pipeline to run in the live video to video job. + Notice that this is named model_id for consistency with other routes, + but it does not refer to a Hugging Face model ID. The exact model(s) depends + on the pipeline implementation and might be configurable via the `params` + argument. + default: '' + params: + type: object + title: Params + description: Initial parameters for the pipeline. 
+ default: {} + type: object + required: + - subscribe_url + - publish_url + title: LiveVideoToVideoParams + LiveVideoToVideoResponse: + properties: + subscribe_url: + type: string + title: Subscribe Url + description: Source URL of the incoming stream to subscribe to + publish_url: + type: string + title: Publish Url + description: Destination URL of the outgoing stream to publish to + control_url: + type: string + title: Control Url + description: URL for updating the live video-to-video generation + default: '' + events_url: + type: string + title: Events Url + description: URL for subscribing to events for pipeline status and logs + default: '' + type: object + required: + - subscribe_url + - publish_url + title: LiveVideoToVideoResponse + description: Response model for live video-to-video generation. + MasksResponse: + properties: + masks: + type: string + title: Masks + description: The generated masks. + scores: + type: string + title: Scores + description: The model's confidence scores for each generated mask. + logits: + type: string + title: Logits + description: The raw, unnormalized predictions (logits) for the masks. + type: object + required: + - masks + - scores + - logits + title: MasksResponse + description: Response model for object segmentation. + Media: + properties: + url: + type: string + title: Url + description: The URL where the media can be accessed. + seed: + type: integer + title: Seed + description: The seed used to generate the media. + nsfw: + type: boolean + title: Nsfw + description: Whether the media was flagged as NSFW. + type: object + required: + - url + - seed + - nsfw + title: Media + description: A media object containing information about the generated media. + MediaURL: + properties: + url: + type: string + title: Url + description: The URL where the media can be accessed. + type: object + required: + - url + title: MediaURL + description: A URL from which media can be accessed. + TextResponse: + properties: + text: + type: string + title: Text + description: The generated text. + chunks: + items: + $ref: '#/components/schemas/Chunk' + type: array + title: Chunks + description: The generated text chunks. + type: object + required: + - text + - chunks + title: TextResponse + description: Response model for text generation. + TextToImageParams: + properties: + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for image generation. + default: '' + loras: + type: string + title: Loras + description: 'A LoRA (Low-Rank Adaptation) model and its corresponding weight + for image generation. Example: { "latent-consistency/lcm-lora-sdxl": 1.0, + "nerijs/pixel-art-xl": 1.2}.' + default: '' + prompt: + type: string + title: Prompt + description: Text prompt(s) to guide image generation. Separate multiple + prompts with '|' if supported by the model. + height: + type: integer + title: Height + description: The height in pixels of the generated image. + default: 576 + width: + type: integer + title: Width + description: The width in pixels of the generated image. + default: 1024 + guidance_scale: + type: number + title: Guidance Scale + description: Encourages model to generate images closely linked to the text + prompt (higher values may reduce image quality). + default: 7.5 + negative_prompt: + type: string + title: Negative Prompt + description: Text prompt(s) to guide what to exclude from image generation. + Ignored if guidance_scale < 1. 
+ default: '' + safety_check: + type: boolean + title: Safety Check + description: Perform a safety check to estimate if generated images could + be offensive or harmful. + default: true + seed: + type: integer + title: Seed + description: Seed for random number generation. + num_inference_steps: + type: integer + title: Num Inference Steps + description: Number of denoising steps. More steps usually lead to higher + quality images but slower inference. Modulated by strength. + default: 50 + num_images_per_prompt: + type: integer + title: Num Images Per Prompt + description: Number of images to generate per prompt. + default: 1 + type: object + required: + - prompt + title: TextToImageParams + TextToSpeechParams: + properties: + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for text to speech generation. + default: '' + text: + type: string + title: Text + description: Text input for speech generation. + default: '' + description: + type: string + title: Description + description: Description of speaker to steer text to speech generation. + default: A male speaker delivers a slightly expressive and animated speech + with a moderate speed and pitch. + type: object + title: TextToSpeechParams + ValidationError: + properties: + loc: + items: + anyOf: + - type: string + - type: integer + type: array + title: Location + msg: + type: string + title: Message + type: + type: string + title: Error Type + type: object + required: + - loc + - msg + - type + title: ValidationError + VideoResponse: + properties: + frames: + items: + items: + $ref: '#/components/schemas/Media' + type: array + type: array + title: Frames + description: The generated video frames. + type: object + required: + - frames + title: VideoResponse + description: Response model for video generation. 
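+    # Illustrative example (host, token, and prompt are placeholders): a
+    # minimal authenticated call to the /llm route defined above, using the
+    # HTTPBearer scheme declared below and an LLMRequest body ("messages" is
+    # the only required field; the other parameters fall back to their
+    # defaults):
+    #   curl -X POST "https://<gateway-host>/llm" \
+    #     -H "Authorization: Bearer <api-token>" \
+    #     -H "Content-Type: application/json" \
+    #     -d '{"messages": [{"role": "user", "content": "Hello"}]}'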
+ securitySchemes: + HTTPBearer: + type: http + scheme: bearer diff --git a/docs.json b/docs.json new file mode 100644 index 00000000..c51f738d --- /dev/null +++ b/docs.json @@ -0,0 +1,3233 @@ +{ + "$schema": "https://mintlify.com/docs.json", + "theme": "palm", + "name": "Livepeer Docs", + "colors": { + "primary": "#18794E", + "light": "#2b9a66", + "dark": "#18794E" + }, + "favicon": "/favicon.png", + "navigation": { + "versions": [ + { + "version": "v2", + "default": true, + "languages": [ + { + "language": "en", + "tabs": [ + { + "tab": "Internal Hub", + "hidden": true, + "icon": "info-circle", + "anchors": [ + { + "anchor": "Internal Hub", + "icon": "info-circle", + "groups": [ + { + "group": "Internal Hub", + "pages": [ + "v2/pages/09_internal/internal-overview", + "v2/pages/09_internal/docs-status", + "v2/pages/09_internal/strategic-alignment", + "v2/pages/09_internal/docs-philosophy", + "v2/pages/09_internal/definitions", + "v2/pages/09_internal/personas", + "v2/pages/09_internal/ecosystem", + "v2/pages/09_internal/references" + ] + } + ] + } + ] + }, + { + "tab": "Home", + "icon": "house-heart", + "anchors": [ + { + "anchor": "Home", + "icon": "house-heart", + "groups": [ + { + "group": "Home", + "icon": "house-heart", + "pages": [ + "v2/pages/00_home/Landing", + "v2/pages/00_home/home/livepeer-tl-dr", + "v2/pages/00_home/home/trending-at-livepeer" + ] + }, + { + "group": "Livepeer Showcase", + "icon": "clapperboard-play", + "pages": [ + "v2/pages/00_home/project-showcase/projects-built-on-livepeer", + "v2/pages/00_home/project-showcase/livepeer-applications", + "v2/pages/00_home/project-showcase/industry-verticals" + ] + }, + { + "group": "Get Started", + "icon": "arrow-right-to-bracket", + "pages": [ + "v2/pages/00_home/get-started/use-livepeer", + "v2/pages/00_home/get-started/stream-video-quickstart", + "v2/pages/00_home/get-started/livepeer-ai-quickstart", + "v2/pages/00_home/get-started/build-on-livepeer" + ] + } + ] + }, + { + "anchor": "Reference HUB", + "icon": "books", + "pages": ["v2/pages/07_resources/redirect"] + }, + { + "anchor": "Help Center", + "icon": "comments-question-check", + "pages": ["v2/pages/08_help/redirect"] + }, + { + "anchor": " ", + "icon": "-", + "href": " " + } + ] + }, + { + "tab": "About", + "icon": "graduation-cap", + "anchors": [ + { + "anchor": "About Livepeer", + "icon": "graduation-cap", + "groups": [ + { + "group": "About Livepeer", + "icon": "graduation-cap", + "pages": [ + "v2/pages/01_about/about-livepeer/livepeer-overview", + "v2/pages/01_about/about-livepeer/why-livepeer", + "v2/pages/01_about/about-livepeer/livepeer-evolution", + "v2/pages/01_about/about-livepeer/livepeer-ecosystem" + ] + }, + { + "group": "Livepeer Protocol", + "icon": "cube", + "pages": [ + "v2/pages/01_about/livepeer-protocol/protocol-overview", + "v2/pages/01_about/livepeer-protocol/livepeer-whitepaper", + "v2/pages/01_about/livepeer-protocol/technical-overview" + ] + }, + { + "group": "Livepeer Network", + "icon": "circle-nodes", + "pages": [ + "v2/pages/01_about/livepeer-network/actor-overview", + "v2/pages/01_about/livepeer-network/livepeer-token-economics", + "v2/pages/01_about/livepeer-network/livepeer-governance" + ] + } + ] + }, + { + "anchor": "Reference HUB", + "icon": "books", + "pages": ["v2/pages/07_resources/redirect"] + }, + { + "anchor": "Help Center", + "icon": "comments-question-check", + "pages": ["v2/pages/08_help/redirect"] + }, + { + "anchor": " ", + "icon": "-", + "href": " " + } + ] + }, + { + "tab": "Community", + "icon": "people-group", + 
"anchors": [ + { + "anchor": "Community", + "icon": "people-group", + "groups": [ + { + "group": "Livepeer Community", + "icon": "people-group", + "pages": [ + "v2/pages/02_community/community-home", + "v2/pages/02_community/livepeer-community/livepeer-Latest-Topics", + "v2/pages/02_community/livepeer-community/community-guidelines" + ] + }, + { + "group": "Livepeer Connect", + "icon": "hashtag", + "pages": [ + "v2/pages/02_community/livepeer-connect/news-and-socials", + "v2/pages/02_community/livepeer-connect/events-and-community-streams", + "v2/pages/02_community/livepeer-connect/forums-and-discussions" + ] + }, + { + "group": "Livepeer Contribute", + "icon": "door-open", + "pages": [ + "v2/pages/02_community/livepeer-contribute/contribute", + "v2/pages/02_community/livepeer-contribute/opportunities", + "v2/pages/02_community/livepeer-contribute/build-livepeer" + ] + }, + { + "group": "[MOVE HERE] Help Center", + "icon": "comments-question-check", + "hidden": true, + "pages": [ + "v2/pages/02_community/livepeer-community/trending-test" + ] + }, + { + "group": "[TO DELETE] Tests", + "pages": [ + "v2/pages/02_community/livepeer-community/trending-test" + ] + } + ] + }, + { + "anchor": "Reference HUB", + "icon": "books", + "pages": ["v2/pages/07_resources/redirect"] + }, + { + "anchor": "Help Center", + "icon": "comments-question-check", + "pages": ["v2/pages/08_help/redirect"] + }, + { + "anchor": " ", + "icon": "-", + "href": " " + } + ] + }, + { + "tab": "Developers", + "icon": "display-code", + "anchors": [ + { + "anchor": "Developers", + "icon": "display-code", + "groups": [ + { + "group": "Building on Livepeer", + "icon": "code", + "pages": [ + "v2/pages/03_developers/developer-home", + "v2/pages/03_developers/building-on-livepeer/developer-guide" + ] + }, + { + "group": "Quickstart", + "icon": "fast-forward", + "pages": [ + { + "group": "Real-time Video", + "pages": [ + "v2/pages/03_developers/building-on-livepeer/quick-starts/livepeer-ai", + "v2/pages/03_developers/livepeer-real-time-video/video-streaming-on-livepeer/README.mdx" + ] + }, + { + "group": "AI Pipelines", + "pages": [ + "v2/pages/03_developers/building-on-livepeer/quick-starts/video-streaming", + "v2/pages/03_developers/building-on-livepeer/quick-starts/livepeer-ai" + ] + } + ] + }, + { + "group": "Developer Platforms", + "icon": "gear-code", + "pages": [ + "v2/pages/03_developers/developer-platforms/builder-hub", + { + "group": "Daydream", + "pages": [ + "v2/pages/03_developers/developer-platforms/daydream/daydream" + ] + }, + { + "group": "Livepeer Studio", + "pages": [ + "v2/pages/03_developers/developer-platforms/livepeer-studio/livepeer-studio" + ] + }, + { + "group": "Frameworks", + "pages": [ + "v2/pages/03_developers/developer-platforms/frameworks/frameworks" + ] + }, + { + "group": "Streamplace", + "pages": [ + "v2/pages/03_developers/developer-platforms/streamplace/streamplace" + ] + }, + { + "group": "All Ecosystem Products", + "pages": [ + "v2/pages/03_developers/developer-platforms/all-ecosystem/ecosystem-products/ecosystem-products" + ] + } + ] + }, + { + "group": "Developer Tools", + "icon": "tools", + "pages": [ + "v2/pages/03_developers/developer-tools/tooling-hub", + "v2/pages/03_developers/developer-tools/livepeer-explorer", + "v2/pages/03_developers/developer-tools/livepeer-cloud", + "v2/pages/03_developers/developer-tools/dashboards" + ] + }, + { + "group": "Guides & Tutorials", + "icon": "laptop-file", + "pages": [ + "v2/pages/03_developers/guides-and-resources/developer-guides", + 
"v2/pages/03_developers/guides-and-resources/resources", + "v2/pages/03_developers/guides-and-resources/developer-help", + "v2/pages/03_developers/guides-and-resources/contribution-guide" + ] + }, + { + "group": "Builder Opportunities", + "icon": "lightbulb", + "pages": [ + "v2/pages/03_developers/builder-opportunities/dev-programs", + "v2/pages/03_developers/builder-opportunities/livepeer-rfps" + ] + }, + { + "group": "Technical References", + "icon": "books", + "pages": [ + { + "group": "SDKs & APIs", + "pages": [ + "v2/pages/03_developers/technical-references-sdks.-and-apis/sdks", + "v2/pages/03_developers/technical-references-sdks.-and-apis/apis" + ] + }, + "v2/pages/03_developers/technical-references/awesome-livepeer", + "v2/pages/03_developers/technical-references/wiki", + "v2/pages/03_developers/technical-references/deepwiki" + ] + }, + { + "group": "Changelog & Migrations", + "icon": "swap", + "pages": [ + "v2/pages/07_resources/changelog/changelog", + "v2/pages/07_resources/changelog/migration-guides" + ] + } + ] + }, + { + "anchor": "Reference HUB", + "icon": "books", + "pages": ["v2/pages/07_resources/redirect"] + }, + { + "anchor": "Help Center", + "icon": "comments-question-check", + "pages": ["v2/pages/08_help/redirect"] + }, + { + "anchor": " ", + "icon": "-", + "href": " " + } + ] + }, + { + "tab": "Gateways", + "icon": "torii-gate", + "anchors": [ + { + "anchor": "Gateways", + "icon": "torii-gate", + "groups": [ + { + "group": "About Gateways", + "icon": "graduation-cap", + "pages": [ + "v2/pages/04_gateways/gateways-home", + "v2/pages/04_gateways/about-gateways/gateway-explainer", + "v2/pages/04_gateways/about-gateways/gateway-functions", + "v2/pages/04_gateways/about-gateways/gateway-architecture", + "v2/pages/04_gateways/about-gateways/gateway-economics" + ] + }, + { + "group": "Gateway Services & Providers", + "icon": "wand-magic-sparkles", + "pages": [ + "v2/pages/04_gateways/using-gateways/choosing-a-gateway", + "v2/pages/04_gateways/using-gateways/gateway-providers", + { + "group": "Provider Docs", + "pages": [ + "v2/pages/04_gateways/using-gateways/gateway-providers/daydream-gateway", + "v2/pages/04_gateways/using-gateways/gateway-providers/livepeer-studio-gateway", + "v2/pages/04_gateways/using-gateways/gateway-providers/cloud-spe-gateway", + "v2/pages/04_gateways/using-gateways/gateway-providers/streamplace" + ] + } + ] + }, + { + "group": "Run Your Own Gateway", + "icon": "sign-posts-wrench", + "pages": [ + "v2/pages/04_gateways/run-a-gateway/run-a-gateway", + "v2/pages/04_gateways/run-a-gateway/why-run-a-gateway", + { + "group": "Requirements Check & Set", + "pages": [ + "v2/pages/04_gateways/run-a-gateway/requirements/setup", + "v2/pages/04_gateways/run-a-gateway/requirements/on-chain setup/on-chain", + "v2/pages/04_gateways/run-a-gateway/requirements/on-chain setup/fund-gateway" + ] + }, + { + "group": "Installation", + "pages": [ + "v2/pages/04_gateways/run-a-gateway/install/install-overview", + "v2/pages/04_gateways/run-a-gateway/install/docker-install", + "v2/pages/04_gateways/run-a-gateway/install/linux-install", + "v2/pages/04_gateways/run-a-gateway/install/windows-install", + "v2/pages/04_gateways/run-a-gateway/install/community-projects" + ] + }, + { + "group": "Configuration", + "pages": [ + "v2/pages/04_gateways/run-a-gateway/configure/configuration-overview", + "v2/pages/04_gateways/run-a-gateway/configure/video-configuration", + "v2/pages/04_gateways/run-a-gateway/configure/ai-configuration", + 
"v2/pages/04_gateways/run-a-gateway/configure/dual-configuration", + "v2/pages/04_gateways/run-a-gateway/configure/pricing-configuration" + ] + }, + { + "group": "Testing", + "pages": [ + "v2/pages/04_gateways/run-a-gateway/test/test-gateway", + "v2/pages/04_gateways/run-a-gateway/test/publish-content", + "v2/pages/04_gateways/run-a-gateway/test/playback-content" + ] + }, + { + "group": "Network Connect", + "tag": "Go Live!", + "pages": [ + "v2/pages/04_gateways/run-a-gateway/connect/lp-marketplace", + "v2/pages/04_gateways/run-a-gateway/connect/discover-offerings", + "v2/pages/04_gateways/run-a-gateway/connect/connect-with-offerings" + ] + }, + { + "group": "Monitor & Optimise", + "pages": [ + "v2/pages/04_gateways/run-a-gateway/monitor/monitor-and-optimise" + ] + } + ] + }, + { + "group": "Gateway Tools & Dashboards", + "icon": "tools", + "pages": [ + "v2/pages/04_gateways/gateway-tools/explorer", + "v2/pages/04_gateways/gateway-tools/livepeer-tools" + ] + }, + { + "group": "Gateway Guides & Resources", + "icon": "laptop-file", + "pages": [ + "v2/pages/04_gateways/guides-and-resources/community-guides", + "v2/pages/04_gateways/guides-and-resources/community-projects", + "v2/pages/04_gateways/guides-and-resources/faq" + ] + }, + { + "group": "Technical References", + "icon": "code", + "pages": [ + "v2/pages/04_gateways/references/technical-architecture", + "v2/pages/04_gateways/references/configuration-flags", + "v2/pages/04_gateways/references/arbitrum-rpc", + { + "group": "API Reference [AI]", + "pages": [ + "v2/pages/04_gateways/references/api-reference/AI-API/ai", + "v2/pages/04_gateways/references/api-reference/AI-API/text-to-image", + "v2/pages/04_gateways/references/api-reference/AI-API/image-to-image", + "v2/pages/04_gateways/references/api-reference/AI-API/image-to-video", + "v2/pages/04_gateways/references/api-reference/AI-API/upscale", + "v2/pages/04_gateways/references/api-reference/AI-API/audio-to-text", + "v2/pages/04_gateways/references/api-reference/AI-API/segment-anything-2", + "v2/pages/04_gateways/references/api-reference/AI-API/llm", + "v2/pages/04_gateways/references/api-reference/AI-API/image-to-text", + "v2/pages/04_gateways/references/api-reference/AI-API/live-video-to-video", + "v2/pages/04_gateways/references/api-reference/AI-API/text-to-speech", + "v2/pages/04_gateways/references/api-reference/AI-API/health", + "v2/pages/04_gateways/references/api-reference/AI-API/hardware-info", + "v2/pages/04_gateways/references/api-reference/AI-API/hardware-stats" + ] + } + ] + } + ] + }, + { + "anchor": "Quickstart", + "icon": "fast-forward", + "pages": ["v2/pages/04_gateways/quickstart"] + }, + { + "anchor": "Resources", + "icon": "books", + "pages": ["v2/pages/04_gateways/quickstart"] + }, + { + "anchor": " ", + "icon": "-", + "pages": [" "] + } + ] + }, + { + "tab": "GPU Nodes", + "icon": "microchip", + "anchors": [ + { + "anchor": "GPU Nodes", + "icon": "microchip", + "groups": [ + { + "group": "About Orchestrators (GPU Nodes)", + "icon": "graduation-cap", + "pages": [ + "v2/pages/05_orchestrators/orchestrators-home", + "v2/pages/05_orchestrators/about-orchestrators/overview", + { + "group": "Orchestrator Functions", + "pages": [ + "v2/pages/05_orchestrators/about-orchestrators/orchestrator-functions/transcoding", + "v2/pages/05_orchestrators/about-orchestrators/orchestrator-functions/ai-pipelines" + ] + } + ] + }, + { + "group": "Set up an Orchestrator", + "icon": "gear-code", + "pages": [ + "v2/pages/05_orchestrators/setting-up-an-orchestrator/hardware-requirements", + 
"v2/pages/05_orchestrators/setting-up-an-orchestrator/orchestrator-stats", + { + "group": "Setting Up An Orchestrator", + "pages": [ + "v2/pages/05_orchestrators/setting-up-an-orchestrator/setting-up-an-orchestrator/quickstart-add-your-gpu-to-livepeer", + "v2/pages/05_orchestrators/setting-up-an-orchestrator/join-a-pool", + "v2/pages/05_orchestrators/setting-up-an-orchestrator/setting-up-an-orchestrator/data-centres-and-large-scale-hardware-providers" + ] + } + ] + }, + { + "group": "Orchestrator Tooling", + "icon": "tools", + "pages": [ + "v2/pages/05_orchestrators/orchestrator-tooling/orchestrator-tools", + "v2/pages/05_orchestrators/orchestrator-tooling/orchestrator-dashboards" + ] + }, + { + "group": "Orchestrator Guides & Resources", + "icon": "laptop-file", + "pages": [ + "v2/pages/05_orchestrators/orchestrator-guides-and-references/orchestrator-guides-and-references", + "v2/pages/05_orchestrators/orchestrator-guides-and-references/orchestrator-resources", + "v2/pages/05_orchestrators/orchestrator-guides-and-references/orchestrator-community-and-help" + ] + } + ] + }, + { + "anchor": "Reference HUB", + "icon": "books", + "pages": ["v2/pages/07_resources/redirect"] + }, + { + "anchor": "Help Center", + "icon": "comments-question-check", + "pages": ["v2/pages/08_help/redirect"] + }, + { + "anchor": " ", + "icon": "-", + "href": " " + } + ] + }, + { + "tab": "Delegators & LPT", + "icon": "hand-holding-dollar", + "anchors": [ + { + "anchor": "Delegators & LPT", + "icon": "hand-holding-dollar", + "groups": [ + { + "group": "About LPT", + "icon": "graduation-cap", + "pages": [ + "v2/pages/06_delegators/token-home", + "v2/pages/06_delegators/about-lpt-livepeer-token/overview", + "v2/pages/06_delegators/about-lpt-livepeer-token/why-have-a-token", + "v2/pages/06_delegators/about-lpt-livepeer-token/livepeer-token-economics", + "v2/pages/06_delegators/about-lpt-livepeer-token/how-to-get-lpt", + "v2/pages/06_delegators/about-lpt-livepeer-token/delegators" + ] + }, + { + "group": "Delegating LPT", + "icon": "money-bill-transfer", + "pages": [ + "v2/pages/06_delegators/delegating-lpt/overview", + "v2/pages/06_delegators/delegating-lpt/delegation-economics", + "v2/pages/06_delegators/delegating-lpt/how-to-delegate-lpt" + ] + }, + { + "group": "Livepeer Governance", + "icon": "box-ballot", + "pages": [ + "v2/pages/06_delegators/livepeer-governance/overview", + "v2/pages/06_delegators/livepeer-governance/livepeer-governance", + "v2/pages/06_delegators/livepeer-governance/livepeer-treasury" + ] + }, + { "group": "Livepeer Treasury", "pages": [] }, + { + "group": "Guides & Resources", + "icon": "books", + "pages": [ + "v2/pages/06_delegators/token-resources/lpt-exchanges", + "v2/pages/06_delegators/token-resources/lpt-eth-usage" + ] + } + ] + }, + { + "anchor": "Reference HUB", + "icon": "books", + "pages": ["v2/pages/07_resources/redirect"] + }, + { + "anchor": "Help Center", + "icon": "comments-question-check", + "pages": ["v2/pages/08_help/redirect"] + }, + { + "anchor": " ", + "icon": "-", + "href": " " + } + ] + }, + { + "tab": "Reference HUB", + "hidden": false, + "icon": "books", + "anchors": [ + { + "anchor": "Reference & Help HUB", + "icon": "books", + "groups": [ + { + "group": "Home", + "icon": "house", + "pages": ["v2/pages/07_resources/resources_hub"] + }, + { + "group": "Documentation Guide", + "icon": "book-open", + "pages": [ + "v2/pages/07_resources/documentation-guide/documentation-overview", + "v2/pages/07_resources/documentation-guide/documentation-guide", + 
"v2/pages/07_resources/documentation-guide/docs-features-and-ai-integrations", + "v2/pages/07_resources/documentation-guide/contribute-to-the-docs" + ] + }, + { + "group": "Livepeer Concepts", + "icon": "graduation-cap", + "pages": [ + "v2/pages/07_resources/concepts/livepeer-core-concepts", + "v2/pages/07_resources/livepeer-glossary", + "v2/pages/07_resources/concepts/livepeer-actors" + ] + }, + { + "group": "Developer References", + "icon": "book", + "pages": ["v2/pages/07_resources/livepeer-glossary"] + }, + { + "group": "Gateway References", + "icon": "wand-magic-sparkles", + "pages": [ + "v2/pages/07_resources/ai-inference-on-livepeer/livepeer-ai/livepeer-ai-content-directory" + ] + }, + { + "group": "Orchestrator References", + "icon": "microchip", + "pages": ["v2/pages/07_resources/livepeer-glossary"] + }, + { + "group": "LPT & Delegator References", + "icon": "hand-holding-dollar", + "pages": ["v2/pages/07_resources/livepeer-glossary"] + }, + { + "group": "Community Resources", + "icon": "", + "pages": ["v2/pages/07_resources/livepeer-glossary"] + }, + { + "group": "Partner Resources", + "icon": "handshake", + "pages": ["v2/pages/07_resources/livepeer-glossary"] + }, + { + "group": "Help Center", + "icon": "comments-question-check", + "pages": ["v2/pages/07_resources/livepeer-glossary"] + }, + { + "group": "Technical References", + "icon": "code", + "pages": [ + { + "group": "Protocol References", + "pages": [] + } + ] + }, + { + "group": "Changelog", + "icon": "swap", + "pages": [ + "v2/pages/00_home/changelog/changelog", + "v2/pages/00_home/changelog/migration-guide" + ] + } + ] + }, + { + "anchor": "Help Center", + "icon": "comments-question-check", + "pages": ["v2/pages/08_help/redirect"] + }, + { + "anchor": " ", + "icon": "-", + "href": " " + } + ] + }, + { + "tab": "Help Center", + "hidden": true, + "icon": "comments-question-check", + "anchors": [ + { + "anchor": "Help Center", + "icon": "comments-question-check", + "groups": [ + { + "group": "Home", + "pages": ["v2/pages/08_help/README"] + }, + { "group": "Delegating LPT", "pages": [] }, + { "group": "Livepeer Governance", "pages": [] }, + { "group": "Livepeer Treasury", "pages": [] }, + { "group": "Token Resources", "pages": [] } + ] + }, + { + "anchor": "Reference HUB", + "icon": "books", + "pages": ["v2/pages/07_resources/redirect"] + }, + { + "anchor": " ", + "icon": "-", + "href": " " + } + ] + } + ] + } + ] + }, + { + "version": "v1", + "languages": [ + { + "language": "en", + "dropdowns": [ + { + "dropdown": "Developers", + "icon": "code", + "anchors": [ + { + "anchor": "Documentation", + "icon": "code", + "groups": [ + { + "group": "Getting Started", + "pages": [ + "v1/developers/introduction", + "v1/developers/quick-start", + "v1/developers/livepeer-studio-cli" + ] + }, + { + "group": "Guides", + "pages": [ + "v1/developers/guides/overview", + { + "group": "Assets", + "icon": "video", + "pages": [ + "v1/developers/guides/upload-video-asset", + "v1/developers/guides/playback-an-asset", + "v1/developers/guides/listen-to-asset-events", + "v1/developers/guides/encrypted-asset", + "v1/developers/guides/thumbnails-vod" + ] + }, + { + "group": "Livestream", + "icon": "camera", + "pages": [ + "v1/developers/guides/create-livestream", + "v1/developers/guides/playback-a-livestream", + "v1/developers/guides/stream-via-obs", + "v1/developers/guides/livestream-from-browser", + "v1/developers/guides/optimize-latency-of-a-livestream", + "v1/developers/guides/monitor-stream-health", + 
"v1/developers/guides/listen-to-stream-events", + "v1/developers/guides/multistream", + "v1/developers/guides/clip-a-livestream", + "v1/developers/guides/thumbnails-live" + ] + }, + { + "group": "Access control", + "icon": "lock", + "pages": [ + "v1/developers/guides/access-control-webhooks", + "v1/developers/guides/access-control-jwt" + ] + }, + { + "group": "Webhooks", + "icon": "bell", + "pages": [ + "v1/developers/guides/setup-and-listen-to-webhooks" + ] + }, + { + "group": "Transcode API", + "icon": "photo-film", + "pages": [ + "v1/developers/guides/transcode-video-storj", + "v1/developers/guides/transcode-video-w3s" + ] + }, + { + "group": "Viewership Metrics", + "icon": "chart-bar", + "pages": [ + "v1/developers/guides/get-engagement-analytics-via-api", + "v1/developers/guides/get-engagement-analytics-via-grafana", + "v1/developers/guides/get-engagement-analytics-via-timeplus" + ] + }, + { + "group": "Projects", + "icon": "folder-open", + "pages": ["v1/developers/guides/managing-projects"] + }, + { + "group": "Integrations", + "icon": "puzzle-piece", + "pages": [ + "v1/developers/tutorials/decentralized-app-with-fvm", + "v1/developers/tutorials/token-gate-videos-with-lit", + { + "group": "Storage Provider Integration", + "pages": [ + "v1/developers/tutorials/upload-playback-videos-4everland", + "v1/developers/tutorials/upload-playback-videos-on-arweave", + "v1/developers/tutorials/upload-playback-videos-on-ipfs" + ] + } + ] + } + ] + }, + { + "group": "References", + "pages": [ + "v1/references/api-support-matrix", + { + "group": "Livepeer Node Software", + "icon": "golang", + "pages": [ + "v1/references/go-livepeer/bandwidth-requirements", + "v1/references/go-livepeer/cli-reference", + "v1/references/go-livepeer/gpu-support", + "v1/references/go-livepeer/hardware-requirements", + "v1/references/go-livepeer/prometheus-metrics" + ] + }, + "v1/references/contract-addresses", + "v1/references/example-applications", + "v1/references/awesome-livepeer", + { + "group": "FAQs", + "icon": "book", + "pages": [ + "v1/references/knowledge-base/livestream", + "v1/references/knowledge-base/playback", + "v1/references/knowledge-base/vod" + ] + } + ] + } + ] + }, + { + "anchor": "API Reference", + "icon": "rectangle-terminal", + "groups": [ + { + "group": "Overview", + "pages": [ + "v1/api-reference/overview/introduction", + "v1/api-reference/overview/authentication" + ] + }, + { + "group": "APIs", + "pages": [ + { + "group": "Asset", + "icon": "video", + "pages": [ + "v1/api-reference/asset/overview", + "v1/api-reference/asset/upload", + "v1/api-reference/asset/upload-via-url", + "v1/api-reference/asset/get", + "v1/api-reference/asset/update", + "v1/api-reference/asset/delete", + "v1/api-reference/asset/get-all" + ] + }, + { + "group": "Livestream", + "icon": "camera", + "pages": [ + "v1/api-reference/stream/overview", + "v1/api-reference/stream/create", + "v1/api-reference/stream/get", + "v1/api-reference/stream/update", + "v1/api-reference/stream/terminate", + "v1/api-reference/stream/add-multistream-target", + "v1/api-reference/stream/delete-multistream-target", + "v1/api-reference/stream/delete", + "v1/api-reference/stream/get-all", + "v1/api-reference/stream/create-clip", + "v1/api-reference/stream/get-clip" + ] + }, + { + "group": "Generate", + "icon": "microchip-ai", + "pages": [ + "v1/api-reference/generate/overview", + "v1/api-reference/generate/audio-to-text", + "v1/api-reference/generate/text-to-image", + "v1/api-reference/generate/image-to-image", + 
"v1/api-reference/generate/image-to-video", + "v1/api-reference/generate/llm", + "v1/api-reference/generate/segment-anything-2", + "v1/api-reference/generate/upscale" + ] + }, + { + "group": "Multistream target", + "icon": "arrows-split-up-and-left", + "pages": [ + "v1/api-reference/multistream/overview", + "v1/api-reference/multistream/create", + "v1/api-reference/multistream/get", + "v1/api-reference/multistream/update", + "v1/api-reference/multistream/delete", + "v1/api-reference/multistream/get-all" + ] + }, + { + "group": "Session", + "icon": "film", + "pages": [ + "v1/api-reference/session/overview", + "v1/api-reference/session/get", + "v1/api-reference/session/get-all", + "v1/api-reference/session/get-recording", + "v1/api-reference/session/get-clip" + ] + }, + { + "group": "Access control", + "icon": "lock", + "pages": [ + "v1/api-reference/signing-key/overview", + "v1/api-reference/signing-key/create", + "v1/api-reference/signing-key/get", + "v1/api-reference/signing-key/update", + "v1/api-reference/signing-key/delete", + "v1/api-reference/signing-key/get-all" + ] + }, + { + "group": "Webhook", + "icon": "bell", + "pages": [ + "v1/api-reference/webhook/overview", + "v1/api-reference/webhook/create", + "v1/api-reference/webhook/get", + "v1/api-reference/webhook/update", + "v1/api-reference/webhook/delete", + "v1/api-reference/webhook/get-all" + ] + }, + { + "group": "Task", + "icon": "gear", + "pages": [ + "v1/api-reference/task/overview", + "v1/api-reference/task/get-all", + "v1/api-reference/task/get" + ] + }, + { + "group": "Playback", + "icon": "play", + "pages": [ + "v1/api-reference/playback/overview", + "v1/api-reference/playback/get" + ] + }, + { + "group": "Transcode", + "icon": "photo-film", + "pages": [ + "v1/api-reference/transcode/overview", + "v1/api-reference/transcode/create" + ] + }, + { + "group": "Viewership", + "icon": "chart-bar", + "pages": [ + "v1/api-reference/viewership/get-realtime-viewership", + "v1/api-reference/viewership/get-viewership-metrics", + "v1/api-reference/viewership/get-usage-metrics", + "v1/api-reference/viewership/get-public-total-views", + "v1/api-reference/viewership/get-creators-metrics" + ] + } + ] + } + ] + }, + { + "anchor": "SDKs", + "icon": "brackets-curly", + "groups": [ + { + "group": "Overview", + "pages": ["v1/sdks/introduction"] + }, + { + "group": "Server-side SDKs", + "pages": [ + "v1/sdks/javascript", + "v1/sdks/go", + "v1/sdks/python" + ] + }, + { + "group": "React Components", + "icon": "circle-play", + "pages": [ + "v1/sdks/react/getting-started", + { + "group": "Player", + "icon": "circle-play", + "pages": [ + "v1/sdks/react/player/Root", + "v1/sdks/react/player/Container", + "v1/sdks/react/player/Video", + "v1/sdks/react/player/Error", + "v1/sdks/react/player/Loading", + "v1/sdks/react/player/Portal", + "v1/sdks/react/player/Poster", + { + "group": "Controls", + "pages": [ + "v1/sdks/react/player/Controls", + "v1/sdks/react/player/Clip", + "v1/sdks/react/player/Fullscreen", + "v1/sdks/react/player/Live", + "v1/sdks/react/player/PictureInPicture", + "v1/sdks/react/player/Play", + "v1/sdks/react/player/RateSelect", + "v1/sdks/react/player/Seek", + "v1/sdks/react/player/Time", + "v1/sdks/react/player/VideoQualitySelect", + "v1/sdks/react/player/Volume" + ] + }, + { + "group": "Functions", + "pages": [ + "v1/sdks/react/player/get-src", + "v1/sdks/react/player/useMediaContext" + ] + } + ] + }, + { + "group": "Broadcast", + "icon": "signal-stream", + "pages": [ + "v1/sdks/react/broadcast/Root", + 
"v1/sdks/react/broadcast/Container", + "v1/sdks/react/broadcast/Video", + "v1/sdks/react/broadcast/Enabled", + "v1/sdks/react/broadcast/Error", + "v1/sdks/react/broadcast/Loading", + "v1/sdks/react/broadcast/Portal", + { + "group": "Controls", + "pages": [ + "v1/sdks/react/broadcast/Controls", + "v1/sdks/react/broadcast/Audio", + "v1/sdks/react/broadcast/Camera", + "v1/sdks/react/broadcast/Fullscreen", + "v1/sdks/react/broadcast/PictureInPicture", + "v1/sdks/react/broadcast/Screenshare", + "v1/sdks/react/broadcast/Source", + "v1/sdks/react/broadcast/Status" + ] + }, + { + "group": "Functions", + "pages": [ + "v1/sdks/react/broadcast/get-ingest", + "v1/sdks/react/broadcast/useBroadcastContext" + ] + } + ] + }, + { + "group": "Examples", + "icon": "clipboard", + "pages": [ + "v1/sdks/react/Player", + "v1/sdks/react/Broadcast" + ] + }, + { + "group": "Migration", + "icon": "right-left", + "pages": [ + "v1/sdks/react/migration/migration-4.x", + { + "group": "Livepeer React (3.x and below)", + "pages": [ + "v1/sdks/react/migration/3.x/getting-started", + "v1/sdks/react/migration/3.x/client", + "v1/sdks/react/migration/3.x/LivepeerConfig", + "v1/sdks/react/migration/3.x/Player", + "v1/sdks/react/migration/3.x/Broadcast", + { + "group": "Asset", + "pages": [ + "v1/sdks/react/migration/3.x/asset/useCreateAsset", + "v1/sdks/react/migration/3.x/asset/useAsset", + "v1/sdks/react/migration/3.x/asset/useUpdateAsset", + "v1/sdks/react/migration/3.x/asset/useAssetMetrics" + ] + }, + { + "group": "Stream", + "pages": [ + "v1/sdks/react/migration/3.x/stream/useCreateStream", + "v1/sdks/react/migration/3.x/stream/useStream", + "v1/sdks/react/migration/3.x/stream/useUpdateStream", + "v1/sdks/react/migration/3.x/stream/useStreamSession", + "v1/sdks/react/migration/3.x/stream/useStreamSessions" + ] + }, + { + "group": "Playback", + "pages": [ + "v1/sdks/react/migration/3.x/playback/usePlaybackInfo" + ] + }, + { + "group": "Constants", + "pages": [ + "v1/sdks/react/migration/3.x/constants/abis", + "v1/sdks/react/migration/3.x/constants/contract-addresses" + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "anchor": "AI Video (Beta)", + "icon": { + "name": "microchip-ai", + "style": "regular" + }, + "groups": [ + { + "group": "AI Video", + "pages": [ + "v1/ai/introduction", + "v1/ai/whats-new", + { + "group": "AI Pipelines", + "icon": { + "name": "wand-magic-sparkles", + "style": "solid" + }, + "pages": [ + "v1/ai/pipelines/overview", + "v1/ai/pipelines/audio-to-text", + "v1/ai/pipelines/image-to-image", + "v1/ai/pipelines/image-to-text", + "v1/ai/pipelines/image-to-video", + "v1/ai/pipelines/llm", + "v1/ai/pipelines/segment-anything-2", + "v1/ai/pipelines/text-to-image", + "v1/ai/pipelines/text-to-speech", + "v1/ai/pipelines/upscale" + ] + }, + { + "group": "Setup an AI Orchestrator", + "icon": { + "name": "robot", + "style": "solid" + }, + "pages": [ + "v1/ai/orchestrators/get-started", + "v1/ai/orchestrators/models-config", + "v1/ai/orchestrators/models-download", + "v1/ai/orchestrators/start-orchestrator", + "v1/ai/orchestrators/ai-worker", + "v1/ai/orchestrators/benchmarking", + "v1/ai/orchestrators/onchain" + ] + }, + { + "group": "Setup an AI Gateway", + "icon": { + "name": "signal-stream", + "style": "solid" + }, + "pages": [ + "v1/ai/gateways/get-started", + "v1/ai/gateways/start-gateway", + "v1/ai/gateways/onchain" + ] + }, + { + "group": "AI Builders", + "icon": { + "name": "screwdriver-wrench", + "style": "solid" + }, + "pages": [ + "v1/ai/builders/get-started", + "v1/ai/builders/gateways", + 
"v1/ai/builders/showcase" + ] + }, + { + "group": "How to Contribute", + "icon": { + "name": "heart", + "style": "solid" + }, + "pages": ["ai/contributors/coming-soon"] + }, + { + "group": "SDKs", + "icon": "brackets-curly", + "pages": [ + "v1/ai/sdks/overview", + "v1/ai/sdks/go", + "v1/ai/sdks/javascript", + "v1/ai/sdks/python" + ] + }, + { + "group": "AI API Reference", + "icon": "rectangle-terminal", + "pages": [ + "v1/ai/api-reference/overview", + "v1/ai/api-reference/audio-to-text", + "v1/ai/api-reference/image-to-image", + "v1/ai/api-reference/image-to-text", + "v1/ai/api-reference/image-to-video", + "v1/ai/api-reference/llm", + "v1/ai/api-reference/segment-anything-2", + "v1/ai/api-reference/text-to-image", + "v1/ai/api-reference/text-to-speech", + "v1/ai/api-reference/upscale" + ] + } + ] + } + ] + }, + { + "anchor": " ", + "icon": "horizontal-rule", + "href": " " + }, + { + "anchor": "Livepeer Studio\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0➚", + "href": "https://livepeer.studio", + "icon": "clapperboard-play" + }, + { + "anchor": "What's New\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0➚", + "href": "https://livepeer.canny.io/changelog", + "icon": "rocket" + }, + { + "anchor": "Community\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0➚", + "href": "https://discord.gg/livepeer", + "icon": { + "name": "discord", + "style": "brands" + } + } + ] + }, + { + "dropdown": "Delegators", + "icon": "coins", + "anchors": [ + { + "anchor": "Documentation", + "icon": "code", + "groups": [ + { + "group": "Getting Started", + "pages": [ + "v1/delegators/introduction", + "v1/delegators/quick-start", + "v1/delegators/livepeer-studio-cli" + ] + }, + { + "group": "Guides", + "pages": [ + "v1/delegators/guides/bridge-lpt-to-arbitrum", + "v1/delegators/guides/migrate-stake-to-arbitrum", + "v1/delegators/guides/yield-calculation" + ] + }, + { + "group": "References", + "pages": [ + "v1/references/api-support-matrix", + { + "group": "Livepeer Node Software", + "icon": "golang", + "pages": [ + "v1/references/go-livepeer/bandwidth-requirements", + "v1/references/go-livepeer/cli-reference", + "v1/references/go-livepeer/gpu-support", + "v1/references/go-livepeer/hardware-requirements", + "v1/references/go-livepeer/prometheus-metrics" + ] + }, + "v1/references/contract-addresses", + "v1/references/example-applications", + "v1/references/awesome-livepeer", + { + "group": "FAQs", + "icon": "book", + "pages": [ + "v1/references/knowledge-base/livestream", + "v1/references/knowledge-base/playback", + "v1/references/knowledge-base/vod" + ] + } + ] + } + ] + }, + { + "anchor": "API Reference", + "icon": "rectangle-terminal", + "groups": [ + { + "group": "Overview", + "pages": [ + "v1/api-reference/overview/introduction", + "v1/api-reference/overview/authentication" + ] + }, + { + "group": "APIs", + "pages": [ + { + "group": "Asset", + "icon": "video", + "pages": [ + "v1/api-reference/asset/overview", + "v1/api-reference/asset/upload", + "v1/api-reference/asset/upload-via-url", + "v1/api-reference/asset/get", + "v1/api-reference/asset/update", + "v1/api-reference/asset/delete", + "v1/api-reference/asset/get-all" + ] + }, + { + "group": "Livestream", + "icon": "camera", + "pages": [ + "v1/api-reference/stream/overview", + "v1/api-reference/stream/create", + "v1/api-reference/stream/get", + "v1/api-reference/stream/update", + "v1/api-reference/stream/terminate", 
+ "v1/api-reference/stream/add-multistream-target", + "v1/api-reference/stream/delete-multistream-target", + "v1/api-reference/stream/delete", + "v1/api-reference/stream/get-all", + "v1/api-reference/stream/create-clip", + "v1/api-reference/stream/get-clip" + ] + }, + { + "group": "Generate", + "icon": "microchip-ai", + "pages": [ + "v1/api-reference/generate/overview", + "v1/api-reference/generate/audio-to-text", + "v1/api-reference/generate/text-to-image", + "v1/api-reference/generate/image-to-image", + "v1/api-reference/generate/image-to-video", + "v1/api-reference/generate/llm", + "v1/api-reference/generate/segment-anything-2", + "v1/api-reference/generate/upscale" + ] + }, + { + "group": "Multistream target", + "icon": "arrows-split-up-and-left", + "pages": [ + "v1/api-reference/multistream/overview", + "v1/api-reference/multistream/create", + "v1/api-reference/multistream/get", + "v1/api-reference/multistream/update", + "v1/api-reference/multistream/delete", + "v1/api-reference/multistream/get-all" + ] + }, + { + "group": "Session", + "icon": "film", + "pages": [ + "v1/api-reference/session/overview", + "v1/api-reference/session/get", + "v1/api-reference/session/get-all", + "v1/api-reference/session/get-recording", + "v1/api-reference/session/get-clip" + ] + }, + { + "group": "Access control", + "icon": "lock", + "pages": [ + "v1/api-reference/signing-key/overview", + "v1/api-reference/signing-key/create", + "v1/api-reference/signing-key/get", + "v1/api-reference/signing-key/update", + "v1/api-reference/signing-key/delete", + "v1/api-reference/signing-key/get-all" + ] + }, + { + "group": "Webhook", + "icon": "bell", + "pages": [ + "v1/api-reference/webhook/overview", + "v1/api-reference/webhook/create", + "v1/api-reference/webhook/get", + "v1/api-reference/webhook/update", + "v1/api-reference/webhook/delete", + "v1/api-reference/webhook/get-all" + ] + }, + { + "group": "Task", + "icon": "gear", + "pages": [ + "v1/api-reference/task/overview", + "v1/api-reference/task/get-all", + "v1/api-reference/task/get" + ] + }, + { + "group": "Playback", + "icon": "play", + "pages": [ + "v1/api-reference/playback/overview", + "v1/api-reference/playback/get" + ] + }, + { + "group": "Transcode", + "icon": "photo-film", + "pages": [ + "v1/api-reference/transcode/overview", + "v1/api-reference/transcode/create" + ] + }, + { + "group": "Viewership", + "icon": "chart-bar", + "pages": [ + "v1/api-reference/viewership/get-realtime-viewership", + "v1/api-reference/viewership/get-viewership-metrics", + "v1/api-reference/viewership/get-usage-metrics", + "v1/api-reference/viewership/get-public-total-views", + "v1/api-reference/viewership/get-creators-metrics" + ] + } + ] + } + ] + }, + { + "anchor": "SDKs", + "icon": "brackets-curly", + "groups": [ + { + "group": "Overview", + "pages": ["v1/sdks/introduction"] + }, + { + "group": "Server-side SDKs", + "pages": [ + "v1/sdks/javascript", + "v1/sdks/go", + "v1/sdks/python" + ] + }, + { + "group": "React Components", + "icon": "circle-play", + "pages": [ + "v1/sdks/react/getting-started", + { + "group": "Player", + "icon": "circle-play", + "pages": [ + "v1/sdks/react/player/Root", + "v1/sdks/react/player/Container", + "v1/sdks/react/player/Video", + "v1/sdks/react/player/Error", + "v1/sdks/react/player/Loading", + "v1/sdks/react/player/Portal", + "v1/sdks/react/player/Poster", + { + "group": "Controls", + "pages": [ + "v1/sdks/react/player/Controls", + "v1/sdks/react/player/Clip", + "v1/sdks/react/player/Fullscreen", + "v1/sdks/react/player/Live", + 
"v1/sdks/react/player/PictureInPicture", + "v1/sdks/react/player/Play", + "v1/sdks/react/player/RateSelect", + "v1/sdks/react/player/Seek", + "v1/sdks/react/player/Time", + "v1/sdks/react/player/VideoQualitySelect", + "v1/sdks/react/player/Volume" + ] + }, + { + "group": "Functions", + "pages": [ + "v1/sdks/react/player/get-src", + "v1/sdks/react/player/useMediaContext" + ] + } + ] + }, + { + "group": "Broadcast", + "icon": "signal-stream", + "pages": [ + "v1/sdks/react/broadcast/Root", + "v1/sdks/react/broadcast/Container", + "v1/sdks/react/broadcast/Video", + "v1/sdks/react/broadcast/Enabled", + "v1/sdks/react/broadcast/Error", + "v1/sdks/react/broadcast/Loading", + "v1/sdks/react/broadcast/Portal", + { + "group": "Controls", + "pages": [ + "v1/sdks/react/broadcast/Controls", + "v1/sdks/react/broadcast/Audio", + "v1/sdks/react/broadcast/Camera", + "v1/sdks/react/broadcast/Fullscreen", + "v1/sdks/react/broadcast/PictureInPicture", + "v1/sdks/react/broadcast/Screenshare", + "v1/sdks/react/broadcast/Source", + "v1/sdks/react/broadcast/Status" + ] + }, + { + "group": "Functions", + "pages": [ + "v1/sdks/react/broadcast/get-ingest", + "v1/sdks/react/broadcast/useBroadcastContext" + ] + } + ] + }, + { + "group": "Examples", + "icon": "clipboard", + "pages": [ + "v1/sdks/react/Player", + "v1/sdks/react/Broadcast" + ] + }, + { + "group": "Migration", + "icon": "right-left", + "pages": [ + "v1/sdks/react/migration/migration-4.x", + { + "group": "Livepeer React (3.x and below)", + "pages": [ + "v1/sdks/react/migration/3.x/getting-started", + "v1/sdks/react/migration/3.x/client", + "v1/sdks/react/migration/3.x/LivepeerConfig", + "v1/sdks/react/migration/3.x/Player", + "v1/sdks/react/migration/3.x/Broadcast", + { + "group": "Asset", + "pages": [ + "v1/sdks/react/migration/3.x/asset/useCreateAsset", + "v1/sdks/react/migration/3.x/asset/useAsset", + "v1/sdks/react/migration/3.x/asset/useUpdateAsset", + "v1/sdks/react/migration/3.x/asset/useAssetMetrics" + ] + }, + { + "group": "Stream", + "pages": [ + "v1/sdks/react/migration/3.x/stream/useCreateStream", + "v1/sdks/react/migration/3.x/stream/useStream", + "v1/sdks/react/migration/3.x/stream/useUpdateStream", + "v1/sdks/react/migration/3.x/stream/useStreamSession", + "v1/sdks/react/migration/3.x/stream/useStreamSessions" + ] + }, + { + "group": "Playback", + "pages": [ + "v1/sdks/react/migration/3.x/playback/usePlaybackInfo" + ] + }, + { + "group": "Constants", + "pages": [ + "v1/sdks/react/migration/3.x/constants/abis", + "v1/sdks/react/migration/3.x/constants/contract-addresses" + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "anchor": "AI Video (Beta)", + "icon": { + "name": "microchip-ai", + "style": "regular" + }, + "groups": [ + { + "group": "AI Video", + "pages": [ + "v1/ai/introduction", + "v1/ai/whats-new", + { + "group": "AI Pipelines", + "icon": { + "name": "wand-magic-sparkles", + "style": "solid" + }, + "pages": [ + "v1/ai/pipelines/overview", + "v1/ai/pipelines/audio-to-text", + "v1/ai/pipelines/image-to-image", + "v1/ai/pipelines/image-to-text", + "v1/ai/pipelines/image-to-video", + "v1/ai/pipelines/llm", + "v1/ai/pipelines/segment-anything-2", + "v1/ai/pipelines/text-to-image", + "v1/ai/pipelines/text-to-speech", + "v1/ai/pipelines/upscale" + ] + }, + { + "group": "Setup an AI Orchestrator", + "icon": { + "name": "robot", + "style": "solid" + }, + "pages": [ + "v1/ai/orchestrators/get-started", + "v1/ai/orchestrators/models-config", + "v1/ai/orchestrators/models-download", + "v1/ai/orchestrators/start-orchestrator", + 
"v1/ai/orchestrators/ai-worker", + "v1/ai/orchestrators/benchmarking", + "v1/ai/orchestrators/onchain" + ] + }, + { + "group": "Setup an AI Gateway", + "icon": { + "name": "signal-stream", + "style": "solid" + }, + "pages": [ + "v1/ai/gateways/get-started", + "v1/ai/gateways/start-gateway", + "v1/ai/gateways/onchain" + ] + }, + { + "group": "AI Builders", + "icon": { + "name": "screwdriver-wrench", + "style": "solid" + }, + "pages": [ + "v1/ai/builders/get-started", + "v1/ai/builders/gateways", + "v1/ai/builders/showcase" + ] + }, + { + "group": "How to Contribute", + "icon": { + "name": "heart", + "style": "solid" + }, + "pages": ["ai/contributors/coming-soon"] + }, + { + "group": "SDKs", + "icon": "brackets-curly", + "pages": [ + "v1/ai/sdks/overview", + "v1/ai/sdks/go", + "v1/ai/sdks/javascript", + "v1/ai/sdks/python" + ] + }, + { + "group": "AI API Reference", + "icon": "rectangle-terminal", + "pages": [ + "v1/ai/api-reference/overview", + "v1/ai/api-reference/audio-to-text", + "v1/ai/api-reference/image-to-image", + "v1/ai/api-reference/image-to-text", + "v1/ai/api-reference/image-to-video", + "v1/ai/api-reference/llm", + "v1/ai/api-reference/segment-anything-2", + "v1/ai/api-reference/text-to-image", + "v1/ai/api-reference/text-to-speech", + "v1/ai/api-reference/upscale" + ] + } + ] + } + ] + }, + { + "anchor": " ", + "icon": "horizontal-rule", + "href": " " + }, + { + "anchor": "Livepeer Studio\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0➚", + "href": "https://livepeer.studio", + "icon": "clapperboard-play" + }, + { + "anchor": "What's New\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0➚", + "href": "https://livepeer.canny.io/changelog", + "icon": "rocket" + }, + { + "anchor": "Community\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0➚", + "href": "https://discord.gg/livepeer", + "icon": { + "name": "discord", + "style": "brands" + } + } + ] + }, + { + "dropdown": "Orchestrators", + "icon": "microchip", + "anchors": [ + { + "anchor": "Documentation", + "icon": "code", + "groups": [ + { + "group": "Getting Started", + "pages": [ + "v1/orchestrators/introduction", + "v1/orchestrators/quick-start", + "v1/orchestrators/livepeer-studio-cli" + ] + }, + { + "group": "Guides", + "pages": [ + "v1/orchestrators/guides/get-started", + "v1/orchestrators/guides/install-go-livepeer", + "v1/orchestrators/guides/connect-to-arbitrum", + "v1/orchestrators/guides/configure-reward-calling", + "v1/orchestrators/guides/set-session-limits", + "v1/orchestrators/guides/set-pricing", + "v1/orchestrators/guides/benchmark-transcoding", + "v1/orchestrators/guides/assess-capabilities", + "v1/orchestrators/guides/monitor-metrics", + "v1/orchestrators/guides/vote", + "v1/orchestrators/guides/dual-mine", + "v1/orchestrators/guides/o-t-split", + "v1/orchestrators/guides/migrate-to-arbitrum", + "v1/orchestrators/guides/migrate-from-contract-wallet", + "v1/orchestrators/guides/gateway-introspection", + "v1/orchestrators/guides/troubleshoot" + ] + }, + { + "group": "References", + "pages": [ + "v1/references/api-support-matrix", + { + "group": "Livepeer Node Software", + "icon": "golang", + "pages": [ + "v1/references/go-livepeer/bandwidth-requirements", + "v1/references/go-livepeer/cli-reference", + "v1/references/go-livepeer/gpu-support", + "v1/references/go-livepeer/hardware-requirements", + "v1/references/go-livepeer/prometheus-metrics" + ] + }, + 
"v1/references/contract-addresses", + "v1/references/example-applications", + "v1/references/awesome-livepeer", + { + "group": "FAQs", + "icon": "book", + "pages": [ + "v1/references/knowledge-base/livestream", + "v1/references/knowledge-base/playback", + "v1/references/knowledge-base/vod" + ] + } + ] + } + ] + }, + { + "anchor": "API Reference", + "icon": "rectangle-terminal", + "groups": [ + { + "group": "Overview", + "pages": [ + "v1/api-reference/overview/introduction", + "v1/api-reference/overview/authentication" + ] + }, + { + "group": "APIs", + "pages": [ + { + "group": "Asset", + "icon": "video", + "pages": [ + "v1/api-reference/asset/overview", + "v1/api-reference/asset/upload", + "v1/api-reference/asset/upload-via-url", + "v1/api-reference/asset/get", + "v1/api-reference/asset/update", + "v1/api-reference/asset/delete", + "v1/api-reference/asset/get-all" + ] + }, + { + "group": "Livestream", + "icon": "camera", + "pages": [ + "v1/api-reference/stream/overview", + "v1/api-reference/stream/create", + "v1/api-reference/stream/get", + "v1/api-reference/stream/update", + "v1/api-reference/stream/terminate", + "v1/api-reference/stream/add-multistream-target", + "v1/api-reference/stream/delete-multistream-target", + "v1/api-reference/stream/delete", + "v1/api-reference/stream/get-all", + "v1/api-reference/stream/create-clip", + "v1/api-reference/stream/get-clip" + ] + }, + { + "group": "Generate", + "icon": "microchip-ai", + "pages": [ + "v1/api-reference/generate/overview", + "v1/api-reference/generate/audio-to-text", + "v1/api-reference/generate/text-to-image", + "v1/api-reference/generate/image-to-image", + "v1/api-reference/generate/image-to-video", + "v1/api-reference/generate/llm", + "v1/api-reference/generate/segment-anything-2", + "v1/api-reference/generate/upscale" + ] + }, + { + "group": "Multistream target", + "icon": "arrows-split-up-and-left", + "pages": [ + "v1/api-reference/multistream/overview", + "v1/api-reference/multistream/create", + "v1/api-reference/multistream/get", + "v1/api-reference/multistream/update", + "v1/api-reference/multistream/delete", + "v1/api-reference/multistream/get-all" + ] + }, + { + "group": "Session", + "icon": "film", + "pages": [ + "v1/api-reference/session/overview", + "v1/api-reference/session/get", + "v1/api-reference/session/get-all", + "v1/api-reference/session/get-recording", + "v1/api-reference/session/get-clip" + ] + }, + { + "group": "Access control", + "icon": "lock", + "pages": [ + "v1/api-reference/signing-key/overview", + "v1/api-reference/signing-key/create", + "v1/api-reference/signing-key/get", + "v1/api-reference/signing-key/update", + "v1/api-reference/signing-key/delete", + "v1/api-reference/signing-key/get-all" + ] + }, + { + "group": "Webhook", + "icon": "bell", + "pages": [ + "v1/api-reference/webhook/overview", + "v1/api-reference/webhook/create", + "v1/api-reference/webhook/get", + "v1/api-reference/webhook/update", + "v1/api-reference/webhook/delete", + "v1/api-reference/webhook/get-all" + ] + }, + { + "group": "Task", + "icon": "gear", + "pages": [ + "v1/api-reference/task/overview", + "v1/api-reference/task/get-all", + "v1/api-reference/task/get" + ] + }, + { + "group": "Playback", + "icon": "play", + "pages": [ + "v1/api-reference/playback/overview", + "v1/api-reference/playback/get" + ] + }, + { + "group": "Transcode", + "icon": "photo-film", + "pages": [ + "v1/api-reference/transcode/overview", + "v1/api-reference/transcode/create" + ] + }, + { + "group": "Viewership", + "icon": "chart-bar", + "pages": [ + 
"v1/api-reference/viewership/get-realtime-viewership", + "v1/api-reference/viewership/get-viewership-metrics", + "v1/api-reference/viewership/get-usage-metrics", + "v1/api-reference/viewership/get-public-total-views", + "v1/api-reference/viewership/get-creators-metrics" + ] + } + ] + } + ] + }, + { + "anchor": "SDKs", + "icon": "brackets-curly", + "groups": [ + { + "group": "Overview", + "pages": ["v1/sdks/introduction"] + }, + { + "group": "Server-side SDKs", + "pages": [ + "v1/sdks/javascript", + "v1/sdks/go", + "v1/sdks/python" + ] + }, + { + "group": "React Components", + "icon": "circle-play", + "pages": [ + "v1/sdks/react/getting-started", + { + "group": "Player", + "icon": "circle-play", + "pages": [ + "v1/sdks/react/player/Root", + "v1/sdks/react/player/Container", + "v1/sdks/react/player/Video", + "v1/sdks/react/player/Error", + "v1/sdks/react/player/Loading", + "v1/sdks/react/player/Portal", + "v1/sdks/react/player/Poster", + { + "group": "Controls", + "pages": [ + "v1/sdks/react/player/Controls", + "v1/sdks/react/player/Clip", + "v1/sdks/react/player/Fullscreen", + "v1/sdks/react/player/Live", + "v1/sdks/react/player/PictureInPicture", + "v1/sdks/react/player/Play", + "v1/sdks/react/player/RateSelect", + "v1/sdks/react/player/Seek", + "v1/sdks/react/player/Time", + "v1/sdks/react/player/VideoQualitySelect", + "v1/sdks/react/player/Volume" + ] + }, + { + "group": "Functions", + "pages": [ + "v1/sdks/react/player/get-src", + "v1/sdks/react/player/useMediaContext" + ] + } + ] + }, + { + "group": "Broadcast", + "icon": "signal-stream", + "pages": [ + "v1/sdks/react/broadcast/Root", + "v1/sdks/react/broadcast/Container", + "v1/sdks/react/broadcast/Video", + "v1/sdks/react/broadcast/Enabled", + "v1/sdks/react/broadcast/Error", + "v1/sdks/react/broadcast/Loading", + "v1/sdks/react/broadcast/Portal", + { + "group": "Controls", + "pages": [ + "v1/sdks/react/broadcast/Controls", + "v1/sdks/react/broadcast/Audio", + "v1/sdks/react/broadcast/Camera", + "v1/sdks/react/broadcast/Fullscreen", + "v1/sdks/react/broadcast/PictureInPicture", + "v1/sdks/react/broadcast/Screenshare", + "v1/sdks/react/broadcast/Source", + "v1/sdks/react/broadcast/Status" + ] + }, + { + "group": "Functions", + "pages": [ + "v1/sdks/react/broadcast/get-ingest", + "v1/sdks/react/broadcast/useBroadcastContext" + ] + } + ] + }, + { + "group": "Examples", + "icon": "clipboard", + "pages": [ + "v1/sdks/react/Player", + "v1/sdks/react/Broadcast" + ] + }, + { + "group": "Migration", + "icon": "right-left", + "pages": [ + "v1/sdks/react/migration/migration-4.x", + { + "group": "Livepeer React (3.x and below)", + "pages": [ + "v1/sdks/react/migration/3.x/getting-started", + "v1/sdks/react/migration/3.x/client", + "v1/sdks/react/migration/3.x/LivepeerConfig", + "v1/sdks/react/migration/3.x/Player", + "v1/sdks/react/migration/3.x/Broadcast", + { + "group": "Asset", + "pages": [ + "v1/sdks/react/migration/3.x/asset/useCreateAsset", + "v1/sdks/react/migration/3.x/asset/useAsset", + "v1/sdks/react/migration/3.x/asset/useUpdateAsset", + "v1/sdks/react/migration/3.x/asset/useAssetMetrics" + ] + }, + { + "group": "Stream", + "pages": [ + "v1/sdks/react/migration/3.x/stream/useCreateStream", + "v1/sdks/react/migration/3.x/stream/useStream", + "v1/sdks/react/migration/3.x/stream/useUpdateStream", + "v1/sdks/react/migration/3.x/stream/useStreamSession", + "v1/sdks/react/migration/3.x/stream/useStreamSessions" + ] + }, + { + "group": "Playback", + "pages": [ + "v1/sdks/react/migration/3.x/playback/usePlaybackInfo" + ] + }, + { + "group": 
"Constants", + "pages": [ + "v1/sdks/react/migration/3.x/constants/abis", + "v1/sdks/react/migration/3.x/constants/contract-addresses" + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "anchor": "AI Video (Beta)", + "icon": { + "name": "microchip-ai", + "style": "regular" + }, + "groups": [ + { + "group": "AI Video", + "pages": [ + "v1/ai/introduction", + "v1/ai/whats-new", + { + "group": "AI Pipelines", + "icon": { + "name": "wand-magic-sparkles", + "style": "solid" + }, + "pages": [ + "v1/ai/pipelines/overview", + "v1/ai/pipelines/audio-to-text", + "v1/ai/pipelines/image-to-image", + "v1/ai/pipelines/image-to-text", + "v1/ai/pipelines/image-to-video", + "v1/ai/pipelines/llm", + "v1/ai/pipelines/segment-anything-2", + "v1/ai/pipelines/text-to-image", + "v1/ai/pipelines/text-to-speech", + "v1/ai/pipelines/upscale" + ] + }, + { + "group": "Setup an AI Orchestrator", + "icon": { + "name": "robot", + "style": "solid" + }, + "pages": [ + "v1/ai/orchestrators/get-started", + "v1/ai/orchestrators/models-config", + "v1/ai/orchestrators/models-download", + "v1/ai/orchestrators/start-orchestrator", + "v1/ai/orchestrators/ai-worker", + "v1/ai/orchestrators/benchmarking", + "v1/ai/orchestrators/onchain" + ] + }, + { + "group": "Setup an AI Gateway", + "icon": { + "name": "signal-stream", + "style": "solid" + }, + "pages": [ + "v1/ai/gateways/get-started", + "v1/ai/gateways/start-gateway", + "v1/ai/gateways/onchain" + ] + }, + { + "group": "AI Builders", + "icon": { + "name": "screwdriver-wrench", + "style": "solid" + }, + "pages": [ + "v1/ai/builders/get-started", + "v1/ai/builders/gateways", + "v1/ai/builders/showcase" + ] + }, + { + "group": "How to Contribute", + "icon": { + "name": "heart", + "style": "solid" + }, + "pages": ["ai/contributors/coming-soon"] + }, + { + "group": "SDKs", + "icon": "brackets-curly", + "pages": [ + "v1/ai/sdks/overview", + "v1/ai/sdks/go", + "v1/ai/sdks/javascript", + "v1/ai/sdks/python" + ] + }, + { + "group": "AI API Reference", + "icon": "rectangle-terminal", + "pages": [ + "v1/ai/api-reference/overview", + "v1/ai/api-reference/audio-to-text", + "v1/ai/api-reference/image-to-image", + "v1/ai/api-reference/image-to-text", + "v1/ai/api-reference/image-to-video", + "v1/ai/api-reference/llm", + "v1/ai/api-reference/segment-anything-2", + "v1/ai/api-reference/text-to-image", + "v1/ai/api-reference/text-to-speech", + "v1/ai/api-reference/upscale" + ] + } + ] + } + ] + }, + { + "anchor": " ", + "icon": "horizontal-rule", + "href": " " + }, + { + "anchor": "Livepeer Studio\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0➚", + "href": "https://livepeer.studio", + "icon": "clapperboard-play" + }, + { + "anchor": "What's New\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0➚", + "href": "https://livepeer.canny.io/changelog", + "icon": "rocket" + }, + { + "anchor": "Community\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0➚", + "href": "https://discord.gg/livepeer", + "icon": { + "name": "discord", + "style": "brands" + } + } + ] + }, + { + "dropdown": "Gateways", + "icon": "torii-gate", + "anchors": [ + { + "anchor": "Documentation", + "icon": "code", + "groups": [ + { + "group": "Getting Started", + "pages": [ + "v1/gateways/introduction", + "v1/gateways/quick-start", + "v1/gateways/livepeer-studio-cli" + ] + }, + { + "group": "Guides", + "pages": [ + "v1/gateways/guides/gateway-overview", + "v1/gateways/guides/docker-install", + 
"v1/gateways/guides/linux-install", + "v1/gateways/guides/windows-install", + "v1/gateways/guides/transcoding-options", + "v1/gateways/guides/fund-gateway", + "v1/gateways/guides/publish-content", + "v1/gateways/guides/playback-content" + ] + }, + { + "group": "References", + "pages": [ + "v1/references/api-support-matrix", + { + "group": "Livepeer Node Software", + "icon": "golang", + "pages": [ + "v1/references/go-livepeer/bandwidth-requirements", + "v1/references/go-livepeer/cli-reference", + "v1/references/go-livepeer/gpu-support", + "v1/references/go-livepeer/hardware-requirements", + "v1/references/go-livepeer/prometheus-metrics" + ] + }, + "v1/references/contract-addresses", + "v1/references/example-applications", + "v1/references/awesome-livepeer", + { + "group": "FAQs", + "icon": "book", + "pages": [ + "v1/references/knowledge-base/livestream", + "v1/references/knowledge-base/playback", + "v1/references/knowledge-base/vod" + ] + } + ] + } + ] + }, + { + "anchor": "API Reference", + "icon": "rectangle-terminal", + "groups": [ + { + "group": "Overview", + "pages": [ + "v1/api-reference/overview/introduction", + "v1/api-reference/overview/authentication" + ] + }, + { + "group": "APIs", + "pages": [ + { + "group": "Asset", + "icon": "video", + "pages": [ + "v1/api-reference/asset/overview", + "v1/api-reference/asset/upload", + "v1/api-reference/asset/upload-via-url", + "v1/api-reference/asset/get", + "v1/api-reference/asset/update", + "v1/api-reference/asset/delete", + "v1/api-reference/asset/get-all" + ] + }, + { + "group": "Livestream", + "icon": "camera", + "pages": [ + "v1/api-reference/stream/overview", + "v1/api-reference/stream/create", + "v1/api-reference/stream/get", + "v1/api-reference/stream/update", + "v1/api-reference/stream/terminate", + "v1/api-reference/stream/add-multistream-target", + "v1/api-reference/stream/delete-multistream-target", + "v1/api-reference/stream/delete", + "v1/api-reference/stream/get-all", + "v1/api-reference/stream/create-clip", + "v1/api-reference/stream/get-clip" + ] + }, + { + "group": "Generate", + "icon": "microchip-ai", + "pages": [ + "v1/api-reference/generate/overview", + "v1/api-reference/generate/audio-to-text", + "v1/api-reference/generate/text-to-image", + "v1/api-reference/generate/image-to-image", + "v1/api-reference/generate/image-to-video", + "v1/api-reference/generate/llm", + "v1/api-reference/generate/segment-anything-2", + "v1/api-reference/generate/upscale" + ] + }, + { + "group": "Multistream target", + "icon": "arrows-split-up-and-left", + "pages": [ + "v1/api-reference/multistream/overview", + "v1/api-reference/multistream/create", + "v1/api-reference/multistream/get", + "v1/api-reference/multistream/update", + "v1/api-reference/multistream/delete", + "v1/api-reference/multistream/get-all" + ] + }, + { + "group": "Session", + "icon": "film", + "pages": [ + "v1/api-reference/session/overview", + "v1/api-reference/session/get", + "v1/api-reference/session/get-all", + "v1/api-reference/session/get-recording", + "v1/api-reference/session/get-clip" + ] + }, + { + "group": "Access control", + "icon": "lock", + "pages": [ + "v1/api-reference/signing-key/overview", + "v1/api-reference/signing-key/create", + "v1/api-reference/signing-key/get", + "v1/api-reference/signing-key/update", + "v1/api-reference/signing-key/delete", + "v1/api-reference/signing-key/get-all" + ] + }, + { + "group": "Webhook", + "icon": "bell", + "pages": [ + "v1/api-reference/webhook/overview", + "v1/api-reference/webhook/create", + 
"v1/api-reference/webhook/get", + "v1/api-reference/webhook/update", + "v1/api-reference/webhook/delete", + "v1/api-reference/webhook/get-all" + ] + }, + { + "group": "Task", + "icon": "gear", + "pages": [ + "v1/api-reference/task/overview", + "v1/api-reference/task/get-all", + "v1/api-reference/task/get" + ] + }, + { + "group": "Playback", + "icon": "play", + "pages": [ + "v1/api-reference/playback/overview", + "v1/api-reference/playback/get" + ] + }, + { + "group": "Transcode", + "icon": "photo-film", + "pages": [ + "v1/api-reference/transcode/overview", + "v1/api-reference/transcode/create" + ] + }, + { + "group": "Viewership", + "icon": "chart-bar", + "pages": [ + "v1/api-reference/viewership/get-realtime-viewership", + "v1/api-reference/viewership/get-viewership-metrics", + "v1/api-reference/viewership/get-usage-metrics", + "v1/api-reference/viewership/get-public-total-views", + "v1/api-reference/viewership/get-creators-metrics" + ] + } + ] + } + ] + }, + { + "anchor": "SDKs", + "icon": "brackets-curly", + "groups": [ + { + "group": "Overview", + "pages": ["v1/sdks/introduction"] + }, + { + "group": "Server-side SDKs", + "pages": [ + "v1/sdks/javascript", + "v1/sdks/go", + "v1/sdks/python" + ] + }, + { + "group": "React Components", + "icon": "circle-play", + "pages": [ + "v1/sdks/react/getting-started", + { + "group": "Player", + "icon": "circle-play", + "pages": [ + "v1/sdks/react/player/Root", + "v1/sdks/react/player/Container", + "v1/sdks/react/player/Video", + "v1/sdks/react/player/Error", + "v1/sdks/react/player/Loading", + "v1/sdks/react/player/Portal", + "v1/sdks/react/player/Poster", + { + "group": "Controls", + "pages": [ + "v1/sdks/react/player/Controls", + "v1/sdks/react/player/Clip", + "v1/sdks/react/player/Fullscreen", + "v1/sdks/react/player/Live", + "v1/sdks/react/player/PictureInPicture", + "v1/sdks/react/player/Play", + "v1/sdks/react/player/RateSelect", + "v1/sdks/react/player/Seek", + "v1/sdks/react/player/Time", + "v1/sdks/react/player/VideoQualitySelect", + "v1/sdks/react/player/Volume" + ] + }, + { + "group": "Functions", + "pages": [ + "v1/sdks/react/player/get-src", + "v1/sdks/react/player/useMediaContext" + ] + } + ] + }, + { + "group": "Broadcast", + "icon": "signal-stream", + "pages": [ + "v1/sdks/react/broadcast/Root", + "v1/sdks/react/broadcast/Container", + "v1/sdks/react/broadcast/Video", + "v1/sdks/react/broadcast/Enabled", + "v1/sdks/react/broadcast/Error", + "v1/sdks/react/broadcast/Loading", + "v1/sdks/react/broadcast/Portal", + { + "group": "Controls", + "pages": [ + "v1/sdks/react/broadcast/Controls", + "v1/sdks/react/broadcast/Audio", + "v1/sdks/react/broadcast/Camera", + "v1/sdks/react/broadcast/Fullscreen", + "v1/sdks/react/broadcast/PictureInPicture", + "v1/sdks/react/broadcast/Screenshare", + "v1/sdks/react/broadcast/Source", + "v1/sdks/react/broadcast/Status" + ] + }, + { + "group": "Functions", + "pages": [ + "v1/sdks/react/broadcast/get-ingest", + "v1/sdks/react/broadcast/useBroadcastContext" + ] + } + ] + }, + { + "group": "Examples", + "icon": "clipboard", + "pages": [ + "v1/sdks/react/Player", + "v1/sdks/react/Broadcast" + ] + }, + { + "group": "Migration", + "icon": "right-left", + "pages": [ + "v1/sdks/react/migration/migration-4.x", + { + "group": "Livepeer React (3.x and below)", + "pages": [ + "v1/sdks/react/migration/3.x/getting-started", + "v1/sdks/react/migration/3.x/client", + "v1/sdks/react/migration/3.x/LivepeerConfig", + "v1/sdks/react/migration/3.x/Player", + "v1/sdks/react/migration/3.x/Broadcast", + { + "group": "Asset", 
+ "pages": [ + "v1/sdks/react/migration/3.x/asset/useCreateAsset", + "v1/sdks/react/migration/3.x/asset/useAsset", + "v1/sdks/react/migration/3.x/asset/useUpdateAsset", + "v1/sdks/react/migration/3.x/asset/useAssetMetrics" + ] + }, + { + "group": "Stream", + "pages": [ + "v1/sdks/react/migration/3.x/stream/useCreateStream", + "v1/sdks/react/migration/3.x/stream/useStream", + "v1/sdks/react/migration/3.x/stream/useUpdateStream", + "v1/sdks/react/migration/3.x/stream/useStreamSession", + "v1/sdks/react/migration/3.x/stream/useStreamSessions" + ] + }, + { + "group": "Playback", + "pages": [ + "v1/sdks/react/migration/3.x/playback/usePlaybackInfo" + ] + }, + { + "group": "Constants", + "pages": [ + "v1/sdks/react/migration/3.x/constants/abis", + "v1/sdks/react/migration/3.x/constants/contract-addresses" + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "anchor": "AI Video (Beta)", + "icon": { + "name": "microchip-ai", + "style": "regular" + }, + "groups": [ + { + "group": "AI Video", + "pages": [ + "v1/ai/introduction", + "v1/ai/whats-new", + { + "group": "AI Pipelines", + "icon": { + "name": "wand-magic-sparkles", + "style": "solid" + }, + "pages": [ + "v1/ai/pipelines/overview", + "v1/ai/pipelines/audio-to-text", + "v1/ai/pipelines/image-to-image", + "v1/ai/pipelines/image-to-text", + "v1/ai/pipelines/image-to-video", + "v1/ai/pipelines/llm", + "v1/ai/pipelines/segment-anything-2", + "v1/ai/pipelines/text-to-image", + "v1/ai/pipelines/text-to-speech", + "v1/ai/pipelines/upscale" + ] + }, + { + "group": "Setup an AI Orchestrator", + "icon": { + "name": "robot", + "style": "solid" + }, + "pages": [ + "v1/ai/orchestrators/get-started", + "v1/ai/orchestrators/models-config", + "v1/ai/orchestrators/models-download", + "v1/ai/orchestrators/start-orchestrator", + "v1/ai/orchestrators/ai-worker", + "v1/ai/orchestrators/benchmarking", + "v1/ai/orchestrators/onchain" + ] + }, + { + "group": "Setup an AI Gateway", + "icon": { + "name": "signal-stream", + "style": "solid" + }, + "pages": [ + "v1/ai/gateways/get-started", + "v1/ai/gateways/start-gateway", + "v1/ai/gateways/onchain" + ] + }, + { + "group": "AI Builders", + "icon": { + "name": "screwdriver-wrench", + "style": "solid" + }, + "pages": [ + "v1/ai/builders/get-started", + "v1/ai/builders/gateways", + "v1/ai/builders/showcase" + ] + }, + { + "group": "How to Contribute", + "icon": { + "name": "heart", + "style": "solid" + }, + "pages": ["ai/contributors/coming-soon"] + }, + { + "group": "SDKs", + "icon": "brackets-curly", + "pages": [ + "v1/ai/sdks/overview", + "v1/ai/sdks/go", + "v1/ai/sdks/javascript", + "v1/ai/sdks/python" + ] + }, + { + "group": "AI API Reference", + "icon": "rectangle-terminal", + "pages": [ + "v1/ai/api-reference/overview", + "v1/ai/api-reference/audio-to-text", + "v1/ai/api-reference/image-to-image", + "v1/ai/api-reference/image-to-text", + "v1/ai/api-reference/image-to-video", + "v1/ai/api-reference/llm", + "v1/ai/api-reference/segment-anything-2", + "v1/ai/api-reference/text-to-image", + "v1/ai/api-reference/text-to-speech", + "v1/ai/api-reference/upscale" + ] + } + ] + } + ] + }, + { + "anchor": " ", + "icon": "horizontal-rule", + "href": " " + }, + { + "anchor": "Livepeer Studio\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0➚", + "href": "https://livepeer.studio", + "icon": "clapperboard-play" + }, + { + "anchor": "What's New\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0➚", + "href": "https://livepeer.canny.io/changelog", + "icon": 
"rocket" + }, + { + "anchor": "Community\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0\u00A0➚", + "href": "https://discord.gg/livepeer", + "icon": { + "name": "discord", + "style": "brands" + } + } + ] + } + ] + } + ] + } + ] + }, + "logo": { + "light": "/logo/light.svg", + "dark": "/logo/dark.svg" + }, + "api": { + "openapi": "openapi.yaml", + "mdx": { + "server": "https://livepeer.studio/api" + } + }, + "appearance": { + "default": "dark" + }, + "search": { + "prompt": "Need help? Ask our AI" + }, + "footer": { + "links": [ + { + "header": "links", + "items": [ + { "label": "custom link here", "href": "https://livepeer.org" }, + { "label": "custom link here", "href": "https://livepeer.org" }, + { "label": "custom link here", "href": "https://livepeer.org" } + ] + } + ], + "socials": { + "website": "https://forum.livepeer.org", + "github": "https://github.com/livepeer", + "twitter": "https://twitter.com/livepeer", + "discord": "https://discord.gg/livepeer", + "linkedin": "https://www.linkedin.com/company/livepeer" + } + }, + "integrations": { + "ga4": { + "measurementId": "G-P1Z15F6NX4" + } + }, + "navbar": { + "links": [ + { + "label": "", + "href": "https://twitter.com/Livepeer", + "icon": "x-twitter" + }, + { + "label": "", + "href": "https://github.com/livepeer", + "icon": "github" + }, + { + "label": "", + "href": "https://discord.gg/livepeer", + "icon": "discord" + } + ] + }, + "errors": { + "404": { + "redirect": false, + "title": "Ruh oh. This page doesn't exist.", + "description": "\"Rick

Sorry About That." + } + }, + "redirects": [ + { + "source": "/v2/pages/07_resources/redirect", + "destination": "/v2/pages/07_resources/resources_hub" + }, + { + "source": "/v2/pages/08_help/redirect", + "destination": "/v2/pages/08_help/README" + }, + { + "source": "/v1/guides/developing/quickstart", + "destination": "/v1/developers/quick-start" + }, + { + "source": "/v1/guides/overview", + "destination": "/v1/developers/guides/overview" + }, + { + "source": "/v1/guides/developing/player", + "destination": "/v1/developers/guides/playback-an-asset" + }, + { + "source": "/v1/guides/developing/create-a-livestream", + "destination": "/v1/developers/guides/create-livestream" + }, + { + "source": "/v1/guides/developing/stream-via-obs", + "destination": "/v1/developers/guides/stream-via-obs" + }, + { + "source": "/v1/developing/stream-via-browser", + "destination": "/v1/developers/guides/livestream-from-browser" + }, + { + "source": "/v1/guides/developing/upload-a-video-asset", + "destination": "/v1/developers/guides/upload-video-asset" + }, + { + "source": "/v1/guides/developing/mint-a-video-nft", + "destination": "/v1/developers/guides/mint-video-nft" + }, + { + "source": "/v1/guides/developing/dstorage-playback", + "destination": "/v1/developers/guides/dstorage-playback" + }, + { + "source": "/v1/developers/guides/dstorage-playback", + "destination": "/v1/developers/guides/upload-video-asset" + }, + { + "source": "/v1/guides/developing/access-control", + "destination": "/v1/developers/guides/access-control-webhooks" + }, + { + "source": "/v1/guides/developing/access-control-vod", + "destination": "/v1/developers/guides/access-control-webhooks" + }, + { + "source": "/v1/guides/developing/encrypted-vod", + "destination": "/v1/developers/guides/encrypted-asset" + }, + { + "source": "/v1/guides/developing/listen-for-webhooks", + "destination": "/v1/developers/guides/setup-and-listen-to-webhooks" + }, + { + "source": "/v1/guides/developing/multistream", + "destination": "/v1/developers/guides/multistream" + }, + { + "source": "/v1/guides/developing/monitor-stream-health", + "destination": "/v1/developers/guides/monitor-stream-health" + }, + { + "source": "/v1/guides/developing/viewer-engagement", + "destination": "/v1/developers/guides/get-engagement-analytics-via-api" + }, + { + "source": "/v1/guides/developing/transcode-video-storj", + "destination": "/v1/developers/guides/transcode-video-storj" + }, + { + "source": "/v1/guides/developing/transcode-video-w3s", + "destination": "/v1/developers/guides/transcode-video-w3s" + }, + { + "source": "/v1/tutorials/developing/optimize-latency", + "destination": "/v1/developers/guides/optimize-latency-of-a-livestream" + }, + { + "source": "/v1/tutorials/developing/analyze-engagement-timeplus", + "destination": "/v1/developers/guides/get-engagement-analytics-via-timeplus" + }, + { + "source": "/v1/tutorials/developing/visualize-engagement-metrics-grafana", + "destination": "/v1/developers/guides/get-engagement-analytics-via-grafana" + }, + { + "source": "/v1/tutorials/developing/token-gate-videos-using-guildxyz", + "destination": "/v1/developers/tutorials/token-gate-videos-with-lit" + }, + { + "source": "/v1/tutorials/developing/token-gate-videos-using-lit", + "destination": "/v1/developers/tutorials/token-gate-videos-with-lit" + }, + { + "source": "/v1/tutorials/developing/build-decentralized-video-app-with-fvm", + "destination": "/v1/developers/tutorials/decentralized-app-with-fvm" + }, + { + "source": 
"/v1/tutorials/developing/upload-playback-videos-on-ipfs-4everland", + "destination": "/v1/developers/tutorials/upload-playback-videos-4everland" + }, + { + "source": "/v1/tutorials/developing/upload-playback-videos-on-ipfs", + "destination": "/v1/developers/tutorials/upload-playback-videos-on-ipfs" + }, + { + "source": "/v1/tutorials/developing/upload-playback-videos-on-arweave", + "destination": "/v1/developers/tutorials/upload-playback-videos-on-arweave" + }, + { + "source": "/v1/reference/api", + "destination": "/v1/api-reference/overview/introduction" + }, + { + "source": "/v1/reference/deployed-contract-addresses", + "destination": "/v1/references/contract-addresses" + }, + { + "source": "/v1/reference/example-applications", + "destination": "/v1/references/example-applications" + }, + { + "source": "/v1/reference/api-support-matrix", + "destination": "/v1/references/api-support-matrix" + }, + { + "source": "/v1/reference/go-livepeer", + "destination": "/v1/references/go-livepeer/bandwidth-requirements" + }, + { + "source": "/v1/reference/go-livepeer/cli-reference", + "destination": "/v1/references/go-livepeer/cli-reference" + }, + { + "source": "/v1/reference/go-livepeer/gpu-support", + "destination": "/v1/references/go-livepeer/gpu-support" + }, + { + "source": "/v1/reference/go-livepeer/hardware-requirements", + "destination": "/v1/references/go-livepeer/hardware-requirements" + }, + { + "source": "/v1/reference/go-livepeer/bandwidth-requirements", + "destination": "/v1/references/go-livepeer/bandwidth-requirements" + }, + { + "source": "/v1/reference/go-livepeer/prometheus-metrics", + "destination": "/v1/references/go-livepeer/prometheus-metrics" + }, + { + "source": "/v1/guides/delegating/bridge-lpt-to-arbitrum", + "destination": "/v1/delegators/guides/bridge-lpt-to-arbitrum" + }, + { + "source": "/v1/guides/delegating/migrate-stake-to-arbitrum", + "destination": "/v1/delegators/guides/migrate-stake-to-arbitrum" + }, + { + "source": "/v1/delegators/reference/yield-calculation", + "destination": "/v1/delegators/guides/yield-calculation" + }, + { + "source": "/v1/guides/orchestrating/get-started", + "destination": "/v1/orchestrators/guides/get-started" + }, + { + "source": "/v1/guides/orchestrating/install-go-livepeer", + "destination": "/v1/orchestrators/guides/install-go-livepeer" + }, + { + "source": "/v1/guides/orchestrating/connect-to-arbitrum", + "destination": "/v1/orchestrators/guides/connect-to-arbitrum" + }, + { + "source": "/v1/guides/orchestrating/configure-reward-calling", + "destination": "/v1/orchestrators/guides/configure-reward-calling" + }, + { + "source": "/v1/guides/orchestrating/set-session-limits", + "destination": "/v1/orchestrators/guides/set-session-limits" + }, + { + "source": "/v1/guides/orchestrating/set-pricing", + "destination": "/v1/orchestrators/guides/set-pricing" + }, + { + "source": "/v1/guides/orchestrating/benchmark-transcoding", + "destination": "/v1/orchestrators/guides/benchmark-transcoding" + }, + { + "source": "/v1/guides/orchestrating/assess-capabilities", + "destination": "/v1/orchestrators/guides/assess-capabilities" + }, + { + "source": "/v1/guides/orchestrating/monitor-metrics", + "destination": "/v1/orchestrators/guides/monitor-metrics" + }, + { + "source": "/v1/guides/orchestrating/vote", + "destination": "/v1/orchestrators/guides/vote" + }, + { + "source": "/v1/guides/orchestrating/dual-mine", + "destination": "/v1/orchestrators/guides/dual-mine" + }, + { + "source": "/v1/guides/orchestrating/o-t-split", + "destination": 
"/v1/orchestrators/guides/o-t-split" + }, + { + "source": "/v1/guides/orchestrating/migrate-to-arbitrum", + "destination": "/v1/orchestrators/guides/migrate-to-arbitrum" + }, + { + "source": "/v1/guides/orchestrating/migrate-from-contract-wallet", + "destination": "/v1/orchestrators/guides/migrate-from-contract-wallet" + }, + { + "source": "/v1/guides/orchestrating/gateway-introspection", + "destination": "/v1/orchestrators/guides/gateway-introspection" + }, + { + "source": "/v1/guides/orchestrating/troubleshoot", + "destination": "/v1/orchestrators/guides/troubleshoot" + }, + { + "source": "/v1/reference/react", + "destination": "/v1/react/getting-started" + }, + { + "source": "/v1/reference/react/getting-started", + "destination": "/v1/react/getting-started" + }, + { + "source": "/v1/reference/react/client", + "destination": "/v1/react/getting-started" + }, + { + "source": "/v1/reference/react/LivepeerConfig", + "destination": "/v1/sdks/react/migration/3.x/LivepeerConfig" + }, + { + "source": "/v1/reference/react/Player", + "destination": "/v1/react/player/Root" + }, + { + "source": "/v1/reference/react/Broadcast", + "destination": "/v1/react/broadcast/Root" + }, + { + "source": "/v1/reference/react/providers/studio", + "destination": "/v1/sdks/react/migration/3.x/providers/studio" + }, + { + "source": "/v1/reference/react/asset/useAsset", + "destination": "/v1/sdks/react/migration/3.x/asset/useAsset" + }, + { + "source": "/v1/reference/react/asset/useCreateAsset", + "destination": "/v1/sdks/react/migration/3.x/asset/useCreateAsset" + }, + { + "source": "/v1/reference/react/asset/useAssetMetrics", + "destination": "/v1/sdks/react/migration/3.x/asset/useAssetMetrics" + }, + { + "source": "/v1/reference/react/asset/useUpdateAsset", + "destination": "/v1/sdks/react/migration/3.x/asset/useUpdateAsset" + }, + { + "source": "/v1/reference/react/stream/useStream", + "destination": "/v1/sdks/react/migration/3.x/stream/useStream" + }, + { + "source": "/v1/reference/react/stream/useStreamSession", + "destination": "/v1/sdks/react/migration/3.x/stream/useStreamSession" + }, + { + "source": "/v1/reference/react/stream/useStreamSessions", + "destination": "/v1/sdks/react/migration/3.x/stream/useStreamSessions" + }, + { + "source": "/v1/reference/react/stream/useCreateStream", + "destination": "/v1/sdks/react/migration/3.x/stream/useCreateStream" + }, + { + "source": "/v1/reference/react/stream/useUpdateStream", + "destination": "/v1/sdks/react/migration/3.x/stream/useUpdateStream" + }, + { + "source": "/v1/reference/react/playback/usePlaybackInfo", + "destination": "/v1/sdks/react/migration/3.x/playback/usePlaybackInfo" + }, + { + "source": "/v1/reference/react/constants/abis", + "destination": "/v1/sdks/react/migration/3.x/constants/abis" + }, + { + "source": "/v1/reference/react/constants/contract-addresses", + "destination": "/v1/sdks/react/migration/3.x/constants/contract-addresses" + } + ] +} diff --git a/llms.txt.information.md b/llms.txt.information.md new file mode 100644 index 00000000..21254206 --- /dev/null +++ b/llms.txt.information.md @@ -0,0 +1,30 @@ +This page is deliberately named incorrectly so as not to override the default +llms.txt file. + +https://www.mintlify.com/docs/ai/llmstxt + +An llms.txt file is a plain Markdown file that contains: Site title as an H1 +heading. Structured content sections with links and a description of each page +in your documentation. 
+ +**Ensure all pages have a description for LLMs.txt to be useful.** + +Each page’s description comes from the description field in its frontmatter. +Pages without a description field appear in the llms.txt file without a +description. + +Example + +``` +# Site title + +## Docs + +- [API](https://example.com/docs/api): Endpoint list and usage +- [Install](https://example.com/docs/install): Setup steps +- [Getting started](https://example.com/docs/start): Intro guide +``` + +This structured approach allows LLMs to efficiently process your documentation +at a high level and locate relevant content for user queries, improving the +accuracy and speed of AI-assisted documentation searches. diff --git a/mint.json b/mintOld.json similarity index 97% rename from mint.json rename to mintOld.json index b70dc247..ab684bf8 100644 --- a/mint.json +++ b/mintOld.json @@ -336,12 +336,7 @@ "url": "https://discord.gg/livepeer" } ], - "versions": [ - "Developers", - "Delegators", - "Orchestrators", - "Gateways" - ], + "versions": ["Developers", "Delegators", "Orchestrators", "Gateways"], "topbarCtaButton": { "name": "Dashboard", "url": "https://livepeer.studio" @@ -430,9 +425,7 @@ { "group": "Webhooks", "icon": "bell", - "pages": [ - "developers/guides/setup-and-listen-to-webhooks" - ] + "pages": ["developers/guides/setup-and-listen-to-webhooks"] }, { "group": "Transcode API", @@ -454,9 +447,7 @@ { "group": "Projects", "icon": "folder-open", - "pages": [ - "developers/guides/managing-projects" - ] + "pages": ["developers/guides/managing-projects"] }, { "group": "Integrations", @@ -582,9 +573,7 @@ "group": "How to Contribute", "icon": "heart", "iconType": "solid", - "pages": [ - "ai/contributors/coming-soon" - ] + "pages": ["ai/contributors/coming-soon"] }, { "group": "SDKs", @@ -645,17 +634,11 @@ }, { "group": "Overview", - "pages": [ - "sdks/introduction" - ] + "pages": ["sdks/introduction"] }, { "group": "Server-side SDKs", - "pages": [ - "sdks/javascript", - "sdks/go", - "sdks/python" - ] + "pages": ["sdks/javascript", "sdks/go", "sdks/python"] }, { "group": "React Components", @@ -734,10 +717,7 @@ { "group": "Examples", "icon": "clipboard", - "pages": [ - "sdks/react/Player", - "sdks/react/Broadcast" - ] + "pages": ["sdks/react/Player", "sdks/react/Broadcast"] }, { "group": "Migration", @@ -773,9 +753,7 @@ }, { "group": "Playback", - "pages": [ - "sdks/react/migration/3.x/playback/usePlaybackInfo" - ] + "pages": ["sdks/react/migration/3.x/playback/usePlaybackInfo"] }, { "group": "Constants", diff --git a/openapi.yaml b/openapi.yaml index d1674c23..220a7f78 100644 --- a/openapi.yaml +++ b/openapi.yaml @@ -84,7 +84,10 @@ components: quality: type: integer description: > - Restricts the size of the output video using the constant quality feature. Increasing this value will result in a lower quality video. Note that this parameter might not work if the transcoder lacks support for it. + Restricts the size of the output video using the constant quality + feature. Increasing this value will result in a lower quality video. + Note that this parameter might not work if the transcoder lacks + support for it. minimum: 0 maximum: 44 @@ -131,7 +134,10 @@ components: quality: type: integer description: > - Restricts the size of the output video using the constant quality feature. Increasing this value will result in a lower quality video. Note that this parameter might not work if the transcoder lacks support for it. + Restricts the size of the output video using the constant quality + feature. 
Increasing this value will result in a lower quality video. + Note that this parameter might not work if the transcoder lacks + support for it. minimum: 0 maximum: 44 @@ -193,7 +199,8 @@ components: createdAt: type: number readOnly: true - description: Timestamp (in milliseconds) at which stream object was created + description: + Timestamp (in milliseconds) at which stream object was created example: 1587667174725 events: type: array @@ -251,7 +258,8 @@ components: type: number readOnly: true example: 1587667174725 - description: Timestamp (in milliseconds) at which the webhook last failed + description: + Timestamp (in milliseconds) at which the webhook last failed error: readOnly: true type: string @@ -354,17 +362,24 @@ components: playbackId: type: string description: >- - The playback ID of the stream or stream recording to clip. Asset playback IDs are not supported yet. + The playback ID of the stream or stream recording to clip. Asset + playback IDs are not supported yet. example: eaw4nk06ts2d0mzb startTime: type: number description: >- - The start timestamp of the clip in Unix milliseconds. _See the ClipTrigger in the UI Kit for an example of how this is calculated (for HLS, it uses `Program Date-Time` tags, and for WebRTC, it uses the latency from server to client at stream startup)._ + The start timestamp of the clip in Unix milliseconds. _See the + ClipTrigger in the UI Kit for an example of how this is calculated + (for HLS, it uses `Program Date-Time` tags, and for WebRTC, it uses + the latency from server to client at stream startup)._ example: 1587667174725 endTime: type: number description: >- - The end timestamp of the clip in Unix milliseconds. _See the ClipTrigger in the UI Kit for an example of how this is calculated (for HLS, it uses `Program Date-Time` tags, and for WebRTC, it uses the latency from server to client at stream startup)._ + The end timestamp of the clip in Unix milliseconds. _See the + ClipTrigger in the UI Kit for an example of how this is calculated + (for HLS, it uses `Program Date-Time` tags, and for WebRTC, it uses + the latency from server to client at stream startup)._ example: 1587667174725 name: type: string @@ -373,7 +388,9 @@ components: sessionId: type: string description: >- - The optional session ID of the stream to clip. This can be used to clip _recordings_ - if it is not specified, it will clip the ongoing livestream. + The optional session ID of the stream to clip. This can be used to + clip _recordings_ - if it is not specified, it will clip the ongoing + livestream. example: de7818e7-610a-4057-8f6f-b785dc1e6f88 target: type: object @@ -432,7 +449,8 @@ components: createdAt: type: number readOnly: true - description: Timestamp (in milliseconds) at which stream object was created + description: + Timestamp (in milliseconds) at which stream object was created example: 1587667174725 stream: type: object @@ -512,7 +530,8 @@ components: createdAt: type: number readOnly: true - description: Timestamp (in milliseconds) at which stream object was created + description: + Timestamp (in milliseconds) at which stream object was created example: 1587667174725 parentId: type: string @@ -585,7 +604,9 @@ components: playbackId: type: string example: eaw4nk06ts2d0mzb - description: The playback ID to use with the Playback Info endpoint to retrieve playback URLs. + description: + The playback ID to use with the Playback Info endpoint to retrieve + playback URLs. 
playbackPolicy: $ref: "#/components/schemas/playback-policy" profiles: @@ -664,7 +685,8 @@ components: - number - "null" example: 1713281212993 - description: Timestamp (in milliseconds) when the stream was last terminated + description: + Timestamp (in milliseconds) when the stream was last terminated userId: type: string readOnly: true @@ -789,7 +811,9 @@ components: - type: "array" items: type: "string" - description: "A string array of human-readable errors describing issues affecting the stream, if any." + description: + "A string array of human-readable errors describing issues affecting + the stream, if any." tracks: type: object description: | @@ -806,7 +830,8 @@ components: description: "The bitrate of the track, in kilobits per second." keys: type: object - description: An object containing additional track-specific metrics. + description: + An object containing additional track-specific metrics. additionalProperties: type: number fpks: @@ -890,7 +915,8 @@ components: createdAt: readOnly: true type: number - description: Timestamp (in milliseconds) at which stream object was created + description: + Timestamp (in milliseconds) at which stream object was created example: 1587667174725 parentId: type: string @@ -902,14 +928,16 @@ components: example: aac12556-4d65-4d34-9fb6-d1f0985eb0a9 record: description: > - Whether the stream should be recorded. Uses default settings. For more customization, create and configure an object store. + Whether the stream should be recorded. Uses default settings. For + more customization, create and configure an object store. type: boolean example: false recordingStatus: readOnly: true type: string - description: The status of the recording process of this stream session. + description: + The status of the recording process of this stream session. enum: - waiting - ready @@ -923,12 +951,14 @@ components: mp4Url: type: string readOnly: true - description: The URL for the stream session recording packaged in an MP4. + description: + The URL for the stream session recording packaged in an MP4. playbackId: type: string example: eaw4nk06ts2d0mzb description: >- - The playback ID to use with the Playback Info endpoint to retrieve playback URLs. + The playback ID to use with the Playback Info endpoint to retrieve + playback URLs. profiles: $ref: "#/components/schemas/stream/properties/profiles" recordingSpec: @@ -964,7 +994,8 @@ components: url: type: string writeOnly: true - description: Livepeer-compatible multistream target URL (RTMP(S) or SRT) + description: + Livepeer-compatible multistream target URL (RTMP(S) or SRT) example: "rtmps://live.my-service.tv/channel/secretKey" format: uri pattern: "^(srt|rtmps?)://" @@ -1003,7 +1034,8 @@ components: type: string example: eaw4nk06ts2d0mzb description: >- - The playback ID to use with the Playback Info endpoint to retrieve playback URLs. + The playback ID to use with the Playback Info endpoint to retrieve + playback URLs. userId: type: string readOnly: true @@ -1019,13 +1051,18 @@ components: example: >- https://livepeercdn.com/asset/ea03f37e-f861-4cdd-b495-0e60b6d753ad/index.m3u8 description: >- - URL for HLS playback. **It is recommended to not use this URL**, and instead use playback IDs with the Playback Info endpoint to retrieve the playback URLs - this URL format is subject to change (e.g. https://livepeercdn.com/asset/ea03f37e-f861-4cdd-b495-0e60b6d753ad/index.m3u8). + URL for HLS playback. 
**It is recommended to not use this URL**, and + instead use playback IDs with the Playback Info endpoint to retrieve + the playback URLs - this URL format is subject to change (e.g. + https://livepeercdn.com/asset/ea03f37e-f861-4cdd-b495-0e60b6d753ad/index.m3u8). downloadUrl: readOnly: true type: string example: "https://livepeercdn.com/asset/eaw4nk06ts2d0mzb/video/download.mp4" description: >- - The URL to directly download the asset, e.g. `https://livepeercdn.com/asset/eawrrk06ts2d0mzb/video`. It is not recommended to use this for playback. + The URL to directly download the asset, e.g. + `https://livepeercdn.com/asset/eawrrk06ts2d0mzb/video`. It is not + recommended to use this for playback. playbackPolicy: $ref: "#/components/schemas/playback-policy" source: @@ -1045,7 +1082,8 @@ components: gatewayUrl: type: string description: >- - Gateway URL from asset if parsed from provided URL on upload. + Gateway URL from asset if parsed from provided URL on + upload. encryption: $ref: "#/components/schemas/new-asset-payload/properties/encryption" - additionalProperties: false @@ -1059,7 +1097,8 @@ components: - recording sessionId: type: string - description: ID of the session from which this asset was created + description: + ID of the session from which this asset was created - additionalProperties: false required: - type @@ -1073,20 +1112,25 @@ components: $ref: "#/components/schemas/new-asset-payload/properties/encryption" sourceId: type: string - description: ID of the asset or stream from which this asset was created. + description: + ID of the asset or stream from which this asset was created. sessionId: type: string - description: ID of the session from which this asset was created. + description: + ID of the session from which this asset was created. playbackId: type: string description: >- - Playback ID of the asset or stream from which this asset was created. + Playback ID of the asset or stream from which this asset was + created. requesterId: type: string - description: ID of the requester from which this asset was created. + description: + ID of the requester from which this asset was created. assetId: type: string - description: ID of the asset from which this asset was created. + description: + ID of the asset from which this asset was created. creatorId: $ref: "#/components/schemas/creator-id" profiles: @@ -1162,7 +1206,8 @@ components: - deleted updatedAt: type: number - description: Timestamp (in milliseconds) at which the asset was last updated + description: + Timestamp (in milliseconds) at which the asset was last updated example: 1587667174725 progress: type: number @@ -1173,7 +1218,8 @@ components: name: type: string description: > - The name of the asset. This is not necessarily the filename - it can be a custom name or title. + The name of the asset. This is not necessarily the filename - it can + be a custom name or title. example: filename.mp4 projectId: @@ -1275,7 +1321,8 @@ components: example: 1080 pixelFormat: type: string - description: Pixel format of the track - only for video tracks + description: + Pixel format of the track - only for video tracks example: yuv420p fps: type: number @@ -1329,7 +1376,8 @@ components: name: type: string description: > - The name of the asset. This is not necessarily the filename - it can be a custom name or title. + The name of the asset. This is not necessarily the filename - it can + be a custom name or title. 
example: filename.mp4 staticMp4: @@ -1370,7 +1418,8 @@ components: type: string writeOnly: true description: >- - Encryption key used to encrypt the asset. Only writable in the upload asset endpoints and cannot be retrieved back. + Encryption key used to encrypt the asset. Only writable in the + upload asset endpoints and cannot be retrieved back. c2pa: type: boolean description: Decides if the output video should include C2PA signature @@ -1382,7 +1431,8 @@ components: $ref: "#/components/schemas/transcode-profile" targetSegmentSizeSecs: type: number - description: How many seconds the duration of each output segment should be + description: + How many seconds the duration of each output segment should be room-user-payload: type: object required: @@ -1399,7 +1449,8 @@ components: example: true canPublishData: type: boolean - description: Whether a user is allowed to publish data messages to the room + description: + Whether a user is allowed to publish data messages to the room example: true metadata: type: string @@ -1421,12 +1472,14 @@ components: joinUrl: type: string description: >- - Joining URL - use this for Livepeer's default meeting app (see the multiparticipant streaming guide for more info). + Joining URL - use this for Livepeer's default meeting app (see the + multiparticipant streaming guide for more info). example: "https://meet.livepeer.chat" token: type: string description: >- - Joining JWT - this can be used if you have a custom meeting app (see the multiparticipant streaming guide for more info). + Joining JWT - this can be used if you have a custom meeting app (see + the multiparticipant streaming guide for more info). example: token get-room-user-response: type: object @@ -1470,12 +1523,14 @@ components: canPublish: type: boolean description: >- - Whether a user is allowed to publish audio/video tracks (i.e. their microphone and webcam) + Whether a user is allowed to publish audio/video tracks (i.e. 
their + microphone and webcam) example: true default: true canPublishData: type: boolean - description: Whether a user is allowed to publish data messages to the room + description: + Whether a user is allowed to publish data messages to the room example: true default: true metadata: @@ -1566,7 +1621,10 @@ components: format: uri pattern: "^http(s)?://" description: >- - Service endpoint URL (AWS S3 endpoint list: https://docs.aws.amazon.com/general/latest/gr/s3.html, GCP S3 endpoint: https://storage.googleapis.com, Storj: https://gateway.storjshare.io) + Service endpoint URL (AWS S3 endpoint list: + https://docs.aws.amazon.com/general/latest/gr/s3.html, GCP + S3 endpoint: https://storage.googleapis.com, Storj: + https://gateway.storjshare.io) example: "https://gateway.storjshare.io" bucket: type: string @@ -1613,7 +1671,10 @@ components: format: uri pattern: "^http(s)?://" description: >- - Service endpoint URL (AWS S3 endpoint list: https://docs.aws.amazon.com/general/latest/gr/s3.html, GCP S3 endpoint: https://storage.googleapis.com, Storj: https://gateway.storjshare.io) + Service endpoint URL (AWS S3 endpoint list: + https://docs.aws.amazon.com/general/latest/gr/s3.html, GCP + S3 endpoint: https://storage.googleapis.com, Storj: + https://gateway.storjshare.io) example: "https://gateway.storjshare.io" bucket: type: string @@ -1705,7 +1766,8 @@ components: $ref: "#/components/schemas/transcode-profile" targetSegmentSizeSecs: type: number - description: How many seconds the duration of each output segment should be + description: + How many seconds the duration of each output segment should be creatorId: $ref: "#/components/schemas/input-creator-id" c2pa: @@ -1776,7 +1838,8 @@ components: $ref: "#/components/schemas/new-asset-payload/properties/encryption" c2pa: type: boolean - description: Decides if the output video should include C2PA signature + description: + Decides if the output video should include C2PA signature example: true profiles: type: array @@ -1785,7 +1848,8 @@ components: targetSegmentSizeSecs: type: number description: >- - How many seconds the duration of each output segment should be + How many seconds the duration of each output segment should + be example: 6 export: $ref: "#/components/schemas/export-task-params" @@ -1875,7 +1939,8 @@ components: $ref: "#/components/schemas/input-creator-id" c2pa: type: boolean - description: Decides if the output video should include C2PA signature + description: + Decides if the output video should include C2PA signature example: false clip: properties: @@ -1887,7 +1952,10 @@ components: clipStrategy: type: object description: >- - Strategy to use for clipping the asset. If not specified, the default strategy that Catalyst is configured for will be used. This field only available for admin users, and is only used for E2E testing. + Strategy to use for clipping the asset. If not specified, + the default strategy that Catalyst is configured for will be + used. This field only available for admin users, and is only + used for E2E testing. additionalProperties: false properties: startTime: @@ -1899,7 +1967,10 @@ components: catalystPipelineStrategy: type: string description: >- - Force to use a specific strategy in the Catalyst pipeline. If not specified, the default strategy that Catalyst is configured for will be used. This field only available for admin users, and is only used for E2E testing. + Force to use a specific strategy in the Catalyst pipeline. 
+ If not specified, the default strategy that Catalyst is + configured for will be used. This field only available for + admin users, and is only used for E2E testing. enum: - catalyst - catalyst_ffmpeg @@ -2000,16 +2071,19 @@ components: videoFileGatewayUrl: readOnly: true type: string - description: URL to access file via HTTP through an IPFS gateway + description: + URL to access file via HTTP through an IPFS gateway example: "https://gateway.ipfs.io/ipfs/Qmabc123xyz341" nftMetadataCid: type: string - description: IPFS CID of the default metadata exported for the video + description: + IPFS CID of the default metadata exported for the video example: Qmabc123xyz341 nftMetadataUrl: readOnly: true type: string - description: URL for the metadata file with the IPFS protocol + description: + URL for the metadata file with the IPFS protocol example: "ipfs://Qmabc123xyz341" nftMetadataGatewayUrl: readOnly: true @@ -2049,7 +2123,8 @@ components: type: string - type: string description: >- - Helper syntax to specify an unverified creator ID, fully managed by the developer. + Helper syntax to specify an unverified creator ID, fully managed by + the developer. creator-id: oneOf: - type: object @@ -2065,7 +2140,8 @@ components: example: "unverified" value: type: string - description: Developer-managed ID of the user who created the resource. + description: + Developer-managed ID of the user who created the resource. example: "user123" export-task-params: description: Parameters for the export task @@ -2128,12 +2204,14 @@ components: createdAt: readOnly: true type: number - description: Timestamp (in milliseconds) at which the signing-key was created + description: + Timestamp (in milliseconds) at which the signing-key was created example: 1587667174725 lastSeen: readOnly: true type: number - description: Timestamp (in milliseconds) at which the signing-key was last used + description: + Timestamp (in milliseconds) at which the signing-key was last used example: 1587667174725 publicKey: type: string @@ -2201,15 +2279,18 @@ components: example: 1234 createdAt: type: number - description: Timestamp (in milliseconds) at which user object was created + description: + Timestamp (in milliseconds) at which user object was created example: 1587667174725 verifiedAt: type: number - description: Timestamp (in milliseconds) at which user object was verified + description: + Timestamp (in milliseconds) at which user object was verified example: 1587667174725 planChangedAt: type: number - description: Timestamp (in milliseconds) at which user object was verified + description: + Timestamp (in milliseconds) at which user object was verified example: 1587667174725 lastStreamedAt: type: number @@ -2219,7 +2300,8 @@ components: example: 1587667174725 lastSeen: type: number - description: Timestamp (in milliseconds) at which user's password was used + description: + Timestamp (in milliseconds) at which user's password was used example: 1587667174725 usage: type: object @@ -2252,7 +2334,8 @@ components: type: - object - "null" - description: Whether the playback policy for an asset or stream is public or signed + description: + Whether the playback policy for an asset or stream is public or signed additionalProperties: false required: - type @@ -2272,7 +2355,7 @@ components: type: object description: User-defined webhook context additionalProperties: true - example: {"streamerId": "my-custom-id"} + example: { "streamerId": "my-custom-id" } refreshInterval: type: number description: | @@ -2281,7 +2364,9 @@ components: 
example: 600 allowedOrigins: type: array - description: List of allowed origins for CORS playback (://:, ://) + description: + List of allowed origins for CORS playback + (://:, ://) items: type: string usage-metric: @@ -2417,7 +2502,8 @@ components: example: America/Los_Angeles geohash: type: string - description: Geographic encoding of the viewers location. Accurate to 3 digits. + description: + Geographic encoding of the viewers location. Accurate to 3 digits. example: 123 viewCount: type: integer @@ -2624,7 +2710,8 @@ components: createdAt: type: number readOnly: true - description: Timestamp (in milliseconds) at which the object was created + description: + Timestamp (in milliseconds) at which the object was created example: 1587667174725 signatureType: type: string @@ -2644,7 +2731,8 @@ components: readOnly: true type: number description: > - Timestamp (in milliseconds) at which IPFS export task was updated + Timestamp (in milliseconds) at which IPFS export task was + updated example: 1587667174725 status: @@ -2685,7 +2773,8 @@ components: apiSecret: type: string writeOnly: true - description: Will be added to the pinata_secret_api_key header. + description: + Will be added to the pinata_secret_api_key header. example: 1234567890abcdef storage-status: readOnly: true @@ -2753,9 +2842,11 @@ components: format: uri pattern: "^(https?|ipfs|ar)://" description: > - URL where the asset contents can be retrieved, e.g. `https://s3.amazonaws.com/my-bucket/path/filename.mp4`. + URL where the asset contents can be retrieved, e.g. + `https://s3.amazonaws.com/my-bucket/path/filename.mp4`. - For an IPFS source, this should be similar to: `ipfs://{CID}`. For an Arweave + For an IPFS source, this should be similar to: `ipfs://{CID}`. For + an Arweave source: `ar://{CID}`. @@ -2796,8 +2887,11 @@ components: type: string title: Return Timestamps description: >- - Return timestamps for the transcribed text. Supported values: 'sentence', 'word', or a string boolean ('true' or 'false'). Default is 'true' ('sentence'). 'false' means no timestamps. 'word' means word-based timestamps. - default: 'true' + Return timestamps for the transcribed text. Supported values: + 'sentence', 'word', or a string boolean ('true' or 'false'). Default + is 'true' ('sentence'). 'false' means no timestamps. 'word' means + word-based timestamps. + default: "true" type: object required: - audio @@ -2823,36 +2917,43 @@ components: type: string title: Loras description: >- - A LoRA (Low-Rank Adaptation) model and its corresponding weight for image generation. Example: { "latent-consistency/lcm-lora-sdxl": 1.0, "nerijs/pixel-art-xl": 1.2}. - default: '' + A LoRA (Low-Rank Adaptation) model and its corresponding weight for + image generation. Example: { "latent-consistency/lcm-lora-sdxl": + 1.0, "nerijs/pixel-art-xl": 1.2}. + default: "" strength: type: number title: Strength - description: Degree of transformation applied to the reference image (0 to 1). + description: + Degree of transformation applied to the reference image (0 to 1). default: 0.8 guidance_scale: type: number title: Guidance Scale description: >- - Encourages model to generate images closely linked to the text prompt (higher values may reduce image quality). + Encourages model to generate images closely linked to the text + prompt (higher values may reduce image quality). default: 7.5 image_guidance_scale: type: number title: Image Guidance Scale description: >- - Degree to which the generated image is pushed towards the initial image. 
+ Degree to which the generated image is pushed towards the initial + image. default: 1.5 negative_prompt: type: string title: Negative Prompt description: >- - Text prompt(s) to guide what to exclude from image generation. Ignored if guidance_scale < 1. - default: '' + Text prompt(s) to guide what to exclude from image generation. + Ignored if guidance_scale < 1. + default: "" safety_check: type: boolean title: Safety Check description: >- - Perform a safety check to estimate if generated images could be offensive or harmful. + Perform a safety check to estimate if generated images could be + offensive or harmful. default: true seed: type: integer @@ -2862,7 +2963,8 @@ components: type: integer title: Num Inference Steps description: >- - Number of denoising steps. More steps usually lead to higher quality images but slower inference. Modulated by strength. + Number of denoising steps. More steps usually lead to higher quality + images but slower inference. Modulated by strength. default: 100 num_images_per_prompt: type: integer @@ -2906,19 +3008,22 @@ components: type: integer title: Motion Bucket Id description: >- - Used for conditioning the amount of motion for the generation. The higher the number the more motion will be in the video. + Used for conditioning the amount of motion for the generation. The + higher the number the more motion will be in the video. default: 127 noise_aug_strength: type: number title: Noise Aug Strength description: >- - Amount of noise added to the conditioning image. Higher values reduce resemblance to the conditioning image and increase motion. + Amount of noise added to the conditioning image. Higher values + reduce resemblance to the conditioning image and increase motion. default: 0.02 safety_check: type: boolean title: Safety Check description: >- - Perform a safety check to estimate if generated images could be offensive or harmful. + Perform a safety check to estimate if generated images could be + offensive or harmful. default: true seed: type: integer @@ -2928,7 +3033,8 @@ components: type: integer title: Num Inference Steps description: >- - Number of denoising steps. More steps usually lead to higher quality images but slower inference. Modulated by strength. + Number of denoising steps. More steps usually lead to higher quality + images but slower inference. Modulated by strength. default: 25 type: object required: @@ -2947,7 +3053,7 @@ components: system_msg: type: string title: System Msg - default: '' + default: "" temperature: type: number title: Temperature @@ -2959,7 +3065,7 @@ components: history: type: string title: History - default: '[]' + default: "[]" stream: type: boolean title: Stream @@ -2985,38 +3091,47 @@ components: type: string title: Point Coords description: >- - Nx2 array of point prompts to the model, where each point is in (X,Y) in pixels. + Nx2 array of point prompts to the model, where each point is in + (X,Y) in pixels. point_labels: type: string title: Point Labels description: >- - Labels for the point prompts, where 1 indicates a foreground point and 0 indicates a background point. + Labels for the point prompts, where 1 indicates a foreground point + and 0 indicates a background point. box: type: string title: Box - description: 'A length 4 array given as a box prompt to the model, in XYXY format.' + description: + "A length 4 array given as a box prompt to the model, in XYXY + format." 
mask_input: type: string title: Mask Input description: >- - A low-resolution mask input to the model, typically from a previous prediction iteration, with the form 1xHxW (H=W=256 for SAM). + A low-resolution mask input to the model, typically from a previous + prediction iteration, with the form 1xHxW (H=W=256 for SAM). multimask_output: type: boolean title: Multimask Output description: >- - If true, the model will return three masks for ambiguous input prompts, often producing better masks than a single prediction. + If true, the model will return three masks for ambiguous input + prompts, often producing better masks than a single prediction. default: true return_logits: type: boolean title: Return Logits description: >- - If true, returns un-thresholded mask logits instead of a binary mask. + If true, returns un-thresholded mask logits instead of a binary + mask. default: true normalize_coords: type: boolean title: Normalize Coords description: >- - If true, the point coordinates will be normalized to the range [0,1], with point_coords expected to be with respect to image dimensions. + If true, the point coordinates will be normalized to the range + [0,1], with point_coords expected to be with respect to image + dimensions. default: true type: object required: @@ -3043,7 +3158,8 @@ components: type: boolean title: Safety Check description: >- - Perform a safety check to estimate if generated images could be offensive or harmful. + Perform a safety check to estimate if generated images could be + offensive or harmful. default: true seed: type: integer @@ -3053,7 +3169,8 @@ components: type: integer title: Num Inference Steps description: >- - Number of denoising steps. More steps usually lead to higher quality images but slower inference. Modulated by strength. + Number of denoising steps. More steps usually lead to higher quality + images but slower inference. Modulated by strength. default: 75 type: object required: @@ -3082,7 +3199,7 @@ components: properties: detail: allOf: - - $ref: '#/components/schemas/APIError' + - $ref: "#/components/schemas/APIError" description: Detailed error information. type: object required: @@ -3093,7 +3210,7 @@ components: properties: detail: items: - $ref: '#/components/schemas/ValidationError' + $ref: "#/components/schemas/ValidationError" type: array title: Detail type: object @@ -3102,7 +3219,7 @@ components: properties: images: items: - $ref: '#/components/schemas/Media' + $ref: "#/components/schemas/Media" type: array title: Images description: The generated images. @@ -3137,7 +3254,8 @@ components: logits: type: string title: Logits - description: 'The raw, unnormalized predictions (logits) for the masks.' + description: + "The raw, unnormalized predictions (logits) for the masks." type: object required: - masks @@ -3165,7 +3283,8 @@ components: - seed - nsfw title: Media - description: A media object containing information about the generated media. + description: + A media object containing information about the generated media. TextResponse: properties: text: @@ -3174,7 +3293,7 @@ components: description: The generated text. chunks: items: - $ref: '#/components/schemas/Chunk' + $ref: "#/components/schemas/Chunk" type: array title: Chunks description: The generated text chunks. @@ -3195,13 +3314,16 @@ components: type: string title: Loras description: >- - A LoRA (Low-Rank Adaptation) model and its corresponding weight for image generation. Example: { "latent-consistency/lcm-lora-sdxl": 1.0, "nerijs/pixel-art-xl": 1.2}. 
- default: '' + A LoRA (Low-Rank Adaptation) model and its corresponding weight for + image generation. Example: { "latent-consistency/lcm-lora-sdxl": + 1.0, "nerijs/pixel-art-xl": 1.2}. + default: "" prompt: type: string title: Prompt description: >- - Text prompt(s) to guide image generation. Separate multiple prompts with '|' if supported by the model. + Text prompt(s) to guide image generation. Separate multiple prompts + with '|' if supported by the model. height: type: integer title: Height @@ -3216,19 +3338,22 @@ components: type: number title: Guidance Scale description: >- - Encourages model to generate images closely linked to the text prompt (higher values may reduce image quality). + Encourages model to generate images closely linked to the text + prompt (higher values may reduce image quality). default: 7.5 negative_prompt: type: string title: Negative Prompt description: >- - Text prompt(s) to guide what to exclude from image generation. Ignored if guidance_scale < 1. - default: '' + Text prompt(s) to guide what to exclude from image generation. + Ignored if guidance_scale < 1. + default: "" safety_check: type: boolean title: Safety Check description: >- - Perform a safety check to estimate if generated images could be offensive or harmful. + Perform a safety check to estimate if generated images could be + offensive or harmful. default: true seed: type: integer @@ -3238,7 +3363,8 @@ components: type: integer title: Num Inference Steps description: >- - Number of denoising steps. More steps usually lead to higher quality images but slower inference. Modulated by strength. + Number of denoising steps. More steps usually lead to higher quality + images but slower inference. Modulated by strength. default: 50 num_images_per_prompt: type: integer @@ -3275,7 +3401,7 @@ components: properties: images: items: - $ref: '#/components/schemas/Media' + $ref: "#/components/schemas/Media" type: array title: Images description: The generated images. @@ -4800,7 +4926,9 @@ paths: tags: - webhook description: > - To create a new webhook, you need to make an API call with the events you want to listen for and the URL that will be called when those events occur. + To create a new webhook, you need to make an API call with the events + you want to listen for and the URL that will be called when those events + occur. requestBody: required: true @@ -5657,13 +5785,19 @@ paths: url: type: string description: >- - The direct upload endpoint for which supports PUT requests. **It is recommended to use the Tus endpoint for a better upload experience.** + The direct upload endpoint for which supports PUT + requests. **It is recommended to use the Tus endpoint for + a better upload experience.** example: >- https://origin.livepeer.com/api/asset/upload/direct?token=eyJhbGciOiJIUzI1NiJ9.eyJtc2ciOiJoZWxsbyBoYWNrZXIsIHRoZXJlJ3Mgbm90aGluZyBmb3IgeW91IGhlcmUg8J-YhiJ9.1YDjmXsqLcgNyMSzT4kXl_kIni46_EuGX_xfqmC7e0Q tusEndpoint: type: string description: >- - The [Tus-compatible](https://tus.io/) endpoint for resumable uploads. **This is the recommended way to upload assets.** See the [Tus-js](https://github.com/tus/tus-js-client) client for more information. + The [Tus-compatible](https://tus.io/) endpoint for + resumable uploads. **This is the recommended way to upload + assets.** See the + [Tus-js](https://github.com/tus/tus-js-client) client for + more information. 
example: >- https://origin.livepeer.com/api/asset/upload/tus?token=eyJhbGciOiJIUzI1NiJ9.eyJtc2ciOiJoZWxsbyBoYWNrZXIsIHRoZXJlJ3Mgbm90aGluZyBmb3IgeW91IGhlcmUg8J-YhiJ9.1YDjmXsqLcgNyMSzT4kXl_kIni46_EuGX_xfqmC7e0Q asset: @@ -7020,7 +7154,8 @@ paths: description: > Create a livestream for your room. - This allows you to leverage livestreaming features like recording and HLS output. + This allows you to leverage livestreaming features like recording and + HLS output. responses: default: @@ -7159,9 +7294,11 @@ paths: type: string summary: Create a room user description: > - Call this endpoint to add a user to a room, specifying a display name at a minimum. + Call this endpoint to add a user to a room, specifying a display name at + a minimum. - The response will contain a joining URL for Livepeer's default meeting app. + The response will contain a joining URL for Livepeer's default meeting + app. Alternatively the joining token can be used with a custom app. @@ -7725,7 +7862,9 @@ paths: tags: - metrics description: > - Requires a proof of ownership to be sent in the request, which for now is just the assetId or streamId parameters (1 of those must be in the query-string). + Requires a proof of ownership to be sent in the request, which for now + is just the assetId or streamId parameters (1 of those must be in the + query-string). parameters: - name: from @@ -7894,7 +8033,8 @@ paths: schema: $ref: "#/components/schemas/error" "200": - description: A single Metric object with the viewCount and playtimeMins metrics. + description: + A single Metric object with the viewCount and playtimeMins metrics. content: application/json: schema: @@ -8363,9 +8503,14 @@ paths: tags: - accessControl description: > - The publicKey is a representation of the public key, encoded as base 64 and is passed as a string, and the privateKey is displayed only on creation. This is the only moment where the client can save the private key, otherwise it will be lost. Remember to decode your string when signing JWTs. + The publicKey is a representation of the public key, encoded as base 64 + and is passed as a string, and the privateKey is displayed only on + creation. This is the only moment where the client can save the private + key, otherwise it will be lost. Remember to decode your string when + signing JWTs. - Up to 10 signing keys can be generated, after that you must delete at least one signing key to create a new one. + Up to 10 signing keys can be generated, after that you must delete at + least one signing key to create a new one. responses: default: @@ -9276,7 +9421,8 @@ paths: schema: type: string description: >- - The playback ID from the asset or livestream, e.g. `eaw4nk06ts2d0mzb`. + The playback ID from the asset or livestream, e.g. + `eaw4nk06ts2d0mzb`. 
responses: default: description: Error @@ -9365,7 +9511,7 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/TextToImageParams' + $ref: "#/components/schemas/TextToImageParams" required: true responses: default: @@ -9373,46 +9519,46 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/studio-api-error' - '200': + $ref: "#/components/schemas/studio-api-error" + "200": description: Successful Response content: application/json: schema: - $ref: '#/components/schemas/ImageResponse' + $ref: "#/components/schemas/ImageResponse" x-speakeasy-name-override: data - '400': + "400": description: Bad Request content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' - '401': + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" + "401": description: Unauthorized content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' - '422': + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" + "422": description: Validation Error content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPValidationError' - - $ref: '#/components/schemas/studio-api-error' - '500': + - $ref: "#/components/schemas/HTTPValidationError" + - $ref: "#/components/schemas/studio-api-error" + "500": description: Internal Server Error content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" x-speakeasy-name-override: textToImage x-codeSamples: - lang: typescript @@ -9445,7 +9591,7 @@ paths: content: multipart/form-data: schema: - $ref: '#/components/schemas/Body_genImageToImage' + $ref: "#/components/schemas/Body_genImageToImage" required: true responses: default: @@ -9453,46 +9599,46 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/studio-api-error' - '200': + $ref: "#/components/schemas/studio-api-error" + "200": description: Successful Response content: application/json: schema: - $ref: '#/components/schemas/ImageResponse' + $ref: "#/components/schemas/ImageResponse" x-speakeasy-name-override: data - '400': + "400": description: Bad Request content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' - '401': + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" + "401": description: Unauthorized content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' - '422': + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" + "422": description: Validation Error content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPValidationError' - - $ref: '#/components/schemas/studio-api-error' - '500': + - $ref: "#/components/schemas/HTTPValidationError" + - $ref: "#/components/schemas/studio-api-error" + "500": description: Internal Server Error content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" x-speakeasy-name-override: imageToImage x-codeSamples: - 
lang: typescript @@ -9527,7 +9673,7 @@ paths: content: multipart/form-data: schema: - $ref: '#/components/schemas/Body_genImageToVideo' + $ref: "#/components/schemas/Body_genImageToVideo" required: true responses: default: @@ -9535,46 +9681,46 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/studio-api-error' - '200': + $ref: "#/components/schemas/studio-api-error" + "200": description: Successful Response content: application/json: schema: - $ref: '#/components/schemas/VideoResponse' + $ref: "#/components/schemas/VideoResponse" x-speakeasy-name-override: data - '400': + "400": description: Bad Request content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' - '401': + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" + "401": description: Unauthorized content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' - '422': + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" + "422": description: Validation Error content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPValidationError' - - $ref: '#/components/schemas/studio-api-error' - '500': + - $ref: "#/components/schemas/HTTPValidationError" + - $ref: "#/components/schemas/studio-api-error" + "500": description: Internal Server Error content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" x-speakeasy-name-override: imageToVideo x-codeSamples: - lang: typescript @@ -9608,7 +9754,7 @@ paths: content: multipart/form-data: schema: - $ref: '#/components/schemas/Body_genUpscale' + $ref: "#/components/schemas/Body_genUpscale" required: true responses: default: @@ -9616,46 +9762,46 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/studio-api-error' - '200': + $ref: "#/components/schemas/studio-api-error" + "200": description: Successful Response content: application/json: schema: - $ref: '#/components/schemas/ImageResponse' + $ref: "#/components/schemas/ImageResponse" x-speakeasy-name-override: data - '400': + "400": description: Bad Request content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' - '401': + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" + "401": description: Unauthorized content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' - '422': + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" + "422": description: Validation Error content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPValidationError' - - $ref: '#/components/schemas/studio-api-error' - '500': + - $ref: "#/components/schemas/HTTPValidationError" + - $ref: "#/components/schemas/studio-api-error" + "500": description: Internal Server Error content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" x-speakeasy-name-override: upscale x-codeSamples: - lang: typescript @@ -9690,7 
+9836,7 @@ paths: content: multipart/form-data: schema: - $ref: '#/components/schemas/Body_genAudioToText' + $ref: "#/components/schemas/Body_genAudioToText" required: true responses: default: @@ -9698,62 +9844,62 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/studio-api-error' - '200': + $ref: "#/components/schemas/studio-api-error" + "200": description: Successful Response content: application/json: schema: - $ref: '#/components/schemas/TextResponse' + $ref: "#/components/schemas/TextResponse" x-speakeasy-name-override: data - '400': + "400": description: Bad Request content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' - '401': + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" + "401": description: Unauthorized content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' - '413': + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" + "413": description: Request Entity Too Large content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' - '415': + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" + "415": description: Unsupported Media Type content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' - '422': + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" + "422": description: Validation Error content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPValidationError' - - $ref: '#/components/schemas/studio-api-error' - '500': + - $ref: "#/components/schemas/HTTPValidationError" + - $ref: "#/components/schemas/studio-api-error" + "500": description: Internal Server Error content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" x-speakeasy-name-override: audioToText x-codeSamples: - lang: typescript @@ -9787,7 +9933,7 @@ paths: content: multipart/form-data: schema: - $ref: '#/components/schemas/Body_genSegmentAnything2' + $ref: "#/components/schemas/Body_genSegmentAnything2" required: true responses: default: @@ -9795,46 +9941,46 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/studio-api-error' - '200': + $ref: "#/components/schemas/studio-api-error" + "200": description: Successful Response content: application/json: schema: - $ref: '#/components/schemas/MasksResponse' + $ref: "#/components/schemas/MasksResponse" x-speakeasy-name-override: data - '400': + "400": description: Bad Request content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' - '401': + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" + "401": description: Unauthorized content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' - '422': + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" + "422": description: Validation Error content: application/json: schema: oneOf: - - $ref: 
'#/components/schemas/HTTPValidationError' - - $ref: '#/components/schemas/studio-api-error' - '500': + - $ref: "#/components/schemas/HTTPValidationError" + - $ref: "#/components/schemas/studio-api-error" + "500": description: Internal Server Error content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" x-speakeasy-name-override: segmentAnything2 x-codeSamples: - lang: typescript @@ -9868,7 +10014,7 @@ paths: content: application/x-www-form-urlencoded: schema: - $ref: '#/components/schemas/Body_genLLM' + $ref: "#/components/schemas/Body_genLLM" required: true responses: default: @@ -9876,45 +10022,45 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/studio-api-error' - '200': + $ref: "#/components/schemas/studio-api-error" + "200": description: Successful Response content: application/json: schema: - $ref: '#/components/schemas/LLMResponse' - '400': + $ref: "#/components/schemas/LLMResponse" + "400": description: Bad Request content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' - '401': + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" + "401": description: Unauthorized content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' - '422': + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" + "422": description: Validation Error content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPValidationError' - - $ref: '#/components/schemas/studio-api-error' - '500': + - $ref: "#/components/schemas/HTTPValidationError" + - $ref: "#/components/schemas/studio-api-error" + "500": description: Internal Server Error content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" x-speakeasy-name-override: llm x-codeSamples: - lang: typescript diff --git a/snippets/api-base-urls-table.mdx b/snippets/api-base-urls-table.mdx new file mode 100644 index 00000000..34243cb4 --- /dev/null +++ b/snippets/api-base-urls-table.mdx @@ -0,0 +1,27 @@ +{/* + API Base URLs Table Snippet + Usage: Include this in API landing pages for styled base URL tables + Pass props: urls (array of {name, url} objects) +*/} + +export const ApiBaseUrlsTable = ({ urls }) => ( +
+  <table>
+    <thead>
+      <tr>
+        <th>Environment</th>
+        <th>URL</th>
+      </tr>
+    </thead>
+    <tbody>
+      {urls.map((item, index) => (
+        <tr key={index}>
+          <td>{item.name}</td>
+          <td>{item.url}</td>
+        </tr>
+      ))}
+    </tbody>
+  </table>
+); + diff --git a/snippets/automationData/blog/ghostBlogData.jsx b/snippets/automationData/blog/ghostBlogData.jsx new file mode 100644 index 00000000..bcaf70db --- /dev/null +++ b/snippets/automationData/blog/ghostBlogData.jsx @@ -0,0 +1,191 @@ +export const ghostData = [ + { + title: `A Real-time Update to the Livepeer Network Vision`, + href: `https://blog.livepeer.org/a-real-time-update-to-the-livepeer-network-vision/`, + author: `By Livepeer Team`, + content: `

For the past year, the Livepeer Ecosystem has been guided by the Cascade vision: a path to transition from a pure streaming and transcoding infrastructure to an infrastructure that can succeed at providing compute for the future of real-time AI video. The latest Livepeer quarterly report from Messari highlights that this transition is paying off, with network fees up 3x from this time last year and over 72% of fees now driven by AI inference. This is exemplified by the growing inspirational examples emerging from Daydream-powered real-time AI, and real-time Agent avatar generation through Embody and the Agent SPE.

Source: Livepeer Q3 2025 Report by Messari

This shift has been an ecosystem-wide effort – ranging from branding and communications, to productization and go-to-market, to hardware upgrades for orchestrators. It has successfully moved the project under an updated mission and direction; however, it has still left ambiguity about what the Livepeer network itself offers as killer value propositions to new builders outside the existing ecosystem. Is it a GPU cloud? A transcoding infra? An API engine? Now that there are signs of validation and accelerated momentum around an exciting opportunity, it’s time to home in on a refined vision for the future of the Livepeer network as a product in itself.

The market for video is set to massively expand

The concept of live video itself is expanding well beyond a simple single stream of video captured from a camera. Now entire worlds and scenes are generated or enhanced in real-time via AI assistance, leading to more immersive and interactive experiences than possible via old-school streaming alone. For a taste of the future, see the following examples:

  1. The future of gaming will be AI-generated video and worlds rendered in real time.

  2. Video streams can be analyzed and their data leveraged programmatically in real time, for instant insight generation and decision making.

  3. Real-time style transfer can enable avatars and agents to participate in the global economy.

Video world models and real-time AI video are merging, as both use AI to generate frame-by-frame video output with low latency on the fly, based on user input and AI inference. This requires a tremendous amount of GPU compute and a first-rate low-latency video streaming and compute stack – two areas in which the Livepeer network and community thrive, and two areas to which the many generic GPU inference providers in the market bring no unique skill set, experience, or software advantage.

The big opportunity for the Livepeer network is to be the leading AI Infrastructure For Real-Time Video.
From interactive live streaming to generative world models, Livepeer’s open-access, low-latency network of GPUs will be the best compute solution for cutting edge AI video workflows. 

World models are a game-changing category, and Livepeer is well suited to offer a unique and differentiated product here that serves a huge market of diverse use cases. These range from creative entertainment, to gaming, to robotics, to data analysis, to monitoring and security, to synthetic data generation for AGI itself.

It is an ambitious stretch, but Nvidia executives responsible for the category have even projected that, due to the impact in robotics, the economic opportunity for world models could exceed $100 trillion – approximately the size of the entire global economic output itself!

What does it mean to productize the Livepeer network to succeed as a valuable infrastructure in this category?

From a simplified viewpoint, it needs to deliver on the following:

1. Ability for users to deploy real-time AI workflows to the Livepeer network and request inference on them

2. Industry-leading latency for providing inference on real-time AI and world model workflows.

3. Cost-effective scalability – users can pay as they go to scale capacity up and down, and the network automagically delivers the scale required.

Imagine a gaming platform powering world-model-generated games using its unique workflows, which generate game levels or areas in a certain style by combining several real-time models, LLMs, and style transfer mechanisms. Each game it’s powering has users exploring and creating their own corners of the interactive worlds based on prompts and gameplay inputs. Every gamer who joins a game represents a new stream of AI video compute, and the Livepeer network is the backing infrastructure that provides the compute for this video world generation, leveraging hundreds or thousands of GPUs concurrently.

For this to be possible, the Livepeer network needs to enable that game platform to deploy its game generation workflow. It needs to offer low latency on the inference that runs this workflow, relative to generic GPU compute clouds. The pricing needs to be competitive versus alternative options in the market for this GPU compute. And the network needs to allow the company to scale up and down the number of GPUs that are live and ready to accept new real-time inference streams, based on the number of users currently active in the games it powers.

All of this is possible on the Livepeer network, and it isn’t far away from where we are now. If we work to build, test, and iterate on the Livepeer network itself towards supporting the latency and scale required for these types of workflows, we’ll be set up to power them.
Now multiply this example gaming company by the many diverse industries and verticals that real-time AI and world models will touch. Each category can have one or multiple companies competing to leverage this scalable and cost-effective infrastructure for unique go-to-market strategies targeting different segments. And they can all be powered by the Livepeer network’s unique value propositions.

Livepeer’s core network is strategically positioned

What are the value propositions that make the Livepeer network differentiated relative to alternative options in the market? I’d argue that there are three primary, table-stakes, must-have value propositions if Livepeer is to succeed.

1. Industry-standard low-latency infrastructure specializing in real-time AI and world model workflows: First, the network needs to let its users deploy custom workflows. Inference alone on base models is not enough and does not represent scaled demand. Users want to take base models, chain them together with other models and pre/post-processors, and create unique and specialized capabilities. When one of these capabilities is defined as a workflow, that is the unit that needs to be deployed as a job on the Livepeer network, and the network needs to be able to run inference on it. Second, for these real-time interactive use cases, latency matters a lot. Generic GPU clouds don’t offer the specialized low-latency video stacks to ingest, process, and serve video with optimal latency, but Livepeer does. And Livepeer needs to benchmark itself to have latency lower than or equal to alternative GPU clouds for these particular real-time and world model use cases.

2. Cost-effective scalability: GPU provisioning, reservations, and competing to procure scarce supply create major challenges for AI companies – who often overpay for GPUs that sit idle most of the time in order to guarantee the capacity they need. The Livepeer network’s value proposition is that users should be able to “automagically” scale up almost instantly and pay on demand for the compute they use, rather than pre-paying for reservations and letting capacity sit idle. This is enabled by Livepeer taking advantage of existing idle long-tail compute through its open marketplace and its supply-side incentives. The Livepeer network needs to be more cost-effective than alternative GPU clouds within this category – with impacts comparable to the 10x+ cost reduction the network has already demonstrated in live video transcoding.

3. Community-driven, open source, open access: The Livepeer project and software stack is open source. Users can control, update, and contribute to the software they are using. They can also be owners in the infrastructure itself through the Livepeer Token, and can benefit from the network’s improvements and adoption, creating a network effect. A community that cares about the network’s success and pushes it forward collectively can be a superpower, compared with the uncertain and shaky relationship between builders and centralized platform providers, where builders have a history of getting rugged by limits on access, changes in functionality, or discontinued platforms. Anyone can build on the Livepeer network regardless of location, jurisdiction, use case, or central party control.

The above are primary value propositions that should appeal to nearly all users, and we must work to close the gaps and live up to them before we can credibly go to market and attract new vertical-specific companies to build directly on top of the network. Luckily, in addition to all of Livepeer’s streaming users, we have a great real-time AI design partner in Daydream, which is already going to market around creative real-time AI, using the network, and contributing to its development to meet these requirements. While building with this design partner, the ecosystem should be working to productize and live up to these promises in a more general sense – setting up benchmarks and testing frameworks, and building mechanisms for scaling up supply ahead of demand, so that it can present this power to the world alongside successful Daydream case studies.

Opportunities to push towards this vision

To truly live up to these value propositions, there are a number of opportunities for the community to focus on in order to close some key gaps. There are many details to come in more technical posts laying out roadmaps and execution frameworks, but at a high level, consider a series of milestones that take the network as a product from technically functional, to production usable, to extensible, to infinitely scalable:

  1. Network MVP - Measure what matters: Establish key network performance SLAs, measure latency and performance benchmarks, and enhance the low latency client to support realtime AI workflows above industry grade standards.
  2. Network as a Product - Self adaptability and scalability: Network delivers against these SLAs and core value props for supported realtime AI workflows. Selection algorithms, failovers and redundancy, and competitive market price discovery established for realtime AI.
  3. Extensibility - Toolkit for community to deploy workflows and provision resources: Workflow deployment and signaling, LPT incentive updates to ensure compute supply for popular AI workflows exceeds demand.
  4. Parallel Scalability: Manage clusters of resources on the network for parallel workflow execution, truly unlocking job types beyond single-GPU inference. 

Many teams within the ecosystem – from the Foundation, to Livepeer Inc, to various SPEs – have already started operationalizing around how they’ll contribute to milestones 1 and 2 to upgrade the network to deliver against these key real-time AI value propositions.

Conclusion and Livepeer’s opportunity

The market opportunity to be the GPU infrastructure that powers real-time AI and world models is absolutely massive – the compute requirements are tremendous, roughly 1000x those of AI text or images – and real-time interaction with media represents a new platform that will affect all of the above-mentioned industries. The Livepeer network can be the infrastructure that powers it. How we plan to close the needed gaps and achieve this will be the subject of an upcoming post. But when we do prove these value propositions, Livepeer will have a clear path to 100x the demand on the network.

The likely target market for the network is startups building vertical-specific businesses on top of real-time AI and world model workflows. The ecosystem should look to enable one (or multiple!) startups in each category – building real-time AI platforms that serve gaming, robotics, synthetic data generation, monitoring and analysis, and all the additional relevant categories. The network’s value propositions will hopefully speak for themselves, but in the early stages of this journey, the ecosystem will likely want to use incentives (like investment or credits) to bootstrap these businesses into existence. Each will represent a chance at success, and will bring more demand and proof.

Ultimately, many users of these platforms may choose to build directly on the network themselves. Just as startups start on platforms like Heroku, Netlify, or Vercel, then build directly on AWS as they scale and need more control and cost savings, and ultimately move to their own datacenters at even greater scale – users of Daydream or a real-time Agent platform built on Livepeer may ultimately choose to run their own gateways to realize the cost savings, control, and full feature set that come from doing so. This is a good thing, as it represents even more usage and scale for the network, and more proof that the Livepeer network has product-market fit as an infrastructure and can absorb all workflows directly. The businesses built on top will provide their own vertical-specific bundles of features and services that onboard vertical-specific capacity, but they’ll be complemented and enabled by the Livepeer network’s superpowers.

While there’s a lot of work ahead, the Livepeer community has already stepped up to cover tremendous ground on this mission. With the network already powering millions of minutes of real-time AI inference per week, with orchestrators already upgrading their capacity and procurement mechanisms to provide real-time AI-capable compute, and with Foundation groups already working to evaluate the network’s incentives and cryptoeconomics to sustainably fund and reward those contributing to this effort, we’re set up well to capture this enormous opportunity!

`, + datePosted: `Nov 13, 2025`, + img: `https://blog.livepeer.org/content/images/2025/11/LP_Blog-Header_Nov25_01_moshed-1.png`, + excerpt: `For the past year, the Livepeer Ecosystem has been guided by the Cascade vision:  a path to transition from a pure streaming and transcoding infrastructure, to an infrastructure that could succeed at providing compute for the future of real-time AI video. The latest Livepeer quarterly report from Messari highlights that this transition is paying off, with network fees up 3x from this time last year, and over 72% of the fees now driven via AI inference. This is exemplified by the growing inspirat`, + readingTime: 9, + }, + { + title: `Livepeer Onchain Builders - Streamplace: Building the Video Backbone of Decentralized Social`, + href: `https://blog.livepeer.org/livepeer-onchain-builders-streamplace-building-the-video-backbone-of-decentralized-social/`, + author: `By Livepeer Team`, + content: `

Welcome to Livepeer Onchain Builders, a new content series spotlighting the Special Purpose Entities (SPEs) funded by the Livepeer onchain treasury. SPEs are working groups funded by the community treasury to work on specific tasks and are accountable to the community for their delivery. These deep dives will explore how each initiative is driving protocol usage, expanding infrastructure, and pushing the boundaries of what’s possible in decentralized video and AI.

Streamplace is an open-source video streaming platform designed to power decentralized social applications with real-time, creator-first infrastructure. It aims to make livestreaming and video hosting as seamless as TikTok or YouTube, but built on open protocols and self-sovereign identity.

What makes it ambitious? Streamplace is not only building full-stack video infra for federated social networks, it's doing so in a way that prioritizes interoperability, scalability, and public goods. From developer SDKs to end-user apps, Streamplace is building an entire ecosystem.

What is an SPE? 

A Special Purpose Entity (SPE) is a focused, community-funded team contributing to the Livepeer ecosystem. SPEs are typically mission-driven groups that operate independently to build infrastructure, applications, or tooling that expand and improve the Livepeer protocol. These teams are funded through proposals to the onchain treasury and are accountable to the community.

SPEs are necessary for the ecosystem because no single team can build every part of a decentralized protocol. SPEs decentralize development, fund public goods, and allow the community to direct resources where they're most needed.

Why do they matter to delegators and stakeholders? Because SPEs grow network usage. More usage = more fees = more rewards. Delegators benefit when the protocol succeeds, and SPEs are among the most direct ways to make that happen.

From Aquareum to Streamplace

A clear goal drives the team behind Streamplace: to build the foundational video infrastructure for the next generation of decentralized social platforms. These platforms, such as Farcaster and the AT Protocol, promise user-owned identity and interoperability, but have thus far lacked robust support for live and on-demand video.

Streamplace solves this by providing a full-stack, developer-friendly video layer that anyone can plug into. It's a bold attempt to make decentralized video feel as native and easy as its Web2 counterparts.

Streamplace started as Aquareum, a project with the same mission and team. This evolution into Streamplace is a rebranding, not a restart, building on past momentum with a sharper focus.

Their vision is to give every user the ability to publish, stream, and remix content with the same ease as TikTok or YouTube, but backed by self-sovereign identity and decentralized networks.

Streamplace homepage

The first proposal delivered:

  • A unified Aquareum node: bundling the Livepeer stack with indexing and playback.
  • App releases on iOS, Android, and Web.
  • Native integrations with AT Protocol and Farcaster.
  • Support for C2PA metadata and content provenance.

Now, Streamplace continues that momentum with 100,000 LPT in treasury funding and a clear mandate to scale.

Streamplace Grafana dashboard

Why Streamplace Matters

Video is the heart of online social interaction. Yet decentralized social networks have lagged in providing seamless, user-friendly video experiences. Streamplace addresses this by:

  • Transcoding every livestream through Livepeer, providing decentralized, low-cost processing for global delivery.
  • Powering partner platforms like Skylight Social, a TikTok alternative backed by Mark Cuban, which recently hit #1 in entertainment on the App Store.
  • Making it dead-simple to stream or host video through single-binary nodes that anyone can deploy.
  • Championing public goods: 100% of their code is open source, with a commitment to infrastructure, not monetization lock-in.

Decentralized social, spanning protocols like Farcaster, AT Protocol, and Bluesky, represents a movement toward user-owned networks and open standards. These networks are gaining traction, but video remains a missing layer. That’s where Streamplace comes in.

Video is essential because it's the most engaging, expressive medium for creators and communities. And as these decentralized platforms scale, having real-time, composable video becomes non-negotiable.

Streamplace positions itself as the default video infra layer for this new social stack, and with every stream transcoded through Livepeer, it's also a major driver of protocol usage and visibility.

What Streamplace 2.0 Will Deliver

This new phase of work, funded by the Livepeer treasury, focuses on scale, performance, and ecosystem integration:

Infrastructure Enhancements

  • Expand server capacity to support growing user bases like Skylight.
  • Harden video nodes for reliability under real-world load.
  • Deliver high-quality performance on all platforms: Web, iOS, Android.

Protocol and Developer Growth

  • Deepen native integration with AT Protocol.
  • Build SDKs and NPM packages to embed Streamplace easily into other apps.
  • Ship VOD functionality and new moderation tools.

Community-First Ethos

  • Launch creator monetization models and stream incentive programs.
  • Empower streamers with self-hosted app capabilities ("Twitch, but it's your own app").
  • Maintain full transparency and livestream development.

The Livepeer Angle

Livepeer's decentralized video infrastructure powers every second of video on Streamplace. That means more work for orchestrators, more fees flowing through the protocol, and more incentive for high-quality node operation.

Streamplace strengthens the Livepeer ecosystem in three key ways:

  • Demand generation: Real-world usage at scale means more consistent transcoding work.
  • Protocol visibility: High-impact apps like Skylight drive awareness of Livepeer beyond its native circles.
  • Infrastructure robustness: Streamplace's nodes enhance the distributed capacity of the Livepeer network.

Without Livepeer, a decentralized video stack like Streamplace wouldn’t be possible. And without ambitious apps like Streamplace, Livepeer wouldn’t have the same opportunity to prove its value at scale.

Final Thoughts

Streamplace is a keystone piece of open video infrastructure and a cornerstone in the emerging world of decentralized social media. By fusing creator-first tooling with Livepeer’s scalable infrastructure, it offers a glimpse into what the open internet can become.

As decentralized protocols shift from vision to adoption, the need for native video is urgent. Streamplace, with the support of the Livepeer treasury and a relentless commitment to open-source infrastructure, is meeting that need head-on.

If you're a developer, creator, or community builder, now is the time to get involved.

Do you want to contribute to Streamplace's success? Explore the open roles here.

Interested in building or contributing to the Livepeer ecosystem? Learn more about current and past SPEs, open opportunities, and how to submit your own proposal here.

Follow along, fork the code, or join a stream — the future of social video is open.

Streamplace App

Streamplace Proposal

Aquareum Proposal


Livepeer is a decentralized video infrastructure network for live and on-demand streaming. It has integrated AI Video Compute capabilities (Livepeer AI) by harnessing its massive GPU network and is now building the future of real-time AI video.

Twitter | Discord | Website

`, + datePosted: `Aug 14, 2025`, + img: `https://blog.livepeer.org/content/images/2025/08/Onchain-Builders-Streamplace.jpg`, + excerpt: `Welcome to Livepeer Onchain Builders, a new content series spotlighting the Special Purpose Entities (SPEs) funded by the Livepeer onchain treasury. SPEs are working groups funded by the community treasury to work on specific tasks and are accountable to the community for their delivery. These deep dives will explore how each initiative is driving protocol usage, expanding infrastructure, and pushing the boundaries of what’s possible in decentralized video and AI. + +Streamplace is an open-source `, + readingTime: 5, + }, + { + title: `Builder Story: dotsimulate x Daydream`, + href: `https://blog.livepeer.org/builder-story-dotsimulate-x-daydream/`, + author: `By Livepeer Team`, + content: `

Building StreamDiffusionTD Operator - a Real-Time Generative Video Operator for TouchDesigner, Powered by the Daydream API

Creator: Lyell Hintz (@dotsimulate)
Operator: StreamDiffusionTD
Backends Supported: Local + Daydream (Livepeer)

[Embedded video: StreamDiffusionTD demo, 0:34]

Overview

StreamDiffusionTD is a TouchDesigner operator that connects real-time inputs like audio, sensors, and camera feeds to StreamDiffusion, enabling live generative visuals controlled in real time. With the Daydream API, it adds remote inference capabilities on top of the existing local GPU inference and unlocks more flexibility for users.

Built by Lyell Hintz, a technical artist and TouchDesigner developer, the operator is used in live shows, installations, and experimental workflows.

Why It Was Built

Lyell began working on the operator a few hours after StreamDiffusion was released on GitHub. He wanted to use it in TouchDesigner - a powerful tool for real time interactive content creation.

“TouchDesigner is the only place this could be controlled from… it can hook into everything else.”

From the start, he avoided creating a “black box.” The operator exposes core parameters like prompt, seed, and ControlNet weights, allowing users to adjust values and see results immediately.

Key Features

  • Real-time video generation
  • Prompt and seed morphing
  • Dynamic ControlNet weighting
  • Live input support: audio, sensors, camera
  • Local GPU and Daydream backend options
  • Instant visual feedback in TouchDesigner
[Embedded video: feature demo, 0:26]

Daydream API Integration

StreamDiffusionTD works with the Daydream API, which allows the operator to run on a remote GPU backend. This eliminates the major barrier of requiring a high-end PC with an NVIDIA RTX 4090 to run StreamDiffusion at professional quality, unlocking the flexibility to run it from any location, on any device form factor.

Just drop in your API key and hit “Start Stream.” The backend handles orchestration, model hosting, and frame delivery, so builders can stay focused on their creative and technical workflows.

Setup takes less than a minute, and once installed, the configuration is remembered for future use.

Daydream’s API brings new features to StreamDiffusion:

  • Multi-controlnet: Mixing different controlnets for better artistic control
  • IPAdapter: Use images as powerful style guides
  • TensorRT: Better frame rate for smooth video output

Daydream is adding support for more real time video generation models, and developers can request features, suggest improvements, or build on top of the API itself. It aligns with the values of open tooling and community-led infrastructure.
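
To make the “drop in your API key” flow concrete, here is a minimal illustrative sketch of what a remote-backend call could look like from JavaScript. The endpoint, payload fields, and response shape are hypothetical placeholders, not the documented Daydream API; consult the official API reference for the real contract.

```javascript
// Hypothetical sketch only: the endpoint, payload fields, and response shape
// below are placeholders, not the documented Daydream API.
async function startRemoteStream(apiKey, params) {
  const res = await fetch('https://api.daydream.example/v1/streams', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      Authorization: `Bearer ${apiKey}`, // the API key is the only credential required
    },
    body: JSON.stringify({
      prompt: params.prompt,           // live-editable, as with the local backend
      seed: params.seed,               // supports seed morphing
      controlnets: params.controlnets, // multi-ControlNet weights
    }),
  })
  if (!res.ok) throw new Error(`Failed to start stream: ${res.status}`)
  return res.json() // e.g. a stream id plus a playback/output URL
}
```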

How Artists Can Use StreamDiffusionTD in TouchDesigner

  • Audio-reactive visuals for concerts
  • Camera-driven generative visuals
  • Real-time visuals for LED walls and stages
  • TouchDesigner automation workflows

Because it's built inside TouchDesigner, the operator can be extended using Python, MIDI, OSC, or any other input TouchDesigner supports.

Current State

The operator is live and ready to use, with active development underway for new features and improved performance. It’s a great time to jump in, explore, and help shape what comes next.

Try it Yourself

Operator Access: patreon.com/dotsimulate
Community and Support: discord.gg/daydreamlive
API Keys can be requested here

`, + datePosted: `Aug 5, 2025`, + img: `https://blog.livepeer.org/content/images/2025/08/DD_Builder-Story_dotsimulate_01.png`, + excerpt: `Building StreamDiffusionTD Operator - a Real-Time Generative Video Operator for TouchDesigner, Powered by the Daydream API + +Creator: Lyell Hintz (@dotsimulate) +Operator: StreamDiffusionTD +Backends Supported: Local + Daydream (Livepeer) + +Overview + +StreamDiffusionTD is a TouchDesigner operator that connects real-time inputs like audio, sensors, and camera feeds to StreamDiffusion, enabling live generative visuals controlled in real time. Wit`, + readingTime: 2, + }, + { + title: `Livepeer Incorporated! (and realtime AI)`, + href: `https://blog.livepeer.org/livepeer-incorporated-and-realtime-ai/`, + author: `By Livepeer Team`, + content: `

Written by Doug Petkanics, Co-founder and CEO at Livepeer Inc

The past 18 months have been an energizing time to be in the Livepeer Ecosystem. An onchain treasury was introduced to fund public goods via community governance, the community has coalesced around Livepeer’s opportunity to be the leading infrastructure for realtime AI video, and fees and usage of the network have been steadily increasing due to this focus. The Livepeer Foundation has recently launched to steward the 10+ entities in the ecosystem that are core contributors to the project, and is unlocking even more funding around the opportunities recommended in the project’s strategic pillars.

With so much core development, marketing, and growth driven by the ecosystem at large, the company that I co-founded and operate, Livepeer Incorporated, has had the opportunity to shift its focus to what we deem to be the highest priority area of the project where we feel uniquely suited to make an outsized impact: executing a high conviction go to market motion in an attempt to dramatically grow demand on the Livepeer network. We, like many in the ecosystem, are fully bought in to the realtime AI video vision laid out in Livepeer Cascade, and are solely focused on productization to find product market fit for the Livepeer network as the leading infrastructure in the coming world of live video AI. Here is a bit about what Livepeer Inc is focused on, and, almost as importantly, what we are not focused on in the coming 12 months.

Product Market Fit for Realtime AI Video 

As mentioned, the number one priority is to prove that the Livepeer network has product market fit as an infrastructure that runs the latest and greatest in realtime AI video workflows for developers. To do this, we’ll focus on three core things:

  1. Contribute to core network development to ensure Livepeer is an infrastructure that can run realtime AI video workflows.
  2. Build the developer APIs for running these workflows, which developers use to build them into applications. This is a natural extension of Livepeer Studio.
  3. Cultivate the leading realtime AI video community. Researchers, builders, and creators interested in this coming category need a home. They will provide the moat that ensures that an open, community led infrastructure will always be more responsive, cost effective, and full featured than centralized alternatives.

We’re going to provide the full stack product, engineering, community, and go to market motion to validate product market fit for this opportunity. This will drive significant fees and growth into the Livepeer network. We’re aligned as large LPT token holders and want the network to succeed - which represents a far bigger opportunity for Livepeer Inc than any revenue related opportunity via SaaS services in the short term. Let’s grow those network fees!

What Livepeer Inc is Not Focused On

While there are many potential products and go to markets that can be executed upon under an ambitious vision of being the world’s open video infrastructure, a single company is more likely to succeed by focusing on only one opportunity at a time. Many alternative demand generating bets will be better served by other self-motivated actors in the ecosystem - especially as the open source software around Livepeer, and the broader ecosystem, has matured to the point of providing reliable access points for different categories of use cases. Regarding Livepeer Inc’s learnings on some of these categories:

  • Transcoding alone has been proven out technically and economically, however the market hasn’t accepted the standalone infrastructure without significant productization, support, SLAs, and enterprise services around it.
  • Similarly, when bundled with end to end streaming, the offering isn’t significantly differentiated in a crowded and consolidating market. 
  • Livepeer Studio will continue to support existing users at the enterprise level that pay for these surrounding services, while passing the transcoding jobs through to the Livepeer network, but due to the long sales cycle and slow growth, it will not be actively competing to grow this source of demand. 
  • The ecosystem can support aspiring users of transcoding and streaming via projects like Streamplace, the Frameworks SPE, and their supporting teams. One of the core pillars of the Livepeer Foundation’s GTM recommendations is to tackle being the open video infrastructure for web3 social and decentralized streaming, so the ecosystem will prioritize support. This includes aspiring web3-centric streaming users, who culturally align with the values of the project community, but to date have not shown significant growth nor driven significant fees to the network. There’s an opportunity for these projects to crack this nut and help these users grow, if they deem it to be worth the effort!
  • There are also additional bets that the ecosystem is interested in around the realtime AI mission. These are laid out by the Livepeer Foundation’s GTM Strategy post. Visual avatars for live AI agents is one example. Realtime video analysis and understanding are others. These areas do overlap with the broad theme that Livepeer Inc is focused on - running realtime AI models on live video on the Livepeer network. However as Inc pursues creative AI use cases initially to inspire the broader world in what’s possible, we welcome others in the ecosystem building commercial entities to go after these opportunities. And we will certainly collaborate. If the ecosystem efforts make technical progress, but stop short of commercializing and going to market, these are areas for collaboration with Inc to consider productizing for commercial purposes. 

A Simplified View: Foundation and Inc

While the above contains a lot of details about realtime AI and specific demand generating bets on the Livepeer network, there’s a simplified view:

  • The Livepeer Foundation will steward the Livepeer community, project marketing, and public goods funding to enable recommendations on the project roadmap.
  • Livepeer Inc will focus on driving demand to the network by building the realtime AI products, go to market services, and AI community - initially in the creative realtime AI video space.

If you’re interested in building within this ecosystem, there are lots of opportunities both to contribute to the core development and operations of the project in service of the realtime AI mission, and to develop companies that service additional markets not currently being focused on. Hopefully the above post gives you a view into what some of those opportunities and gaps are. Then check out the Livepeer Foundation’s recent forum posts on tactical recommendations, and raise your hand to get involved in the ones of interest.

`, + datePosted: `Jul 31, 2025`, + img: `https://blog.livepeer.org/content/images/2025/07/e.png`, + excerpt: `Written by Doug Petkanics, Co-founder and CEO at Livepeer Inc + +The past 18 months have been an energizing time to be in the Livepeer Ecosystem. An onchain treasury was introduced to fund public goods via community governance, the community has coalesced around Livepeer’s opportunity to be the leading infrastructure for realtime AI video, and fees and usage of the network have been steadily increasing due to this focus. The Livepeer Foundation has recently launched to steward the 10+ entities in `, + readingTime: 5, + }, +] diff --git a/snippets/automationData/forum/Hero_Livepeer_Forum.png b/snippets/automationData/forum/Hero_Livepeer_Forum.png new file mode 100644 index 00000000..7805c1bd Binary files /dev/null and b/snippets/automationData/forum/Hero_Livepeer_Forum.png differ diff --git a/snippets/automationData/forum/forumData.jsx b/snippets/automationData/forum/forumData.jsx new file mode 100644 index 00000000..c25d3756 --- /dev/null +++ b/snippets/automationData/forum/forumData.jsx @@ -0,0 +1,38 @@ +export const forumData = [ + { + title: "It's time to ACT! Accumulation & the Treasury Ceiling", + href: 'https://forum.livepeer.org/t/3153', + author: 'By b3nnn (@b3nnn)', + content: + '

The onchain treasury was designed to provide sustainable public goods funding. It has supported many important and strategic contributions to the Livepeer Ecosystem. The AI SPE, Streamplace, Agent SPE and Cloud have all received funds and made important contributions. And through our onchain governance, the community has shown time and again its thoughtfulness and care for getting decisions right. Your desire to align decisions with long-term health has made us a shining example of simple but effective governance, and of how people can work together onchain.

The treasury is key to supporting strategic investments to improve UX for stakeholders, effectively manage protocol security, and fund other capital and resource needs for this exciting phase of the project.

As of now, the onchain treasury is not accumulating LPT. It was designed not to accept unlimited funding: it hit the initial value set as the ceiling and reset treasury contributions to 0% on or around the 31st of March this year. There is a backlog of upcoming projects on highly strategic initiatives that will need treasury support, and we will all feel better about how to allocate funds if we have certainty that new funds are coming into the treasury.

I intend to post a LIP to turn on the treasury rewards again at their initial values:

  • treasuryRewardCutRate: 10%

  • treasuryBalanceCeiling: 750000 LPT

The ceiling is already set at 750,000 LPT, so that value would not be updated in the formal proposal.
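
For anyone who wants to verify the live values onchain, here is a minimal ethers.js sketch. Note that the contract address and the 27-decimal precise-math units (per LIP-92) are stated from memory, so verify both against the official protocol deployments before relying on them:

```javascript
import { ethers } from 'ethers'

// Assumptions to verify: BondingManager address on Arbitrum One, and that the
// cut rate is expressed in 27-decimal precise-math units (per LIP-92).
const BONDING_MANAGER = '0x35Bcf3c30594191d53231E4FF333E8A770453e40'
const abi = [
  'function treasuryRewardCutRate() view returns (uint256)',
  'function treasuryBalanceCeiling() view returns (uint256)',
]

async function main() {
  const provider = new ethers.JsonRpcProvider('https://arb1.arbitrum.io/rpc')
  const bm = new ethers.Contract(BONDING_MANAGER, abi, provider)
  const cutRate = await bm.treasuryRewardCutRate() // 10% would be 1e26 in 27-decimal units (assumption, verify)
  const ceiling = await bm.treasuryBalanceCeiling()
  console.log('treasuryRewardCutRate:', cutRate.toString())
  console.log('treasuryBalanceCeiling (LPT):', ethers.formatEther(ceiling))
}

main().catch(console.error)
```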

For what it’s worth, my personal bias is to increase one of these values, but I’m happy to punt that discussion to another day. Having seen the exciting things in the background that will require treasury support in coming weeks, the most pressing item for us as a community is to start getting the treasury repopulated.

I’ll be on the watercooler next week to discuss, and am happy to set up office hours to discuss directly if there is support for that. I look forward to proposing this for a community vote. If you have any input on the contribution percentage that goes into my proposal, please share it here.

', + replyCount: 7, + datePosted: 'Dec 3, 2025', + }, + { + title: 'Pre-proposal: IDOL - Improving Dex / Onchain Liquidity', + href: 'https://forum.livepeer.org/t/3151', + author: 'By b3nnn (@b3nnn)', + content: + '
TLDR

We propose to address known UX issues, and the effort and cost of participating, by increasing DEX liquidity. Arrakis offers an optimal solution for our specific needs, and we are requesting 250,000 LPT for deployment to a Uniswap v4 pool, which will significantly reduce slippage for ecosystem participants.

Motivation

The Capital Markets Advisory board made improving onchain liquidity a tactical recommendation, specifically citing:

  • Low liquidity levels on our DEX pools (primarily Uniswap on Arbitrum). This creates high slippage when trying to transact with any size, and might deter larger stakeholders or participants from buying LPT

  • The much higher ratio of available liquidity on centralized exchanges compared to DEXs drives participants to rely on centralized platforms, exposing them to the inherent risks associated with centralized providers

  • Further, centralised exchanges often don’t support L2 withdrawals. This results in delayed bridging and withdrawal processing between L1 & L2, impairing overall UX and the efficiency of orchestrators as it relates to capital allocation

In short, improved L2 DEX liquidity is essential for both current and future participants in Livepeer.

Recommended Solution

How to address our challenges is relatively straightforward to describe:

  • Increase the amount of liquidity on targeted DEX pool/s

  • Ensure the solution is executing against this goal as agreed

  • Use funds wisely, ensuring a good balance between what we pay and what we receive

Any solution will require liquidity from the on-chain treasury to start bootstrapping an optimal asset mix. In addition to this liquidity requirement, using a traditional market maker is likely a major expense (in the range of $15-20K per month). While traditional market makers can do a good job in actively managing liquidity, especially on centralised exchanges, they often present new or additional challenges:

  • Market makers typically operate through asset loan agreements, using our capital to actively manage liquidity across venues. While this model provides flexibility and professional management, it can make visibility into how and where assets are deployed more challenging.

  • Compared to centralized venues, on-chain liquidity provision is often less economically attractive for market makers. As a result, they may prioritize other strategies or venues where returns are higher, which can limit incentives to deepen on-chain liquidity.

  • Ensuring that capital is being used effectively by traditional market makers remains challenging, as it requires clear visibility into capital deployment and a deep understanding of the alternative strategies they pursue.

While none of this is insurmountable, it requires significant thought, effort and time to ensure oversight and manage risk.

Arrakis Pro is an ideal solution to address these challenges.

Arrakis specifically addresses each of these challenges because:

  • It is built specifically for managing onchain liquidity on DEXs

  • The assets are stored in a vault controlled by a multisig made up of Livepeer Foundation members. This means the treasury, via the Foundation, can withdraw and return the liquidity at any time

  • Because it is onchain, and through the features provided in Arrakis pro, we can check and confirm at any time where our assets are and what strategies are being applied.

  • It rebalances positions by setting up ranges / limit orders, no swaps involved. The solution algorithmically minimises price impact given the allocated capital and bootstraps base asset liquidity without causing negative selling pressure.

  • Arrakis leverages sophisticated algorithms to increase capital efficiency for the deployed capital and reduce slippage for traders on the DEX pools.

Arrakis vaults hold ~$170M TVL and the team actively manages the on-chain liquidity for over 100 protocols. Projects such as MakerDAO, Lido, Morpho, Gelato, Redstone, Wormhole, Across, Euler, Usual, Syrup, Venice.ai, Ether.fi, etc. are benefiting from the high capital efficiency and cost effectiveness for DEX liquidity optimization enabled by Arrakis PRO.

For more information regarding Arrakis and Arrakis Pro, feel free to have a look at their docs or join their community:

Arrakis | Twitter | Resources

In addition, the team are present here and will address any questions directly - hello @Arrakis

The Ask

We want to significantly decrease slippage and costs for orchestrators and other participants to interact with the network through onchain liquidity.

We are asking for 250,000 LPT (approx. $1M in USD value) to be held in a multisig controlled by the Livepeer Foundation, to be deployed via an onchain vault with Arrakis as a concentrated pool on Uniswap v4.

Management of concentrated liquidity on Uniswap V4 allows for larger trades with minimal price impact, improving the overall trading experience. Savings to participants are substantial: approx. $1,500 in slippage reduction on a $25,000 sale of LPT, i.e. roughly 6% of trade value (estimate based on the data below).

Comparison of current and estimated price impact (after successful ETH liquidity bootstrapping) for buying LPT and ETH across different amounts

Specification for Livepeer

  1. The Arrakis team uses the existing LPT/ETH pool on the 0.3% fee tier for UniswapV4

  2. Arrakis then deploys a dedicated vault managed by the Arrakis Pro smart contract for this LPT/ETH Uniswap pool.

  3. The Livepeer Foundation team establishes a ⅔ multisig for custody of the funds. If the proposal passes, funds are transferred onchain to this multisig account.

  4. Through this Livepeer Foundation multisig, we deposit $1 million worth of $LPT into the Arrakis Pro vault. Transfers in and out of the vault are controlled by the multisig, meaning the funds cannot be deployed or moved elsewhere by Arrakis.

  5. Arrakis Pro will allocate the provided liquidity in a concentrated and fully active market making strategy to facilitate trading on UniswapV4.

  6. The strategy initially operates to bootstrap ETH to establish a 50/50 inventory ratio over the first months. The primary objective is to create price stability by generating deep liquidity and reaching an even inventory over time.

For the services provided, Arrakis charges the following fees:

Arrakis Asset-under-Management (AUM) fee: 1% per year, waived for the first 6 months

Arrakis performance fee: 50% of trading fees the vault generates
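
As a rough worked example under these terms (illustrative numbers only): on the proposed ~$1M of liquidity, the AUM fee would be about $10,000 per year once the 6-month waiver lapses. And if the 0.3% pool facilitated, say, $10M of annual volume, the resulting ~$30,000 in trading fees would split into roughly $15,000 for the Livepeer DAO and $15,000 for Arrakis.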

FAQ

What are the risks of this model?

  • Deploying funds to DEX pools bears smart contract risk and general market risk (e.g. token exposure, impermanent loss). Arrakis smart contracts have been audited by leading security firms and currently secure +$150M TVL (https://docs.arrakis.finance/text/resources/audits.html)

What happens to the capital required?

  • The capital required is deployed by the Livepeer DAO, via a Foundation controlled multisig, to a self-custodial smart contract vault and can be withdrawn at any point in time. Arrakis does not hold custody, nor control the funds deployed outside of the mandate to manage DEX liquidity on Uniswap V4 for the respective trading pair.

Will this impact the current liquidity on CEXs?

  • Arrakis’ mandate is to gradually improve on-chain markets and provide deeper liquidity for the respective pair over time on DEX markets. CEX markets will not be affected.

How does the Arrakis model differ from standard AMMs (like Uniswap v3)?

  • Arrakis provides a sophisticated on-chain market making service, running dedicated algorithmic market making strategies.

  • Instead of manually deploying funds into the CLAMM pool, Arrakis algorithmically rebalances the position and runs active liquidity management strategies.

Will our liquidity still be actively managed, or will it be passively allocated in a vault?

  • Close to 100% of the liquidity deployed with an Arrakis vault is actively deployed to the Uniswap CLAMM pool and provides liquidity. Small shares of liquidity remain in the vault as token reserves for rebalancing purposes.

How is the strategy for the vault determined — who sets the parameters, and how often are they rebalanced?

  • The Arrakis quant team fine-tunes the strategies and engages in periodic review cycles, along with 24/7/365 monitoring and alerting.

Who controls or can modify the AMM strategy parameters?

  • Arrakis strategies are designed, deployed and maintained by professional quant traders. The Foundation can be involved in discussions at regular intervals as needed to further align on achieving the stated goals.

Will the community have visibility into performance and strategy updates?

  • The Foundation delegates will receive access to a custom real time analytics dashboard and can share periodic updates to the forum for the community.

What happens to the liquidity if the vault underperforms or becomes unbalanced?

  • Liquidity is actively rebalanced towards a 50:50 ratio by placing one-sided limit maker orders. In adverse market scenarios, strategies adjust to predefined market volatility settings.

How do fees compare to centralized market makers?

  • Centralized market makers work in two models: a) loan & option, b) retainer (fixed fee). Arrakis works on a profit share of the trading fees earned (50% captured by the Livepeer DAO, 50% retained by Arrakis for the services provided).

How will LP performance be measured?

  • LP performance will be measured by market depth, price impact, slippage improvement, total volumes facilitated.

What happens after funds are returned?

  • It’s important to note that the liquidity in the vault can remain deployed indefinitely, but it can also be returned to the onchain treasury, or to the control of voters, at any time. As funds will by then be held in both ETH and LPT, the community can be involved in discussions about how returned funds are stored or used.

This is a large proportion of the current treasury. What gives?

  • We recognise that this is a large ask relative to the current size and value of the treasury. The size and value of the treasury will be addressed in a separate proposal. As it relates to this proposal, consider that we will reduce slippage costs by approx. 2-3X on every DEX transaction. The ROI on this proposal will be quite substantial.
', + replyCount: 3, + datePosted: 'Dec 1, 2025', + }, + { + title: 'Transformation SPE Release Notes', + href: 'https://forum.livepeer.org/t/3142', + author: 'By Mehrdad (@Mehrdad)', + content: + '

Release notes are a way to share work being completed by the Transformation SPE and its various contributors. Dive in and explore what has been happening, and please reach out or reply with any questions; we will happily expand further.

', + replyCount: 2, + datePosted: 'Nov 10, 2025', + }, + { + title: 'Transcoder Campaign: organic-node.eth', + href: 'https://forum.livepeer.org/t/1970', + author: 'By Ron (@ron)', + content: + '

Hello fellow video enthusiasts and web3 supporters,

Thanks for your time in reading my post. My node (organic-node.eth) has been active for about 6 months, and every day has been a great learning experience. It has been highly reliable, with 4 orchestrators across the globe and the possibility to expand further depending on demand. If you would like to get in touch, please reach out to me on Discord: Organic-Node#9009.

It gives me great pleasure to look at Lenstube videos and think that some of them may have been transcoded by my orchestrator. Stakers and delegators enjoy passive income thanks to my low reward and fee cuts, and they help support a robust orchestrator for fairer web3 platforms.

Stake here:
(organic-node.eth)

', + replyCount: 1, + datePosted: 'Dec 6, 2022', + }, +] diff --git a/snippets/automationData/globals/README.md b/snippets/automationData/globals/README.md new file mode 100644 index 00000000..d4d5f2e6 --- /dev/null +++ b/snippets/automationData/globals/README.md @@ -0,0 +1,89 @@ +# Livepeer Release Updater + +Three different solutions for updating the Livepeer release version: + +## 1. **n8n Workflow** (livepeer-release-updater.json) CURRENT + +This is the recommended approach for your setup. It: + +- Polls the go-livepeer releases API every 30 minutes +- Uses Redis to track the last known version (prevents duplicate updates) +- Only updates the `LatestRelease` value without touching anything else +- Commits directly to the docs-v2 branch + +## 2. **GitHub Action** (update-livepeer-release.yml) RECOMMENDED + +If someone with admin access can add this to the docs repo's +`.github/workflows/` folder, it will run automatically without needing external +infrastructure like n8n + +## 3. **Node.js Script** (update-livepeer-release.js) + +Can be run manually or via cron job from any server with Node.js installed. + +Since you mentioned you can't get a GitHub token for the livepeer org but can +use the GUI, the n8n workflow is your best bet. You'll need to: + +1. Create a Personal Access Token from your own GitHub account (Settings → + Developer settings → Personal access tokens) +2. Ensure you have write access to the docs repository +3. Import the n8n workflow and configure it with your token + +The workflow specifically: + +- Uses regex to update ONLY the `LatestRelease` value +- Preserves all other content and formatting +- Includes error handling and validation +- Can send notifications when updates occur + +All files include the setup guide with detailed instructions for each approach. + +### 2. 
Code for yml

    on:
      schedule:
        # Run every 30 minutes
        - cron: '*/30 * * * *'
      workflow_dispatch:

    jobs:
      check-and-update:
        runs-on: ubuntu-latest

        steps:
          - name: Checkout docs repository
            uses: actions/checkout@v3
            with:
              ref: docs-v2
              token: ${{ secrets.GITHUB_TOKEN }}

          - name: Get latest go-livepeer release
            id: get_release
            run: |
              LATEST_RELEASE=$(curl -s https://api.github.com/repos/livepeer/go-livepeer/releases/latest | jq -r .tag_name)
              echo "release=${LATEST_RELEASE}" >> $GITHUB_OUTPUT
              echo "Latest release: ${LATEST_RELEASE}"

          - name: Read current version from globals.jsx
            id: current_version
            run: |
              # Note: globals.jsx uses `LatestRelease = "..."`, so match on `=` rather than `:`
              CURRENT=$(grep -oP 'LatestRelease\s*=\s*["'\'']?\K[^"'\'';]+' snippets/automationData/globals/globals.jsx || echo "")
              echo "current=${CURRENT}" >> $GITHUB_OUTPUT
              echo "Current version: ${CURRENT}"

          - name: Update globals.jsx if needed
            if: steps.get_release.outputs.release != steps.current_version.outputs.current
            run: |
              # Create backup
              cp snippets/automationData/globals/globals.jsx snippets/automationData/globals/globals.jsx.bak

              # Update the LatestRelease value (assignment form, matching globals.jsx)
              sed -i "s/LatestRelease[[:space:]]*=[[:space:]]*[\"'][^\"']*[\"']/LatestRelease = \"${{ steps.get_release.outputs.release }}\"/" snippets/automationData/globals/globals.jsx

              # Verify the change
              echo "Updated content:"
              grep "LatestRelease" snippets/automationData/globals/globals.jsx

          - name: Commit and push if changed
            if: steps.get_release.outputs.release != steps.current_version.outputs.current
            run: |
              git config --local user.email "action@github.com"
              git config --local user.name "GitHub Action"
              git add snippets/automationData/globals/globals.jsx
              git commit -m "chore: update latest release to ${{ steps.get_release.outputs.release }}"
              git push origin docs-v2 diff --git a/snippets/automationData/globals/globals.jsx b/snippets/automationData/globals/globals.jsx new file mode 100644 index 00000000..1a218b71 --- /dev/null +++ b/snippets/automationData/globals/globals.jsx @@ -0,0 +1 @@ +export const LatestRelease = "v0.8.8"; diff --git a/snippets/components/buttons.jsx b/snippets/components/buttons.jsx new file mode 100644 index 00000000..ca0ecfb9 --- /dev/null +++ b/snippets/components/buttons.jsx @@ -0,0 +1,3 @@ +export const BasicBtn = () => { + return
+} diff --git a/snippets/components/buttons.tsx b/snippets/components/buttons.tsx new file mode 100644 index 00000000..0f5a12af --- /dev/null +++ b/snippets/components/buttons.tsx @@ -0,0 +1,4 @@ +export type buttonsProps = Record; +export const BasicBtn = () => { + return
+} diff --git a/snippets/components/cards.jsx b/snippets/components/cards.jsx new file mode 100644 index 00000000..2ab77e83 --- /dev/null +++ b/snippets/components/cards.jsx @@ -0,0 +1,234 @@ +// card layouts + +export const PostCard = ({ + title, + content, + href, + author = 'Unknown', + datePosted = null, + replyCount = null, + icon = 'book-open', + authorIcon = 'user-pen', + dateIcon = 'calendar', + cta = 'Read More', + img = null, +}) => { + console.log('item', title, content, href, img) + // Show hint if content is likely to overflow (>500 chars as proxy) + const showScrollHint = content && content.length > 500 + + return ( + + {author && ( +
+ + + + {author} +
+ )} + {datePosted && ( +
+ + + + {datePosted} +
+ )} + {/* {replyCount && ( +
+ + + + Replies: {replyCount} +
+ )} */} +
+
{ + const el = e.target + const atBottom = + el.scrollHeight - el.scrollTop <= el.clientHeight + 10 + const hint = el.nextSibling + if (hint) hint.style.display = atBottom ? 'none' : 'block' + }} + dangerouslySetInnerHTML={{ __html: content }} + /> + {showScrollHint && ( +
+ Scroll for more ↓ +
+ )} + + ) +} + +export const CardColumnsPostLayout = ({ cols = 2, items = [] }) => { + console.log('items', items) + return ( + + {items.map((props, idx) => ( + + ))} + + ) +} + +export const BlogCard = ({ + title, + content, + href, + author = 'Livepeer Team', + datePosted = null, + excerpt = null, //use if we prefer people to go to the actual blog site + readingTime = null, + icon = 'book-open', + authorIcon = 'user-pen', + dateIcon = 'calendar', + cta = 'Read More', + img = null, +}) => { + console.log('item', title, content, href, img) + // Show hint if content is likely to overflow (>500 chars as proxy) + const showScrollHint = content && content.length > 500 + + return ( + + {/* {author && ( +
+ + + + {author} +
+ )} */} + {datePosted && ( +
+ + + + {datePosted} +
+ )} + {readingTime && ( +
+ + + + Read Time: {readingTime} minutes +
+ )} +
+
{ + const el = e.target + const atBottom = + el.scrollHeight - el.scrollTop <= el.clientHeight + 10 + const hint = el.nextSibling + if (hint) hint.style.display = atBottom ? 'none' : 'block' + }} + dangerouslySetInnerHTML={{ __html: content }} + /> + {showScrollHint && ( +
+ Scroll for more ↓ +
+ )} + + ) +} + +export const CardBlogDataLayout = ({ items = [] }) => { + console.log('items', items) + return ( +
+ {items.map((props, idx) => ( + + ))} +
+ ) +} diff --git a/snippets/components/chainlist.jsx b/snippets/components/chainlist.jsx new file mode 100644 index 00000000..d1c6d2f0 --- /dev/null +++ b/snippets/components/chainlist.jsx @@ -0,0 +1,152 @@ +import { useState, useEffect } from "react"; + +/** + * ChainlistRPCs - Dynamically fetches and displays RPC endpoints from Chainlist + * + * Props: + * - chainId: The chain ID to fetch RPCs for (default: 42161 for Arbitrum One) + */ +export const ChainlistRPCs = ({ chainId = 42161 }) => { + const [rpcs, setRpcs] = useState([]); + const [loading, setLoading] = useState(true); + const [error, setError] = useState(null); + + useEffect(() => { + const fetchRPCs = async () => { + try { + // Fetch from DefiLlama chainlist extraRpcs.js (has all RPCs) + const response = await fetch( + "https://raw.githubusercontent.com/DefiLlama/chainlist/main/constants/extraRpcs.js" + ); + + if (response.ok) { + const jsContent = await response.text(); + + // Extract the chain's RPC block using regex + const chainPattern = new RegExp( + `${chainId}:\\s*\\{[\\s\\S]*?rpcs:\\s*\\[([\\s\\S]*?)\\]\\s*,?\\s*\\}`, + "m" + ); + const match = jsContent.match(chainPattern); + + if (match) { + const rpcsBlock = match[1]; + const rpcList = []; + + // Match simple string URLs + const simpleUrls = rpcsBlock.match(/"(https?:\/\/[^"]+)"/g); + if (simpleUrls) { + simpleUrls.forEach((url) => { + rpcList.push({ url: url.replace(/"/g, ""), tracking: "-" }); + }); + } + + // Match object-style RPCs with tracking info + const objectPattern = + /\{\s*url:\s*"([^"]+)"[^}]*tracking:\s*"([^"]+)"/g; + let objMatch; + while ((objMatch = objectPattern.exec(rpcsBlock)) !== null) { + // Avoid duplicates + if (!rpcList.find((r) => r.url === objMatch[1])) { + rpcList.push({ url: objMatch[1], tracking: objMatch[2] }); + } + } + + // Also match wss:// URLs + const wssUrls = rpcsBlock.match(/"(wss:\/\/[^"]+)"/g); + if (wssUrls) { + wssUrls.forEach((url) => { + const cleanUrl = url.replace(/"/g, ""); + if (!rpcList.find((r) => r.url === cleanUrl)) { + rpcList.push({ url: cleanUrl, tracking: "-" }); + } + }); + } + + setRpcs(rpcList); + } else { + throw new Error("Chain not found in data"); + } + } else { + throw new Error("Failed to fetch chain data"); + } + } catch (err) { + setError("Failed to load RPC data"); + console.error("ChainlistRPCs error:", err); + } finally { + setLoading(false); + } + }; + + fetchRPCs(); + }, [chainId]); + + if (loading) { + return
Loading RPC endpoints...
; + } + + if (error) { + return
Error: {error}
; + } + + // Filter to only show public RPCs (not ones with API keys in URL) + const publicRpcs = rpcs.filter((rpc) => { + const url = typeof rpc === "string" ? rpc : rpc.url; + return url && !url.includes("${") && !url.includes("API_KEY"); + }); + + return ( +
+ + + + + + + + + {publicRpcs.map((rpc, index) => { + const url = typeof rpc === "string" ? rpc : rpc.url; + const isWebsocket = url.startsWith("wss://"); + + return ( + + + + + ); + })} + +
+ RPC URL + + Type +
+ {url} + + {isWebsocket ? "WSS" : "HTTPS"} +
+
+ ); +}; diff --git a/snippets/components/code.jsx b/snippets/components/code.jsx new file mode 100644 index 00000000..b5fccc7a --- /dev/null +++ b/snippets/components/code.jsx @@ -0,0 +1,59 @@ +export const CustomCodeBlock = ({ + filename, + icon, + language, + highlight, + codeString = "", + placeholderValue = "", + wrap = true, + lines = true, +}) => { + const renderedCode = codeString.replace(/\{PLACEHOLDER\}/g, placeholderValue); + return ( + + {renderedCode} + + ); +}; + +/** + * CodeComponent - Simple code display with {PLACEHOLDER} replacement + * + * Props: + * - codeString: string with {PLACEHOLDER} to replace + * - placeholderValue: string value to insert in place of {PLACEHOLDER} + */ +export const CodeComponent = ({ + filename = "", + icon = "terminal", + language = "", + highlight = "", + expandable = false, + wrap = true, + lines = true, + codeString = "", + placeholderValue = "", +}) => { + const renderedCode = codeString.replace(/\{PLACEHOLDER\}/g, placeholderValue); + return ( + + {renderedCode} + + ); +}; diff --git a/snippets/components/embed.jsx b/snippets/components/embed.jsx new file mode 100644 index 00000000..3471ed08 --- /dev/null +++ b/snippets/components/embed.jsx @@ -0,0 +1,23 @@ +export const MarkdownEmbed = ({ url }) => { + const [content, setContent] = useState('') + + useEffect(() => { + fetch(url) + .then((res) => res.text()) + .then(setContent) + }, [url]) + + return {content} +} + +export const EmbedMarkdown = ({ url }) => { + const [content, setContent] = useState('') + + useEffect(() => { + fetch(url) + .then((res) => res.text()) + .then(setContent) + }, [url]) + + return {content} +} diff --git a/snippets/components/embed.mdx b/snippets/components/embed.mdx new file mode 100644 index 00000000..583f2d5e --- /dev/null +++ b/snippets/components/embed.mdx @@ -0,0 +1,44 @@ +export const ExternalEmbed = ({ + children, + repoName, + repoUrl, + maxHeight = '1000px' +}) => ( +
+
+ + + {repoName} + + + View on GitHub + +
+
+ {children} +
+
+); + diff --git a/snippets/components/external-content.jsx b/snippets/components/external-content.jsx new file mode 100644 index 00000000..c6b6c41d --- /dev/null +++ b/snippets/components/external-content.jsx @@ -0,0 +1,60 @@ +/** + * ExternalContent - A reusable component for displaying external GitHub content + * Usage: + * import { ExternalContent } from '/snippets/components/external-content.jsx' + * import MyContent from '/snippets/external/my-content.mdx' + * + * + * + */ + +export const ExternalContent = ({ + repoName, + githubUrl, + maxHeight = '1000px', + icon = 'github', + children +}) => { + return ( +
+
+ + + {repoName} + + + View on GitHub + +
+
+ {children} +
+
+ ); +}; + diff --git a/snippets/components/image.jsx b/snippets/components/image.jsx new file mode 100644 index 00000000..1a61b1ad --- /dev/null +++ b/snippets/components/image.jsx @@ -0,0 +1,30 @@ +export const Image = ({ src, alt, caption, icon, hint, fullwidth = true }) => { + icon = icon ? icon : 'arrow-turn-down-right' + return ( + + {alt} + + ) +} + +export const LinkImage = ({ src, alt, caption, icon, hint, href }) => { + icon = icon ? icon : 'arrow-turn-down-right' + return ( + + + {alt} + + + ) +} + +// +// Livepeer Community GIF +// diff --git a/snippets/components/image.tsx b/snippets/components/image.tsx new file mode 100644 index 00000000..dfeb946f --- /dev/null +++ b/snippets/components/image.tsx @@ -0,0 +1,31 @@ +export type imageProps = Record; +export const Image = ({ src, alt, caption, icon, hint, fullwidth = true }) => { + icon = icon ? icon : 'arrow-turn-down-right' + return ( + + {alt} + + ) +} + +export const LinkImage = ({ src, alt, caption, icon, hint, href }) => { + icon = icon ? icon : 'arrow-turn-down-right' + return ( + + + {alt} + + + ) +} + +// +// Livepeer Community GIF +// diff --git a/snippets/components/links.jsx b/snippets/components/links.jsx new file mode 100644 index 00000000..0e3b2e7c --- /dev/null +++ b/snippets/components/links.jsx @@ -0,0 +1,152 @@ +export const CustomCallout = ({ + children, + icon = "lightbulb", + color = "#2d9a67", + iconSize = 16, + textSize = "0.875rem", + textColor, +}) => { + // Default textColor to match the icon color if not specified + const resolvedTextColor = textColor || color; + // Convert hex to rgba for proper opacity + const hexToRgba = (hex, alpha) => { + const r = parseInt(hex.slice(1, 3), 16); + const g = parseInt(hex.slice(3, 5), 16); + const b = parseInt(hex.slice(5, 7), 16); + return `rgba(${r}, ${g}, ${b}, ${alpha})`; + }; + + return ( +
+
+ +
+
+ {children} +
+
+ ); +}; + +export const BlinkingIcon = ({ + icon = "terminal", + size = 16, + color = "#2d9a67", +}) => { + return ( + + + + + ); +}; + +// Alias for backwards compatibility +export const BlinkingTerminal = BlinkingIcon; + +export const DoubleIconLink = ({ + label = "", + href = "#", + text = "", + iconLeft = "github", + iconRight = "arrow-up-right", +}) => { + return ( + + {text && {text}} + + {label} + + + ); +}; + +export const GotoLink = ({ + label, + relativePath, + text = "", + icon = "arrow-turn-down-right", +}) => { + return ( + +

{text}

+ + + {label} + +
+ ); +}; + +export const GotoCard = ({ label, relativePath, icon, text, cta = "" }) => { + icon = icon ? icon : "arrow-turn-down-right"; + return ( + + {text} + + ); +}; + +export const DownloadLink = ({ + label = "Download Transcript", + icon = "download", + downloadLink, + rightIcon = false, +}) => { + console.log("dllink", downloadLink); + downloadLink = downloadLink ? downloadLink : "https://Livepeer.org"; + console.log("dllink2", downloadLink); + return ( + + {!rightIcon && } + + {label} + + {rightIcon && } + + ); +}; diff --git a/snippets/components/links.tsx b/snippets/components/links.tsx new file mode 100644 index 00000000..286562fe --- /dev/null +++ b/snippets/components/links.tsx @@ -0,0 +1,21 @@ +// export type linksProps = Record; +// export const GotoLink = ({ label, relativePath, text }) => { +// return ( +// +//

{text}

+// +// +// {label} +// +//
+// ) +// } + +// export const GotoCard = ({ label, relativePath, icon, text }) => { +// icon = icon ? icon : 'arrow-turn-down-right' +// return ( +// +// {text} +// +// ) +// } diff --git a/snippets/components/lists.jsx b/snippets/components/lists.jsx new file mode 100644 index 00000000..2879b21e --- /dev/null +++ b/snippets/components/lists.jsx @@ -0,0 +1,62 @@ +// Lists +import { GotoLink } from './links' + +export const BasicList = ({ listItems: array }) => { + return <> +} + +export const IconList = ({ listItems: array }) => { + return <> +} + +export const StepList = ({ listItems }) => { + console.log('listItems', listItems) + return ( + + {listItems.map(({ title, icon, content }, idx) => ( + + {content} + + ))} + + ) +} + +export const StepLinkList = ({ listItems }) => { + console.log('listItems', listItems) + return ( + + {listItems.map(({ title, icon, content, link }, idx) => ( + + + + ))} + + ) +} + +export const UpdateList = ({ listItems: array }) => { + return ( + +
+ Learn what Livepeer is and how it can benefit you + [About Livepeer](../../01_about/about-home/) +
+
+ ) +} + +export const UpdateLinkList = ({ listItems: array }) => { + return ( + <> + {array.map(({ title, icon, content, link }, idx) => ( + +
+ {content} + +
+
+ ))} + + ) +} diff --git a/snippets/components/release.jsx b/snippets/components/release.jsx new file mode 100644 index 00000000..4f3ae348 --- /dev/null +++ b/snippets/components/release.jsx @@ -0,0 +1,61 @@ +import { useState, useEffect } from "react"; + +/** + * LatestRelease - Fetches and displays the latest release version from GitHub + * Usage: + * import { LatestRelease, LatestReleaseUrl } from '/snippets/components/release.jsx' + * + * Latest version: + * Download here + */ + +export const LatestRelease = ({ + repo = "livepeer/go-livepeer", + fallback = "latest", +}) => { + const [version, setVersion] = useState(fallback); + const [loading, setLoading] = useState(true); + + useEffect(() => { + fetch(`https://api.github.com/repos/${repo}/releases/latest`) + .then((res) => res.json()) + .then((data) => { + if (data.tag_name) { + setVersion(data.tag_name); + } + setLoading(false); + }) + .catch(() => { + setLoading(false); + }); + }, [repo]); + + return version; +}; + +export const LatestReleaseUrl = ({ + repo = "livepeer/go-livepeer", + asset = "livepeer-linux-amd64.tar.gz", + children, +}) => { + const [url, setUrl] = useState(`https://github.com/${repo}/releases/latest`); + + useEffect(() => { + fetch(`https://api.github.com/repos/${repo}/releases/latest`) + .then((res) => res.json()) + .then((data) => { + if (data.tag_name) { + setUrl( + `https://github.com/${repo}/releases/download/${data.tag_name}/${asset}` + ); + } + }) + .catch(() => {}); + }, [repo, asset]); + + return ( + + {children || url} + + ); +}; diff --git a/snippets/components/steps.jsx b/snippets/components/steps.jsx new file mode 100644 index 00000000..bf86f7b3 --- /dev/null +++ b/snippets/components/steps.jsx @@ -0,0 +1,38 @@ +// Custom Steps component with styling support + +export const StyledSteps = ({ + children, + iconColor = "#18794e", + titleColor = "#2b9a66", + lineColor = "#2b9a66", + iconSize = "24px", +}) => { + const stepsId = `styled-steps-${Math.random().toString(36).substr(2, 9)}`; + + return ( + <> + +
+ {children} +
+ + ); +}; + +export const StyledStep = ({ title, icon, titleSize = "h3", children }) => { + return ( + + {children} + + ); +}; diff --git a/snippets/components/stuff.js b/snippets/components/stuff.js new file mode 100644 index 00000000..8ed09d40 --- /dev/null +++ b/snippets/components/stuff.js @@ -0,0 +1,9 @@ +// export const embedUrl = url.replace('watch?v=', 'embed/') + +// import { Video } from '/snippets/video.jsx' +//