From 30f78d00e26bb9e96720fd941655e19285ec0b36 Mon Sep 17 00:00:00 2001
From: Ian He <39037239+ianhe8x@users.noreply.github.com>
Date: Thu, 30 Oct 2025 16:23:46 +1300
Subject: [PATCH 1/5] Update GraphQL Agent documentation

- Comprehensive GraphQL Agent documentation with technical architecture
- Free tier integration in Network App with rate limiting
- Paid MCP service announcement (coming soon)
---
 docs/.vuepress/config.ts                    |   4 +-
 docs/ai/ai-app-framework.md                 |  48 ---
 docs/ai/api/api.md                          |   8 -
 docs/ai/build/app.md                        | 160 ----------
 docs/ai/build/function_tools.md             |  87 ------
 docs/ai/build/other.md                      |   5 -
 docs/ai/build/rag.md                        | 134 --------
 docs/ai/graphql_agent.md                    | 252 +++++++++++++++
 docs/ai/guides/delegation-helper.md         | 321 --------------------
 docs/ai/guides/fancy-greeter.md             | 115 -------
 docs/ai/guides/subquery-docs-rag.md         | 194 ------------
 docs/ai/publish/publish.md                  |  11 -
 docs/ai/run/cli.md                          | 165 ----------
 docs/ai/run/docker.md                       |  54 ----
 docs/ai/run/local.md                        |  21 --
 docs/ai/snippets/add-a-function-tool.md     |   1 -
 docs/ai/snippets/configure-app-logic.md     |   1 -
 docs/ai/snippets/configure-manifest-file.md |   1 -
 docs/ai/snippets/create-a-new-app.md        |   5 -
 docs/ai/snippets/install-the-framework.md   |   9 -
 docs/ai/snippets/prerequisites.md           |  11 -
 docs/ai/snippets/run-the-ai-app.md          |  15 -
 docs/ai/snippets/summary.md                 |   7 -
 docs/ai/snippets/update-system-prompt.md    |   1 -
 docs/ai/welcome.md                          |  54 ----
 25 files changed, 254 insertions(+), 1430 deletions(-)
 delete mode 100644 docs/ai/ai-app-framework.md
 delete mode 100644 docs/ai/api/api.md
 delete mode 100644 docs/ai/build/app.md
 delete mode 100644 docs/ai/build/function_tools.md
 delete mode 100644 docs/ai/build/other.md
 delete mode 100644 docs/ai/build/rag.md
 create mode 100644 docs/ai/graphql_agent.md
 delete mode 100644 docs/ai/guides/delegation-helper.md
 delete mode 100644 docs/ai/guides/fancy-greeter.md
 delete mode 100644 docs/ai/guides/subquery-docs-rag.md
 delete mode 100644 docs/ai/publish/publish.md
 delete mode 100644 docs/ai/run/cli.md
 delete mode 100644 docs/ai/run/docker.md
 delete mode 100644 docs/ai/run/local.md
 delete mode 100644 docs/ai/snippets/add-a-function-tool.md
 delete mode 100644 docs/ai/snippets/configure-app-logic.md
 delete mode 100644 docs/ai/snippets/configure-manifest-file.md
 delete mode 100644 docs/ai/snippets/create-a-new-app.md
 delete mode 100644 docs/ai/snippets/install-the-framework.md
 delete mode 100644 docs/ai/snippets/prerequisites.md
 delete mode 100644 docs/ai/snippets/run-the-ai-app.md
 delete mode 100644 docs/ai/snippets/summary.md
 delete mode 100644 docs/ai/snippets/update-system-prompt.md
 delete mode 100644 docs/ai/welcome.md

diff --git a/docs/.vuepress/config.ts b/docs/.vuepress/config.ts
index b09a29f3432..ebba53ce49a 100644
--- a/docs/.vuepress/config.ts
+++ b/docs/.vuepress/config.ts
@@ -70,8 +70,8 @@ j=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=true;j.src=
         link: "/indexer/welcome.md",
       },
       {
-        text: "SubQuery AI Apps Framework",
-        link: "/ai/welcome.md",
+        text: "SubQuery GraphQL Agent",
+        link: "/ai/graphql_agent.md",
       },
       {
diff --git a/docs/ai/ai-app-framework.md b/docs/ai/ai-app-framework.md
deleted file mode 100644
index ff16cbdac38..00000000000
--- a/docs/ai/ai-app-framework.md
+++ /dev/null
@@ -1,48 +0,0 @@
-# SubQuery AI App Framework - A New Way to Build Decentralised Intelligent Applications
-
-SubQuery has launched the first version of our new developer SDK, the SubQuery AI App Framework.
-
- -The AI App Framework allows you to build, deploy, and run production AI apps on the SubQuery Network in a trusted and decentralised environment. AI apps are self-contained and easily scalable AI agents that you can use to power your intelligent applications. They are sandboxed to a trusted runner and can be easily distributed and scaled horizontally across the SubQuery Network. - -[Start building AI Apps today!](https://academy.subquery.network/ai/welcome.html) - -### Starting with the new SubQuery AI Assistant - -The framework provides a shortcut for developers to start incorporating AI into their applications in a decentralised and secure way today, we can’t wait to see how our customers will use it! - -Today we’re launching with an example open-source use case on the SubQuery Network, SubQuery’s new support chatbot. On both our [docs](https://academy.subquery.network/) and on our [network application](https://app.subquery.network/), you can chat with SubQuery’s AI assistant for common questions about SubQuery’s SDK, about the decentralised network, and even queries about your own staking rewards. - -The AI App shows off our function tooling capabilities to query wallet balances and network analytics in real-time, and has also been trained with a detailed RAG of the SubQuery documentation so it easily understands how SubQuery works. - -[Try it out now](https://app.subquery.network/) and ask it any question, or [see how it is built here](https://github.com/subquery/subql-ai-app-example/tree/main/network-delegation-helper) - you could adapt it to your own decentralised application in a few minutes! - -### What can you build with SubQuery’s AI App Framework? - -SubQuery’s AI App Framework is a powerful but intuitive SDK to help you build advanced AI applications in minutes. It’s built on typescript and is extremely easy to use, essentially simplifying most of the backend tasks required to customise and integrate LLM models into production usecases. - -For example, you could use it to build: - -- A customer support bot, trained on the documentation for your project and able to guide customers through their problems. It could also be trained to submit clean and detailed customer support tickets when escalation is required. -- Wallet assistants that help users understand the tokens they have and guide them through the process of managing, bridging, or swapping them. For example, a user could ask “_how do I convert ETH to SQT?_” and the AI would guide them through the options. -- Content moderation for decentralised social networks, AI Apps could be trained to moderate content by identifying spam or harmful content and using function tools to disable or hide them. -- AI agents to improve participation in governance. By analysing and summarising proposals in DAOs and assisting users in making informed voting decisions based on data-driven insights. -- Dynamic pricing models, by analysing demand and supply in real-time, adjusting prices for tokens or NFTs dynamically based on user behaviour and market trends. - -### Features - -- Effortless decentralised distribution: The SubQuery AI App Framework uses a sandboxed environment for secure and efficient operations. Each AI App is encapsulated with its own manifest, enabling seamless distribution across the SubQuery Network. This ensures that horizontal scaling is not only easy but also secure, allowing developers to focus on innovation rather than infrastructure. 
-- Empower your AI with RAGs: By integrating [RAG (Retrieval-Augmented Generation) files](https://academy.subquery.network/ai/build/rag.html), your AI Apps can leverage domain-specific knowledge efficiently. With initial support for LanceDB and future compatibility with other vector databases, developers can enhance their applications' performance and accuracy. Additionally, publishing to IPFS ensures data integrity and accessibility. -- Your AI journey starts here: The SubQuerty AI App framework is designed with user-friendliness in mind, providing intuitive wrappers around core features. This lowers the barrier to entry for developers of all skill levels, making it easier to create, run, and deploy AI Apps. -- Connect, create, and integrate with function tooling: You can extend your AI Apps with [additional function tooling](https://academy.subquery.network/ai/build/function_tools.html), facilitating connections to external systems and tools. This capability enables rich integrations, allowing users to create versatile applications that can interact seamlessly with blockchains and other ecosystems. -- Choose your own model: By supporting a range of open-source LLM models, starting with Ollama-compatible and OpenAI, the SubQuery AI App Framework ensures that users can choose the best model for their applications without being locked into a specific model ecosystem. This flexibility fosters open-source innovation. -- Proven standards for seamless integration: SubQuery AI Apps expose the industry-standard [OpenAI API](https://academy.subquery.network/ai/query/query.html), ensuring compatibility with a wide range of applications and tools. This makes it easier for developers to integrate AI capabilities into their projects while adhering to established standards. - -### Running your AI Apps - -The SubQuery AI Apps framework is and always will be open-source, meaning you can extend it and run it in a variety of ways. - -Initially we will support two methods to running your AI apps, but we expect this number to grow over time. You can run it yourself locally or a cloud provider of your choosing, using [Docker](https://academy.subquery.network/ai/run/docker.html) or [running the individual components using NodeJS services](https://academy.subquery.network/ai/run/local.html). - -Alternatively, you can publish your AI Apps to the SubQuery Network. The AI Apps framework has been built from the ground up to support running on our decentralised network. This means that your AI apps can easily be deployed onto our decentralised network and scaled horizontally. - -[Start building AI Apps today!](https://academy.subquery.network/ai/welcome.html) \ No newline at end of file diff --git a/docs/ai/api/api.md b/docs/ai/api/api.md deleted file mode 100644 index 6202c6df771..00000000000 --- a/docs/ai/api/api.md +++ /dev/null @@ -1,8 +0,0 @@ -# AI App Query API - -SubQuery AI Apps expose the industry-standard [OpenAI Completions API](https://platform.openai.com/docs/api-reference/), ensuring compatibility with a wide range of applications and tools. This makes it easier for developers to integrate AI capabilities into their projects while adhering to established standards. 
- -The HTTP interface implements the following OpenAPI interface methods: - -- [GET `/v1/models`](https://platform.openai.com/docs/api-reference/models) -- [POST `/v1/chat/completions`](https://platform.openai.com/docs/api-reference/chat/create) diff --git a/docs/ai/build/app.md b/docs/ai/build/app.md deleted file mode 100644 index dd185d7f981..00000000000 --- a/docs/ai/build/app.md +++ /dev/null @@ -1,160 +0,0 @@ -# AI Apps - -The SubQuery AI App framework is designed with user-friendliness in mind, providing intuitive wrappers around core features. This lowers the barrier to entry for developers of all skill levels, making it easier to create, run, and deploy AI Apps. - -Building an AI application is a straightforward process that takes minutes. The SubQuery AI App framework simplifies most of this development, while allowing you to build complex applications that are ready to run on a decentralised network. - -There are a few components to an application, to get started, create a project manifest and entrypoint. - -## Project Manifest - -The project manifest is the start point for your project. It defines a set of options and references your [tools](./tools) and [rag data](./rag). The format of this file can either be in JSON or TypeScript format. It is recommended to use TypeScript as it can easily provide extra build time functionality and type safety. - -::: info Note -When you publish your app, the TypeScript manifest will be converted to JSON. -::: - -::: code-tabs - -@tab:active typescript - -```typescript -import type { ProjectManifest } from "jsr:@subql/ai-app-framework"; - -const project: ProjectManifest = { - // The version of the manifest spec, this is always 0.0.1. - specVersion: "0.0.1", - // For security reasons you must provide a list of allowed domains that your project will use - endpoints: ["gateway.subquery.network"], - // (Optional) Vector db storage for RAG data, currently only lance DB is supported - vectorStorage: { - type: "lancedb", - path: "./.db", - }, - // (Optional) Runtime configuration options, this will get passed into your project entrypoint. - // It needs to be in the JSON Schema format. We recommend using @sinclair/typebox to make this easy and provide runtime type checking. - config: { - type: "object", - properties: { - GRAPHQL_ENDPOINT: { - default: - "https://gateway.subquery.network/query/QmcoJLxSeBnGwtmtNmWFCRusXVTGjYWCK1LoujthZ2NyGP", - type: "string", - }, - BASE_RPC: { - default: "https://gateway.subquery.network/rpc/base-full", - type: "string", - }, - BASE_SQT_ADDR: { - default: "0x858c50C3AF1913b0E849aFDB74617388a1a5340d", - type: "string", - }, - }, - required: ["GRAPHQL_ENDPOINT", "BASE_RPC", "BASE_SQT_ADDR"], - }, - // The LLM model you wish to use. Currently any Ollama model is supported - model: "llama3.1", - // (Optional) The LLM model for generating vectors from text. 
- embeddingsModel: "nomic-embed-text", - // The path to your project entrypoint - entry: "./index.ts", -}; - -// The manifest must be the default export -export default project; -``` - -@tab json - -```json -{ - "specVersion": "0.0.1", - "endpoints": ["gateway.subquery.network"], - "vectorStorage": { - "type": "lancedb", - "path": "./.db" - }, - "config": { - "type": "object", - "properties": { - "GRAPHQL_ENDPOINT": { - "default": "https://gateway.subquery.network/query/QmcoJLxSeBnGwtmtNmWFCRusXVTGjYWCK1LoujthZ2NyGP", - "type": "string" - }, - "BASE_RPC": { - "default": "https://gateway.subquery.network/rpc/base-full", - "type": "string" - }, - "BASE_SQT_ADDR": { - "default": "0x858c50C3AF1913b0E849aFDB74617388a1a5340d", - "type": "string" - } - }, - "required": ["GRAPHQL_ENDPOINT", "BASE_RPC", "BASE_SQT_ADDR"] - }, - "model": "llama3.1", - "entry": "./index.ts" -} -``` - -::: - -### Config - -To specify or override default config values when running, you can provide them with environment variables. - -Example: - -```shell -GRAPHQL_ENDPOINT=https://some.other.endpoint subql-ai -p ./path/to/project.ts -``` - -## Project Entrypoint - -The project entrypoint is how your tools and system prompt are initialised, unlike the project manifest this is not static so you can change the behaviour based on your config spec. - -Example entrypoint: - -```typescript -import type { ProjectEntry, Project } from "jsr:@subql/ai-app-framework"; - -// This is your system prompt. It gives broad information to the LLM about what your application should to and how it should respond. -const SYSTEM_PROMPT = ` -You are an agent designed to help a user with their token delegation on the SubQuery Network. -Given an input question, use the available tools to answer the users question quickly and concisely. -Your answer must use the result of the tools available. -Do not mention that you used a tool or the name of a tool. -If you need more information to answer the question, ask the user for more details. -All token amounts are in SQT. - -If the question seems to be unrelated to the API, just return "I don't know" as the answer. -`; - -const entrypoint: ProjectEntry = async (config: Config): Promise => { - return { - // Initialise any tools you wish to provided with your app - tools: [ - new TotalDelegation(config.GRAPHQL_ENDPOINT), - new DelegatedIndexers(config.GRAPHQL_ENDPOINT), - new UnclaimedDelegatorRewards(config.GRAPHQL_ENDPOINT), - new CurrentDelegatorApy(config.GRAPHQL_ENDPOINT), - new BetterIndexerApy(config.GRAPHQL_ENDPOINT), - new TokenBalance( - new JsonRpcProvider(config.BASE_RPC), - config.BASE_SQT_ADDR, - ), - new SubqueryDocs(), - ], - systemPrompt: SYSTEM_PROMPT, - }; -}; - -// The project entry must be a default export -export default entrypoint; -``` - -## Next Steps - -- [Define any additional function tools](./function_tools) -- [Add RAG data](./rag) -- Optimise your System Prompt and Tool descriptions. diff --git a/docs/ai/build/function_tools.md b/docs/ai/build/function_tools.md deleted file mode 100644 index 8ddc8e8329b..00000000000 --- a/docs/ai/build/function_tools.md +++ /dev/null @@ -1,87 +0,0 @@ -# Function Tools - -You can extend your AI Apps with additional function tooling, facilitating connections to external systems and tools. This capability enables rich integrations, allowing users to create versatile applications that can interact seamlessly with blockchains and other ecosystems. - -Function tools are functions that extend the functionality of the LLM. 
They can be used to do many things like request data from external APIs and services, perform computations or analyse structured data outputs from the AI. - -An example of a simple tool is making a GraphQL query from a specific SubQuery indexing SDK project. - -## Defining a function tool - -Function tools consist of 4 parts: - -- `name`: The name of the tool, this is used to identify the tool and must be unique amongst the provided tools. -- `description`: This is like a system prompt for the LLM to understand what the tool does and when it should be used, it should be as descriptive as possible as it allows the AI to determine when to use the tool and what it should be used for. -- `parameters`: This defines what parameters the LLM needs to gather in order to run the tool. -- `call`: This is the function implementation that takes an input that should match the defined parameters and return a string with the result. - -### Example - -This tool example makes a GraphQL request to get the amount of SQT a wallet address has delegated on the SubQuery network. - -```ts -import { FunctionTool, type IContext } from "jsr:@subql/ai-app-framework"; - -export class TotalDelegation extends FunctionTool { - constructor(readonly endpoint: string) { - super(); - } - - // The name can be inferred from the class name or if you wish to be explicit it can be done here - // name = 'total-delegation-amount'; - description = `This tool gets the total delegation amount of SQT for the given user address. -If no delegation is found it will return null.`; - parameters = { - type: "object", - required: ["account"], - properties: { - account: { - type: "string", - description: - "The account or address of the user which to get delegation information for", - }, - }, - }; - - async call( - { account }: { account: string }, - ctx: IContext, - ): Promise { - try { - const res = await graphqlRequest<{ - delegator: null | { totalDelegations: Amount }; - }>( - this.endpoint, - `{ - delegator(id: "${account}") { - totalDelegations - } - }`, - ); - - if (!res.delegator) { - return null; - } - - return formatEther(res.delegator.totalDelegations.valueAfter.value); - } catch (error) { - return `${error}`; - } - } -} -``` - -### Context - -Tool calls have access to a context. This provides relevant functions that a tool can use relating to the LLM and Vector DB. - -It has the following interface: - -```ts -type IContext = { - // Converts text into vector data using the nomic-embed-text model - computeQueryEmbedding: (query: string) => Promise; - // Searches the provided vector DB with vector data from computeQueryEmbedding and returns matching results - vectorSearch: (table: string, vector: number[]) => Promise; -}; -``` diff --git a/docs/ai/build/other.md b/docs/ai/build/other.md deleted file mode 100644 index 80478d8dc02..00000000000 --- a/docs/ai/build/other.md +++ /dev/null @@ -1,5 +0,0 @@ -# Other - -Other random bits of information that don't yet have a place - -- Don't use a `deno.json` file in your project for import maps. They won't be resolved when publishing. diff --git a/docs/ai/build/rag.md b/docs/ai/build/rag.md deleted file mode 100644 index 7396c6e84ec..00000000000 --- a/docs/ai/build/rag.md +++ /dev/null @@ -1,134 +0,0 @@ -# RAG - -By integrating RAG (Retrieval-Augmented Generation) files, your AI Apps can leverage domain-specific knowledge efficiently. With initial support for LanceDB and future compatibility with other vector databases, developers can enhance their applications' performance and accuracy. 
Additionally, publishing to IPFS ensures data integrity and accessibility. - -Retrieval Augmented Generation (RAG) allows developers to provide a knowledge base outside of the LLMs training data. This means that the LLM can provide specific information about a dataset or have expertise in a certain area. - -## Defining RAG - -We provide an off the shelf tool to create datasets from markdown files and web sources. This tool extracts the data into chunks, generates embedding vectors and stores them in a vector DB. - -Currently only [Lance DB](https://lancedb.github.io/lancedb/) is supported. You can [review Lance DB's documentation](https://lancedb.github.io/lancedb/basic/) to determine the best way to ingest and embed your chosen RAG source data. - -```shell -subql-ai embed -i ./path/to/dir/with/markdown -o ./db --table your-table-name --model nomic-embed-text -``` - -RAG data can also be defined with your own tooling. - -::: info - -You can follow through a step by step tutorial on how parse, vectorise, and add the resulting RAG database to your AI App in our [RAG quick start guide](../guides/subquery-docs-rag.md). - -::: - -## Updating RAG - -As the source content changes, you may need to update your RAG data. To do this you can run the same command as used to define your initial RAG data. The tool will be able to determine changes to content and update the DB to match the changes, this makes the process much faster and reduces calls to the LLM. - -## Adding RAG to your app - -Once you have defined your RAG dataset you need to include it in your project. - -First you will need to add it to your project manifest: - -```ts -const project: ProjectManifest = { - // ..The rest of your project manifest - vectorStorage: { - type: "lancedb", - path: "./data.lance", - }, - // Set this to the same model you use to generate your RAG db - embeddingsModel: "nomic-embed-text", -}; -``` - -In order for your project to be able to use this data you will also need to define a tool to consume it. We provide a built in RagTool which you can use, if you need more specific functionality you can extend this or build your own. - -```ts -import { RagTool } from "jsr:@subql/ai-app-framework"; - -// Add this to your array of tools in your project, -// The first argument is your table name and the second is the column you want to select -new RagTool("subql-docs", "content"); -``` - -::: details Tool implementation - -```ts -export class RagTool extends FunctionTool { - /** - * RagTool is a default implementation allowing querying RAG data - * @param tableName The name of the table to query - * @param column The column on the table to extract results from - */ - constructor(readonly tableName: string, readonly column: string) { - super(); - } - - get description(): string { - return `This tool gets relevant information from the ${this.tableName}. It returns a list of results separated by newlines.`; - } - - parameters = { - type: "object", - required: ["query"], - properties: { - account: { - type: "string", - description: "A search string, generally the users prompt", - }, - }, - }; - - async call({ query }: { query: string }, ctx: IContext): Promise { - const vector = await ctx.computeQueryEmbedding(query); - const raw = await ctx.vectorSearch(this.tableName, vector); - - const res = raw - .map((r) => r[this.column]) - .filter((c) => !!c) - .join("\n"); - - return res; - } -} -``` - -::: - -This tool does a few things: - -1. Converts the user query input into an embedded vector data. -2. 
Searches the specified vector db table for the closest matches.
-3. Processes the raw results. In this example the data has a column called `content` that needs to be extracted and combined for the LLM to use as the answer.
-
-#### Add your tool to your project
-
-```ts
-const tools: FunctionTool[] = [
-  // ..The rest of your tools
-  new SubqueryDocs(),
-];
-```
-
-## Updating your projects data
-
-RAG data is generally not static, the source of this information can change and evolve.
-That means from time to time it is handy to rebuild your database.
-
-In order to do this when you define your application the path to your Lance DB should not use a local or IPFS path.
-Instead you should use any of the supported storage options that [Lance DB Supports](https://lancedb.github.io/lancedb/concepts/storage/).
-
-Example:
-
-```ts
-
-const project: ProjectManifest = {
-  // ..The rest of your project manifest
-  vectorStorage: {
-    type: "lancedb",
-    path: "s3://my-bucket/my-path/data.lance"
-}
-```
diff --git a/docs/ai/graphql_agent.md b/docs/ai/graphql_agent.md
new file mode 100644
index 00000000000..5bc6ebd14ca
--- /dev/null
+++ b/docs/ai/graphql_agent.md
@@ -0,0 +1,252 @@
+# SubQuery GraphQL Agent
+
+The SubQuery GraphQL Agent is an intelligent AI-powered agent that revolutionizes how users interact with SubQuery and Subgraph projects. Instead of writing complex GraphQL queries manually, users can ask questions in natural language, and the agent will automatically interpret the schema, generate appropriate GraphQL queries, extract the data, and provide summarized answers.
+
+## Overview
+
+Traditional GraphQL query building requires deep understanding of:
+- GraphQL schema structure
+- Query syntax and best practices
+- Entity relationships and data types
+- Performance optimization techniques
+
+The GraphQL Agent eliminates these barriers by providing an intelligent interface that bridges the gap between natural language questions and structured data retrieval.
+
+### How It Works
+
+1. **Schema Analysis**: The agent reads and interprets the GraphQL schema of any SubQuery or Subgraph project
+2. **Query Generation**: Converts natural language questions into optimized GraphQL queries
+3. **Data Extraction**: Executes the queries against the project's endpoint
+4. **Intelligent Summarization**: Processes the raw data and provides human-readable answers
+
+## Core Features
+
+### Natural Language to GraphQL Conversion
+- Ask questions in plain English: "What are the top 10 DeFi protocols by TVL?"
+- Automatic conversion to complex GraphQL queries
+- Handles nested queries, filters, and aggregations
+- No GraphQL knowledge required
+
+### Innovative Schema Compression Technology
+The GraphQL Agent solves a fundamental challenge in AI-powered GraphQL interactions: **schema size exceeds LLM context limits**. The comparison below shows why traditional approaches fail:
+
+- **Traditional**: Full GraphQL introspection schemas are 50,000+ tokens
+- **Our Approach**: Compressed entity schemas are ~500-1,000 tokens (100x smaller)
+- **Context Usage**: Reduced from 80-95% to 5-10% of the context window
+- **Result**: Reliable, cost-effective query generation
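+
+As a rough illustration, the sketch below shows the kind of compressed entity schema the agent works from. The entities, fields, and annotations here are hypothetical examples, not the schema of any real project:
+
+```graphql
+# Hypothetical compressed entity schema (illustrative only).
+# A few dozen SDL tokens stand in for the tens of thousands of tokens
+# a full introspection result would consume, while keeping what the
+# agent needs: entities, field types, and relationships.
+type Delegator @entity {
+  id: ID!                   # wallet address
+  totalDelegations: BigInt! # lifetime delegated amount
+  indexers: [Indexer!]!     # relationship: one delegator, many indexers
+}
+
+type Indexer @entity {
+  id: ID!
+  commission: Int!
+  active: Boolean!
+}
+```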
+
+### Entity Schema + Rules Architecture
+Instead of raw GraphQL schemas, we use a revolutionary **compressed, high-density schema representation**:
+
+1. **Entity Schema**: Project-specific domain models and relationships
+2. **PostGraphile v4 Rules**: Query construction patterns for the SubQuery SDK
+3. **Intelligent Construction**: The agent builds queries using learned patterns
+4. **Real-time Validation**: Ensures query correctness before execution
+
+**Benefits:**
+- **💰 Cost Effective**: 10-20x lower token usage than traditional approaches
+- **🎯 Higher Accuracy**: Domain-specific knowledge reduces errors
+- **⚡ Faster Responses**: Smaller context means faster processing
+- **🔄 Scalable**: Works consistently across different LLM models
+
+### PostGraphile v4 Pattern Recognition
+The agent automatically understands SubQuery SDK-generated GraphQL patterns:
+- **Entity Queries**: Single entities and collection queries with pagination
+- **Advanced Filtering**: Complex filters with multiple conditions
+- **Ordering & Sorting**: Multi-field ordering capabilities
+- **Relationship Navigation**: Nested entity relationships
+
+### Multi-Query Orchestration
+- Breaks complex questions into multiple related queries
+- Executes queries in optimal order
+- Combines results from multiple data sources
+- Maintains context across related queries
+
+### Data Summarization
+- Extracts key insights from large datasets
+- Provides human-readable summaries
+- Identifies trends and patterns
+- Highlights important metrics and relationships
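+
+To sketch how this orchestration might look in practice, a question such as "Which indexers have the most delegators, and how much is staked with the largest one?" could be decomposed into two generated queries, the second built from the first query's result. The entity and field names below are hypothetical:
+
+```graphql
+# Query 1: find the indexers with the most delegators
+query TopIndexers {
+  indexers(first: 5, orderBy: [DELEGATOR_COUNT_DESC]) {
+    nodes { id, delegatorCount }
+  }
+}
+
+# Query 2: drill into the top result returned by query 1
+query TopIndexerDetail {
+  indexer(id: "0x1234...") {
+    totalDelegations
+    delegators(first: 10) {
+      nodes { id, amount }
+    }
+  }
+}
+```
+
+The answer the user sees is then a short summary of both result sets rather than the raw JSON.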
+
+## Free Tier - Network App Integration
+
+The GraphQL Agent is integrated directly into the SubQuery Network App, available on every indexing project's detail page.
+
+### Access and Usage
+- **Location**: Available on each indexing project's detail page in the Network App
+- **Authentication**: No authentication required (basic web interface)
+- **Cost**: Free with rate limiting
+
+### Rate Limits
+- **5 queries per day per user**
+- Resets daily
+- Fair usage policy applies
+- Designed for exploration and testing
+
+### Getting Started with Free Tier
+1. Navigate to any indexing project in the [SubQuery Network App](https://app.subquery.network)
+2. Go to the project's detail page
+3. Find the GraphQL Agent interface
+4. Type your question in natural language
+5. Receive instant answers with data insights
+
+## Paid Service - GraphQL Agent as MCP (Coming Soon)
+
+For users requiring higher query volumes and advanced features, we will be offering a premium MCP (Model Context Protocol) server that wraps the GraphQL Agent functionality.
+
+### Planned Features
+- **Protocol**: Model Context Protocol (MCP)
+- **Deployment**: Cloud-hosted service
+- **Integration**: Compatible with MCP-compliant tools and IDEs
+- **Performance**: Optimized for high-throughput usage
+
+### What to Expect
+- **API Keys**: Secure authentication via unique API keys
+- **Multi-tenant**: Isolated environments for each customer
+- **Flexible Rate Limiting**: Based on subscription tier
+- **Real-time Monitoring**: Usage tracking and analytics
+
+### Subscription Tiers (Planned)
+We plan to offer multiple subscription tiers to accommodate different usage patterns:
+
+- **Starter**: Suitable for individual developers and small projects
+- **Professional**: Ideal for growing teams and moderate usage
+- **Enterprise**: Custom solutions for large organizations with high-volume needs
+
+### Payment Methods (Planned)
+- **Cryptocurrency**: SubQuery Tokens (SQT)
+- **Traditional Payment**: Credit cards and wire transfers
+- **Flexible Billing**: Monthly and annual subscription options
+
+:::info Status
+
+The paid GraphQL Agent MCP service is currently under development. Sign up for our newsletter to be notified when it launches!
+
+:::
+
+## Getting Started Guide
+
+### Free Tier Quick Start
+1. **Visit the Network App**: Navigate to [app.subquery.network](https://app.subquery.network)
+2. **Select a Project**: Choose any indexing project to explore
+3. **Access GraphQL Agent**: Find the agent interface on the project detail page
+4. **Ask Questions**: Type questions in natural language
+5. **Review Results**: Get instant answers with data insights
+
+### Future Paid Service Setup (Coming Soon)
+Once the MCP service launches, the setup process will include:
+1. **Choose a Subscription**: Select the appropriate tier based on your needs
+2. **Complete Payment**: Subscribe using SQT or traditional payment methods
+3. **Receive API Key**: Get your unique authentication credentials
+4. **Configure MCP Client**: Set up your MCP-compatible tool or IDE
+5. **Start Querying**: Begin using the GraphQL Agent with enhanced capabilities
+
+## Usage Examples
+
+### Simple Queries
+- "How many transactions occurred yesterday?"
+- "What's the current TVL of this protocol?"
+- "List the top 5 users by transaction volume"
+
+### Complex Analysis
+- "Compare the growth rate of DeFi protocols vs gaming protocols over the last 30 days"
+- "What patterns emerge in user behavior during market volatility?"
+- "Analyze the correlation between gas prices and transaction volume"
+
+### Data Exploration
+- "What types of smart contracts are most frequently interacted with?"
+- "Identify unusual transaction patterns that might indicate bot activity"
+- "Show me the distribution of token holders across different wallets"
+
+## Technical Reference
+
+### Architecture Overview
+
+The GraphQL Agent is built with these core components:
+
+1. **GraphQLSource** - Connection wrapper for GraphQL endpoints with entity schema support
+2. **GraphQLToolkit** - LangChain-compatible toolkit providing all GraphQL tools
+3. **Agent Tools** - Individual tools for specific GraphQL operations
+4. **FastAPI Server** - OpenAI-compatible API with streaming support
+
+### Available Agent Tools
+
+1. **`graphql_schema_info`** - Get the raw entity schema with PostGraphile v4 rules
+2. **`graphql_query_validator_execute`** - Combined validation and execution tool (validates queries, then executes them if valid)
+
+### Supported GraphQL Features
+- **Queries**: Standard GraphQL queries with fields, arguments, and variables
+- **Fragments**: Support for query fragments and inline fragments
+- **Aliases**: Field and argument aliasing for custom result structures
+- **Directives**: @include and @skip directives for conditional queries
+- **Variables**: Query variables for dynamic parameter passing
+
+### PostGraphile v4 Query Patterns
+
+The agent understands these SubQuery SDK patterns automatically:
+
+#### Entity Queries
+```graphql
+# Single entity
+entityName(id: "ID")
+
+# Collection with pagination
+entityNames(first: 10, filter: {field: {equalTo: "value"}}) {
+  nodes { id, name }
+  pageInfo { hasNextPage, endCursor }
+}
+```
+
+#### Advanced Filtering
+```graphql
+filter: {
+  fieldName: { equalTo: "value" }
+  amount: { greaterThan: 100 }
+  status: { in: ["active", "pending"] }
+}
+```
+
+#### Ordering and Pagination
+```graphql
+orderBy: [FIELD_NAME_ASC, CREATED_AT_DESC]
+{
+  entities(first: 10, after: "cursor") {
+    nodes { id, field }
+    pageInfo { hasNextPage, endCursor }
+  }
+}
+```
+
+### Agent Workflow
+
+The agent follows this workflow:
+
+1. **Relevance Check**: Determines whether the question relates to the project's data
+2. **Schema Analysis**: Loads the entity schema and PostGraphile rules (once per session)
+3. **Query Construction**: Builds GraphQL queries using PostGraphile patterns
+4. **Validation**: Validates queries against the live GraphQL schema
+5. **Execution**: Executes validated queries to get real data
+6. **Summarization**: Provides user-friendly responses based on actual results
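+
+The following is a sketch of a single pass through this workflow. For a question like "Show the 10 largest transfers above 100 SQT this week", steps 3 to 5 might construct, validate, and run a query along these lines (the `transfers` entity and its fields are hypothetical, and the variable values would be filled in by the agent):
+
+```graphql
+query LargestTransfers($minAmount: BigFloat!, $since: Datetime!) {
+  transfers(
+    first: 10
+    filter: {
+      amount: { greaterThan: $minAmount }
+      timestamp: { greaterThan: $since }
+    }
+    orderBy: [AMOUNT_DESC]
+  ) {
+    nodes { id, from, to, amount, timestamp }
+    pageInfo { hasNextPage, endCursor }
+  }
+}
+```
+
+Step 6 would then turn the returned rows into a plain-language summary rather than echoing the raw response.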
+
+## Pricing and Billing
+Coming soon.
+
+### Support Resources
+- **Documentation**: Comprehensive guides and API references
+- **Community Forum**: Get help from other users and developers
+- **Support Tickets**: Direct support for paid subscribers
+- **Status Page**: Real-time service status and incident updates
+
+## FAQ
+
+**What types of questions can I ask?**
+You can ask any question that can be answered by the data in the SubQuery project, from simple counts to complex analytical queries.
+
+**Can I use the GraphQL Agent with my own SubQuery projects?**
+Yes. Support for your own projects is planned as part of the upcoming GraphQL Agent MCP service.
+
+**What's the difference between the free and paid versions?**
+The free tier has rate limits (5 queries/day), while paid versions will offer higher limits, priority access, and additional features.
+
+**How does the agent handle complex queries?**
+The agent automatically breaks complex questions into multiple optimized queries and combines the results intelligently.
\ No newline at end of file
diff --git a/docs/ai/guides/delegation-helper.md b/docs/ai/guides/delegation-helper.md
deleted file mode 100644
index 67d18c288a9..00000000000
--- a/docs/ai/guides/delegation-helper.md
+++ /dev/null
@@ -1,321 +0,0 @@
-# SubQuery Network Delegation Helper - Complex Example with Advanced Function tools
-
-This is a more advanced example of a SubQuery AI application. It is an agent specifically designed to assist users with questions relating to their token delegation on the SubQuery Network. The agent utilizes multiple tools to extract and interpret relevant information about delegation from raw on-chain data (indexed using SubQuery of course).
-
-This showcases an excellent integration of an AI framework with the SubQuery Indexing SDK, enabling the structuring of natural language responses based on on-chain data.
-
-:::info note
-You can follow along in the tutorial with the [example code here](https://github.com/subquery/subql-ai-app-example/tree/main/network-delegation-helper).
-:::
-
-## 1. Install the framework
-
-## 2. Create a New App
-
-## 3. Configure Manifest File
-
-We need to update the manifest file to the following:
-
-```ts
-/** Gets the host names of any urls in a record */
-export function extractConfigHostNames(
-  config: Record<string, unknown>,
-): string[] {
-  const hosts = Object.values(config)
-    .filter((v) => typeof v === "string")
-    .map((v) => {
-      try {
-        return new URL(v).hostname;
-      } catch (_e) {
-        return undefined;
-      }
-    })
-    .filter((v) => !!v) as string[]; // Cast should be unnecessary with latest TS versions
-
-  // Make unique
-  return [...new Set(hosts)];
-}
-
-const defaultConfig = Value.Default(ConfigType, {} as Config) as Config;
-
-const project: ProjectManifest = {
-  specVersion: "0.0.1",
-  endpoints: extractConfigHostNames(defaultConfig),
-  config: JSON.parse(JSON.stringify(ConfigType)), // Convert to JSON Schema
-  model: "llama3.1",
-  entry: "./project.ts",
-};
-
-export default project;
-```
-
-The code includes the import of necessary types and functions, the definition of a utility function, and the creation of a project manifest object.
Since we're sharing the config logic between the manifest file and the app's code, we import `ConfigType`, which we will explain later during the code explaination. - -## 4. Configure App's Logic - - - - - -```ts -const PROMPT = ` -You are an agent designed to help a user with their token delegation on the SubQuery Network. -Given an input question, use the available tools to answer the users question quickly and concisely. -You answer must use the result of the tools available. -Do not mention that you used a tool or the name of a tool. -If you need more information to answer the question, ask the user for more details. -All token amounts are in SQT. - -If the question seems to be unrelated to the API, just return "I don't know" as the answer. -`; -``` - - - -Since delegation data can only be derived from chain events, it must be pre-indexed to enable faster and simpler querying. For this reason, we are including a relevant GraphQL endpoint that provides access to this data. Additionally, we are establishing a connection to an RPC to retrieve token balances, although the same results could be achieved if the indexer were configured to include balance data. - -Each tool can be assigned specific endpoints to ensure requests are routed correctly. Importantly, this data does not need to be hardcoded and can be provided dynamically in response to the LLM's request. In our case, the configuration might look like this: - -```ts -export const ConfigType = Type.Object({ - GRAPHQL_ENDPOINT: Type.String({ - default: "https://api.subquery.network/sq/subquery/subquery-mainnet", - }), - BASE_RPC: Type.String({ - default: "https://gateway.subquery.network/rpc/base-full", - }), - BASE_SQT_ADDR: Type.String({ - default: "0x858c50C3AF1913b0E849aFDB74617388a1a5340d", - }), -}); - -export type Config = Static; -``` - -You can now include the tools in the array and supply the configuration during initialisation: - -```ts -const entrypoint: ProjectEntry = async (config: Config): Promise => { - return { - tools: [ - new TotalDelegation(config.GRAPHQL_ENDPOINT), - new DelegatedIndexers(config.GRAPHQL_ENDPOINT), - new UnclaimedDelegatorRewards(config.GRAPHQL_ENDPOINT), - new CurrentDelegatorApy(config.GRAPHQL_ENDPOINT), - new BetterIndexerApy(config.GRAPHQL_ENDPOINT), - new TokenBalance( - new JsonRpcProvider(config.BASE_RPC), - config.BASE_SQT_ADDR, - ), - ], - systemPrompt: PROMPT, - }; -}; - -export default entrypoint; -``` - -Once the tools are added to the array, you can proceed with implementing them. - -As mentioned earlier, there are two methods for obtaining data: using an SDK to access indexed data via GraphQL queries and fetching data directly from the RPC node. - -Given the number of tools included in the project, the logic can be modularized and split across multiple files. To achieve this, the original project will include a dedicated `tools.ts` file for managing these tools. - -### 4.1 Obtaining Data from Indexers - -Let’s take the `TotalDelegation` tool as an example to illustrate its implementation in detail. This tool calculates the total delegation amount of SQT for a given user address. If no delegation is found, it returns `null`. You can view the tool's implementation here: - -```ts -export class TotalDelegation extends FunctionTool { - constructor(readonly endpoint: string) { - super(); - } - - // name = 'total-delegation-amount'; - description = `This tool gets the total delegation amount of SQT for the given user address. - If no delegation is found it will return null. 
- `; - parameters = { - type: "object", - required: ["account"], - properties: { - account: { - type: "string", - description: - "The account or address of the user which to get delegation information for", - }, - }, - }; - - async call({ account }: { account: string }): Promise { - try { - const res = await grahqlRequest<{ - delegator: null | { totalDelegations: Amount }; - }>( - this.endpoint, - `{ - delegator(id: "${account}") { - totalDelegations - } - }`, - ); - - if (!res.delegator) { - return null; - } - - return formatEther(res.delegator.totalDelegations.valueAfter.value); - } catch (error) { - return `${error}`; - } - } -} -``` - -The `TotalDelegation` tool queries a GraphQL endpoint to retrieve this information. If no delegation is found for the specified user address, it returns null. - -Every tool utilising GraphQL depends on an external function named `graphqlRequest`, which can be relocated to a separate file and implemented as follows: - -```ts -export async function grahqlRequest( - endpoint: string, - query: string, - variables?: unknown, -): Promise { - const response = await fetch(endpoint, { - method: "POST", - headers: { - "Content-Type": "application/json", - }, - body: JSON.stringify({ - query, - variables, - }), - }); - - const res = await response.json(); - - if (res.errors) { - console.log(`Request failed\n${query}`); - - throw new Error( - res.errors.map((e: { message: string }) => e.message).join("\n"), - ); - } - - return res.data; -} -``` - -This function is an asynchronous utility created to send GraphQL requests to a designated endpoint. It is a generic function, enabling the caller to define the expected structure of the response data. - -### 4.2 Fetching Data Directly from RPC - -In our example, only the `TokenBalance` tool retrieves data directly from the node. Let’s review its implementation: - -```ts -export class TokenBalance extends FunctionTool { - constructor( - readonly provider: AbstractProvider, - readonly tokenAddress: string, - ) { - super(); - } - - // name = 'token-balance'; - description = `This tool gets the current on chain SQT balance for the given address`; - parameters = { - type: "object", - required: ["account"], - properties: { - account: { - type: "string", - description: - "The account or address of the user which to get the balance for", - }, - }, - }; - - async call({ account }: { account: string }): Promise { - try { - // Step 3: Define the ERC-20 contract ABI (only need the 'balanceOf' function) - const erc20Abi = [ - "function balanceOf(address owner) view returns (uint256)", - ]; - - const erc20Contract = new Contract( - this.tokenAddress, - erc20Abi, - this.provider, - ); - - const balance = await erc20Contract.balanceOf(account); - - return formatEther(balance); - } catch (error) { - return `${error}`; - } - } -} -``` - -The tool is built to fetch the current on-chain balance of a specific token (SQT) for a given user address. The class constructor accepts two parameters: `provider`, an instance of AbstractProvider used to interact with the blockchain, and `tokenAddress`, the address of the token contract, both of which are hardcoded. - -## 5. Run the AI App - - - -Let's now try asking questions and obtaining responses using the previously demonstrated tools. For example, to extract the total delegations, you can use the following prompt: - -``` -What is the total delegation amount of SQT done for me? 
-``` - -This will return a response like: - -``` -The total delegation amount of SQT done for you is: 1000 SQT -``` - -This number is obtained from the indexer endpoint and can be cross-verified through a direct GraphQL query or another method to ensure accuracy and avoid bias. - -To test the second tool, you can use this prompt: - -``` -What is my balance? -``` - -Which will return: - -``` -Your balance is 12442989724000780042135 SQT -``` - -This number is fetched directly from the RPC node, and further modifications such as type casting can be requested. - -Other useful prompts for this project could include: - -``` -"My address is 0x108A496cDC32DA84e4D5905bb02ED695BC1024cd, use this for any further prompts. What is my delegation?", -"Who am I delegating to?", -"What is my balance?", -"Do I have any unclaimed rewards?", -"What is my current APY?", -"Are there better indexers to delegate to?" -``` - -## Summary - -You now have a functional SubQuery AI App that utilizes the latest LLMs and integrates various function tools that access blockchain data. This example serves as a foundation for building your own tools that leverage on-chain data, allowing you to query it in a convenient way and enabling real-time data analysis through simple natural language prompts. - -[**A full version of the code for this guide can be found here**](https://github.com/subquery/subql-ai-app-example/tree/main/fancy-greeter). - - diff --git a/docs/ai/guides/fancy-greeter.md b/docs/ai/guides/fancy-greeter.md deleted file mode 100644 index 682bbf1e9e4..00000000000 --- a/docs/ai/guides/fancy-greeter.md +++ /dev/null @@ -1,115 +0,0 @@ -# SubQuery Fancy Greeter - Basic Example - -This basic example AI App is a good starting point to learn about prompt engineering and function tooling. It's a perfect example that shows off the key features of SubQuery's AI App Framework. - -:::info note -You can follow along in the tutorial with the [example code here](https://github.com/subquery/subql-ai-app-example/tree/main/fancy-greeter). -::: - - - -## 1. Install the framework - - - -## 2. Create a New App - - - -## 3. Review the Manifest File - - - -The manifest file for a default project looks like the following: - -```ts -import type { ProjectManifest } from "jsr:@subql/ai-app-framework"; - -const project: ProjectManifest = { - specVersion: "0.0.1", - // Specify any hostnames your tools will make network requests too - endpoints: [], - // Your projects runtime configuration options - config: {}, - model: "llama3.2:1b", - entry: "./project.ts", -}; - -export default project; -``` - -As you can see, there are very few details to configure in our default example. The two most important settings are the `model` (a selection of models can be found [here](https://ollama.com/library)) and the `entry`, where you'll specify the path to your project's entry point. - -## 4. Configure System Prompt Logic - - - - - -```ts -const entrypoint: ProjectEntry = async (config: Config): Promise => { - return { - tools: [], - systemPrompt: `You are an agent designed to greet a user in the strangest way possible. - Always ask for the users name first before you greet them, once you have this information, you can greet them in a unique way. - Your greeting should be weird, perhaps a pun or dad joke with their name. Please be funny, interesting, weird, and/or unique. - If you need more information to answer to greet the user, ask the user for more details.`, - }; -}; - -export default entrypoint; -``` - -## 5. 
Add a Function Tool - - - -We're going to add a simple function tool that does nothing more than take an input name, and reverse the name. For example, `alice` would become `ecila` and `bob` would remain `bob`. To accomplish this, we need to modify the code as follows: - -```ts -class ReverseNameTool extends FunctionTool { - description = `This tool reverses the users name.`; - parameters = { - type: "object", - required: ["name"], - properties: { - name: { - type: "string", - description: "The name of the user", - }, - }, - }; - - async call({ name }: { name: string }): Promise { - // Reverse the order of the input name - return await name.split("").reverse().join(""); - } -} - -// deno-lint-ignore require-await -const entrypoint: ProjectEntry = async (config: Config): Promise => { - return { - tools: [new ReverseNameTool()], - systemPrompt: `You are an agent designed to greet a user in the strangest way possible. - Always ask for the users name first before you greet them, once you have this information, you can greet them in a unique way. - Your greeting should be weird, perhaps a pun or dad joke with their name. Please be funny, interesting, weird, and/or unique. - ALWAYS REVERSE THEIR NAME USING THE REVERSENAMETOOL BEFORE GREETING THEM! - Do not mention that you used a tool or the name of a tool. - If you need more information to answer to greet the user, ask the user for more details.`, - }; -}; - -export default entrypoint; -``` - -First, define the function tool by creating a class (`class ReverseNameTool extends FunctionTool { ... }`). Next, add this new function tool to the list of tools (`tools: [new ReverseNameTool()],`). Lastly, update the system prompt to instruct the AI to always reverse the name before greeting, using the Reverse Name tool (`ALWAYS USE THE REVERSE_NAME_TOOL TO REVERSE THEIR NAME BEFORE GREETING THEM!`). - -## 6. Run the App - - - -## Summary - -You now have a running SubQuery AI App that uses the latest LLMs and also incorporates a function tool. This may be a simple and rather basic example, but it's a great starting point to building complex AI Apps and agents custom built for your application. - - diff --git a/docs/ai/guides/subquery-docs-rag.md b/docs/ai/guides/subquery-docs-rag.md deleted file mode 100644 index 3110061733a..00000000000 --- a/docs/ai/guides/subquery-docs-rag.md +++ /dev/null @@ -1,194 +0,0 @@ -# Project Documentation AI Assistant - Intermediate Example with RAG Support - -This is an example of an AI app utilising RAG (Retrieval-Augmented Generation). [RAG tools](../build/rag.md) are a specialised type of [function tools](../build/function_tools.md) that enhance your LLM by integrating a vector database created from anything you choose to vectorise. In most cases this will be additional data from a knowledgebase or a database, in this case we're incorportating the SubQuery documentation website as our RAG data (where you're reading this right now). - -:::info note -This tool is already in use in production for SubQuery documentation. Be sure to explore it now by clicking the corresponding button in the lower-right corner. -::: - -You can follow along in the tutorial with the [example code here](https://github.com/subquery/subql-ai-app-example/tree/main/subql-docs). - - - -## 1. Install the framework - - - -## 2. Create a New App - - - -## 3. Embedding Documentation for RAG - -To proceed with our example, we need to define and add a RAG dataset. 
For this guide, we will experiment with the [SubQuery documentation](https://github.com/subquery/documentation), but feel free to use your own markdown-based documentation, provided it can be vectorised. - -### Step 1: Clone the Documentation Repository - -First, clone the SubQuery documentation repository by running the following command in your terminal: - -```bash -git clone https://github.com/subquery/documentation.git -``` - -### Step 2: Define the RAG Dataset - -Once the documentation repository is cloned, you can define it using the SubQuery CLI. The default RAG tool can be utilised by following [this guide](../build/rag.md#defining-rag). Here’s an example command: - -```bash -subql-ai embed -i ./subquery/documentation -o ./db --table subql-docs --model nomic-embed-text -``` - -Here’s a breakdown of the parameters used in this command: - -- **`-i` (input)**: Specifies the path to the documentation repository you cloned, with no additional modifications required. -- **`-o` (output)**: Indicates the path where the generated embeddings will be saved. -- **`--table`**: Defines the table name for storing the embeddings. -- **`--model`**: Specifies the embedding LLM model to use, which should match the model defined in the app's manifest. - -:::info Note -The logic for vectorisation is implemented in the SubQuery framework and can be found on [GitHub](https://github.com/subquery/subql-ai-app-framework/blob/main/src/embeddings/generator/generator.ts). - -The CLI processes markdown files within the specified directory and generates embeddings by performing the following steps: - -1. **Splitting Markdown Files**: Files are divided into sections based on headers. -2. **MDX Element Removal**: Any MDX elements are stripped away. -3. **Plain Text Conversion**: Each section's content is converted to plain text. - -If the default vectorisation algorithm doesn’t suit your needs, you can use a custom algorithm tailored to your specific requirements. -::: - -### Step 3: Review Generated Embeddings - -After the vectorisation process is complete, a folder will be generated containing the embeddings. It will include subfolders and files similar to the structure shown below: - -![Generated Embeddings](/assets/img/ai/generated-embeddings.png) - -Copy the root path of this folder. You will need this path in the next step when configuring the manifest file to ingest and embed your chosen RAG source data. - -### 4. Configure the Manifest File - - - -Continue with the next steps to integrate the embeddings into your application. After the modification, the manifest file will resemble the following structure: - -```ts -import type { ProjectManifest } from "jsr:@subql/ai-app-framework@^0.0.5"; - -const project: ProjectManifest = { - specVersion: "0.0.1", - vectorStorage: { - type: "lancedb", - path: "../path-to-previously-generated-db-folder", - }, - config: {}, - model: "llama3.1", - embeddingsModel: "nomic-embed-text", - entry: "./project.ts", -}; - -export default project; -``` - -## 5. Configure App's Logic - - - - - -```ts -const PROMPT = ` -You are designed to assist with answering questions about SubQuery, using its documentation as a reference. -You answer must use the result of the tools available. -Do not mention that you used a tool or the name of a tool. -If you need more information to answer the question, ask the user for more details. -`; -``` - - - -You can now add the tool to the array. 
- -```ts -// deno-lint-ignore require-await -const entrypoint: ProjectEntry = async (): Promise => { - return { - tools: [new SubqueryDocs()], - systemPrompt: PROMPT, - }; -}; - -export default entrypoint; -``` - -After adding the tools to the array, you can begin implementing it. No explicit configuration is needed, as this is handled automatically by the SubQuery AI Framework SDK in the background. - -```ts -export class SubqueryDocs extends RagTool { - constructor() { - super("subql-docs", "content"); - } -} -``` - -## 6. Run the AI App with developed tools - - - -Let's attempt to ask the tool specific questions: - -``` -How can I define a one-to-many relationship in a subquery project GraphQL schema? -``` - -This should return a response that looks like the following: - -::: details Full Response - -``` -It seems like the SubQuery Docs API is still under development and doesn't support defining one-to-many relationships in GraphQL queries. - -To answer your question, a one-to-many relationship can be achieved by using subqueries. Here's an example of how you could define a one-to-many relationship between two tables using a subquery: - -type OneToOneExample { - id: ID! - anotherId: ID! -} - -type Query { - oneToMany: [OneToOneExample!] @client -} - -In this query, we have a `OneToOneExample` type that has two fields: `id` and `anotherId`. The `@client` keyword is used to indicate that the field is not a built-in GraphQL field. - -The subquery is defined like so: - -query MyQuery { - oneToMany: [one TO MANY ON ONE(id: $id, anotherId: $anotherId) FROM OneToOneExample!] WHERE $id IS NOT NULL AND $anotherId IS NOT NULL -} - -In this query, we define a subquery that selects from the `OneToOneExample` type. The subquery is executed for each pair of distinct values in the `id` and `anotherId` fields. This will give us a list of records where one-to-many relationship between the two tables. - -Please note that not all GraphQL databases support this syntax, so you may need to adjust it to fit your specific database schema. -``` - -::: - -Another prompt: - -``` -Does subquery support the solana blockchain? -``` - -::: details Full Response - -``` -Yes. SubQuery supports data indexing on Solana blockchain. -``` - -::: - -## Summary - -You now have a fully functional SubQuery AI App that leverages unstructured markdown data to deliver clearer and more concise responses about SubQuery. - - diff --git a/docs/ai/publish/publish.md b/docs/ai/publish/publish.md deleted file mode 100644 index aeb2463757d..00000000000 --- a/docs/ai/publish/publish.md +++ /dev/null @@ -1,11 +0,0 @@ -# Publish your AI App - -The SubQuery AI App Framework uses a sandboxed environment for secure and efficient operations. Each AI App is encapsulated with its own manifest, enabling seamless distribution across the SubQuery Network. This ensures that scaling is not only easy but also secure, allowing developers to focus on innovation rather than infrastructure. - -:::info AI Apps are in beta - -We don't yet support publishing your AI App to the SubQuery Network, but we are working on this right now. - -You can continue to build, develop, and self host your AI Apps and soon you will be able to deploy to the decentralised network. 
-
-:::
diff --git a/docs/ai/run/cli.md b/docs/ai/run/cli.md
deleted file mode 100644
index ff3af8ba1d8..00000000000
--- a/docs/ai/run/cli.md
+++ /dev/null
@@ -1,165 +0,0 @@
-# CLI Reference
-
-```
-Run a SubQuery AI app
-
-Commands:
-  subql-ai            Run a SubQuery AI app                           [default]
-  subql-ai info       Get information on a project
-  subql-ai embed-mdx  Creates a Lance db table with embeddings from MDX files
-  subql-ai repl       Creates a CLI chat with a running app
-  subql-ai publish    Publishes a project to IPFS so it can be easily
-                      distributed
-  subql-ai init       Create a new project skeleton
-
-Options:
-      --version          Show version number                          [boolean]
-      --help             Show help                                    [boolean]
-  -p, --project          A path to a project file           [string] [required]
-      --ipfsEndpoint     An endpoint to an IPFS gateway
-          [string] [default: "https://unauthipfs.subquery.network/ipfs/api/v0/"]
-      --ipfsAccessToken  A bearer authentication token to be used with the ipfs
-                         endpoint                                      [string]
-      --cacheDir         The location to cache data from ipfs. Default is a
-                         temp directory                                [string]
-      --debug            Enable debug logging       [boolean] [default: false]
-      --logFmt           Set the logger format
-                        [string] [choices: "json", "pretty"] [default: "pretty"]
-  -h, --host             The LLM RPC host. If the project model uses an OpenAI
-                         model then the default value is not used.
-                         [string] [default: "http://localhost:11434"]
-      --openAiApiKey     If the project models use OpenAI models, then this api
-                         key will be passed on to the OpenAI client    [string]
-  -i, --interface        The interface to interact with the app
-                          [string] [choices: "cli", "http"] [default: "http"]
-      --port             The port the http service runs on
-                         [number] [default: 7827]
-      --forceReload      If the project is from IPFS force reload it and don't
-                         use the cached version     [boolean] [default: false]
-      --toolTimeout      Set a limit for how long a tool can take to run, unit
-                         is MS                       [number] [default: 10000]
-      --streamKeepAlive  The interval in MS to send empty data in stream
-                         responses to keep the connection alive. Only works
-                         with the http interface. Use 0 to disable.
-                         [number] [default: 5000]
-```
-
-These can also be specified with environment variables. They should be prefixed with `SUBQL_AI_` and the flag renamed to capitalised snake case, e.g. `--cacheDir` becomes `SUBQL_AI_CACHE_DIR`.
-
-### `subql-ai`
-
-Run an AI app.
-
-```shell
-subql-ai -p ./path/to/manifest.ts
-```
-
-### `info`
-
-Get information on a project.
-
-```shell
-subql-ai info -p ./path/to/manifest.ts
-```
-
-::: details Example output
-
-```json
-Project Information:
-  Model:
-    llama3.1
-  Config:
-    GRAPHQL_ENDPOINT: https://gateway.subquery.network/query/QmcoJLxSeBnGwtmtNmWFCRusXVTGjYWCK1LoujthZ2NyGP
-  {
-    "type": "object",
-    "properties": {
-      "GRAPHQL_ENDPOINT": {
-        "default": "https://gateway.subquery.network/query/QmcoJLxSeBnGwtmtNmWFCRusXVTGjYWCK1LoujthZ2NyGP",
-        "type": "string"
-      },
-      "BASE_RPC": {
-        "default": "https://gateway.subquery.network/rpc/base-full",
-        "type": "string"
-      },
-      "BASE_SQT_ADDR": {
-        "default": "0x858c50C3AF1913b0E849aFDB74617388a1a5340d",
-        "type": "string"
-      }
-    },
-    "required": [
-      "GRAPHQL_ENDPOINT",
-      "BASE_RPC",
-      "BASE_SQT_ADDR"
-    ]
-  }
-  Tools:
-    TotalDelegation
-    DelegatedIndexers
-    UnclaimedDelegatorRewards
-    CurrentDelegatorApy
-    BetterIndexerApy
-    TokenBalance
-    SubqueryDocs
-  System Prompt:
-
-    You are an agent designed to help a user with their token delegation on the SubQuery Network.
-    Given an input question, use the available tools to answer the users question quickly and concisely.
-    Your answer must use the result of the tools available.
-    Do not mention that you used a tool or the name of a tool.
-    If you need more information to answer the question, ask the user for more details.
-    All token amounts are in SQT.
-
-    If the question seems to be unrelated to the API, just return "I don't know" as the answer.
-
-  Endpoints:
-    gateway.subquery.network
-  Vector Storage:
-    Type: lancedb
-    Path: ipfs://QmbELwJY7akcah3Ds5taT3KSN3aPG8xUv5poJ8crdhXrhx
-
-```
-
-:::
-
-### `embed-mdx`
-
-Creates a Lance db table with embeddings from MDX files.
-
-```shell
-subql-ai embed-mdx -i ./path/to/dir/with/markdown -o ./db --table your-table-name
-```
-
-### `repl`
-
-When you have a running app, you can run this in another terminal as a CLI interface for chatting with the app.
-
-Type `/bye` to exit.
-
-```shell
-subql-ai repl
-```
-
-### `publish`
-
-Publish your project to IPFS; this is how you can distribute your project.
-
-```shell
-subql-ai publish -p ./path/to/manifest.ts
-```
-
-::: tip Info
-You can use the `--silent` flag to only output the IPFS url. This is useful in CI pipelines.
-:::
-
-::: details Example output
-
-```
-✔ Loaded project manifest
-✔ Loaded project
-✔ Generated project bundle
-✔ Published project code
-✔ Published vector db
-✔ Published project to IPFS
-ipfs://QmSejMf351cHqNTEbmJDziVLJ26r3hyepq1e6wbnejvgqM
-```
-
-:::
diff --git a/docs/ai/run/docker.md b/docs/ai/run/docker.md
deleted file mode 100644
index aedd93f7c75..00000000000
--- a/docs/ai/run/docker.md
+++ /dev/null
@@ -1,54 +0,0 @@
-# Docker
-
-A Docker image is provided for running applications.
-
-## Docker Compose
-
-This is an example Docker Compose file which runs an app from IPFS with the given project (`-p=ipfs://QmNaNBhXJoFpRJeNQcnTH8Yh6Rf4pzJy6VSnfnQSZHysdZ`).
-
-It also includes a web UI for testing purposes.
-To use the web UI, head to [http://localhost:8080](http://localhost:8080), select the `subql-ai-0` model, then start chatting.
-
-::: warning
-Docker Compose doesn't currently support running local projects, as dependencies are not resolved.
-This is something that will be fixed in the future.
-:::
-
-```yml
-services:
-  subql-ai:
-    image: subquerynetwork/subql-ai-app
-    ports:
-      - 7827:7827
-    restart: unless-stopped
-    volumes:
-      - ./:/app
-    command:
-      - ${SUB_COMMAND:-}
-      - -p=ipfs://QmNaNBhXJoFpRJeNQcnTH8Yh6Rf4pzJy6VSnfnQSZHysdZ
-      - -h=http://host.docker.internal:11434
-    healthcheck:
-      test: ["CMD", "curl", "-f", "http://subql-ai:7827/ready"]
-      interval: 3s
-      timeout: 5s
-      retries: 10
-
-  # A simple chat UI
-  ui:
-    image: ghcr.io/open-webui/open-webui:main
-    ports:
-      - 8080:8080
-    restart: always
-    depends_on:
-      "subql-ai":
-        condition: service_healthy
-    environment:
-      - "OPENAI_API_BASE_URLS=http://subql-ai:7827/v1"
-      - "OPENAI_API_KEYS=foobar"
-      - "WEBUI_AUTH=false"
-    volumes:
-      - open-webui:/app/backend/data
-
-volumes:
-  open-webui:
-```
diff --git a/docs/ai/run/local.md b/docs/ai/run/local.md
deleted file mode 100644
index 6c16b3b6117..00000000000
--- a/docs/ai/run/local.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# Running Locally
-
-Now that you have made and built your application, you can run it locally to test it out. To do so, run the following command, where `-p` is the path to the `manifest.ts` and `-h` is the URL of the Ollama endpoint:
-
-```shell
-subql-ai -p ./path/to/manifest.ts -h http://ollama.public.url
-```
-
-Once the project is running you should see the following: `Listening on http://0.0.0.0:7827/`
-
-You can now interact with your application. The easiest way to do that is to run the repl in another terminal.
-
-```shell
-subql-ai repl
-```
-
-This will start a CLI chat. You can type `/bye` to exit.
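-
-Because the app exposes an OpenAI-compatible API (on port 7827 by default), you can also talk to it programmatically. Below is a minimal, hypothetical sketch using `fetch` from Deno. The model name `subql-ai-0` matches the one shown in the web UI example, and the request shape follows the standard OpenAI chat completions format; adjust both to your setup:
-
-```ts
-// A sketch of querying a locally running AI App via its OpenAI-compatible API.
-const res = await fetch("http://localhost:7827/v1/chat/completions", {
-  method: "POST",
-  headers: { "Content-Type": "application/json" },
-  body: JSON.stringify({
-    model: "subql-ai-0", // adjust to the model name your runner reports
-    messages: [{ role: "user", content: "Does SubQuery support Solana?" }],
-  }),
-});
-
-const data = await res.json();
-console.log(data.choices[0].message.content);
-```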
-
-Alternatively, you can [run with Docker](./docker.md) to get a web UI.
-
-You may want to refer to the [command line arguments](../run/cli.md) used in the SubQuery AI App Framework. It will help you understand the commands better.
diff --git a/docs/ai/snippets/add-a-function-tool.md b/docs/ai/snippets/add-a-function-tool.md
deleted file mode 100644
index 7c50bb5acb5..00000000000
--- a/docs/ai/snippets/add-a-function-tool.md
+++ /dev/null
@@ -1 +0,0 @@
-Adding function tools is an important step of any integrated AI App. Function tools are functions that extend the functionality of the LLM. They can be used to do many things, like request data from external APIs and services, perform computations, or analyse structured data outputs from the AI. You can read more about function tooling [here](../build/function_tools.md).
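-
-As a flavour of what a function tool can look like, here is a short, hypothetical sketch. The exact base class and method signatures should be checked against the function tooling guide linked above; the names below (`FunctionTool`, `parameters`, `call`) are illustrative assumptions:
-
-```ts
-// A hypothetical function tool that returns a wallet's token balance.
-import { FunctionTool } from "jsr:@subql/ai-app-framework@^0.0.5";
-
-// Stubbed data source for illustration; a real tool would query an RPC
-// or GraphQL endpoint here.
-async function fetchBalance(address: string): Promise<string> {
-  return "100.0";
-}
-
-export class TokenBalance extends FunctionTool {
-  name = "token-balance";
-  description = "Fetches the SQT balance of a given wallet address.";
-  parameters = {
-    type: "object",
-    required: ["address"],
-    properties: {
-      address: {
-        type: "string",
-        description: "The wallet address to look up",
-      },
-    },
-  };
-
-  async call({ address }: { address: string }): Promise<string | null> {
-    const balance = await fetchBalance(address);
-    return `The balance of ${address} is ${balance} SQT`;
-  }
-}
-```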
diff --git a/docs/ai/snippets/configure-app-logic.md b/docs/ai/snippets/configure-app-logic.md
deleted file mode 100644
index d5b9a802fde..00000000000
--- a/docs/ai/snippets/configure-app-logic.md
+++ /dev/null
@@ -1 +0,0 @@
-To configure the app, you’ll need to edit the project entry point file (e.g. `project.ts` in this example). The [project entry point](../build/app.md#project-entrypoint) is where the tools and system prompt are initialised.
diff --git a/docs/ai/snippets/configure-manifest-file.md b/docs/ai/snippets/configure-manifest-file.md
deleted file mode 100644
index 96a94d5cccc..00000000000
--- a/docs/ai/snippets/configure-manifest-file.md
+++ /dev/null
@@ -1 +0,0 @@
-The file `manifest.ts` defines key configuration options for your app. You can find the configuration specifics [here](../build/app.md#project-manifest).
diff --git a/docs/ai/snippets/create-a-new-app.md b/docs/ai/snippets/create-a-new-app.md
deleted file mode 100644
index 27228373c4e..00000000000
--- a/docs/ai/snippets/create-a-new-app.md
+++ /dev/null
@@ -1,5 +0,0 @@
-You can initialise a new app using `subql-ai init`. It will ask you to provide a name and either an OpenAI endpoint or an Ollama model to use.
-
-![Init a new AI App](/assets/img/ai/guide-init.png)
-
-After you complete the initialisation process, you will see a folder with your project name created inside the directory. Please note that there should be four files: a `project.ts`, a `manifest.ts`, a `docker-compose.yml`, and a `README.md`.
diff --git a/docs/ai/snippets/install-the-framework.md b/docs/ai/snippets/install-the-framework.md
deleted file mode 100644
index e25bd6ba6d7..00000000000
--- a/docs/ai/snippets/install-the-framework.md
+++ /dev/null
@@ -1,9 +0,0 @@
-Run the following command to install the SubQuery AI framework globally on your system:
-
-```bash
-deno install -g -f --allow-env --allow-net --allow-import --allow-read --allow-write --allow-ffi --allow-run --unstable-worker-options -n subql-ai jsr:@subql/ai-app-framework/cli
-```
-
-This will install the CLI and Runner. Make sure you follow the suggested instructions to add it to your path.
-
-You can confirm the installation by running `subql-ai --help`.
diff --git a/docs/ai/snippets/prerequisites.md b/docs/ai/snippets/prerequisites.md
deleted file mode 100644
index 324e55c7ab4..00000000000
--- a/docs/ai/snippets/prerequisites.md
+++ /dev/null
@@ -1,11 +0,0 @@
-## Prerequisites
-
-In order to run an AI App locally, you must have the following services installed:
-
-- [Docker](https://docker.com/): This tutorial will use Docker to run the AI App and its services locally.
-- [Deno](https://docs.deno.com/runtime/getting_started/installation/): A recent version of Deno, the JS engine for the SubQuery AI App Framework.
-
-You will also need access to either an Ollama or OpenAI inference endpoint:
-
-- [Ollama](https://ollama.com/): an endpoint to an Ollama instance; this could be running on your local computer or be a commercial endpoint online, or
-- [OpenAI](https://platform.openai.com): you will need a paid API key.
diff --git a/docs/ai/snippets/run-the-ai-app.md b/docs/ai/snippets/run-the-ai-app.md
deleted file mode 100644
index a9901abbc32..00000000000
--- a/docs/ai/snippets/run-the-ai-app.md
+++ /dev/null
@@ -1,15 +0,0 @@
-We can run the project at any time using the following command, where `-p` is the path to the `manifest.ts` and `-h` is the URL of the Ollama endpoint.
-
-```bash
-subql-ai -p ./manifest.ts -h http://host.docker.internal:11434
-```
-
-Once the project is running you should see the following: `Listening on http://0.0.0.0:7827/`. You can now interact with your application. The easiest way to do that is to run the repl in another terminal.
-
-```shell
-subql-ai repl
-```
-
-This will start a CLI chat. You can type `/bye` to exit. Alternatively, it is possible to launch the app via [Docker](../run/docker.md).
-
-You should review the instructions on [running locally](../run/local.md) or via [Docker](../run/docker.md).
diff --git a/docs/ai/snippets/summary.md b/docs/ai/snippets/summary.md
deleted file mode 100644
index 8cdd36e8457..00000000000
--- a/docs/ai/snippets/summary.md
+++ /dev/null
@@ -1,7 +0,0 @@
-From here you may want to look at the following guides:
-
-- Detailed documentation on the [AI App Manifest](../build/app.md).
-- Enhance your AI App with [function tooling](../build/function_tools.md).
-- Give your AI App more knowledge with [RAG support](../build/rag.md).
-- The [API](../api/api.md) reference for your AI App.
-- [Publish your AI App](../publish/publish.md) so it can run on the [SubQuery Decentralised Network](https://app.subquery.network).
diff --git a/docs/ai/snippets/update-system-prompt.md b/docs/ai/snippets/update-system-prompt.md
deleted file mode 100644
index 69106c929b5..00000000000
--- a/docs/ai/snippets/update-system-prompt.md
+++ /dev/null
@@ -1 +0,0 @@
-A good first place to start is by updating your **system prompts**. System prompts are the basic way you customise the behaviour of your AI agent.
diff --git a/docs/ai/welcome.md b/docs/ai/welcome.md
deleted file mode 100644
index 75adf112aee..00000000000
--- a/docs/ai/welcome.md
+++ /dev/null
@@ -1,54 +0,0 @@
-# Build with SubQuery's AI App Framework
-
-The AI App Framework allows you to build, deploy, and run production AI apps on the SubQuery Network in a trusted and decentralised environment.
-
-AI apps are self-contained and easily scalable AI agents that you can use to power your intelligent applications. They are sandboxed to a trusted runner and can be easily distributed and scaled horizontally across the SubQuery Network.
-
-![AI Apps Header Image](/assets/img/ai/header.png)
-
-## Features
-
-- **Effortless decentralised distribution:** The SubQuery AI App Framework uses a sandboxed environment for secure and efficient operations. Each AI App is encapsulated with its own manifest, enabling seamless distribution across the SubQuery Network. This ensures that horizontal scaling is not only easy but also secure, allowing developers to focus on innovation rather than infrastructure.
-- **Empower your AI with RAGs:** By integrating [RAG (Retrieval-Augmented Generation) files](./build/rag.md), your AI Apps can leverage domain-specific knowledge efficiently. With initial support for LanceDB and future compatibility with other vector databases, developers can enhance their applications' performance and accuracy. Additionally, publishing to IPFS ensures data integrity and accessibility.
-- **Your AI journey starts here:** The SubQuery AI App Framework is designed with user-friendliness in mind, providing intuitive wrappers around core features. This lowers the barrier to entry for developers of all skill levels, making it easier to create, run, and deploy AI Apps.
-- **Connect, create, and integrate with function tooling:** You can extend your AI Apps with additional [function tooling](./build/function_tools.md), facilitating connections to external systems and tools. This capability enables rich integrations, allowing users to create versatile applications that can interact seamlessly with blockchains and other ecosystems.
-- **Choose your model:** By supporting a range of open-source Ollama LLM models as well as OpenAI, the SubQuery AI App Framework ensures that users can choose the best model for their applications without being locked into a specific model ecosystem. This flexibility fosters open-source innovation.
-- **Proven standards for seamless integration:** SubQuery AI Apps expose the industry-standard [OpenAI API](./api/api.md), ensuring compatibility with a wide range of applications and tools. This makes it easier for developers to integrate AI capabilities into their projects while adhering to established standards.
-
-![AI App Framework Features](/assets/img/ai/features.jpg)
-
-## What can you build with SubQuery’s AI App Framework?
-
-SubQuery’s AI App Framework is a powerful but intuitive SDK that helps you build advanced AI applications in minutes. It’s built on TypeScript and is extremely easy to use, simplifying most of the backend tasks required to customise and integrate LLM models into production use cases (a minimal sketch of an app's two core files follows the list below).
-
-For example, you could use it to build:
-
-- **A customer support bot**, trained on the documentation for your project and able to guide customers through their problems. It could also be trained to submit clean and detailed customer support tickets when escalation is required.
-- **Wallet assistants** that help users understand the tokens they have and guide them through the process of managing, bridging, or swapping them. For example, a user could ask, “how do I convert ETH to SQT?” and the AI would guide them through the options.
-- **Content moderation for decentralised social networks**: AI Apps could be trained to identify spam or harmful content and use function tools to disable or hide it.
-- **AI agents to improve participation in governance**, by analysing and summarising proposals in DAOs and assisting users in making informed voting decisions based on data-driven insights.
-- **Dynamic pricing models**, by analysing demand and supply in real-time, adjusting prices for tokens or NFTs dynamically based on user behaviour and market trends.
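-
-As a taste of the SDK's shape, here is a minimal, hypothetical two-file app. The field names mirror the examples elsewhere in these docs; treat the exact import paths and the `ProjectEntry` type as assumptions to verify against the build guide:
-
-```ts
-// manifest.ts: a minimal sketch of an app's configuration.
-import type { ProjectManifest } from "jsr:@subql/ai-app-framework@^0.0.5";
-
-const project: ProjectManifest = {
-  specVersion: "0.0.1",
-  config: {},
-  model: "llama3.1", // any supported Ollama model, or an OpenAI model
-  entry: "./project.ts", // where the app's logic lives
-};
-
-export default project;
-```
-
-```ts
-// project.ts: a minimal sketch of an app's logic.
-import type { ProjectEntry } from "jsr:@subql/ai-app-framework@^0.0.5";
-
-// deno-lint-ignore require-await
-const entrypoint: ProjectEntry = async () => {
-  return {
-    tools: [], // add function tools and RAG tools here
-    systemPrompt: "You are a friendly assistant for my decentralised app.",
-  };
-};
-
-export default entrypoint;
-```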
-
-## Getting Started
-
-
-
-### Install the framework
-
-Run the following command to install the SubQuery AI framework globally on your system:
-
-```bash
-deno install -g -f --allow-env --allow-net --allow-import --allow-read --allow-write --allow-ffi --allow-run --unstable-worker-options -n subql-ai jsr:@subql/ai-app-framework/cli
-```
-
-This will install the CLI and Runner. Make sure you follow the suggested instructions to add it to your path.
-
-You can confirm the installation by running `subql-ai --help`.
-
-### Create a new App
-
-You can initialise a new app using `subql-ai init`. It will ask you to provide a name and an LLM model to use.
-
-![Init a new AI App](/assets/img/ai/guide-init.png)
-
-You can follow along on a [guide to create a basic SubQuery AI App with the SubQuery App framework here](./guides/fancy-greeter.md), or read the instructions on how to create an app yourself [here](./build/app.md).

From 1b821ff6aaf857843234bf12e6487e09cf6765cc Mon Sep 17 00:00:00 2001
From: Ian He <39037239+ianhe8x@users.noreply.github.com>
Date: Thu, 30 Oct 2025 16:35:15 +1300
Subject: [PATCH 2/5] fix deploy-staging.yml

---
 .github/workflows/deploy-staging.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/deploy-staging.yml b/.github/workflows/deploy-staging.yml
index f2f4d0976ab..dd827c24c42 100644
--- a/.github/workflows/deploy-staging.yml
+++ b/.github/workflows/deploy-staging.yml
@@ -38,7 +38,7 @@ jobs:
       - name: Invalidate CloudFront
         uses: chetan/invalidate-cloudfront-action@v2
         env:
-          DISTRIBUTION: GNUAPE6SUDC4
+          DISTRIBUTION: ${{ vars.CF_DISTRIBUTION_DEV }}
           PATHS: "/*"
           AWS_REGION: "us-east-1"
           AWS_ACCESS_KEY_ID: ${{ vars.AWS_ACCESS_KEY_ID_V2 }}

From a9f80691e06169f3365fc4ce6afdfd3ae639f101 Mon Sep 17 00:00:00 2001
From: Ian He <39037239+ianhe8x@users.noreply.github.com>
Date: Thu, 30 Oct 2025 17:57:20 +1300
Subject: [PATCH 3/5] fix per comment

---
 docs/.vuepress/config.ts  |  2 +-
 docs/.vuepress/sidebar.ts | 39 ++-------------------------------------
 docs/ai/graphql_agent.md  |  6 +++---
 3 files changed, 6 insertions(+), 41 deletions(-)

diff --git a/docs/.vuepress/config.ts b/docs/.vuepress/config.ts
index ebba53ce49a..68e9f810c6c 100644
--- a/docs/.vuepress/config.ts
+++ b/docs/.vuepress/config.ts
@@ -70,7 +70,7 @@ j=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=true;j.src=
       link: "/indexer/welcome.md",
     },
     {
-      text: "SubQuery Graphql Agent",
+      text: "SubQuery GraphQL Agent",
       link: "/ai/graphql_agent.md",
     },
     {
diff --git a/docs/.vuepress/sidebar.ts b/docs/.vuepress/sidebar.ts
index c5b18ca33ee..9116ea2e6fb 100644
--- a/docs/.vuepress/sidebar.ts
+++ b/docs/.vuepress/sidebar.ts
@@ -721,43 +721,8 @@ export const getSidebar = (locale: string) =>
     ],
     "/ai/": [
       {
-        text: "Welcome",
-        link: `${locale}/ai/welcome.md`,
-      },
-      {
-        text: "Guides",
-        link: `${locale}/ai/guides/fancy-greeter.md`,
-        children: [
-          `${locale}/ai/guides/fancy-greeter.md`,
-          `${locale}/ai/guides/subquery-docs-rag.md`,
-          `${locale}/ai/guides/delegation-helper.md`,
-        ],
-      },
-      {
-        text: "Build",
-        link: `${locale}/ai/build/app.md`,
-        children: [
-          `${locale}/ai/build/app.md`,
-          `${locale}/ai/build/function_tools.md`,
-          `${locale}/ai/build/rag.md`,
-        ],
-      },
-      {
-        text: "Run",
-        link: `${locale}/ai/run/local.md`,
-        children: [
-          `${locale}/ai/run/local.md`,
-          `${locale}/ai/run/cli.md`,
-          `${locale}/ai/run/docker.md`,
-        ],
-      },
-      {
-        text: "API",
-        link: `${locale}/ai/api/api.md`,
-      },
-      {
-        text: "Publish",
-        link: `${locale}/ai/publish/publish.md`,
-      },
+        text: "SubQuery GraphQL Agent",
+        link: `${locale}/ai/graphql_agent.md`,
       },
     ],
     "/miscellaneous/": [
       `${locale}/miscellaneous/contributing.md`,
       `${locale}/miscellaneous/social_media.md`,
diff --git a/docs/ai/graphql_agent.md b/docs/ai/graphql_agent.md
index 5bc6ebd14ca..5fbe32694e0 100644
--- a/docs/ai/graphql_agent.md
+++ b/docs/ai/graphql_agent.md
@@ -229,7 +229,7 @@ The agent follows this intelligent workflow:
 6. **Summarization**: Provides user-friendly responses based on actual results
 
 ## Pricing and Billing
-Comming soon
+Coming soon
 
 ### Support Resources
 - **Documentation**: Comprehensive guides and API references
@@ -242,8 +242,8 @@ Comming soon
 **What types of questions can I ask?**
 You can ask any question that can be answered by the data in the SubQuery project, from simple counts to complex analytical queries.
 
-**Can I use the GraphQL Agent with my own SubQuery projects?**
-Yes, in the coming graphql agent as MCP.
+**Can I use the GraphQL Agent with my own SubQuery/SubGraph projects?**
+Yes, the GraphQL Agent can be used as an MCP (Model Context Protocol) with custom SubQuery/SubGraph projects. Configure the agent to point to your GraphQL endpoint and include the project schema and any required authentication settings.
 
 **What's the difference between the free and paid versions?**
 The free tier has rate limits (5 queries/day) while paid versions offer higher limits, priority access, and additional features.

From 68b4927b91b0afd9b40a8b6e4260c45845c9286c Mon Sep 17 00:00:00 2001
From: Ian He <39037239+ianhe8x@users.noreply.github.com>
Date: Thu, 30 Oct 2025 18:19:27 +1300
Subject: [PATCH 4/5] remove sidebar for graphql agent

---
 docs/.vuepress/sidebar.ts | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/docs/.vuepress/sidebar.ts b/docs/.vuepress/sidebar.ts
index 9116ea2e6fb..cdc7a602eec 100644
--- a/docs/.vuepress/sidebar.ts
+++ b/docs/.vuepress/sidebar.ts
@@ -719,12 +719,12 @@ export const getSidebar = (locale: string) =>
       `${locale}/subquery_network/community.md`,
       `${locale}/subquery_network/glossary.md`,
     ],
-    "/ai/": [
-      {
-        text: "SubQuery GraphQL Agent",
-        link: `${locale}/ai/graphql_agent.md`,
-      },
-    ],
+    // "/ai/": [
+    //   {
+    //     text: "SubQuery GraphQL Agent",
+    //     link: `${locale}/ai/graphql_agent.md`,
+    //   },
+    // ],
     "/miscellaneous/": [
       `${locale}/miscellaneous/contributing.md`,
       `${locale}/miscellaneous/social_media.md`,

From e73a0778e3f79b7fb9d4daa28a4013343a26fa9e Mon Sep 17 00:00:00 2001
From: Ian He <39037239+ianhe8x@users.noreply.github.com>
Date: Fri, 31 Oct 2025 11:36:22 +1300
Subject: [PATCH 5/5] remove sidebar for graphql agent

---
 docs/.vuepress/sidebar.ts | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/.vuepress/sidebar.ts b/docs/.vuepress/sidebar.ts
index cdc7a602eec..182c907d4c5 100644
--- a/docs/.vuepress/sidebar.ts
+++ b/docs/.vuepress/sidebar.ts
@@ -719,12 +719,12 @@ export const getSidebar = (locale: string) =>
       `${locale}/subquery_network/community.md`,
       `${locale}/subquery_network/glossary.md`,
     ],
-    // "/ai/": [
+    "/ai/": [
     //   {
     //     text: "SubQuery GraphQL Agent",
     //     link: `${locale}/ai/graphql_agent.md`,
    //   },
-    // ],
+    ],
     "/miscellaneous/": [
       `${locale}/miscellaneous/contributing.md`,
       `${locale}/miscellaneous/social_media.md`,