diff --git a/.github/workflows/test-openrouter.yml b/.github/workflows/test-openrouter.yml
new file mode 100644
index 0000000..3088ae5
--- /dev/null
+++ b/.github/workflows/test-openrouter.yml
@@ -0,0 +1,144 @@
+name: Test OpenRouter Models
+
+on:
+  schedule:
+    # Run every 6 hours
+    - cron: '0 */6 * * *'
+  workflow_dispatch:
+    inputs:
+      test_language:
+        description: 'Language to test (javascript, python, or both)'
+        required: false
+        default: 'both'
+        type: choice
+        options:
+          - both
+          - javascript
+          - python
+  push:
+    branches:
+      - main
+    paths:
+      - 'test-openrouter-models.mjs'
+      - 'test-openrouter-models.py'
+      - '.github/workflows/test-openrouter.yml'
+
+jobs:
+  test-javascript:
+    if: github.event_name != 'workflow_dispatch' || github.event.inputs.test_language == 'both' || github.event.inputs.test_language == 'javascript'
+    runs-on: ubuntu-latest
+    name: Test OpenRouter Models (JavaScript)
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - name: Setup Node.js
+        uses: actions/setup-node@v4
+        with:
+          node-version: '20'
+
+      - name: Run OpenRouter model tests
+        env:
+          OPENROUTER_API_KEY: ${{ secrets.OPENROUTER_API_KEY }}
+        run: |
+          echo "Testing OpenRouter models with JavaScript..."
+          node test-openrouter-models.mjs > openrouter-test-results-js.log 2>&1 || true
+          cat openrouter-test-results-js.log
+
+      - name: Upload test results
+        uses: actions/upload-artifact@v4
+        if: always()
+        with:
+          name: openrouter-test-results-javascript
+          path: openrouter-test-results-js.log
+          retention-days: 30
+
+      - name: Check for failures
+        run: |
+          if grep -q "Successful responses: 0" openrouter-test-results-js.log; then
+            echo "::error::No successful model responses detected"
+            exit 1
+          fi
+
+  test-python:
+    if: github.event_name != 'workflow_dispatch' || github.event.inputs.test_language == 'both' || github.event.inputs.test_language == 'python'
+    runs-on: ubuntu-latest
+    name: Test OpenRouter Models (Python)
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - name: Setup Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: '3.11'
+
+      - name: Install dependencies
+        run: |
+          pip install aiohttp
+
+      - name: Run OpenRouter model tests
+        env:
+          OPENROUTER_API_KEY: ${{ secrets.OPENROUTER_API_KEY }}
+        run: |
+          echo "Testing OpenRouter models with Python..."
+          python test-openrouter-models.py > openrouter-test-results-py.log 2>&1 || true
+          cat openrouter-test-results-py.log
+
+      - name: Upload test results
+        uses: actions/upload-artifact@v4
+        if: always()
+        with:
+          name: openrouter-test-results-python
+          path: openrouter-test-results-py.log
+          retention-days: 30
+
+      - name: Check for failures
+        run: |
+          if grep -q "Successful responses: 0" openrouter-test-results-py.log; then
+            echo "::error::No successful model responses detected"
+            exit 1
+          fi
+
+  summary:
+    needs: [test-javascript, test-python]
+    if: always()
+    runs-on: ubuntu-latest
+    name: Test Summary
+
+    steps:
+      - name: Download JavaScript results
+        uses: actions/download-artifact@v4
+        continue-on-error: true
+        with:
+          name: openrouter-test-results-javascript
+          path: ./results/
+
+      - name: Download Python results
+        uses: actions/download-artifact@v4
+        continue-on-error: true
+        with:
+          name: openrouter-test-results-python
+          path: ./results/
+
+      - name: Create summary
+        run: |
+          echo "# OpenRouter Model Test Summary" >> $GITHUB_STEP_SUMMARY
+          echo "" >> $GITHUB_STEP_SUMMARY
+
+          if [ -f "./results/openrouter-test-results-js.log" ]; then
+            echo "## JavaScript Results" >> $GITHUB_STEP_SUMMARY
+            echo '```' >> $GITHUB_STEP_SUMMARY
+            tail -20 ./results/openrouter-test-results-js.log >> $GITHUB_STEP_SUMMARY || echo "No results available" >> $GITHUB_STEP_SUMMARY
+            echo '```' >> $GITHUB_STEP_SUMMARY
+            echo "" >> $GITHUB_STEP_SUMMARY
+          fi
+
+          if [ -f "./results/openrouter-test-results-py.log" ]; then
+            echo "## Python Results" >> $GITHUB_STEP_SUMMARY
+            echo '```' >> $GITHUB_STEP_SUMMARY
+            tail -20 ./results/openrouter-test-results-py.log >> $GITHUB_STEP_SUMMARY || echo "No results available" >> $GITHUB_STEP_SUMMARY
+            echo '```' >> $GITHUB_STEP_SUMMARY
+          fi
diff --git a/.github/workflows/test-piapi.yml b/.github/workflows/test-piapi.yml
new file mode 100644
index 0000000..6d51284
--- /dev/null
+++ b/.github/workflows/test-piapi.yml
@@ -0,0 +1,146 @@
+name: Test PiAPI Models
+
+on:
+  schedule:
+    # Run every 6 hours
+    - cron: '0 */6 * * *'
+  workflow_dispatch:
+    inputs:
+      test_language:
+        description: 'Language to test (javascript, python, or both)'
+        required: false
+        default: 'both'
+        type: choice
+        options:
+          - both
+          - javascript
+          - python
+  push:
+    branches:
+      - main
+    paths:
+      - 'test-models.mjs'
+      - 'model.py'
+      - '.github/workflows/test-piapi.yml'
+
+jobs:
+  test-javascript:
+    if: github.event_name != 'workflow_dispatch' || github.event.inputs.test_language == 'both' || github.event.inputs.test_language == 'javascript'
+    runs-on: ubuntu-latest
+    name: Test PiAPI Models (JavaScript)
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - name: Setup Node.js
+        uses: actions/setup-node@v4
+        with:
+          node-version: '20'
+
+      - name: Run PiAPI model tests
+        env:
+          OPENAI_API_KEY: ${{ secrets.PIAPI_API_KEY }}
+          OPENAI_API_BASE: ${{ secrets.PIAPI_API_BASE || 'https://api.deep.assistant.run.place/v1' }}
+        run: |
+          echo "Testing PiAPI models with JavaScript..."
+          node test-models.mjs > piapi-test-results-js.log 2>&1 || true
+          cat piapi-test-results-js.log
+
+      - name: Upload test results
+        uses: actions/upload-artifact@v4
+        if: always()
+        with:
+          name: piapi-test-results-javascript
+          path: piapi-test-results-js.log
+          retention-days: 30
+
+      - name: Check for failures
+        run: |
+          if grep -q "Successful responses: 0" piapi-test-results-js.log; then
+            echo "::error::No successful model responses detected"
+            exit 1
+          fi
+
+  test-python:
+    if: github.event_name != 'workflow_dispatch' || github.event.inputs.test_language == 'both' || github.event.inputs.test_language == 'python'
+    runs-on: ubuntu-latest
+    name: Test PiAPI Models (Python)
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - name: Setup Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: '3.11'
+
+      - name: Install dependencies
+        run: |
+          pip install aiohttp
+
+      - name: Run PiAPI model tests
+        env:
+          OPENAI_API_KEY: ${{ secrets.PIAPI_API_KEY }}
+          OPENAI_API_BASE: ${{ secrets.PIAPI_API_BASE || 'https://api.deep.assistant.run.place/v1' }}
+        run: |
+          echo "Testing PiAPI models with Python..."
+          python model.py > piapi-test-results-py.log 2>&1 || true
+          cat piapi-test-results-py.log
+
+      - name: Upload test results
+        uses: actions/upload-artifact@v4
+        if: always()
+        with:
+          name: piapi-test-results-python
+          path: piapi-test-results-py.log
+          retention-days: 30
+
+      - name: Check for failures
+        run: |
+          if grep -q "Successful responses: 0" piapi-test-results-py.log; then
+            echo "::error::No successful model responses detected"
+            exit 1
+          fi
+
+  summary:
+    needs: [test-javascript, test-python]
+    if: always()
+    runs-on: ubuntu-latest
+    name: Test Summary
+
+    steps:
+      - name: Download JavaScript results
+        uses: actions/download-artifact@v4
+        continue-on-error: true
+        with:
+          name: piapi-test-results-javascript
+          path: ./results/
+
+      - name: Download Python results
+        uses: actions/download-artifact@v4
+        continue-on-error: true
+        with:
+          name: piapi-test-results-python
+          path: ./results/
+
+      - name: Create summary
+        run: |
+          echo "# PiAPI Model Test Summary" >> $GITHUB_STEP_SUMMARY
+          echo "" >> $GITHUB_STEP_SUMMARY
+
+          if [ -f "./results/piapi-test-results-js.log" ]; then
+            echo "## JavaScript Results" >> $GITHUB_STEP_SUMMARY
+            echo '```' >> $GITHUB_STEP_SUMMARY
+            tail -20 ./results/piapi-test-results-js.log >> $GITHUB_STEP_SUMMARY || echo "No results available" >> $GITHUB_STEP_SUMMARY
+            echo '```' >> $GITHUB_STEP_SUMMARY
+            echo "" >> $GITHUB_STEP_SUMMARY
+          fi
+
+          if [ -f "./results/piapi-test-results-py.log" ]; then
+            echo "## Python Results" >> $GITHUB_STEP_SUMMARY
+            echo '```' >> $GITHUB_STEP_SUMMARY
+            tail -20 ./results/piapi-test-results-py.log >> $GITHUB_STEP_SUMMARY || echo "No results available" >> $GITHUB_STEP_SUMMARY
+            echo '```' >> $GITHUB_STEP_SUMMARY
+          fi
diff --git a/.gitignore b/.gitignore
index 13dfa36..a6bd652 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,6 @@
 .env
-node_modules/
\ No newline at end of file
+node_modules/
+__pycache__/
+*.pyc
+*.pyo
+*.pyd
\ No newline at end of file
diff --git a/README.md b/README.md
index b993b11..07971ed 100644
--- a/README.md
+++ b/README.md
@@ -1,10 +1,50 @@
 # API Tester
 
-Testing examples for Deep.Assistant API Gateway that demonstrate OpenAI-compatible usage.
+Testing and monitoring tools for AI API gateways, including model availability checking for Deep.Assistant (PiAPI) and OpenRouter.
 
-## Examples
+## Features
 
-This repository contains examples in different programming languages:
+- **Model Availability Testing**: Automated testing of model availability across multiple AI providers
+- **Dual Provider Support**: Test both PiAPI and OpenRouter models
+- **Multiple Languages**: JavaScript and Python implementations
+- **GitHub Actions Integration**: Automated scheduled testing with CI/CD workflows
+- **Detailed Reporting**: Comprehensive test results with response times and success rates
+
+## Model Testing Scripts
+
+### PiAPI Model Testing
+
+Test Deep.Assistant API Gateway models:
+
+**JavaScript:**
+```bash
+node test-models.mjs
+```
+
+**Python:**
+```bash
+python model.py
+```
+
+### OpenRouter Model Testing
+
+Test OpenRouter models:
+
+**JavaScript:**
+```bash
+export OPENROUTER_API_KEY="your-api-key"
+node test-openrouter-models.mjs
+```
+
+**Python:**
+```bash
+export OPENROUTER_API_KEY="your-api-key"
+python test-openrouter-models.py
+```
+
+## Usage Examples
+
+This repository also contains usage examples in different programming languages:
 
 - **[JavaScript Example](examples/javascript/)** - Node.js example using the OpenAI SDK
 - **[Python Example](examples/python/)** - Python example using the OpenAI SDK
@@ -17,6 +57,36 @@ Each example directory contains:
 
 ## Quick Start
 
+### Testing Model Availability
+
+1. Set your API key:
+   ```bash
+   # For PiAPI
+   export OPENAI_API_KEY="your-piapi-key"
+
+   # For OpenRouter
+   export OPENROUTER_API_KEY="your-openrouter-key"
+   ```
+
+2. Run the test script:
+   ```bash
+   # Test PiAPI models (JavaScript)
+   node test-models.mjs
+
+   # Test OpenRouter models (Python)
+   python test-openrouter-models.py
+   ```
+
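+The OpenRouter JavaScript script (`test-openrouter-models.mjs`) also loads variables from a `.env` file via `dotenv`, so keys can live there instead of being exported each time. An illustrative file (`.env` is already in `.gitignore`):
+
+```bash
+# .env -- picked up automatically by test-openrouter-models.mjs
+OPENAI_API_KEY="your-piapi-key"
+OPENROUTER_API_KEY="your-openrouter-key"
+```
+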
+### Using the Examples
+
 1. Navigate to your preferred language example:
    ```bash
    cd examples/javascript  # or examples/python
@@ -24,7 +94,31 @@ Each example directory contains:
 
 2. Follow the README instructions in that directory
 
-## API Key
+## API Keys
 
+### PiAPI (Deep.Assistant)
 Get your API key from the Telegram bot: https://t.me/DeepGPTBot
-Use the `/api` command to obtain your key.
\ No newline at end of file
+Use the `/api` command to obtain your key.
+
+### OpenRouter
+Get your API key from: https://openrouter.ai/keys
+Sign up and create an API key in your account dashboard.
+
+## GitHub Actions Workflows
+
+This repository includes automated testing workflows:
+
+- **PiAPI Model Testing**: Runs every 6 hours to check model availability
+- **OpenRouter Model Testing**: Runs every 6 hours to check model availability
+
+Both workflows can be manually triggered from the Actions tab with language selection options.
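+
+For example, with the GitHub CLI (`gh`) installed and authenticated, a manual run can also be started from a terminal (illustrative invocation):
+
+```bash
+gh workflow run test-openrouter.yml -f test_language=python
+```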
+
+## Contributing
+
+Contributions are welcome! Please feel free to submit pull requests or open issues for bugs and feature requests.
\ No newline at end of file
diff --git a/test-openrouter-models.mjs b/test-openrouter-models.mjs
new file mode 100644
index 0000000..723af37
--- /dev/null
+++ b/test-openrouter-models.mjs
@@ -0,0 +1,234 @@
+import readline from 'readline/promises';
+import { stdin as input, stdout as output } from 'process';
+
+// Dynamically load use-m
+const { use } = eval(
+  await fetch('https://unpkg.com/use-m/use.js').then(u => u.text())
+);
+
+// Load environment variables from .env
+const dotenv = await use('dotenv@16.1.4');
+dotenv.config();
+
+// OpenRouter models to test
+const tryCompletionsConfig = {
+  // Frontier Reasoning Models
+  'openai/gpt-5-pro': [],
+  'openai/gpt-5.1': [],
+  'anthropic/claude-opus-4.5': [],
+  'anthropic/claude-sonnet-4.5': [],
+  'anthropic/claude-haiku-4.5': [],
+
+  // Specialized Coding Models
+  'openai/gpt-5.1-codex': [],
+  'kwaipilot/kat-coder-pro:free': [],
+
+  // Advanced Reasoning Models
+  'deepseek/deepseek-v3.2': [],
+  'google/gemini-3-pro-preview': [],
+  'google/gemini-2.5-flash-preview-09-2025': [],
+  'moonshotai/kimi-k2-thinking': [],
+
+  // Multimodal Vision Models
+  'z-ai/glm-4.6v': [],
+  'qwen/qwen3-vl-235b-a22b-instruct': [],
+  'nvidia/nemotron-nano-12b-v2-vl': [],
+
+  // Efficient/Open Models
+  'mistralai/mistral-large-2512': [],
+  'mistralai/ministral-14b-2512': [],
+  'amazon/nova-2-lite-v1:free': [],
+  'allenai/olmo-3-32b-think:free': [],
+
+  // Specialized/Research Models
+  'perplexity/sonar-pro-search': [],
+  'prime-intellect/intellect-3': [],
+  'minimax/minimax-m2': [],
+  'x-ai/grok-4.1-fast': [],
+
+  // Legacy/Popular Models
+  'openai/gpt-4o': [],
+  'openai/gpt-4o-mini': [],
+  'anthropic/claude-3.5-sonnet': [],
+  'anthropic/claude-3-opus': [],
+  'google/gemini-pro': [],
+  'meta-llama/llama-3.3-70b-instruct': [],
+  'deepseek/deepseek-chat': [],
+  'qwen/qwen-2.5-72b-instruct': [],
+};
+
+const models = Object.keys(tryCompletionsConfig);
+
+function formatStatus(status, maxLength = 50) {
+  if (status.length <= maxLength) {
+    return status;
+  }
+  return status.slice(0, maxLength - 3) + '...';
+}
+
+// Universal markdown table builder
+/**
+ * Builds a markdown table string.
+ * @param {string[]} headers - Column headers.
+ * @param {Array} rows - Rows of table data.
+ * @param {('left'|'center'|'right')[]} [alignments] - Optional alignment per column.
+ * @returns {string} Markdown-formatted table.
+ */
+function buildMarkdownTable(headers, rows, alignments = []) {
+  // Determine maximum content widths per column
+  const cols = headers.length;
+  const maxLen = Array(cols).fill(0);
+  // Measure header widths
+  headers.forEach((h, i) => {
+    maxLen[i] = Math.max(maxLen[i], String(h).length);
+  });
+  // Measure rows widths
+  rows.forEach(row => {
+    row.forEach((cell, i) => {
+      const text = cell != null ? String(cell) : '';
+      maxLen[i] = Math.max(maxLen[i], text.length);
+    });
+  });
+  // Build padded header cells
+  const headerCells = headers.map((h, i) => {
+    const text = String(h);
+    return text + ' '.repeat(maxLen[i] - text.length);
+  });
+  // Build separator cells with alignment
+  const sepCells = headers.map((_, i) => {
+    const align = alignments[i];
+    const length = maxLen[i];
+    if (align === 'center') {
+      const hyphens = '-'.repeat(Math.max(length - 2, 1));
+      return `:${hyphens}:`;
+    }
+    if (align === 'right') {
+      const hyphens = '-'.repeat(Math.max(length - 1, 1));
+      return `${hyphens}:`;
+    }
+    // default left
+    return '-'.repeat(length);
+  });
+  // Build padded data rows
+  const rowLines = rows.map(row => {
+    const cells = row.map((cell, i) => {
+      const text = cell != null ? String(cell) : '';
+      const diff = maxLen[i] - text.length;
+      const align = alignments[i];
+      if (align === 'center') {
+        const left = Math.floor(diff / 2);
+        const right = diff - left;
+        return ' '.repeat(left) + text + ' '.repeat(right);
+      }
+      if (align === 'right') {
+        return ' '.repeat(diff) + text;
+      }
+      // default left
+      return text + ' '.repeat(diff);
+    });
+    return `| ${cells.join(' | ')} |`;
+  });
+  // Combine all parts
+  const headerRow = `| ${headerCells.join(' | ')} |`;
+  const sepRow = `| ${sepCells.join(' | ')} |`;
+  return [headerRow, sepRow, ...rowLines].join('\n');
+}
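+
+// Example (illustrative):
+//   buildMarkdownTable(['Name', 'Age'], [['Alice', '30']], ['left', 'right'])
+//   returns '| Name  | Age |\n| ----- | --: |\n| Alice |  30 |'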
+
+async function testModel(model, apiKey) {
+  const baseUrl = process.env.OPENROUTER_API_BASE || 'https://openrouter.ai/api/v1';
+  const url = `${baseUrl}/chat/completions`;
+  const headers = {
+    'Authorization': `Bearer ${apiKey}`,
+    'Content-Type': 'application/json',
+    'HTTP-Referer': 'https://github.com/link-assistant/api-tester',
+    'X-Title': 'API Tester - OpenRouter Model Checker',
+  };
+  const payload = {
+    messages: [{ role: 'user', content: 'hi' }],
+    model,
+    max_tokens: 20,
+  };
+
+  const startTime = Date.now();
+  const result = { model, status: '', responseTime: 0, isCorrect: false, actualModel: '' };
+
+  try {
+    const response = await fetch(url, {
+      method: 'POST',
+      headers,
+      body: JSON.stringify(payload),
+    });
+    const responseTime = (Date.now() - startTime) / 1000;
+    result.responseTime = Number(responseTime.toFixed(2));
+
+    if (response.ok) {
+      const data = await response.json();
+      const responseModel = data.model || '';
+      result.actualModel = responseModel;
+
+      // OpenRouter may return the exact model ID or a variant
+      // Check if the response model matches the requested model
+      result.isCorrect = responseModel === model || responseModel.includes(model.split('/')[1]);
+
+      result.status = result.isCorrect ? 'Success' : `Wrong model: ${responseModel}`;
+    } else {
+      result.status = `HTTP ${response.status}`;
+      try {
+        const errorData = await response.json();
+        if (errorData.error && errorData.error.message) {
+          result.status += `: ${errorData.error.message}`;
+        }
+        console.error(`Error response for model ${model}:`, errorData);
+      } catch (e) {
+        // ignore JSON parse errors
+      }
+    }
+  } catch (e) {
+    result.status = `Error: ${e.message}`;
+  }
+
+  return result;
+}
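+
+// Shape of one result object (illustrative values):
+//   { model: 'openai/gpt-4o', actualModel: 'openai/gpt-4o',
+//     status: 'Success', responseTime: 1.23, isCorrect: true }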
+
+let apiKey = process.env.OPENROUTER_API_KEY;
+if (!apiKey) {
+  const rl = readline.createInterface({ input, output });
+  apiKey = await rl.question('Enter OpenRouter API key: ');
+  rl.close();
+}
+console.log('\nStarting OpenRouter model tests...');
+
+const results = await Promise.all(models.map(model => testModel(model, apiKey)));
+// Sum of per-request times (requests run in parallel, so this exceeds wall-clock time)
+const totalTime = results.reduce((sum, r) => sum + r.responseTime, 0);
+
+console.log('\nTest Results:');
+const headers = ['Model', 'Actual Model', 'Status', 'Time', 'Working'];
+const rows = results.map(r => [
+  r.model,
+  r.actualModel || 'N/A',
+  formatStatus(r.status),
+  r.responseTime.toFixed(2),
+  r.isCorrect ? '🟩' : '🟥'
+]);
+console.log(buildMarkdownTable(headers, rows, ['left', 'left', 'left', 'right', 'center']));
+
+console.log(`\nTotal tested: ${models.length} models`);
+console.log(`Successful responses: ${results.filter(r => r.isCorrect).length}`);
+console.log(`Total testing time: ${totalTime.toFixed(2)} sec`);
+
+const workingModels = results.filter(r => r.isCorrect).sort((a, b) => a.responseTime - b.responseTime);
+if (workingModels.length > 0) {
+  console.log('\nWorking models (sorted by speed):');
+  for (const { model, responseTime } of workingModels) {
+    console.log(`- ${model}: ${responseTime.toFixed(2)} sec`);
+  }
+}
diff --git a/test-openrouter-models.py b/test-openrouter-models.py
new file mode 100644
index 0000000..b5662db
--- /dev/null
+++ b/test-openrouter-models.py
@@ -0,0 +1,232 @@
+import aiohttp
+import asyncio
+import time
+import os
+import textwrap
+from typing import Dict, List, Optional
+
+# OpenRouter models to test
+try_completions_config = {
+    # Frontier Reasoning Models
+    "openai/gpt-5-pro": [],
+    "openai/gpt-5.1": [],
+    "anthropic/claude-opus-4.5": [],
+    "anthropic/claude-sonnet-4.5": [],
+    "anthropic/claude-haiku-4.5": [],
+
+    # Specialized Coding Models
+    "openai/gpt-5.1-codex": [],
+    "kwaipilot/kat-coder-pro:free": [],
+
+    # Advanced Reasoning Models
+    "deepseek/deepseek-v3.2": [],
+    "google/gemini-3-pro-preview": [],
+    "google/gemini-2.5-flash-preview-09-2025": [],
+    "moonshotai/kimi-k2-thinking": [],
+
+    # Multimodal Vision Models
+    "z-ai/glm-4.6v": [],
+    "qwen/qwen3-vl-235b-a22b-instruct": [],
+    "nvidia/nemotron-nano-12b-v2-vl": [],
+
+    # Efficient/Open Models
+    "mistralai/mistral-large-2512": [],
+    "mistralai/ministral-14b-2512": [],
+    "amazon/nova-2-lite-v1:free": [],
+    "allenai/olmo-3-32b-think:free": [],
+
+    # Specialized/Research Models
+    "perplexity/sonar-pro-search": [],
+    "prime-intellect/intellect-3": [],
+    "minimax/minimax-m2": [],
+    "x-ai/grok-4.1-fast": [],
+
+    # Legacy/Popular Models
+    "openai/gpt-4o": [],
+    "openai/gpt-4o-mini": [],
+    "anthropic/claude-3.5-sonnet": [],
+    "anthropic/claude-3-opus": [],
+    "google/gemini-pro": [],
+    "meta-llama/llama-3.3-70b-instruct": [],
+    "deepseek/deepseek-chat": [],
+    "qwen/qwen-2.5-72b-instruct": [],
+}
+
+models = list(try_completions_config.keys())
+
+
+def format_status(status: str, max_length: int = 40) -> str:
+    """Format status for table display"""
+    if len(status) <= max_length:
+        return status
+    return textwrap.shorten(status, width=max_length, placeholder="...")
+
+
+def build_markdown_table(
+    headers: List[str], rows: List[List[str]], alignments: Optional[List[str]] = None
+) -> str:
+    """Build a markdown formatted table"""
+    if alignments is None:
+        alignments = ["left"] * len(headers)
+
+    # Determine maximum content widths per column
+    max_len = [len(str(h)) for h in headers]
+    for row in rows:
+        for i, cell in enumerate(row):
+            max_len[i] = max(max_len[i], len(str(cell)))
+
+    # Build header row
+    header_cells = [str(h).ljust(max_len[i]) for i, h in enumerate(headers)]
+    header_row = "| " + " | ".join(header_cells) + " |"
+
+    # Build separator row (guard narrow columns, matching the JS version)
+    sep_cells = []
+    for i, align in enumerate(alignments):
+        if align == "center":
+            sep_cells.append(":" + "-" * max(max_len[i] - 2, 1) + ":")
+        elif align == "right":
+            sep_cells.append("-" * max(max_len[i] - 1, 1) + ":")
+        else:  # left
+            sep_cells.append("-" * max_len[i])
+    sep_row = "| " + " | ".join(sep_cells) + " |"
+
+    # Build data rows
+    data_rows = []
+    for row in rows:
+        cells = []
+        for i, cell in enumerate(row):
+            text = str(cell)
+            if alignments[i] == "right":
+                cells.append(text.rjust(max_len[i]))
+            elif alignments[i] == "center":
+                cells.append(text.center(max_len[i]))
+            else:
+                cells.append(text.ljust(max_len[i]))
+        data_rows.append("| " + " | ".join(cells) + " |")
+
+    return "\n".join([header_row, sep_row] + data_rows)
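+
+# Example (illustrative):
+#   build_markdown_table(["Name", "Age"], [["Alice", "30"]], ["left", "right"])
+#   returns '| Name  | Age |\n| ----- | --: |\n| Alice |  30 |'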
display""" + if len(status) <= max_length: + return status + return textwrap.shorten(status, width=max_length, placeholder="...") + + +def build_markdown_table( + headers: List[str], rows: List[List[str]], alignments: List[str] = None +) -> str: + """Build a markdown formatted table""" + if alignments is None: + alignments = ["left"] * len(headers) + + # Determine maximum content widths per column + max_len = [len(str(h)) for h in headers] + for row in rows: + for i, cell in enumerate(row): + max_len[i] = max(max_len[i], len(str(cell))) + + # Build header row + header_cells = [str(h).ljust(max_len[i]) for i, h in enumerate(headers)] + header_row = "| " + " | ".join(header_cells) + " |" + + # Build separator row + sep_cells = [] + for i, align in enumerate(alignments): + if align == "center": + sep_cells.append(":" + "-" * (max_len[i] - 2) + ":") + elif align == "right": + sep_cells.append("-" * (max_len[i] - 1) + ":") + else: # left + sep_cells.append("-" * max_len[i]) + sep_row = "| " + " | ".join(sep_cells) + " |" + + # Build data rows + data_rows = [] + for row in rows: + cells = [] + for i, cell in enumerate(row): + text = str(cell) + if alignments[i] == "right": + cells.append(text.rjust(max_len[i])) + elif alignments[i] == "center": + cells.append(text.center(max_len[i])) + else: + cells.append(text.ljust(max_len[i])) + data_rows.append("| " + " | ".join(cells) + " |") + + return "\n".join([header_row, sep_row] + data_rows) + + +async def test_model(session: aiohttp.ClientSession, model: str, api_key: str) -> Dict: + """Test a single OpenRouter model""" + base_url = os.getenv("OPENROUTER_API_BASE", "https://openrouter.ai/api/v1") + url = f"{base_url}/chat/completions" + + headers = { + "Authorization": f"Bearer {api_key}", + "Content-Type": "application/json", + "HTTP-Referer": "https://github.com/link-assistant/api-tester", + "X-Title": "API Tester - OpenRouter Model Checker", + } + + payload = { + "messages": [{"role": "user", "content": "hi"}], + "model": model, + "max_tokens": 20, + } + + start_time = time.time() + result = { + "model": model, + "status": "", + "response_time": 0.0, + "is_correct": False, + "actual_model": "", + } + + try: + async with session.post(url, json=payload, headers=headers, timeout=None) as response: + response_time = time.time() - start_time + result["response_time"] = round(response_time, 2) + + if response.status == 200: + data = await response.json() + response_model = data.get("model", "") + result["actual_model"] = response_model + + # OpenRouter may return the exact model ID or a variant + # Check if the response model matches the requested model + model_base = model.split("/")[1] if "/" in model else model + result["is_correct"] = ( + response_model == model or model_base in response_model + ) + + result["status"] = ( + "Success" + if result["is_correct"] + else f"Wrong model: {response_model}" + ) + + else: + result["status"] = f"HTTP {response.status}" + try: + error_data = await response.json() + if "error" in error_data and "message" in error_data["error"]: + result["status"] += f": {error_data['error']['message']}" + except: + pass + + except Exception as e: + result["status"] = f"Error: {str(e)}" + + return result + + +async def main(): + """Main function to test all OpenRouter models""" + api_key = os.getenv("OPENROUTER_API_KEY") + if not api_key: + api_key = input("Enter OpenRouter API key: ") + + print("\nStarting OpenRouter model tests...") + + async with aiohttp.ClientSession() as session: + tasks = [test_model(session, model, 
+    total_time = sum(r["response_time"] for r in results)
+    successful = sum(1 for r in results if r["is_correct"])
+
+    # Build results table
+    print("\nTest Results:")
+    headers = ["Model", "Actual Model", "Status", "Time", "Working"]
+    rows = [
+        [
+            r["model"],
+            r["actual_model"] or "N/A",
+            format_status(r["status"]),
+            f"{r['response_time']:.2f}",
+            "🟩" if r["is_correct"] else "🟥",
+        ]
+        for r in results
+    ]
+    print(
+        build_markdown_table(
+            headers, rows, ["left", "left", "left", "right", "center"]
+        )
+    )
+
+    print(f"\nTotal tested: {len(models)} models")
+    print(f"Successful responses: {successful}")
+    print(f"Total testing time: {total_time:.2f} sec")
+
+    # Sort working models by speed
+    working_models = sorted(
+        [(r["model"], r["response_time"]) for r in results if r["is_correct"]],
+        key=lambda x: x[1],
+    )
+    if working_models:
+        print("\nWorking models (sorted by speed):")
+        for model, resp_time in working_models:
+            print(f"- {model}: {resp_time:.2f} sec")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())