144 changes: 144 additions & 0 deletions .github/workflows/test-openrouter.yml
@@ -0,0 +1,144 @@
name: Test OpenRouter Models

on:
  schedule:
    # Run every 6 hours
    - cron: '0 */6 * * *'
  workflow_dispatch:
    inputs:
      test_language:
        description: 'Language to test (javascript, python, or both)'
        required: false
        default: 'both'
        type: choice
        options:
          - both
          - javascript
          - python
  push:
    branches:
      - main
    paths:
      - 'test-openrouter-models.mjs'
      - 'test-openrouter-models.py'
      - '.github/workflows/test-openrouter.yml'

jobs:
  test-javascript:
    if: github.event_name != 'workflow_dispatch' || github.event.inputs.test_language == 'both' || github.event.inputs.test_language == 'javascript'
    runs-on: ubuntu-latest
    name: Test OpenRouter Models (JavaScript)

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '20'

      - name: Run OpenRouter model tests
        env:
          OPENROUTER_API_KEY: ${{ secrets.OPENROUTER_API_KEY }}
        run: |
          echo "Testing OpenRouter models with JavaScript..."
          node test-openrouter-models.mjs > openrouter-test-results-js.log 2>&1 || true
          cat openrouter-test-results-js.log

      - name: Upload test results
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: openrouter-test-results-javascript
          path: openrouter-test-results-js.log
          retention-days: 30

      - name: Check for failures
        run: |
          if grep -q "Successful responses: 0" openrouter-test-results-js.log; then
            echo "::warning::No successful model responses detected"
            exit 1
          fi

  test-python:
    if: github.event_name != 'workflow_dispatch' || github.event.inputs.test_language == 'both' || github.event.inputs.test_language == 'python'
    runs-on: ubuntu-latest
    name: Test OpenRouter Models (Python)

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Setup Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Install dependencies
        run: |
          pip install aiohttp

      - name: Run OpenRouter model tests
        env:
          OPENROUTER_API_KEY: ${{ secrets.OPENROUTER_API_KEY }}
        run: |
          echo "Testing OpenRouter models with Python..."
          python test-openrouter-models.py > openrouter-test-results-py.log 2>&1 || true
          cat openrouter-test-results-py.log

      - name: Upload test results
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: openrouter-test-results-python
          path: openrouter-test-results-py.log
          retention-days: 30

      - name: Check for failures
        run: |
          if grep -q "Successful responses: 0" openrouter-test-results-py.log; then
            echo "::warning::No successful model responses detected"
            exit 1
          fi

  summary:
    needs: [test-javascript, test-python]
    if: always()
    runs-on: ubuntu-latest
    name: Test Summary

    steps:
      - name: Download JavaScript results
        uses: actions/download-artifact@v4
        continue-on-error: true
        with:
          name: openrouter-test-results-javascript
          path: ./results/

      - name: Download Python results
        uses: actions/download-artifact@v4
        continue-on-error: true
        with:
          name: openrouter-test-results-python
          path: ./results/

      - name: Create summary
        run: |
          echo "# OpenRouter Model Test Summary" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY

          if [ -f "./results/openrouter-test-results-js.log" ]; then
            echo "## JavaScript Results" >> $GITHUB_STEP_SUMMARY
            echo '```' >> $GITHUB_STEP_SUMMARY
            tail -20 ./results/openrouter-test-results-js.log >> $GITHUB_STEP_SUMMARY || echo "No results available" >> $GITHUB_STEP_SUMMARY
            echo '```' >> $GITHUB_STEP_SUMMARY
            echo "" >> $GITHUB_STEP_SUMMARY
          fi

          if [ -f "./results/openrouter-test-results-py.log" ]; then
            echo "## Python Results" >> $GITHUB_STEP_SUMMARY
            echo '```' >> $GITHUB_STEP_SUMMARY
            tail -20 ./results/openrouter-test-results-py.log >> $GITHUB_STEP_SUMMARY || echo "No results available" >> $GITHUB_STEP_SUMMARY
            echo '```' >> $GITHUB_STEP_SUMMARY
          fi
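Both jobs above gate success on a literal summary line: the final grep fails the job whenever the log contains `Successful responses: 0`. The test scripts themselves are not part of this diff, so the following is only a minimal sketch of the output contract they would need to honor — the model IDs, prompt, and `askModel` helper are assumptions, though the endpoint is OpenRouter's standard OpenAI-compatible chat-completions route, and the code runs as-is under the Node 20 setup used here:

```js
// sketch-openrouter-check.mjs — hypothetical stand-in for the tail of
// test-openrouter-models.mjs; only the printed summary line is load-bearing,
// because the workflow greps the log for "Successful responses: 0".
const models = ['openai/gpt-4o-mini', 'anthropic/claude-3-haiku']; // example IDs

async function askModel(model) {
  const res = await fetch('https://openrouter.ai/api/v1/chat/completions', {
    method: 'POST',
    headers: {
      Authorization: `Bearer ${process.env.OPENROUTER_API_KEY}`,
      'Content-Type': 'application/json',
    },
    body: JSON.stringify({
      model,
      messages: [{ role: 'user', content: 'Reply with the single word OK.' }],
    }),
  });
  if (!res.ok) throw new Error(`${model}: HTTP ${res.status}`);
  return res.json();
}

const results = await Promise.allSettled(models.map(askModel));
const ok = results.filter((r) => r.status === 'fulfilled').length;
console.log(`Successful responses: ${ok}`); // exact phrase the workflow greps for
console.log(`Failed responses: ${results.length - ok}`);
```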
146 changes: 146 additions & 0 deletions .github/workflows/test-piapi.yml
@@ -0,0 +1,146 @@
name: Test PiAPI Models

on:
  schedule:
    # Run every 6 hours
    - cron: '0 */6 * * *'
  workflow_dispatch:
    inputs:
      test_language:
        description: 'Language to test (javascript, python, or both)'
        required: false
        default: 'both'
        type: choice
        options:
          - both
          - javascript
          - python
  push:
    branches:
      - main
    paths:
      - 'test-models.mjs'
      - 'model.py'
      - '.github/workflows/test-piapi.yml'

jobs:
  test-javascript:
    if: github.event_name != 'workflow_dispatch' || github.event.inputs.test_language == 'both' || github.event.inputs.test_language == 'javascript'
    runs-on: ubuntu-latest
    name: Test PiAPI Models (JavaScript)

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '20'

      - name: Run PiAPI model tests
        env:
          OPENAI_API_KEY: ${{ secrets.PIAPI_API_KEY }}
          OPENAI_API_BASE: ${{ secrets.PIAPI_API_BASE || 'https://api.deep.assistant.run.place/v1' }}
        run: |
          echo "Testing PiAPI models with JavaScript..."
          node test-models.mjs > piapi-test-results-js.log 2>&1 || true
          cat piapi-test-results-js.log

      - name: Upload test results
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: piapi-test-results-javascript
          path: piapi-test-results-js.log
          retention-days: 30

      - name: Check for failures
        run: |
          if grep -q "Successful responses: 0" piapi-test-results-js.log; then
            echo "::warning::No successful model responses detected"
            exit 1
          fi

  test-python:
    if: github.event_name != 'workflow_dispatch' || github.event.inputs.test_language == 'both' || github.event.inputs.test_language == 'python'
    runs-on: ubuntu-latest
    name: Test PiAPI Models (Python)

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Setup Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Install dependencies
        run: |
          pip install aiohttp

      - name: Run PiAPI model tests
        env:
          OPENAI_API_KEY: ${{ secrets.PIAPI_API_KEY }}
          OPENAI_API_BASE: ${{ secrets.PIAPI_API_BASE || 'https://api.deep.assistant.run.place/v1' }}
        run: |
          echo "Testing PiAPI models with Python..."
          python model.py > piapi-test-results-py.log 2>&1 || true
          cat piapi-test-results-py.log

      - name: Upload test results
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: piapi-test-results-python
          path: piapi-test-results-py.log
          retention-days: 30

      - name: Check for failures
        run: |
          if grep -q "Successful responses: 0" piapi-test-results-py.log; then
            echo "::warning::No successful model responses detected"
            exit 1
          fi

  summary:
    needs: [test-javascript, test-python]
    if: always()
    runs-on: ubuntu-latest
    name: Test Summary

    steps:
      - name: Download JavaScript results
        uses: actions/download-artifact@v4
        continue-on-error: true
        with:
          name: piapi-test-results-javascript
          path: ./results/

      - name: Download Python results
        uses: actions/download-artifact@v4
        continue-on-error: true
        with:
          name: piapi-test-results-python
          path: ./results/

      - name: Create summary
        run: |
          echo "# PiAPI Model Test Summary" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY

          if [ -f "./results/piapi-test-results-js.log" ]; then
            echo "## JavaScript Results" >> $GITHUB_STEP_SUMMARY
            echo '```' >> $GITHUB_STEP_SUMMARY
            tail -20 ./results/piapi-test-results-js.log >> $GITHUB_STEP_SUMMARY || echo "No results available" >> $GITHUB_STEP_SUMMARY
            echo '```' >> $GITHUB_STEP_SUMMARY
            echo "" >> $GITHUB_STEP_SUMMARY
          fi

          if [ -f "./results/piapi-test-results-py.log" ]; then
            echo "## Python Results" >> $GITHUB_STEP_SUMMARY
            echo '```' >> $GITHUB_STEP_SUMMARY
            tail -20 ./results/piapi-test-results-py.log >> $GITHUB_STEP_SUMMARY || echo "No results available" >> $GITHUB_STEP_SUMMARY
            echo '```' >> $GITHUB_STEP_SUMMARY
          fi
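This workflow mirrors the OpenRouter one; the main difference is that the scripts are pointed at a gateway through `OPENAI_API_BASE`, with `https://api.deep.assistant.run.place/v1` as the fallback when the secret is unset. The env-var names suggest an OpenAI-compatible API, so here is a minimal sketch of such a request, assuming test-models.mjs (not shown in this diff) treats the base URL as a standard chat-completions endpoint — the model ID is hypothetical:

```js
// sketch-piapi-request.mjs — assumed shape of a single request in
// test-models.mjs; OPENAI_API_BASE and OPENAI_API_KEY come from the
// workflow's env block above.
const base = process.env.OPENAI_API_BASE ?? 'https://api.deep.assistant.run.place/v1';

const res = await fetch(`${base}/chat/completions`, {
  method: 'POST',
  headers: {
    Authorization: `Bearer ${process.env.OPENAI_API_KEY}`,
    'Content-Type': 'application/json',
  },
  body: JSON.stringify({
    model: 'gpt-4o-mini', // hypothetical model id
    messages: [{ role: 'user', content: 'Reply with the single word OK.' }],
  }),
});
// Same output contract as the OpenRouter scripts: the workflow greps for this phrase.
console.log(res.ok ? 'Successful responses: 1' : 'Successful responses: 0');
```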
6 changes: 5 additions & 1 deletion .gitignore
@@ -1,2 +1,6 @@
 .env
-node_modules/
+node_modules/
+__pycache__/
+*.pyc
+*.pyo
+*.pyd