From d728c63bdb21ec56c7fa4fdcfc56006ccb4b4c0b Mon Sep 17 00:00:00 2001 From: Cyril Chan Date: Thu, 20 Nov 2025 23:07:38 +0800 Subject: [PATCH 1/5] feat: add Docker container build configuration - Add docker-compose.yml for orchestrating frontend and backend services - Add Dockerfile.backend for building backend container - Add Dockerfile.frontend for building frontend container - Add .dockerignore to exclude unnecessary files - Add docker documentation and scripts - Add database migration script --- .dockerignore | 72 ++++++ docker-compose.yml | 46 ++++ docker/Dockerfile.backend | 63 +++++ docker/Dockerfile.frontend | 40 ++++ docker/README.md | 220 ++++++++++++++++++ docker/daemon.json.example | 11 + docker/start-docker.sh | 57 +++++ ...01\347\247\273\350\257\264\346\230\216.md" | 73 ++++++ ...17\345\212\240\351\200\237\345\231\250.md" | 82 +++++++ ...43\345\206\263\346\226\271\346\241\210.md" | 172 ++++++++++++++ python/scripts/migrate_db.py | 104 +++++++++ 11 files changed, 940 insertions(+) create mode 100644 .dockerignore create mode 100644 docker-compose.yml create mode 100644 docker/Dockerfile.backend create mode 100644 docker/Dockerfile.frontend create mode 100644 docker/README.md create mode 100644 docker/daemon.json.example create mode 100644 docker/start-docker.sh create mode 100644 "docker/\346\225\260\346\215\256\345\272\223\350\277\201\347\247\273\350\257\264\346\230\216.md" create mode 100644 "docker/\351\205\215\347\275\256Docker\351\225\234\345\203\217\345\212\240\351\200\237\345\231\250.md" create mode 100644 "docker/\351\227\256\351\242\230\345\210\206\346\236\220\344\270\216\350\247\243\345\206\263\346\226\271\346\241\210.md" create mode 100644 python/scripts/migrate_db.py diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 000000000..6055abe40 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,72 @@ +# Git +.git +.gitignore +.gitattributes + +# Documentation +*.md +docs/ +README* + +# IDE +.vscode +.idea +*.swp +*.swo +*~ + +# OS 
+.DS_Store +Thumbs.db + +# Logs +logs/ +*.log + +# Node modules (will be installed in container) +node_modules/ +frontend/node_modules/ + +# Python +__pycache__/ +*.py[cod] +*$py.class +*.so +.Python +python/.venv/ +*.egg-info/ +dist/ +build/ + +# Environment +.env +.env.local +.env.*.local + +# Database +*.db +*.db-journal +valuecell.db + +# LanceDB +lancedb/ + +# Build artifacts +frontend/build/ +frontend/dist/ +*.tsbuildinfo + +# Tauri +frontend/src-tauri/target/ + +# Docker +docker-compose*.yml +Dockerfile* +.dockerignore + +# Other +*.code-workspace +Makefile +start.sh +start.ps1 + diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 000000000..8dba0d3ce --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,46 @@ +services: + backend: + build: + context: . + dockerfile: docker/Dockerfile.backend + container_name: valuecell-backend + ports: + - "8000:8000" + volumes: + - ./python:/app/python + - ./logs:/app/logs + - ./valuecell.db:/app/valuecell.db + - ./lancedb:/app/lancedb + environment: + - API_HOST=0.0.0.0 + - API_PORT=8000 + - CORS_ORIGINS=http://localhost:1420,http://localhost:3000 + env_file: + - .env + restart: unless-stopped + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8000/api/v1/healthz"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 40s + + frontend: + build: + context: . 
+ dockerfile: docker/Dockerfile.frontend + container_name: valuecell-frontend + ports: + - "1420:1420" + volumes: + - ./frontend:/app/frontend + - /app/frontend/node_modules + environment: + - NODE_ENV=development + - VITE_API_BASE_URL=http://localhost:8000/api/v1 + - TAURI_DEV_HOST=0.0.0.0 + depends_on: + - backend + restart: unless-stopped + + diff --git a/docker/Dockerfile.backend b/docker/Dockerfile.backend new file mode 100644 index 000000000..a346c25fb --- /dev/null +++ b/docker/Dockerfile.backend @@ -0,0 +1,63 @@ +FROM docker.1ms.run/astral/uv:python3.12-bookworm-slim + +# Configure apt to use Aliyun mirror (for Debian) +RUN sed -i 's/deb.debian.org/mirrors.aliyun.com/g' /etc/apt/sources.list.d/debian.sources || \ + sed -i 's/deb.debian.org/mirrors.aliyun.com/g' /etc/apt/sources.list || true + +# Install system dependencies including OpenSSL and CA certificates +RUN apt-get update && apt-get install -y \ + curl \ + openssl \ + ca-certificates \ + libssl3 \ + && rm -rf /var/lib/apt/lists/* \ + && update-ca-certificates + +# Install uv +COPY --from=ghcr.io/astral-sh/uv:latest /uv /bin/uv + +WORKDIR /app + +# Copy Python project files +COPY python/pyproject.toml python/uv.lock ./python/ +COPY python/README.md ./python/README.md +COPY python/scripts ./python/scripts +COPY python/valuecell ./python/valuecell +COPY python/configs ./python/configs + +# Configure uv to use PyPI mirror (Tsinghua) +# Create pip config for uv to use mirror +RUN mkdir -p /root/.pip && \ + echo "[global]" > /root/.pip/pip.conf && \ + echo "index-url = https://pypi.tuna.tsinghua.edu.cn/simple" >> /root/.pip/pip.conf && \ + echo "[install]" >> /root/.pip/pip.conf && \ + echo "trusted-host = pypi.tuna.tsinghua.edu.cn" >> /root/.pip/pip.conf + +# Also set environment variable for uv +ENV UV_INDEX_URL=https://pypi.tuna.tsinghua.edu.cn/simple +ENV PIP_INDEX_URL=https://pypi.tuna.tsinghua.edu.cn/simple + +# Pre-cache the application dependencies +# Try with --locked first, fallback to without 
if lockfile is outdated +RUN --mount=type=cache,target=/root/.cache/uv \ + cd python && (uv sync --locked --no-install-project || uv sync --no-install-project) + +# Install the application dependencies +RUN --mount=type=cache,target=/root/.cache/uv \ + cd python && (uv sync --locked || uv sync) + +# Create logs directory +RUN mkdir -p /app/logs + +# Note: Database initialization will happen at runtime if needed + +EXPOSE 8000 + +# Health check +HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \ + CMD curl -f http://localhost:8000/api/v1/healthz || exit 1 + +# Run the backend server +WORKDIR /app/python +CMD ["uv", "run", "-m", "valuecell.server.main"] + diff --git a/docker/Dockerfile.frontend b/docker/Dockerfile.frontend new file mode 100644 index 000000000..328863c0f --- /dev/null +++ b/docker/Dockerfile.frontend @@ -0,0 +1,40 @@ +# Use bun image from domestic mirror (docker.1ms.run) +FROM docker.1ms.run/oven/bun:1.3.0-slim + +# Configure apt to use Aliyun mirror (for Debian) +RUN sed -i 's/deb.debian.org/mirrors.aliyun.com/g' /etc/apt/sources.list.d/debian.sources || \ + sed -i 's/deb.debian.org/mirrors.aliyun.com/g' /etc/apt/sources.list || true + +# Install system dependencies +RUN apt-get update && apt-get install -y \ + curl \ + && rm -rf /var/lib/apt/lists/* + +WORKDIR /app + +# Copy frontend files +COPY frontend/package.json frontend/bun.lock ./frontend/ + +# Configure bun to use npm registry mirror (Taobao) +RUN echo "registry=https://registry.npmmirror.com" > /root/.npmrc && \ + echo "_authToken=" >> /root/.npmrc + +# Set environment variable for bun registry +ENV BUN_CONFIG_REGISTRY=https://registry.npmmirror.com + +# Install dependencies +RUN cd frontend && bun install --frozen-lockfile + +# Copy frontend source code +COPY frontend ./frontend + +EXPOSE 1420 + +# Health check +HEALTHCHECK --interval=30s --timeout=10s --start-period=20s --retries=3 \ + CMD curl -f http://localhost:1420 || exit 1 + +# Run the frontend dev server 
+WORKDIR /app/frontend +CMD ["bun", "run", "dev"] + diff --git a/docker/README.md b/docker/README.md new file mode 100644 index 000000000..4586ab4b4 --- /dev/null +++ b/docker/README.md @@ -0,0 +1,220 @@ +# Docker 部署指南 + +本项目支持使用 Docker 容器运行前端和后端服务。 + +## 快速开始 + +### 1. 配置 Docker 镜像加速器(推荐) + +为了加快 Docker 镜像拉取速度,建议配置国内镜像源。 + +#### Windows/Mac (Docker Desktop) + +1. 打开 Docker Desktop +2. 进入 Settings → Docker Engine +3. 添加以下配置: + +```json +{ + "registry-mirrors": [ + "https://docker.mirrors.ustc.edu.cn", + "https://hub-mirror.c.163.com", + "https://mirror.baidubce.com" + ] +} +``` + +4. 点击 "Apply & Restart" + +#### Linux + +创建或编辑 `/etc/docker/daemon.json`: + +```bash +sudo mkdir -p /etc/docker +sudo tee /etc/docker/daemon.json <<-'EOF' +{ + "registry-mirrors": [ + "https://docker.mirrors.ustc.edu.cn", + "https://hub-mirror.c.163.com", + "https://mirror.baidubce.com" + ] +} +EOF +sudo systemctl daemon-reload +sudo systemctl restart docker +``` + +### 2. 准备环境文件 + +在项目根目录创建 `.env` 文件(如果不存在),包含必要的环境变量: + +```bash +# API 配置 +API_HOST=0.0.0.0 +API_PORT=8000 + +# 数据库配置(可选) +VALUECELL_SQLITE_DB=sqlite:///valuecell.db + +# CORS 配置 +CORS_ORIGINS=http://localhost:1420,http://localhost:3000 +``` + +### 3. 构建并启动服务 + +```bash +# 构建并启动所有服务 +docker-compose up -d + +# 查看日志 +docker-compose logs -f + +# 只启动后端 +docker-compose up -d backend + +# 只启动前端 +docker-compose up -d frontend +``` + +### 4. 访问服务 + +- **前端**: http://localhost:1420 +- **后端 API**: http://localhost:8000 +- **API 文档**: http://localhost:8000/docs + +### 5. 
停止服务 + +```bash +# 停止所有服务 +docker-compose down + +# 停止并删除卷 +docker-compose down -v +``` + +## 服务说明 + +### Backend 服务 + +- **端口**: 8000 +- **镜像**: 基于 `ghcr.io/astral-sh/uv:python3.12-bookworm-slim` +- **工作目录**: `/app/python` +- **启动命令**: `uv run -m valuecell.server.main` +- **PyPI 镜像**: 已配置使用清华大学镜像源 + +### Frontend 服务 + +- **端口**: 1420 +- **镜像**: 基于 `oven/bun:1.3.0-slim` +- **工作目录**: `/app/frontend` +- **启动命令**: `bun run dev` +- **NPM 镜像**: 已配置使用淘宝镜像源 + +## 国内镜像源配置 + +Dockerfile 已自动配置以下国内镜像源: + +- **APT (Debian)**: 阿里云镜像 +- **PyPI (Python)**: 清华大学镜像 +- **NPM (Node.js)**: 淘宝镜像 + +## 数据持久化 + +以下目录/文件会被挂载到容器中,数据会持久化: + +- `./python` → `/app/python` (后端代码) +- `./logs` → `/app/logs` (日志文件) +- `./valuecell.db` → `/app/valuecell.db` (数据库) +- `./lancedb` → `/app/lancedb` (LanceDB 数据) +- `./frontend` → `/app/frontend` (前端代码) + +## 开发模式 + +在开发模式下,代码更改会自动反映到容器中(通过卷挂载): + +```bash +# 启动开发环境 +docker-compose up + +# 在另一个终端中查看日志 +docker-compose logs -f frontend +docker-compose logs -f backend +``` + +## 生产部署 + +对于生产环境,建议: + +1. 修改 `docker-compose.yml` 中的端口映射 +2. 使用环境变量文件管理配置 +3. 配置反向代理(如 Nginx) +4. 使用 Docker secrets 管理敏感信息 +5. 考虑使用多阶段构建优化镜像大小 + +## 故障排查 + +### 查看容器状态 + +```bash +docker-compose ps +``` + +### 查看容器日志 + +```bash +# 所有服务 +docker-compose logs + +# 特定服务 +docker-compose logs backend +docker-compose logs frontend + +# 实时日志 +docker-compose logs -f +``` + +### 进入容器调试 + +```bash +# 进入后端容器 +docker-compose exec backend bash + +# 进入前端容器 +docker-compose exec frontend sh +``` + +### 重建镜像 + +```bash +# 强制重建 +docker-compose build --no-cache + +# 重建并启动 +docker-compose up -d --build +``` + +### 网络问题 + +如果遇到网络连接问题: + +1. 检查 Docker 镜像加速器配置是否正确 +2. 
尝试使用代理: + ```bash + export HTTP_PROXY=http://your-proxy:port + export HTTPS_PROXY=http://your-proxy:port + docker-compose build + ``` + +## 环境变量 + +可以通过 `.env` 文件或 `docker-compose.yml` 中的 `environment` 部分配置环境变量。 + +常用环境变量: + +- `API_HOST`: 后端 API 主机地址(默认: 0.0.0.0) +- `API_PORT`: 后端 API 端口(默认: 8000) +- `CORS_ORIGINS`: CORS 允许的源(逗号分隔) +- `VALUECELL_SQLITE_DB`: SQLite 数据库路径 +- `UV_INDEX_URL`: PyPI 镜像地址(已配置为清华源) +- `BUN_CONFIG_REGISTRY`: NPM 镜像地址(已配置为淘宝源) diff --git a/docker/daemon.json.example b/docker/daemon.json.example new file mode 100644 index 000000000..51e08c333 --- /dev/null +++ b/docker/daemon.json.example @@ -0,0 +1,11 @@ +{ + "registry-mirrors": [ + "https://docker.mirrors.ustc.edu.cn", + "https://hub-mirror.c.163.com", + "https://mirror.baidubce.com" + ], + "insecure-registries": [], + "debug": false, + "experimental": false +} + diff --git a/docker/start-docker.sh b/docker/start-docker.sh new file mode 100644 index 000000000..476f14931 --- /dev/null +++ b/docker/start-docker.sh @@ -0,0 +1,57 @@ +#!/bin/bash +# Docker 快速启动脚本 + +set -e + +echo "==========================================" +echo "ValueCell Docker 启动脚本" +echo "==========================================" + +# 检查 Docker 是否安装 +if ! command -v docker &> /dev/null; then + echo "错误: 未找到 Docker。请先安装 Docker。" + exit 1 +fi + +# 检查 Docker Compose 是否安装 +if ! command -v docker-compose &> /dev/null && ! docker compose version &> /dev/null; then + echo "错误: 未找到 Docker Compose。请先安装 Docker Compose。" + exit 1 +fi + +# 检查 .env 文件 +if [ ! -f .env ]; then + echo "警告: 未找到 .env 文件。将使用默认配置。" + echo "建议创建 .env 文件并配置必要的环境变量。" +fi + +# 构建并启动服务 +echo "" +echo "构建 Docker 镜像..." +docker-compose build + +echo "" +echo "启动服务..." +docker-compose up -d + +echo "" +echo "等待服务启动..." +sleep 5 + +# 检查服务状态 +echo "" +echo "服务状态:" +docker-compose ps + +echo "" +echo "==========================================" +echo "服务已启动!" 
+echo "前端: http://localhost:1420" +echo "后端 API: http://localhost:8000" +echo "API 文档: http://localhost:8000/docs" +echo "==========================================" +echo "" +echo "查看日志: docker-compose logs -f" +echo "停止服务: docker-compose down" +echo "" + diff --git "a/docker/\346\225\260\346\215\256\345\272\223\350\277\201\347\247\273\350\257\264\346\230\216.md" "b/docker/\346\225\260\346\215\256\345\272\223\350\277\201\347\247\273\350\257\264\346\230\216.md" new file mode 100644 index 000000000..a0b087eea --- /dev/null +++ "b/docker/\346\225\260\346\215\256\345\272\223\350\277\201\347\247\273\350\257\264\346\230\216.md" @@ -0,0 +1,73 @@ +# 数据库迁移说明 + +## 问题描述 + +在运行策略相关功能时,遇到以下错误: + +``` +sqlite3.OperationalError: no such column: strategy_portfolio_views.total_realized_pnl +``` + +## 原因分析 + +数据库表 `strategy_portfolio_views` 缺少以下列: +- `total_realized_pnl` - 已实现盈亏 +- `gross_exposure` - 总敞口 +- `net_exposure` - 净敞口 + +这些列在代码模型中已定义,但现有数据库表结构未包含这些列。 + +## 解决方案 + +已创建数据库迁移脚本 `python/scripts/migrate_db.py`,用于自动添加缺失的列。 + +### 执行迁移 + +在容器中执行: + +```bash +docker compose exec backend uv run python scripts/migrate_db.py +``` + +或在本地执行: + +```bash +cd python +uv run python scripts/migrate_db.py +``` + +### 迁移脚本功能 + +1. 检查列是否存在(避免重复添加) +2. 安全地添加缺失的列 +3. 保留现有数据 +4. 
提供详细的日志输出 + +## 已添加的列 + +- `total_realized_pnl` (NUMERIC(20, 8), nullable) +- `gross_exposure` (NUMERIC(20, 8), nullable) +- `net_exposure` (NUMERIC(20, 8), nullable) + +## 验证 + +迁移完成后,重启后端容器: + +```bash +docker compose restart backend +``` + +然后测试相关 API 端点: +- `/api/v1/strategies/portfolio_summary` +- `/api/v1/strategies/holding_price_curve` + +## 注意事项 + +- 迁移脚本是幂等的,可以安全地多次运行 +- 迁移不会删除或修改现有数据 +- 建议在迁移前备份数据库文件(`valuecell.db`) + +## 未来改进 + +建议集成数据库迁移工具(如 Alembic)来管理更复杂的数据库架构变更。 + diff --git "a/docker/\351\205\215\347\275\256Docker\351\225\234\345\203\217\345\212\240\351\200\237\345\231\250.md" "b/docker/\351\205\215\347\275\256Docker\351\225\234\345\203\217\345\212\240\351\200\237\345\231\250.md" new file mode 100644 index 000000000..b333f7e33 --- /dev/null +++ "b/docker/\351\205\215\347\275\256Docker\351\225\234\345\203\217\345\212\240\351\200\237\345\231\250.md" @@ -0,0 +1,82 @@ +# 配置 Docker 镜像加速器 + +由于网络原因,拉取 Docker Hub 镜像可能较慢或失败。请按照以下步骤配置 Docker 镜像加速器。 + +## Windows (Docker Desktop) + +1. 打开 Docker Desktop +2. 点击右上角的 **设置** (Settings) 图标 +3. 选择 **Docker Engine** +4. 在 JSON 配置中添加以下内容: + +```json +{ + "registry-mirrors": [ + "https://docker.mirrors.ustc.edu.cn", + "https://hub-mirror.c.163.com", + "https://mirror.baidubce.com" + ] +} +``` + +5. 点击 **Apply & Restart** 应用并重启 + +## Linux + +1. 创建或编辑 `/etc/docker/daemon.json`: + +```bash +sudo mkdir -p /etc/docker +sudo tee /etc/docker/daemon.json <<-'EOF' +{ + "registry-mirrors": [ + "https://docker.mirrors.ustc.edu.cn", + "https://hub-mirror.c.163.com", + "https://mirror.baidubce.com" + ] +} +EOF +``` + +2. 重启 Docker 服务: + +```bash +sudo systemctl daemon-reload +sudo systemctl restart docker +``` + +## macOS (Docker Desktop) + +1. 打开 Docker Desktop +2. 点击菜单栏的 **Docker** → **Preferences** (或 **Settings**) +3. 选择 **Docker Engine** +4. 在 JSON 配置中添加镜像加速器配置(同 Windows) +5. 
点击 **Apply & Restart** + +## 验证配置 + +运行以下命令验证配置是否生效: + +```bash +docker info | grep -A 10 "Registry Mirrors" +``` + +如果看到配置的镜像地址,说明配置成功。 + +## 其他镜像加速器 + +如果上述镜像源不可用,可以尝试: + +- **阿里云镜像加速器**(需要登录阿里云获取专属地址): + - 访问:https://cr.console.aliyun.com/cn-hangzhou/instances/mirrors + - 获取专属加速地址,格式:`https://.mirror.aliyuncs.com` + +- **腾讯云镜像加速器**: + - `https://mirror.ccs.tencentyun.com` + +配置完成后,重新尝试构建: + +```bash +docker compose build frontend +``` + diff --git "a/docker/\351\227\256\351\242\230\345\210\206\346\236\220\344\270\216\350\247\243\345\206\263\346\226\271\346\241\210.md" "b/docker/\351\227\256\351\242\230\345\210\206\346\236\220\344\270\216\350\247\243\345\206\263\346\226\271\346\241\210.md" new file mode 100644 index 000000000..83b543e75 --- /dev/null +++ "b/docker/\351\227\256\351\242\230\345\210\206\346\236\220\344\270\216\350\247\243\345\206\263\346\226\271\346\241\210.md" @@ -0,0 +1,172 @@ +# 后端容器问题分析与解决方案 + +## 问题总结 + +根据后端容器日志分析,发现以下问题: + +### 1. OpenSSL/TLS 连接错误 ⚠️ + +**错误信息**: +``` +curl: (35) TLS connect error: error:00000000:invalid library (0):OPENSSL_internal:invalid library (0) +``` + +**影响**: +- yfinance 无法获取某些股票数据(如 ^IXIC, ^HSI) +- 部分 API 请求失败 + +**解决方案**: +- ✅ 已在 Dockerfile.backend 中添加完整的 OpenSSL 库支持 +- 需要重新构建镜像:`docker compose build backend` + +### 2. 网络连接问题 ⚠️ + +**错误信息**: +``` +aiohttp.client_exceptions.ClientConnectorError: Cannot connect to host fapi.binance.com:443 ssl:default [None] +``` + +**影响**: +- 无法连接到 Binance API 获取市场数据 +- 策略代理无法获取加密货币价格 + +**可能原因**: +- 容器网络配置问题 +- 防火墙或代理设置 +- DNS 解析问题 + +**解决方案**: +1. 检查容器网络配置 +2. 确保容器可以访问外部网络 +3. 如果使用代理,需要在 docker-compose.yml 中配置代理设置 + +### 3. API 密钥配置问题 ⚠️ + +**错误信息**: +``` +Error code: 401 - Api key is invalid +``` + +**影响**: +- SiliconFlow API 调用失败 +- LLM 功能无法使用 + +**解决方案**: +1. 检查 `.env` 文件中的 API 密钥配置 +2. 确保 SiliconFlow API 密钥有效 +3. 或者切换到其他已配置的 LLM 提供商(如 openrouter) + +### 4. 
500 内部服务器错误 ⚠️ + +**错误接口**: +- `/api/v1/strategies/portfolio_summary` +- `/api/v1/strategies/holding_price_curve` + +**相关日志**: +``` +WARNING - Failed to persist strategy portfolio snapshot for strategy-32b072e7897a494490d3397d2e8a162f +``` + +**可能原因**: +- 数据库连接问题 +- 数据持久化逻辑错误 +- 策略数据不完整 + +**解决方案**: +- 检查数据库连接 +- 查看详细错误日志 +- 确保策略数据已正确初始化 + +### 5. 警告信息(可忽略)ℹ️ + +**警告**: +``` +warning: The package akracer==0.0.13 does not have an extra named py-mini-racer +``` + +**说明**: +- 这是依赖包的警告,不影响运行 +- 可以忽略 + +## 修复步骤 + +### 步骤 1:重新构建后端镜像 + +```bash +# 停止当前容器 +docker compose down + +# 重新构建后端镜像(包含 OpenSSL 修复) +docker compose build backend + +# 启动服务 +docker compose up -d +``` + +### 步骤 2:检查网络连接 + +```bash +# 进入后端容器 +docker compose exec backend bash + +# 测试网络连接 +curl -I https://fapi.binance.com +curl -I https://api.siliconflow.cn +``` + +### 步骤 3:配置 API 密钥 + +编辑 `.env` 文件,确保包含有效的 API 密钥: + +```bash +# SiliconFlow API 密钥 +SILICONFLOW_API_KEY=your-api-key-here + +# 或者使用其他提供商 +OPENROUTER_API_KEY=your-api-key-here +GOOGLE_API_KEY=your-api-key-here +``` + +### 步骤 4:验证修复 + +```bash +# 查看日志 +docker compose logs -f backend + +# 检查健康状态 +curl http://localhost:8000/api/v1/healthz +``` + +## 网络配置建议 + +如果遇到网络连接问题,可以在 `docker-compose.yml` 中添加网络配置: + +```yaml +services: + backend: + # ... 其他配置 ... + network_mode: bridge + dns: + - 8.8.8.8 + - 8.8.4.4 + # 如果需要代理 + # environment: + # - HTTP_PROXY=http://proxy:port + # - HTTPS_PROXY=http://proxy:port +``` + +## 监控建议 + +定期检查以下指标: +1. 容器健康状态:`docker compose ps` +2. 错误日志:`docker compose logs backend | grep ERROR` +3. API 响应时间:监控 `/api/v1/healthz` 端点 +4. 网络连接:测试外部 API 连接 + +## 联系支持 + +如果问题持续存在,请提供: +1. 完整的容器日志:`docker compose logs backend > backend.log` +2. 网络测试结果 +3. 环境配置信息 + diff --git a/python/scripts/migrate_db.py b/python/scripts/migrate_db.py new file mode 100644 index 000000000..4f21065af --- /dev/null +++ b/python/scripts/migrate_db.py @@ -0,0 +1,104 @@ +""" +Database migration script to add missing columns to existing tables. 
+ +This script handles schema updates for existing databases without losing data. +""" + +import logging +import sys +from pathlib import Path + +from sqlalchemy import inspect, text + +from valuecell.server.config.settings import get_settings +from valuecell.server.db.connection import get_database_manager + +logging.basicConfig( + level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s" +) +logger = logging.getLogger(__name__) + + +def column_exists(engine, table_name: str, column_name: str) -> bool: + """Check if a column exists in a table.""" + inspector = inspect(engine) + columns = [col["name"] for col in inspector.get_columns(table_name)] + return column_name in columns + + +def add_column_if_not_exists(engine, table_name: str, column_name: str, column_type: str, nullable: bool = True): + """Add a column to a table if it doesn't exist.""" + if column_exists(engine, table_name, column_name): + logger.info(f"Column {table_name}.{column_name} already exists, skipping") + return True + + try: + nullable_clause = "" if nullable else " NOT NULL" + with engine.connect() as conn: + conn.execute( + text(f"ALTER TABLE {table_name} ADD COLUMN {column_name} {column_type}{nullable_clause}") + ) + conn.commit() + logger.info(f"Added column {table_name}.{column_name}") + return True + except Exception as e: + logger.error(f"Failed to add column {table_name}.{column_name}: {e}") + return False + + +def migrate_strategy_portfolio_views(): + """Migrate strategy_portfolio_views table to add missing columns.""" + db_manager = get_database_manager() + engine = db_manager.get_engine() + + # Check if table exists + inspector = inspect(engine) + if "strategy_portfolio_views" not in inspector.get_table_names(): + logger.warning("Table strategy_portfolio_views does not exist, skipping migration") + return True + + logger.info("Migrating strategy_portfolio_views table...") + + # Add missing columns + migrations = [ + ("total_realized_pnl", "NUMERIC(20, 8)", 
True), + ("gross_exposure", "NUMERIC(20, 8)", True), + ("net_exposure", "NUMERIC(20, 8)", True), + ] + + success = True + for column_name, column_type, nullable in migrations: + if not add_column_if_not_exists(engine, "strategy_portfolio_views", column_name, column_type, nullable): + success = False + + if success: + logger.info("Migration completed successfully") + else: + logger.error("Migration completed with errors") + + return success + + +def main(): + """Run database migrations.""" + logger.info("Starting database migration...") + logger.info("=" * 50) + + try: + # Migrate strategy_portfolio_views + if not migrate_strategy_portfolio_views(): + logger.error("Migration failed") + sys.exit(1) + + logger.info("=" * 50) + logger.info("All migrations completed successfully!") + sys.exit(0) + + except Exception as e: + logger.exception(f"Migration failed with error: {e}") + sys.exit(1) + + +if __name__ == "__main__": + main() + From 3a596b36e89a29dade5d6fbe4fed36e2222af13c Mon Sep 17 00:00:00 2001 From: Cyril Chan Date: Thu, 20 Nov 2025 23:53:54 +0800 Subject: [PATCH 2/5] Add migration for strategy_details missing columns (compose_id, instruction_id, avg_exec_price, realized_pnl, etc.) 
--- python/scripts/migrate_db.py | 82 ++++++++++++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) diff --git a/python/scripts/migrate_db.py b/python/scripts/migrate_db.py index 4f21065af..e58c7a544 100644 --- a/python/scripts/migrate_db.py +++ b/python/scripts/migrate_db.py @@ -46,6 +46,35 @@ def add_column_if_not_exists(engine, table_name: str, column_name: str, column_t return False +def index_exists(engine, table_name: str, index_name: str) -> bool: + """Check if an index exists on a table.""" + inspector = inspect(engine) + indexes = [idx["name"] for idx in inspector.get_indexes(table_name)] + return index_name in indexes + + +def create_index_if_not_exists(engine, table_name: str, column_name: str, index_name: str = None): + """Create an index on a column if it doesn't exist.""" + if index_name is None: + index_name = f"ix_{table_name}_{column_name}" + + if index_exists(engine, table_name, index_name): + logger.info(f"Index {index_name} already exists, skipping") + return True + + try: + with engine.connect() as conn: + conn.execute( + text(f"CREATE INDEX IF NOT EXISTS {index_name} ON {table_name}({column_name})") + ) + conn.commit() + logger.info(f"Created index {index_name} on {table_name}.{column_name}") + return True + except Exception as e: + logger.error(f"Failed to create index {index_name}: {e}") + return False + + def migrate_strategy_portfolio_views(): """Migrate strategy_portfolio_views table to add missing columns.""" db_manager = get_database_manager() @@ -79,6 +108,54 @@ def migrate_strategy_portfolio_views(): return success +def migrate_strategy_details(): + """Migrate strategy_details table to add missing columns.""" + db_manager = get_database_manager() + engine = db_manager.get_engine() + + # Check if table exists + inspector = inspect(engine) + if "strategy_details" not in inspector.get_table_names(): + logger.warning("Table strategy_details does not exist, skipping migration") + return True + + logger.info("Migrating 
strategy_details table...") + + # Columns to add with their types and whether they need an index + # (name, type, nullable, needs_index) + columns_to_add = [ + ("compose_id", "VARCHAR(200)", True, True), + ("instruction_id", "VARCHAR(200)", True, True), + ("avg_exec_price", "NUMERIC(20, 8)", True, False), + ("realized_pnl", "NUMERIC(20, 8)", True, False), + ("realized_pnl_pct", "NUMERIC(10, 6)", True, False), + ("notional_entry", "NUMERIC(20, 8)", True, False), + ("notional_exit", "NUMERIC(20, 8)", True, False), + ("fee_cost", "NUMERIC(20, 8)", True, False), + ("entry_time", "TIMESTAMP", True, False), + ("exit_time", "TIMESTAMP", True, False), + ] + + success = True + for column_name, column_type, nullable, needs_index in columns_to_add: + # Add column if it doesn't exist + if not add_column_if_not_exists(engine, "strategy_details", column_name, column_type, nullable): + success = False + + # Create index if column exists and needs index + if column_exists(engine, "strategy_details", column_name) and needs_index: + index_name = f"ix_strategy_details_{column_name}" + if not create_index_if_not_exists(engine, "strategy_details", column_name, index_name): + logger.warning(f"Failed to create index on {column_name}, but column exists") + + if success: + logger.info("Migration completed successfully") + else: + logger.error("Migration completed with errors") + + return success + + def main(): """Run database migrations.""" logger.info("Starting database migration...") @@ -90,6 +167,11 @@ def main(): logger.error("Migration failed") sys.exit(1) + # Migrate strategy_details + if not migrate_strategy_details(): + logger.error("Migration failed") + sys.exit(1) + logger.info("=" * 50) logger.info("All migrations completed successfully!") sys.exit(0) From 35d0c19c0b34f0d2e5b8fa21b5af194fb134113b Mon Sep 17 00:00:00 2001 From: Cyril Chan Date: Thu, 27 Nov 2025 00:01:44 +0800 Subject: [PATCH 3/5] refactor(docker): update database path handling and remove deprecated files - 
Changed the database volume mapping in `docker-compose.yml` to use a dedicated `data` directory. - Added a new environment variable `VALUECELL_SQLITE_DB` for SQLite database configuration. - Updated `Dockerfile.backend` to include an entrypoint script for database initialization. - Removed obsolete `daemon.json.example` and related documentation files. - Enhanced database path resolution in `db.py` to support both URL and filesystem path formats. --- docker-compose.yml | 3 +- docker/Dockerfile.backend | 10 +- docker/README.md | 217 ++++++++---------- docker/daemon.json.example | 11 - docker/entrypoint.sh | 71 ++++++ docker/start-docker.sh | 43 ++-- ...01\347\247\273\350\257\264\346\230\216.md" | 73 ------ ...17\345\212\240\351\200\237\345\231\250.md" | 82 ------- ...43\345\206\263\346\226\271\346\241\210.md" | 172 -------------- python/valuecell/utils/db.py | 26 ++- 10 files changed, 214 insertions(+), 494 deletions(-) delete mode 100644 docker/daemon.json.example create mode 100644 docker/entrypoint.sh delete mode 100644 "docker/\346\225\260\346\215\256\345\272\223\350\277\201\347\247\273\350\257\264\346\230\216.md" delete mode 100644 "docker/\351\205\215\347\275\256Docker\351\225\234\345\203\217\345\212\240\351\200\237\345\231\250.md" delete mode 100644 "docker/\351\227\256\351\242\230\345\210\206\346\236\220\344\270\216\350\247\243\345\206\263\346\226\271\346\241\210.md" diff --git a/docker-compose.yml b/docker-compose.yml index 8dba0d3ce..2ae3ae6f5 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -9,12 +9,13 @@ services: volumes: - ./python:/app/python - ./logs:/app/logs - - ./valuecell.db:/app/valuecell.db + - ./data:/app/data - ./lancedb:/app/lancedb environment: - API_HOST=0.0.0.0 - API_PORT=8000 - CORS_ORIGINS=http://localhost:1420,http://localhost:3000 + - VALUECELL_SQLITE_DB=sqlite:////app/data/valuecell.db env_file: - .env restart: unless-stopped diff --git a/docker/Dockerfile.backend b/docker/Dockerfile.backend index a346c25fb..1356093b6 
100644 --- a/docker/Dockerfile.backend +++ b/docker/Dockerfile.backend @@ -10,11 +10,10 @@ RUN apt-get update && apt-get install -y \ openssl \ ca-certificates \ libssl3 \ + sqlite3 \ && rm -rf /var/lib/apt/lists/* \ && update-ca-certificates -# Install uv -COPY --from=ghcr.io/astral-sh/uv:latest /uv /bin/uv WORKDIR /app @@ -49,7 +48,12 @@ RUN --mount=type=cache,target=/root/.cache/uv \ # Create logs directory RUN mkdir -p /app/logs -# Note: Database initialization will happen at runtime if needed +# Copy entrypoint script +COPY docker/entrypoint.sh /app/entrypoint.sh +RUN chmod +x /app/entrypoint.sh + +# Set entrypoint +ENTRYPOINT ["/app/entrypoint.sh"] EXPOSE 8000 diff --git a/docker/README.md b/docker/README.md index 4586ab4b4..61f534cef 100644 --- a/docker/README.md +++ b/docker/README.md @@ -1,220 +1,183 @@ -# Docker 部署指南 +# Docker Deployment Guide -本项目支持使用 Docker 容器运行前端和后端服务。 +This project supports running frontend and backend services using Docker containers. -## 快速开始 +## Quick Start -### 1. 配置 Docker 镜像加速器(推荐) - -为了加快 Docker 镜像拉取速度,建议配置国内镜像源。 - -#### Windows/Mac (Docker Desktop) - -1. 打开 Docker Desktop -2. 进入 Settings → Docker Engine -3. 添加以下配置: - -```json -{ - "registry-mirrors": [ - "https://docker.mirrors.ustc.edu.cn", - "https://hub-mirror.c.163.com", - "https://mirror.baidubce.com" - ] -} -``` - -4. 点击 "Apply & Restart" - -#### Linux - -创建或编辑 `/etc/docker/daemon.json`: +### 1. Configure Environment Variables ```bash -sudo mkdir -p /etc/docker -sudo tee /etc/docker/daemon.json <<-'EOF' -{ - "registry-mirrors": [ - "https://docker.mirrors.ustc.edu.cn", - "https://hub-mirror.c.163.com", - "https://mirror.baidubce.com" - ] -} -EOF -sudo systemctl daemon-reload -sudo systemctl restart docker +cp .env.example .env ``` -### 2. 准备环境文件 - -在项目根目录创建 `.env` 文件(如果不存在),包含必要的环境变量: +Edit the `.env` file with your API keys and preferences. This configuration file is shared across all agents. See [Configuration Guide](../docs/CONFIGURATION_GUIDE.md) for details. 
-```bash -# API 配置 -API_HOST=0.0.0.0 -API_PORT=8000 - -# 数据库配置(可选) -VALUECELL_SQLITE_DB=sqlite:///valuecell.db +> **Note**: Some runtime environment variables (like `API_HOST`, `API_PORT`, `CORS_ORIGINS`) are already configured in `docker-compose.yml`. -# CORS 配置 -CORS_ORIGINS=http://localhost:1420,http://localhost:3000 -``` - -### 3. 构建并启动服务 +### 2. Build and Start Services ```bash -# 构建并启动所有服务 +# Build and start all services docker-compose up -d -# 查看日志 +# View logs docker-compose logs -f -# 只启动后端 +# Start backend only docker-compose up -d backend -# 只启动前端 +# Start frontend only docker-compose up -d frontend ``` -### 4. 访问服务 +### 3. Access Services -- **前端**: http://localhost:1420 -- **后端 API**: http://localhost:8000 -- **API 文档**: http://localhost:8000/docs +- **Frontend**: http://localhost:1420 +- **Backend API**: http://localhost:8000 +- **API Documentation**: http://localhost:8000/docs -### 5. 停止服务 +### 4. Stop Services ```bash -# 停止所有服务 +# Stop all services docker-compose down -# 停止并删除卷 +# Stop and remove volumes docker-compose down -v ``` -## 服务说明 +## Service Description + +### Backend Service -### Backend 服务 +- **Port**: 8000 +- **Image**: Based on `ghcr.io/astral-sh/uv:python3.12-bookworm-slim` +- **Working Directory**: `/app/python` +- **Entrypoint**: `/app/entrypoint.sh` (automatically initializes database if needed) +- **Start Command**: `uv run -m valuecell.server.main` +- **PyPI Mirror**: Configured to use Tsinghua University mirror source +- **Database**: Automatically initialized on first startup if not exists -- **端口**: 8000 -- **镜像**: 基于 `ghcr.io/astral-sh/uv:python3.12-bookworm-slim` -- **工作目录**: `/app/python` -- **启动命令**: `uv run -m valuecell.server.main` -- **PyPI 镜像**: 已配置使用清华大学镜像源 +### Frontend Service -### Frontend 服务 +- **Port**: 1420 +- **Image**: Based on `oven/bun:1.3.0-slim` +- **Working Directory**: `/app/frontend` +- **Start Command**: `bun run dev` +- **NPM Mirror**: Configured to use Taobao mirror source -- **端口**: 1420 -- **镜像**: 
基于 `oven/bun:1.3.0-slim` -- **工作目录**: `/app/frontend` -- **启动命令**: `bun run dev` -- **NPM 镜像**: 已配置使用淘宝镜像源 +## Mirror Source Configuration -## 国内镜像源配置 +Dockerfiles have automatically configured the following mirror sources for faster downloads: -Dockerfile 已自动配置以下国内镜像源: +- **Docker Images**: Using `docker.1ms.run` mirror for base images (no additional Docker Desktop configuration needed) +- **APT (Debian)**: Alibaba Cloud mirror +- **PyPI (Python)**: Tsinghua University mirror +- **NPM (Node.js)**: Taobao mirror -- **APT (Debian)**: 阿里云镜像 -- **PyPI (Python)**: 清华大学镜像 -- **NPM (Node.js)**: 淘宝镜像 +> **Note**: The Dockerfiles use `docker.1ms.run` mirror for pulling base images, so you don't need to configure Docker Desktop registry mirrors separately. -## 数据持久化 +## Data Persistence -以下目录/文件会被挂载到容器中,数据会持久化: +The following directories/files are mounted to containers, and data will be persisted: -- `./python` → `/app/python` (后端代码) -- `./logs` → `/app/logs` (日志文件) -- `./valuecell.db` → `/app/valuecell.db` (数据库) -- `./lancedb` → `/app/lancedb` (LanceDB 数据) -- `./frontend` → `/app/frontend` (前端代码) +- `./python` → `/app/python` (backend code) +- `./logs` → `/app/logs` (log files) +- `./data` → `/app/data` (database and data files) + - Database file: `./data/valuecell.db` (automatically created if not exists) +- `./lancedb` → `/app/lancedb` (LanceDB data) +- `./frontend` → `/app/frontend` (frontend code) -## 开发模式 +> **Note**: The database is automatically initialized on first startup if it doesn't exist. The entrypoint script checks and initializes the database before starting the server. 
-在开发模式下,代码更改会自动反映到容器中(通过卷挂载): +## Development Mode + +In development mode, code changes are automatically reflected in containers (via volume mounts): ```bash -# 启动开发环境 +# Start development environment docker-compose up -# 在另一个终端中查看日志 +# View logs in another terminal docker-compose logs -f frontend docker-compose logs -f backend ``` -## 生产部署 +## Production Deployment -对于生产环境,建议: +For production environments, it is recommended to: -1. 修改 `docker-compose.yml` 中的端口映射 -2. 使用环境变量文件管理配置 -3. 配置反向代理(如 Nginx) -4. 使用 Docker secrets 管理敏感信息 -5. 考虑使用多阶段构建优化镜像大小 +1. Modify port mappings in `docker-compose.yml` +2. Use environment variable files to manage configuration +3. Configure reverse proxy (such as Nginx) +4. Use Docker secrets to manage sensitive information +5. Consider using multi-stage builds to optimize image size -## 故障排查 +## Troubleshooting -### 查看容器状态 +### View Container Status ```bash docker-compose ps ``` -### 查看容器日志 +### View Container Logs ```bash -# 所有服务 +# All services docker-compose logs -# 特定服务 +# Specific service docker-compose logs backend docker-compose logs frontend -# 实时日志 +# Real-time logs docker-compose logs -f ``` -### 进入容器调试 +### Enter Container for Debugging ```bash -# 进入后端容器 +# Enter backend container docker-compose exec backend bash -# 进入前端容器 +# Enter frontend container docker-compose exec frontend sh ``` -### 重建镜像 +### Rebuild Images ```bash -# 强制重建 +# Force rebuild docker-compose build --no-cache -# 重建并启动 +# Rebuild and start docker-compose up -d --build ``` -### 网络问题 +### Network Issues -如果遇到网络连接问题: +If encountering network connection issues: -1. 检查 Docker 镜像加速器配置是否正确 -2. 尝试使用代理: +1. The Dockerfiles already use `docker.1ms.run` mirror for base images, which should provide good download speeds +2. 
If you still experience issues, try using a proxy: ```bash export HTTP_PROXY=http://your-proxy:port export HTTPS_PROXY=http://your-proxy:port docker-compose build ``` -## 环境变量 +## Environment Variables + +Environment variables can be configured via the `.env` file. See [Configuration Guide](../docs/CONFIGURATION_GUIDE.md) for details. + +> **Note**: Some runtime variables (`API_HOST`, `API_PORT`, `CORS_ORIGINS`) are configured in `docker-compose.yml`. + +### Build-time Environment Variables -可以通过 `.env` 文件或 `docker-compose.yml` 中的 `environment` 部分配置环境变量。 +These variables are already configured in Dockerfiles and used during image build: -常用环境变量: +- `UV_INDEX_URL`: PyPI mirror address (configured in `Dockerfile.backend` as Tsinghua source) +- `BUN_CONFIG_REGISTRY`: NPM mirror address (configured in `Dockerfile.frontend` as Taobao source) -- `API_HOST`: 后端 API 主机地址(默认: 0.0.0.0) -- `API_PORT`: 后端 API 端口(默认: 8000) -- `CORS_ORIGINS`: CORS 允许的源(逗号分隔) -- `VALUECELL_SQLITE_DB`: SQLite 数据库路径 -- `UV_INDEX_URL`: PyPI 镜像地址(已配置为清华源) -- `BUN_CONFIG_REGISTRY`: NPM 镜像地址(已配置为淘宝源) +> **Note**: `UV_INDEX_URL` and `BUN_CONFIG_REGISTRY` are build-time variables set in the Dockerfiles. You don't need to configure them in `docker-compose.yml` or `.env` files as they only affect the image build process, not the running containers. 
diff --git a/docker/daemon.json.example b/docker/daemon.json.example deleted file mode 100644 index 51e08c333..000000000 --- a/docker/daemon.json.example +++ /dev/null @@ -1,11 +0,0 @@ -{ - "registry-mirrors": [ - "https://docker.mirrors.ustc.edu.cn", - "https://hub-mirror.c.163.com", - "https://mirror.baidubce.com" - ], - "insecure-registries": [], - "debug": false, - "experimental": false -} - diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh new file mode 100644 index 000000000..3c43c1cf3 --- /dev/null +++ b/docker/entrypoint.sh @@ -0,0 +1,71 @@ +#!/bin/bash +# Docker entrypoint script for backend service +# This script ensures the database is initialized before starting the server + +set -e + +echo "==========================================" +echo "ValueCell Backend Entrypoint" +echo "==========================================" + +# Get database path from environment or use default +DB_PATH="${VALUECELL_SQLITE_DB:-sqlite:///app/valuecell.db}" + +# Extract file path from SQLite URL (remove sqlite:// prefix, preserve leading slash) +if [[ "$DB_PATH" == sqlite:///* ]]; then + DB_FILE="${DB_PATH#sqlite://}" +else + DB_FILE="$DB_PATH" +fi + +echo "Database path: $DB_PATH" +echo "Database file: $DB_FILE" + +# Check if database file exists and is a regular file +if [ -e "$DB_FILE" ]; then + if [ -d "$DB_FILE" ]; then + echo "WARNING: Database path exists but is a directory, not a file!" + echo "Removing directory and creating database file..." + rm -rf "$DB_FILE" + elif [ -f "$DB_FILE" ]; then + echo "Database file exists: $DB_FILE" + # Check if database is valid SQLite file + if command -v sqlite3 &> /dev/null; then + if sqlite3 "$DB_FILE" "SELECT 1;" &> /dev/null; then + echo "Database file is valid SQLite database" + else + echo "WARNING: Database file exists but is not a valid SQLite database" + echo "Removing invalid file and will recreate..." 
+ rm -f "$DB_FILE" + fi + fi + fi +fi + +# Create database directory if it doesn't exist +DB_DIR=$(dirname "$DB_FILE") +if [ "$DB_DIR" != "." ] && [ "$DB_DIR" != "/" ]; then + mkdir -p "$DB_DIR" + echo "Created database directory: $DB_DIR" +fi + +# Initialize database if it doesn't exist +if [ ! -f "$DB_FILE" ]; then + echo "Database file does not exist, initializing..." + cd /app/python + uv run -m valuecell.server.db.init_db || { + echo "ERROR: Database initialization failed" + exit 1 + } + echo "Database initialized successfully" +else + echo "Database file exists, skipping initialization" + # Run migration to ensure schema is up to date +fi + +echo "==========================================" +echo "Starting ValueCell Backend Server..." +echo "==========================================" + +# Execute the main command +exec "$@" diff --git a/docker/start-docker.sh b/docker/start-docker.sh index 476f14931..058aac988 100644 --- a/docker/start-docker.sh +++ b/docker/start-docker.sh @@ -1,57 +1,56 @@ #!/bin/bash -# Docker 快速启动脚本 +# Docker Quick Start Script set -e echo "==========================================" -echo "ValueCell Docker 启动脚本" +echo "ValueCell Docker Startup Script" echo "==========================================" -# 检查 Docker 是否安装 +# Check if Docker is installed if ! command -v docker &> /dev/null; then - echo "错误: 未找到 Docker。请先安装 Docker。" + echo "Error: Docker not found. Please install Docker first." exit 1 fi -# 检查 Docker Compose 是否安装 +# Check if Docker Compose is installed if ! command -v docker-compose &> /dev/null && ! docker compose version &> /dev/null; then - echo "错误: 未找到 Docker Compose。请先安装 Docker Compose。" + echo "Error: Docker Compose not found. Please install Docker Compose first." exit 1 fi -# 检查 .env 文件 +# Check for .env file if [ ! -f .env ]; then - echo "警告: 未找到 .env 文件。将使用默认配置。" - echo "建议创建 .env 文件并配置必要的环境变量。" + echo "Warning: .env file not found. Using default configuration." 
+ echo "It is recommended to create a .env file and configure necessary environment variables." fi -# 构建并启动服务 +# Build and start services echo "" -echo "构建 Docker 镜像..." +echo "Building Docker images..." docker-compose build echo "" -echo "启动服务..." +echo "Starting services..." docker-compose up -d echo "" -echo "等待服务启动..." +echo "Waiting for services to start..." sleep 5 -# 检查服务状态 +# Check service status echo "" -echo "服务状态:" +echo "Service status:" docker-compose ps echo "" echo "==========================================" -echo "服务已启动!" -echo "前端: http://localhost:1420" -echo "后端 API: http://localhost:8000" -echo "API 文档: http://localhost:8000/docs" +echo "Services started!" +echo "Frontend: http://localhost:1420" +echo "Backend API: http://localhost:8000" +echo "API Documentation: http://localhost:8000/docs" echo "==========================================" echo "" -echo "查看日志: docker-compose logs -f" -echo "停止服务: docker-compose down" +echo "View logs: docker-compose logs -f" +echo "Stop services: docker-compose down" echo "" - diff --git "a/docker/\346\225\260\346\215\256\345\272\223\350\277\201\347\247\273\350\257\264\346\230\216.md" "b/docker/\346\225\260\346\215\256\345\272\223\350\277\201\347\247\273\350\257\264\346\230\216.md" deleted file mode 100644 index a0b087eea..000000000 --- "a/docker/\346\225\260\346\215\256\345\272\223\350\277\201\347\247\273\350\257\264\346\230\216.md" +++ /dev/null @@ -1,73 +0,0 @@ -# 数据库迁移说明 - -## 问题描述 - -在运行策略相关功能时,遇到以下错误: - -``` -sqlite3.OperationalError: no such column: strategy_portfolio_views.total_realized_pnl -``` - -## 原因分析 - -数据库表 `strategy_portfolio_views` 缺少以下列: -- `total_realized_pnl` - 已实现盈亏 -- `gross_exposure` - 总敞口 -- `net_exposure` - 净敞口 - -这些列在代码模型中已定义,但现有数据库表结构未包含这些列。 - -## 解决方案 - -已创建数据库迁移脚本 `python/scripts/migrate_db.py`,用于自动添加缺失的列。 - -### 执行迁移 - -在容器中执行: - -```bash -docker compose exec backend uv run python scripts/migrate_db.py -``` - -或在本地执行: - -```bash -cd python -uv run python scripts/migrate_db.py -``` 
- -### 迁移脚本功能 - -1. 检查列是否存在(避免重复添加) -2. 安全地添加缺失的列 -3. 保留现有数据 -4. 提供详细的日志输出 - -## 已添加的列 - -- `total_realized_pnl` (NUMERIC(20, 8), nullable) -- `gross_exposure` (NUMERIC(20, 8), nullable) -- `net_exposure` (NUMERIC(20, 8), nullable) - -## 验证 - -迁移完成后,重启后端容器: - -```bash -docker compose restart backend -``` - -然后测试相关 API 端点: -- `/api/v1/strategies/portfolio_summary` -- `/api/v1/strategies/holding_price_curve` - -## 注意事项 - -- 迁移脚本是幂等的,可以安全地多次运行 -- 迁移不会删除或修改现有数据 -- 建议在迁移前备份数据库文件(`valuecell.db`) - -## 未来改进 - -建议集成数据库迁移工具(如 Alembic)来管理更复杂的数据库架构变更。 - diff --git "a/docker/\351\205\215\347\275\256Docker\351\225\234\345\203\217\345\212\240\351\200\237\345\231\250.md" "b/docker/\351\205\215\347\275\256Docker\351\225\234\345\203\217\345\212\240\351\200\237\345\231\250.md" deleted file mode 100644 index b333f7e33..000000000 --- "a/docker/\351\205\215\347\275\256Docker\351\225\234\345\203\217\345\212\240\351\200\237\345\231\250.md" +++ /dev/null @@ -1,82 +0,0 @@ -# 配置 Docker 镜像加速器 - -由于网络原因,拉取 Docker Hub 镜像可能较慢或失败。请按照以下步骤配置 Docker 镜像加速器。 - -## Windows (Docker Desktop) - -1. 打开 Docker Desktop -2. 点击右上角的 **设置** (Settings) 图标 -3. 选择 **Docker Engine** -4. 在 JSON 配置中添加以下内容: - -```json -{ - "registry-mirrors": [ - "https://docker.mirrors.ustc.edu.cn", - "https://hub-mirror.c.163.com", - "https://mirror.baidubce.com" - ] -} -``` - -5. 点击 **Apply & Restart** 应用并重启 - -## Linux - -1. 创建或编辑 `/etc/docker/daemon.json`: - -```bash -sudo mkdir -p /etc/docker -sudo tee /etc/docker/daemon.json <<-'EOF' -{ - "registry-mirrors": [ - "https://docker.mirrors.ustc.edu.cn", - "https://hub-mirror.c.163.com", - "https://mirror.baidubce.com" - ] -} -EOF -``` - -2. 重启 Docker 服务: - -```bash -sudo systemctl daemon-reload -sudo systemctl restart docker -``` - -## macOS (Docker Desktop) - -1. 打开 Docker Desktop -2. 点击菜单栏的 **Docker** → **Preferences** (或 **Settings**) -3. 选择 **Docker Engine** -4. 在 JSON 配置中添加镜像加速器配置(同 Windows) -5. 
点击 **Apply & Restart** - -## 验证配置 - -运行以下命令验证配置是否生效: - -```bash -docker info | grep -A 10 "Registry Mirrors" -``` - -如果看到配置的镜像地址,说明配置成功。 - -## 其他镜像加速器 - -如果上述镜像源不可用,可以尝试: - -- **阿里云镜像加速器**(需要登录阿里云获取专属地址): - - 访问:https://cr.console.aliyun.com/cn-hangzhou/instances/mirrors - - 获取专属加速地址,格式:`https://.mirror.aliyuncs.com` - -- **腾讯云镜像加速器**: - - `https://mirror.ccs.tencentyun.com` - -配置完成后,重新尝试构建: - -```bash -docker compose build frontend -``` - diff --git "a/docker/\351\227\256\351\242\230\345\210\206\346\236\220\344\270\216\350\247\243\345\206\263\346\226\271\346\241\210.md" "b/docker/\351\227\256\351\242\230\345\210\206\346\236\220\344\270\216\350\247\243\345\206\263\346\226\271\346\241\210.md" deleted file mode 100644 index 83b543e75..000000000 --- "a/docker/\351\227\256\351\242\230\345\210\206\346\236\220\344\270\216\350\247\243\345\206\263\346\226\271\346\241\210.md" +++ /dev/null @@ -1,172 +0,0 @@ -# 后端容器问题分析与解决方案 - -## 问题总结 - -根据后端容器日志分析,发现以下问题: - -### 1. OpenSSL/TLS 连接错误 ⚠️ - -**错误信息**: -``` -curl: (35) TLS connect error: error:00000000:invalid library (0):OPENSSL_internal:invalid library (0) -``` - -**影响**: -- yfinance 无法获取某些股票数据(如 ^IXIC, ^HSI) -- 部分 API 请求失败 - -**解决方案**: -- ✅ 已在 Dockerfile.backend 中添加完整的 OpenSSL 库支持 -- 需要重新构建镜像:`docker compose build backend` - -### 2. 网络连接问题 ⚠️ - -**错误信息**: -``` -aiohttp.client_exceptions.ClientConnectorError: Cannot connect to host fapi.binance.com:443 ssl:default [None] -``` - -**影响**: -- 无法连接到 Binance API 获取市场数据 -- 策略代理无法获取加密货币价格 - -**可能原因**: -- 容器网络配置问题 -- 防火墙或代理设置 -- DNS 解析问题 - -**解决方案**: -1. 检查容器网络配置 -2. 确保容器可以访问外部网络 -3. 如果使用代理,需要在 docker-compose.yml 中配置代理设置 - -### 3. API 密钥配置问题 ⚠️ - -**错误信息**: -``` -Error code: 401 - Api key is invalid -``` - -**影响**: -- SiliconFlow API 调用失败 -- LLM 功能无法使用 - -**解决方案**: -1. 检查 `.env` 文件中的 API 密钥配置 -2. 确保 SiliconFlow API 密钥有效 -3. 或者切换到其他已配置的 LLM 提供商(如 openrouter) - -### 4. 
500 内部服务器错误 ⚠️ - -**错误接口**: -- `/api/v1/strategies/portfolio_summary` -- `/api/v1/strategies/holding_price_curve` - -**相关日志**: -``` -WARNING - Failed to persist strategy portfolio snapshot for strategy-32b072e7897a494490d3397d2e8a162f -``` - -**可能原因**: -- 数据库连接问题 -- 数据持久化逻辑错误 -- 策略数据不完整 - -**解决方案**: -- 检查数据库连接 -- 查看详细错误日志 -- 确保策略数据已正确初始化 - -### 5. 警告信息(可忽略)ℹ️ - -**警告**: -``` -warning: The package akracer==0.0.13 does not have an extra named py-mini-racer -``` - -**说明**: -- 这是依赖包的警告,不影响运行 -- 可以忽略 - -## 修复步骤 - -### 步骤 1:重新构建后端镜像 - -```bash -# 停止当前容器 -docker compose down - -# 重新构建后端镜像(包含 OpenSSL 修复) -docker compose build backend - -# 启动服务 -docker compose up -d -``` - -### 步骤 2:检查网络连接 - -```bash -# 进入后端容器 -docker compose exec backend bash - -# 测试网络连接 -curl -I https://fapi.binance.com -curl -I https://api.siliconflow.cn -``` - -### 步骤 3:配置 API 密钥 - -编辑 `.env` 文件,确保包含有效的 API 密钥: - -```bash -# SiliconFlow API 密钥 -SILICONFLOW_API_KEY=your-api-key-here - -# 或者使用其他提供商 -OPENROUTER_API_KEY=your-api-key-here -GOOGLE_API_KEY=your-api-key-here -``` - -### 步骤 4:验证修复 - -```bash -# 查看日志 -docker compose logs -f backend - -# 检查健康状态 -curl http://localhost:8000/api/v1/healthz -``` - -## 网络配置建议 - -如果遇到网络连接问题,可以在 `docker-compose.yml` 中添加网络配置: - -```yaml -services: - backend: - # ... 其他配置 ... - network_mode: bridge - dns: - - 8.8.8.8 - - 8.8.4.4 - # 如果需要代理 - # environment: - # - HTTP_PROXY=http://proxy:port - # - HTTPS_PROXY=http://proxy:port -``` - -## 监控建议 - -定期检查以下指标: -1. 容器健康状态:`docker compose ps` -2. 错误日志:`docker compose logs backend | grep ERROR` -3. API 响应时间:监控 `/api/v1/healthz` 端点 -4. 网络连接:测试外部 API 连接 - -## 联系支持 - -如果问题持续存在,请提供: -1. 完整的容器日志:`docker compose logs backend > backend.log` -2. 网络测试结果 -3. 
环境配置信息
-
diff --git a/python/valuecell/utils/db.py b/python/valuecell/utils/db.py
index 16c2138e9..54b4ed00f 100644
--- a/python/valuecell/utils/db.py
+++ b/python/valuecell/utils/db.py
@@ -1,12 +1,32 @@
 import os
+from urllib.parse import urlparse
 
 from .path import get_repo_root_path
 
 
+def _url_to_path(url: str) -> str:
+    """Convert a SQLite SQLAlchemy URL to a filesystem path for aiosqlite.
+
+    This keeps VALUECELL_SQLITE_DB usable as a full SQLAlchemy URL (e.g.
+    'sqlite:////app/data/valuecell.db') while allowing conversation stores
+    to work with the underlying file path (e.g. '/app/data/valuecell.db').
+    """
+    if not url.startswith("sqlite://"):
+        # Not a SQLite URL, treat as a plain filesystem path
+        return url
+
+    parsed = urlparse(url)
+    # NOTE: for 'sqlite:////...' URLs urlparse keeps an extra leading slash
+    # (e.g. '//app/data/valuecell.db'); Linux resolves it like a single '/'.
+    return parsed.path or url
+
+
 def resolve_db_path() -> str:
-    return os.environ.get("VALUECELL_SQLITE_DB") or os.path.join(
-        get_repo_root_path(), "valuecell.db"
-    )
+    env = os.environ.get("VALUECELL_SQLITE_DB")
+    if env:
+        return _url_to_path(env)
+
+    return os.path.join(get_repo_root_path(), "valuecell.db")
 
 
 def resolve_lancedb_uri() -> str:

From efd11fa6bf4a2d423e06394aa231776ae3c6f576 Mon Sep 17 00:00:00 2001
From: Cyril Chan
Date: Thu, 27 Nov 2025 00:22:07 +0800
Subject: [PATCH 4/5] Normalize entrypoint.sh to LF via .gitattributes

---
 .gitattributes | 3 +++
 1 file changed, 3 insertions(+)
 create mode 100644 .gitattributes

diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 000000000..f5ca4f399
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,3 @@
+docker/entrypoint.sh text eol=lf
+
+

From 4b1c3c56cdc302b1c54404cd44f309df518b2cfb Mon Sep 17 00:00:00 2001
From: Cyril Chan
Date: Thu, 27 Nov 2025 00:24:34 +0800
Subject: [PATCH 5/5] Normalize backend entrypoint line endings in image build

---
 docker/Dockerfile.backend | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git 
a/docker/Dockerfile.backend b/docker/Dockerfile.backend index 1356093b6..f836568da 100644 --- a/docker/Dockerfile.backend +++ b/docker/Dockerfile.backend @@ -50,7 +50,8 @@ RUN mkdir -p /app/logs # Copy entrypoint script COPY docker/entrypoint.sh /app/entrypoint.sh -RUN chmod +x /app/entrypoint.sh +# Normalize line endings to LF (handle CRLF from Windows) and make executable +RUN sed -i 's/\r$//' /app/entrypoint.sh && chmod +x /app/entrypoint.sh # Set entrypoint ENTRYPOINT ["/app/entrypoint.sh"]