diff --git a/backend/migrations/003_create_tokens_table.sql b/backend/migrations/003_create_tokens_table.sql
new file mode 100644
index 00000000..503feb98
--- /dev/null
+++ b/backend/migrations/003_create_tokens_table.sql
@@ -0,0 +1,12 @@
+-- Migration: Create tokens table
+CREATE TABLE IF NOT EXISTS tokens (
+    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+    address VARCHAR(255) UNIQUE NOT NULL,
+    symbol VARCHAR(32),
+    name VARCHAR(128),
+    decimals INTEGER,
+    created_at TIMESTAMP DEFAULT NOW(),
+    updated_at TIMESTAMP DEFAULT NOW()
+);
+-- NOTE: the UNIQUE constraint on address already creates a btree index,
+-- so no separate CREATE INDEX on address is needed.
diff --git a/backend/src/models/index.js b/backend/src/models/index.js
index e63f03eb..abba23ba 100644
--- a/backend/src/models/index.js
+++ b/backend/src/models/index.js
@@ -1,10 +1,15 @@
 const { sequelize } = require('../database/connection');
+
 const ClaimsHistory = require('./claimsHistory');
 const Vault = require('./vault');
 const SubSchedule = require('./subSchedule');
 const TVL = require('./tvl');
 const Beneficiary = require('./beneficiary');
 const Organization = require('./organization');
+const { Token, initTokenModel } = require('./token');
+
+// Register the Token model on the shared connection before exporting it.
+initTokenModel(sequelize);
 
 const models = {
   ClaimsHistory,
@@ -13,6 +18,7 @@ const models = {
   TVL,
   Beneficiary,
   Organization,
+  Token,
   sequelize,
 };
diff --git a/backend/src/models/token.js b/backend/src/models/token.js
new file mode 100644
index 00000000..cfd413d9
--- /dev/null
+++ b/backend/src/models/token.js
@@ -0,0 +1,52 @@
+const { DataTypes, Model } = require('sequelize');
+
+/**
+ * Sequelize model for on-chain token metadata.
+ * The database columns are snake_case (see migration 003), so the
+ * camelCase timestamp attributes are mapped explicitly via `field`.
+ */
+class Token extends Model {}
+
+function initTokenModel(sequelize) {
+  Token.init(
+    {
+      id: {
+        type: DataTypes.UUID,
+        defaultValue: DataTypes.UUIDV4,
+        primaryKey: true,
+      },
+      address: {
+        type: DataTypes.STRING,
+        allowNull: false,
+        unique: true,
+      },
+      symbol: {
+        type: DataTypes.STRING(32),
+      },
+      name: {
+        type: DataTypes.STRING(128),
+      },
+      decimals: {
+        type: DataTypes.INTEGER,
+      },
+      createdAt: {
+        type: DataTypes.DATE,
+        defaultValue: DataTypes.NOW,
+        field: 'created_at', // DB column is snake_case
+      },
+      updatedAt: {
+        type: DataTypes.DATE,
+        defaultValue: DataTypes.NOW,
+        field: 'updated_at', // DB column is snake_case
+      },
+    },
+    {
+      sequelize,
+      tableName: 'tokens',
+      indexes: [{ fields: ['address'] }],
+    }
+  );
+  return Token;
+}
+
+module.exports = { Token, initTokenModel };
diff --git a/backend/src/services/tokenMetadataWorker.js b/backend/src/services/tokenMetadataWorker.js
new file mode 100644
index 00000000..8131ae23
--- /dev/null
+++ b/backend/src/services/tokenMetadataWorker.js
@@ -0,0 +1,56 @@
+// Import via ../models so initTokenModel(sequelize) has already run.
+const { Token, Vault } = require('../models');
+const axios = require('axios');
+
+const SOROBAN_RPC_URL = process.env.SOROBAN_RPC_URL || 'https://soroban-rpc.testnet.stellar.org';
+
+/**
+ * Worker that detects token addresses referenced by vaults and
+ * fetches/stores their metadata in the tokens table.
+ */
+class TokenMetadataWorker {
+  constructor(sequelize) {
+    this.sequelize = sequelize;
+  }
+
+  async detectAndFetchNewTokens() {
+    // 1. Collect the unique, non-empty token addresses referenced by vaults.
+    const vaults = await Vault.findAll({ attributes: ['token_address'] });
+    const uniqueAddresses = [...new Set(vaults.map((v) => v.token_address).filter(Boolean))];
+    if (uniqueAddresses.length === 0) return;
+
+    // 2. One IN-query (instead of one findOne per address) for known tokens.
+    const known = await Token.findAll({ where: { address: uniqueAddresses } });
+    const knownAddresses = new Set(known.map((t) => t.address));
+
+    // 3. Fetch and store metadata for addresses not yet in the table.
+    for (const address of uniqueAddresses) {
+      if (knownAddresses.has(address)) continue;
+      try {
+        const meta = await this.fetchTokenMetadata(address);
+        if (meta) {
+          await Token.create({ address, ...meta });
+          console.log(`Token metadata stored for ${address}`);
+        }
+      } catch (err) {
+        // One bad token must not abort the whole scan.
+        console.error(`Failed to fetch/store metadata for ${address}:`, err);
+      }
+    }
+  }
+
+  async fetchTokenMetadata(address) {
+    // Example: replace with an actual Soroban RPC call.
+    try {
+      const response = await axios.post(`${SOROBAN_RPC_URL}/getTokenMetadata`, { address });
+      const { symbol, name, decimals } = response.data;
+      if (symbol && name && typeof decimals === 'number') {
+        return { symbol, name, decimals };
+      }
+      return null;
+    } catch (err) {
+      // Best-effort: an unreachable/unknown endpoint yields null, not a crash.
+      return null;
+    }
+  }
+}
+
+module.exports = { TokenMetadataWorker };
diff --git a/backend/src/workers/tokenMetaWorker.js b/backend/src/workers/tokenMetaWorker.js
new file mode 100644
index 00000000..84ac170e
--- /dev/null
+++ b/backend/src/workers/tokenMetaWorker.js
@@ -0,0 +1,14 @@
+const { sequelize } = require('../models');
+const { TokenMetadataWorker } = require('../services/tokenMetadataWorker');
+
+async function runWorker() {
+  const worker = new TokenMetadataWorker(sequelize);
+  await worker.detectAndFetchNewTokens();
+}
+
+runWorker()
+  .then(() => process.exit(0))
+  .catch((err) => {
+    console.error('Token metadata worker failed:', err);
+    process.exit(1);
+  });
diff --git a/backup_postgres.sh b/backup_postgres.sh
new file mode 100644
index 00000000..27ebdc54
--- /dev/null
+++ b/backup_postgres.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+# Automated Postgres backup script
+
+set -euo pipefail
+
+# Config
+PG_DB="vestingvault"
+PG_USER="postgres"
+PG_HOST="localhost"
+BACKUP_DIR="/var/backups/vestingvault"
+DATE=$(date +"%Y-%m-%d_%H-%M-%S")
+DUMP_FILE="$BACKUP_DIR/backup_$DATE.sql"
+ARCHIVE_FILE="$DUMP_FILE.gz"
+
+# Ensure backup directory exists
+mkdir -p "$BACKUP_DIR"
+
+# Dump Postgres database (credentials come from ~/.pgpass or PGPASSWORD)
+pg_dump -h "$PG_HOST" -U "$PG_USER" "$PG_DB" > "$DUMP_FILE"
+
+# Compress the dump
+gzip "$DUMP_FILE"
+
+# Upload to S3 (requires AWS CLI configured with encryption)
+S3_BUCKET="s3://vestingvault-backups"
+aws s3 cp "$ARCHIVE_FILE" "$S3_BUCKET/" --sse AES256
+
+# Cleanup old backups (local and S3)
+find "$BACKUP_DIR" -name "*.gz" -mtime +30 -exec rm {} \;
+aws s3 ls "$S3_BUCKET/" | awk '{print $4}' | while IFS= read -r file; do
+  # Extract date from filename; grep may find nothing, so guard for pipefail
+  FILE_DATE=$(echo "$file" | grep -oP '\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}' || true)
+  if [[ -n "$FILE_DATE" ]]; then
+    FILE_TIMESTAMP=$(date -d "$FILE_DATE" +%s)
+    THIRTY_DAYS_AGO=$(date -d '30 days ago' +%s)
+    if (( FILE_TIMESTAMP < THIRTY_DAYS_AGO )); then
+      aws s3 rm "$S3_BUCKET/$file"
+    fi
+  fi
+done
+
+# Log
+echo "Backup completed and uploaded to S3: $ARCHIVE_FILE"