Files
internet-id/docker-compose.production.yml
Copilot 14014cb784 Production hardening: Remove deprecations, optimize chain queries, enforce explicit configuration (#134)
* Initial plan

* Fix onLimitReached deprecation in rate-limit middleware

- Replace deprecated onLimitReached callback with inline logging in handler
- Update tests to verify logging happens in the rate limit handler
- Remove onLimitReached from exports as it's no longer a separate function

Co-authored-by: onnwee <211922112+onnwee@users.noreply.github.com>

* Fix fromBlock: 0 in event queries for better performance

- Replace fromBlock: 0 with smart default (last 1M blocks)
- Add REGISTRY_START_BLOCK env var for configurable starting block
- Update make-proof.ts, verification-jobs.routes.ts, and verification-queue.service.ts
- Document new env variable in .env.example

Co-authored-by: onnwee <211922112+onnwee@users.noreply.github.com>

* Remove hardcoded testnet RPC fallback for production safety

- Replace testnet fallbacks with proper error handling when RPC_URL is not configured
- Update registry.service.ts, blockchain.service.ts to throw errors if RPC_URL missing
- Update CLI scripts (verify.ts, register.ts, make-proof.ts) to fail fast without RPC_URL
- Update API routes to return 503 error when RPC_URL is not configured
- Update .env.example to emphasize RPC_URL is required

Co-authored-by: onnwee <211922112+onnwee@users.noreply.github.com>

* Add documentation for dual Prisma generators and pin Redis version

- Add comprehensive comments in schema.prisma explaining dual generator setup
- Document why both generators are needed (API vs Next.js web app)
- Pin Redis version to 7.2-alpine in all docker-compose files for reproducibility
- Update docker-compose.yml, docker-compose.production.yml, and docker-compose.staging.yml

Co-authored-by: onnwee <211922112+onnwee@users.noreply.github.com>

* Address code review feedback - add validation for REGISTRY_START_BLOCK

- Add proper validation for parseInt to handle NaN cases
- Ensure REGISTRY_START_BLOCK is validated before use
- Add comment explaining intentional empty catch block
- Prevents invalid block numbers from breaking event queries

Co-authored-by: onnwee <211922112+onnwee@users.noreply.github.com>

* Refactor block range validation into shared utility function

- Create block-range.util.ts with getStartBlock helper
- Extract duplicated validation logic from make-proof.ts, verification-jobs.routes.ts, and verification-queue.service.ts
- Improves code maintainability and ensures consistent validation
- Add comprehensive JSDoc documentation

Co-authored-by: onnwee <211922112+onnwee@users.noreply.github.com>

* Add comprehensive test coverage for block-range utility

- Create test/utils/block-range.util.test.ts following existing test patterns
- Test valid REGISTRY_START_BLOCK values (positive, zero, large numbers)
- Test invalid inputs (NaN, negative, empty string, whitespace)
- Test default fallback behavior (current block - 1M)
- Test edge cases (low block numbers, decimals, provider errors)
- 15 test cases covering all code paths and validation logic

Co-authored-by: PatrickFanella <61631520+PatrickFanella@users.noreply.github.com>

---------

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: onnwee <211922112+onnwee@users.noreply.github.com>
Co-authored-by: PatrickFanella <61631520+PatrickFanella@users.noreply.github.com>
2026-02-16 19:25:50 -06:00

195 lines
5.4 KiB
YAML

# Docker Compose configuration for PRODUCTION environment
# This file extends docker-compose.yml with production-specific settings
#
# NOTE: the top-level `version` attribute is obsolete in the Compose
# Specification and only produces a warning in Compose v2, so it is omitted.
services:
# Nginx reverse proxy with SSL/TLS termination
nginx:
environment:
- DOMAIN=${DOMAIN:-internet-id.example.com}
- NGINX_ENVSUBST_OUTPUT_DIR=/etc/nginx/conf.d
volumes:
- ./ops/nginx/nginx.conf:/etc/nginx/nginx.conf:ro
- ./ops/nginx/conf.d/production.conf.template:/etc/nginx/templates/default.conf.template:ro
- certbot_www:/var/www/certbot:ro
- certbot_conf:/etc/letsencrypt:ro
- nginx_logs:/var/log/nginx
deploy:
resources:
limits:
cpus: '1.0'
memory: 512M
reservations:
cpus: '0.5'
memory: 256M
# Express API server
api:
build:
context: .
dockerfile: Dockerfile.api
target: runner
image: internet-id-api:production
environment:
- NODE_ENV=production
- DATABASE_URL=${DATABASE_URL}
- API_KEY=${API_KEY}
- RPC_URL=${RPC_URL}
- IPFS_API_URL=${IPFS_API_URL}
- WEB3_STORAGE_TOKEN=${WEB3_STORAGE_TOKEN}
- PINATA_JWT=${PINATA_JWT}
- REDIS_URL=${REDIS_URL:-redis://redis:6379}
- LOG_LEVEL=${LOG_LEVEL:-info}
restart: always
deploy:
resources:
limits:
cpus: '2.0'
memory: 2G
reservations:
cpus: '1.0'
memory: 1G
replicas: 2
healthcheck:
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:3001/api/health"]
interval: 30s
timeout: 10s
retries: 5
start_period: 60s
# Next.js web application
web:
build:
context: .
dockerfile: web/Dockerfile
target: runner
image: internet-id-web:production
environment:
- NODE_ENV=production
- NEXT_PUBLIC_API_BASE=${NEXT_PUBLIC_API_BASE:-https://${DOMAIN}/api}
- NEXT_PUBLIC_SITE_BASE=${NEXT_PUBLIC_SITE_BASE:-https://${DOMAIN}}
- NEXTAUTH_URL=${NEXTAUTH_URL:-https://${DOMAIN}}
- NEXTAUTH_SECRET=${NEXTAUTH_SECRET}
- DATABASE_URL=${DATABASE_URL}
- GITHUB_ID=${GITHUB_ID}
- GITHUB_SECRET=${GITHUB_SECRET}
- GOOGLE_ID=${GOOGLE_ID}
- GOOGLE_SECRET=${GOOGLE_SECRET}
restart: always
deploy:
resources:
limits:
cpus: '2.0'
memory: 2G
reservations:
cpus: '1.0'
memory: 1G
replicas: 2
healthcheck:
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:3000/"]
interval: 30s
timeout: 10s
retries: 5
start_period: 60s
# PostgreSQL database
db:
image: postgres:16-alpine
environment:
POSTGRES_USER: ${POSTGRES_USER:-internetid}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
POSTGRES_DB: ${POSTGRES_DB:-internetid}
# Enable WAL archiving for point-in-time recovery
command: >
postgres
-c wal_level=replica
-c archive_mode=on
-c archive_command='test ! -f /var/lib/postgresql/backups/wal_archive/%f && cp %p /var/lib/postgresql/backups/wal_archive/%f'
-c max_connections=100
-c shared_buffers=256MB
-c effective_cache_size=1GB
-c maintenance_work_mem=64MB
-c checkpoint_completion_target=0.9
-c wal_buffers=16MB
-c default_statistics_target=100
-c random_page_cost=1.1
-c effective_io_concurrency=200
-c work_mem=2621kB
-c min_wal_size=1GB
-c max_wal_size=4GB
volumes:
- db_data_production:/var/lib/postgresql/data
- backup_data_production:/var/lib/postgresql/backups
deploy:
resources:
limits:
cpus: '2.0'
memory: 4G
reservations:
cpus: '1.0'
memory: 2G
healthcheck:
test: ["CMD-SHELL", "pg_isready -U $$POSTGRES_USER"]
interval: 10s
timeout: 5s
retries: 5
# Redis cache
redis:
image: redis:7.2-alpine
command: redis-server --maxmemory 512mb --maxmemory-policy allkeys-lru --appendonly yes
volumes:
- redis_data_production:/data
deploy:
resources:
limits:
cpus: '1.0'
memory: 1G
reservations:
cpus: '0.5'
memory: 512M
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 10s
timeout: 5s
retries: 5
# Backup service for automated database backups
backup:
build:
context: .
dockerfile: Dockerfile.backup
image: internet-id-backup:production
environment:
POSTGRES_HOST: db
POSTGRES_PORT: 5432
POSTGRES_USER: ${POSTGRES_USER:-internetid}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
POSTGRES_DB: ${POSTGRES_DB:-internetid}
BACKUP_DIR: /var/lib/postgresql/backups
RETENTION_DAYS: ${RETENTION_DAYS:-30}
S3_BUCKET: ${S3_BUCKET}
S3_REGION: ${S3_REGION:-us-east-1}
AWS_ACCESS_KEY_ID: ${AWS_ACCESS_KEY_ID}
AWS_SECRET_ACCESS_KEY: ${AWS_SECRET_ACCESS_KEY}
volumes:
- backup_data_production:/var/lib/postgresql/backups
- ./ops/backup:/opt/backup-scripts:ro
depends_on:
db:
condition: service_healthy
entrypoint: /bin/sh
# Run backups every 6 hours in production
command: -c "while true; do /opt/backup-scripts/backup-database.sh full; sleep 21600; done"
restart: always
# Named volumes. Bare keys are intentional: Compose creates each volume with
# the default (local) driver and default options.
volumes:
  db_data_production:
  backup_data_production:
  redis_data_production:
  certbot_www:
  certbot_conf:
  certbot_logs:
  nginx_logs: