Initial commit: Open sourcing all of the Maple Open Technologies code.

This commit is contained in:
Bartlomiej Mika 2025-12-02 14:33:08 -05:00
commit 755d54a99d
2010 changed files with 448675 additions and 0 deletions

View file

@ -0,0 +1,111 @@
version: '3.8'

networks:
  maple-private-prod:
    external: true
  maple-public-prod:
    external: true

secrets:
  maplepress_jwt_secret:
    external: true
  redis_password:
    external: true
  meilisearch_master_key:
    external: true
  # Uncomment if using S3/SeaweedFS:
  # s3_access_key:
  #   external: true
  # s3_secret_key:
  #   external: true

services:
  backend:
    image: registry.digitalocean.com/ssp/maplepress_backend:latest
    hostname: backend
    networks:
      - maple-public-prod   # Receive requests from NGINX
      - maple-private-prod  # Access databases
    secrets:
      - maplepress_jwt_secret
      - redis_password
      - meilisearch_master_key
      # Uncomment if using S3:
      # - s3_access_key
      # - s3_secret_key
    environment:
      # Application Configuration
      - APP_ENVIRONMENT=production
      - APP_VERSION=${APP_VERSION:-1.0.0}
      # HTTP Server Configuration
      - SERVER_HOST=0.0.0.0
      - SERVER_PORT=8000
      # Cassandra Database Configuration
      # Use all 3 Cassandra nodes for high availability
      - DATABASE_HOSTS=cassandra-1:9042,cassandra-2:9042,cassandra-3:9042
      - DATABASE_KEYSPACE=maplepress
      - DATABASE_CONSISTENCY=QUORUM
      - DATABASE_REPLICATION=3
      - DATABASE_MIGRATIONS_PATH=file://migrations
      # Meilisearch Configuration
      - MEILISEARCH_HOST=http://meilisearch:7700
      # Logger Configuration
      - LOGGER_LEVEL=info
      - LOGGER_FORMAT=json
      # S3/Object Storage Configuration (if using)
      # - AWS_ENDPOINT=https://your-region.digitaloceanspaces.com
      # - AWS_REGION=us-east-1
      # - AWS_BUCKET_NAME=maplepress-prod
    # Read secrets and set as environment variables using entrypoint.
    # ($$ escapes $ from Compose interpolation so the shell expands it.)
    entrypoint: ["/bin/sh", "-c"]
    command:
      - |
        export APP_JWT_SECRET=$$(cat /run/secrets/maplepress_jwt_secret)
        export CACHE_PASSWORD=$$(cat /run/secrets/redis_password)
        export MEILISEARCH_API_KEY=$$(cat /run/secrets/meilisearch_master_key)
        # Uncomment if using S3:
        # export AWS_ACCESS_KEY=$$(cat /run/secrets/s3_access_key)
        # export AWS_SECRET_KEY=$$(cat /run/secrets/s3_secret_key)
        # Set Redis configuration
        export CACHE_HOST=redis
        export CACHE_PORT=6379
        export CACHE_DB=0
        # Start the backend
        exec /app/maplepress-backend
    deploy:
      replicas: 1
      placement:
        constraints:
          - node.labels.backend == true
      restart_policy:
        condition: on-failure
        delay: 10s
        max_attempts: 3
      resources:
        limits:
          memory: 1G
          cpus: '1.0'
        reservations:
          memory: 512M
          cpus: '0.5'
      update_config:
        parallelism: 1
        delay: 10s
        failure_action: rollback
        order: start-first  # Zero-downtime: start new before stopping old
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "--header=X-Tenant-ID: healthcheck", "http://localhost:8000/health"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 60s

View file

@ -0,0 +1,101 @@
version: '3.8'

networks:
  maple-private-prod:
    external: true

volumes:
  cassandra-1-data:
  cassandra-2-data:
  cassandra-3-data:

# Settings shared by every Cassandra node; merged into each service below
# via YAML anchors/merge keys (Compose extension fields, spec >= 3.4).
x-cassandra-common: &cassandra-common
  image: cassandra:5.0.4
  networks:
    - maple-private-prod
  environment:
    - CASSANDRA_CLUSTER_NAME=maple-prod-cluster
    - CASSANDRA_DC=datacenter1
    - CASSANDRA_ENDPOINT_SNITCH=GossipingPropertyFileSnitch
    - CASSANDRA_SEEDS=cassandra-1,cassandra-2,cassandra-3
    - MAX_HEAP_SIZE=512M
    - HEAP_NEWSIZE=128M
  healthcheck:
    test: ["CMD-SHELL", "cqlsh -e 'describe cluster' || exit 1"]
    interval: 30s
    timeout: 10s
    retries: 5
    start_period: 120s

# Shared deploy policy; each node only overrides its placement constraint.
x-cassandra-deploy: &cassandra-deploy
  replicas: 1
  restart_policy:
    condition: on-failure
    delay: 10s
    max_attempts: 3

services:
  cassandra-1:
    <<: *cassandra-common
    hostname: cassandra-1
    volumes:
      - cassandra-1-data:/var/lib/cassandra
    deploy:
      <<: *cassandra-deploy
      placement:
        constraints:
          - node.labels.cassandra == node1

  cassandra-2:
    <<: *cassandra-common
    hostname: cassandra-2
    volumes:
      - cassandra-2-data:/var/lib/cassandra
    deploy:
      <<: *cassandra-deploy
      placement:
        constraints:
          - node.labels.cassandra == node2

  cassandra-3:
    <<: *cassandra-common
    hostname: cassandra-3
    volumes:
      - cassandra-3-data:/var/lib/cassandra
    deploy:
      <<: *cassandra-deploy
      placement:
        constraints:
          - node.labels.cassandra == node3

View file

@ -0,0 +1,114 @@
#!/bin/bash
#
# Cassandra Cluster Sequential Deployment Script
# This script deploys Cassandra nodes sequentially to avoid race conditions
# during cluster formation.
#
set -e

STACK_NAME="cassandra"
STACK_FILE="cassandra-stack.yml"
POLL_INTERVAL=30   # Seconds between replica-state checks
MAX_WAIT=20        # 20 * 30 seconds = 10 minutes max per node
SETTLE_TIME=120    # Extra seconds for a node to finish bootstrapping

# wait_for_node SERVICE PHASE
#
# Polls `docker service ls` every POLL_INTERVAL seconds until
# ${STACK_NAME}_SERVICE reports "1/1" replicas, then sleeps SETTLE_TIME so
# the node can finish PHASE (e.g. "fully initialize" or "join cluster").
# Exits the whole script with status 1 if the node is not up within
# MAX_WAIT polls.
wait_for_node() {
    local service="$1"
    local phase="$2"
    local counter=0
    local replicas

    echo "Waiting for ${service} to become healthy (this takes ~5-8 minutes)..."
    while [ "$counter" -lt "$MAX_WAIT" ]; do
        replicas=$(docker service ls --filter "name=${STACK_NAME}_${service}" --format "{{.Replicas}}")
        if [ "$replicas" = "1/1" ]; then
            echo "✓ ${service} is running"
            echo "Waiting additional 2 minutes for ${service} to ${phase}..."
            sleep "$SETTLE_TIME"
            return 0
        fi
        echo "  ${service} status: $replicas (waiting...)"
        sleep "$POLL_INTERVAL"
        counter=$((counter + 1))
    done

    echo "ERROR: ${service} failed to start within 10 minutes"
    echo "Check logs with: docker service logs ${STACK_NAME}_${service}"
    exit 1
}

echo "=== Cassandra Cluster Sequential Deployment ==="
echo ""

# Check if stack file exists
if [ ! -f "$STACK_FILE" ]; then
    echo "ERROR: $STACK_FILE not found in current directory"
    exit 1
fi

echo "Step 1: Deploying cassandra-1 (seed node)..."
docker stack deploy -c "$STACK_FILE" "$STACK_NAME"

# Scale down cassandra-2 and cassandra-3 temporarily so only the first seed
# bootstraps; the remaining nodes are brought up one at a time below.
docker service scale "${STACK_NAME}_cassandra-2=0" > /dev/null 2>&1
docker service scale "${STACK_NAME}_cassandra-3=0" > /dev/null 2>&1

echo "Checking every 30 seconds..."
wait_for_node "cassandra-1" "fully initialize"

echo ""
echo "Step 2: Starting cassandra-2..."
docker service scale "${STACK_NAME}_cassandra-2=1"
wait_for_node "cassandra-2" "join cluster"

echo ""
echo "Step 3: Starting cassandra-3..."
docker service scale "${STACK_NAME}_cassandra-3=1"
wait_for_node "cassandra-3" "join cluster"

echo ""
echo "=== Deployment Complete ==="
echo ""
echo "All 3 Cassandra nodes should now be running and forming a cluster."
echo ""
echo "Verify cluster status by SSH'ing to any worker node and running:"
echo "  docker exec -it \$(docker ps -q --filter \"name=cassandra\") nodetool status"
echo ""
echo "You should see 3 nodes with status 'UN' (Up Normal)."
echo ""

View file

@ -0,0 +1,56 @@
version: '3.8'

networks:
  maple-private-prod:
    external: true

volumes:
  meilisearch-data:

secrets:
  meilisearch_master_key:
    external: true

services:
  meilisearch:
    image: getmeili/meilisearch:v1.5
    hostname: meilisearch
    networks:
      - maple-private-prod
    volumes:
      - meilisearch-data:/meili_data
    secrets:
      - meilisearch_master_key
    # Load the master key from the Docker secret before starting
    # ($$ escapes $ from Compose interpolation so the shell expands it).
    entrypoint: ["/bin/sh", "-c"]
    command:
      - |
        export MEILI_MASTER_KEY=$$(cat /run/secrets/meilisearch_master_key)
        exec meilisearch
    environment:
      - MEILI_ENV=production
      - MEILI_NO_ANALYTICS=true
      - MEILI_DB_PATH=/meili_data
      - MEILI_HTTP_ADDR=0.0.0.0:7700
      - MEILI_LOG_LEVEL=INFO
      - MEILI_MAX_INDEXING_MEMORY=512mb
      - MEILI_MAX_INDEXING_THREADS=2
    deploy:
      replicas: 1
      placement:
        constraints:
          - node.labels.meilisearch == true
      restart_policy:
        condition: on-failure
        delay: 10s
        max_attempts: 3
      resources:
        limits:
          memory: 1G
        reservations:
          memory: 768M
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:7700/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 30s

View file

@ -0,0 +1,71 @@
version: '3.8'

networks:
  maple-public-prod:
    external: true

volumes:
  nginx-ssl-certs:
  nginx-ssl-www:

services:
  nginx:
    image: nginx:alpine
    hostname: nginx
    networks:
      - maple-public-prod
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - nginx-ssl-certs:/etc/letsencrypt
      - nginx-ssl-www:/var/www/certbot
      # NOTE(review): stock nginx:alpine does not read the Docker socket;
      # this mount is only used by nginx-proxy-style images. Confirm it is
      # needed and drop it otherwise — it widens the attack surface.
      - /var/run/docker.sock:/tmp/docker.sock:ro  # For nginx-proxy
    configs:
      - source: nginx_config
        target: /etc/nginx/nginx.conf
      - source: nginx_site_config
        target: /etc/nginx/conf.d/default.conf
    deploy:
      replicas: 1
      placement:
        constraints:
          - node.labels.backend == true  # Same node as backend
      restart_policy:
        condition: on-failure
        delay: 5s
        max_attempts: 3
      resources:
        limits:
          memory: 256M
          cpus: '0.5'
        reservations:
          memory: 128M
          cpus: '0.25'
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:80/health"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 10s

  certbot:
    image: certbot/certbot:latest
    hostname: certbot
    volumes:
      - nginx-ssl-certs:/etc/letsencrypt
      - nginx-ssl-www:/var/www/certbot
    # Renew certificates every 12h; $$ escapes $ from Compose interpolation.
    entrypoint: "/bin/sh -c 'trap exit TERM; while :; do certbot renew; sleep 12h & wait $${!}; done;'"
    deploy:
      replicas: 1
      placement:
        constraints:
          - node.labels.backend == true
      restart_policy:
        condition: on-failure

configs:
  nginx_config:
    file: ./nginx.conf
  nginx_site_config:
    file: ./site.conf

View file

@ -0,0 +1,55 @@
# Main NGINX configuration for the Maple production stack.
user nginx;
worker_processes auto;

error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;

events {
    worker_connections 2048;
    use epoll;
    multi_accept on;
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    # Access-log format with upstream timing breakdown (connect/header/response).
    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for" '
                    'rt=$request_time uct="$upstream_connect_time" '
                    'uht="$upstream_header_time" urt="$upstream_response_time"';
    access_log /var/log/nginx/access.log main;

    # Performance
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;
    client_max_body_size 100M;

    # Gzip compression for text-like responses
    gzip on;
    gzip_vary on;
    gzip_min_length 1024;
    gzip_comp_level 6;
    gzip_types text/plain text/css text/xml text/javascript
               application/json application/javascript application/xml+rss
               application/rss+xml application/atom+xml image/svg+xml
               text/x-component application/x-font-ttf font/opentype;

    # Security headers applied to every response by default
    add_header X-Frame-Options "SAMEORIGIN" always;
    add_header X-Content-Type-Options "nosniff" always;
    add_header X-XSS-Protection "1; mode=block" always;

    # Rate-limiting zones, referenced by the per-site configs
    limit_req_zone $binary_remote_addr zone=general:10m rate=10r/s;
    limit_req_zone $binary_remote_addr zone=api:10m rate=100r/s;
    limit_req_status 429;

    # Include site configurations
    include /etc/nginx/conf.d/*.conf;
}

View file

@ -0,0 +1,73 @@
version: '3.8'

networks:
  maple-private-prod:
    external: true

volumes:
  redis-data:

secrets:
  redis_password:
    external: true

services:
  redis:
    image: redis:7-alpine
    hostname: redis
    networks:
      - maple-private-prod
    volumes:
      - redis-data:/data
    secrets:
      - redis_password
    # Command with password from secret ($$ escapes $ from Compose
    # interpolation; the shell expands $(cat ...) at container start).
    command: >
      sh -c '
      redis-server
      --requirepass "$$(cat /run/secrets/redis_password)"
      --bind 0.0.0.0
      --port 6379
      --protected-mode no
      --save 900 1
      --save 300 10
      --save 60 10000
      --appendonly yes
      --appendfilename "appendonly.aof"
      --appendfsync everysec
      --maxmemory 512mb
      --maxmemory-policy allkeys-lru
      --loglevel notice
      --databases 16
      --timeout 300
      --tcp-keepalive 300
      --io-threads 2
      --io-threads-do-reads yes
      --slowlog-log-slower-than 10000
      --slowlog-max-len 128
      --activerehashing yes
      --maxclients 10000
      --rename-command FLUSHDB ""
      --rename-command FLUSHALL ""
      --rename-command CONFIG ""
      '
    deploy:
      replicas: 1
      placement:
        constraints:
          - node.labels.redis == true
      restart_policy:
        condition: on-failure
        delay: 5s
        max_attempts: 3
      resources:
        limits:
          memory: 768M
        reservations:
          memory: 512M
    healthcheck:
      test: ["CMD", "sh", "-c", "redis-cli -a $$(cat /run/secrets/redis_password) ping | grep PONG"]
      interval: 10s
      timeout: 3s
      retries: 3
      start_period: 10s

View file

@ -0,0 +1,161 @@
# Maple Infrastructure - Redis Production Configuration
# This file is used by the Redis Docker container
# NOTE(review): the redis stack file passes all of these settings as
# command-line flags and does not appear to mount this file — confirm
# which of the two is authoritative and keep them in sync.
# ==============================================================================
# NETWORK
# ==============================================================================
# Bind to all interfaces (Docker networking handles access control)
bind 0.0.0.0
# Default Redis port
port 6379
# Protected mode disabled (we rely on Docker network isolation)
# Only containers on maple-prod overlay network can access
protected-mode no
# ==============================================================================
# PERSISTENCE
# ==============================================================================
# RDB Snapshots (background saves)
# Save if at least 1 key changed in 900 seconds (15 min)
save 900 1
# Save if at least 10 keys changed in 300 seconds (5 min)
save 300 10
# Save if at least 10000 keys changed in 60 seconds (1 min)
save 60 10000
# Stop writes if RDB snapshot fails (data safety)
stop-writes-on-bgsave-error yes
# Compress RDB files
rdbcompression yes
# Checksum RDB files
rdbchecksum yes
# RDB filename
dbfilename dump.rdb
# Working directory for RDB and AOF files
dir /data
# ==============================================================================
# APPEND-ONLY FILE (AOF) - Additional Durability
# ==============================================================================
# Enable AOF for better durability
appendonly yes
# AOF filename
appendfilename "appendonly.aof"
# Sync strategy: fsync every second (good balance)
# Options: always, everysec, no
appendfsync everysec
# Don't fsync during rewrite (prevents blocking)
no-appendfsync-on-rewrite no
# Auto-rewrite AOF when it grows 100% larger
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
# ==============================================================================
# MEMORY MANAGEMENT
# ==============================================================================
# Maximum memory (adjust based on your droplet RAM)
# For 2GB droplet with Redis only: 1.5GB safe limit
# For 2GB droplet with other services: 512MB-1GB
maxmemory 512mb
# Eviction policy when maxmemory reached
# allkeys-lru: Evict least recently used keys (good for cache)
# volatile-lru: Only evict keys with TTL set
# noeviction: Return errors when memory limit reached
maxmemory-policy allkeys-lru
# LRU/LFU algorithm precision (higher = more accurate, more CPU)
maxmemory-samples 5
# ==============================================================================
# SECURITY
# ==============================================================================
# Require password for all operations
# IMPORTANT: This is loaded from Docker secret in production
# requirepass will be set via command line argument
# Disable dangerous commands in production
# NOTE(review): rename-command is deprecated in newer Redis releases in
# favour of ACLs — confirm behaviour on the deployed redis:7 image.
rename-command FLUSHDB ""
rename-command FLUSHALL ""
rename-command CONFIG ""
# ==============================================================================
# LOGGING
# ==============================================================================
# Log level: debug, verbose, notice, warning
loglevel notice
# Log to stdout (Docker captures logs)
logfile ""
# ==============================================================================
# DATABASES
# ==============================================================================
# Number of databases (default 16)
databases 16
# ==============================================================================
# PERFORMANCE TUNING
# ==============================================================================
# Timeout for idle client connections (0 = disabled)
timeout 300
# TCP keepalive
tcp-keepalive 300
# Number of I/O threads (use for high load)
# NOTE(review): verify the "0 = auto-detect" claim against the Redis docs;
# the documented default is 1 (single-threaded).
io-threads 2
io-threads-do-reads yes
# ==============================================================================
# SLOW LOG
# ==============================================================================
# Log queries slower than 10ms (value is in microseconds)
slowlog-log-slower-than 10000
# Keep last 128 slow queries
slowlog-max-len 128
# ==============================================================================
# ADVANCED
# ==============================================================================
# Enable active rehashing
activerehashing yes
# Client output buffer limits
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit replica 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
# Max number of clients
maxclients 10000
# ==============================================================================
# NOTES
# ==============================================================================
# This configuration is optimized for:
# - Production caching workload
# - 2GB RAM droplet
# - Single Redis instance (not clustered)
# - AOF + RDB persistence
# - Docker Swarm networking
#
# Monitoring commands:
# - INFO: Get server stats
# - SLOWLOG GET: View slow queries
# - MEMORY STATS: Memory usage breakdown
# - CLIENT LIST: Connected clients
# ==============================================================================

View file

@ -0,0 +1,108 @@
# Site configuration: terminates TLS for getmaplepress.ca and proxies to the
# backend service. Rate-limit zones (general/api) are defined in nginx.conf.

# Upstream backend service
upstream backend {
    server backend:8000;
    keepalive 32;
}

# HTTP server - redirect to HTTPS
server {
    listen 80;
    listen [::]:80;
    server_name getmaplepress.ca www.getmaplepress.ca;

    # Let's Encrypt challenge location
    location /.well-known/acme-challenge/ {
        root /var/www/certbot;
    }

    # Health check endpoint (for load balancer)
    location /health {
        access_log off;
        return 200 "healthy\n";
        add_header Content-Type text/plain;
    }

    # Redirect all other HTTP traffic to HTTPS
    location / {
        return 301 https://$host$request_uri;
    }
}

# HTTPS server
server {
    listen 443 ssl http2;
    listen [::]:443 ssl http2;
    server_name getmaplepress.ca www.getmaplepress.ca;

    # SSL certificates (Let's Encrypt)
    ssl_certificate /etc/letsencrypt/live/getmaplepress.ca/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/getmaplepress.ca/privkey.pem;

    # SSL configuration (Mozilla Intermediate)
    ssl_protocols TLSv1.2 TLSv1.3;
    ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384;
    ssl_prefer_server_ciphers off;
    ssl_session_cache shared:SSL:10m;
    ssl_session_timeout 10m;
    # NOTE(review): OCSP stapling generally needs a `resolver` directive and
    # `ssl_trusted_certificate` to be effective — confirm it works here.
    ssl_stapling on;
    ssl_stapling_verify on;

    # Security headers
    add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
    add_header X-Frame-Options "SAMEORIGIN" always;
    add_header X-Content-Type-Options "nosniff" always;
    add_header X-XSS-Protection "1; mode=block" always;
    add_header Referrer-Policy "strict-origin-when-cross-origin" always;

    # Logging
    access_log /var/log/nginx/access.log main;
    error_log /var/log/nginx/error.log warn;

    # Proxy settings
    # NOTE(review): sending `Connection "upgrade"` unconditionally defeats the
    # upstream `keepalive 32` pool (which expects Connection "") — consider a
    # `map $http_upgrade` in the http block if WebSockets are needed; confirm.
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Proto $scheme;
    proxy_set_header X-Forwarded-Host $host;
    proxy_set_header X-Forwarded-Port $server_port;

    # Timeouts
    proxy_connect_timeout 60s;
    proxy_send_timeout 60s;
    proxy_read_timeout 60s;

    # Buffer settings
    proxy_buffering on;
    proxy_buffer_size 4k;
    proxy_buffers 8 4k;
    proxy_busy_buffers_size 8k;

    # API endpoints (rate limited)
    location /api/ {
        limit_req zone=api burst=20 nodelay;
        proxy_pass http://backend;
    }

    # All other requests
    location / {
        limit_req zone=general burst=5 nodelay;
        proxy_pass http://backend;
    }

    # Health check (internal)
    location /health {
        access_log off;
        proxy_pass http://backend/health;
    }

    # Metrics endpoint (if exposed)
    location /metrics {
        access_log off;
        deny all;  # Only allow from monitoring systems
        # allow 10.116.0.0/16;  # Uncomment to allow from VPC
        proxy_pass http://backend/metrics;
    }
}