Initial commit: Open sourcing all of the Maple Open Technologies code.

This commit is contained in:
Bartlomiej Mika 2025-12-02 14:33:08 -05:00
commit 755d54a99d
2010 changed files with 448675 additions and 0 deletions

View file

@ -0,0 +1,17 @@
# OS-specific artifacts
.DS_Store
# Environment variables
.env
# Private developer documentation
_md/*
# Developer's private notebook
private.txt
private_prod.md
private.md
private_*.md
todo.txt
private_docs
private_docs/*

View file

@ -0,0 +1,140 @@
# Application
APP_ENVIRONMENT=development
APP_VERSION=0.1.0
APP_DATA_DIRECTORY=./data
# Server
SERVER_HOST=0.0.0.0
SERVER_PORT=8000
SERVER_READ_TIMEOUT=30s
SERVER_WRITE_TIMEOUT=30s
SERVER_IDLE_TIMEOUT=60s
SERVER_SHUTDOWN_TIMEOUT=10s
# ============================================================================
# Cassandra Database Configuration
# ============================================================================
# Default: Docker development (task dev)
# For running OUTSIDE Docker (./maplefile-backend daemon):
# Change to: DATABASE_HOSTS=localhost:9042
# Note: Uses shared infrastructure at monorepo/cloud/infrastructure/development
# The shared dev cluster has 3 nodes: cassandra-1, cassandra-2, cassandra-3
DATABASE_HOSTS=cassandra-1,cassandra-2,cassandra-3
DATABASE_KEYSPACE=maplefile
DATABASE_CONSISTENCY=QUORUM
DATABASE_USERNAME=
DATABASE_PASSWORD=
DATABASE_MIGRATIONS_PATH=./migrations
DATABASE_AUTO_MIGRATE=true
DATABASE_CONNECT_TIMEOUT=10s
DATABASE_REQUEST_TIMEOUT=5s
DATABASE_REPLICATION=3
DATABASE_MAX_RETRIES=3
DATABASE_RETRY_DELAY=1s
# ============================================================================
# Redis Cache Configuration
# ============================================================================
# Default: Docker development (task dev)
# For running OUTSIDE Docker (./maplefile-backend daemon):
# Change to: CACHE_HOST=localhost
# Note: Uses shared infrastructure at monorepo/cloud/infrastructure/development
CACHE_HOST=redis
CACHE_PORT=6379
CACHE_PASSWORD=
CACHE_DB=0
# ============================================================================
# S3 Object Storage Configuration (SeaweedFS)
# ============================================================================
# Default: Docker development (task dev) with SeaweedFS
# For running OUTSIDE Docker with SeaweedFS:
# Change to: S3_ENDPOINT=http://localhost:8333
# For AWS S3:
# S3_ENDPOINT can be left empty or set to https://s3.amazonaws.com
# For S3-compatible services (DigitalOcean Spaces, MinIO, etc.):
# S3_ENDPOINT should be the service endpoint
# Note: Uses shared infrastructure at monorepo/cloud/infrastructure/development
# SeaweedFS development settings (accepts any credentials):
# Using nginx-s3-proxy on port 8334 for CORS-enabled access from frontend
S3_ENDPOINT=http://seaweedfs:8333
S3_PUBLIC_ENDPOINT=http://localhost:8334
S3_ACCESS_KEY=any
S3_SECRET_KEY=any
S3_BUCKET=maplefile
S3_REGION=us-east-1
S3_USE_SSL=false
# S3_USE_PATH_STYLE: true for SeaweedFS/MinIO (dev), false for DigitalOcean Spaces/AWS S3 (prod)
S3_USE_PATH_STYLE=true
# JWT Authentication
JWT_SECRET=change-me-in-production
JWT_ACCESS_TOKEN_DURATION=15m
# JWT_REFRESH_TOKEN_DURATION: Default 168h (7 days). For enhanced security, consider 24h-48h.
# Shorter durations require more frequent re-authentication but limit token exposure window.
JWT_REFRESH_TOKEN_DURATION=168h
JWT_SESSION_DURATION=24h
JWT_SESSION_CLEANUP_INTERVAL=1h
# Email (Mailgun)
MAILGUN_API_KEY=
MAILGUN_DOMAIN=
MAILGUN_API_BASE=https://api.mailgun.net/v3
MAILGUN_FROM_EMAIL=noreply@maplefile.app
MAILGUN_FROM_NAME=MapleFile
MAILGUN_FRONTEND_URL=http://localhost:3000
MAILGUN_MAINTENANCE_EMAIL=your@email_address.com
MAILGUN_FRONTEND_DOMAIN=127.0.0.1:3000
MAILGUN_BACKEND_DOMAIN=127.0.0.1:8000
# Observability
OBSERVABILITY_ENABLED=true
OBSERVABILITY_PORT=9090
OBSERVABILITY_HEALTH_TIMEOUT=5s
OBSERVABILITY_METRICS_ENABLED=true
OBSERVABILITY_HEALTH_ENABLED=true
OBSERVABILITY_DETAILED_HEALTH=false
# Logging
LOG_LEVEL=info
LOG_FORMAT=json
LOG_STACKTRACE=false
LOG_CALLER=true
# Security
SECURITY_GEOLITE_DB_PATH=./data/GeoLite2-Country.mmdb
SECURITY_BANNED_COUNTRIES=
SECURITY_RATE_LIMIT_ENABLED=true
SECURITY_IP_BLOCK_ENABLED=true
# ============================================================================
# Leader Election Configuration
# ============================================================================
# Enable leader election for multi-instance deployments (load balancer)
# When enabled, only ONE instance becomes the leader and executes scheduled tasks
# Uses Redis for distributed coordination (no additional infrastructure needed)
LEADER_ELECTION_ENABLED=true
LEADER_ELECTION_LOCK_TTL=10s
LEADER_ELECTION_HEARTBEAT_INTERVAL=3s
LEADER_ELECTION_RETRY_INTERVAL=2s
# ============================================================================
# Invite Email Configuration
# ============================================================================
# Maximum invitation emails a user can send per day to non-registered users
# Conservative limit to protect email domain reputation
MAPLEFILE_INVITE_MAX_EMAILS_PER_DAY=3
# ============================================================================
# Login Rate Limiting Configuration
# ============================================================================
# Controls brute-force protection for login attempts
# IP-based: Limits total login attempts from a single IP address
# Account-based: Limits failed attempts per account before lockout
#
# Development: More lenient limits (50 attempts per IP)
# Production: Consider stricter limits (10-20 attempts per IP)
LOGIN_RATE_LIMIT_MAX_ATTEMPTS_PER_IP=50
LOGIN_RATE_LIMIT_IP_WINDOW=15m
LOGIN_RATE_LIMIT_MAX_FAILED_PER_ACCOUNT=10
LOGIN_RATE_LIMIT_LOCKOUT_DURATION=30m

241
cloud/maplefile-backend/.gitignore vendored Normal file
View file

@ -0,0 +1,241 @@
#—————————
# OSX
#—————————
.DS_Store
.AppleDouble
.LSOverride
# Icon must end with two \r
Icon
# Thumbnails
._*
# Files that might appear on external disk
.Spotlight-V100
.Trashes
# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
#—————————
# WINDOWS
#—————————
# Windows image file caches
Thumbs.db
ehthumbs.db
# Folder config file
Desktop.ini
# Recycle Bin used on file shares
$RECYCLE.BIN/
# Windows Installer files
*.cab
*.msi
*.msm
*.msp
#—————————
# LINUX
#—————————
# KDE directory preferences
.directory
.idea # PyCharm
*/.idea/
#—————————
# Python
#—————————
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# pyenv
.python-version
# celery beat schedule file
celerybeat-schedule
# SageMath parsed files
*.sage.py
# dotenv
.env
# virtualenv
.venv
venv/
ENV/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
#—————————————————————————————
# Python VirtualEnv Directory
#—————————————————————————————
# Important Note: Make sure this is the name of the virtualenv directory
# that you set when you were setting up the project.
env/
env/*
env
.env
*.cfg
env/pip-selfcheck.json
*.csv#
.env.production
.env.prod
.env.qa
#—————————
# GOLANG
#—————————
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Test binary, built with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Dependency directories (remove the comment below to include it)
# vendor/
#—————————————————————————————
# Application Specific Ignores
#—————————————————————————————
# Do not share production data used to populate the project's database.
data
badgerdb_data
# Do not share developer's private notebook
private.txt
private_prod.md
private.md
private_*.md
todo.txt
private_docs
private_docs/*
# Do not share some templates
static/Pedigree.pdf
# Executable
bin/
maplefile-backend
# Do not store the keystore
static/keystore
# Do not share our GeoLite database.
GeoLite2-Country.mmdb
# Do not save the `crev` text output
crev-project.txt
# Blacklist - Don't share items we banned from the server.
static/blacklist/ips.json
static/blacklist/urls.json
internal/static/blacklist/ips.json
internal/static/blacklist/urls.json
static/cassandra-jdbc-wrapper-*
# Do not save our temporary files.
tmp
# Temporary - don't save one module yet.
internal/ipe.zip
internal/papercloud.zip
# Do not share private developer documentation
_md/*

View file

# Multi-stage build for MapleFile Backend
# Stage 1: Build the Go binary
FROM golang:1.25.4-alpine AS builder
# Install build dependencies
RUN apk add --no-cache git ca-certificates tzdata
# Set working directory
WORKDIR /app
# Copy go mod files first so dependency downloads are cached in their own layer
COPY go.mod go.sum ./
# Download dependencies
RUN go mod download
# Copy source code
COPY . .
# Build arguments for version tracking
ARG GIT_COMMIT=unknown
ARG BUILD_TIME=unknown
# Build the binary with optimizations
# CGO_ENABLED=0 for a static binary
# -ldflags: -s (strip symbol table) -w (strip DWARF debug info)
# Embed git commit and build time for version tracking
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build \
    -ldflags="-s -w -X main.Version=0.1.0 -X main.GitCommit=${GIT_COMMIT} -X main.BuildTime=${BUILD_TIME}" \
    -o maplefile-backend \
    .
# Verify the binary works
RUN ./maplefile-backend version
# Stage 2: Create minimal runtime image
# Pinned base image (was alpine:latest) so builds are reproducible and
# base-image upgrades are deliberate rather than silent.
FROM alpine:3.22
# Install runtime dependencies and debugging tools
RUN apk --no-cache add \
    ca-certificates \
    tzdata \
    curl \
    wget \
    bash \
    bind-tools \
    iputils \
    netcat-openbsd \
    busybox-extras \
    strace \
    procps \
    htop \
    nano \
    vim
# DEVELOPERS NOTE:
# Network Debugging:
# - bind-tools - DNS utilities (dig, nslookup, host)
# - iputils - Network utilities (ping, traceroute)
# - netcat-openbsd - TCP/UDP connection testing (nc command)
# - busybox-extras - Additional networking tools (telnet, etc.)
#
# Process Debugging:
# - strace - System call tracer (debug what the app is doing)
# - procps - Process utilities (ps, top, etc.)
# - htop - Interactive process viewer
#
# Shell & Editing:
# - bash - Full bash shell (better than ash)
# - nano - Simple text editor
# - vim - Advanced text editor
# File Transfer:
# - wget - Download files (alternative to curl)
# Create non-root user so the app does not run as root
RUN addgroup -g 1000 maplefile && \
    adduser -D -u 1000 -G maplefile maplefile
# Set working directory
WORKDIR /app
# Copy binary from builder
COPY --from=builder /app/maplefile-backend .
# Copy migrations (the daemon auto-migrates on startup)
COPY --from=builder /app/migrations ./migrations
# Create data directory and hand ownership to the runtime user
RUN mkdir -p /app/data && \
    chown -R maplefile:maplefile /app
# Switch to non-root user
USER maplefile
# Expose port
EXPOSE 8000
# Health check against the backend's /health endpoint (curl installed above)
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD curl -f http://localhost:8000/health || exit 1
# Default command
CMD ["./maplefile-backend", "daemon"]

View file

@ -0,0 +1,496 @@
# 🚀 MapleFile Backend
> Secure, end-to-end encrypted file storage backend - Zero-knowledge architecture built with Go.
MapleFile provides military-grade file encryption with client-side E2EE (End-to-End Encryption). Features include collection-based organization, granular sharing permissions, JWT authentication, and S3-compatible object storage. Your files are encrypted on your device before reaching our servers - we never see your data.
## 📋 Prerequisites
**⚠️ Required:** You must have the infrastructure running first.
If you haven't set up the infrastructure yet:
1. Go to [`../infrastructure/README.md`](../infrastructure/README.md)
2. Follow the setup instructions
3. Come back here once infrastructure is running
**Verify infrastructure is healthy:**
```bash
cd cloud/infrastructure/development
task dev:status
# All services should show (healthy)
```
## 🏁 Getting Started
### Installation
```bash
# From the monorepo root:
cd cloud/maplefile-backend
# Create environment file:
cp .env.sample .env
# Start the backend:
task dev
```
The backend runs at **http://localhost:8000**
### Verify Installation
Open a **new terminal** (leave `task dev` running):
```bash
curl http://localhost:8000/health
# Should return: {"status":"healthy","service":"maplefile-backend","di":"Wire"}
```
> **Note:** Your first terminal shows backend logs. Keep it running and use a second terminal for testing.
## 💻 Developing
### Initial Configuration
**Environment Files:**
- **`.env.sample`** - Template with defaults (committed to git)
- **`.env`** - Your local configuration (git-ignored, created from `.env.sample`)
- Use **only `.env`** for configuration (docker-compose loads this file)
The `.env` file defaults work for Docker development. **Optional:** Change `BACKEND_APP_JWT_SECRET` to a random string (use a password generator).
### Running in Development Mode
```bash
# Start backend with hot-reload
task dev
# View logs (in another terminal)
docker logs -f maplefile-backend-dev
# Stop backend
task dev:down
# Or press Ctrl+C in the task dev terminal
```
**What happens when you run `task dev`:**
- Docker starts the backend container
- Auto-migrates database tables
- Starts HTTP server on port 8000
- Enables hot-reload (auto-restarts on code changes)
Wait for: `✅ Database migrations completed successfully` in the logs
### Daily Workflow
```bash
# Morning - check infrastructure (from monorepo root)
cd cloud/infrastructure/development && task dev:status
# Start backend (from monorepo root)
cd cloud/maplefile-backend && task dev
# Make code changes - backend auto-restarts
# Stop backend when done
# Press Ctrl+C
```
### Testing
```bash
# Run all tests
task test
# Code quality checks
task format # Format code
task lint # Run linters
```
### Database Operations
**View database:**
```bash
# From monorepo root
cd cloud/infrastructure/development
task cql
# Inside cqlsh:
USE maplefile;
DESCRIBE TABLES;
SELECT * FROM users_by_id;
```
**Reset database (⚠️ deletes all data):**
```bash
task db:clear
```
## 🔧 Usage
### Testing the API
Create a test user to verify the backend works:
**1. Register a user:**
```bash
curl -X POST http://localhost:8000/api/v1/auth/register \
-H "Content-Type: application/json" \
-d '{
"email": "test@example.com",
"first_name": "Test",
"last_name": "User",
"phone": "+1234567890",
"country": "Canada",
"timezone": "America/Toronto",
"salt": "base64-encoded-salt",
"kdf_algorithm": "argon2id",
"kdf_iterations": 3,
"kdf_memory": 65536,
"kdf_parallelism": 4,
"kdf_salt_length": 16,
"kdf_key_length": 32,
"encryptedMasterKey": "base64-encoded-encrypted-master-key",
"publicKey": "base64-encoded-public-key",
"encryptedPrivateKey": "base64-encoded-encrypted-private-key",
"encryptedRecoveryKey": "base64-encoded-encrypted-recovery-key",
"masterKeyEncryptedWithRecoveryKey": "base64-encoded-master-key-encrypted-with-recovery",
"agree_terms_of_service": true,
"agree_promotions": false,
"agree_to_tracking_across_third_party_apps_and_services": false
}'
```
> **Note:** MapleFile uses end-to-end encryption. The frontend (maplefile-frontend) handles all cryptographic operations. For manual API testing, you'll need to generate valid encryption keys using libsodium. See the frontend registration implementation for reference.
**Response:**
```json
{
"message": "Registration successful. Please check your email to verify your account.",
"user_id": "uuid-here"
}
```
**2. Verify email:**
Check your email for the verification code, then:
```bash
curl -X POST http://localhost:8000/api/v1/auth/verify-email \
-H "Content-Type: application/json" \
-d '{
"email": "test@example.com",
"verification_code": "123456"
}'
```
**3. Login:**
```bash
curl -X POST http://localhost:8000/api/v1/auth/login \
-H "Content-Type: application/json" \
-d '{
"email": "test@example.com"
}'
```
Check your email for the OTP (One-Time Password), then complete login:
```bash
curl -X POST http://localhost:8000/api/v1/auth/login/verify-otp \
-H "Content-Type: application/json" \
-d '{
"email": "test@example.com",
"otp": "your-otp-code",
"encrypted_challenge": "base64-encoded-challenge-response"
}'
```
**Response:**
```json
{
"access_token": "eyJhbGci...",
"refresh_token": "eyJhbGci...",
"access_expiry": "2025-11-12T13:00:00Z",
"refresh_expiry": "2025-11-19T12:00:00Z"
}
```
Save the `access_token` from the response:
```bash
export TOKEN="eyJhbGci...your-access-token-here"
```
**4. Get your profile:**
```bash
curl http://localhost:8000/api/v1/me \
-H "Authorization: JWT $TOKEN"
```
**5. Get dashboard:**
```bash
curl http://localhost:8000/api/v1/dashboard \
-H "Authorization: JWT $TOKEN"
```
**6. Create a collection (folder):**
```bash
curl -X POST http://localhost:8000/api/v1/collections \
-H "Content-Type: application/json" \
-H "Authorization: JWT $TOKEN" \
-d '{
"name": "My Documents",
"description": "Personal documents",
"collection_type": "folder",
"encrypted_collection_key": "base64-encoded-encrypted-key"
}'
```
**7. Upload a file:**
```bash
# First, get a presigned URL
curl -X POST http://localhost:8000/api/v1/files/presigned-url \
-H "Content-Type: application/json" \
-H "Authorization: JWT $TOKEN" \
-d '{
"file_name": "document.pdf",
"file_size": 1024000,
"mime_type": "application/pdf",
"collection_id": "your-collection-id"
}'
# Upload the encrypted file to the presigned URL (using the URL from response)
curl -X PUT "presigned-url-here" \
--upload-file your-encrypted-file.enc
# Report upload completion
curl -X POST http://localhost:8000/api/v1/files/upload-complete \
-H "Content-Type: application/json" \
-H "Authorization: JWT $TOKEN" \
-d '{
"file_id": "file-id-from-presigned-response",
"status": "completed"
}'
```
### Frontend Integration
**Access the frontend:**
- URL: http://localhost:5173
- The frontend handles all encryption/decryption automatically
- See [`../../web/maplefile-frontend/README.md`](../../web/maplefile-frontend/README.md)
**Key Features:**
- 🔐 **Client-side encryption** - Files encrypted before upload
- 🔑 **E2EE Key Chain** - Password → KEK → Master Key → Collection Keys → File Keys
- 📁 **Collections** - Organize files in encrypted folders
- 🤝 **Sharing** - Share collections with read-only, read-write, or admin permissions
- 🔄 **Sync modes** - Cloud-only, local-only, or hybrid storage
**Next steps:**
- Frontend setup: [`../../web/maplefile-frontend/README.md`](../../web/maplefile-frontend/README.md)
- Complete API documentation: See API endpoints in code
## ⚙️ Configuration
### Environment Variables
Key variables in `.env`:
| Variable | Default | Description |
|----------|---------|-------------|
| `BACKEND_APP_JWT_SECRET` | `change-me-in-production` | Secret for JWT token signing |
| `BACKEND_APP_SERVER_PORT` | `8000` | HTTP server port |
| `BACKEND_DB_HOSTS` | `cassandra-1,cassandra-2,cassandra-3` | Cassandra cluster nodes |
| `BACKEND_CACHE_HOST` | `redis` | Redis cache host |
| `BACKEND_MAPLEFILE_S3_ENDPOINT` | `http://seaweedfs:8333` | S3 storage URL |
| `BACKEND_MAPLEFILE_S3_BUCKET` | `maplefile` | S3 bucket name |
**Docker vs Local:**
- Docker: Uses container names (`cassandra-1`, `redis`, `seaweedfs`)
- Local: Change to `localhost`
See `.env.sample` for complete documentation.
### Task Commands
| Command | Description |
|---------|-------------|
| `task dev` | Start backend (auto-migrate + hot-reload) |
| `task dev:down` | Stop backend |
| `task test` | Run tests |
| `task format` | Format code |
| `task lint` | Run linters |
| `task db:clear` | Reset database (⚠️ deletes data) |
| `task migrate:up` | Manual migration |
| `task build` | Build binary |
## 🔍 Troubleshooting
### Backend won't start - "connection refused"
**Error:** `dial tcp 127.0.0.1:9042: connect: connection refused`
**Cause:** `.env` file has `localhost` instead of container names.
**Fix:**
```bash
cd cloud/maplefile-backend
rm .env
cp .env.sample .env
task dev
```
### Infrastructure not running
**Error:** Cassandra or Redis not available
**Fix:**
```bash
cd cloud/infrastructure/development
task dev:start
task dev:status # Wait until all show (healthy)
```
### Port 8000 already in use
**Fix:**
```bash
lsof -i :8000 # Find what's using the port
# Stop the other service, or change BACKEND_APP_SERVER_PORT in .env
```
### Token expired (401 errors)
Access tokens expire after 15 minutes (see `JWT_ACCESS_TOKEN_DURATION` in `.env.sample`). Re-run the [login steps](#testing-the-api) to get a new token.
### Database keyspace not found
**Error:** `Keyspace 'maplefile' does not exist` or `failed to create user`
**Cause:** The Cassandra keyspace hasn't been created yet. This is a one-time infrastructure setup.
**Fix:**
```bash
# Initialize the keyspace (one-time setup)
cd cloud/infrastructure/development
# Find Cassandra container
export CASSANDRA_CONTAINER=$(docker ps --filter "name=cassandra" -q | head -1)
# Create keyspace
docker exec -it $CASSANDRA_CONTAINER cqlsh -e "
CREATE KEYSPACE IF NOT EXISTS maplefile
WITH replication = {
'class': 'SimpleStrategy',
'replication_factor': 3
};"
# Verify keyspace exists
docker exec -it $CASSANDRA_CONTAINER cqlsh -e "DESCRIBE KEYSPACE maplefile;"
# Restart backend to retry migrations
cd ../../maplefile-backend
task dev:restart
```
**Note:** The backend auto-migrates tables on startup, but expects the keyspace to already exist. This is standard practice - keyspaces are infrastructure setup, not application migrations.
## 🛠️ Technology Stack
- **Go 1.23+** - Programming language
- **Clean Architecture** - Code organization
- **Wire** - Dependency injection (Google's code generation)
- **Cassandra 5.0.4** - Distributed database (3-node cluster)
- **Redis 7** - Caching layer
- **SeaweedFS** - S3-compatible object storage
- **JWT** - User authentication
- **ChaCha20-Poly1305** - Authenticated encryption (client-side)
- **Argon2id** - Password hashing / KDF
## 🌐 Services
When you run MapleFile, these services are available:
| Service | Port | Purpose | Access |
|---------|------|---------|--------|
| MapleFile Backend | 8000 | HTTP API | http://localhost:8000 |
| MapleFile Frontend | 5173 | Web UI | http://localhost:5173 |
| Cassandra | 9042 | Database | `task cql` (from infrastructure dir) |
| Redis | 6379 | Cache | `task redis` (from infrastructure dir) |
| SeaweedFS S3 | 8333 | Object storage | http://localhost:8333 |
| SeaweedFS UI | 9333 | Storage admin | http://localhost:9333 |
## 🏗️ Architecture
### Project Structure
```
maplefile-backend/
├── cmd/ # CLI commands (daemon, migrate, version)
├── config/ # Configuration loading
├── internal/ # Application code
│ ├── app/ # Wire application wiring
│ ├── domain/ # Domain entities
│ │ ├── collection/ # Collections (folders)
│ │ ├── crypto/ # Encryption types
│ │ ├── file/ # File metadata
│ │ ├── user/ # User accounts
│ │ └── ...
│ ├── repo/ # Repository implementations (Cassandra)
│ ├── usecase/ # Use cases / business logic
│ ├── service/ # Service layer
│ └── interface/ # HTTP handlers
│ └── http/ # REST API endpoints
├── pkg/ # Shared infrastructure
│ ├── storage/ # Database, cache, S3, memory
│ ├── security/ # JWT, encryption, password hashing
│ └── emailer/ # Email sending
├── migrations/ # Cassandra schema migrations
└── docs/ # Documentation
```
### Key Features
- **🔐 Zero-Knowledge Architecture**: Files encrypted on client, server never sees plaintext
- **🔑 E2EE Key Chain**: User Password → KEK → Master Key → Collection Keys → File Keys
- **📦 Storage Modes**: `encrypted_only`, `hybrid`, `decrypted_only`
- **🤝 Collection Sharing**: `read_only`, `read_write`, `admin` permissions
- **💾 Two-Tier Caching**: Redis + Cassandra-based cache
- **📊 Storage Quotas**: 10GB default per user
- **🔄 File Versioning**: Soft delete with tombstone tracking
### End-to-End Encryption Flow
```
1. User enters password → Frontend derives KEK (Key Encryption Key)
2. KEK → Encrypts/decrypts Master Key (stored encrypted on server)
3. Master Key → Encrypts/decrypts Collection Keys
4. Collection Key → Encrypts/decrypts File Keys
5. File Key → Encrypts/decrypts actual file content
Server only stores:
- Encrypted Master Key (encrypted with KEK from password)
- Encrypted Collection Keys (encrypted with Master Key)
- Encrypted File Keys (encrypted with Collection Key)
- Encrypted file content (encrypted with File Key)
Server NEVER has access to:
- User's password
- KEK (derived from password on client)
- Decrypted Master Key
- Decrypted Collection Keys
- Decrypted File Keys
- Plaintext file content
```
## 🔗 Links
- **Frontend Application:** [`../../web/maplefile-frontend/README.md`](../../web/maplefile-frontend/README.md)
- **CLI Tool:** [`../../native/desktop/maplefile/README.md`](../../native/desktop/maplefile/README.md)
- **Architecture Details:** [`../../CLAUDE.md`](../../CLAUDE.md)
- **Repository:** [Codeberg - mapleopentech/monorepo](https://codeberg.org/mapleopentech/monorepo)
## 🤝 Contributing
Found a bug? Want a feature to improve MapleFile? Please create an [issue](https://codeberg.org/mapleopentech/monorepo/issues/new).
## 📝 License
This application is licensed under the [**GNU Affero General Public License v3.0**](https://opensource.org/license/agpl-v3). See [LICENSE](../../LICENSE) for more information.

View file

# Taskfile for the MapleFile backend (https://taskfile.dev, schema v3).
# Development tasks assume the shared infrastructure (Cassandra, Redis,
# SeaweedFS) from ../infrastructure/development is already running.
version: "3"

env:
  # Keeps docker-compose resource names stable across machines
  COMPOSE_PROJECT_NAME: maplefile

# Variables for Docker Compose command detection
vars:
  DOCKER_COMPOSE_CMD:
    # Prefer the standalone docker-compose binary, fall back to the
    # `docker compose` plugin; defaults to "docker-compose" if neither
    # probe succeeds (the task will then fail with a clear error).
    sh: |
      if command -v docker-compose >/dev/null 2>&1; then
        echo "docker-compose"
      elif docker compose version >/dev/null 2>&1; then
        echo "docker compose"
      else
        echo "docker-compose"
      fi

tasks:
  # Development workflow (requires infrastructure)
  dev:
    desc: Start app in development mode (requires infrastructure running)
    deps: [dev:check-infra]
    cmds:
      - "{{.DOCKER_COMPOSE_CMD}} -f docker-compose.dev.yml up --build"
      - echo "Press Ctrl+C to stop"

  dev:down:
    desc: Stop development app
    cmds:
      - "{{.DOCKER_COMPOSE_CMD}} -f docker-compose.dev.yml down"

  dev:restart:
    desc: Quick restart (fast!)
    cmds:
      - "{{.DOCKER_COMPOSE_CMD}} -f docker-compose.dev.yml restart"
      - echo "✅ MapleFile backend restarted"

  dev:logs:
    desc: View app logs
    cmds:
      - "{{.DOCKER_COMPOSE_CMD}} -f docker-compose.dev.yml logs -f"

  dev:shell:
    desc: Open shell in running container
    cmds:
      - docker exec -it maplefile-backend-dev sh

  dev:check-infra:
    desc: Verify infrastructure is running
    silent: true
    cmds:
      # Fails fast with guidance when the shared dev network or the
      # first Cassandra node is missing.
      - |
        if ! docker network inspect maple-dev >/dev/null 2>&1; then
          echo "❌ Infrastructure not running!"
          echo ""
          echo "Start it with:"
          echo "  cd ../infrastructure/development && task dev:start"
          echo ""
          exit 1
        fi
        if ! docker ps | grep -q maple-cassandra-1-dev; then
          echo "❌ Cassandra not running!"
          echo ""
          echo "Start it with:"
          echo "  cd ../infrastructure/development && task dev:start"
          echo ""
          exit 1
        fi
        echo "✅ Infrastructure is running"

  # Database operations
  # NOTE(review): these run the locally built binary — build first
  # (db:clear / db:reset declare the dependency; migrate:* do not).
  migrate:up:
    desc: Run all migrations up
    cmds:
      - ./maplefile-backend migrate up

  migrate:down:
    desc: Run all migrations down
    cmds:
      - ./maplefile-backend migrate down

  migrate:create:
    desc: Create new migration (usage task migrate:create -- create_users)
    cmds:
      - ./maplefile-backend migrate create {{.CLI_ARGS}}

  db:clear:
    desc: Clear Cassandra database (drop and recreate keyspace)
    deps: [build]
    cmds:
      - echo "⚠️  Dropping keyspace 'maplefile'..."
      - docker exec maple-cassandra-1-dev cqlsh -e "DROP KEYSPACE IF EXISTS maplefile;"
      - echo "✅ Keyspace dropped"
      - echo "🔄 Running migrations to recreate schema..."
      - ./maplefile-backend migrate up
      - echo "✅ Database cleared and recreated"

  db:reset:
    desc: Reset database using migrations (down then up)
    deps: [build]
    cmds:
      - echo "🔄 Running migrations down..."
      - ./maplefile-backend migrate down
      - echo "🔄 Running migrations up..."
      - ./maplefile-backend migrate up
      - echo "✅ Database reset complete"

  # Build and test
  build:
    desc: Build the Go binary
    cmds:
      - go build -o maplefile-backend .

  test:
    desc: Run tests
    cmds:
      - go test ./... -v

  test:short:
    desc: Run short tests only
    cmds:
      - go test ./... -short

  lint:
    desc: Run linters
    cmds:
      - go vet ./...

  vulncheck:
    desc: Check for known vulnerabilities in dependencies
    cmds:
      - go run golang.org/x/vuln/cmd/govulncheck ./...

  nilaway:
    desc: Run nilaway static analysis for nil pointer dereferences
    cmds:
      - go run go.uber.org/nilaway/cmd/nilaway ./...

  format:
    desc: Format code
    cmds:
      - go fmt ./...

  tidy:
    desc: Tidy Go modules
    cmds:
      - go mod tidy

  wire:
    desc: Generate dependency injection code using Wire
    cmds:
      # NOTE(review): README's project layout shows internal/app — confirm
      # that `./app` is the correct Wire target path.
      - wire ./app
      - echo "✅ Wire dependency injection code generated"

  clean:
    desc: Clean build artifacts
    cmds:
      - rm -f maplefile-backend

  deploy:
    desc: (DevOps only) Command will build the production container of this project and deploy to the private docker container registry.
    vars:
      GIT_COMMIT:
        sh: git rev-parse --short HEAD
      GIT_COMMIT_FULL:
        sh: git rev-parse HEAD
      BUILD_TIME:
        sh: date -u '+%Y-%m-%dT%H:%M:%SZ'
    cmds:
      - echo "Building version {{.GIT_COMMIT}} at {{.BUILD_TIME}}"
      # Tags both :prod and the short commit hash; the full hash is
      # embedded in the binary via the Dockerfile's GIT_COMMIT build arg.
      - docker build -f Dockerfile --rm
        --build-arg GIT_COMMIT={{.GIT_COMMIT_FULL}}
        --build-arg BUILD_TIME={{.BUILD_TIME}}
        -t registry.digitalocean.com/ssp/maplefile-backend:prod
        -t registry.digitalocean.com/ssp/maplefile-backend:{{.GIT_COMMIT}}
        --platform linux/amd64 .
      - docker push registry.digitalocean.com/ssp/maplefile-backend:prod
      - docker push registry.digitalocean.com/ssp/maplefile-backend:{{.GIT_COMMIT}}
      - echo "Deployed version {{.GIT_COMMIT}} - use this to verify on production"

View file

@ -0,0 +1,139 @@
package app
import (
"context"
"fmt"
"os"
"os/signal"
"syscall"
"time"
"github.com/gocql/gocql"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/scheduler"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/scheduler/tasks"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/storage/database/cassandradb"
)
// Application represents the main application, assembled via Google Wire
// dependency injection (see ProvideApplication). Start drives its lifecycle.
type Application struct {
	config              *config.Config             // loaded application configuration
	httpServer          *http.WireServer           // HTTP API server, started in a goroutine by Start
	logger              *zap.Logger                // structured logger
	migrator            *cassandradb.Migrator      // Cassandra schema migrator, run on Start when auto-migrate is enabled
	scheduler           *scheduler.Scheduler       // scheduled-task runner, started after migrations
	ipAnonymizationTask *tasks.IPAnonymizationTask // task registered with the scheduler during Start
	dbSession           *gocql.Session             // Cassandra session, used to await schema agreement after migrations
}
// ProvideApplication is the Wire provider that assembles the Application
// from its injected dependencies. It only wires the struct together;
// all startup work happens in Start.
func ProvideApplication(
	cfg *config.Config,
	httpServer *http.WireServer,
	logger *zap.Logger,
	migrator *cassandradb.Migrator,
	sched *scheduler.Scheduler,
	ipAnonymizationTask *tasks.IPAnonymizationTask,
	dbSession *gocql.Session,
) *Application {
	app := new(Application)
	app.config = cfg
	app.httpServer = httpServer
	app.logger = logger
	app.migrator = migrator
	app.scheduler = sched
	app.ipAnonymizationTask = ipAnonymizationTask
	app.dbSession = dbSession
	return app
}
// Start starts the application.
//
// Boot order: optional Cassandra migrations (plus schema-agreement wait),
// scheduled-task registration, scheduler start, then the HTTP server in a
// goroutine. It then blocks until either the server reports an error or a
// SIGINT/SIGTERM arrives, and performs a graceful shutdown (scheduler first,
// then HTTP server with a 30s timeout).
//
// Returns a non-nil error if migrations, task registration, scheduler
// startup, HTTP server startup, or HTTP server shutdown fail.
func (app *Application) Start() error {
	app.logger.Info("🚀 MapleFile Backend Starting (Wire DI)",
		zap.String("version", app.config.App.Version),
		zap.String("environment", app.config.App.Environment),
		zap.String("di_framework", "Google Wire"))

	// Run database migrations automatically on startup if enabled.
	if app.config.Database.AutoMigrate {
		app.logger.Info("Auto-migration enabled, running database migrations...")
		if err := app.migrator.Up(); err != nil {
			app.logger.Error("Failed to run database migrations", zap.Error(err))
			return fmt.Errorf("migration failed: %w", err)
		}
		app.logger.Info("✅ Database migrations completed successfully")

		// Wait for schema agreement across all Cassandra nodes.
		// This ensures all nodes have the new schema before we start
		// accepting requests. A failure here is only logged: the app can
		// still serve, at the risk of transient errors on first requests.
		app.logger.Info("⏳ Waiting for Cassandra schema agreement...")
		schemaCtx, schemaCancel := context.WithTimeout(context.Background(), 60*time.Second)
		if err := app.dbSession.AwaitSchemaAgreement(schemaCtx); err != nil {
			app.logger.Warn("Schema agreement wait failed, continuing anyway",
				zap.Error(err),
				zap.String("note", "This may cause transient errors on first requests"))
		} else {
			app.logger.Info("✅ Cassandra schema agreement reached")
		}
		// Release the timeout context now rather than deferring, so it is
		// not held open for the entire lifetime of the application.
		schemaCancel()
	} else {
		app.logger.Info("Auto-migration disabled (DATABASE_AUTO_MIGRATE=false), skipping migrations")
	}

	// Register scheduled tasks.
	app.logger.Info("Registering scheduled tasks...")
	if err := app.scheduler.RegisterTask(app.ipAnonymizationTask); err != nil {
		app.logger.Error("Failed to register IP anonymization task", zap.Error(err))
		return fmt.Errorf("task registration failed: %w", err)
	}

	// Start scheduler.
	if err := app.scheduler.Start(); err != nil {
		app.logger.Error("Failed to start scheduler", zap.Error(err))
		return fmt.Errorf("scheduler startup failed: %w", err)
	}

	// Start HTTP server in goroutine; startup/serve errors arrive on errChan.
	errChan := make(chan error, 1)
	go func() {
		if err := app.httpServer.Start(); err != nil {
			errChan <- err
		}
	}()

	// Wait for interrupt signal or server error.
	quit := make(chan os.Signal, 1)
	signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
	select {
	case err := <-errChan:
		app.logger.Error("HTTP server failed", zap.Error(err))
		// The scheduler was already started above; stop it so its background
		// work does not leak when we bail out on a server failure.
		if stopErr := app.scheduler.Stop(); stopErr != nil {
			app.logger.Error("Scheduler shutdown error", zap.Error(stopErr))
		}
		return fmt.Errorf("server startup failed: %w", err)
	case sig := <-quit:
		app.logger.Info("Received shutdown signal", zap.String("signal", sig.String()))
	}

	app.logger.Info("👋 MapleFile Backend Shutting Down")

	// Graceful shutdown with timeout.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Stop scheduler first so no new background work races the server stop.
	app.logger.Info("Stopping scheduler...")
	if err := app.scheduler.Stop(); err != nil {
		app.logger.Error("Scheduler shutdown error", zap.Error(err))
		// Continue with shutdown even if scheduler fails.
	}

	// Stop HTTP server.
	if err := app.httpServer.Shutdown(ctx); err != nil {
		app.logger.Error("Server shutdown error", zap.Error(err))
		return fmt.Errorf("server shutdown failed: %w", err)
	}

	app.logger.Info("✅ MapleFile Backend Stopped Successfully")
	return nil
}

View file

@ -0,0 +1,332 @@
//go:build wireinject
// +build wireinject
package app
import (
"github.com/google/wire"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/blockedemail"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/collection"
commonhttp "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/common"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/dashboard"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/file"
http_inviteemail "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/inviteemail"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/me"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/middleware"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/tag"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/user"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/scheduler"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/scheduler/tasks"
blockedemailrepo "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/repo/blockedemail"
collectionrepo "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/repo/collection"
filemetadatarepo "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/repo/filemetadata"
fileobjectstoragerepo "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/repo/fileobjectstorage"
inviteemailratelimitrepo "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/repo/inviteemailratelimit"
storagedailyusagerepo "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/repo/storagedailyusage"
storageusageeventrepo "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/repo/storageusageevent"
tagrepo "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/repo/tag"
userrepo "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/repo/user"
svc_auth "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/auth"
svc_blockedemail "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/blockedemail"
svc_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/collection"
svc_dashboard "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/dashboard"
svc_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/file"
svc_inviteemail "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/inviteemail"
svc_ipanonymization "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/ipanonymization"
svc_me "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/me"
svc_tag "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/tag"
svc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/user"
uc_blockedemail "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/blockedemail"
uc_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/collection"
uc_filemetadata "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/filemetadata"
uc_fileobjectstorage "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/fileobjectstorage"
uc_storagedailyusage "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/storagedailyusage"
uc_storageusageevent "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/storageusageevent"
uc_tag "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/tag"
uc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/user"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/auditlog"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/distributedmutex"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/emailer/mailgun"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/leaderelection"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/logger"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/ratelimit"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/security/jwt"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/storage/cache/cassandracache"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/storage/database/cassandradb"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/storage/memory/redis"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/storage/object/s3"
)
// InitializeApplication wires up all dependencies using Google Wire.
//
// NOTE: the parameter type must be *config.Config to match ProvideApplication
// and the generated wire_gen.go injector; it was previously declared as the
// nonexistent *config.Configuration, which breaks the wireinject build.
// The returned values are placeholders: Wire replaces this function body in
// the generated (non-wireinject) build.
func InitializeApplication(cfg *config.Config) (*Application, error) {
	wire.Build(
		// Infrastructure layer (pkg/)
		logger.ProvideLogger,
		auditlog.ProvideAuditLogger,
		cassandradb.ProvideCassandraConnection,
		cassandradb.NewMigrator,
		cassandracache.ProvideCassandraCacher,
		redis.ProvideRedisUniversalClient,
		s3.ProvideS3ObjectStorageProvider,
		jwt.ProvideJWTProvider,
		mailgun.ProvideMapleFileModuleEmailer,
		distributedmutex.ProvideDistributedMutexAdapter,
		leaderelection.ProvideLeaderElection,
		ratelimit.ProvideLoginRateLimiter,
		ratelimit.ProvideAuthFailureRateLimiter,
		middleware.ProvideRateLimitMiddleware,
		middleware.ProvideSecurityHeadersMiddleware,
		// Repository layer
		blockedemailrepo.NewBlockedEmailRepository,
		filemetadatarepo.ProvideRepository,
		fileobjectstoragerepo.ProvideRepository,
		userrepo.ProvideRepository,
		collectionrepo.ProvideRepository,
		storagedailyusagerepo.ProvideRepository,
		storageusageeventrepo.ProvideRepository,
		inviteemailratelimitrepo.ProvideRepository,
		tagrepo.ProvideTagRepository,
		// Use case layer - Collection (10 providers - only used ones)
		uc_collection.ProvideGetCollectionUseCase,
		uc_collection.ProvideUpdateCollectionUseCase,
		uc_collection.ProvideHardDeleteCollectionUseCase,
		uc_collection.ProvideCheckCollectionAccessUseCase,
		uc_collection.ProvideGetCollectionSyncDataUseCase,
		uc_collection.ProvideCountUserFoldersUseCase,
		uc_collection.ProvideAnonymizeOldIPsUseCase,
		uc_collection.ProvideListCollectionsByUserUseCase,
		uc_collection.ProvideRemoveUserFromAllCollectionsUseCase,
		uc_collection.ProvideAnonymizeUserReferencesUseCase,
		// Use case layer - File Metadata (15 providers - only used ones)
		uc_filemetadata.ProvideCreateFileMetadataUseCase,
		uc_filemetadata.ProvideGetFileMetadataUseCase,
		uc_filemetadata.ProvideGetFileMetadataByCollectionUseCase,
		uc_filemetadata.ProvideUpdateFileMetadataUseCase,
		uc_filemetadata.ProvideSoftDeleteFileMetadataUseCase,
		uc_filemetadata.ProvideHardDeleteFileMetadataUseCase,
		uc_filemetadata.ProvideCountUserFilesUseCase,
		uc_filemetadata.ProvideGetFileMetadataByOwnerIDUseCase,
		uc_filemetadata.ProvideGetFileMetadataByIDsUseCase,
		uc_filemetadata.ProvideListFileMetadataSyncDataUseCase,
		uc_filemetadata.ProvideDeleteManyFileMetadataUseCase,
		uc_filemetadata.ProvideCheckFileExistsUseCase,
		uc_filemetadata.ProvideListRecentFilesUseCase,
		uc_filemetadata.ProvideAnonymizeOldIPsUseCase,
		uc_filemetadata.ProvideAnonymizeUserReferencesUseCase,
		// Use case layer - File Object Storage (6 providers - only used ones)
		uc_fileobjectstorage.ProvideGeneratePresignedUploadURLUseCase,
		uc_fileobjectstorage.ProvideGeneratePresignedDownloadURLUseCase,
		uc_fileobjectstorage.ProvideDeleteEncryptedDataUseCase,
		uc_fileobjectstorage.ProvideDeleteMultipleEncryptedDataUseCase,
		uc_fileobjectstorage.ProvideVerifyObjectExistsUseCase,
		uc_fileobjectstorage.ProvideGetObjectSizeUseCase,
		// Use case layer - User (10 providers)
		uc_user.ProvideUserCreateUseCase,
		uc_user.ProvideUserGetByIDUseCase,
		uc_user.ProvideUserGetByEmailUseCase,
		uc_user.ProvideUserGetByVerificationCodeUseCase,
		uc_user.ProvideUserUpdateUseCase,
		uc_user.ProvideUserDeleteByIDUseCase,
		uc_user.ProvideUserStorageQuotaHelperUseCase,
		uc_user.ProvideAnonymizeOldIPsUseCase,
		uc_user.ProvideAnonymizeUserIPsImmediatelyUseCase,
		uc_user.ProvideClearUserCacheUseCase,
		// Use case layer - Blocked Email (4 providers)
		uc_blockedemail.NewCreateBlockedEmailUseCase,
		uc_blockedemail.NewListBlockedEmailsUseCase,
		uc_blockedemail.NewDeleteBlockedEmailUseCase,
		uc_blockedemail.NewCheckBlockedEmailUseCase,
		// Use case layer - Storage Daily Usage (3 providers - only used ones)
		uc_storagedailyusage.ProvideGetStorageDailyUsageTrendUseCase,
		uc_storagedailyusage.ProvideUpdateStorageUsageUseCase,
		uc_storagedailyusage.ProvideDeleteByUserUseCase,
		// Use case layer - Storage Usage Event (2 providers)
		uc_storageusageevent.ProvideCreateStorageUsageEventUseCase,
		uc_storageusageevent.ProvideDeleteByUserUseCase,
		// Use case layer - Tag (11 providers)
		uc_tag.ProvideCreateTagUseCase,
		uc_tag.ProvideGetTagByIDUseCase,
		uc_tag.ProvideListTagsByUserUseCase,
		uc_tag.ProvideUpdateTagUseCase,
		uc_tag.ProvideDeleteTagUseCase,
		uc_tag.ProvideAssignTagUseCase,
		uc_tag.ProvideUnassignTagUseCase,
		uc_tag.ProvideGetTagsForEntityUseCase,
		uc_tag.ProvideListCollectionsByTagUseCase,
		uc_tag.ProvideListFilesByTagUseCase,
		// NOTE: ProvideCreateDefaultTagsUseCase removed - default tags must be created client-side due to E2EE
		// Service layer - Collection (15 providers)
		svc_collection.ProvideCreateCollectionService,
		svc_collection.ProvideGetCollectionService,
		svc_collection.ProvideListUserCollectionsService,
		svc_collection.ProvideUpdateCollectionService,
		svc_collection.ProvideSoftDeleteCollectionService,
		svc_collection.ProvideArchiveCollectionService,
		svc_collection.ProvideRestoreCollectionService,
		svc_collection.ProvideListSharedCollectionsService,
		svc_collection.ProvideFindRootCollectionsService,
		svc_collection.ProvideFindCollectionsByParentService,
		svc_collection.ProvideGetCollectionSyncDataService,
		svc_collection.ProvideMoveCollectionService,
		svc_collection.ProvideGetFilteredCollectionsService,
		svc_collection.ProvideShareCollectionService,
		svc_collection.ProvideRemoveMemberService,
		// Service layer - File (14 providers)
		svc_file.ProvideCreatePendingFileService,
		svc_file.ProvideGetPresignedUploadURLService,
		svc_file.ProvideCompleteFileUploadService,
		svc_file.ProvideGetFileService,
		svc_file.ProvideGetPresignedDownloadURLService,
		svc_file.ProvideListFilesByCollectionService,
		svc_file.ProvideListRecentFilesService,
		svc_file.ProvideUpdateFileService,
		svc_file.ProvideSoftDeleteFileService,
		svc_file.ProvideArchiveFileService,
		svc_file.ProvideRestoreFileService,
		svc_file.ProvideDeleteMultipleFilesService,
		svc_file.ProvideListFileSyncDataService,
		svc_file.ProvideListFilesByOwnerIDService,
		// Service layer - Auth (10 providers)
		svc_auth.ProvideRegisterService,
		svc_auth.ProvideVerifyEmailService,
		svc_auth.ProvideResendVerificationService,
		svc_auth.ProvideRequestOTTService,
		svc_auth.ProvideVerifyOTTService,
		svc_auth.ProvideCompleteLoginService,
		svc_auth.ProvideRefreshTokenService,
		svc_auth.ProvideRecoveryInitiateService,
		svc_auth.ProvideRecoveryVerifyService,
		svc_auth.ProvideRecoveryCompleteService,
		// Service layer - Me (3 providers)
		svc_me.ProvideGetMeService,
		svc_me.ProvideUpdateMeService,
		svc_me.ProvideDeleteMeService,
		// Service layer - Dashboard (1 provider)
		svc_dashboard.ProvideGetDashboardService,
		// Service layer - User (2 providers)
		svc_user.ProvideUserPublicLookupService,
		svc_user.ProvideCompleteUserDeletionService,
		// Service layer - Blocked Email (3 providers)
		svc_blockedemail.ProvideCreateBlockedEmailService,
		svc_blockedemail.ProvideListBlockedEmailsService,
		svc_blockedemail.ProvideDeleteBlockedEmailService,
		// Service layer - Invite Email (1 provider)
		svc_inviteemail.ProvideSendInviteEmailService,
		// Service layer - IP Anonymization (1 provider)
		svc_ipanonymization.ProvideAnonymizeOldIPsService,
		// Service layer - Tag (2 providers)
		svc_tag.ProvideTagService,
		svc_tag.ProvideSearchByTagsService,
		// Service layer - Storage Daily Usage (none currently used)
		// Middleware
		middleware.ProvideMiddleware,
		// HTTP handlers - Common
		commonhttp.ProvideMapleFileVersionHTTPHandler,
		// HTTP handlers - Dashboard
		dashboard.ProvideGetDashboardHTTPHandler,
		// HTTP handlers - Me
		me.ProvideGetMeHTTPHandler,
		me.ProvidePutUpdateMeHTTPHandler,
		me.ProvideDeleteMeHTTPHandler,
		// HTTP handlers - User (1 provider)
		user.ProvideUserPublicLookupHTTPHandler,
		// HTTP handlers - Blocked Email (3 providers)
		blockedemail.ProvideCreateBlockedEmailHTTPHandler,
		blockedemail.ProvideListBlockedEmailsHTTPHandler,
		blockedemail.ProvideDeleteBlockedEmailHTTPHandler,
		// HTTP handlers - Invite Email (1 provider)
		http_inviteemail.ProvideSendInviteEmailHTTPHandler,
		// HTTP handlers - Collection (15 providers)
		collection.ProvideCreateCollectionHTTPHandler,
		collection.ProvideGetCollectionHTTPHandler,
		collection.ProvideListUserCollectionsHTTPHandler,
		collection.ProvideUpdateCollectionHTTPHandler,
		collection.ProvideSoftDeleteCollectionHTTPHandler,
		collection.ProvideArchiveCollectionHTTPHandler,
		collection.ProvideRestoreCollectionHTTPHandler,
		collection.ProvideListSharedCollectionsHTTPHandler,
		collection.ProvideFindRootCollectionsHTTPHandler,
		collection.ProvideFindCollectionsByParentHTTPHandler,
		collection.ProvideCollectionSyncHTTPHandler,
		collection.ProvideMoveCollectionHTTPHandler,
		collection.ProvideGetFilteredCollectionsHTTPHandler,
		collection.ProvideShareCollectionHTTPHandler,
		collection.ProvideRemoveMemberHTTPHandler,
		// HTTP handlers - File (14 providers)
		file.ProvideCreatePendingFileHTTPHandler,
		file.ProvideGetPresignedUploadURLHTTPHandler,
		file.ProvideCompleteFileUploadHTTPHandler,
		file.ProvideGetFileHTTPHandler,
		file.ProvideGetPresignedDownloadURLHTTPHandler,
		file.ProvideReportDownloadCompletedHTTPHandler,
		file.ProvideListFilesByCollectionHTTPHandler,
		file.ProvideListRecentFilesHTTPHandler,
		file.ProvideUpdateFileHTTPHandler,
		file.ProvideSoftDeleteFileHTTPHandler,
		file.ProvideArchiveFileHTTPHandler,
		file.ProvideRestoreFileHTTPHandler,
		file.ProvideDeleteMultipleFilesHTTPHandler,
		file.ProvideFileSyncHTTPHandler,
		// HTTP handlers - Tag (12 providers)
		tag.ProvideCreateTagHTTPHandler,
		tag.ProvideListTagsHTTPHandler,
		tag.ProvideGetTagHTTPHandler,
		tag.ProvideUpdateTagHTTPHandler,
		tag.ProvideDeleteTagHTTPHandler,
		tag.ProvideAssignTagHTTPHandler,
		tag.ProvideUnassignTagHTTPHandler,
		tag.ProvideGetTagsForCollectionHTTPHandler,
		tag.ProvideGetTagsForFileHTTPHandler,
		tag.ProvideListCollectionsByTagHandler,
		tag.ProvideListFilesByTagHandler,
		tag.ProvideSearchByTagsHandler,
		// HTTP layer - Aggregate Handlers
		http.ProvideHandlers,
		// HTTP layer - Server
		http.ProvideServer,
		// Scheduler
		scheduler.ProvideScheduler,
		tasks.ProvideIPAnonymizationTask,
		// Application
		ProvideApplication,
	)
	return nil, nil
}

View file

@ -0,0 +1,274 @@
// Code generated by Wire. DO NOT EDIT.
//go:generate go run -mod=mod github.com/google/wire/cmd/wire
//go:build !wireinject
// +build !wireinject
package app
import (
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http"
blockedemail4 "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/blockedemail"
collection4 "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/collection"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/common"
dashboard2 "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/dashboard"
file2 "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/file"
inviteemail2 "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/inviteemail"
me2 "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/me"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/middleware"
tag4 "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/tag"
user4 "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/user"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/scheduler"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/scheduler/tasks"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/repo/blockedemail"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/repo/collection"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/repo/filemetadata"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/repo/fileobjectstorage"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/repo/inviteemailratelimit"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/repo/storagedailyusage"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/repo/storageusageevent"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/repo/tag"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/repo/user"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/auth"
blockedemail3 "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/blockedemail"
collection3 "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/collection"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/dashboard"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/file"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/inviteemail"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/ipanonymization"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/me"
tag3 "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/tag"
user3 "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/user"
blockedemail2 "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/blockedemail"
collection2 "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/collection"
filemetadata2 "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/filemetadata"
fileobjectstorage2 "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/fileobjectstorage"
storagedailyusage2 "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/storagedailyusage"
storageusageevent2 "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/storageusageevent"
tag2 "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/tag"
user2 "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/user"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/auditlog"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/distributedmutex"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/emailer/mailgun"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/leaderelection"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/logger"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/ratelimit"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/security/jwt"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/storage/cache/cassandracache"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/storage/database/cassandradb"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/storage/memory/redis"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/storage/object/s3"
)
// Injectors from wire.go:
// InitializeApplication wires up all dependencies using Google Wire
func InitializeApplication(cfg *config.Config) (*Application, error) {
	// NOTE(review): this injector is generated by Google Wire (see the
	// "Injectors from wire.go" marker above). Prefer editing wire.go and
	// regenerating instead of hand-editing this function.

	// --- Core infrastructure: logging and Cassandra session ---
	zapLogger, err := logger.ProvideLogger(cfg)
	if err != nil {
		return nil, err
	}
	mapleFileVersionHTTPHandler := common.ProvideMapleFileVersionHTTPHandler(zapLogger)
	session, err := cassandradb.ProvideCassandraConnection(cfg, zapLogger)
	if err != nil {
		return nil, err
	}

	// --- Repositories, dashboard, and auth middleware ---
	collectionRepository := collection.ProvideRepository(cfg, session, zapLogger)
	fileMetadataRepository := filemetadata.ProvideRepository(cfg, session, zapLogger, collectionRepository)
	listRecentFilesUseCase := filemetadata2.ProvideListRecentFilesUseCase(cfg, zapLogger, fileMetadataRepository, collectionRepository)
	listRecentFilesService := file.ProvideListRecentFilesService(cfg, zapLogger, listRecentFilesUseCase)
	repository := user.ProvideRepository(cfg, session, zapLogger)
	userGetByIDUseCase := user2.ProvideUserGetByIDUseCase(cfg, zapLogger, repository)
	countUserFilesUseCase := filemetadata2.ProvideCountUserFilesUseCase(cfg, zapLogger, fileMetadataRepository, collectionRepository)
	countUserFoldersUseCase := collection2.ProvideCountUserFoldersUseCase(cfg, zapLogger, collectionRepository)
	storageDailyUsageRepository := storagedailyusage.ProvideRepository(cfg, session, zapLogger)
	getStorageDailyUsageTrendUseCase := storagedailyusage2.ProvideGetStorageDailyUsageTrendUseCase(cfg, zapLogger, storageDailyUsageRepository)
	getCollectionUseCase := collection2.ProvideGetCollectionUseCase(cfg, zapLogger, collectionRepository)
	getDashboardService := dashboard.ProvideGetDashboardService(cfg, zapLogger, listRecentFilesService, userGetByIDUseCase, countUserFilesUseCase, countUserFoldersUseCase, getStorageDailyUsageTrendUseCase, getCollectionUseCase)
	jwtProvider := jwt.ProvideJWTProvider(cfg)
	middlewareMiddleware := middleware.ProvideMiddleware(zapLogger, jwtProvider, userGetByIDUseCase)
	getDashboardHTTPHandler := dashboard2.ProvideGetDashboardHTTPHandler(cfg, zapLogger, getDashboardService, middlewareMiddleware)

	// --- "Me" (current user profile) endpoints ---
	userCreateUseCase := user2.ProvideUserCreateUseCase(cfg, zapLogger, repository)
	userUpdateUseCase := user2.ProvideUserUpdateUseCase(cfg, zapLogger, repository)
	getMeService := me.ProvideGetMeService(cfg, zapLogger, userGetByIDUseCase, userCreateUseCase, userUpdateUseCase)
	getMeHTTPHandler := me2.ProvideGetMeHTTPHandler(cfg, zapLogger, getMeService, middlewareMiddleware)
	userGetByEmailUseCase := user2.ProvideUserGetByEmailUseCase(cfg, zapLogger, repository)
	updateMeService := me.ProvideUpdateMeService(cfg, zapLogger, userGetByIDUseCase, userGetByEmailUseCase, userUpdateUseCase)
	putUpdateMeHTTPHandler := me2.ProvidePutUpdateMeHTTPHandler(cfg, zapLogger, updateMeService, middlewareMiddleware)

	// --- File/collection deletion plumbing and complete account deletion ---
	userDeleteByIDUseCase := user2.ProvideUserDeleteByIDUseCase(cfg, zapLogger, repository)
	getFileMetadataByOwnerIDUseCase := filemetadata2.ProvideGetFileMetadataByOwnerIDUseCase(cfg, zapLogger, fileMetadataRepository)
	listFilesByOwnerIDService := file.ProvideListFilesByOwnerIDService(cfg, zapLogger, getFileMetadataByOwnerIDUseCase)
	getFileMetadataUseCase := filemetadata2.ProvideGetFileMetadataUseCase(cfg, zapLogger, fileMetadataRepository)
	updateFileMetadataUseCase := filemetadata2.ProvideUpdateFileMetadataUseCase(cfg, zapLogger, fileMetadataRepository)
	softDeleteFileMetadataUseCase := filemetadata2.ProvideSoftDeleteFileMetadataUseCase(cfg, zapLogger, fileMetadataRepository)
	hardDeleteFileMetadataUseCase := filemetadata2.ProvideHardDeleteFileMetadataUseCase(cfg, zapLogger, fileMetadataRepository)
	s3ObjectStorage := s3.ProvideS3ObjectStorageProvider(cfg, zapLogger)
	fileObjectStorageRepository := fileobjectstorage.ProvideRepository(cfg, zapLogger, s3ObjectStorage)
	deleteEncryptedDataUseCase := fileobjectstorage2.ProvideDeleteEncryptedDataUseCase(cfg, zapLogger, fileObjectStorageRepository)
	userStorageQuotaHelperUseCase := user2.ProvideUserStorageQuotaHelperUseCase(zapLogger, storageDailyUsageRepository)
	storageUsageEventRepository := storageusageevent.ProvideRepository(cfg, session, zapLogger)
	createStorageUsageEventUseCase := storageusageevent2.ProvideCreateStorageUsageEventUseCase(cfg, zapLogger, storageUsageEventRepository)
	updateStorageUsageUseCase := storagedailyusage2.ProvideUpdateStorageUsageUseCase(cfg, zapLogger, storageDailyUsageRepository)
	softDeleteFileService := file.ProvideSoftDeleteFileService(cfg, zapLogger, collectionRepository, getFileMetadataUseCase, updateFileMetadataUseCase, softDeleteFileMetadataUseCase, hardDeleteFileMetadataUseCase, deleteEncryptedDataUseCase, listFilesByOwnerIDService, userStorageQuotaHelperUseCase, createStorageUsageEventUseCase, updateStorageUsageUseCase)
	listCollectionsByUserUseCase := collection2.ProvideListCollectionsByUserUseCase(cfg, zapLogger, collectionRepository)
	updateCollectionUseCase := collection2.ProvideUpdateCollectionUseCase(cfg, zapLogger, collectionRepository)
	hardDeleteCollectionUseCase := collection2.ProvideHardDeleteCollectionUseCase(cfg, zapLogger, collectionRepository)
	deleteMultipleEncryptedDataUseCase := fileobjectstorage2.ProvideDeleteMultipleEncryptedDataUseCase(cfg, zapLogger, fileObjectStorageRepository)
	softDeleteCollectionService := collection3.ProvideSoftDeleteCollectionService(cfg, zapLogger, collectionRepository, fileMetadataRepository, getCollectionUseCase, updateCollectionUseCase, hardDeleteCollectionUseCase, deleteMultipleEncryptedDataUseCase, userStorageQuotaHelperUseCase, createStorageUsageEventUseCase, updateStorageUsageUseCase)
	removeUserFromAllCollectionsUseCase := collection2.ProvideRemoveUserFromAllCollectionsUseCase(zapLogger, collectionRepository)
	deleteByUserUseCase := storagedailyusage2.ProvideDeleteByUserUseCase(zapLogger, storageDailyUsageRepository)
	storageusageeventDeleteByUserUseCase := storageusageevent2.ProvideDeleteByUserUseCase(zapLogger, storageUsageEventRepository)
	anonymizeUserIPsImmediatelyUseCase := user2.ProvideAnonymizeUserIPsImmediatelyUseCase(cfg, zapLogger, repository, collectionRepository, fileMetadataRepository)
	clearUserCacheUseCase := user2.ProvideClearUserCacheUseCase(cfg, zapLogger)
	anonymizeUserReferencesUseCase := filemetadata2.ProvideAnonymizeUserReferencesUseCase(zapLogger, fileMetadataRepository)
	collectionAnonymizeUserReferencesUseCase := collection2.ProvideAnonymizeUserReferencesUseCase(zapLogger, collectionRepository)
	completeUserDeletionService := user3.ProvideCompleteUserDeletionService(cfg, zapLogger, userGetByIDUseCase, userDeleteByIDUseCase, listFilesByOwnerIDService, softDeleteFileService, listCollectionsByUserUseCase, softDeleteCollectionService, removeUserFromAllCollectionsUseCase, deleteByUserUseCase, storageusageeventDeleteByUserUseCase, anonymizeUserIPsImmediatelyUseCase, clearUserCacheUseCase, anonymizeUserReferencesUseCase, collectionAnonymizeUserReferencesUseCase)
	deleteMeService := me.ProvideDeleteMeService(cfg, zapLogger, completeUserDeletionService)
	deleteMeHTTPHandler := me2.ProvideDeleteMeHTTPHandler(cfg, zapLogger, deleteMeService, middlewareMiddleware)
	userPublicLookupService := user3.ProvideUserPublicLookupService(cfg, zapLogger, userGetByEmailUseCase)
	userPublicLookupHTTPHandler := user4.ProvideUserPublicLookupHTTPHandler(cfg, zapLogger, userPublicLookupService, middlewareMiddleware)

	// --- Blocked-email administration endpoints ---
	blockedEmailRepository := blockedemail.NewBlockedEmailRepository(cfg, zapLogger, session)
	createBlockedEmailUseCase := blockedemail2.NewCreateBlockedEmailUseCase(zapLogger, blockedEmailRepository)
	createBlockedEmailService := blockedemail3.ProvideCreateBlockedEmailService(cfg, zapLogger, createBlockedEmailUseCase, userGetByEmailUseCase)
	createBlockedEmailHTTPHandler := blockedemail4.ProvideCreateBlockedEmailHTTPHandler(cfg, zapLogger, createBlockedEmailService, middlewareMiddleware)
	listBlockedEmailsUseCase := blockedemail2.NewListBlockedEmailsUseCase(zapLogger, blockedEmailRepository)
	listBlockedEmailsService := blockedemail3.ProvideListBlockedEmailsService(cfg, zapLogger, listBlockedEmailsUseCase)
	listBlockedEmailsHTTPHandler := blockedemail4.ProvideListBlockedEmailsHTTPHandler(cfg, zapLogger, listBlockedEmailsService, middlewareMiddleware)
	deleteBlockedEmailUseCase := blockedemail2.NewDeleteBlockedEmailUseCase(zapLogger, blockedEmailRepository)
	deleteBlockedEmailService := blockedemail3.ProvideDeleteBlockedEmailService(cfg, zapLogger, deleteBlockedEmailUseCase)
	deleteBlockedEmailHTTPHandler := blockedemail4.ProvideDeleteBlockedEmailHTTPHandler(cfg, zapLogger, deleteBlockedEmailService, middlewareMiddleware)

	// --- Invite email ---
	inviteemailratelimitRepository := inviteemailratelimit.ProvideRepository(cfg, session, zapLogger)
	emailer := mailgun.ProvideMapleFileModuleEmailer(cfg)
	sendInviteEmailService := inviteemail.ProvideSendInviteEmailService(cfg, zapLogger, repository, inviteemailratelimitRepository, emailer)
	sendInviteEmailHTTPHandler := inviteemail2.ProvideSendInviteEmailHTTPHandler(cfg, zapLogger, sendInviteEmailService, middlewareMiddleware)

	// --- Collection endpoints (CRUD, sharing, hierarchy, sync) ---
	tagRepository := tag.ProvideTagRepository(session)
	createCollectionService := collection3.ProvideCreateCollectionService(cfg, zapLogger, userGetByIDUseCase, collectionRepository, tagRepository)
	createCollectionHTTPHandler := collection4.ProvideCreateCollectionHTTPHandler(cfg, zapLogger, createCollectionService, middlewareMiddleware)
	universalClient, err := redis.ProvideRedisUniversalClient(cfg, zapLogger)
	if err != nil {
		return nil, err
	}
	authFailureRateLimiter := ratelimit.ProvideAuthFailureRateLimiter(universalClient, cfg, zapLogger)
	getCollectionService := collection3.ProvideGetCollectionService(cfg, zapLogger, collectionRepository, userGetByIDUseCase, authFailureRateLimiter)
	getCollectionHTTPHandler := collection4.ProvideGetCollectionHTTPHandler(cfg, zapLogger, getCollectionService, middlewareMiddleware)
	listUserCollectionsService := collection3.ProvideListUserCollectionsService(cfg, zapLogger, collectionRepository, fileMetadataRepository)
	listUserCollectionsHTTPHandler := collection4.ProvideListUserCollectionsHTTPHandler(cfg, zapLogger, listUserCollectionsService, middlewareMiddleware)
	updateCollectionService := collection3.ProvideUpdateCollectionService(cfg, zapLogger, collectionRepository, authFailureRateLimiter)
	updateCollectionHTTPHandler := collection4.ProvideUpdateCollectionHTTPHandler(cfg, zapLogger, updateCollectionService, middlewareMiddleware)
	softDeleteCollectionHTTPHandler := collection4.ProvideSoftDeleteCollectionHTTPHandler(cfg, zapLogger, softDeleteCollectionService, middlewareMiddleware)
	archiveCollectionService := collection3.ProvideArchiveCollectionService(cfg, zapLogger, collectionRepository)
	archiveCollectionHTTPHandler := collection4.ProvideArchiveCollectionHTTPHandler(cfg, zapLogger, archiveCollectionService, middlewareMiddleware)
	restoreCollectionService := collection3.ProvideRestoreCollectionService(cfg, zapLogger, collectionRepository)
	restoreCollectionHTTPHandler := collection4.ProvideRestoreCollectionHTTPHandler(cfg, zapLogger, restoreCollectionService, middlewareMiddleware)
	findCollectionsByParentService := collection3.ProvideFindCollectionsByParentService(cfg, zapLogger, collectionRepository)
	findCollectionsByParentHTTPHandler := collection4.ProvideFindCollectionsByParentHTTPHandler(cfg, zapLogger, findCollectionsByParentService, middlewareMiddleware)
	findRootCollectionsService := collection3.ProvideFindRootCollectionsService(cfg, zapLogger, collectionRepository)
	findRootCollectionsHTTPHandler := collection4.ProvideFindRootCollectionsHTTPHandler(cfg, zapLogger, findRootCollectionsService, middlewareMiddleware)
	moveCollectionService := collection3.ProvideMoveCollectionService(cfg, zapLogger, collectionRepository)
	moveCollectionHTTPHandler := collection4.ProvideMoveCollectionHTTPHandler(cfg, zapLogger, moveCollectionService, middlewareMiddleware)
	checkBlockedEmailUseCase := blockedemail2.NewCheckBlockedEmailUseCase(zapLogger, blockedEmailRepository)
	shareCollectionService := collection3.ProvideShareCollectionService(cfg, zapLogger, collectionRepository, checkBlockedEmailUseCase, userGetByIDUseCase, emailer)
	shareCollectionHTTPHandler := collection4.ProvideShareCollectionHTTPHandler(cfg, zapLogger, shareCollectionService, middlewareMiddleware)
	removeMemberService := collection3.ProvideRemoveMemberService(cfg, zapLogger, collectionRepository)
	removeMemberHTTPHandler := collection4.ProvideRemoveMemberHTTPHandler(cfg, zapLogger, removeMemberService, middlewareMiddleware)
	listSharedCollectionsService := collection3.ProvideListSharedCollectionsService(cfg, zapLogger, collectionRepository, fileMetadataRepository)
	listSharedCollectionsHTTPHandler := collection4.ProvideListSharedCollectionsHTTPHandler(cfg, zapLogger, listSharedCollectionsService, middlewareMiddleware)
	getFilteredCollectionsService := collection3.ProvideGetFilteredCollectionsService(cfg, zapLogger, collectionRepository)
	getFilteredCollectionsHTTPHandler := collection4.ProvideGetFilteredCollectionsHTTPHandler(cfg, zapLogger, getFilteredCollectionsService, middlewareMiddleware)
	getCollectionSyncDataUseCase := collection2.ProvideGetCollectionSyncDataUseCase(cfg, zapLogger, collectionRepository)
	getCollectionSyncDataService := collection3.ProvideGetCollectionSyncDataService(cfg, zapLogger, getCollectionSyncDataUseCase)
	collectionSyncHTTPHandler := collection4.ProvideCollectionSyncHTTPHandler(cfg, zapLogger, getCollectionSyncDataService, middlewareMiddleware)

	// --- File endpoints (CRUD, presigned upload/download, archive, sync) ---
	softDeleteFileHTTPHandler := file2.ProvideSoftDeleteFileHTTPHandler(cfg, zapLogger, softDeleteFileService, middlewareMiddleware)
	getFileMetadataByIDsUseCase := filemetadata2.ProvideGetFileMetadataByIDsUseCase(cfg, zapLogger, fileMetadataRepository)
	deleteManyFileMetadataUseCase := filemetadata2.ProvideDeleteManyFileMetadataUseCase(cfg, zapLogger, fileMetadataRepository)
	deleteMultipleFilesService := file.ProvideDeleteMultipleFilesService(cfg, zapLogger, collectionRepository, getFileMetadataByIDsUseCase, deleteManyFileMetadataUseCase, deleteMultipleEncryptedDataUseCase, createStorageUsageEventUseCase, updateStorageUsageUseCase)
	deleteMultipleFilesHTTPHandler := file2.ProvideDeleteMultipleFilesHTTPHandler(cfg, zapLogger, deleteMultipleFilesService, middlewareMiddleware)
	getFileService := file.ProvideGetFileService(cfg, zapLogger, collectionRepository, getFileMetadataUseCase)
	getFileHTTPHandler := file2.ProvideGetFileHTTPHandler(cfg, zapLogger, getFileService, middlewareMiddleware)
	getFileMetadataByCollectionUseCase := filemetadata2.ProvideGetFileMetadataByCollectionUseCase(cfg, zapLogger, fileMetadataRepository)
	listFilesByCollectionService := file.ProvideListFilesByCollectionService(cfg, zapLogger, collectionRepository, getFileMetadataByCollectionUseCase)
	listFilesByCollectionHTTPHandler := file2.ProvideListFilesByCollectionHTTPHandler(cfg, zapLogger, listFilesByCollectionService, middlewareMiddleware)
	updateFileService := file.ProvideUpdateFileService(cfg, zapLogger, collectionRepository, getFileMetadataUseCase, updateFileMetadataUseCase)
	updateFileHTTPHandler := file2.ProvideUpdateFileHTTPHandler(cfg, zapLogger, updateFileService, middlewareMiddleware)
	checkCollectionAccessUseCase := collection2.ProvideCheckCollectionAccessUseCase(cfg, zapLogger, collectionRepository)
	checkFileExistsUseCase := filemetadata2.ProvideCheckFileExistsUseCase(cfg, zapLogger, fileMetadataRepository)
	createFileMetadataUseCase := filemetadata2.ProvideCreateFileMetadataUseCase(cfg, zapLogger, fileMetadataRepository)
	generatePresignedUploadURLUseCase := fileobjectstorage2.ProvideGeneratePresignedUploadURLUseCase(cfg, zapLogger, fileObjectStorageRepository)
	createPendingFileService := file.ProvideCreatePendingFileService(cfg, zapLogger, getCollectionUseCase, checkCollectionAccessUseCase, checkFileExistsUseCase, createFileMetadataUseCase, generatePresignedUploadURLUseCase, userStorageQuotaHelperUseCase, tagRepository)
	createPendingFileHTTPHandler := file2.ProvideCreatePendingFileHTTPHandler(cfg, zapLogger, createPendingFileService, middlewareMiddleware)
	verifyObjectExistsUseCase := fileobjectstorage2.ProvideVerifyObjectExistsUseCase(cfg, zapLogger, fileObjectStorageRepository)
	getObjectSizeUseCase := fileobjectstorage2.ProvideGetObjectSizeUseCase(cfg, zapLogger, fileObjectStorageRepository)
	completeFileUploadService := file.ProvideCompleteFileUploadService(cfg, zapLogger, collectionRepository, getFileMetadataUseCase, updateFileMetadataUseCase, verifyObjectExistsUseCase, getObjectSizeUseCase, deleteEncryptedDataUseCase, userStorageQuotaHelperUseCase, createStorageUsageEventUseCase, updateStorageUsageUseCase)
	completeFileUploadHTTPHandler := file2.ProvideCompleteFileUploadHTTPHandler(cfg, zapLogger, completeFileUploadService, middlewareMiddleware)
	getPresignedUploadURLService := file.ProvideGetPresignedUploadURLService(cfg, zapLogger, collectionRepository, getFileMetadataUseCase, generatePresignedUploadURLUseCase)
	getPresignedUploadURLHTTPHandler := file2.ProvideGetPresignedUploadURLHTTPHandler(cfg, zapLogger, getPresignedUploadURLService, middlewareMiddleware)
	generatePresignedDownloadURLUseCase := fileobjectstorage2.ProvideGeneratePresignedDownloadURLUseCase(cfg, zapLogger, fileObjectStorageRepository)
	getPresignedDownloadURLService := file.ProvideGetPresignedDownloadURLService(cfg, zapLogger, collectionRepository, getFileMetadataUseCase, generatePresignedDownloadURLUseCase)
	getPresignedDownloadURLHTTPHandler := file2.ProvideGetPresignedDownloadURLHTTPHandler(cfg, zapLogger, getPresignedDownloadURLService, middlewareMiddleware)
	reportDownloadCompletedHTTPHandler := file2.ProvideReportDownloadCompletedHTTPHandler(cfg, zapLogger, middlewareMiddleware)
	archiveFileService := file.ProvideArchiveFileService(cfg, zapLogger, collectionRepository, getFileMetadataUseCase, updateFileMetadataUseCase)
	archiveFileHTTPHandler := file2.ProvideArchiveFileHTTPHandler(cfg, zapLogger, archiveFileService, middlewareMiddleware)
	restoreFileService := file.ProvideRestoreFileService(cfg, zapLogger, collectionRepository, getFileMetadataUseCase, updateFileMetadataUseCase)
	restoreFileHTTPHandler := file2.ProvideRestoreFileHTTPHandler(cfg, zapLogger, restoreFileService, middlewareMiddleware)
	listRecentFilesHTTPHandler := file2.ProvideListRecentFilesHTTPHandler(cfg, zapLogger, listRecentFilesService, middlewareMiddleware)
	listFileMetadataSyncDataUseCase := filemetadata2.ProvideListFileMetadataSyncDataUseCase(cfg, zapLogger, fileMetadataRepository)
	listFileSyncDataService := file.ProvideListFileSyncDataService(cfg, zapLogger, listFileMetadataSyncDataUseCase, collectionRepository)
	fileSyncHTTPHandler := file2.ProvideFileSyncHTTPHandler(cfg, zapLogger, listFileSyncDataService, middlewareMiddleware)

	// --- Tag endpoints and tag-based search ---
	createTagUseCase := tag2.ProvideCreateTagUseCase(tagRepository)
	getTagByIDUseCase := tag2.ProvideGetTagByIDUseCase(tagRepository)
	listTagsByUserUseCase := tag2.ProvideListTagsByUserUseCase(tagRepository)
	updateTagUseCase := tag2.ProvideUpdateTagUseCase(tagRepository, collectionRepository, fileMetadataRepository, zapLogger)
	deleteTagUseCase := tag2.ProvideDeleteTagUseCase(tagRepository, collectionRepository, fileMetadataRepository, zapLogger)
	assignTagUseCase := tag2.ProvideAssignTagUseCase(tagRepository, collectionRepository, fileMetadataRepository)
	unassignTagUseCase := tag2.ProvideUnassignTagUseCase(tagRepository, collectionRepository, fileMetadataRepository)
	getTagsForEntityUseCase := tag2.ProvideGetTagsForEntityUseCase(tagRepository)
	tagService := tag3.ProvideTagService(createTagUseCase, getTagByIDUseCase, listTagsByUserUseCase, updateTagUseCase, deleteTagUseCase, assignTagUseCase, unassignTagUseCase, getTagsForEntityUseCase)
	createTagHTTPHandler := tag4.ProvideCreateTagHTTPHandler(cfg, zapLogger, tagService, middlewareMiddleware)
	listTagsHTTPHandler := tag4.ProvideListTagsHTTPHandler(cfg, zapLogger, tagService, middlewareMiddleware)
	getTagHTTPHandler := tag4.ProvideGetTagHTTPHandler(cfg, zapLogger, tagService, middlewareMiddleware)
	updateTagHTTPHandler := tag4.ProvideUpdateTagHTTPHandler(cfg, zapLogger, tagService, middlewareMiddleware)
	deleteTagHTTPHandler := tag4.ProvideDeleteTagHTTPHandler(cfg, zapLogger, tagService, middlewareMiddleware)
	assignTagHTTPHandler := tag4.ProvideAssignTagHTTPHandler(cfg, zapLogger, tagService, middlewareMiddleware)
	unassignTagHTTPHandler := tag4.ProvideUnassignTagHTTPHandler(cfg, zapLogger, tagService, middlewareMiddleware)
	getTagsForCollectionHTTPHandler := tag4.ProvideGetTagsForCollectionHTTPHandler(cfg, zapLogger, tagService, middlewareMiddleware)
	getTagsForFileHTTPHandler := tag4.ProvideGetTagsForFileHTTPHandler(cfg, zapLogger, tagService, middlewareMiddleware)
	listCollectionsByTagUseCase := tag2.ProvideListCollectionsByTagUseCase(tagRepository, collectionRepository)
	listCollectionsByTagHandler := tag4.ProvideListCollectionsByTagHandler(listCollectionsByTagUseCase, zapLogger)
	listFilesByTagUseCase := tag2.ProvideListFilesByTagUseCase(tagRepository, fileMetadataRepository)
	listFilesByTagHandler := tag4.ProvideListFilesByTagHandler(listFilesByTagUseCase, zapLogger)
	searchByTagsService := tag3.ProvideSearchByTagsService(zapLogger, listCollectionsByTagUseCase, listFilesByTagUseCase)
	searchByTagsHandler := tag4.ProvideSearchByTagsHandler(searchByTagsService, zapLogger, middlewareMiddleware)

	// --- HTTP handler registry, auth services, and server assembly ---
	handlers := http.ProvideHandlers(cfg, zapLogger, mapleFileVersionHTTPHandler, getDashboardHTTPHandler, getMeHTTPHandler, putUpdateMeHTTPHandler, deleteMeHTTPHandler, userPublicLookupHTTPHandler, createBlockedEmailHTTPHandler, listBlockedEmailsHTTPHandler, deleteBlockedEmailHTTPHandler, sendInviteEmailHTTPHandler, createCollectionHTTPHandler, getCollectionHTTPHandler, listUserCollectionsHTTPHandler, updateCollectionHTTPHandler, softDeleteCollectionHTTPHandler, archiveCollectionHTTPHandler, restoreCollectionHTTPHandler, findCollectionsByParentHTTPHandler, findRootCollectionsHTTPHandler, moveCollectionHTTPHandler, shareCollectionHTTPHandler, removeMemberHTTPHandler, listSharedCollectionsHTTPHandler, getFilteredCollectionsHTTPHandler, collectionSyncHTTPHandler, softDeleteFileHTTPHandler, deleteMultipleFilesHTTPHandler, getFileHTTPHandler, listFilesByCollectionHTTPHandler, updateFileHTTPHandler, createPendingFileHTTPHandler, completeFileUploadHTTPHandler, getPresignedUploadURLHTTPHandler, getPresignedDownloadURLHTTPHandler, reportDownloadCompletedHTTPHandler, archiveFileHTTPHandler, restoreFileHTTPHandler, listRecentFilesHTTPHandler, fileSyncHTTPHandler, createTagHTTPHandler, listTagsHTTPHandler, getTagHTTPHandler, updateTagHTTPHandler, deleteTagHTTPHandler, assignTagHTTPHandler, unassignTagHTTPHandler, getTagsForCollectionHTTPHandler, getTagsForFileHTTPHandler, listCollectionsByTagHandler, listFilesByTagHandler, searchByTagsHandler)
	auditLogger := auditlog.ProvideAuditLogger(zapLogger)
	registerService := auth.ProvideRegisterService(cfg, zapLogger, auditLogger, userCreateUseCase, userGetByEmailUseCase, userDeleteByIDUseCase, emailer)
	userGetByVerificationCodeUseCase := user2.ProvideUserGetByVerificationCodeUseCase(cfg, zapLogger, repository)
	verifyEmailService := auth.ProvideVerifyEmailService(zapLogger, auditLogger, userGetByVerificationCodeUseCase, userUpdateUseCase)
	resendVerificationService := auth.ProvideResendVerificationService(cfg, zapLogger, userGetByEmailUseCase, userUpdateUseCase, emailer)
	cassandraCacher := cassandracache.ProvideCassandraCacher(session, zapLogger)
	requestOTTService := auth.ProvideRequestOTTService(cfg, zapLogger, userGetByEmailUseCase, cassandraCacher, emailer)
	verifyOTTService := auth.ProvideVerifyOTTService(zapLogger, userGetByEmailUseCase, cassandraCacher)
	completeLoginService := auth.ProvideCompleteLoginService(cfg, zapLogger, auditLogger, userGetByEmailUseCase, cassandraCacher, jwtProvider)
	refreshTokenService := auth.ProvideRefreshTokenService(cfg, zapLogger, auditLogger, cassandraCacher, jwtProvider, userGetByIDUseCase)
	recoveryInitiateService := auth.ProvideRecoveryInitiateService(zapLogger, auditLogger, userGetByEmailUseCase, cassandraCacher)
	recoveryVerifyService := auth.ProvideRecoveryVerifyService(zapLogger, cassandraCacher, userGetByEmailUseCase)
	recoveryCompleteService := auth.ProvideRecoveryCompleteService(zapLogger, auditLogger, userGetByEmailUseCase, userUpdateUseCase, cassandraCacher)
	loginRateLimiter := ratelimit.ProvideLoginRateLimiter(universalClient, cfg, zapLogger)
	rateLimitMiddleware := middleware.ProvideRateLimitMiddleware(zapLogger, loginRateLimiter)
	securityHeadersMiddleware := middleware.ProvideSecurityHeadersMiddleware(cfg)
	wireServer := http.ProvideServer(cfg, zapLogger, handlers, registerService, verifyEmailService, resendVerificationService, requestOTTService, verifyOTTService, completeLoginService, refreshTokenService, recoveryInitiateService, recoveryVerifyService, recoveryCompleteService, rateLimitMiddleware, securityHeadersMiddleware)

	// --- Migrations, scheduler (leader-elected), and background tasks ---
	migrator := cassandradb.NewMigrator(cfg, zapLogger)
	adapter := distributedmutex.ProvideDistributedMutexAdapter(cfg, zapLogger)
	leaderElection, err := leaderelection.ProvideLeaderElection(cfg, adapter, universalClient, zapLogger)
	if err != nil {
		return nil, err
	}
	schedulerScheduler := scheduler.ProvideScheduler(cfg, zapLogger, leaderElection)
	anonymizeOldIPsUseCase := user2.ProvideAnonymizeOldIPsUseCase(cfg, zapLogger, repository)
	collectionAnonymizeOldIPsUseCase := collection2.ProvideAnonymizeOldIPsUseCase(cfg, zapLogger, collectionRepository)
	filemetadataAnonymizeOldIPsUseCase := filemetadata2.ProvideAnonymizeOldIPsUseCase(cfg, zapLogger, fileMetadataRepository)
	anonymizeOldIPsService := ipanonymization.ProvideAnonymizeOldIPsService(cfg, zapLogger, anonymizeOldIPsUseCase, collectionAnonymizeOldIPsUseCase, filemetadataAnonymizeOldIPsUseCase)
	ipAnonymizationTask := tasks.ProvideIPAnonymizationTask(anonymizeOldIPsService, cfg, zapLogger)
	application := ProvideApplication(cfg, wireServer, zapLogger, migrator, schedulerScheduler, ipAnonymizationTask, session)
	return application, nil
}

View file

@ -0,0 +1,60 @@
package cmd
import (
"fmt"
"log"
"time"
"github.com/spf13/cobra"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/app"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
)
// formatBuildTime renders an RFC 3339 (ISO 8601) timestamp in a
// human-readable 12-hour format. Input that does not parse (e.g. the
// "unknown" default build value) is passed through unchanged.
func formatBuildTime(isoTime string) string {
	if parsed, parseErr := time.Parse(time.RFC3339, isoTime); parseErr == nil {
		return parsed.Format("Jan 2, 2006 3:04:05 PM MST")
	}
	// Fall back to the raw value so the caller still has something to show.
	return isoTime
}
// daemonCmd is the long-running server mode of the CLI; it delegates all
// work to runDaemon, which blocks until the server shuts down.
var daemonCmd = &cobra.Command{
	Use:   "daemon",
	Short: "Start the MapleFile backend server",
	Long:  `Start the MapleFile backend HTTP server and listen for requests.`,
	Run:   runDaemon,
}
// runDaemon loads and validates configuration, prints a startup banner,
// then builds the Wire-assembled application and runs it until shutdown.
// Every fatal path terminates the process via log.Fatalf.
func runDaemon(cmd *cobra.Command, args []string) {
	conf, loadErr := config.Load()
	if loadErr != nil {
		log.Fatalf("Failed to load configuration: %v", loadErr)
	}
	if validateErr := conf.Validate(); validateErr != nil {
		log.Fatalf("Invalid configuration: %v", validateErr)
	}

	// Startup banner; version/gitCommit/buildTime are the build-time
	// variables maintained in version.go.
	fmt.Printf("🚀 Starting MapleFile Backend v%s\n", version)
	fmt.Printf("📝 Git Commit: %s\n", gitCommit)
	fmt.Printf("🕐 Build Time: %s\n", formatBuildTime(buildTime))
	fmt.Printf("📝 Environment: %s\n", conf.App.Environment)
	fmt.Printf("🌐 Server will listen on %s:%d\n", conf.Server.Host, conf.Server.Port)

	// Assemble the dependency graph via Google Wire.
	backendApp, initErr := app.InitializeApplication(conf)
	if initErr != nil {
		log.Fatalf("Failed to initialize application: %v", initErr)
	}

	// Start blocks until the application stops; the Wire application
	// handles lifecycle and graceful shutdown.
	if runErr := backendApp.Start(); runErr != nil {
		log.Fatalf("Application terminated with error: %v", runErr)
	}
	fmt.Println("👋 Server stopped gracefully")
}

View file

@ -0,0 +1,54 @@
package cmd
import (
"fmt"
"github.com/spf13/cobra"
)
// migrateCmd is the parent command that groups the migration subcommands
// (up, down, create — registered in init below); it does no work itself.
var migrateCmd = &cobra.Command{
	Use:   "migrate",
	Short: "Database migration commands",
	Long:  `Run database migrations up, down, or create new migrations.`,
}
// migrateUpCmd applies all pending migrations.
// NOTE(review): the migration logic is still a stub (see TODO below) —
// the command currently only prints status messages.
var migrateUpCmd = &cobra.Command{
	Use:   "up",
	Short: "Run migrations up",
	Long:  `Apply all pending database migrations.`,
	Run: func(cmd *cobra.Command, args []string) {
		fmt.Println("Running migrations up...")
		// TODO: Implement migration logic in Phase 4
		fmt.Println("✅ Migrations completed")
	},
}
// migrateDownCmd rolls back the most recent migration.
// NOTE(review): the rollback logic is still a stub (see TODO below) —
// the command currently only prints status messages.
var migrateDownCmd = &cobra.Command{
	Use:   "down",
	Short: "Run migrations down",
	Long:  `Rollback the last database migration.`,
	Run: func(cmd *cobra.Command, args []string) {
		fmt.Println("Running migrations down...")
		// TODO: Implement migration logic in Phase 4
		fmt.Println("✅ Migration rolled back")
	},
}
// migrateCreateCmd scaffolds a new migration file named after the single
// required positional argument.
// NOTE(review): file creation is still a stub (see TODO below) — the
// command currently only prints status messages.
var migrateCreateCmd = &cobra.Command{
	Use:   "create [name]",
	Short: "Create a new migration file",
	Long:  `Create a new migration file with the given name.`,
	Args:  cobra.ExactArgs(1), // exactly one migration name is required
	Run: func(cmd *cobra.Command, args []string) {
		name := args[0]
		fmt.Printf("Creating migration: %s\n", name)
		// TODO: Implement migration creation in Phase 4
		fmt.Println("✅ Migration files created")
	},
}
// init attaches the migration subcommands to the parent migrate command;
// migrateCmd itself is registered on the root command in root.go.
func init() {
	migrateCmd.AddCommand(migrateUpCmd)
	migrateCmd.AddCommand(migrateDownCmd)
	migrateCmd.AddCommand(migrateCreateCmd)
}

View file

@ -0,0 +1,92 @@
package cmd
import (
"context"
"fmt"
"log"
"time"
"github.com/spf13/cobra"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/repo/collection"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/storage/database/cassandradb"
)
// recalculateFileCountsCmd is a maintenance command that rebuilds the
// file_count field of every collection from the actual file records;
// the work is done by runRecalculateFileCounts.
var recalculateFileCountsCmd = &cobra.Command{
	Use:   "recalculate-file-counts",
	Short: "Recalculate file counts for all collections",
	Long: `Recalculates the file_count field for all collections by counting
the actual number of active files in each collection.
This command is useful for:
- Fixing collections created before file count tracking was implemented
- Repairing file counts that may have become out of sync
- Data migration and maintenance tasks
Example:
maplefile-backend recalculate-file-counts`,
	Run: runRecalculateFileCounts,
}
// init self-registers this maintenance command on the root command
// (unlike daemon/migrate/version, which root.go registers itself).
func init() {
	rootCmd.AddCommand(recalculateFileCountsCmd)
}
// runRecalculateFileCounts connects to Cassandra, recalculates the stored
// file count of every collection, and prints a summary report.
// NOTE(review): fatal errors exit via log.Fatalf, so the deferred
// logger.Sync/session.Close do not run on those paths.
func runRecalculateFileCounts(cmd *cobra.Command, args []string) {
	fmt.Println("🔧 Recalculating file counts for all collections...")

	// Configuration and structured logging.
	conf, loadErr := config.Load()
	if loadErr != nil {
		log.Fatalf("Failed to load configuration: %v", loadErr)
	}
	zapLogger, zapErr := zap.NewProduction()
	if zapErr != nil {
		log.Fatalf("Failed to create logger: %v", zapErr)
	}
	defer zapLogger.Sync()

	// Database session and collection repository.
	fmt.Println("📦 Connecting to database...")
	dbSession, connErr := cassandradb.NewCassandraConnection(conf, zapLogger)
	if connErr != nil {
		log.Fatalf("Failed to connect to Cassandra: %v", connErr)
	}
	defer dbSession.Close()
	repo := collection.NewRepository(conf, dbSession, zapLogger)

	// Bound the whole run; recalculating many collections may take a while.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute)
	defer cancel()

	fmt.Println("🔄 Starting recalculation...")
	begunAt := time.Now()
	result, recalcErr := repo.RecalculateAllFileCounts(ctx)
	if recalcErr != nil {
		log.Fatalf("Failed to recalculate file counts: %v", recalcErr)
	}
	elapsed := time.Since(begunAt)

	// Summary report.
	fmt.Println("")
	fmt.Println("✅ Recalculation completed!")
	fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
	fmt.Printf(" Total collections: %d\n", result.TotalCollections)
	fmt.Printf(" Updated: %d\n", result.UpdatedCount)
	fmt.Printf(" Errors: %d\n", result.ErrorCount)
	fmt.Printf(" Duration: %s\n", elapsed.Round(time.Millisecond))
	fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")

	if result.ErrorCount > 0 {
		fmt.Println("⚠️ Some collections had errors. Check the logs for details.")
	}
}

View file

@ -0,0 +1,28 @@
package cmd
import (
"fmt"
"os"
"github.com/spf13/cobra"
)
// rootCmd is the top-level CLI entry; subcommands are attached in init
// functions and it is executed via Execute().
var rootCmd = &cobra.Command{
	Use:   "maplefile-backend",
	Short: "MapleFile Backend Server",
	Long:  `MapleFile - Standalone encrypted file storage backend server.`,
}
// Execute runs the root command; on failure the error is written to
// stderr and the process exits with status 1.
func Execute() {
	err := rootCmd.Execute()
	if err == nil {
		return
	}
	fmt.Fprintln(os.Stderr, err)
	os.Exit(1)
}
// init registers the core subcommands on the root command.
// (recalculateFileCountsCmd registers itself in its own file's init.)
func init() {
	// Add subcommands
	rootCmd.AddCommand(daemonCmd)
	rootCmd.AddCommand(migrateCmd)
	rootCmd.AddCommand(versionCmd)
}

View file

@ -0,0 +1,37 @@
package cmd
import (
"fmt"
"github.com/spf13/cobra"
)
// Build information set at compile time; the main package pushes the real
// values in through SetBuildInfo, and these defaults remain for local
// builds that skip that step.
var (
	version   = "1.0.0"   // semantic version of the binary
	gitCommit = "unknown" // git commit hash the binary was built from
	buildTime = "unknown" // build timestamp (formatted for display by formatBuildTime)
)
// SetBuildInfo sets the build information from main package
func SetBuildInfo(v, commit, time string) {
version = v
gitCommit = commit
buildTime = time
}
// GetBuildInfo returns the current build information
func GetBuildInfo() (string, string, string) {
return version, gitCommit, buildTime
}
// versionCmd prints the version, git commit, and build time held in this
// package's build-info variables (see SetBuildInfo).
var versionCmd = &cobra.Command{
	Use:   "version",
	Short: "Print the version number",
	Long:  `Print the version number of MapleFile backend.`,
	Run: func(cmd *cobra.Command, args []string) {
		fmt.Printf("MapleFile Backend v%s\n", version)
		fmt.Printf("Git Commit: %s\n", gitCommit)
		fmt.Printf("Build Time: %s\n", buildTime)
	},
}

View file

@ -0,0 +1,32 @@
package main

import (
	"log"

	"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/app"
	"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
)

// main loads configuration from the environment, builds the application via
// Wire dependency injection, and runs it until it exits.
//
// Note: log.Fatalf prints the message and then calls os.Exit(1) itself, so
// the explicit os.Exit(1) calls that previously followed each Fatalf were
// unreachable dead code; they have been removed along with the then-unused
// "os" import.
func main() {
	// Load configuration
	cfg, err := config.Load()
	if err != nil {
		log.Fatalf("Failed to load configuration: %v", err)
	}

	// Initialize application using Wire
	application, err := app.InitializeApplication(cfg)
	if err != nil {
		log.Fatalf("Failed to initialize application: %v", err)
	}

	// Start the application
	log.Println("Starting MapleFile Backend with Wire DI...")
	if err := application.Start(); err != nil {
		log.Fatalf("Application failed: %v", err)
	}
}

View file

@ -0,0 +1,434 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/config.go
package config
import (
"fmt"
"os"
"strconv"
"strings"
"time"
)
// Config is the root configuration object for the backend, populated from
// environment variables by Load and grouped by subsystem.
type Config struct {
	App            AppConfig
	Server         ServerConfig
	Database       DatabaseConfig
	Cache          CacheConfig
	S3             S3Config
	JWT            JWTConfig
	Mailgun        MailgunConfig
	Observability  ObservabilityConfig
	Logging        LoggingConfig
	Security       SecurityConfig
	LeaderElection LeaderElectionConfig
	InviteEmail    InviteEmailConfig
	LoginRateLimit LoginRateLimitConfig
}

// Configuration is an alias for Config for backward compatibility
type Configuration = Config

// AppConfig holds application-level settings (environment name, version,
// and local data directory).
type AppConfig struct {
	Environment string
	Version     string
	DataDir     string
}

// ServerConfig holds HTTP server bind address and timeout settings.
type ServerConfig struct {
	Host            string
	Port            int
	ReadTimeout     time.Duration
	WriteTimeout    time.Duration
	IdleTimeout     time.Duration
	ShutdownTimeout time.Duration
}

// DatabaseConfig holds Cassandra connection, migration, and retry settings.
type DatabaseConfig struct {
	Hosts             []string
	Keyspace          string
	Consistency       string
	Username          string
	Password          string
	MigrationsPath    string
	AutoMigrate       bool // Run migrations automatically on startup
	ConnectTimeout    time.Duration
	RequestTimeout    time.Duration
	ReplicationFactor int
	MaxRetryAttempts  int
	RetryDelay        time.Duration
}

// CacheConfig holds Redis connection settings.
type CacheConfig struct {
	Host     string
	Port     int
	Password string
	DB       int
}

// S3Config holds S3-compatible object-storage settings.
type S3Config struct {
	Endpoint       string
	PublicEndpoint string // Public-facing endpoint for presigned URLs (e.g., http://localhost:8334)
	AccessKey      string
	SecretKey      string
	BucketName     string
	Region         string
	UseSSL         bool
	UsePathStyle   bool // Use path-style URLs (true for MinIO/SeaweedFS, false for AWS S3/DigitalOcean Spaces)
}

// JWTConfig holds JWT signing and session lifetime settings.
type JWTConfig struct {
	Secret                 string
	AccessTokenDuration    time.Duration
	RefreshTokenDuration   time.Duration
	SessionDuration        time.Duration
	SessionCleanupInterval time.Duration
}

// MailgunConfig holds outbound-email (Mailgun) settings.
type MailgunConfig struct {
	APIKey      string
	Domain      string
	APIBase     string
	SenderEmail string
	SenderName  string
	FrontendURL string
}

// ObservabilityConfig holds metrics and health-check endpoint settings.
type ObservabilityConfig struct {
	Enabled              bool
	Port                 int
	HealthCheckTimeout   time.Duration
	MetricsEnabled       bool
	HealthChecksEnabled  bool
	DetailedHealthChecks bool
}

// LoggingConfig holds log level and output-format settings.
type LoggingConfig struct {
	Level            string
	Format           string
	EnableStacktrace bool
	EnableCaller     bool
}

// SecurityConfig holds geo-blocking, rate-limiting, CORS, and IP
// anonymization settings.
type SecurityConfig struct {
	GeoLiteDBPath                string
	BannedCountries              []string
	RateLimitEnabled             bool
	IPBlockEnabled               bool
	AllowedOrigins               []string // CORS allowed origins
	TrustedProxies               []string
	IPAnonymizationEnabled       bool
	IPAnonymizationRetentionDays int
	IPAnonymizationSchedule      string
}

// LeaderElectionConfig holds distributed leader-election lock and heartbeat
// settings.
type LeaderElectionConfig struct {
	Enabled           bool
	LockTTL           time.Duration
	HeartbeatInterval time.Duration
	RetryInterval     time.Duration
	InstanceID        string
	Hostname          string
}

// InviteEmailConfig holds configuration for invitation emails to non-registered users
type InviteEmailConfig struct {
	MaxEmailsPerDay int // Maximum invitation emails a user can send per day
}

// LoginRateLimitConfig holds configuration for login rate limiting
type LoginRateLimitConfig struct {
	MaxAttemptsPerIP            int           // Maximum login attempts per IP in the window
	IPWindow                    time.Duration // Time window for IP-based rate limiting
	MaxFailedAttemptsPerAccount int           // Maximum failed attempts before account lockout
	AccountLockoutDuration      time.Duration // How long to lock an account after too many failures
}
// splitCSV splits a comma-separated environment value into its non-empty,
// whitespace-trimmed entries. A bare strings.Split("", ",") returns
// []string{""} — a one-element slice holding the empty string — which
// downstream consumers (e.g. banned-country, CORS-origin, or trusted-proxy
// checks) would treat as a real entry. This helper returns an empty slice
// for empty input and also tolerates spaces around commas.
func splitCSV(value string) []string {
	var entries []string
	for _, part := range strings.Split(value, ",") {
		if trimmed := strings.TrimSpace(part); trimmed != "" {
			entries = append(entries, trimmed)
		}
	}
	return entries
}

// Load builds the full Config from environment variables, applying the
// documented default for every variable that is unset or empty. It never
// fails today; the error return is kept so callers are prepared for future
// load-time validation.
func Load() (*Config, error) {
	cfg := &Config{
		// App
		App: AppConfig{
			Environment: getEnvString("APP_ENVIRONMENT", "development"),
			Version:     getEnvString("APP_VERSION", "0.1.0"),
			DataDir:     getEnvString("APP_DATA_DIRECTORY", "./data"),
		},
		// Server
		Server: ServerConfig{
			Host:            getEnvString("SERVER_HOST", "0.0.0.0"),
			Port:            getEnvInt("SERVER_PORT", 8000),
			ReadTimeout:     getEnvDuration("SERVER_READ_TIMEOUT", 30*time.Second),
			WriteTimeout:    getEnvDuration("SERVER_WRITE_TIMEOUT", 30*time.Second),
			IdleTimeout:     getEnvDuration("SERVER_IDLE_TIMEOUT", 60*time.Second),
			ShutdownTimeout: getEnvDuration("SERVER_SHUTDOWN_TIMEOUT", 10*time.Second),
		},
		// Database
		Database: DatabaseConfig{
			// splitCSV (not strings.Split) so hosts are trimmed and an empty
			// value yields an empty slice rather than [""].
			Hosts:             splitCSV(getEnvString("DATABASE_HOSTS", "localhost:9042")),
			Keyspace:          getEnvString("DATABASE_KEYSPACE", "maplefile"),
			Consistency:       getEnvString("DATABASE_CONSISTENCY", "QUORUM"),
			Username:          getEnvString("DATABASE_USERNAME", ""),
			Password:          getEnvString("DATABASE_PASSWORD", ""),
			MigrationsPath:    getEnvString("DATABASE_MIGRATIONS_PATH", "./migrations"),
			AutoMigrate:       getEnvBool("DATABASE_AUTO_MIGRATE", true),
			ConnectTimeout:    getEnvDuration("DATABASE_CONNECT_TIMEOUT", 10*time.Second),
			RequestTimeout:    getEnvDuration("DATABASE_REQUEST_TIMEOUT", 5*time.Second),
			ReplicationFactor: getEnvInt("DATABASE_REPLICATION", 3),
			MaxRetryAttempts:  getEnvInt("DATABASE_MAX_RETRIES", 3),
			RetryDelay:        getEnvDuration("DATABASE_RETRY_DELAY", 1*time.Second),
		},
		// Cache
		Cache: CacheConfig{
			Host:     getEnvString("CACHE_HOST", "localhost"),
			Port:     getEnvInt("CACHE_PORT", 6379),
			Password: getEnvString("CACHE_PASSWORD", ""),
			DB:       getEnvInt("CACHE_DB", 0),
		},
		// S3
		S3: S3Config{
			Endpoint:       getEnvString("S3_ENDPOINT", "http://localhost:9000"),
			PublicEndpoint: getEnvString("S3_PUBLIC_ENDPOINT", ""), // Falls back to Endpoint if not set
			// CWE-798: Remove default credentials - require explicit configuration
			// SECURITY: Default 'minioadmin' credentials removed for production safety
			AccessKey:    getEnvString("S3_ACCESS_KEY", ""),
			SecretKey:    getEnvString("S3_SECRET_KEY", ""),
			BucketName:   getEnvString("S3_BUCKET", "maplefile"),
			Region:       getEnvString("S3_REGION", "us-east-1"),
			UseSSL:       getEnvBool("S3_USE_SSL", false),
			UsePathStyle: getEnvBool("S3_USE_PATH_STYLE", true), // Default true for dev (SeaweedFS), false for prod (DO Spaces)
		},
		// JWT
		JWT: JWTConfig{
			// CWE-798: Remove default weak secret - require explicit configuration
			// SECURITY: Default 'change-me-in-production' removed to force proper JWT secret setup
			Secret:                 getEnvString("JWT_SECRET", ""),
			AccessTokenDuration:    getEnvDuration("JWT_ACCESS_TOKEN_DURATION", 15*time.Minute),
			RefreshTokenDuration:   getEnvDuration("JWT_REFRESH_TOKEN_DURATION", 7*24*time.Hour),
			SessionDuration:        getEnvDuration("JWT_SESSION_DURATION", 24*time.Hour),
			SessionCleanupInterval: getEnvDuration("JWT_SESSION_CLEANUP_INTERVAL", 1*time.Hour),
		},
		// Mailgun
		Mailgun: MailgunConfig{
			APIKey:      getEnvString("MAILGUN_API_KEY", ""),
			Domain:      getEnvString("MAILGUN_DOMAIN", ""),
			APIBase:     getEnvString("MAILGUN_API_BASE", "https://api.mailgun.net/v3"),
			SenderEmail: getEnvString("MAILGUN_FROM_EMAIL", "noreply@maplefile.app"),
			SenderName:  getEnvString("MAILGUN_FROM_NAME", "MapleFile"),
			FrontendURL: getEnvString("MAILGUN_FRONTEND_URL", "http://localhost:3000"),
		},
		// Observability
		Observability: ObservabilityConfig{
			Enabled:              getEnvBool("OBSERVABILITY_ENABLED", true),
			Port:                 getEnvInt("OBSERVABILITY_PORT", 9090),
			HealthCheckTimeout:   getEnvDuration("OBSERVABILITY_HEALTH_TIMEOUT", 5*time.Second),
			MetricsEnabled:       getEnvBool("OBSERVABILITY_METRICS_ENABLED", true),
			HealthChecksEnabled:  getEnvBool("OBSERVABILITY_HEALTH_ENABLED", true),
			DetailedHealthChecks: getEnvBool("OBSERVABILITY_DETAILED_HEALTH", false),
		},
		// Logging
		Logging: LoggingConfig{
			Level:            getEnvString("LOG_LEVEL", "info"),
			Format:           getEnvString("LOG_FORMAT", "json"),
			EnableStacktrace: getEnvBool("LOG_STACKTRACE", false),
			EnableCaller:     getEnvBool("LOG_CALLER", true),
		},
		// Security
		Security: SecurityConfig{
			GeoLiteDBPath: getEnvString("SECURITY_GEOLITE_DB_PATH", "./data/GeoLite2-Country.mmdb"),
			// splitCSV fixes the previous strings.Split behavior where an
			// unset variable produced a one-element slice containing "".
			BannedCountries:              splitCSV(getEnvString("SECURITY_BANNED_COUNTRIES", "")),
			RateLimitEnabled:             getEnvBool("SECURITY_RATE_LIMIT_ENABLED", true),
			IPBlockEnabled:               getEnvBool("SECURITY_IP_BLOCK_ENABLED", true),
			AllowedOrigins:               splitCSV(getEnvString("SECURITY_ALLOWED_ORIGINS", "")),
			TrustedProxies:               splitCSV(getEnvString("SECURITY_TRUSTED_PROXIES", "")),
			IPAnonymizationEnabled:       getEnvBool("SECURITY_IP_ANONYMIZATION_ENABLED", true),
			IPAnonymizationRetentionDays: getEnvInt("SECURITY_IP_ANONYMIZATION_RETENTION_DAYS", 90),
			IPAnonymizationSchedule:      getEnvString("SECURITY_IP_ANONYMIZATION_SCHEDULE", "0 2 * * *"), // Daily at 2 AM
		},
		// Leader Election
		LeaderElection: LeaderElectionConfig{
			Enabled:           getEnvBool("LEADER_ELECTION_ENABLED", true),
			LockTTL:           getEnvDuration("LEADER_ELECTION_LOCK_TTL", 10*time.Second),
			HeartbeatInterval: getEnvDuration("LEADER_ELECTION_HEARTBEAT_INTERVAL", 3*time.Second),
			RetryInterval:     getEnvDuration("LEADER_ELECTION_RETRY_INTERVAL", 2*time.Second),
			InstanceID:        getEnvString("LEADER_ELECTION_INSTANCE_ID", ""),
			Hostname:          getEnvString("LEADER_ELECTION_HOSTNAME", ""),
		},
		// Invite Email
		InviteEmail: InviteEmailConfig{
			MaxEmailsPerDay: getEnvInt("MAPLEFILE_INVITE_MAX_EMAILS_PER_DAY", 3),
		},
		// Login Rate Limiting
		LoginRateLimit: LoginRateLimitConfig{
			MaxAttemptsPerIP:            getEnvInt("LOGIN_RATE_LIMIT_MAX_ATTEMPTS_PER_IP", 50),
			IPWindow:                    getEnvDuration("LOGIN_RATE_LIMIT_IP_WINDOW", 15*time.Minute),
			MaxFailedAttemptsPerAccount: getEnvInt("LOGIN_RATE_LIMIT_MAX_FAILED_PER_ACCOUNT", 10),
			AccountLockoutDuration:      getEnvDuration("LOGIN_RATE_LIMIT_LOCKOUT_DURATION", 30*time.Minute),
		},
	}
	return cfg, nil
}
// Helper functions
func getEnvString(key, defaultValue string) string {
if value := os.Getenv(key); value != "" {
return value
}
return defaultValue
}
func getEnvInt(key string, defaultValue int) int {
if value := os.Getenv(key); value != "" {
if intValue, err := strconv.Atoi(value); err == nil {
return intValue
}
}
return defaultValue
}
func getEnvBool(key string, defaultValue bool) bool {
if value := os.Getenv(key); value != "" {
if boolValue, err := strconv.ParseBool(value); err == nil {
return boolValue
}
}
return defaultValue
}
func getEnvDuration(key string, defaultValue time.Duration) time.Duration {
if value := os.Getenv(key); value != "" {
if duration, err := time.ParseDuration(value); err == nil {
return duration
}
}
return defaultValue
}
// Validate checks the configuration for the current environment. For
// backward compatibility it applies the strict production checks only when
// APP_ENVIRONMENT is "production"; all other environments pass.
func (c *Config) Validate() error {
	if c.App.Environment != "production" {
		return nil
	}
	return c.ValidateProduction()
}
// ValidateProduction performs comprehensive validation of all critical configuration
// parameters for production environments to prevent security misconfigurations.
// CWE-798: Use of Hard-coded Credentials
// OWASP A05:2021: Security Misconfiguration
//
// All problems are collected and reported together so the operator sees the
// full list at once instead of fixing one variable per restart.
func (c *Config) ValidateProduction() error {
	// Named "problems" rather than "errors" so the local does not shadow
	// the standard library's errors package identifier.
	var problems []string

	// JWT Secret Validation
	if c.JWT.Secret == "" {
		problems = append(problems, "JWT_SECRET is required in production")
	} else if len(c.JWT.Secret) < 32 {
		problems = append(problems, "JWT_SECRET must be at least 32 characters for production security")
	}

	// Database Credentials Validation
	if len(c.Database.Hosts) == 0 {
		problems = append(problems, "DATABASE_HOSTS is required in production")
	}
	if c.Database.Keyspace == "" {
		problems = append(problems, "DATABASE_KEYSPACE is required in production")
	}
	// Password is optional for some Cassandra setups, but username requires password
	if c.Database.Username != "" && c.Database.Password == "" {
		problems = append(problems, "DATABASE_PASSWORD is required when DATABASE_USERNAME is set")
	}

	// S3/Object Storage Credentials Validation
	if c.S3.AccessKey == "" {
		problems = append(problems, "S3_ACCESS_KEY is required in production")
	}
	if c.S3.SecretKey == "" {
		problems = append(problems, "S3_SECRET_KEY is required in production")
	}
	if c.S3.BucketName == "" {
		problems = append(problems, "S3_BUCKET is required in production")
	}
	if c.S3.Endpoint == "" {
		problems = append(problems, "S3_ENDPOINT is required in production")
	}

	// Mailgun/Email Service Validation
	if c.Mailgun.APIKey == "" {
		problems = append(problems, "MAILGUN_API_KEY is required in production (email service needed)")
	}
	if c.Mailgun.Domain == "" {
		problems = append(problems, "MAILGUN_DOMAIN is required in production")
	}
	if c.Mailgun.SenderEmail == "" {
		problems = append(problems, "MAILGUN_FROM_EMAIL is required in production")
	}

	// Redis/Cache Configuration Validation
	if c.Cache.Host == "" {
		problems = append(problems, "CACHE_HOST is required in production")
	}
	// Note: Cache password is optional for some Redis setups

	// Security Configuration Validation
	if c.App.Environment != "production" {
		problems = append(problems, "APP_ENVIRONMENT must be set to 'production' for production deployments")
	}

	// CORS Security - reject wildcard origins in production
	for _, origin := range c.Security.AllowedOrigins {
		if origin == "*" {
			problems = append(problems, "SECURITY_ALLOWED_ORIGINS='*' is not recommended in production (security risk)")
		}
	}

	// SSL/TLS: S3_USE_SSL=false is deliberately tolerated (some internal
	// networks terminate TLS elsewhere), so it is not treated as an error.
	// The previous empty `if c.S3.UseSSL == false {}` branch was dead code
	// and has been removed.

	// Return all validation errors
	if len(problems) > 0 {
		return fmt.Errorf("production configuration validation failed:\n - %s", strings.Join(problems, "\n - "))
	}
	return nil
}
// ValidateDevelopment validates configuration for development environments.
// It is far less strict than ValidateProduction but still catches the basic
// settings nothing can run without.
func (c *Config) ValidateDevelopment() error {
	var problems []string

	// Basic validations that apply to all environments.
	if c.JWT.Secret == "" {
		problems = append(problems, "JWT_SECRET is required")
	}
	if c.Database.Keyspace == "" {
		problems = append(problems, "DATABASE_KEYSPACE is required")
	}
	if c.S3.BucketName == "" {
		problems = append(problems, "S3_BUCKET is required")
	}

	if len(problems) == 0 {
		return nil
	}
	return fmt.Errorf("development configuration validation failed:\n - %s", strings.Join(problems, "\n - "))
}

View file

@ -0,0 +1,403 @@
package config
import (
"strings"
"testing"
)
// TestValidateProduction_AllValid tests that a fully configured production setup passes validation
func TestValidateProduction_AllValid(t *testing.T) {
	// Every field ValidateProduction inspects is populated with a plausible
	// production value, so no problem should be reported.
	cfg := &Config{
		App: AppConfig{Environment: "production"},
		JWT: JWTConfig{
			Secret: "this-is-a-very-secure-secret-key-with-more-than-32-characters",
		},
		Database: DatabaseConfig{
			Hosts:    []string{"cassandra1.prod.example.com:9042"},
			Keyspace: "maplefile_prod",
			Username: "admin",
			Password: "secure_password_123",
		},
		S3: S3Config{
			AccessKey:  "AKIAIOSFODNN7EXAMPLE",
			SecretKey:  "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
			BucketName: "maplefile-production",
			Endpoint:   "https://s3.amazonaws.com",
		},
		Mailgun: MailgunConfig{
			APIKey:      "key-1234567890abcdef1234567890abcdef",
			Domain:      "mg.example.com",
			SenderEmail: "noreply@example.com",
		},
		Cache: CacheConfig{Host: "redis.prod.example.com"},
		Security: SecurityConfig{
			AllowedOrigins: []string{"https://app.example.com"},
		},
	}
	if err := cfg.ValidateProduction(); err != nil {
		t.Errorf("Expected valid production config to pass validation, got error: %v", err)
	}
}
// TestValidateProduction_MissingJWTSecret tests JWT secret validation
func TestValidateProduction_MissingJWTSecret(t *testing.T) {
	cfg := &Config{
		App: AppConfig{
			Environment: "production",
		},
		JWT: JWTConfig{
			Secret: "", // Missing
		},
		Database: DatabaseConfig{
			Hosts:    []string{"localhost:9042"},
			Keyspace: "test",
		},
		S3: S3Config{
			AccessKey:  "test",
			SecretKey:  "test",
			BucketName: "test",
			Endpoint:   "http://localhost:9000",
		},
		Mailgun: MailgunConfig{
			APIKey:      "test",
			Domain:      "test.com",
			SenderEmail: "test@test.com",
		},
		Cache: CacheConfig{
			Host: "localhost",
		},
	}
	err := cfg.ValidateProduction()
	if err == nil {
		// t.Fatal (not t.Error) stops the test here; t.Error would continue
		// into err.Error() below and panic on the nil error.
		t.Fatal("Expected error for missing JWT_SECRET in production")
	}
	if !strings.Contains(err.Error(), "JWT_SECRET is required") {
		t.Errorf("Expected JWT_SECRET error, got: %v", err)
	}
}
// TestValidateProduction_ShortJWTSecret tests JWT secret length validation
func TestValidateProduction_ShortJWTSecret(t *testing.T) {
	cfg := &Config{
		App: AppConfig{
			Environment: "production",
		},
		JWT: JWTConfig{
			Secret: "short", // Too short (less than 32 chars)
		},
		Database: DatabaseConfig{
			Hosts:    []string{"localhost:9042"},
			Keyspace: "test",
		},
		S3: S3Config{
			AccessKey:  "test",
			SecretKey:  "test",
			BucketName: "test",
			Endpoint:   "http://localhost:9000",
		},
		Mailgun: MailgunConfig{
			APIKey:      "test",
			Domain:      "test.com",
			SenderEmail: "test@test.com",
		},
		Cache: CacheConfig{
			Host: "localhost",
		},
	}
	err := cfg.ValidateProduction()
	if err == nil {
		// t.Fatal (not t.Error) so we never dereference a nil error below.
		t.Fatal("Expected error for short JWT_SECRET in production")
	}
	if !strings.Contains(err.Error(), "at least 32 characters") {
		t.Errorf("Expected JWT_SECRET length error, got: %v", err)
	}
}
// TestValidateProduction_MissingS3Credentials tests S3 credential validation
func TestValidateProduction_MissingS3Credentials(t *testing.T) {
	tests := []struct {
		name      string
		accessKey string
		secretKey string
		wantError string
	}{
		{
			name:      "missing access key",
			accessKey: "",
			secretKey: "valid-secret",
			wantError: "S3_ACCESS_KEY is required",
		},
		{
			name:      "missing secret key",
			accessKey: "valid-access",
			secretKey: "",
			wantError: "S3_SECRET_KEY is required",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			cfg := &Config{
				App: AppConfig{
					Environment: "production",
				},
				JWT: JWTConfig{
					Secret: "this-is-a-very-secure-secret-key-with-more-than-32-characters",
				},
				Database: DatabaseConfig{
					Hosts:    []string{"localhost:9042"},
					Keyspace: "test",
				},
				S3: S3Config{
					AccessKey:  tt.accessKey,
					SecretKey:  tt.secretKey,
					BucketName: "test",
					Endpoint:   "http://localhost:9000",
				},
				Mailgun: MailgunConfig{
					APIKey:      "test",
					Domain:      "test.com",
					SenderEmail: "test@test.com",
				},
				Cache: CacheConfig{
					Host: "localhost",
				},
			}
			err := cfg.ValidateProduction()
			if err == nil {
				// t.Fatalf (not t.Errorf) stops this subtest; continuing
				// would panic on err.Error() with a nil error.
				t.Fatalf("Expected error for %s in production", tt.name)
			}
			if !strings.Contains(err.Error(), tt.wantError) {
				t.Errorf("Expected error containing '%s', got: %v", tt.wantError, err)
			}
		})
	}
}
// TestValidateProduction_MissingMailgunCredentials tests email service validation
func TestValidateProduction_MissingMailgunCredentials(t *testing.T) {
	cfg := &Config{
		App: AppConfig{
			Environment: "production",
		},
		JWT: JWTConfig{
			Secret: "this-is-a-very-secure-secret-key-with-more-than-32-characters",
		},
		Database: DatabaseConfig{
			Hosts:    []string{"localhost:9042"},
			Keyspace: "test",
		},
		S3: S3Config{
			AccessKey:  "test",
			SecretKey:  "test",
			BucketName: "test",
			Endpoint:   "http://localhost:9000",
		},
		Mailgun: MailgunConfig{
			APIKey:      "", // Missing
			Domain:      "test.com",
			SenderEmail: "test@test.com",
		},
		Cache: CacheConfig{
			Host: "localhost",
		},
	}
	err := cfg.ValidateProduction()
	if err == nil {
		// t.Fatal (not t.Error) so we never dereference a nil error below.
		t.Fatal("Expected error for missing MAILGUN_API_KEY in production")
	}
	if !strings.Contains(err.Error(), "MAILGUN_API_KEY is required") {
		t.Errorf("Expected MAILGUN_API_KEY error, got: %v", err)
	}
}
// TestValidateProduction_MissingDatabaseConfig tests database configuration validation
func TestValidateProduction_MissingDatabaseConfig(t *testing.T) {
	cfg := &Config{
		App: AppConfig{
			Environment: "production",
		},
		JWT: JWTConfig{
			Secret: "this-is-a-very-secure-secret-key-with-more-than-32-characters",
		},
		Database: DatabaseConfig{
			Hosts:    []string{}, // Missing
			Keyspace: "",         // Missing
		},
		S3: S3Config{
			AccessKey:  "test",
			SecretKey:  "test",
			BucketName: "test",
			Endpoint:   "http://localhost:9000",
		},
		Mailgun: MailgunConfig{
			APIKey:      "test",
			Domain:      "test.com",
			SenderEmail: "test@test.com",
		},
		Cache: CacheConfig{
			Host: "localhost",
		},
	}
	err := cfg.ValidateProduction()
	if err == nil {
		// t.Fatal (not t.Error) so we never dereference a nil error below.
		t.Fatal("Expected error for missing database configuration in production")
	}
	if !strings.Contains(err.Error(), "DATABASE_HOSTS is required") {
		t.Errorf("Expected DATABASE_HOSTS error, got: %v", err)
	}
}
// TestValidateProduction_UnsafeOrigins tests CORS wildcard detection
func TestValidateProduction_UnsafeOrigins(t *testing.T) {
	cfg := &Config{
		App: AppConfig{
			Environment: "production",
		},
		JWT: JWTConfig{
			Secret: "this-is-a-very-secure-secret-key-with-more-than-32-characters",
		},
		Database: DatabaseConfig{
			Hosts:    []string{"localhost:9042"},
			Keyspace: "test",
		},
		S3: S3Config{
			AccessKey:  "test",
			SecretKey:  "test",
			BucketName: "test",
			Endpoint:   "http://localhost:9000",
		},
		Mailgun: MailgunConfig{
			APIKey:      "test",
			Domain:      "test.com",
			SenderEmail: "test@test.com",
		},
		Cache: CacheConfig{
			Host: "localhost",
		},
		Security: SecurityConfig{
			AllowedOrigins: []string{"*"}, // Unsafe wildcard
		},
	}
	err := cfg.ValidateProduction()
	if err == nil {
		// t.Fatal (not t.Error) so we never dereference a nil error below.
		t.Fatal("Expected error for wildcard CORS origin in production")
	}
	if !strings.Contains(err.Error(), "SECURITY_ALLOWED_ORIGINS='*'") {
		t.Errorf("Expected CORS wildcard warning, got: %v", err)
	}
}
// TestValidateProduction_MultipleErrors tests that all validation errors are collected
func TestValidateProduction_MultipleErrors(t *testing.T) {
	// Every required field is left blank, so validation must report all of
	// the corresponding problems in a single combined error.
	cfg := &Config{
		App:      AppConfig{Environment: "production"},
		JWT:      JWTConfig{Secret: ""},
		Database: DatabaseConfig{Hosts: []string{}, Keyspace: ""},
		S3: S3Config{
			AccessKey:  "",
			SecretKey:  "",
			BucketName: "",
			Endpoint:   "",
		},
		Mailgun: MailgunConfig{
			APIKey:      "",
			Domain:      "",
			SenderEmail: "",
		},
		Cache: CacheConfig{Host: ""},
	}
	err := cfg.ValidateProduction()
	if err == nil {
		t.Fatal("Expected multiple validation errors")
	}
	got := err.Error()
	for _, expected := range []string{
		"JWT_SECRET is required",
		"DATABASE_HOSTS is required",
		"DATABASE_KEYSPACE is required",
		"S3_ACCESS_KEY is required",
		"S3_SECRET_KEY is required",
		"S3_BUCKET is required",
		"S3_ENDPOINT is required",
		"MAILGUN_API_KEY is required",
		"MAILGUN_DOMAIN is required",
		"CACHE_HOST is required",
	} {
		if !strings.Contains(got, expected) {
			t.Errorf("Expected error message to contain '%s', got: %v", expected, got)
		}
	}
}
// TestValidate_Development tests that development environments use basic validation
func TestValidate_Development(t *testing.T) {
	// A short JWT secret and empty S3 credentials are tolerated outside
	// of production, so Validate must succeed here.
	cfg := &Config{
		App: AppConfig{Environment: "development"},
		JWT: JWTConfig{Secret: "dev-secret"},
		Database: DatabaseConfig{
			Hosts:    []string{"localhost:9042"},
			Keyspace: "maplefile_dev",
		},
		S3: S3Config{
			AccessKey:  "",
			SecretKey:  "",
			BucketName: "test",
		},
	}
	if err := cfg.Validate(); err != nil {
		t.Errorf("Development environment should not require strict validation, got: %v", err)
	}
}
// TestValidate_ProductionCallsValidateProduction tests integration
func TestValidate_ProductionCallsValidateProduction(t *testing.T) {
	cfg := &Config{
		App: AppConfig{
			Environment: "production",
		},
		JWT: JWTConfig{
			Secret: "", // This should trigger production validation
		},
	}
	err := cfg.Validate()
	if err == nil {
		// t.Fatal (not t.Error) so we never dereference a nil error below.
		t.Fatal("Expected production Validate() to call ValidateProduction() and fail")
	}
	if !strings.Contains(err.Error(), "JWT_SECRET is required") {
		t.Errorf("Expected ValidateProduction error, got: %v", err)
	}
}

View file

@ -0,0 +1,6 @@
package constants
// Monolith module identifiers. Numbering starts at 1 so the zero value of
// key never matches a real module.
const (
	MonolithModuleMapleFile key = iota + 1 // Start numbering at 1
	MonolithModulePaperCloud
)

View file

@ -0,0 +1,23 @@
package constants
// key is an unexported integer type for the context/session keys declared
// in this package, so values here cannot collide with keys from other
// packages.
type key int

// Session context keys used to store and retrieve per-request session
// values. Values are assigned via iota, starting at 0.
const (
	SessionIsAuthorized key = iota
	SessionSkipAuthorization
	SessionID
	SessionIPAddress
	SessionProxies
	SessionUser
	SessionUserCompanyName
	SessionUserRole
	SessionUserID
	SessionUserTimezone
	SessionUserName
	SessionUserFirstName
	SessionUserLastName
	SessionUserStoreID
	SessionUserStoreName
	SessionUserStoreLevel
	SessionUserStoreTimezone
)

View file

@ -0,0 +1,64 @@
# ============================================================================
# DEVELOPERS NOTE:
# THE PURPOSE OF THIS DOCKERFILE IS TO BUILD THE MAPLEFILE BACKEND
# EXECUTABLE IN A CONTAINER FOR DEVELOPMENT PURPOSES ON YOUR
# MACHINE. DO NOT RUN THIS IN PRODUCTION ENVIRONMENT.
# ============================================================================
# Start with the official Golang image
FROM golang:1.25.4
# ============================================================================
# SETUP PROJECT DIRECTORY STRUCTURE
# ============================================================================
# Set the working directory first
WORKDIR /go/src/codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend
# ============================================================================
# DEPENDENCY MANAGEMENT (DO THIS FIRST FOR BETTER CACHING)
# ============================================================================
# Copy dependency files first to take advantage of Docker layer caching
COPY go.mod go.sum ./
# Download all dependencies
RUN go mod download
# ============================================================================
# INSTALL DEVELOPMENT TOOLS
# ============================================================================
# Install CompileDaemon for hot reloading
RUN go install github.com/githubnemo/CompileDaemon@latest
# Install curl for healthcheck
RUN apt-get update && apt-get install -y curl && rm -rf /var/lib/apt/lists/*
# ============================================================================
# CREATE SIMPLIFIED BUILD SCRIPT
# ============================================================================
# NOTE(review): the embedded script relies on `echo` interpreting `\n`
# escapes — this presumably holds for the Debian /bin/sh in the golang
# image, but not for bash without -e; `printf` would be the portable
# choice. Confirm if the base image ever changes.
RUN echo '#!/bin/sh\n\
echo "============================================================"\n\
echo "BEGINNING BUILD PROCESS"\n\
echo "============================================================"\n\
\n\
echo "[1/1] Building application..."\n\
go build -o maplefile-backend .\n\
if [ $? -ne 0 ]; then\n\
echo "Build failed!"\n\
exit 1\n\
fi\n\
\n\
echo "Build completed successfully!"\n\
' > /go/bin/build.sh && chmod +x /go/bin/build.sh
# ============================================================================
# COPY SOURCE CODE (AFTER DEPENDENCIES)
# ============================================================================
# Copy all source code
COPY . .
# ============================================================================
# SET UP CONTINUOUS DEVELOPMENT ENVIRONMENT
# ============================================================================
# Use CompileDaemon with simpler configuration
# Automatically builds and starts the daemon with auto-migration
# Exclude the binary to prevent infinite rebuild loops
ENTRYPOINT ["CompileDaemon", "-polling=true", "-log-prefix=false", "-build=/go/bin/build.sh", "-command=./maplefile-backend daemon", "-directory=./", "-exclude-dir=.git", "-exclude=maplefile-backend"]

View file

@ -0,0 +1,120 @@
# Use external network from infrastructure
networks:
  maple-dev:
    external: true
services:
  app:
    container_name: maplefile-backend-dev
    stdin_open: true
    build:
      context: .
      dockerfile: ./dev.Dockerfile
    ports:
      - "${SERVER_PORT:-8000}:${SERVER_PORT:-8000}"
    env_file:
      - .env
    environment:
      # Application Configuration
      APP_ENVIRONMENT: ${APP_ENVIRONMENT:-development}
      APP_VERSION: ${APP_VERSION:-0.1.0}
      APP_DATA_DIRECTORY: ${APP_DATA_DIRECTORY:-/app/data}
      # HTTP Server Configuration
      SERVER_HOST: ${SERVER_HOST:-0.0.0.0}
      SERVER_PORT: ${SERVER_PORT:-8000}
      SERVER_READ_TIMEOUT: ${SERVER_READ_TIMEOUT:-30s}
      SERVER_WRITE_TIMEOUT: ${SERVER_WRITE_TIMEOUT:-30s}
      SERVER_IDLE_TIMEOUT: ${SERVER_IDLE_TIMEOUT:-60s}
      SERVER_SHUTDOWN_TIMEOUT: ${SERVER_SHUTDOWN_TIMEOUT:-10s}
      # Cassandra Database Configuration
      # Connect to external infrastructure (use all 3 nodes in cluster)
      DATABASE_HOSTS: ${DATABASE_HOSTS:-cassandra-1:9042,cassandra-2:9042,cassandra-3:9042}
      DATABASE_KEYSPACE: ${DATABASE_KEYSPACE:-maplefile}
      DATABASE_CONSISTENCY: ${DATABASE_CONSISTENCY:-QUORUM}
      DATABASE_REPLICATION: ${DATABASE_REPLICATION:-3}
      DATABASE_MIGRATIONS_PATH: ${DATABASE_MIGRATIONS_PATH:-file://migrations}
      DATABASE_CONNECT_TIMEOUT: ${DATABASE_CONNECT_TIMEOUT:-10s}
      DATABASE_REQUEST_TIMEOUT: ${DATABASE_REQUEST_TIMEOUT:-5s}
      DATABASE_MAX_RETRIES: ${DATABASE_MAX_RETRIES:-3}
      DATABASE_RETRY_DELAY: ${DATABASE_RETRY_DELAY:-1s}
      # Redis Cache Configuration
      # Connect to external infrastructure
      CACHE_HOST: ${CACHE_HOST:-redis}
      CACHE_PORT: ${CACHE_PORT:-6379}
      CACHE_PASSWORD: ${CACHE_PASSWORD:-}
      CACHE_DB: ${CACHE_DB:-0}
      # S3 Configuration (SeaweedFS - S3-compatible storage)
      # Using nginx-s3-proxy on port 8334 for CORS-enabled access
      S3_ENDPOINT: ${S3_ENDPOINT:-http://nginx-s3-proxy:8334}
      S3_ACCESS_KEY: ${S3_ACCESS_KEY:-any}
      S3_SECRET_KEY: ${S3_SECRET_KEY:-any}
      S3_BUCKET: ${S3_BUCKET:-maplefile}
      S3_REGION: ${S3_REGION:-us-east-1}
      S3_USE_SSL: ${S3_USE_SSL:-false}
      S3_USE_PATH_STYLE: ${S3_USE_PATH_STYLE:-true}
      # JWT Authentication
      # NOTE(review): development-only fallback secret. At 23 characters it
      # fails the 32-character minimum that config.ValidateProduction
      # enforces, so it cannot silently leak into a production deployment.
      JWT_SECRET: ${JWT_SECRET:-change-me-in-production}
      JWT_ACCESS_TOKEN_DURATION: ${JWT_ACCESS_TOKEN_DURATION:-15m}
      JWT_REFRESH_TOKEN_DURATION: ${JWT_REFRESH_TOKEN_DURATION:-168h}
      JWT_SESSION_DURATION: ${JWT_SESSION_DURATION:-24h}
      JWT_SESSION_CLEANUP_INTERVAL: ${JWT_SESSION_CLEANUP_INTERVAL:-1h}
      # Email (Mailgun)
      MAILGUN_API_KEY: ${MAILGUN_API_KEY:-}
      MAILGUN_DOMAIN: ${MAILGUN_DOMAIN:-}
      MAILGUN_API_BASE: ${MAILGUN_API_BASE:-https://api.mailgun.net/v3}
      MAILGUN_FROM_EMAIL: ${MAILGUN_FROM_EMAIL:-noreply@maplefile.app}
      MAILGUN_FROM_NAME: ${MAILGUN_FROM_NAME:-MapleFile}
      MAILGUN_FRONTEND_URL: ${MAILGUN_FRONTEND_URL:-http://localhost:3000}
      # Invite Email Configuration
      MAPLEFILE_INVITE_MAX_EMAILS_PER_DAY: ${MAPLEFILE_INVITE_MAX_EMAILS_PER_DAY:-3}
      # Login Rate Limiting
      LOGIN_RATE_LIMIT_MAX_ATTEMPTS_PER_IP: ${LOGIN_RATE_LIMIT_MAX_ATTEMPTS_PER_IP:-50}
      LOGIN_RATE_LIMIT_IP_WINDOW: ${LOGIN_RATE_LIMIT_IP_WINDOW:-15m}
      LOGIN_RATE_LIMIT_MAX_FAILED_PER_ACCOUNT: ${LOGIN_RATE_LIMIT_MAX_FAILED_PER_ACCOUNT:-10}
      LOGIN_RATE_LIMIT_LOCKOUT_DURATION: ${LOGIN_RATE_LIMIT_LOCKOUT_DURATION:-30m}
      # Observability
      OBSERVABILITY_ENABLED: ${OBSERVABILITY_ENABLED:-true}
      OBSERVABILITY_PORT: ${OBSERVABILITY_PORT:-9090}
      OBSERVABILITY_HEALTH_TIMEOUT: ${OBSERVABILITY_HEALTH_TIMEOUT:-5s}
      OBSERVABILITY_METRICS_ENABLED: ${OBSERVABILITY_METRICS_ENABLED:-true}
      OBSERVABILITY_HEALTH_ENABLED: ${OBSERVABILITY_HEALTH_ENABLED:-true}
      OBSERVABILITY_DETAILED_HEALTH: ${OBSERVABILITY_DETAILED_HEALTH:-false}
      # Logging
      LOG_LEVEL: ${LOG_LEVEL:-info}
      LOG_FORMAT: ${LOG_FORMAT:-json}
      LOG_STACKTRACE: ${LOG_STACKTRACE:-false}
      LOG_CALLER: ${LOG_CALLER:-true}
      # Security
      SECURITY_GEOLITE_DB_PATH: ${SECURITY_GEOLITE_DB_PATH:-./data/GeoLite2-Country.mmdb}
      SECURITY_BANNED_COUNTRIES: ${SECURITY_BANNED_COUNTRIES:-}
      SECURITY_RATE_LIMIT_ENABLED: ${SECURITY_RATE_LIMIT_ENABLED:-true}
      SECURITY_IP_BLOCK_ENABLED: ${SECURITY_IP_BLOCK_ENABLED:-true}
      # Leader Election
      LEADER_ELECTION_ENABLED: ${LEADER_ELECTION_ENABLED:-true}
      LEADER_ELECTION_LOCK_TTL: ${LEADER_ELECTION_LOCK_TTL:-10s}
      LEADER_ELECTION_HEARTBEAT_INTERVAL: ${LEADER_ELECTION_HEARTBEAT_INTERVAL:-3s}
      LEADER_ELECTION_RETRY_INTERVAL: ${LEADER_ELECTION_RETRY_INTERVAL:-2s}
    volumes:
      - ./:/go/src/codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend
    networks:
      - maple-dev
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:${SERVER_PORT:-8000}/health"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 30s

View file

@ -0,0 +1,212 @@
# Docker Compose for MapleFile Backend - Production
# NOTE: the top-level `version` key is obsolete and ignored by Compose v2;
# it is kept only for compatibility with older docker-compose binaries.
version: '3.8'

services:
  # MapleFile Backend Application
  backend:
    build:
      context: .
      dockerfile: Dockerfile
    container_name: maplefile-backend
    restart: unless-stopped
    ports:
      - "${SERVER_PORT:-8000}:8000"
    environment:
      # Application
      - APP_ENVIRONMENT=${APP_ENVIRONMENT:-production}
      - APP_VERSION=${APP_VERSION:-0.1.0}
      - APP_DATA_DIRECTORY=/app/data
      # Server
      - SERVER_HOST=0.0.0.0
      - SERVER_PORT=8000
      - SERVER_READ_TIMEOUT=${SERVER_READ_TIMEOUT:-30s}
      - SERVER_WRITE_TIMEOUT=${SERVER_WRITE_TIMEOUT:-30s}
      - SERVER_IDLE_TIMEOUT=${SERVER_IDLE_TIMEOUT:-60s}
      - SERVER_SHUTDOWN_TIMEOUT=${SERVER_SHUTDOWN_TIMEOUT:-10s}
      # Database (Cassandra)
      - DATABASE_HOSTS=cassandra:9042
      - DATABASE_KEYSPACE=${DATABASE_KEYSPACE:-maplefile}
      - DATABASE_CONSISTENCY=${DATABASE_CONSISTENCY:-QUORUM}
      - DATABASE_USERNAME=${DATABASE_USERNAME:-}
      - DATABASE_PASSWORD=${DATABASE_PASSWORD:-}
      - DATABASE_MIGRATIONS_PATH=./migrations
      # Cache (Redis)
      - CACHE_HOST=redis
      - CACHE_PORT=6379
      - CACHE_PASSWORD=${CACHE_PASSWORD:-}
      - CACHE_DB=${CACHE_DB:-0}
      # S3 Storage
      - S3_ENDPOINT=${S3_ENDPOINT:-http://minio:9000}
      - S3_ACCESS_KEY=${S3_ACCESS_KEY:-minioadmin}
      - S3_SECRET_KEY=${S3_SECRET_KEY:-minioadmin}
      - S3_BUCKET=${S3_BUCKET:-maplefile}
      - S3_REGION=${S3_REGION:-us-east-1}
      - S3_USE_SSL=${S3_USE_SSL:-false}
      # JWT
      # SECURITY: the JWT_SECRET fallback is a placeholder only; it MUST be
      # overridden in any real production deployment.
      - JWT_SECRET=${JWT_SECRET:-change-me-in-production}
      - JWT_ACCESS_TOKEN_DURATION=${JWT_ACCESS_TOKEN_DURATION:-15m}
      - JWT_REFRESH_TOKEN_DURATION=${JWT_REFRESH_TOKEN_DURATION:-168h}
      - JWT_SESSION_DURATION=${JWT_SESSION_DURATION:-24h}
      # Email (Mailgun) - no defaults: these are required secrets
      - MAILGUN_API_KEY=${MAILGUN_API_KEY}
      - MAILGUN_DOMAIN=${MAILGUN_DOMAIN}
      - MAILGUN_API_BASE=${MAILGUN_API_BASE:-https://api.mailgun.net/v3}
      - MAILGUN_FROM_EMAIL=${MAILGUN_FROM_EMAIL:-noreply@maplefile.app}
      - MAILGUN_FROM_NAME=${MAILGUN_FROM_NAME:-MapleFile}
      - MAILGUN_FRONTEND_URL=${MAILGUN_FRONTEND_URL}
      # Invite Email Configuration
      - MAPLEFILE_INVITE_MAX_EMAILS_PER_DAY=${MAPLEFILE_INVITE_MAX_EMAILS_PER_DAY:-3}
      # Login Rate Limiting (production defaults - more restrictive)
      - LOGIN_RATE_LIMIT_MAX_ATTEMPTS_PER_IP=${LOGIN_RATE_LIMIT_MAX_ATTEMPTS_PER_IP:-50}
      - LOGIN_RATE_LIMIT_IP_WINDOW=${LOGIN_RATE_LIMIT_IP_WINDOW:-15m}
      - LOGIN_RATE_LIMIT_MAX_FAILED_PER_ACCOUNT=${LOGIN_RATE_LIMIT_MAX_FAILED_PER_ACCOUNT:-10}
      - LOGIN_RATE_LIMIT_LOCKOUT_DURATION=${LOGIN_RATE_LIMIT_LOCKOUT_DURATION:-30m}
      # Logging
      - LOG_LEVEL=${LOG_LEVEL:-info}
      - LOG_FORMAT=${LOG_FORMAT:-json}
      # Leader Election
      - LEADER_ELECTION_ENABLED=${LEADER_ELECTION_ENABLED:-true}
      - LEADER_ELECTION_LOCK_TTL=${LEADER_ELECTION_LOCK_TTL:-10s}
      - LEADER_ELECTION_HEARTBEAT_INTERVAL=${LEADER_ELECTION_HEARTBEAT_INTERVAL:-3s}
      - LEADER_ELECTION_RETRY_INTERVAL=${LEADER_ELECTION_RETRY_INTERVAL:-2s}
    volumes:
      - backend_data:/app/data
    depends_on:
      cassandra:
        condition: service_healthy
      redis:
        condition: service_healthy
      minio:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s
    networks:
      - maplefile-net
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"

  # Cassandra Database
  cassandra:
    image: cassandra:4.1
    container_name: maplefile-cassandra
    restart: unless-stopped
    environment:
      - CASSANDRA_CLUSTER_NAME=maplefile-cluster
      - CASSANDRA_DC=${CASSANDRA_DC:-datacenter1}
      - CASSANDRA_RACK=${CASSANDRA_RACK:-rack1}
      - CASSANDRA_ENDPOINT_SNITCH=GossipingPropertyFileSnitch
      - MAX_HEAP_SIZE=${CASSANDRA_MAX_HEAP_SIZE:-2G}
      - HEAP_NEWSIZE=${CASSANDRA_HEAP_NEWSIZE:-512M}
    volumes:
      - cassandra_data:/var/lib/cassandra
    healthcheck:
      test: ["CMD-SHELL", "cqlsh -e 'describe cluster' || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 10
      start_period: 90s
    networks:
      - maplefile-net
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"

  # Redis Cache
  redis:
    image: redis:7-alpine
    container_name: maplefile-redis
    restart: unless-stopped
    # The password is quoted so an unset CACHE_PASSWORD still produces a
    # single (empty) argument; Redis treats an empty requirepass as "auth
    # disabled" instead of refusing to start on a missing argument.
    command: >
      redis-server
      --appendonly yes
      --maxmemory ${REDIS_MAX_MEMORY:-512mb}
      --maxmemory-policy allkeys-lru
      --requirepass "${CACHE_PASSWORD:-}"
    volumes:
      - redis_data:/data
    healthcheck:
      # NOTE(review): when CACHE_PASSWORD is set, an unauthenticated
      # `redis-cli ping` answers NOAUTH but still exits 0, so this check can
      # false-positive; consider an authenticated PING piped through grep.
      test: ["CMD", "redis-cli", "ping"]
      interval: 10s
      timeout: 5s
      retries: 5
    networks:
      - maplefile-net
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"

  # MinIO S3-compatible storage
  minio:
    image: minio/minio:latest
    container_name: maplefile-minio
    restart: unless-stopped
    environment:
      - MINIO_ROOT_USER=${S3_ACCESS_KEY:-minioadmin}
      - MINIO_ROOT_PASSWORD=${S3_SECRET_KEY:-minioadmin}
    volumes:
      - minio_data:/data
    command: server /data --console-address ":9001"
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 15s
      timeout: 10s
      retries: 5
      start_period: 20s
    networks:
      - maplefile-net
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"

  # MinIO Initialization - one-shot bucket bootstrap, exits when done
  minio-init:
    image: minio/mc:latest
    container_name: maplefile-minio-init
    depends_on:
      minio:
        condition: service_healthy
    entrypoint: >
      /bin/sh -c "
      mc alias set myminio http://minio:9000 ${S3_ACCESS_KEY:-minioadmin} ${S3_SECRET_KEY:-minioadmin};
      mc mb myminio/${S3_BUCKET:-maplefile} --ignore-existing;
      echo 'MinIO initialization complete';
      "
    networks:
      - maplefile-net

volumes:
  backend_data:
    driver: local
  cassandra_data:
    driver: local
  redis_data:
    driver: local
  minio_data:
    driver: local

networks:
  maplefile-net:
    driver: bridge

View file

@ -0,0 +1,5 @@
module codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend
go 1.25.4
require go.uber.org/mock v0.6.0 // indirect

View file

@ -0,0 +1,2 @@
go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y=
go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU=

View file

@ -0,0 +1,17 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/blockedemail/entity.go
package blockedemail
import (
"time"
"github.com/gocql/gocql"
)
// BlockedEmail represents a blocked email entry for a user.
// Stored per blocking user; presumably keyed by (UserID, BlockedEmail)
// in Cassandra — TODO confirm against the table schema.
type BlockedEmail struct {
	UserID        gocql.UUID `json:"user_id"`         // User who owns this block-list entry.
	BlockedEmail  string     `json:"blocked_email"`   // Email address being blocked.
	BlockedUserID gocql.UUID `json:"blocked_user_id"` // Presumably the account ID behind BlockedEmail, if resolved — confirm with caller.
	Reason        string     `json:"reason"`          // Free-form reason supplied when the block was created.
	CreatedAt     time.Time  `json:"created_at"`      // Timestamp when the block was created.
}

View file

@ -0,0 +1,29 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/blockedemail/interface.go
package blockedemail
import (
"context"
"github.com/gocql/gocql"
)
// BlockedEmailRepository defines the interface for blocked email data access.
// All lookups are scoped to the blocking user's ID.
type BlockedEmailRepository interface {
	// Create adds a new blocked email entry.
	Create(ctx context.Context, blockedEmail *BlockedEmail) error
	// Get retrieves the entry identified by (userID, blockedEmail).
	Get(ctx context.Context, userID gocql.UUID, blockedEmail string) (*BlockedEmail, error)
	// List retrieves all blocked emails for a user.
	List(ctx context.Context, userID gocql.UUID) ([]*BlockedEmail, error)
	// Delete removes a blocked email entry.
	Delete(ctx context.Context, userID gocql.UUID, blockedEmail string) error
	// IsBlocked checks if an email is blocked by a user.
	IsBlocked(ctx context.Context, userID gocql.UUID, email string) (bool, error)
	// Count returns the number of blocked emails for a user.
	Count(ctx context.Context, userID gocql.UUID) (int, error)
}

View file

@ -0,0 +1,24 @@
// monorepo/cloud/backend/internal/maplefile/domain/collection/constants.go
package collection
// Collection types: how a collection is presented and used.
const (
	CollectionTypeFolder = "folder"
	CollectionTypeAlbum  = "album"
)

// Permission levels a member can hold on a collection, in increasing order
// of capability.
const (
	CollectionPermissionReadOnly  = "read_only"
	CollectionPermissionReadWrite = "read_write"
	CollectionPermissionAdmin     = "admin"
)

// Lifecycle states of a collection. Valid transitions between these are
// enforced by IsValidStateTransition.
const (
	CollectionStateActive   = "active"
	CollectionStateDeleted  = "deleted"
	CollectionStateArchived = "archived"
)

// Access types distinguishing how a user relates to a collection:
// as its owner or as a member it was shared with.
const (
	CollectionAccessTypeOwner  = "owner"
	CollectionAccessTypeMember = "member"
)

View file

@ -0,0 +1,43 @@
// monorepo/cloud/backend/internal/maplefile/domain/collection/filter.go
package collection
import "github.com/gocql/gocql"
// CollectionFilterOptions defines the filtering options for retrieving
// collections. At least one of IncludeOwned/IncludeShared should be set;
// see IsValid.
type CollectionFilterOptions struct {
	// IncludeOwned includes collections where the user is the owner.
	IncludeOwned bool `json:"include_owned"`
	// IncludeShared includes collections where the user is a member
	// (i.e. collections shared with them).
	IncludeShared bool `json:"include_shared"`
	// UserID is the user for whom we're filtering collections.
	UserID gocql.UUID `json:"user_id"`
}
// CollectionFilterResult represents the result of a filtered collection
// query, split by the user's relationship to each collection.
type CollectionFilterResult struct {
	// OwnedCollections are collections where the user is the owner.
	OwnedCollections []*Collection `json:"owned_collections"`
	// SharedCollections are collections shared with the user.
	SharedCollections []*Collection `json:"shared_collections"`
	// TotalCount is the total number of collections returned.
	TotalCount int `json:"total_count"`
}
// GetAllCollections merges owned and shared collections into a single
// slice, owned first, preserving the order of each source slice.
func (r *CollectionFilterResult) GetAllCollections() []*Collection {
	merged := make([]*Collection, 0, len(r.OwnedCollections)+len(r.SharedCollections))
	merged = append(append(merged, r.OwnedCollections...), r.SharedCollections...)
	return merged
}
// IsValid reports whether the filter would select anything at all:
// at least one of IncludeOwned / IncludeShared must be enabled.
func (o *CollectionFilterOptions) IsValid() bool {
	if o.IncludeOwned {
		return true
	}
	return o.IncludeShared
}
// ShouldIncludeAll reports whether both owned and shared collections
// are requested by this filter.
func (o *CollectionFilterOptions) ShouldIncludeAll() bool {
	if !o.IncludeOwned {
		return false
	}
	return o.IncludeShared
}

View file

@ -0,0 +1,89 @@
// monorepo/cloud/maplefile-backend/internal/maplefile/domain/collection/interface.go
package collection
import (
"context"
"time"
"github.com/gocql/gocql"
)
// CollectionRepository defines the interface for collection persistence
// operations against Cassandra. Implementations are expected to honor the
// collection lifecycle states (active/deleted/archived) where noted.
type CollectionRepository interface {
	// Collection CRUD operations
	Create(ctx context.Context, collection *Collection) error
	Get(ctx context.Context, id gocql.UUID) (*Collection, error)
	Update(ctx context.Context, collection *Collection) error
	SoftDelete(ctx context.Context, id gocql.UUID) error // Now soft delete (marks state, keeps the row)
	HardDelete(ctx context.Context, id gocql.UUID) error // Physically removes the collection

	// State management operations (archive and restore-from-archive)
	Archive(ctx context.Context, id gocql.UUID) error
	Restore(ctx context.Context, id gocql.UUID) error

	// Hierarchical queries (now state-aware)
	FindByParent(ctx context.Context, parentID gocql.UUID) ([]*Collection, error)
	FindRootCollections(ctx context.Context, ownerID gocql.UUID) ([]*Collection, error)
	FindDescendants(ctx context.Context, collectionID gocql.UUID) ([]*Collection, error)
	// GetFullHierarchy(ctx context.Context, rootID gocql.UUID) (*Collection, error) // DEPRECATED AND WILL BE REMOVED

	// Move collection to a new parent. The caller supplies the recomputed
	// ancestor chain and path segments for the moved subtree.
	MoveCollection(ctx context.Context, collectionID, newParentID gocql.UUID, updatedAncestors []gocql.UUID, updatedPathSegments []string) error

	// Collection ownership and access queries (now state-aware)
	CheckIfExistsByID(ctx context.Context, id gocql.UUID) (bool, error)
	GetAllByUserID(ctx context.Context, ownerID gocql.UUID) ([]*Collection, error)
	GetCollectionsSharedWithUser(ctx context.Context, userID gocql.UUID) ([]*Collection, error)
	IsCollectionOwner(ctx context.Context, collectionID, userID gocql.UUID) (bool, error)
	CheckAccess(ctx context.Context, collectionID, userID gocql.UUID, requiredPermission string) (bool, error)
	GetUserPermissionLevel(ctx context.Context, collectionID, userID gocql.UUID) (string, error)

	// Filtered collection queries (now state-aware)
	GetCollectionsWithFilter(ctx context.Context, options CollectionFilterOptions) (*CollectionFilterResult, error)

	// Collection membership operations
	AddMember(ctx context.Context, collectionID gocql.UUID, membership *CollectionMembership) error
	RemoveMember(ctx context.Context, collectionID, recipientID gocql.UUID) error
	// RemoveUserFromAllCollections removes the user's memberships everywhere
	// and returns the IDs of the collections that were affected.
	RemoveUserFromAllCollections(ctx context.Context, userID gocql.UUID, userEmail string) ([]gocql.UUID, error)
	UpdateMemberPermission(ctx context.Context, collectionID, recipientID gocql.UUID, newPermission string) error
	GetCollectionMembership(ctx context.Context, collectionID, recipientID gocql.UUID) (*CollectionMembership, error)

	// Hierarchical sharing: grant/revoke membership on a root collection
	// and its descendants.
	AddMemberToHierarchy(ctx context.Context, rootID gocql.UUID, membership *CollectionMembership) error
	RemoveMemberFromHierarchy(ctx context.Context, rootID, recipientID gocql.UUID) error

	// GetCollectionSyncData retrieves collection sync data with pagination for the specified user
	GetCollectionSyncData(ctx context.Context, userID gocql.UUID, cursor *CollectionSyncCursor, limit int64) (*CollectionSyncResponse, error)
	GetCollectionSyncDataByAccessType(ctx context.Context, userID gocql.UUID, cursor *CollectionSyncCursor, limit int64, accessType string) (*CollectionSyncResponse, error)

	// Count operations for all collection types (folders + albums)
	CountOwnedCollections(ctx context.Context, userID gocql.UUID) (int, error)
	CountSharedCollections(ctx context.Context, userID gocql.UUID) (int, error)
	CountOwnedFolders(ctx context.Context, userID gocql.UUID) (int, error)
	CountSharedFolders(ctx context.Context, userID gocql.UUID) (int, error)
	CountTotalUniqueFolders(ctx context.Context, userID gocql.UUID) (int, error)

	// IP Anonymization for GDPR compliance. Each returns the number of
	// rows affected.
	AnonymizeOldIPs(ctx context.Context, cutoffDate time.Time) (int, error)
	AnonymizeCollectionIPsByOwner(ctx context.Context, ownerID gocql.UUID) (int, error) // For GDPR right-to-be-forgotten

	// File count maintenance operations (denormalized FileCount upkeep)
	IncrementFileCount(ctx context.Context, collectionID gocql.UUID) error
	DecrementFileCount(ctx context.Context, collectionID gocql.UUID) error

	// RecalculateAllFileCounts recalculates file_count for all collections
	// by counting active files. Used for data migration/repair.
	RecalculateAllFileCounts(ctx context.Context) (*RecalculateAllFileCountsResult, error)

	// Tag-related operations
	// ListByTagID retrieves all collections that have the specified tag assigned.
	// Used for tag update propagation (updating embedded tag data across all collections).
	ListByTagID(ctx context.Context, tagID gocql.UUID) ([]*Collection, error)
}
// RecalculateAllFileCountsResult holds the results of the recalculation
// operation performed by RecalculateAllFileCounts.
type RecalculateAllFileCountsResult struct {
	TotalCollections int // Number of collections examined.
	UpdatedCount     int // Number of collections whose file_count was changed.
	ErrorCount       int // Number of collections that failed to update.
}

View file

@ -0,0 +1,124 @@
// monorepo/cloud/maplefile-backend/internal/maplefile/domain/collection/model.go
package collection
import (
"time"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/crypto"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/tag"
"github.com/gocql/gocql"
)
// Collection represents a folder or album.
// Can be used for both root collections and embedded subcollections.
//
// NOTE(review): the bson struct tags below look inherited from an earlier
// MongoDB implementation; gocql/Cassandra does not consume them — confirm
// whether they are still needed before removing.
type Collection struct {
	// Identifiers
	// ID is the unique identifier for the collection in the cloud backend.
	ID gocql.UUID `bson:"_id" json:"id"`
	// OwnerID is the ID of the user who originally created and owns this collection.
	// The owner has administrative privileges by default.
	OwnerID gocql.UUID `bson:"owner_id" json:"owner_id"`
	// Encryption and Content Details
	// EncryptedName is the name of the collection, encrypted using the collection's unique key.
	// Stored and transferred in encrypted form.
	EncryptedName string `bson:"encrypted_name" json:"encrypted_name"`
	// CollectionType indicates the nature of the collection, either "folder" or "album".
	// Defined by CollectionTypeFolder and CollectionTypeAlbum constants.
	CollectionType string `bson:"collection_type" json:"collection_type"` // "folder" or "album"
	// EncryptedCollectionKey is the unique symmetric key used to encrypt the collection's data (like name and file metadata).
	// This key is encrypted with the owner's master key for storage and transmission,
	// allowing the owner's device to decrypt it using their master key.
	EncryptedCollectionKey *crypto.EncryptedCollectionKey `bson:"encrypted_collection_key" json:"encrypted_collection_key"`
	// EncryptedCustomIcon stores the custom icon for this collection, encrypted with the collection key.
	// Empty string means use default folder/album icon.
	// Contains either an emoji character (e.g., "📷") or "icon:<identifier>" for predefined icons.
	EncryptedCustomIcon string `bson:"encrypted_custom_icon" json:"encrypted_custom_icon"`
	// Sharing
	// Members lists the users with access to this collection, including the
	// per-recipient encrypted collection key for each.
	Members []CollectionMembership `bson:"members" json:"members"`
	// Hierarchical structure fields
	// ParentID is the ID of the parent collection if this is a subcollection.
	// It is omitted (nil) for root collections. Used to reconstruct the hierarchy.
	// NOTE(review): gocql.UUID is a value type, so a root collection actually
	// carries the zero UUID here (and omitempty has no effect on it); the
	// "nil" wording above likely predates the Cassandra port — confirm.
	ParentID gocql.UUID `bson:"parent_id,omitempty" json:"parent_id,omitempty"` // Parent collection ID, not stored for root collections
	// AncestorIDs is an array containing the IDs of all parent collections up to the root.
	// This field is used for efficient querying and traversal of the collection hierarchy without joins.
	AncestorIDs []gocql.UUID `bson:"ancestor_ids,omitempty" json:"ancestor_ids,omitempty"` // Array of ancestor IDs for efficient querying
	// File count for performance optimization
	// FileCount stores the number of active files in this collection.
	// This denormalized field eliminates N+1 queries when listing collections.
	// Maintained via IncrementFileCount/DecrementFileCount; repairable with
	// RecalculateAllFileCounts.
	FileCount int64 `bson:"file_count" json:"file_count"`
	// DEPRECATED: Replaced by Tags field below
	// TagIDs []gocql.UUID `bson:"tag_ids,omitempty" json:"tag_ids,omitempty"`
	// Tags stores full embedded tag data (eliminates frontend API lookups)
	// Stored as JSON text in database, marshaled/unmarshaled automatically
	Tags []tag.EmbeddedTag `bson:"tags,omitempty" json:"tags,omitempty"`
	// Ownership, timestamps and conflict resolution
	// CreatedAt is the timestamp when the collection was initially created.
	// Recorded on the local device and synced.
	CreatedAt time.Time `bson:"created_at" json:"created_at"`
	// CreatedByUserID is the ID of the user who created this collection.
	CreatedByUserID gocql.UUID `bson:"created_by_user_id" json:"created_by_user_id"`
	// ModifiedAt is the timestamp of the last modification to the collection's metadata or content.
	// Updated on the local device and synced.
	ModifiedAt time.Time `bson:"modified_at" json:"modified_at"`
	// ModifiedByUserID is the ID of the user who performed the last modification.
	ModifiedByUserID gocql.UUID `bson:"modified_by_user_id" json:"modified_by_user_id"`
	// Version is the current version of the collection.
	Version uint64 `bson:"version" json:"version"` // Every mutation (create, update, delete, etc) is a versioned operation, keep track of the version number with this variable
	// State management
	State            string    `bson:"state" json:"state"`                         // active, deleted, archived
	TombstoneVersion uint64    `bson:"tombstone_version" json:"tombstone_version"` // The `version` number that this collection was deleted at.
	TombstoneExpiry  time.Time `bson:"tombstone_expiry" json:"tombstone_expiry"`   // When the tombstone may be purged — confirm purge job semantics.
}
// CollectionMembership represents a user's access to a collection.
type CollectionMembership struct {
	ID             gocql.UUID `bson:"_id" json:"id"`                           // Unique identifier of this membership record.
	CollectionID   gocql.UUID `bson:"collection_id" json:"collection_id"`      // ID of the collection (redundant but helpful for queries)
	RecipientID    gocql.UUID `bson:"recipient_id" json:"recipient_id"`        // User receiving access
	RecipientEmail string     `bson:"recipient_email" json:"recipient_email"`  // Email for display purposes
	GrantedByID    gocql.UUID `bson:"granted_by_id" json:"granted_by_id"`      // User who shared the collection
	// Collection key encrypted with recipient's public key using box_seal.
	// This matches the box_seal format which doesn't need a separate nonce.
	EncryptedCollectionKey []byte `bson:"encrypted_collection_key" json:"encrypted_collection_key"`
	// Access details
	PermissionLevel string    `bson:"permission_level" json:"permission_level"` // One of the CollectionPermission* constants.
	CreatedAt       time.Time `bson:"created_at" json:"created_at"`             // When access was granted.
	// Sharing origin tracking
	IsInherited     bool       `bson:"is_inherited" json:"is_inherited"`                                 // Tracks whether access was granted directly or inherited from a parent
	InheritedFromID gocql.UUID `bson:"inherited_from_id,omitempty" json:"inherited_from_id,omitempty"` // InheritedFromID identifies which parent collection granted this access
}
// CollectionSyncCursor represents cursor-based pagination for sync
// operations. The (LastModified, LastID) pair identifies the last item
// already delivered, so the next page starts strictly after it.
type CollectionSyncCursor struct {
	LastModified time.Time  `json:"last_modified" bson:"last_modified"` // ModifiedAt of the last item returned.
	LastID       gocql.UUID `json:"last_id" bson:"last_id"`             // ID tiebreaker for items sharing LastModified.
}
// CollectionSyncItem represents minimal collection data for sync operations —
// just enough for a client to decide whether to fetch the full record.
// NOTE(review): ParentID is *gocql.UUID here but a value gocql.UUID on
// Collection; confirm the intended null semantics are consistent.
type CollectionSyncItem struct {
	ID                  gocql.UUID  `json:"id" bson:"_id"`
	Version             uint64      `json:"version" bson:"version"`
	ModifiedAt          time.Time   `json:"modified_at" bson:"modified_at"`
	State               string      `json:"state" bson:"state"` // active, deleted, archived
	ParentID            *gocql.UUID `json:"parent_id,omitempty" bson:"parent_id,omitempty"`
	TombstoneVersion    uint64      `bson:"tombstone_version" json:"tombstone_version"`
	TombstoneExpiry     time.Time   `bson:"tombstone_expiry" json:"tombstone_expiry"`
	EncryptedCustomIcon string      `json:"encrypted_custom_icon,omitempty" bson:"encrypted_custom_icon,omitempty"`
}
// CollectionSyncResponse represents one page of collection sync data.
// When HasMore is true, NextCursor is the position to resume from.
type CollectionSyncResponse struct {
	Collections []CollectionSyncItem  `json:"collections"`
	NextCursor  *CollectionSyncCursor `json:"next_cursor,omitempty"` // nil on the final page.
	HasMore     bool                  `json:"has_more"`
}

View file

@ -0,0 +1,37 @@
// monorepo/cloud/backend/internal/maplefile/domain/collection/state_validator.go
package collection
import "errors"
// StateTransition validates collection state transitions.
type StateTransition struct {
	From string
	To   string
}

// validStateTransitions is the allow-list of legal state changes, built
// once at package init instead of on every call. Transitions absent from
// the map (including the former explicit `false` entries and any unknown
// state strings) are rejected, since a missing key reads as false.
// Semantics:
//   - active   -> deleted | archived (or active, as a no-op)
//   - archived -> active (restore), or archived (no-op)
//   - deleted  -> deleted only (tombstones cannot be restored or archived)
var validStateTransitions = map[StateTransition]bool{
	{CollectionStateActive, CollectionStateDeleted}:  true,
	{CollectionStateActive, CollectionStateArchived}: true,
	{CollectionStateArchived, CollectionStateActive}: true,
	// Same-state transitions (no-op)
	{CollectionStateActive, CollectionStateActive}:     true,
	{CollectionStateDeleted, CollectionStateDeleted}:   true,
	{CollectionStateArchived, CollectionStateArchived}: true,
}

// IsValidStateTransition checks if a state transition is allowed.
// It returns nil when the transition is permitted and a descriptive
// error otherwise.
func IsValidStateTransition(from, to string) error {
	if !validStateTransitions[StateTransition{From: from, To: to}] {
		return errors.New("invalid state transition from " + from + " to " + to)
	}
	return nil
}

View file

@ -0,0 +1,69 @@
// monorepo/cloud/maplefile-backend/internal/domain/crypto/domain/keys/kdf.go
package crypto
import (
"fmt"
"time"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/security/crypto"
)
// KDFParams stores the key derivation function parameters used to derive
// a key from a user password. Fields not relevant to the chosen algorithm
// (e.g. Memory for PBKDF2) are ignored by that algorithm.
type KDFParams struct {
	Algorithm   string `json:"algorithm" bson:"algorithm"`     // "argon2id", "pbkdf2", "scrypt" — only argon2id passes Validate today.
	Version     string `json:"version" bson:"version"`         // "1.0", "1.1", etc.
	Iterations  uint32 `json:"iterations" bson:"iterations"`   // For PBKDF2 or Argon2 time cost
	Memory      uint32 `json:"memory" bson:"memory"`           // For Argon2 memory in KB
	Parallelism uint8  `json:"parallelism" bson:"parallelism"` // For Argon2 threads
	SaltLength  uint32 `json:"salt_length" bson:"salt_length"` // Salt size in bytes
	KeyLength   uint32 `json:"key_length" bson:"key_length"`   // Output key size in bytes
}
// DefaultKDFParams returns the current recommended KDF parameters:
// argon2id configured from the shared crypto package's limits.
func DefaultKDFParams() KDFParams {
	params := KDFParams{
		Algorithm:   crypto.Argon2IDAlgorithm,
		Version:     "1.0", // Always starts at 1.0
		Iterations:  crypto.Argon2OpsLimit, // Time cost
		Memory:      crypto.Argon2MemLimit,
		Parallelism: crypto.Argon2Parallelism,
		SaltLength:  crypto.Argon2SaltSize,
		KeyLength:   crypto.Argon2KeySize,
	}
	return params
}
// Validate checks that the KDF parameters describe a usable configuration.
// Algorithm-specific limits are checked first (only argon2id is supported),
// followed by the common salt and key length requirements. The first
// violation found is returned.
func (k KDFParams) Validate() error {
	if k.Algorithm != crypto.Argon2IDAlgorithm {
		return fmt.Errorf("unsupported KDF algorithm: %s", k.Algorithm)
	}
	if k.Iterations < 1 {
		return fmt.Errorf("argon2id time cost must be >= 1")
	}
	if k.Memory < 1024 {
		return fmt.Errorf("argon2id memory must be >= 1024 KB")
	}
	if k.Parallelism < 1 {
		return fmt.Errorf("argon2id parallelism must be >= 1")
	}
	if k.SaltLength < 8 {
		return fmt.Errorf("salt length must be >= 8 bytes")
	}
	if k.KeyLength < 16 {
		return fmt.Errorf("key length must be >= 16 bytes")
	}
	return nil
}
// KDFUpgradePolicy defines when to upgrade KDF parameters for a user.
type KDFUpgradePolicy struct {
	MinimumParams      KDFParams     `json:"minimum_params" bson:"minimum_params"`           // Floor: params below this must be upgraded.
	RecommendedParams  KDFParams     `json:"recommended_params" bson:"recommended_params"`   // Target params for upgrades.
	MaxPasswordAge     time.Duration `json:"max_password_age" bson:"max_password_age"`       // Presumably triggers re-derivation when exceeded — confirm with caller.
	UpgradeOnNextLogin bool          `json:"upgrade_on_next_login" bson:"upgrade_on_next_login"` // Whether to upgrade lazily at the next login.
	LastUpgradeCheck   time.Time     `json:"last_upgrade_check" bson:"last_upgrade_check"`   // When the policy was last evaluated.
}

View file

@ -0,0 +1,355 @@
package crypto
import (
"encoding/base64"
"encoding/json"
"fmt"
"time"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/security/crypto"
)
// tryDecodeBase64 decodes s, accepting any of the four common base64
// variants. Encodings are attempted in a fixed order: URL-safe without
// padding first (libsodium's URLSAFE_NO_PADDING), then standard with
// padding (Go's default for []byte), standard without padding, and
// finally URL-safe with padding. The first successful decode wins; on
// total failure the wrapped error is from the last variant attempted.
func tryDecodeBase64(s string) ([]byte, error) {
	encodings := []*base64.Encoding{
		base64.RawURLEncoding, // libsodium URLSAFE_NO_PADDING
		base64.StdEncoding,    // Go's default for []byte
		base64.RawStdEncoding,
		base64.URLEncoding,
	}
	var lastErr error
	for _, enc := range encodings {
		decoded, err := enc.DecodeString(s)
		if err == nil {
			return decoded, nil
		}
		lastErr = err
	}
	return nil, fmt.Errorf("failed to decode base64 with any encoding: %w", lastErr)
}
// MasterKey represents the root encryption key for a user.
type MasterKey struct {
	Key []byte `json:"key" bson:"key"`
}

// EncryptedMasterKey is the master key encrypted with the key encryption
// key. Rotation history is retained in PreviousKeys so data encrypted
// under an older version can still be recovered.
type EncryptedMasterKey struct {
	Ciphertext   []byte                   `json:"ciphertext" bson:"ciphertext"`
	Nonce        []byte                   `json:"nonce" bson:"nonce"`
	KeyVersion   int                      `json:"key_version" bson:"key_version"`       // Version of the current key material.
	RotatedAt    *time.Time               `json:"rotated_at,omitempty" bson:"rotated_at,omitempty"` // nil means never rotated.
	PreviousKeys []EncryptedHistoricalKey `json:"previous_keys,omitempty" bson:"previous_keys,omitempty"`
}
// GetCurrentVersion returns the version number of the current key material.
func (emk *EncryptedMasterKey) GetCurrentVersion() int {
	return emk.KeyVersion
}
// GetKeyByVersion returns the encrypted key material for the requested
// version, normalizing the current key into the historical-key shape.
// It returns nil when the version is unknown. The result is always a
// copy, never a pointer into PreviousKeys.
func (emk *EncryptedMasterKey) GetKeyByVersion(version int) *EncryptedHistoricalKey {
	if version == emk.KeyVersion {
		// Present the current key in the historical format.
		current := EncryptedHistoricalKey{
			KeyVersion: emk.KeyVersion,
			Ciphertext: emk.Ciphertext,
			Nonce:      emk.Nonce,
			Algorithm:  crypto.ChaCha20Poly1305Algorithm, // ✅ Updated to ChaCha20-Poly1305
		}
		return &current
	}
	for _, historical := range emk.PreviousKeys {
		if historical.KeyVersion == version {
			match := historical
			return &match
		}
	}
	return nil
}
// KeyEncryptionKey derived from user password; Salt is the KDF salt used
// for the derivation.
type KeyEncryptionKey struct {
	Key  []byte `json:"key" bson:"key"`
	Salt []byte `json:"salt" bson:"salt"`
}

// PublicKey for asymmetric encryption.
type PublicKey struct {
	Key            []byte `json:"key" bson:"key"`
	VerificationID string `json:"verification_id" bson:"verification_id"` // Human-checkable fingerprint — confirm derivation with the crypto package.
}

// PrivateKey for asymmetric decryption.
type PrivateKey struct {
	Key []byte `json:"key" bson:"key"`
}

// EncryptedPrivateKey is the private key encrypted with the master key.
type EncryptedPrivateKey struct {
	Ciphertext []byte `json:"ciphertext" bson:"ciphertext"`
	Nonce      []byte `json:"nonce" bson:"nonce"`
}

// RecoveryKey for account recovery.
type RecoveryKey struct {
	Key []byte `json:"key" bson:"key"`
}

// EncryptedRecoveryKey is the recovery key encrypted with the master key.
type EncryptedRecoveryKey struct {
	Ciphertext []byte `json:"ciphertext" bson:"ciphertext"`
	Nonce      []byte `json:"nonce" bson:"nonce"`
}

// CollectionKey encrypts files in a collection.
type CollectionKey struct {
	Key          []byte `json:"key" bson:"key"`
	CollectionID string `json:"collection_id" bson:"collection_id"` // ID of the collection this key belongs to.
}
// EncryptedCollectionKey is the collection key encrypted with master key.
// Rotation history is retained in PreviousKeys; note that the custom
// JSON (un)marshalers below only carry Ciphertext/Nonce/KeyVersion on
// the wire.
type EncryptedCollectionKey struct {
	Ciphertext   []byte                   `json:"ciphertext" bson:"ciphertext"`
	Nonce        []byte                   `json:"nonce" bson:"nonce"`
	KeyVersion   int                      `json:"key_version" bson:"key_version"`
	RotatedAt    *time.Time               `json:"rotated_at,omitempty" bson:"rotated_at,omitempty"` // nil means never rotated.
	PreviousKeys []EncryptedHistoricalKey `json:"previous_keys,omitempty" bson:"previous_keys,omitempty"`
}
// NeedsRotation reports whether this collection key is due for rotation
// under the given policy. Keys that have never been rotated always
// qualify.
func (eck *EncryptedCollectionKey) NeedsRotation(policy KeyRotationPolicy) bool {
	if eck.RotatedAt == nil {
		// Never rotated.
		return true
	}
	return time.Since(*eck.RotatedAt) > policy.MaxKeyAge
}
// MarshalJSON serializes the key with Ciphertext and Nonce as standard
// base64 strings. RotatedAt and PreviousKeys are intentionally left off
// the wire, mirroring what UnmarshalJSON reads back.
func (eck *EncryptedCollectionKey) MarshalJSON() ([]byte, error) {
	return json.Marshal(struct {
		Ciphertext string `json:"ciphertext"`
		Nonce      string `json:"nonce"`
		KeyVersion int    `json:"key_version"`
	}{
		Ciphertext: base64.StdEncoding.EncodeToString(eck.Ciphertext),
		Nonce:      base64.StdEncoding.EncodeToString(eck.Nonce),
		KeyVersion: eck.KeyVersion,
	})
}
// UnmarshalJSON reads Ciphertext/Nonce in any common base64 variant
// (URL-safe or standard, padded or not) via tryDecodeBase64. Empty
// strings leave the corresponding byte slices untouched.
func (eck *EncryptedCollectionKey) UnmarshalJSON(data []byte) error {
	// Wire shape: string-encoded byte fields plus the version number.
	var wire struct {
		Ciphertext string `json:"ciphertext"`
		Nonce      string `json:"nonce"`
		KeyVersion int    `json:"key_version"`
	}
	if err := json.Unmarshal(data, &wire); err != nil {
		return fmt.Errorf("failed to unmarshal EncryptedCollectionKey into alias: %w", err)
	}
	eck.KeyVersion = wire.KeyVersion
	if wire.Ciphertext != "" {
		decoded, err := tryDecodeBase64(wire.Ciphertext)
		if err != nil {
			return fmt.Errorf("failed to decode EncryptedCollectionKey.Ciphertext: %w", err)
		}
		eck.Ciphertext = decoded
	}
	if wire.Nonce != "" {
		decoded, err := tryDecodeBase64(wire.Nonce)
		if err != nil {
			return fmt.Errorf("failed to decode EncryptedCollectionKey.Nonce: %w", err)
		}
		eck.Nonce = decoded
	}
	return nil
}
// FileKey encrypts a specific file.
type FileKey struct {
	Key    []byte `json:"key" bson:"key"`
	FileID string `json:"file_id" bson:"file_id"` // ID of the file this key belongs to.
}

// EncryptedFileKey is the file key encrypted with collection key.
// Rotation history is retained in PreviousKeys; the custom JSON
// (un)marshalers below only carry Ciphertext/Nonce/KeyVersion on the wire.
type EncryptedFileKey struct {
	Ciphertext   []byte                   `json:"ciphertext" bson:"ciphertext"`
	Nonce        []byte                   `json:"nonce" bson:"nonce"`
	KeyVersion   int                      `json:"key_version" bson:"key_version"`
	RotatedAt    *time.Time               `json:"rotated_at,omitempty" bson:"rotated_at,omitempty"` // nil means never rotated.
	PreviousKeys []EncryptedHistoricalKey `json:"previous_keys,omitempty" bson:"previous_keys,omitempty"`
}
// NeedsRotation reports whether this file key is due for rotation under
// the given policy. Keys that have never been rotated always qualify.
// (Receiver renamed from the copy-pasted `eck` to `efk` for clarity.)
func (efk *EncryptedFileKey) NeedsRotation(policy KeyRotationPolicy) bool {
	if efk.RotatedAt == nil {
		// Never rotated.
		return true
	}
	return time.Since(*efk.RotatedAt) > policy.MaxKeyAge
}
// MarshalJSON serializes the key with Ciphertext and Nonce as standard
// base64 strings. RotatedAt and PreviousKeys are intentionally left off
// the wire, mirroring what UnmarshalJSON reads back.
func (efk *EncryptedFileKey) MarshalJSON() ([]byte, error) {
	return json.Marshal(struct {
		Ciphertext string `json:"ciphertext"`
		Nonce      string `json:"nonce"`
		KeyVersion int    `json:"key_version"`
	}{
		Ciphertext: base64.StdEncoding.EncodeToString(efk.Ciphertext),
		Nonce:      base64.StdEncoding.EncodeToString(efk.Nonce),
		KeyVersion: efk.KeyVersion,
	})
}
// UnmarshalJSON reads Ciphertext/Nonce in any common base64 variant
// (URL-safe or standard, padded or not) via tryDecodeBase64. Empty
// strings leave the corresponding byte slices untouched.
func (efk *EncryptedFileKey) UnmarshalJSON(data []byte) error {
	// Wire shape: string-encoded byte fields plus the version number.
	var wire struct {
		Ciphertext string `json:"ciphertext"`
		Nonce      string `json:"nonce"`
		KeyVersion int    `json:"key_version"`
	}
	if err := json.Unmarshal(data, &wire); err != nil {
		return fmt.Errorf("failed to unmarshal EncryptedFileKey into alias: %w", err)
	}
	efk.KeyVersion = wire.KeyVersion
	if wire.Ciphertext != "" {
		decoded, err := tryDecodeBase64(wire.Ciphertext)
		if err != nil {
			return fmt.Errorf("failed to decode EncryptedFileKey.Ciphertext: %w", err)
		}
		efk.Ciphertext = decoded
	}
	if wire.Nonce != "" {
		decoded, err := tryDecodeBase64(wire.Nonce)
		if err != nil {
			return fmt.Errorf("failed to decode EncryptedFileKey.Nonce: %w", err)
		}
		efk.Nonce = decoded
	}
	return nil
}
// TagKey encrypts tag data (name and color): the raw symmetric key plus the
// ID of the tag it protects. NOTE(review): plaintext key material —
// presumably transient/client-side only; confirm it is never persisted.
type TagKey struct {
	// Key is the raw symmetric key used to encrypt the tag's name and color.
	Key []byte `json:"key" bson:"key"`
	// TagID identifies the tag this key encrypts.
	TagID string `json:"tag_id" bson:"tag_id"`
}
// EncryptedTagKey is the tag key encrypted (wrapped) with the user's master key.
// The backend stores only this wrapped form.
type EncryptedTagKey struct {
	// Ciphertext holds the wrapped tag-key bytes.
	Ciphertext []byte `json:"ciphertext" bson:"ciphertext"`
	// Nonce used when wrapping the key.
	Nonce []byte `json:"nonce" bson:"nonce"`
	// KeyVersion is the current version number of this key.
	KeyVersion int `json:"key_version" bson:"key_version"`
	// RotatedAt records the last rotation time; nil means never rotated
	// (see NeedsRotation).
	RotatedAt *time.Time `json:"rotated_at,omitempty" bson:"rotated_at,omitempty"`
	// PreviousKeys retains older wrapped key versions — presumably for
	// decrypting data encrypted under earlier versions; confirm usage.
	PreviousKeys []EncryptedHistoricalKey `json:"previous_keys,omitempty" bson:"previous_keys,omitempty"`
}
// NeedsRotation reports whether this tag key is due for rotation under the
// given policy. A key with no recorded rotation time is overdue by
// definition; otherwise its age is compared against policy.MaxKeyAge.
func (etk *EncryptedTagKey) NeedsRotation(policy KeyRotationPolicy) bool {
	return etk.RotatedAt == nil || time.Since(*etk.RotatedAt) > policy.MaxKeyAge
}
// MarshalJSON custom marshaller for EncryptedTagKey: key bytes are serialized
// as standard base64 strings. RotatedAt and PreviousKeys are omitted from the
// JSON form — NOTE(review): they do not round-trip through JSON; confirm
// intended (bson tags still carry them).
func (etk *EncryptedTagKey) MarshalJSON() ([]byte, error) {
	encodedCiphertext := base64.StdEncoding.EncodeToString(etk.Ciphertext)
	encodedNonce := base64.StdEncoding.EncodeToString(etk.Nonce)
	type wire struct {
		Ciphertext string `json:"ciphertext"`
		Nonce      string `json:"nonce"`
		KeyVersion int    `json:"key_version"`
	}
	payload := wire{
		Ciphertext: encodedCiphertext,
		Nonce:      encodedNonce,
		KeyVersion: etk.KeyVersion,
	}
	return json.Marshal(payload)
}
// UnmarshalJSON custom unmarshaller for EncryptedTagKey. Ciphertext and Nonce
// arrive as base64 strings; tryDecodeBase64 handles both standard and
// URL-safe encodings. Empty strings leave the corresponding byte slice
// untouched.
func (etk *EncryptedTagKey) UnmarshalJSON(data []byte) error {
	type wire struct {
		Ciphertext string `json:"ciphertext"`
		Nonce      string `json:"nonce"`
		KeyVersion int    `json:"key_version"`
	}
	var w wire
	if err := json.Unmarshal(data, &w); err != nil {
		return fmt.Errorf("failed to unmarshal EncryptedTagKey into alias: %w", err)
	}
	etk.KeyVersion = w.KeyVersion
	if w.Ciphertext != "" {
		raw, err := tryDecodeBase64(w.Ciphertext)
		if err != nil {
			return fmt.Errorf("failed to decode EncryptedTagKey.Ciphertext: %w", err)
		}
		etk.Ciphertext = raw
	}
	if w.Nonce != "" {
		raw, err := tryDecodeBase64(w.Nonce)
		if err != nil {
			return fmt.Errorf("failed to decode EncryptedTagKey.Nonce: %w", err)
		}
		etk.Nonce = raw
	}
	return nil
}
// MasterKeyEncryptedWithRecoveryKey allows account recovery: the user's
// master key wrapped with a recovery key, so a user who loses their password
// can presumably recover the master key via the recovery key — confirm the
// recovery flow.
type MasterKeyEncryptedWithRecoveryKey struct {
	// Ciphertext holds the wrapped master-key bytes.
	Ciphertext []byte `json:"ciphertext" bson:"ciphertext"`
	// Nonce used when wrapping the key.
	Nonce []byte `json:"nonce" bson:"nonce"`
}

View file

@ -0,0 +1,39 @@
// monorepo/cloud/maplefile-backend/internal/domain/crypto/domain/keys/rotation.go
package crypto
import (
"time"
"github.com/gocql/gocql"
)
// EncryptedHistoricalKey represents a previous version of a key, retained
// after a rotation so material encrypted under the old version remains
// identifiable by KeyVersion.
type EncryptedHistoricalKey struct {
	// KeyVersion is the version number this historical entry corresponds to.
	KeyVersion int `json:"key_version" bson:"key_version"`
	// Ciphertext holds the wrapped historical key bytes.
	Ciphertext []byte `json:"ciphertext" bson:"ciphertext"`
	// Nonce used when wrapping this key version.
	Nonce []byte `json:"nonce" bson:"nonce"`
	// RotatedAt is when this version was superseded.
	RotatedAt time.Time `json:"rotated_at" bson:"rotated_at"`
	// RotatedReason is a free-form reason recorded at rotation time.
	RotatedReason string `json:"rotated_reason" bson:"rotated_reason"`
	// Algorithm used for this key version
	Algorithm string `json:"algorithm" bson:"algorithm"`
}
// KeyRotationPolicy defines when and how to rotate keys.
type KeyRotationPolicy struct {
	// MaxKeyAge is the maximum age before a key needs rotation
	// (compared against in the NeedsRotation methods).
	MaxKeyAge time.Duration `json:"max_key_age" bson:"max_key_age"`
	// MaxKeyUsageCount caps how many times a key may be used —
	// NOTE(review): no visible enforcement in this file; confirm where
	// usage counting happens.
	MaxKeyUsageCount int64 `json:"max_key_usage_count" bson:"max_key_usage_count"`
	// ForceRotateOnBreach, when true, presumably forces rotation after a
	// detected breach; confirm against the rotation service.
	ForceRotateOnBreach bool `json:"force_rotate_on_breach" bson:"force_rotate_on_breach"`
}
// KeyRotationRecord tracks rotation events: an audit entry describing one
// rotation of an entity's key from one version to the next.
type KeyRotationRecord struct {
	// ID uniquely identifies this rotation record.
	ID gocql.UUID `bson:"_id" json:"id"`
	EntityType string `bson:"entity_type" json:"entity_type"` // "user", "collection", "file"
	// EntityID is the ID of the entity whose key was rotated.
	EntityID gocql.UUID `bson:"entity_id" json:"entity_id"`
	// FromVersion / ToVersion bound the version change of this rotation.
	FromVersion int `bson:"from_version" json:"from_version"`
	ToVersion int `bson:"to_version" json:"to_version"`
	// RotatedAt is when the rotation occurred; RotatedBy is the acting user.
	RotatedAt time.Time `bson:"rotated_at" json:"rotated_at"`
	RotatedBy gocql.UUID `bson:"rotated_by" json:"rotated_by"`
	// Reason is a free-form explanation for the rotation.
	Reason string `bson:"reason" json:"reason"`
	// AffectedItems counts items re-keyed as part of this rotation.
	AffectedItems int64 `bson:"affected_items" json:"affected_items"`
}

View file

@ -0,0 +1,54 @@
// cloud/maplefile-backend/internal/maplefile/domain/dashboard/model.go
package dashboard
import (
"time"
)
// Dashboard represents the main dashboard data structure — the top-level
// response envelope wrapping DashboardData under a "dashboard" JSON key.
type Dashboard struct {
	Dashboard DashboardData `json:"dashboard"`
}
// DashboardData contains all the dashboard information: headline statistics,
// the storage-usage trend chart, and the recent-files list.
type DashboardData struct {
	Summary Summary `json:"summary"`
	StorageUsageTrend StorageUsageTrend `json:"storageUsageTrend"`
	RecentFiles []RecentFile `json:"recentFiles"`
}
// Summary contains the main dashboard statistics shown at the top of the
// dashboard. JSON keys are camelCase for the frontend.
type Summary struct {
	TotalFiles int `json:"totalFiles"`
	TotalFolders int `json:"totalFolders"`
	// StorageUsed / StorageLimit are human-readable amounts (value + unit).
	StorageUsed StorageAmount `json:"storageUsed"`
	StorageLimit StorageAmount `json:"storageLimit"`
	// StorageUsagePercentage is the used/limit ratio — presumably 0-100
	// as an integer percent; confirm against the producer.
	StorageUsagePercentage int `json:"storageUsagePercentage"`
}
// StorageAmount represents a storage value with its unit, e.g. a numeric
// value paired with a unit string such as "MB" or "GB" (unit vocabulary is
// set by the producer — not visible here).
type StorageAmount struct {
	Value float64 `json:"value"`
	Unit string `json:"unit"`
}
// StorageUsageTrend contains the trend chart data: a period label plus the
// ordered data points to plot.
type StorageUsageTrend struct {
	Period string `json:"period"`
	DataPoints []DataPoint `json:"dataPoints"`
}
// DataPoint represents a single point in the storage usage trend:
// a date label and the usage measured on that date.
type DataPoint struct {
	Date string `json:"date"`
	Usage StorageAmount `json:"usage"`
}
// RecentFile represents a file in the recent files list. It carries both a
// display-friendly Uploaded string and the raw UploadedTimestamp for sorting.
type RecentFile struct {
	FileName string `json:"fileName"`
	// Uploaded is a pre-formatted, human-readable upload time.
	Uploaded string `json:"uploaded"`
	// UploadedTimestamp is the machine-readable upload time.
	UploadedTimestamp time.Time `json:"uploadedTimestamp"`
	Type string `json:"type"`
	Size StorageAmount `json:"size"`
}

View file

@ -0,0 +1,13 @@
// monorepo/cloud/backend/internal/maplefile/domain/file/constants.go
package file
const (
	// FileStatePending is the initial state of a file before it is uploaded.
	FileStatePending = "pending"
	// FileStateActive indicates that the file is fully uploaded and ready for use.
	FileStateActive = "active"
	// FileStateDeleted marks the file as deleted; it remains accessible for a
	// period but will eventually be permanently removed (see tombstone fields
	// on File). Deleted files cannot be restored (see IsValidStateTransition).
	FileStateDeleted = "deleted"
	// FileStateArchived indicates that the file is no longer accessible;
	// archived files can be restored back to active.
	FileStateArchived = "archived"
)

View file

@ -0,0 +1,95 @@
// monorepo/cloud/backend/internal/maplefile/domain/file/interface.go
package file
import (
"context"
"time"
"github.com/gocql/gocql"
)
// FileMetadataRepository defines the interface for interacting with file metadata storage.
// It handles operations related to storing, retrieving, updating, and deleting file information (metadata).
type FileMetadataRepository interface {
	// Create saves a single File metadata record to the storage.
	Create(file *File) error
	// CreateMany saves multiple File metadata records to the storage.
	CreateMany(files []*File) error
	// Get retrieves a single File metadata record (regardless of its state) by its unique identifier (ID).
	Get(id gocql.UUID) (*File, error)
	// GetByIDs retrieves multiple File metadata records by their unique identifiers (IDs).
	GetByIDs(ids []gocql.UUID) ([]*File, error)
	// GetByCollection retrieves all File metadata records associated with a specific collection ID.
	GetByCollection(collectionID gocql.UUID) ([]*File, error)
	// Update modifies an existing File metadata record in the storage.
	Update(file *File) error
	// SoftDelete removes a single File metadata record by its unique identifier (ID) by setting its state to deleted.
	SoftDelete(id gocql.UUID) error
	// HardDelete permanently removes a file metadata record
	HardDelete(id gocql.UUID) error
	// SoftDeleteMany removes multiple File metadata records by their unique identifiers (IDs) by setting its state to deleted.
	SoftDeleteMany(ids []gocql.UUID) error
	// HardDeleteMany permanently removes multiple file metadata records
	HardDeleteMany(ids []gocql.UUID) error
	// CheckIfExistsByID verifies if a File metadata record with the given ID exists in the storage.
	CheckIfExistsByID(id gocql.UUID) (bool, error)
	// CheckIfUserHasAccess determines if a specific user (userID) has access permissions for a given file (fileID).
	CheckIfUserHasAccess(fileID gocql.UUID, userID gocql.UUID) (bool, error)
	// GetByCreatedByUserID retrieves files created by the given user.
	GetByCreatedByUserID(createdByUserID gocql.UUID) ([]*File, error)
	// GetByOwnerID retrieves files owned by the given user.
	GetByOwnerID(ownerID gocql.UUID) ([]*File, error)
	// State management operations (see file state constants and
	// IsValidStateTransition for the allowed transitions).
	Archive(id gocql.UUID) error
	Restore(id gocql.UUID) error
	RestoreMany(ids []gocql.UUID) error
	// ListSyncData retrieves file sync data with pagination for the specified user and accessible collections
	ListSyncData(ctx context.Context, userID gocql.UUID, cursor *FileSyncCursor, limit int64, accessibleCollectionIDs []gocql.UUID) (*FileSyncResponse, error)
	// ListRecentFiles retrieves recent files with pagination for the specified user and accessible collections
	ListRecentFiles(ctx context.Context, userID gocql.UUID, cursor *RecentFilesCursor, limit int64, accessibleCollectionIDs []gocql.UUID) (*RecentFilesResponse, error)
	// CountFilesByUser counts all active files accessible to the user
	CountFilesByUser(ctx context.Context, userID gocql.UUID, accessibleCollectionIDs []gocql.UUID) (int, error)
	// CountFilesByCollection counts active files in a specific collection
	CountFilesByCollection(ctx context.Context, collectionID gocql.UUID) (int, error)
	// Storage size calculation methods (totals in bytes).
	GetTotalStorageSizeByOwner(ctx context.Context, ownerID gocql.UUID) (int64, error)
	GetTotalStorageSizeByUser(ctx context.Context, userID gocql.UUID, accessibleCollectionIDs []gocql.UUID) (int64, error)
	GetTotalStorageSizeByCollection(ctx context.Context, collectionID gocql.UUID) (int64, error)
	// IP Anonymization for GDPR compliance
	AnonymizeOldIPs(ctx context.Context, cutoffDate time.Time) (int, error)
	AnonymizeFileIPsByOwner(ctx context.Context, ownerID gocql.UUID) (int, error) // For GDPR right-to-be-forgotten
	// Tag-related operations
	// ListByTagID retrieves all files that have the specified tag assigned
	// Used for tag update propagation (updating embedded tag data across all files)
	ListByTagID(ctx context.Context, tagID gocql.UUID) ([]*File, error)
}
// FileObjectStorageRepository defines the interface for interacting with the actual encrypted file data storage.
// It handles operations related to storing, retrieving, deleting, and generating access URLs for encrypted data.
// All data handled here is already encrypted client-side; this layer never sees plaintext.
type FileObjectStorageRepository interface {
	// StoreEncryptedData saves encrypted file data to the storage system. It takes the owner's ID,
	// the file's ID (metadata ID), and the encrypted byte slice. It returns the storage path
	// where the data was saved, or an error.
	StoreEncryptedData(ownerID string, fileID string, encryptedData []byte) (string, error)
	// GetEncryptedData retrieves encrypted file data from the storage system using its storage path.
	// It returns the encrypted data as a byte slice, or an error.
	GetEncryptedData(storagePath string) ([]byte, error)
	// DeleteEncryptedData removes encrypted file data from the storage system using its storage path.
	DeleteEncryptedData(storagePath string) error
	// GeneratePresignedDownloadURL creates a temporary, time-limited URL that allows direct download
	// of the file data located at the given storage path, with proper content disposition headers.
	GeneratePresignedDownloadURL(storagePath string, duration time.Duration) (string, error)
	// GeneratePresignedUploadURL creates a temporary, time-limited URL that allows clients to upload
	// encrypted file data directly to the storage system at the specified storage path.
	GeneratePresignedUploadURL(storagePath string, duration time.Duration) (string, error)
	// VerifyObjectExists checks if an object exists at the given storage path.
	VerifyObjectExists(storagePath string) (bool, error)
	// GetObjectSize returns the size in bytes of the object at the given storage path.
	GetObjectSize(storagePath string) (int64, error)
}

View file

@ -0,0 +1,136 @@
// monorepo/cloud/backend/internal/maplefile/domain/file/model.go
package file
import (
"time"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/crypto"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/tag"
)
// File represents an encrypted file entity stored in the backend database.
// NOTE(review): this comment previously said MongoDB and the bson tags
// reflect that heritage, but the identifier types are gocql (Cassandra)
// UUIDs — confirm which store is authoritative before relying on bson tags.
// This entity holds metadata and pointers to the actual file content and thumbnail,
// which are stored separately in S3. All sensitive file metadata and the file itself
// are encrypted client-side before being uploaded. The backend stores only encrypted
// data and necessary non-sensitive identifiers or sizes for management.
type File struct {
	// Identifiers
	// Unique identifier for this specific file entity.
	ID gocql.UUID `bson:"_id" json:"id"`
	// Identifier of the collection this file belongs to. Used for grouping and key management.
	CollectionID gocql.UUID `bson:"collection_id" json:"collection_id"`
	// Identifier of the user who owns this file.
	OwnerID gocql.UUID `bson:"owner_id" json:"owner_id"`
	// Encryption and Content Details
	// Client-side encrypted JSON blob containing file-specific metadata like the original file name,
	// MIME type, size of the *unencrypted* data, etc. Encrypted by the client using the file key.
	EncryptedMetadata string `bson:"encrypted_metadata" json:"encrypted_metadata"`
	// The file-specific data encryption key (DEK) used to encrypt the file content and metadata.
	// This key is encrypted by the client using the collection's key (a KEK). The backend
	// stores this encrypted key; only a user with access to the KEK can decrypt it.
	EncryptedFileKey crypto.EncryptedFileKey `bson:"encrypted_file_key" json:"encrypted_file_key"`
	// Version identifier for the encryption scheme or client application version used to
	// encrypt this file. Useful for migration or compatibility checks.
	EncryptionVersion string `bson:"encryption_version" json:"encryption_version"`
	// Cryptographic hash of the *encrypted* file content stored in S3. Used for integrity
	// verification upon download *before* decryption.
	EncryptedHash string `bson:"encrypted_hash" json:"encrypted_hash"`
	// File Storage Object Details
	// The unique key or path within the S3 bucket where the main encrypted file content is stored.
	// This is an internal backend detail and is not exposed to the client API (json:"-").
	EncryptedFileObjectKey string `bson:"encrypted_file_object_key" json:"-"`
	// The size of the *encrypted* file content stored in S3, in bytes. This size is not sensitive
	// and is used by the backend for storage accounting, billing, and transfer management.
	EncryptedFileSizeInBytes int64 `bson:"encrypted_file_size_in_bytes" json:"encrypted_file_size_in_bytes"`
	// Thumbnail Storage Object Details (Optional)
	// The unique key or path within the S3 bucket where the encrypted thumbnail image (if generated
	// and uploaded) is stored. Internal backend detail, not exposed to the client API.
	EncryptedThumbnailObjectKey string `bson:"encrypted_thumbnail_object_key" json:"-"`
	// The size of the *encrypted* thumbnail image stored in S3, in bytes. Used for accounting.
	// Value will be 0 if no thumbnail exists.
	EncryptedThumbnailSizeInBytes int64 `bson:"encrypted_thumbnail_size_in_bytes" json:"encrypted_thumbnail_size_in_bytes"`
	// DEPRECATED: Replaced by Tags field below
	// TagIDs []gocql.UUID `bson:"tag_ids,omitempty" json:"tag_ids,omitempty"`
	// Tags stores full embedded tag data (eliminates frontend API lookups)
	// Stored as JSON text in database, marshaled/unmarshaled automatically
	Tags []tag.EmbeddedTag `bson:"tags,omitempty" json:"tags,omitempty"`
	// Timestamps and conflict resolution
	// Timestamp when this file entity was created/uploaded.
	CreatedAt time.Time `bson:"created_at" json:"created_at"`
	// CreatedByUserID is the ID of the user who created this file.
	CreatedByUserID gocql.UUID `bson:"created_by_user_id" json:"created_by_user_id"`
	// Timestamp when this file entity's metadata or content was last modified.
	ModifiedAt time.Time `bson:"modified_at" json:"modified_at"`
	// ModifiedByUserID is the ID of the user who last modified this file.
	ModifiedByUserID gocql.UUID `bson:"modified_by_user_id" json:"modified_by_user_id"`
	// The current version of the file.
	Version uint64 `bson:"version" json:"version"` // Every mutation (create, update, delete) is a versioned operation, keep track of the version number with this variable
	// State management.
	State string `bson:"state" json:"state"` // pending, active, deleted, archived
	TombstoneVersion uint64 `bson:"tombstone_version" json:"tombstone_version"` // The `version` number that this file was deleted at.
	TombstoneExpiry time.Time `bson:"tombstone_expiry" json:"tombstone_expiry"` // When the tombstone (soft-deleted record) may be purged.
}
// FileSyncCursor represents cursor-based pagination for sync operations:
// the (modified-at, ID) pair of the last item on the previous page, so the
// next page can resume deterministically even with equal timestamps.
type FileSyncCursor struct {
	LastModified time.Time `json:"last_modified" bson:"last_modified"`
	LastID gocql.UUID `json:"last_id" bson:"last_id"`
}
// FileSyncItem represents minimal file data for sync operations — only the
// fields a client needs to decide whether its local copy is stale, plus
// tombstone info for deletions and size for accounting.
type FileSyncItem struct {
	ID gocql.UUID `json:"id" bson:"_id"`
	CollectionID gocql.UUID `json:"collection_id" bson:"collection_id"`
	Version uint64 `json:"version" bson:"version"`
	ModifiedAt time.Time `json:"modified_at" bson:"modified_at"`
	State string `json:"state" bson:"state"` // pending, active, deleted, archived
	TombstoneVersion uint64 `bson:"tombstone_version" json:"tombstone_version"`
	TombstoneExpiry time.Time `bson:"tombstone_expiry" json:"tombstone_expiry"`
	EncryptedFileSizeInBytes int64 `bson:"encrypted_file_size_in_bytes" json:"encrypted_file_size_in_bytes"`
}
// FileSyncResponse represents the response for file sync data: one page of
// items plus the cursor for the next page (nil when HasMore is false).
type FileSyncResponse struct {
	Files []FileSyncItem `json:"files"`
	NextCursor *FileSyncCursor `json:"next_cursor,omitempty"`
	HasMore bool `json:"has_more"`
}
// RecentFilesCursor represents cursor-based pagination for recent files,
// keyed on (modified-at, ID) like FileSyncCursor for stable ordering.
type RecentFilesCursor struct {
	LastModified time.Time `json:"last_modified" bson:"last_modified"`
	LastID gocql.UUID `json:"last_id" bson:"last_id"`
}
// RecentFilesItem represents a file item for recent files listing.
// NOTE(review): EncryptedFileKey is a string here while File uses the
// crypto.EncryptedFileKey struct — presumably a pre-serialized form for
// this listing; confirm consistency with the repository implementation.
type RecentFilesItem struct {
	ID gocql.UUID `json:"id" bson:"_id"`
	CollectionID gocql.UUID `json:"collection_id" bson:"collection_id"`
	OwnerID gocql.UUID `json:"owner_id" bson:"owner_id"`
	EncryptedMetadata string `json:"encrypted_metadata" bson:"encrypted_metadata"`
	EncryptedFileKey string `json:"encrypted_file_key" bson:"encrypted_file_key"`
	EncryptionVersion string `json:"encryption_version" bson:"encryption_version"`
	EncryptedHash string `json:"encrypted_hash" bson:"encrypted_hash"`
	EncryptedFileSizeInBytes int64 `json:"encrypted_file_size_in_bytes" bson:"encrypted_file_size_in_bytes"`
	EncryptedThumbnailSizeInBytes int64 `json:"encrypted_thumbnail_size_in_bytes" bson:"encrypted_thumbnail_size_in_bytes"`
	Tags []tag.EmbeddedTag `json:"tags,omitempty" bson:"tags,omitempty"`
	CreatedAt time.Time `json:"created_at" bson:"created_at"`
	ModifiedAt time.Time `json:"modified_at" bson:"modified_at"`
	Version uint64 `json:"version" bson:"version"`
	State string `json:"state" bson:"state"`
}
// RecentFilesResponse represents the response for recent files listing:
// one page of items plus the cursor for the next page (nil when HasMore
// is false).
type RecentFilesResponse struct {
	Files []RecentFilesItem `json:"files"`
	NextCursor *RecentFilesCursor `json:"next_cursor,omitempty"`
	HasMore bool `json:"has_more"`
}

View file

@ -0,0 +1,45 @@
// monorepo/cloud/backend/internal/maplefile/domain/file/state_validator.go
package file
import "errors"
// StateTransition is a (from, to) pair of file states, used as the lookup
// key when validating file state transitions.
type StateTransition struct {
	From string
	To string
}
// validFileStateTransitions enumerates every permitted (from, to) pair.
// Hoisted to package level so the table is built once rather than on every
// call. Pairs absent from the map are invalid (map lookup yields false), so
// e.g. deleted files can never leave the deleted state, and archived files
// can only return to active.
var validFileStateTransitions = map[StateTransition]bool{
	// From pending
	{FileStatePending, FileStateActive}:  true,
	{FileStatePending, FileStateDeleted}: true,
	// From active
	{FileStateActive, FileStateDeleted}:  true,
	{FileStateActive, FileStateArchived}: true,
	// From archived (can only be restored to active)
	{FileStateArchived, FileStateActive}: true,
	// Same state transitions (no-op)
	{FileStatePending, FileStatePending}:   true,
	{FileStateActive, FileStateActive}:     true,
	{FileStateDeleted, FileStateDeleted}:   true,
	{FileStateArchived, FileStateArchived}: true,
}

// IsValidStateTransition checks if a file state transition is allowed.
// It returns nil when the transition is permitted and a descriptive error
// otherwise. Unknown state strings are treated as invalid transitions.
func IsValidStateTransition(from, to string) error {
	if !validFileStateTransitions[StateTransition{from, to}] {
		return errors.New("invalid state transition from " + from + " to " + to)
	}
	return nil
}

View file

@ -0,0 +1,7 @@
// Package inviteemail provides domain types and constants for invitation emails
// sent to non-registered users when someone wants to share a collection with them.
package inviteemail

// DefaultMaxInviteEmailsPerDay is the fallback limit if the environment variable is not set.
// This conservative limit (3 invites/day) protects email domain reputation
// against the service being abused as a spam vector.
const DefaultMaxInviteEmailsPerDay = 3

View file

@ -0,0 +1,53 @@
// monorepo/cloud/maplefile-backend/internal/maplefile/domain/storagedailyusage/interface.go
package storagedailyusage
import (
"context"
"time"
"github.com/gocql/gocql"
)
// StorageDailyUsageRepository defines the interface for daily storage usage
// aggregates: one row per (user, day) summarizing bytes held, added, and removed.
type StorageDailyUsageRepository interface {
	// Create / CreateMany insert new daily aggregates.
	Create(ctx context.Context, usage *StorageDailyUsage) error
	CreateMany(ctx context.Context, usages []*StorageDailyUsage) error
	// GetByUserAndDay fetches the aggregate for one user on one day.
	GetByUserAndDay(ctx context.Context, userID gocql.UUID, usageDay time.Time) (*StorageDailyUsage, error)
	// GetByUserDateRange fetches aggregates between startDay and endDay.
	GetByUserDateRange(ctx context.Context, userID gocql.UUID, startDay, endDay time.Time) ([]*StorageDailyUsage, error)
	// UpdateOrCreate upserts a daily aggregate.
	UpdateOrCreate(ctx context.Context, usage *StorageDailyUsage) error
	// IncrementUsage adjusts a day's counters by the given byte deltas.
	IncrementUsage(ctx context.Context, userID gocql.UUID, usageDay time.Time, totalBytes, addBytes, removeBytes int64) error
	// Deletion — per day or for an entire user (e.g. GDPR erasure).
	DeleteByUserAndDay(ctx context.Context, userID gocql.UUID, usageDay time.Time) error
	DeleteByUserID(ctx context.Context, userID gocql.UUID) error
	// Trend and summary queries over fixed windows.
	GetLast7DaysTrend(ctx context.Context, userID gocql.UUID) (*StorageUsageTrend, error)
	GetMonthlyTrend(ctx context.Context, userID gocql.UUID, year int, month time.Month) (*StorageUsageTrend, error)
	GetYearlyTrend(ctx context.Context, userID gocql.UUID, year int) (*StorageUsageTrend, error)
	GetCurrentMonthUsage(ctx context.Context, userID gocql.UUID) (*StorageUsageSummary, error)
	GetCurrentYearUsage(ctx context.Context, userID gocql.UUID) (*StorageUsageSummary, error)
}
// StorageUsageTrend represents usage trend over a period: the per-day
// aggregates plus derived totals and peak information. All byte fields are
// in bytes.
type StorageUsageTrend struct {
	UserID gocql.UUID `json:"user_id"`
	StartDate time.Time `json:"start_date"`
	EndDate time.Time `json:"end_date"`
	// DailyUsages are the underlying per-day aggregates for the window.
	DailyUsages []*StorageDailyUsage `json:"daily_usages"`
	TotalAdded int64 `json:"total_added"`
	TotalRemoved int64 `json:"total_removed"`
	// NetChange is presumably TotalAdded - TotalRemoved; confirm in producer.
	NetChange int64 `json:"net_change"`
	AverageDailyAdd int64 `json:"average_daily_add"`
	// PeakUsageDay/PeakUsageBytes identify the day with the highest usage;
	// PeakUsageDay is nil when the window has no data.
	PeakUsageDay *time.Time `json:"peak_usage_day,omitempty"`
	PeakUsageBytes int64 `json:"peak_usage_bytes"`
}
// StorageUsageSummary represents a summary of storage usage for a single
// calendar period. All *_bytes fields are in bytes.
type StorageUsageSummary struct {
	UserID gocql.UUID `json:"user_id"`
	Period string `json:"period"` // "month" or "year"
	StartDate time.Time `json:"start_date"`
	EndDate time.Time `json:"end_date"`
	CurrentUsage int64 `json:"current_usage_bytes"`
	TotalAdded int64 `json:"total_added_bytes"`
	TotalRemoved int64 `json:"total_removed_bytes"`
	NetChange int64 `json:"net_change_bytes"`
	// DaysWithData counts days in the period that have an aggregate row.
	DaysWithData int `json:"days_with_data"`
}

View file

@ -0,0 +1,26 @@
// monorepo/cloud/maplefile-backend/internal/maplefile/domain/storagedailyusage/model.go
package storagedailyusage
import (
"time"
"github.com/gocql/gocql"
)
// StorageDailyUsage is one per-user, per-day storage aggregate row.
// Keyed by (UserID, UsageDay); UsageDay should be truncated to date only
// (see the note below this type).
type StorageDailyUsage struct {
	UserID gocql.UUID `json:"user_id"` // Partition key
	UsageDay time.Time `json:"usage_day"` // Clustering key (date only)
	// TotalBytes is the total stored at end of day; TotalAddBytes and
	// TotalRemoveBytes are the day's gross additions and removals.
	TotalBytes int64 `json:"total_bytes"`
	TotalAddBytes int64 `json:"total_add_bytes"`
	TotalRemoveBytes int64 `json:"total_remove_bytes"`
}
//
// Use gocql.UUID from the github.com/gocql/gocql driver.
//
// For consistency, always store and retrieve DATE fields (like event_day and usage_day) as time.Time, but truncate to date only before inserting. Note that time.Truncate operates on absolute (epoch-based) time, so normalize to UTC first to get a stable, timezone-independent day boundary:
//
// ```go
// usageDay := time.Now().UTC().Truncate(24 * time.Hour)
// ```
//

View file

@ -0,0 +1,23 @@
// monorepo/cloud/maplefile-backend/internal/maplefile/domain/storageusageevent/interface.go
package storageusageevent
import (
"context"
"time"
"github.com/gocql/gocql"
)
// StorageUsageEventRepository defines the interface for storage usage events —
// the raw add/remove events that daily aggregates are derived from.
type StorageUsageEventRepository interface {
	// Create / CreateMany append raw usage events.
	Create(ctx context.Context, event *StorageUsageEvent) error
	CreateMany(ctx context.Context, events []*StorageUsageEvent) error
	// Day- and range-scoped reads for a single user.
	GetByUserAndDay(ctx context.Context, userID gocql.UUID, eventDay time.Time) ([]*StorageUsageEvent, error)
	GetByUserDateRange(ctx context.Context, userID gocql.UUID, startDay, endDay time.Time) ([]*StorageUsageEvent, error)
	// Deletion — per day or for an entire user (e.g. GDPR erasure).
	DeleteByUserAndDay(ctx context.Context, userID gocql.UUID, eventDay time.Time) error
	DeleteByUserID(ctx context.Context, userID gocql.UUID) error
	// Windowed convenience queries.
	GetLast7DaysEvents(ctx context.Context, userID gocql.UUID) ([]*StorageUsageEvent, error)
	GetLastNDaysEvents(ctx context.Context, userID gocql.UUID, days int) ([]*StorageUsageEvent, error)
	GetMonthlyEvents(ctx context.Context, userID gocql.UUID, year int, month time.Month) ([]*StorageUsageEvent, error)
	GetYearlyEvents(ctx context.Context, userID gocql.UUID, year int) ([]*StorageUsageEvent, error)
}

View file

@ -0,0 +1,16 @@
// monorepo/cloud/maplefile-backend/internal/maplefile/domain/storageusageevent/model.go
package storageusageevent
import (
"time"
"github.com/gocql/gocql"
)
// StorageUsageEvent records a single storage mutation (one file added or
// removed) for a user. Partitioned by (UserID, EventDay) with EventTime as
// the clustering key, so a day's events are read together in time order.
type StorageUsageEvent struct {
	UserID gocql.UUID `json:"user_id"` // Partition key
	EventDay time.Time `json:"event_day"` // Partition key (date only)
	EventTime time.Time `json:"event_time"` // Clustering key
	FileSize int64 `json:"file_size"` // Bytes
	Operation string `json:"operation"` // "add" or "remove"
}

View file

@ -0,0 +1,23 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/tag/constants.go
package tag
const (
	// Tag States — lifecycle of a Tag (see Tag.State).
	TagStateActive = "active"
	TagStateDeleted = "deleted"
	TagStateArchived = "archived"
	// Entity Types — what a TagAssignment can attach a tag to.
	EntityTypeCollection = "collection"
	EntityTypeFile = "file"
	// Default Tag Names — presumably seeded for new accounts; confirm
	// against the tag service.
	DefaultTagImportant = "Important"
	DefaultTagWork = "Work"
	DefaultTagPersonal = "Personal"
	// Default Tag Colors (hex format), paired with the default names above.
	DefaultColorImportant = "#EF4444" // Red
	DefaultColorWork = "#3B82F6" // Blue
	DefaultColorPersonal = "#10B981" // Green
)

View file

@ -0,0 +1,26 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/tag/interface.go
package tag
import (
"context"
"github.com/gocql/gocql"
)
// Repository defines the interface for tag data access operations:
// CRUD for tags themselves plus the assignment relation that links a tag to
// a collection or file (entityType is EntityTypeCollection or EntityTypeFile).
type Repository interface {
	// Tag CRUD operations
	Create(ctx context.Context, tag *Tag) error
	GetByID(ctx context.Context, id gocql.UUID) (*Tag, error)
	// ListByUser returns all tags owned by the given user.
	ListByUser(ctx context.Context, userID gocql.UUID) ([]*Tag, error)
	Update(ctx context.Context, tag *Tag) error
	// DeleteByID removes a tag; both owner and tag ID are required.
	DeleteByID(ctx context.Context, userID, id gocql.UUID) error
	// Tag Assignment operations
	AssignTag(ctx context.Context, assignment *TagAssignment) error
	UnassignTag(ctx context.Context, tagID, entityID gocql.UUID, entityType string) error
	// GetTagsForEntity lists tags attached to one collection/file.
	GetTagsForEntity(ctx context.Context, entityID gocql.UUID, entityType string) ([]*Tag, error)
	// GetEntitiesWithTag lists entity IDs of the given type carrying a tag.
	GetEntitiesWithTag(ctx context.Context, tagID gocql.UUID, entityType string) ([]gocql.UUID, error)
	GetAssignmentsByTag(ctx context.Context, tagID gocql.UUID) ([]*TagAssignment, error)
	GetAssignmentsByEntity(ctx context.Context, entityID gocql.UUID, entityType string) ([]*TagAssignment, error)
}

View file

@ -0,0 +1,89 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/tag/model.go
package tag
import (
"time"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/crypto"
)
// Tag represents a user-defined label with color that can be assigned to collections or files.
// All sensitive data (name, color) is encrypted end-to-end using the tag's unique encryption key.
type Tag struct {
	// Identifiers
	ID gocql.UUID `bson:"_id" json:"id"`
	UserID gocql.UUID `bson:"user_id" json:"user_id"` // Owner of the tag
	// Encrypted Tag Details
	// EncryptedName is the tag label (e.g., "Important", "Work") encrypted with the tag key
	EncryptedName string `bson:"encrypted_name" json:"encrypted_name"`
	// EncryptedColor is the hex color code (e.g., "#FF5733") encrypted with the tag key
	EncryptedColor string `bson:"encrypted_color" json:"encrypted_color"`
	// EncryptedTagKey is the unique symmetric key used to encrypt this tag's data (name and color)
	// This key is encrypted with the user's master key for storage and transmission
	EncryptedTagKey *crypto.EncryptedTagKey `bson:"encrypted_tag_key" json:"encrypted_tag_key"`
	// Timestamps and versioning
	CreatedAt time.Time `bson:"created_at" json:"created_at"`
	// ModifiedAt also drives staleness detection for embedded copies
	// (see EmbeddedTag.ModifiedAt).
	ModifiedAt time.Time `bson:"modified_at" json:"modified_at"`
	Version uint64 `bson:"version" json:"version"` // Versioning for sync
	// State management
	State string `bson:"state" json:"state"` // active, deleted, archived (see tag state constants)
}
// TagAssignment represents the assignment of a tag to a collection or file —
// one row per (tag, entity) link.
type TagAssignment struct {
	// Identifiers
	ID gocql.UUID `bson:"_id" json:"id"`
	UserID gocql.UUID `bson:"user_id" json:"user_id"` // User who assigned the tag
	TagID gocql.UUID `bson:"tag_id" json:"tag_id"` // Reference to the tag
	EntityID gocql.UUID `bson:"entity_id" json:"entity_id"` // Collection or File ID
	// EntityType indicates whether this is a "collection" or "file"
	// (see EntityTypeCollection / EntityTypeFile).
	EntityType string `bson:"entity_type" json:"entity_type"`
	// Timestamps
	CreatedAt time.Time `bson:"created_at" json:"created_at"`
}
// TagListFilter represents filter criteria for listing tags.
type TagListFilter struct {
	// UserID scopes the listing to one owner (required).
	UserID gocql.UUID
	State string // Optional: filter by state; empty string means all states
}
// TagAssignmentFilter represents filter criteria for tag assignments.
// Each field is optional; a nil pointer means "do not filter on this field".
type TagAssignmentFilter struct {
	TagID *gocql.UUID
	EntityID *gocql.UUID
	EntityType *string
	UserID *gocql.UUID
}
// EmbeddedTag represents tag data that is embedded in collections and files.
// This eliminates the need for frontend API lookups to get tag colors.
// It is a denormalized copy of the owning Tag's encrypted fields.
type EmbeddedTag struct {
	// Core identifiers and data (copied from Tag — see ToEmbeddedTag).
	ID gocql.UUID `bson:"id" json:"id"`
	EncryptedName string `bson:"encrypted_name" json:"encrypted_name"`
	EncryptedColor string `bson:"encrypted_color" json:"encrypted_color"`
	EncryptedTagKey *crypto.EncryptedTagKey `bson:"encrypted_tag_key" json:"encrypted_tag_key"`
	// For cache invalidation - detect stale embedded data by comparing with
	// the source Tag.ModifiedAt.
	ModifiedAt time.Time `bson:"modified_at" json:"modified_at"`
}
// ToEmbeddedTag converts a Tag to an EmbeddedTag for embedding in
// collections/files. A nil receiver yields nil, so callers can chain the
// call without a guard. The EncryptedTagKey pointer is shared, not copied.
func (t *Tag) ToEmbeddedTag() *EmbeddedTag {
	if t == nil {
		return nil
	}
	embedded := new(EmbeddedTag)
	embedded.ID = t.ID
	embedded.EncryptedName = t.EncryptedName
	embedded.EncryptedColor = t.EncryptedColor
	embedded.EncryptedTagKey = t.EncryptedTagKey
	embedded.ModifiedAt = t.ModifiedAt
	return embedded
}

View file

@ -0,0 +1,23 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/user/interface.go
package user
import (
"context"
"time"
"github.com/gocql/gocql"
)
// Repository Interface for user management: CRUD plus email/verification-code
// lookups and GDPR-driven IP anonymization.
type Repository interface {
	Create(ctx context.Context, m *User) error
	GetByID(ctx context.Context, id gocql.UUID) (*User, error)
	GetByEmail(ctx context.Context, email string) (*User, error)
	// GetByVerificationCode looks up the user holding a pending
	// email-verification code.
	GetByVerificationCode(ctx context.Context, verificationCode string) (*User, error)
	DeleteByID(ctx context.Context, id gocql.UUID) error
	DeleteByEmail(ctx context.Context, email string) error
	CheckIfExistsByEmail(ctx context.Context, email string) (bool, error)
	UpdateByID(ctx context.Context, m *User) error
	// AnonymizeOldIPs scrubs stored IPs older than cutoffDate and returns
	// the number of records affected.
	AnonymizeOldIPs(ctx context.Context, cutoffDate time.Time) (int, error)
	AnonymizeUserIPs(ctx context.Context, userID gocql.UUID) error // For GDPR right-to-be-forgotten
}

View file

@ -0,0 +1,153 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/user/model.go
package user
import (
"time"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/crypto"
"github.com/gocql/gocql"
)
const (
	// --- Account lifecycle states ---
	UserStatusActive   = 1   // User is active and can log in.
	UserStatusLocked   = 50  // User account is locked, typically due to too many failed login attempts.
	UserStatusArchived = 100 // User account is archived and cannot log in.

	// --- Roles ---
	UserRoleRoot       = 1 // Root user, has all permissions
	UserRoleCompany    = 2 // Company user, has permissions for company-related operations
	UserRoleIndividual = 3 // Individual user, has permissions for individual-related operations

	// --- Profile verification workflow ---
	UserProfileVerificationStatusUnverified         = 1 // The user's profile has not yet been submitted for verification.
	UserProfileVerificationStatusSubmittedForReview = 2 // The user's profile has been submitted and is awaiting review.
	UserProfileVerificationStatusApproved           = 3 // The user's profile has been approved.
	UserProfileVerificationStatusRejected           = 4 // The user's profile has been rejected.

	// NOTE(review): the Store*, EstimatedSubmissions*, grading-service, and
	// welcome-package constants below reference a card-grading workflow that
	// does not obviously belong to the MapleFile domain — confirm whether they
	// were carried over from another product and can be retired.

	// StorePendingStatus indicates this store needs to be reviewed by CPS and approved / rejected.
	StorePendingStatus  = 1 // Store is pending review.
	StoreActiveStatus   = 2 // Store is active and can be used.
	StoreRejectedStatus = 3 // Store has been rejected.
	StoreErrorStatus    = 4 // Store has encountered an error.
	StoreArchivedStatus = 5 // Store has been archived.

	EstimatedSubmissionsPerMonth1To10  = 1 // Estimated submissions per month: 1 to 10
	EstimatedSubmissionsPerMonth10To25 = 2 // Estimated submissions per month: 10 to 25
	EstimatedSubmissionsPerMonth25To50 = 3 // Estimated submissions per month: 25 to 50
	// NOTE(review): name looks like a typo for ...50To100 (comment says 50 to
	// 100); renaming the exported constant would break callers — confirm usage
	// before fixing.
	EstimatedSubmissionsPerMonth50To10  = 4 // Estimated submissions per month: 50 to 100
	EstimatedSubmissionsPerMonth100Plus = 5 // Estimated submissions per month: 100+

	HasOtherGradingServiceYes = 1 // Has other grading service: Yes
	HasOtherGradingServiceNo  = 2 // Has other grading service: No
	RequestWelcomePackageYes  = 1 // Request welcome package: Yes
	RequestWelcomePackageNo   = 2 // Request welcome package: No

	SpecialCollection040001 = 1

	// --- One-time security code types stored in UserSecurityData.CodeType ---
	UserCodeTypeEmailVerification = "email_verification"
	UserCodeTypePasswordReset     = "password_reset"
)
// UserProfileData holds the user's contact details, optional shipping
// address, and consent flags. It is stored embedded in the User record.
type UserProfileData struct {
	// --- Primary contact address ---
	Phone        string `bson:"phone" json:"phone,omitempty"`
	Country      string `bson:"country" json:"country,omitempty"`
	Region       string `bson:"region" json:"region,omitempty"`
	City         string `bson:"city" json:"city,omitempty"`
	PostalCode   string `bson:"postal_code" json:"postal_code,omitempty"`
	AddressLine1 string `bson:"address_line1" json:"address_line1,omitempty"`
	AddressLine2 string `bson:"address_line2" json:"address_line2,omitempty"`
	// --- Optional shipping address; only meaningful when HasShippingAddress is true ---
	HasShippingAddress   bool   `bson:"has_shipping_address" json:"has_shipping_address,omitempty"`
	ShippingName         string `bson:"shipping_name" json:"shipping_name,omitempty"`
	ShippingPhone        string `bson:"shipping_phone" json:"shipping_phone,omitempty"`
	ShippingCountry      string `bson:"shipping_country" json:"shipping_country,omitempty"`
	ShippingRegion       string `bson:"shipping_region" json:"shipping_region,omitempty"`
	ShippingCity         string `bson:"shipping_city" json:"shipping_city,omitempty"`
	ShippingPostalCode   string `bson:"shipping_postal_code" json:"shipping_postal_code,omitempty"`
	ShippingAddressLine1 string `bson:"shipping_address_line1" json:"shipping_address_line1,omitempty"`
	ShippingAddressLine2 string `bson:"shipping_address_line2" json:"shipping_address_line2,omitempty"`
	Timezone             string `bson:"timezone" json:"timezone"`
	// --- Consent flags ---
	AgreeTermsOfService                            bool `bson:"agree_terms_of_service" json:"agree_terms_of_service,omitempty"`
	AgreePromotions                                bool `bson:"agree_promotions" json:"agree_promotions,omitempty"`
	AgreeToTrackingAcrossThirdPartyAppsAndServices bool `bson:"agree_to_tracking_across_third_party_apps_and_services" json:"agree_to_tracking_across_third_party_apps_and_services,omitempty"`
	// Email share notification preferences (pointer so "unset" is
	// distinguishable from an explicit false)
	ShareNotificationsEnabled *bool `bson:"share_notifications_enabled" json:"share_notifications_enabled,omitempty"`
}
// UserSecurityData holds everything security-related for a user: one-time
// verification codes, the E2EE key material (all encrypted client-side), key
// rotation bookkeeping, and 2FA/TOTP state. Fields tagged json:"-" are never
// serialized to API responses.
type UserSecurityData struct {
	WasEmailVerified bool      `bson:"was_email_verified" json:"was_email_verified,omitempty"`
	Code             string    `bson:"code,omitempty" json:"code,omitempty"`
	CodeType         string    `bson:"code_type,omitempty" json:"code_type,omitempty"` // -- 'email_verification' or 'password_reset'
	CodeExpiry       time.Time `bson:"code_expiry,omitempty" json:"code_expiry"`

	// --- E2EE Related ---
	PasswordSalt []byte `json:"password_salt" bson:"password_salt"`
	// KDFParams stores the key derivation function parameters used to derive the user's password hash.
	KDFParams                         crypto.KDFParams                         `json:"kdf_params" bson:"kdf_params"`
	EncryptedMasterKey                crypto.EncryptedMasterKey                `json:"encrypted_master_key" bson:"encrypted_master_key"`
	PublicKey                         crypto.PublicKey                         `json:"public_key" bson:"public_key"`
	EncryptedPrivateKey               crypto.EncryptedPrivateKey               `json:"encrypted_private_key" bson:"encrypted_private_key"`
	EncryptedRecoveryKey              crypto.EncryptedRecoveryKey              `json:"encrypted_recovery_key" bson:"encrypted_recovery_key"`
	MasterKeyEncryptedWithRecoveryKey crypto.MasterKeyEncryptedWithRecoveryKey `json:"master_key_encrypted_with_recovery_key" bson:"master_key_encrypted_with_recovery_key"`
	EncryptedChallenge                []byte                                   `json:"encrypted_challenge,omitempty" bson:"encrypted_challenge,omitempty"`
	VerificationID                    string                                   `json:"verification_id" bson:"verification_id"`

	// Track KDF upgrade status
	LastPasswordChange   time.Time `json:"last_password_change" bson:"last_password_change"`
	KDFParamsNeedUpgrade bool      `json:"kdf_params_need_upgrade" bson:"kdf_params_need_upgrade"`

	// Key rotation tracking fields
	CurrentKeyVersion int                       `json:"current_key_version" bson:"current_key_version"`
	LastKeyRotation   *time.Time                `json:"last_key_rotation,omitempty" bson:"last_key_rotation,omitempty"`
	KeyRotationPolicy *crypto.KeyRotationPolicy `json:"key_rotation_policy,omitempty" bson:"key_rotation_policy,omitempty"`

	// OTPEnabled controls whether we force 2FA or not during login.
	OTPEnabled bool `bson:"otp_enabled" json:"otp_enabled"`
	// OTPVerified indicates the user has successfully validated their OTP token after enabling 2FA, thus turning it on.
	OTPVerified bool `bson:"otp_verified" json:"otp_verified"`
	// OTPValidated automatically gets set as `false` on successful login and then sets `true` once successfully validated by 2FA.
	OTPValidated bool `bson:"otp_validated" json:"otp_validated"`
	// OTPSecret is the unique one-time password secret shared between our
	// backend and 2FA authenticator apps that support TOTP. Never serialized.
	OTPSecret string `bson:"otp_secret" json:"-"`
	// OTPAuthURL is the URL used to share. Never serialized.
	OTPAuthURL string `bson:"otp_auth_url" json:"-"`
	// OTPBackupCodeHash is the one-time-use backup code which resets the 2FA settings and allows the user to set up 2FA from scratch.
	OTPBackupCodeHash string `bson:"otp_backup_code_hash" json:"-"`
	// OTPBackupCodeHashAlgorithm tracks the hashing algorithm used.
	OTPBackupCodeHashAlgorithm string `bson:"otp_backup_code_hash_algorithm" json:"-"`
}
// UserMetadata records audit information for a user record: who created and
// last modified it, from where, and when, plus the last successful login.
type UserMetadata struct {
	CreatedFromIPAddress  string     `bson:"created_from_ip_address" json:"created_from_ip_address"`
	CreatedByUserID       gocql.UUID `bson:"created_by_user_id" json:"created_by_user_id"`
	CreatedAt             time.Time  `bson:"created_at" json:"created_at"`
	CreatedByName         string     `bson:"created_by_name" json:"created_by_name"`
	ModifiedFromIPAddress string     `bson:"modified_from_ip_address" json:"modified_from_ip_address"`
	ModifiedByUserID      gocql.UUID `bson:"modified_by_user_id" json:"modified_by_user_id"`
	ModifiedAt            time.Time  `bson:"modified_at" json:"modified_at"`
	ModifiedByName        string     `bson:"modified_by_name" json:"modified_by_name"`
	LastLoginAt           time.Time  `json:"last_login_at" bson:"last_login_at"`
}
// User is the aggregate root for an account: identity fields at the top
// level, with profile, security, and audit data embedded as sub-documents.
type User struct {
	ID        gocql.UUID `bson:"_id" json:"id"`
	Email     string     `bson:"email" json:"email"`
	FirstName string     `bson:"first_name" json:"first_name"`
	LastName  string     `bson:"last_name" json:"last_name"`
	Name      string     `bson:"name" json:"name"`
	// LexicalName is presumably a sort key ("Last, First" style) — confirm
	// against where it is populated.
	LexicalName string `bson:"lexical_name" json:"lexical_name"`
	Role        int8   `bson:"role" json:"role"`     // One of the UserRole* constants
	Status      int8   `bson:"status" json:"status"` // One of the UserStatus* constants
	Timezone    string `bson:"timezone" json:"timezone"`
	// Embedded sub-documents (pointers; may be nil on partially-built records)
	ProfileData  *UserProfileData  `bson:"profile_data" json:"profile_data"`
	SecurityData *UserSecurityData `bson:"security_data" json:"security_data"`
	Metadata     *UserMetadata     `bson:"metadata" json:"metadata"`
	CreatedAt    time.Time         `bson:"created_at" json:"created_at"`
	ModifiedAt   time.Time         `bson:"modified_at" json:"modified_at"`
}

View file

@ -0,0 +1,122 @@
# MapleFile HTTP Server
Standalone HTTP server for MapleFile backend - completely independent with no Manifold orchestration.
## Architecture
- **Standard Library**: Uses `net/http` with Go 1.22+ routing patterns
- **No Orchestration**: Direct route registration (no `AsRoute()` wrappers)
- **Middleware Stack**: Applied globally with per-route authentication
- **Lifecycle Management**: Integrated with Uber FX for graceful shutdown
## Server Configuration
Configured via environment variables in `.env`:
```env
SERVER_HOST=0.0.0.0
SERVER_PORT=8000
SERVER_READ_TIMEOUT=30s
SERVER_WRITE_TIMEOUT=30s
SERVER_IDLE_TIMEOUT=60s
SERVER_SHUTDOWN_TIMEOUT=10s
```
## Middleware Stack
Applied in this order (outermost to innermost):
1. **Recovery** - Catches panics and returns 500
2. **Logging** - Logs all requests with duration
3. **CORS** - Handles cross-origin requests
4. **Authentication** (per-route) - JWT validation for protected routes
## Route Structure
### Public Routes
- `GET /health` - Health check
- `GET /version` - Version info
- `POST /api/v1/auth/register` - Registration
- `POST /api/v1/auth/login` - Login
### Protected Routes
All `/api/v1/*` routes (except auth) require JWT authentication via:
```
Authorization: Bearer <jwt_token>
```
Key protected endpoints include:
- `GET/PUT/DELETE /api/v1/me` - User profile management
- `POST/GET/PUT/DELETE /api/v1/collections/*` - Collection CRUD
- `POST/GET/PUT/DELETE /api/v1/file/*` - File operations
- `POST /api/v1/invites/send-email` - Send invitation to non-registered user
See `routes.go` for complete endpoint list.
## Handler Registration
Routes are registered in `server.go` -> `registerRoutes()`:
```go
// Public route
s.mux.HandleFunc("GET /health", s.healthCheckHandler)
// Protected route
s.mux.HandleFunc("POST /api/v1/collections",
s.middleware.Attach(s.handlers.CreateCollection))
```
## Starting the Server
The server is started automatically by Uber FX:
```go
fx.New(
fx.Provide(http.NewServer), // Creates and starts server
// ... other providers
)
```
Lifecycle hooks handle:
- **OnStart**: Starts HTTP listener in goroutine
- **OnStop**: Graceful shutdown with timeout
## Response Format
All JSON responses follow this structure:
**Success:**
```json
{
"data": { ... },
"message": "Success"
}
```
**Error:**
```json
{
"error": "Error message",
"code": "ERROR_CODE"
}
```
## Health Checks
```bash
# Basic health check
curl http://localhost:8000/health
# Version check
curl http://localhost:8000/version
```
## Development
Build and run:
```bash
task build
./maplefile-backend daemon
```
The server will start on `http://localhost:8000` by default.

View file

@ -0,0 +1,53 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/auth/complete_login.go
package auth
import (
"encoding/json"
"net/http"
"go.uber.org/zap"
svc_auth "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/auth"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// CompleteLoginHandler finalizes the login flow by forwarding the client's
// challenge response to the complete-login service.
type CompleteLoginHandler struct {
	logger  *zap.Logger
	service svc_auth.CompleteLoginService
}

// NewCompleteLoginHandler wires the handler with a named logger and its service.
func NewCompleteLoginHandler(
	logger *zap.Logger,
	service svc_auth.CompleteLoginService,
) *CompleteLoginHandler {
	return &CompleteLoginHandler{
		logger:  logger.Named("CompleteLoginHandler"),
		service: service,
	}
}

// ServeHTTP decodes the JSON payload, delegates to the service, and writes
// the JSON response. Decode failures produce an RFC 9457 problem response.
func (h *CompleteLoginHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	var req svc_auth.CompleteLoginRequestDTO
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		h.logger.Error("Failed to decode complete login request", zap.Error(err))
		problem := httperror.NewBadRequestError("Invalid request payload. Expected JSON with 'email', 'challengeId', and 'decryptedData' fields.").
			WithInstance(r.URL.Path).
			WithTraceID(httperror.ExtractRequestID(r))
		httperror.RespondWithProblem(w, problem)
		return
	}

	resp, err := h.service.Execute(ctx, &req)
	if err != nil {
		h.logger.Error("Complete login failed", zap.Error(err))
		// Service returns RFC 9457 errors, use RespondWithError to handle them
		httperror.RespondWithError(w, r, err)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	// The status is already committed; an encode failure can only be logged.
	if err := json.NewEncoder(w).Encode(resp); err != nil {
		h.logger.Error("Failed to encode complete login response", zap.Error(err))
	}
}

View file

@ -0,0 +1,49 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/auth/recovery_complete.go
package auth
import (
"encoding/json"
"net/http"
"go.uber.org/zap"
svc_auth "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/auth"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// RecoveryCompleteHandler handles the final step of account recovery.
type RecoveryCompleteHandler struct {
	logger  *zap.Logger
	service svc_auth.RecoveryCompleteService
}

// NewRecoveryCompleteHandler wires the handler with a named logger and its service.
func NewRecoveryCompleteHandler(
	logger *zap.Logger,
	service svc_auth.RecoveryCompleteService,
) *RecoveryCompleteHandler {
	return &RecoveryCompleteHandler{
		logger:  logger.Named("RecoveryCompleteHandler"),
		service: service,
	}
}

// ServeHTTP decodes the JSON payload, delegates to the service, and writes
// the JSON response.
func (h *RecoveryCompleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	var req svc_auth.RecoveryCompleteRequestDTO
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		h.logger.Error("Failed to decode recovery complete request", zap.Error(err))
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("payload", "Invalid request payload"))
		return
	}

	resp, err := h.service.Execute(ctx, &req)
	if err != nil {
		h.logger.Error("Recovery complete failed", zap.Error(err))
		httperror.RespondWithError(w, r, err)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	// The status is already committed; an encode failure can only be logged.
	if err := json.NewEncoder(w).Encode(resp); err != nil {
		h.logger.Error("Failed to encode recovery complete response", zap.Error(err))
	}
}

View file

@ -0,0 +1,49 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/auth/recovery_initiate.go
package auth
import (
"encoding/json"
"net/http"
"go.uber.org/zap"
svc_auth "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/auth"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// RecoveryInitiateHandler starts the account recovery flow.
type RecoveryInitiateHandler struct {
	logger  *zap.Logger
	service svc_auth.RecoveryInitiateService
}

// NewRecoveryInitiateHandler wires the handler with a named logger and its service.
func NewRecoveryInitiateHandler(
	logger *zap.Logger,
	service svc_auth.RecoveryInitiateService,
) *RecoveryInitiateHandler {
	return &RecoveryInitiateHandler{
		logger:  logger.Named("RecoveryInitiateHandler"),
		service: service,
	}
}

// ServeHTTP decodes the JSON payload, delegates to the service, and writes
// the JSON response.
func (h *RecoveryInitiateHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	var req svc_auth.RecoveryInitiateRequestDTO
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		h.logger.Error("Failed to decode recovery initiate request", zap.Error(err))
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("payload", "Invalid request payload"))
		return
	}

	resp, err := h.service.Execute(ctx, &req)
	if err != nil {
		h.logger.Error("Recovery initiate failed", zap.Error(err))
		httperror.RespondWithError(w, r, err)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	// The status is already committed; an encode failure can only be logged.
	if err := json.NewEncoder(w).Encode(resp); err != nil {
		h.logger.Error("Failed to encode recovery initiate response", zap.Error(err))
	}
}

View file

@ -0,0 +1,49 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/auth/recovery_verify.go
package auth
import (
"encoding/json"
"net/http"
"go.uber.org/zap"
svc_auth "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/auth"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// RecoveryVerifyHandler verifies the recovery challenge during account recovery.
type RecoveryVerifyHandler struct {
	logger  *zap.Logger
	service svc_auth.RecoveryVerifyService
}

// NewRecoveryVerifyHandler wires the handler with a named logger and its service.
func NewRecoveryVerifyHandler(
	logger *zap.Logger,
	service svc_auth.RecoveryVerifyService,
) *RecoveryVerifyHandler {
	return &RecoveryVerifyHandler{
		logger:  logger.Named("RecoveryVerifyHandler"),
		service: service,
	}
}

// ServeHTTP decodes the JSON payload, delegates to the service, and writes
// the JSON response.
func (h *RecoveryVerifyHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	var req svc_auth.RecoveryVerifyRequestDTO
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		h.logger.Error("Failed to decode recovery verify request", zap.Error(err))
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("payload", "Invalid request payload"))
		return
	}

	resp, err := h.service.Execute(ctx, &req)
	if err != nil {
		h.logger.Error("Recovery verify failed", zap.Error(err))
		httperror.RespondWithError(w, r, err)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	// The status is already committed; an encode failure can only be logged.
	if err := json.NewEncoder(w).Encode(resp); err != nil {
		h.logger.Error("Failed to encode recovery verify response", zap.Error(err))
	}
}

View file

@ -0,0 +1,49 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/auth/refresh_token.go
package auth
import (
"encoding/json"
"net/http"
"go.uber.org/zap"
svc_auth "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/auth"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// RefreshTokenHandler exchanges a refresh token for a new access token pair.
type RefreshTokenHandler struct {
	logger  *zap.Logger
	service svc_auth.RefreshTokenService
}

// NewRefreshTokenHandler wires the handler with a named logger and its service.
func NewRefreshTokenHandler(
	logger *zap.Logger,
	service svc_auth.RefreshTokenService,
) *RefreshTokenHandler {
	return &RefreshTokenHandler{
		logger:  logger.Named("RefreshTokenHandler"),
		service: service,
	}
}

// ServeHTTP decodes the JSON payload, delegates to the service, and writes
// the JSON response.
func (h *RefreshTokenHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	var req svc_auth.RefreshTokenRequestDTO
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		h.logger.Error("Failed to decode refresh token request", zap.Error(err))
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("payload", "Invalid request payload"))
		return
	}

	resp, err := h.service.Execute(ctx, &req)
	if err != nil {
		h.logger.Error("Refresh token failed", zap.Error(err))
		httperror.RespondWithError(w, r, err)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	// The status is already committed; an encode failure can only be logged.
	if err := json.NewEncoder(w).Encode(resp); err != nil {
		h.logger.Error("Failed to encode refresh token response", zap.Error(err))
	}
}

View file

@ -0,0 +1,77 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/auth/register.go
package auth
import (
"encoding/json"
"net/http"
"go.uber.org/zap"
svc_auth "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/auth"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/validation"
)
// RegisterHandler handles user registration
type RegisterHandler struct {
	logger  *zap.Logger
	service svc_auth.RegisterService
}

// NewRegisterHandler creates a new registration handler
func NewRegisterHandler(
	logger *zap.Logger,
	service svc_auth.RegisterService,
) *RegisterHandler {
	return &RegisterHandler{
		logger:  logger.Named("RegisterHandler"),
		service: service,
	}
}

// ServeHTTP decodes the registration payload, delegates to the service, and
// renders either an RFC 9457 problem or a 201 Created JSON response.
func (h *RegisterHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	// Extract request ID from existing middleware so every problem response
	// carries the same trace ID.
	requestID := httperror.ExtractRequestID(r)

	// Decode request
	var req svc_auth.RegisterRequestDTO
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		h.logger.Error("Failed to decode register request", zap.Error(err))
		problem := httperror.NewBadRequestError("Invalid request payload: " + err.Error())
		problem.WithInstance(r.URL.Path).WithTraceID(requestID)
		httperror.RespondWithProblem(w, problem)
		return
	}

	// Call service - service handles validation and returns RFC 9457 errors
	resp, err := h.service.Execute(ctx, &req)
	if err != nil {
		// Check if error is already a ProblemDetail
		if problem, ok := err.(*httperror.ProblemDetail); ok {
			// Email is masked before logging to avoid PII exposure.
			h.logger.Warn("Registration failed with validation errors",
				zap.String("email", validation.MaskEmail(req.Email)),
				zap.Int("error_count", len(problem.Errors)))
			problem.WithInstance(r.URL.Path).WithTraceID(requestID)
			httperror.RespondWithProblem(w, problem)
			return
		}

		// Unexpected error - wrap in internal server error
		h.logger.Error("Registration failed with unexpected error",
			zap.String("email", validation.MaskEmail(req.Email)),
			zap.Error(err))
		problem := httperror.NewInternalServerError("Registration failed: " + err.Error())
		problem.WithInstance(r.URL.Path).WithTraceID(requestID)
		httperror.RespondWithProblem(w, problem)
		return
	}

	// Return success response
	h.logger.Info("User registered successfully", zap.String("user_id", resp.UserID))
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusCreated)
	// The 201 status is already committed; an encode failure can only be logged.
	if err := json.NewEncoder(w).Encode(resp); err != nil {
		h.logger.Error("Failed to encode register response", zap.Error(err))
	}
}

View file

@ -0,0 +1,53 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/auth/request_ott.go
package auth
import (
"encoding/json"
"net/http"
"go.uber.org/zap"
svc_auth "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/auth"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// RequestOTTHandler requests a one-time token (OTT) to be sent to the user's
// email as the first step of login.
type RequestOTTHandler struct {
	logger  *zap.Logger
	service svc_auth.RequestOTTService
}

// NewRequestOTTHandler wires the handler with a named logger and its service.
func NewRequestOTTHandler(
	logger *zap.Logger,
	service svc_auth.RequestOTTService,
) *RequestOTTHandler {
	return &RequestOTTHandler{
		logger:  logger.Named("RequestOTTHandler"),
		service: service,
	}
}

// ServeHTTP decodes the JSON payload, delegates to the service, and writes
// the JSON response.
func (h *RequestOTTHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	var req svc_auth.RequestOTTRequestDTO
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		h.logger.Error("Failed to decode request OTT request", zap.Error(err))
		problem := httperror.NewBadRequestError("Invalid request payload. Expected JSON with 'email' field.").
			WithInstance(r.URL.Path).
			WithTraceID(httperror.ExtractRequestID(r))
		httperror.RespondWithProblem(w, problem)
		return
	}

	resp, err := h.service.Execute(ctx, &req)
	if err != nil {
		h.logger.Error("Request OTT failed", zap.Error(err))
		// Service returns RFC 9457 errors, use RespondWithError to handle them
		httperror.RespondWithError(w, r, err)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	// The status is already committed; an encode failure can only be logged.
	if err := json.NewEncoder(w).Encode(resp); err != nil {
		h.logger.Error("Failed to encode request OTT response", zap.Error(err))
	}
}

View file

@ -0,0 +1,59 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/auth/resend_verification.go
package auth
import (
"encoding/json"
"net/http"
"go.uber.org/zap"
svc_auth "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/auth"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// ResendVerificationHandler handles resending verification emails
type ResendVerificationHandler struct {
	logger  *zap.Logger
	service svc_auth.ResendVerificationService
}

// NewResendVerificationHandler creates a new resend verification handler
func NewResendVerificationHandler(
	logger *zap.Logger,
	service svc_auth.ResendVerificationService,
) *ResendVerificationHandler {
	return &ResendVerificationHandler{
		logger:  logger.Named("ResendVerificationHandler"),
		service: service,
	}
}

// ServeHTTP decodes the JSON payload, delegates to the service, and writes
// the JSON response.
func (h *ResendVerificationHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	// Decode request
	var req svc_auth.ResendVerificationRequestDTO
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		h.logger.Error("Failed to decode resend verification request", zap.Error(err))
		problem := httperror.NewBadRequestError("Invalid request payload. Expected JSON with 'email' field.").
			WithInstance(r.URL.Path).
			WithTraceID(httperror.ExtractRequestID(r))
		httperror.RespondWithProblem(w, problem)
		return
	}

	// Call service (service now handles validation and returns RFC 9457 errors)
	resp, err := h.service.Execute(ctx, &req)
	if err != nil {
		h.logger.Error("Resend verification failed", zap.Error(err))
		// Service returns RFC 9457 errors, use RespondWithError to handle them
		httperror.RespondWithError(w, r, err)
		return
	}

	// Return success response
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	// The status is already committed; an encode failure can only be logged.
	if err := json.NewEncoder(w).Encode(resp); err != nil {
		h.logger.Error("Failed to encode resend verification response", zap.Error(err))
	}
}

View file

@ -0,0 +1,59 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/auth/verify_email.go
package auth
import (
"encoding/json"
"net/http"
"go.uber.org/zap"
svc_auth "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/auth"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// VerifyEmailHandler handles email verification
type VerifyEmailHandler struct {
	logger  *zap.Logger
	service svc_auth.VerifyEmailService
}

// NewVerifyEmailHandler creates a new verify email handler
func NewVerifyEmailHandler(
	logger *zap.Logger,
	service svc_auth.VerifyEmailService,
) *VerifyEmailHandler {
	return &VerifyEmailHandler{
		logger:  logger.Named("VerifyEmailHandler"),
		service: service,
	}
}

// ServeHTTP decodes the JSON payload, delegates to the service, and writes
// the JSON response.
func (h *VerifyEmailHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	// Decode request
	var req svc_auth.VerifyEmailRequestDTO
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		h.logger.Error("Failed to decode verify email request", zap.Error(err))
		problem := httperror.NewBadRequestError("Invalid request payload. Expected JSON with 'code' field.").
			WithInstance(r.URL.Path).
			WithTraceID(httperror.ExtractRequestID(r))
		httperror.RespondWithProblem(w, problem)
		return
	}

	// Call service
	resp, err := h.service.Execute(ctx, &req)
	if err != nil {
		h.logger.Error("Email verification failed", zap.Error(err))
		// Service returns RFC 9457 errors, use RespondWithError to handle them
		httperror.RespondWithError(w, r, err)
		return
	}

	// Return success response
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	// The status is already committed; an encode failure can only be logged.
	if err := json.NewEncoder(w).Encode(resp); err != nil {
		h.logger.Error("Failed to encode verify email response", zap.Error(err))
	}
}

View file

@ -0,0 +1,53 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/auth/verify_ott.go
package auth
import (
"encoding/json"
"net/http"
"go.uber.org/zap"
svc_auth "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/auth"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// VerifyOTTHandler verifies the one-time token the user received by email.
type VerifyOTTHandler struct {
	logger  *zap.Logger
	service svc_auth.VerifyOTTService
}

// NewVerifyOTTHandler wires the handler with a named logger and its service.
func NewVerifyOTTHandler(
	logger *zap.Logger,
	service svc_auth.VerifyOTTService,
) *VerifyOTTHandler {
	return &VerifyOTTHandler{
		logger:  logger.Named("VerifyOTTHandler"),
		service: service,
	}
}

// ServeHTTP decodes the JSON payload, delegates to the service, and writes
// the JSON response.
func (h *VerifyOTTHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	var req svc_auth.VerifyOTTRequestDTO
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		h.logger.Error("Failed to decode verify OTT request", zap.Error(err))
		problem := httperror.NewBadRequestError("Invalid request payload. Expected JSON with 'email' and 'ott' fields.").
			WithInstance(r.URL.Path).
			WithTraceID(httperror.ExtractRequestID(r))
		httperror.RespondWithProblem(w, problem)
		return
	}

	resp, err := h.service.Execute(ctx, &req)
	if err != nil {
		h.logger.Error("Verify OTT failed", zap.Error(err))
		// Service returns RFC 9457 errors, use RespondWithError to handle them
		httperror.RespondWithError(w, r, err)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	// The status is already committed; an encode failure can only be logged.
	if err := json.NewEncoder(w).Encode(resp); err != nil {
		h.logger.Error("Failed to encode verify OTT response", zap.Error(err))
	}
}

View file

@ -0,0 +1,97 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/blockedemail/create.go
package blockedemail
import (
"bytes"
"context"
"encoding/json"
"io"
"net/http"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/middleware"
svc_blockedemail "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/blockedemail"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// CreateBlockedEmailHTTPHandler adds an email address to the authenticated
// user's blocked list.
type CreateBlockedEmailHTTPHandler struct {
	config     *config.Configuration
	logger     *zap.Logger
	service    svc_blockedemail.CreateBlockedEmailService
	middleware middleware.Middleware
}

// NewCreateBlockedEmailHTTPHandler wires the handler with its config, logger,
// service, and authentication middleware.
func NewCreateBlockedEmailHTTPHandler(
	config *config.Configuration,
	logger *zap.Logger,
	service svc_blockedemail.CreateBlockedEmailService,
	middleware middleware.Middleware,
) *CreateBlockedEmailHTTPHandler {
	logger = logger.Named("CreateBlockedEmailHTTPHandler")
	return &CreateBlockedEmailHTTPHandler{
		config:     config,
		logger:     logger,
		service:    service,
		middleware: middleware,
	}
}

// Pattern returns the Go 1.22 mux routing pattern for this handler.
func (*CreateBlockedEmailHTTPHandler) Pattern() string {
	return "POST /api/v1/me/blocked-emails"
}

// ServeHTTP runs Execute behind the authentication middleware.
func (h *CreateBlockedEmailHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	h.middleware.Attach(h.Execute)(w, req)
}

// unmarshalRequest decodes the request body into the service DTO, teeing the
// raw bytes aside so decode failures can be debugged without re-reading Body.
func (h *CreateBlockedEmailHTTPHandler) unmarshalRequest(
	ctx context.Context,
	r *http.Request,
) (*svc_blockedemail.CreateBlockedEmailRequestDTO, error) {
	var requestData svc_blockedemail.CreateBlockedEmailRequestDTO

	defer r.Body.Close()

	var rawJSON bytes.Buffer
	teeReader := io.TeeReader(r.Body, &rawJSON)

	err := json.NewDecoder(teeReader).Decode(&requestData)
	if err != nil {
		h.logger.Error("decoding error",
			zap.Any("err", err))
		// Log raw JSON at debug level only to avoid PII exposure in production logs
		h.logger.Debug("raw request body for debugging",
			zap.String("json", rawJSON.String()))
		return nil, httperror.NewForSingleField(http.StatusBadRequest, "non_field_error", "payload structure is wrong")
	}

	return &requestData, nil
}

// Execute decodes the payload, delegates to the service, and writes a
// 201 Created JSON response on success.
func (h *CreateBlockedEmailHTTPHandler) Execute(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	ctx := r.Context()

	req, err := h.unmarshalRequest(ctx, r)
	if err != nil {
		httperror.RespondWithError(w, r, err)
		return
	}

	resp, err := h.service.Execute(ctx, req)
	if err != nil {
		httperror.RespondWithError(w, r, err)
		return
	}

	w.WriteHeader(http.StatusCreated)
	// The 201 status is already committed; attempting a second error response
	// here would only trigger a superfluous WriteHeader and corrupt the body,
	// so an encode failure can only be logged.
	if err := json.NewEncoder(w).Encode(resp); err != nil {
		h.logger.Error("failed to encode response",
			zap.Any("error", err))
	}
}

View file

@ -0,0 +1,87 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/blockedemail/delete.go
package blockedemail
import (
"encoding/json"
"net/http"
"net/url"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/middleware"
svc_blockedemail "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/blockedemail"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/validation"
)
type DeleteBlockedEmailHTTPHandler struct {
config *config.Configuration
logger *zap.Logger
service svc_blockedemail.DeleteBlockedEmailService
middleware middleware.Middleware
}
func NewDeleteBlockedEmailHTTPHandler(
config *config.Configuration,
logger *zap.Logger,
service svc_blockedemail.DeleteBlockedEmailService,
middleware middleware.Middleware,
) *DeleteBlockedEmailHTTPHandler {
logger = logger.Named("DeleteBlockedEmailHTTPHandler")
return &DeleteBlockedEmailHTTPHandler{
config: config,
logger: logger,
service: service,
middleware: middleware,
}
}
// Pattern returns the Go 1.22+ ServeMux route this handler registers for.
func (*DeleteBlockedEmailHTTPHandler) Pattern() string {
	return "DELETE /api/v1/me/blocked-emails/{email}"
}
// ServeHTTP satisfies http.Handler by running Execute through the middleware chain.
func (h *DeleteBlockedEmailHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	h.middleware.Attach(h.Execute)(w, req)
}
// Execute handles the delete-blocked-email request. The email is taken from
// the URL path, percent-decoded, and passed to the service layer.
func (h *DeleteBlockedEmailHTTPHandler) Execute(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	ctx := r.Context()

	// Extract email from URL path
	emailEncoded := r.PathValue("email")
	if emailEncoded == "" {
		httperror.RespondWithError(w, r, httperror.NewBadRequestError("Email is required"))
		return
	}

	// URL decode the email using PathUnescape (not QueryUnescape):
	// PathUnescape correctly handles %2B as + instead of treating + as space.
	email, err := url.PathUnescape(emailEncoded)
	if err != nil {
		h.logger.Error("failed to decode email",
			zap.String("encoded_email", validation.MaskEmail(emailEncoded)),
			zap.Error(err))
		httperror.RespondWithError(w, r, httperror.NewBadRequestError("Invalid email format"))
		return
	}

	h.logger.Debug("decoded email from path",
		zap.String("encoded", validation.MaskEmail(emailEncoded)),
		zap.String("decoded", validation.MaskEmail(email)))

	resp, err := h.service.Execute(ctx, email)
	if err != nil {
		httperror.RespondWithError(w, r, err)
		return
	}

	if err := json.NewEncoder(w).Encode(resp); err != nil {
		// Encode has already begun writing the 200 response; a second error
		// response would be superfluous. Log and return.
		h.logger.Error("failed to encode response", zap.Error(err))
		return
	}
}

View file

@ -0,0 +1,63 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/blockedemail/list.go
package blockedemail
import (
"encoding/json"
"net/http"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/middleware"
svc_blockedemail "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/blockedemail"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// ListBlockedEmailsHTTPHandler serves GET /api/v1/me/blocked-emails.
// All dependencies are injected via NewListBlockedEmailsHTTPHandler.
type ListBlockedEmailsHTTPHandler struct {
	config     *config.Configuration
	logger     *zap.Logger
	service    svc_blockedemail.ListBlockedEmailsService
	middleware middleware.Middleware
}
// NewListBlockedEmailsHTTPHandler builds the handler with its dependencies
// and a named sub-logger for traceability.
func NewListBlockedEmailsHTTPHandler(
	config *config.Configuration,
	logger *zap.Logger,
	service svc_blockedemail.ListBlockedEmailsService,
	middleware middleware.Middleware,
) *ListBlockedEmailsHTTPHandler {
	return &ListBlockedEmailsHTTPHandler{
		config:     config,
		logger:     logger.Named("ListBlockedEmailsHTTPHandler"),
		service:    service,
		middleware: middleware,
	}
}
// Pattern returns the Go 1.22+ ServeMux route this handler registers for.
func (*ListBlockedEmailsHTTPHandler) Pattern() string {
	return "GET /api/v1/me/blocked-emails"
}
// ServeHTTP satisfies http.Handler by running Execute through the middleware chain.
func (h *ListBlockedEmailsHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	h.middleware.Attach(h.Execute)(w, req)
}
// Execute returns the caller's blocked-email entries as JSON.
func (h *ListBlockedEmailsHTTPHandler) Execute(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")

	result, err := h.service.Execute(r.Context())
	if err != nil {
		httperror.RespondWithError(w, r, err)
		return
	}

	if encodeErr := json.NewEncoder(w).Encode(result); encodeErr != nil {
		h.logger.Error("failed to encode response",
			zap.Any("error", encodeErr))
		httperror.RespondWithError(w, r, encodeErr)
		return
	}
}

View file

@ -0,0 +1,37 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/blockedemail/provider.go
package blockedemail
import (
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/middleware"
svc_blockedemail "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/blockedemail"
)
// ProvideCreateBlockedEmailHTTPHandler is the DI provider for CreateBlockedEmailHTTPHandler.
func ProvideCreateBlockedEmailHTTPHandler(
	cfg *config.Configuration,
	logger *zap.Logger,
	service svc_blockedemail.CreateBlockedEmailService,
	middleware middleware.Middleware,
) *CreateBlockedEmailHTTPHandler {
	return NewCreateBlockedEmailHTTPHandler(cfg, logger, service, middleware)
}
// ProvideListBlockedEmailsHTTPHandler is the DI provider for ListBlockedEmailsHTTPHandler.
func ProvideListBlockedEmailsHTTPHandler(
	cfg *config.Configuration,
	logger *zap.Logger,
	service svc_blockedemail.ListBlockedEmailsService,
	middleware middleware.Middleware,
) *ListBlockedEmailsHTTPHandler {
	return NewListBlockedEmailsHTTPHandler(cfg, logger, service, middleware)
}
// ProvideDeleteBlockedEmailHTTPHandler is the DI provider for DeleteBlockedEmailHTTPHandler.
func ProvideDeleteBlockedEmailHTTPHandler(
	cfg *config.Configuration,
	logger *zap.Logger,
	service svc_blockedemail.DeleteBlockedEmailService,
	middleware middleware.Middleware,
) *DeleteBlockedEmailHTTPHandler {
	return NewDeleteBlockedEmailHTTPHandler(cfg, logger, service, middleware)
}

View file

@ -0,0 +1,96 @@
// monorepo/cloud/backend/internal/maplefile/interface/http/collection/archive.go
package collection
import (
"encoding/json"
"errors"
"net/http"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/middleware"
svc_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/collection"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// ArchiveCollectionHTTPHandler serves PUT /api/v1/collections/{id}/archive.
// All dependencies are injected via NewArchiveCollectionHTTPHandler.
type ArchiveCollectionHTTPHandler struct {
	config     *config.Configuration
	logger     *zap.Logger
	service    svc_collection.ArchiveCollectionService
	middleware middleware.Middleware
}
// NewArchiveCollectionHTTPHandler builds the handler with its dependencies
// and a named sub-logger for traceability.
func NewArchiveCollectionHTTPHandler(
	config *config.Configuration,
	logger *zap.Logger,
	service svc_collection.ArchiveCollectionService,
	middleware middleware.Middleware,
) *ArchiveCollectionHTTPHandler {
	return &ArchiveCollectionHTTPHandler{
		config:     config,
		logger:     logger.Named("ArchiveCollectionHTTPHandler"),
		service:    service,
		middleware: middleware,
	}
}
// Pattern returns the Go 1.22+ ServeMux route this handler registers for.
func (*ArchiveCollectionHTTPHandler) Pattern() string {
	return "PUT /api/v1/collections/{id}/archive"
}
// ServeHTTP satisfies http.Handler by running Execute through the middleware chain.
func (h *ArchiveCollectionHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	// Apply middleware before handling the request
	h.middleware.Attach(h.Execute)(w, req)
}
// Execute archives the collection identified by the {id} path parameter and
// writes the service response back as JSON.
func (h *ArchiveCollectionHTTPHandler) Execute(w http.ResponseWriter, r *http.Request) {
	// Set response content type
	w.Header().Set("Content-Type", "application/json")
	ctx := r.Context()

	// Extract collection ID from the URL
	collectionIDStr := r.PathValue("id")
	if collectionIDStr == "" {
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("collection_id", "Collection ID is required"))
		return
	}

	// Convert string ID to a gocql UUID
	collectionID, err := gocql.ParseUUID(collectionIDStr)
	if err != nil {
		h.logger.Error("invalid collection ID format",
			zap.String("collection_id", collectionIDStr),
			zap.Error(err))
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("collection_id", "Invalid collection ID format"))
		return
	}

	// Create request DTO
	dtoReq := &svc_collection.ArchiveCollectionRequestDTO{
		ID: collectionID,
	}

	resp, err := h.service.Execute(ctx, dtoReq)
	if err != nil {
		httperror.RespondWithError(w, r, err)
		return
	}

	// Guard against a nil response up front so the happy path stays flat.
	if resp == nil {
		httperror.RespondWithError(w, r, errors.New("no result"))
		return
	}

	if err := json.NewEncoder(w).Encode(resp); err != nil {
		// Encoding has already begun writing the 200 response; a second error
		// response would be superfluous. Log and return.
		h.logger.Error("failed to encode response", zap.Error(err))
		return
	}
}

View file

@ -0,0 +1,109 @@
// monorepo/cloud/backend/internal/maplefile/interface/http/collection/create.go
package collection
import (
"bytes"
"context"
"encoding/json"
"io"
"net/http"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/middleware"
svc_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/collection"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// CreateCollectionHTTPHandler serves POST /api/v1/collections.
// All dependencies are injected via NewCreateCollectionHTTPHandler.
type CreateCollectionHTTPHandler struct {
	config     *config.Configuration
	logger     *zap.Logger
	service    svc_collection.CreateCollectionService
	middleware middleware.Middleware
}
// NewCreateCollectionHTTPHandler builds the handler with its dependencies
// and a named sub-logger for traceability.
func NewCreateCollectionHTTPHandler(
	config *config.Configuration,
	logger *zap.Logger,
	service svc_collection.CreateCollectionService,
	middleware middleware.Middleware,
) *CreateCollectionHTTPHandler {
	return &CreateCollectionHTTPHandler{
		config:     config,
		logger:     logger.Named("CreateCollectionHTTPHandler"),
		service:    service,
		middleware: middleware,
	}
}
// Pattern returns the Go 1.22+ ServeMux route this handler registers for.
func (*CreateCollectionHTTPHandler) Pattern() string {
	return "POST /api/v1/collections"
}
// ServeHTTP satisfies http.Handler by running Execute through the middleware chain.
func (h *CreateCollectionHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	// Apply middleware before handling the request
	h.middleware.Attach(h.Execute)(w, req)
}
// unmarshalRequest decodes the JSON body into a CreateCollectionRequestDTO.
// On decode failure it logs the raw payload and returns a bad-request error.
//
// NOTE(review): the raw request body is logged verbatim on failure — confirm
// it cannot contain sensitive data before relying on this in production.
func (h *CreateCollectionHTTPHandler) unmarshalRequest(
	ctx context.Context,
	r *http.Request,
) (*svc_collection.CreateCollectionRequestDTO, error) {
	// Initialize our structure which will store the parsed request data
	var requestData svc_collection.CreateCollectionRequestDTO
	defer r.Body.Close()
	var rawJSON bytes.Buffer
	teeReader := io.TeeReader(r.Body, &rawJSON) // TeeReader allows you to read the JSON and capture it
	// Read the JSON string and convert it into our golang struct
	err := json.NewDecoder(teeReader).Decode(&requestData)
	if err != nil {
		h.logger.Error("Failed to decode create collection request",
			zap.Error(err),
			zap.String("json", rawJSON.String()),
		)
		return nil, httperror.NewBadRequestError("Invalid request payload. Please check your collection data.")
	}
	return &requestData, nil
}
// Execute decodes the request, invokes the create-collection service, and
// writes the new collection back with HTTP 201.
func (h *CreateCollectionHTTPHandler) Execute(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	dto, err := h.unmarshalRequest(ctx, r)
	if err != nil {
		h.logger.Error("Failed to unmarshal create collection request", zap.Error(err))
		httperror.RespondWithError(w, r, err)
		return
	}

	created, err := h.service.Execute(ctx, dto)
	if err != nil {
		h.logger.Error("Failed to create collection", zap.Error(err))
		// Service returns RFC 9457 errors, use RespondWithError to handle them
		httperror.RespondWithError(w, r, err)
		return
	}
	if created == nil {
		h.logger.Error("No collection returned from service")
		problem := httperror.NewInternalServerError("Failed to create collection. Please try again.").
			WithInstance(r.URL.Path).
			WithTraceID(httperror.ExtractRequestID(r))
		httperror.RespondWithProblem(w, problem)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusCreated)
	if encErr := json.NewEncoder(w).Encode(created); encErr != nil {
		// Headers already sent; nothing more we can return to the client.
		h.logger.Error("Failed to encode collection response", zap.Error(encErr))
	}
}

View file

@ -0,0 +1,97 @@
// monorepo/cloud/backend/internal/maplefile/interface/http/collection/find_by_parent.go
package collection
import (
"encoding/json"
"errors"
"net/http"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/middleware"
svc_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/collection"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// FindCollectionsByParentHTTPHandler serves GET /api/v1/collections/parent/{parent_id}.
// All dependencies are injected via NewFindCollectionsByParentHTTPHandler.
type FindCollectionsByParentHTTPHandler struct {
	config     *config.Configuration
	logger     *zap.Logger
	service    svc_collection.FindCollectionsByParentService
	middleware middleware.Middleware
}
// NewFindCollectionsByParentHTTPHandler builds the handler with its
// dependencies and a named sub-logger for traceability.
func NewFindCollectionsByParentHTTPHandler(
	config *config.Configuration,
	logger *zap.Logger,
	service svc_collection.FindCollectionsByParentService,
	middleware middleware.Middleware,
) *FindCollectionsByParentHTTPHandler {
	return &FindCollectionsByParentHTTPHandler{
		config:     config,
		logger:     logger.Named("FindCollectionsByParentHTTPHandler"),
		service:    service,
		middleware: middleware,
	}
}
// Pattern returns the Go 1.22+ ServeMux route this handler registers for.
func (*FindCollectionsByParentHTTPHandler) Pattern() string {
	return "GET /api/v1/collections/parent/{parent_id}"
}
// ServeHTTP satisfies http.Handler by running Execute through the middleware chain.
func (h *FindCollectionsByParentHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	// Apply middleware before handling the request
	h.middleware.Attach(h.Execute)(w, req)
}
// Execute lists the collections whose parent is the {parent_id} path
// parameter and writes them back as JSON.
func (h *FindCollectionsByParentHTTPHandler) Execute(w http.ResponseWriter, r *http.Request) {
	// Set response content type
	w.Header().Set("Content-Type", "application/json")
	ctx := r.Context()

	// Extract parent ID from URL parameters
	parentIDStr := r.PathValue("parent_id")
	if parentIDStr == "" {
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("parent_id", "Parent ID is required"))
		return
	}

	// Convert string ID to a gocql UUID
	parentID, err := gocql.ParseUUID(parentIDStr)
	if err != nil {
		h.logger.Error("invalid parent ID format",
			zap.String("parent_id", parentIDStr),
			zap.Error(err))
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("parent_id", "Invalid parent ID format"))
		return
	}

	// Create request DTO
	req := &svc_collection.FindByParentRequestDTO{
		ParentID: parentID,
	}

	// Call service
	resp, err := h.service.Execute(ctx, req)
	if err != nil {
		httperror.RespondWithError(w, r, err)
		return
	}

	// Guard against a nil response up front so the happy path stays flat.
	if resp == nil {
		httperror.RespondWithError(w, r, errors.New("no result"))
		return
	}

	if err := json.NewEncoder(w).Encode(resp); err != nil {
		// Encoding has already begun writing the 200 response; log only.
		h.logger.Error("failed to encode response", zap.Error(err))
		return
	}
}

View file

@ -0,0 +1,74 @@
// monorepo/cloud/backend/internal/maplefile/interface/http/collection/find_root_collections.go
package collection
import (
"encoding/json"
"net/http"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/middleware"
svc_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/collection"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// FindRootCollectionsHTTPHandler serves GET /api/v1/collections/root.
// All dependencies are injected via NewFindRootCollectionsHTTPHandler.
type FindRootCollectionsHTTPHandler struct {
	config     *config.Configuration
	logger     *zap.Logger
	service    svc_collection.FindRootCollectionsService
	middleware middleware.Middleware
}
// NewFindRootCollectionsHTTPHandler builds the handler with its dependencies
// and a named sub-logger for traceability.
func NewFindRootCollectionsHTTPHandler(
	config *config.Configuration,
	logger *zap.Logger,
	service svc_collection.FindRootCollectionsService,
	middleware middleware.Middleware,
) *FindRootCollectionsHTTPHandler {
	return &FindRootCollectionsHTTPHandler{
		config:     config,
		logger:     logger.Named("FindRootCollectionsHTTPHandler"),
		service:    service,
		middleware: middleware,
	}
}
// Pattern returns the Go 1.22+ ServeMux route this handler registers for.
func (*FindRootCollectionsHTTPHandler) Pattern() string {
	return "GET /api/v1/collections/root"
}
// ServeHTTP satisfies http.Handler by running Execute through the middleware chain.
func (h *FindRootCollectionsHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	// Apply middleware before handling the request
	h.middleware.Attach(h.Execute)(w, req)
}
// Execute lists the caller's root-level collections as JSON.
func (h *FindRootCollectionsHTTPHandler) Execute(w http.ResponseWriter, r *http.Request) {
	collections, err := h.service.Execute(r.Context())
	if err != nil {
		h.logger.Error("Failed to find root collections", zap.Error(err))
		// Service returns RFC 9457 errors, use RespondWithError to handle them
		httperror.RespondWithError(w, r, err)
		return
	}
	if collections == nil {
		h.logger.Error("No collections returned from service")
		problem := httperror.NewInternalServerError("Failed to retrieve collections. Please try again.").
			WithInstance(r.URL.Path).
			WithTraceID(httperror.ExtractRequestID(r))
		httperror.RespondWithProblem(w, problem)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	if encErr := json.NewEncoder(w).Encode(collections); encErr != nil {
		// Headers already sent; nothing more we can return to the client.
		h.logger.Error("Failed to encode collections response", zap.Error(encErr))
	}
}

View file

@ -0,0 +1,91 @@
// monorepo/cloud/backend/internal/maplefile/interface/http/collection/get.go
package collection
import (
"encoding/json"
"errors"
"net/http"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/middleware"
svc_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/collection"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// GetCollectionHTTPHandler serves GET /api/v1/collections/{id}.
// All dependencies are injected via NewGetCollectionHTTPHandler.
type GetCollectionHTTPHandler struct {
	config     *config.Configuration
	logger     *zap.Logger
	service    svc_collection.GetCollectionService
	middleware middleware.Middleware
}
// NewGetCollectionHTTPHandler builds the handler with its dependencies and a
// named sub-logger for traceability.
func NewGetCollectionHTTPHandler(
	config *config.Configuration,
	logger *zap.Logger,
	service svc_collection.GetCollectionService,
	middleware middleware.Middleware,
) *GetCollectionHTTPHandler {
	return &GetCollectionHTTPHandler{
		config:     config,
		logger:     logger.Named("GetCollectionHTTPHandler"),
		service:    service,
		middleware: middleware,
	}
}
// Pattern returns the Go 1.22+ ServeMux route this handler registers for.
func (*GetCollectionHTTPHandler) Pattern() string {
	return "GET /api/v1/collections/{id}"
}
// ServeHTTP satisfies http.Handler by running Execute through the middleware chain.
func (h *GetCollectionHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	// Apply middleware before handling the request
	h.middleware.Attach(h.Execute)(w, req)
}
// Execute fetches a single collection by the {id} path parameter and writes
// it back as JSON.
func (h *GetCollectionHTTPHandler) Execute(w http.ResponseWriter, r *http.Request) {
	// Set response content type
	w.Header().Set("Content-Type", "application/json")
	ctx := r.Context()

	// Extract collection ID from URL parameters (Go 1.22+ r.PathValue).
	collectionIDStr := r.PathValue("id")
	if collectionIDStr == "" {
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("collection_id", "Collection ID is required"))
		return
	}

	// Convert string ID to a gocql UUID
	collectionID, err := gocql.ParseUUID(collectionIDStr)
	if err != nil {
		h.logger.Error("invalid collection ID format",
			zap.String("collection_id", collectionIDStr),
			zap.Error(err))
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("collection_id", "Invalid collection ID format"))
		return
	}

	resp, err := h.service.Execute(ctx, collectionID)
	if err != nil {
		httperror.RespondWithError(w, r, err)
		return
	}

	// Guard against a nil response up front so the happy path stays flat.
	if resp == nil {
		httperror.RespondWithError(w, r, errors.New("no result"))
		return
	}

	if err := json.NewEncoder(w).Encode(resp); err != nil {
		// Encoding has already begun writing the 200 response; log only.
		h.logger.Error("failed to encode response", zap.Error(err))
		return
	}
}

View file

@ -0,0 +1,124 @@
// monorepo/cloud/backend/internal/maplefile/interface/http/collection/get_filtered.go
package collection
import (
"encoding/json"
"errors"
"net/http"
"strconv"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/middleware"
svc_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/collection"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// GetFilteredCollectionsHTTPHandler serves GET /api/v1/collections/filtered.
// All dependencies are injected via NewGetFilteredCollectionsHTTPHandler.
type GetFilteredCollectionsHTTPHandler struct {
	config     *config.Configuration
	logger     *zap.Logger
	service    svc_collection.GetFilteredCollectionsService
	middleware middleware.Middleware
}
// NewGetFilteredCollectionsHTTPHandler builds the handler with its
// dependencies and a named sub-logger for traceability.
func NewGetFilteredCollectionsHTTPHandler(
	config *config.Configuration,
	logger *zap.Logger,
	service svc_collection.GetFilteredCollectionsService,
	middleware middleware.Middleware,
) *GetFilteredCollectionsHTTPHandler {
	return &GetFilteredCollectionsHTTPHandler{
		config:     config,
		logger:     logger.Named("GetFilteredCollectionsHTTPHandler"),
		service:    service,
		middleware: middleware,
	}
}
// Pattern returns the Go 1.22+ ServeMux route this handler registers for.
func (*GetFilteredCollectionsHTTPHandler) Pattern() string {
	return "GET /api/v1/collections/filtered"
}
// ServeHTTP satisfies http.Handler by running Execute through the middleware chain.
func (h *GetFilteredCollectionsHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	// Apply middleware before handling the request
	h.middleware.Attach(h.Execute)(w, req)
}
// Execute parses the query-string filter options and returns the matching
// collections as JSON.
func (h *GetFilteredCollectionsHTTPHandler) Execute(w http.ResponseWriter, r *http.Request) {
	// Set response content type
	w.Header().Set("Content-Type", "application/json")
	ctx := r.Context()

	// Parse query parameters for filter options
	req, err := h.parseFilterOptions(r)
	if err != nil {
		httperror.RespondWithError(w, r, err)
		return
	}

	resp, err := h.service.Execute(ctx, req)
	if err != nil {
		httperror.RespondWithError(w, r, err)
		return
	}

	// Guard against a nil response up front so the happy path stays flat.
	if resp == nil {
		httperror.RespondWithError(w, r, errors.New("no result"))
		return
	}

	if err := json.NewEncoder(w).Encode(resp); err != nil {
		// Encoding has already begun writing the 200 response; log only.
		h.logger.Error("failed to encode response", zap.Error(err))
		return
	}
}
// parseFilterOptions builds the request DTO from the include_owned and
// include_shared query parameters. Defaults: owned=true, shared=false.
// At least one of the two must end up enabled.
func (h *GetFilteredCollectionsHTTPHandler) parseFilterOptions(r *http.Request) (*svc_collection.GetFilteredCollectionsRequestDTO, error) {
	req := &svc_collection.GetFilteredCollectionsRequestDTO{
		IncludeOwned:  true,  // Default to including owned collections
		IncludeShared: false, // Default to not including shared collections
	}

	// Both parameters share identical parse/log/error behavior, so the logic
	// is factored into parseBoolQuery instead of being duplicated.
	var err error
	if req.IncludeOwned, err = h.parseBoolQuery(r, "include_owned", req.IncludeOwned); err != nil {
		return nil, err
	}
	if req.IncludeShared, err = h.parseBoolQuery(r, "include_shared", req.IncludeShared); err != nil {
		return nil, err
	}

	// Validate that at least one option is enabled
	if !req.IncludeOwned && !req.IncludeShared {
		return nil, httperror.NewForBadRequestWithSingleField("filter_options", "At least one filter option (include_owned or include_shared) must be enabled")
	}

	h.logger.Debug("Parsed filter options",
		zap.Bool("include_owned", req.IncludeOwned),
		zap.Bool("include_shared", req.IncludeShared))
	return req, nil
}

// parseBoolQuery reads an optional boolean query parameter, returning def
// when it is absent and a field-scoped bad-request error when it is malformed.
func (h *GetFilteredCollectionsHTTPHandler) parseBoolQuery(r *http.Request, name string, def bool) (bool, error) {
	raw := r.URL.Query().Get(name)
	if raw == "" {
		return def, nil
	}
	v, err := strconv.ParseBool(raw)
	if err != nil {
		h.logger.Warn("Invalid "+name+" parameter",
			zap.String("value", raw),
			zap.Error(err))
		return def, httperror.NewForBadRequestWithSingleField(name, "Invalid boolean value for "+name+" parameter")
	}
	return v, nil
}

View file

@ -0,0 +1,73 @@
// monorepo/cloud/backend/internal/maplefile/interface/http/collection/list_by_user.go
package collection
import (
"encoding/json"
"errors"
"net/http"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/middleware"
svc_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/collection"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// ListUserCollectionsHTTPHandler serves GET /api/v1/collections.
// All dependencies are injected via NewListUserCollectionsHTTPHandler.
type ListUserCollectionsHTTPHandler struct {
	config     *config.Configuration
	logger     *zap.Logger
	service    svc_collection.ListUserCollectionsService
	middleware middleware.Middleware
}
// NewListUserCollectionsHTTPHandler builds the handler with its dependencies
// and a named sub-logger for traceability.
func NewListUserCollectionsHTTPHandler(
	config *config.Configuration,
	logger *zap.Logger,
	service svc_collection.ListUserCollectionsService,
	middleware middleware.Middleware,
) *ListUserCollectionsHTTPHandler {
	return &ListUserCollectionsHTTPHandler{
		config:     config,
		logger:     logger.Named("ListUserCollectionsHTTPHandler"),
		service:    service,
		middleware: middleware,
	}
}
// Pattern returns the Go 1.22+ ServeMux route this handler registers for.
func (*ListUserCollectionsHTTPHandler) Pattern() string {
	return "GET /api/v1/collections"
}
// ServeHTTP satisfies http.Handler by running Execute through the middleware chain.
func (h *ListUserCollectionsHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	// Apply middleware before handling the request
	h.middleware.Attach(h.Execute)(w, req)
}
// Execute returns every collection belonging to the authenticated user.
func (h *ListUserCollectionsHTTPHandler) Execute(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")

	result, err := h.service.Execute(r.Context())
	if err != nil {
		httperror.RespondWithError(w, r, err)
		return
	}

	if result == nil {
		httperror.RespondWithError(w, r, errors.New("no result"))
		return
	}
	if encErr := json.NewEncoder(w).Encode(result); encErr != nil {
		h.logger.Error("failed to encode response",
			zap.Any("error", encErr))
		httperror.RespondWithError(w, r, encErr)
		return
	}
}

View file

@ -0,0 +1,73 @@
// monorepo/cloud/backend/internal/maplefile/interface/http/collection/list_shared_with_user.go
package collection
import (
"encoding/json"
"errors"
"net/http"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/middleware"
svc_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/collection"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// ListSharedCollectionsHTTPHandler serves GET /api/v1/collections/shared.
// All dependencies are injected via NewListSharedCollectionsHTTPHandler.
type ListSharedCollectionsHTTPHandler struct {
	config     *config.Configuration
	logger     *zap.Logger
	service    svc_collection.ListSharedCollectionsService
	middleware middleware.Middleware
}
// NewListSharedCollectionsHTTPHandler builds the handler with its
// dependencies and a named sub-logger for traceability.
func NewListSharedCollectionsHTTPHandler(
	config *config.Configuration,
	logger *zap.Logger,
	service svc_collection.ListSharedCollectionsService,
	middleware middleware.Middleware,
) *ListSharedCollectionsHTTPHandler {
	return &ListSharedCollectionsHTTPHandler{
		config:     config,
		logger:     logger.Named("ListSharedCollectionsHTTPHandler"),
		service:    service,
		middleware: middleware,
	}
}
// Pattern returns the Go 1.22+ ServeMux route this handler registers for.
func (*ListSharedCollectionsHTTPHandler) Pattern() string {
	return "GET /api/v1/collections/shared"
}
// ServeHTTP satisfies http.Handler by running Execute through the middleware chain.
func (h *ListSharedCollectionsHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	// Apply middleware before handling the request
	h.middleware.Attach(h.Execute)(w, req)
}
// Execute lists collections that other users have shared with the caller.
func (h *ListSharedCollectionsHTTPHandler) Execute(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")

	// Call service
	result, err := h.service.Execute(r.Context())
	if err != nil {
		httperror.RespondWithError(w, r, err)
		return
	}

	if result == nil {
		httperror.RespondWithError(w, r, errors.New("no result"))
		return
	}
	if encErr := json.NewEncoder(w).Encode(result); encErr != nil {
		h.logger.Error("failed to encode response",
			zap.Any("error", encErr))
		httperror.RespondWithError(w, r, encErr)
		return
	}
}

View file

@ -0,0 +1,129 @@
// monorepo/cloud/backend/internal/maplefile/interface/http/collection/move_collection.go
package collection
import (
"bytes"
"context"
"encoding/json"
"errors"
"io"
"net/http"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/middleware"
svc_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/collection"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// MoveCollectionHTTPHandler serves PUT /api/v1/collections/{id}/move.
// All dependencies are injected via NewMoveCollectionHTTPHandler.
type MoveCollectionHTTPHandler struct {
	config     *config.Configuration
	logger     *zap.Logger
	service    svc_collection.MoveCollectionService
	middleware middleware.Middleware
}
// NewMoveCollectionHTTPHandler builds the handler with its dependencies and
// a named sub-logger for traceability.
func NewMoveCollectionHTTPHandler(
	config *config.Configuration,
	logger *zap.Logger,
	service svc_collection.MoveCollectionService,
	middleware middleware.Middleware,
) *MoveCollectionHTTPHandler {
	return &MoveCollectionHTTPHandler{
		config:     config,
		logger:     logger.Named("MoveCollectionHTTPHandler"),
		service:    service,
		middleware: middleware,
	}
}
// Pattern returns the Go 1.22+ ServeMux route this handler registers for.
func (*MoveCollectionHTTPHandler) Pattern() string {
	return "PUT /api/v1/collections/{id}/move"
}
// ServeHTTP satisfies http.Handler by running Execute through the middleware chain.
func (h *MoveCollectionHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	// Apply middleware before handling the request
	h.middleware.Attach(h.Execute)(w, req)
}
// unmarshalRequest decodes the JSON body into a MoveCollectionRequestDTO and
// stamps it with the collection ID taken from the URL path.
func (h *MoveCollectionHTTPHandler) unmarshalRequest(
	ctx context.Context,
	r *http.Request,
	collectionID gocql.UUID,
) (*svc_collection.MoveCollectionRequestDTO, error) {
	defer r.Body.Close()

	// Tee the body so the raw JSON is available for the error log below.
	var rawJSON bytes.Buffer
	var dto svc_collection.MoveCollectionRequestDTO
	if err := json.NewDecoder(io.TeeReader(r.Body, &rawJSON)).Decode(&dto); err != nil {
		h.logger.Error("decoding error",
			zap.Any("err", err),
			zap.String("json", rawJSON.String()),
		)
		return nil, httperror.NewForSingleField(http.StatusBadRequest, "non_field_error", "payload structure is wrong")
	}

	// Set the collection ID from the URL parameter
	dto.CollectionID = collectionID
	return &dto, nil
}
// Execute moves the collection identified by the {id} path parameter to the
// destination described in the JSON body, returning the service response.
func (h *MoveCollectionHTTPHandler) Execute(w http.ResponseWriter, r *http.Request) {
	// Set response content type
	w.Header().Set("Content-Type", "application/json")
	ctx := r.Context()

	// Extract collection ID from URL parameters
	collectionIDStr := r.PathValue("id")
	if collectionIDStr == "" {
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("collection_id", "Collection ID is required"))
		return
	}

	// Convert string ID to a gocql UUID
	collectionID, err := gocql.ParseUUID(collectionIDStr)
	if err != nil {
		h.logger.Error("invalid collection ID format",
			zap.String("collection_id", collectionIDStr),
			zap.Error(err))
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("collection_id", "Invalid collection ID format"))
		return
	}

	req, err := h.unmarshalRequest(ctx, r, collectionID)
	if err != nil {
		httperror.RespondWithError(w, r, err)
		return
	}

	resp, err := h.service.Execute(ctx, req)
	if err != nil {
		httperror.RespondWithError(w, r, err)
		return
	}

	// Guard against a nil response up front so the happy path stays flat.
	if resp == nil {
		httperror.RespondWithError(w, r, errors.New("no result"))
		return
	}

	if err := json.NewEncoder(w).Encode(resp); err != nil {
		// Encoding has already begun writing the 200 response; log only.
		h.logger.Error("failed to encode response", zap.Error(err))
		return
	}
}

View file

@ -0,0 +1,146 @@
package collection
import (
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/middleware"
svc_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/collection"
)
// Wire providers for collection HTTP handlers
// ProvideCreateCollectionHTTPHandler is the Wire provider for CreateCollectionHTTPHandler.
func ProvideCreateCollectionHTTPHandler(
	cfg *config.Configuration,
	logger *zap.Logger,
	service svc_collection.CreateCollectionService,
	mw middleware.Middleware,
) *CreateCollectionHTTPHandler {
	return NewCreateCollectionHTTPHandler(cfg, logger, service, mw)
}
// ProvideGetCollectionHTTPHandler is the Wire provider for GetCollectionHTTPHandler.
func ProvideGetCollectionHTTPHandler(
	cfg *config.Configuration,
	logger *zap.Logger,
	service svc_collection.GetCollectionService,
	mw middleware.Middleware,
) *GetCollectionHTTPHandler {
	return NewGetCollectionHTTPHandler(cfg, logger, service, mw)
}
// ProvideListUserCollectionsHTTPHandler is the Wire provider for ListUserCollectionsHTTPHandler.
func ProvideListUserCollectionsHTTPHandler(
	cfg *config.Configuration,
	logger *zap.Logger,
	service svc_collection.ListUserCollectionsService,
	mw middleware.Middleware,
) *ListUserCollectionsHTTPHandler {
	return NewListUserCollectionsHTTPHandler(cfg, logger, service, mw)
}
// ProvideUpdateCollectionHTTPHandler is the Wire provider for UpdateCollectionHTTPHandler.
func ProvideUpdateCollectionHTTPHandler(
	cfg *config.Configuration,
	logger *zap.Logger,
	service svc_collection.UpdateCollectionService,
	mw middleware.Middleware,
) *UpdateCollectionHTTPHandler {
	return NewUpdateCollectionHTTPHandler(cfg, logger, service, mw)
}
// ProvideSoftDeleteCollectionHTTPHandler is the Wire provider for SoftDeleteCollectionHTTPHandler.
func ProvideSoftDeleteCollectionHTTPHandler(
	cfg *config.Configuration,
	logger *zap.Logger,
	service svc_collection.SoftDeleteCollectionService,
	mw middleware.Middleware,
) *SoftDeleteCollectionHTTPHandler {
	return NewSoftDeleteCollectionHTTPHandler(cfg, logger, service, mw)
}
// ProvideArchiveCollectionHTTPHandler is the Wire provider for ArchiveCollectionHTTPHandler.
func ProvideArchiveCollectionHTTPHandler(
	cfg *config.Configuration,
	logger *zap.Logger,
	service svc_collection.ArchiveCollectionService,
	mw middleware.Middleware,
) *ArchiveCollectionHTTPHandler {
	return NewArchiveCollectionHTTPHandler(cfg, logger, service, mw)
}
// ProvideRestoreCollectionHTTPHandler is the Wire provider for RestoreCollectionHTTPHandler.
func ProvideRestoreCollectionHTTPHandler(
	cfg *config.Configuration,
	logger *zap.Logger,
	service svc_collection.RestoreCollectionService,
	mw middleware.Middleware,
) *RestoreCollectionHTTPHandler {
	return NewRestoreCollectionHTTPHandler(cfg, logger, service, mw)
}
func ProvideListSharedCollectionsHTTPHandler(
cfg *config.Configuration,
logger *zap.Logger,
service svc_collection.ListSharedCollectionsService,
mw middleware.Middleware,
) *ListSharedCollectionsHTTPHandler {
return NewListSharedCollectionsHTTPHandler(cfg, logger, service, mw)
}
func ProvideFindRootCollectionsHTTPHandler(
cfg *config.Configuration,
logger *zap.Logger,
service svc_collection.FindRootCollectionsService,
mw middleware.Middleware,
) *FindRootCollectionsHTTPHandler {
return NewFindRootCollectionsHTTPHandler(cfg, logger, service, mw)
}
func ProvideFindCollectionsByParentHTTPHandler(
cfg *config.Configuration,
logger *zap.Logger,
service svc_collection.FindCollectionsByParentService,
mw middleware.Middleware,
) *FindCollectionsByParentHTTPHandler {
return NewFindCollectionsByParentHTTPHandler(cfg, logger, service, mw)
}
func ProvideCollectionSyncHTTPHandler(
cfg *config.Configuration,
logger *zap.Logger,
service svc_collection.GetCollectionSyncDataService,
mw middleware.Middleware,
) *CollectionSyncHTTPHandler {
return NewCollectionSyncHTTPHandler(cfg, logger, service, mw)
}
func ProvideMoveCollectionHTTPHandler(
cfg *config.Configuration,
logger *zap.Logger,
service svc_collection.MoveCollectionService,
mw middleware.Middleware,
) *MoveCollectionHTTPHandler {
return NewMoveCollectionHTTPHandler(cfg, logger, service, mw)
}
func ProvideGetFilteredCollectionsHTTPHandler(
cfg *config.Configuration,
logger *zap.Logger,
service svc_collection.GetFilteredCollectionsService,
mw middleware.Middleware,
) *GetFilteredCollectionsHTTPHandler {
return NewGetFilteredCollectionsHTTPHandler(cfg, logger, service, mw)
}
func ProvideShareCollectionHTTPHandler(
cfg *config.Configuration,
logger *zap.Logger,
service svc_collection.ShareCollectionService,
mw middleware.Middleware,
) *ShareCollectionHTTPHandler {
return NewShareCollectionHTTPHandler(cfg, logger, service, mw)
}
func ProvideRemoveMemberHTTPHandler(
cfg *config.Configuration,
logger *zap.Logger,
service svc_collection.RemoveMemberService,
mw middleware.Middleware,
) *RemoveMemberHTTPHandler {
return NewRemoveMemberHTTPHandler(cfg, logger, service, mw)
}

View file

@ -0,0 +1,148 @@
// monorepo/cloud/backend/internal/maplefile/interface/http/collection/remove_member.go
package collection
import (
"bytes"
"context"
"encoding/json"
"errors"
"io"
"net/http"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/middleware"
svc_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/collection"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// RemoveMemberHTTPHandler serves requests that remove a single member from a
// collection's sharing list.
type RemoveMemberHTTPHandler struct {
	config     *config.Configuration              // application configuration
	logger     *zap.Logger                        // named logger for this handler
	service    svc_collection.RemoveMemberService // member-removal business logic
	middleware middleware.Middleware              // per-request middleware chain
}

// NewRemoveMemberHTTPHandler constructs the handler and scopes the logger to
// this handler's name.
func NewRemoveMemberHTTPHandler(
	config *config.Configuration,
	logger *zap.Logger,
	service svc_collection.RemoveMemberService,
	middleware middleware.Middleware,
) *RemoveMemberHTTPHandler {
	logger = logger.Named("RemoveMemberHTTPHandler")
	return &RemoveMemberHTTPHandler{
		config:     config,
		logger:     logger,
		service:    service,
		middleware: middleware,
	}
}

// Pattern returns the net/http (Go 1.22+) method-and-path route this handler
// registers under.
func (*RemoveMemberHTTPHandler) Pattern() string {
	return "DELETE /api/v1/collections/{id}/members/{user_id}"
}

// ServeHTTP satisfies http.Handler: it runs the middleware chain and then
// delegates to Execute.
func (h *RemoveMemberHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	// Apply middleware before handling the request
	h.middleware.Attach(h.Execute)(w, req)
}
// unmarshalRequest parses the (optional) JSON body of a remove-member call
// and stamps the collection and recipient IDs taken from the URL path onto
// the resulting DTO.
//
// Because this backs a DELETE endpoint whose identifying data lives entirely
// in the URL, an empty request body is accepted: io.EOF from the JSON
// decoder is treated as "no body supplied" rather than a client error
// (previously an empty body produced a spurious 400). Any other decode
// failure is reported as a 400, with the captured raw payload logged for
// debugging. ctx is currently unused but kept for signature consistency
// with the other handlers in this package.
func (h *RemoveMemberHTTPHandler) unmarshalRequest(
	ctx context.Context,
	r *http.Request,
	collectionID gocql.UUID,
	recipientID gocql.UUID,
) (*svc_collection.RemoveMemberRequestDTO, error) {
	// Initialize our structure which will store the parsed request data
	var requestData svc_collection.RemoveMemberRequestDTO

	defer r.Body.Close()

	var rawJSON bytes.Buffer
	teeReader := io.TeeReader(r.Body, &rawJSON) // TeeReader allows you to read the JSON and capture it

	// Read the JSON string and convert it into our golang struct.
	// io.EOF means the client sent no body at all, which is fine here.
	err := json.NewDecoder(teeReader).Decode(&requestData)
	if err != nil && !errors.Is(err, io.EOF) {
		h.logger.Error("decoding error",
			zap.Any("err", err),
			zap.String("json", rawJSON.String()),
		)
		return nil, httperror.NewForSingleField(http.StatusBadRequest, "non_field_error", "payload structure is wrong")
	}

	// The URL path is authoritative for both IDs; it overrides anything in the body.
	requestData.CollectionID = collectionID
	requestData.RecipientID = recipientID

	return &requestData, nil
}
// Execute validates both path parameters, builds the service DTO from the
// URL and JSON body, invokes the removal service, and writes the JSON
// response. All failures are reported through the httperror package.
func (h *RemoveMemberHTTPHandler) Execute(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	ctx := r.Context()

	// Both identifiers are carried in the URL path.
	rawCollectionID := r.PathValue("id")
	if rawCollectionID == "" {
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("collection_id", "Collection ID is required"))
		return
	}

	rawUserID := r.PathValue("user_id")
	if rawUserID == "" {
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("user_id", "User ID is required"))
		return
	}

	collectionID, err := gocql.ParseUUID(rawCollectionID)
	if err != nil {
		h.logger.Error("invalid collection ID format",
			zap.String("collection_id", rawCollectionID),
			zap.Error(err))
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("collection_id", "Invalid collection ID format"))
		return
	}

	userID, err := gocql.ParseUUID(rawUserID)
	if err != nil {
		h.logger.Error("invalid user ID format",
			zap.String("user_id", rawUserID),
			zap.Error(err))
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("user_id", "Invalid user ID format"))
		return
	}

	req, err := h.unmarshalRequest(ctx, r, collectionID, userID)
	if err != nil {
		httperror.RespondWithError(w, r, err)
		return
	}

	resp, err := h.service.Execute(ctx, req)
	if err != nil {
		httperror.RespondWithError(w, r, err)
		return
	}

	// Guard clause: a successful call must yield a response body.
	if resp == nil {
		httperror.RespondWithError(w, r, errors.New("no result"))
		return
	}

	if err := json.NewEncoder(w).Encode(resp); err != nil {
		h.logger.Error("failed to encode response",
			zap.Any("error", err))
		httperror.RespondWithError(w, r, err)
		return
	}
}

View file

@ -0,0 +1,96 @@
// monorepo/cloud/backend/internal/maplefile/interface/http/collection/restore.go
package collection
import (
"encoding/json"
"errors"
"net/http"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/middleware"
svc_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/collection"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// RestoreCollectionHTTPHandler serves requests that restore a previously
// soft-deleted collection.
type RestoreCollectionHTTPHandler struct {
	config     *config.Configuration                    // application configuration
	logger     *zap.Logger                              // named logger for this handler
	service    svc_collection.RestoreCollectionService  // restore business logic
	middleware middleware.Middleware                    // per-request middleware chain
}

// NewRestoreCollectionHTTPHandler constructs the handler and scopes the
// logger to this handler's name.
func NewRestoreCollectionHTTPHandler(
	config *config.Configuration,
	logger *zap.Logger,
	service svc_collection.RestoreCollectionService,
	middleware middleware.Middleware,
) *RestoreCollectionHTTPHandler {
	logger = logger.Named("RestoreCollectionHTTPHandler")
	return &RestoreCollectionHTTPHandler{
		config:     config,
		logger:     logger,
		service:    service,
		middleware: middleware,
	}
}

// Pattern returns the net/http (Go 1.22+) method-and-path route this handler
// registers under.
func (*RestoreCollectionHTTPHandler) Pattern() string {
	return "PUT /api/v1/collections/{id}/restore"
}

// ServeHTTP satisfies http.Handler: it runs the middleware chain and then
// delegates to Execute.
func (h *RestoreCollectionHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	// Apply middleware before handling the request
	h.middleware.Attach(h.Execute)(w, req)
}
// Execute restores the collection identified by the {id} path parameter and
// writes the service response as JSON.
func (h *RestoreCollectionHTTPHandler) Execute(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	ctx := r.Context()

	// The collection to restore is identified solely by the URL path.
	rawID := r.PathValue("id")
	if rawID == "" {
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("collection_id", "Collection ID is required"))
		return
	}

	// Parse the textual ID into a gocql UUID.
	collectionID, err := gocql.ParseUUID(rawID)
	if err != nil {
		h.logger.Error("invalid collection ID format",
			zap.String("collection_id", rawID),
			zap.Error(err))
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("collection_id", "Invalid collection ID format"))
		return
	}

	resp, err := h.service.Execute(ctx, &svc_collection.RestoreCollectionRequestDTO{
		ID: collectionID,
	})
	if err != nil {
		httperror.RespondWithError(w, r, err)
		return
	}

	// Guard clause: a successful call must yield a response body.
	if resp == nil {
		httperror.RespondWithError(w, r, errors.New("no result"))
		return
	}

	if err := json.NewEncoder(w).Encode(resp); err != nil {
		h.logger.Error("failed to encode response",
			zap.Any("error", err))
		httperror.RespondWithError(w, r, err)
		return
	}
}

View file

@ -0,0 +1,167 @@
// monorepo/cloud/backend/internal/maplefile/interface/http/collection/share_collection.go
package collection
import (
"bytes"
"context"
"encoding/json"
"errors"
"io"
"net/http"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/middleware"
svc_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/collection"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/validation"
)
// ShareCollectionHTTPHandler serves requests that grant another user access
// to a collection (end-to-end encrypted: the client supplies the collection
// key re-encrypted for the recipient).
type ShareCollectionHTTPHandler struct {
	config     *config.Configuration                  // application configuration
	logger     *zap.Logger                            // named logger for this handler
	service    svc_collection.ShareCollectionService  // sharing business logic
	middleware middleware.Middleware                  // per-request middleware chain
}

// NewShareCollectionHTTPHandler constructs the handler and scopes the logger
// to this handler's name.
func NewShareCollectionHTTPHandler(
	config *config.Configuration,
	logger *zap.Logger,
	service svc_collection.ShareCollectionService,
	middleware middleware.Middleware,
) *ShareCollectionHTTPHandler {
	logger = logger.Named("ShareCollectionHTTPHandler")
	return &ShareCollectionHTTPHandler{
		config:     config,
		logger:     logger,
		service:    service,
		middleware: middleware,
	}
}

// Pattern returns the net/http (Go 1.22+) method-and-path route this handler
// registers under.
func (*ShareCollectionHTTPHandler) Pattern() string {
	return "POST /api/v1/collections/{id}/share"
}

// ServeHTTP satisfies http.Handler: it runs the middleware chain and then
// delegates to Execute.
func (h *ShareCollectionHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	// Apply middleware before handling the request
	h.middleware.Attach(h.Execute)(w, req)
}
// unmarshalRequest decodes the share-collection JSON body, emits detailed
// (PII-masked) diagnostics — including an explicit check for a missing
// encrypted_collection_key, a known frontend bug being tracked via these
// logs — and finally overwrites the body's CollectionID with the
// authoritative value from the URL path.
//
// NOTE(review): the raw request body is logged at debug level on the
// missing-key path; confirm debug logging is disabled in production before
// relying on this not to leak payload contents. ctx is currently unused but
// kept for signature consistency with the other handlers in this package.
func (h *ShareCollectionHTTPHandler) unmarshalRequest(
	ctx context.Context,
	r *http.Request,
	collectionID gocql.UUID,
) (*svc_collection.ShareCollectionRequestDTO, error) {
	// Initialize our structure which will store the parsed request data
	var requestData svc_collection.ShareCollectionRequestDTO

	defer r.Body.Close()

	var rawJSON bytes.Buffer
	teeReader := io.TeeReader(r.Body, &rawJSON) // TeeReader allows you to read the JSON and capture it

	// Read the JSON string and convert it into our golang struct
	err := json.NewDecoder(teeReader).Decode(&requestData)
	if err != nil {
		h.logger.Error("JSON decoding error",
			zap.Any("err", err),
			zap.String("raw_json", rawJSON.String()),
		)
		return nil, httperror.NewForSingleField(http.StatusBadRequest, "non_field_error", "payload structure is wrong")
	}

	// Log the decoded request for debugging (PII masked for security)
	h.logger.Debug("decoded share collection request",
		zap.String("collection_id_from_url", collectionID.String()),
		zap.String("collection_id_from_body", requestData.CollectionID.String()),
		zap.String("recipient_id", requestData.RecipientID.String()),
		zap.String("recipient_email", validation.MaskEmail(requestData.RecipientEmail)),
		zap.String("permission_level", requestData.PermissionLevel),
		zap.Int("encrypted_key_length", len(requestData.EncryptedCollectionKey)),
		zap.Bool("share_with_descendants", requestData.ShareWithDescendants))

	// CRITICAL: Check if encrypted collection key is present in the request
	if len(requestData.EncryptedCollectionKey) == 0 {
		h.logger.Error("FRONTEND BUG: encrypted_collection_key is missing from request",
			zap.String("collection_id", collectionID.String()),
			zap.String("recipient_id", requestData.RecipientID.String()),
			zap.String("recipient_email", validation.MaskEmail(requestData.RecipientEmail)))

		// Log raw JSON at debug level only to avoid PII exposure in production logs
		h.logger.Debug("raw request body for debugging",
			zap.String("collection_id", collectionID.String()),
			zap.String("raw_json", rawJSON.String()))
	} else {
		h.logger.Debug("encrypted_collection_key found in request",
			zap.String("collection_id", collectionID.String()),
			zap.String("recipient_id", requestData.RecipientID.String()),
			zap.Int("encrypted_key_length", len(requestData.EncryptedCollectionKey)))
	}

	// Set the collection ID from the URL parameter
	requestData.CollectionID = collectionID

	return &requestData, nil
}
// Execute shares the collection identified by the {id} path parameter with
// the recipient described in the JSON body, then writes the service response
// as JSON.
func (h *ShareCollectionHTTPHandler) Execute(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	ctx := r.Context()

	// The target collection is identified by the URL path.
	rawID := r.PathValue("id")
	if rawID == "" {
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("collection_id", "Collection ID is required"))
		return
	}

	collectionID, err := gocql.ParseUUID(rawID)
	if err != nil {
		h.logger.Error("invalid collection ID format",
			zap.String("collection_id", rawID),
			zap.Error(err))
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("collection_id", "Invalid collection ID format"))
		return
	}

	h.logger.Info("processing share collection request",
		zap.String("collection_id", collectionID.String()),
		zap.String("method", r.Method),
		zap.String("content_type", r.Header.Get("Content-Type")))

	req, err := h.unmarshalRequest(ctx, r, collectionID)
	if err != nil {
		httperror.RespondWithError(w, r, err)
		return
	}

	resp, err := h.service.Execute(ctx, req)
	if err != nil {
		h.logger.Error("share collection service failed",
			zap.String("collection_id", collectionID.String()),
			zap.String("recipient_id", req.RecipientID.String()),
			zap.Error(err))
		httperror.RespondWithError(w, r, err)
		return
	}

	// Guard clause: a successful call must yield a response body.
	if resp == nil {
		httperror.RespondWithError(w, r, errors.New("no result"))
		return
	}

	if err := json.NewEncoder(w).Encode(resp); err != nil {
		h.logger.Error("failed to encode response",
			zap.Any("error", err))
		httperror.RespondWithError(w, r, err)
		return
	}
}

View file

@ -0,0 +1,96 @@
// monorepo/cloud/backend/internal/maplefile/interface/http/collection/softdelete.go
package collection
import (
"encoding/json"
"errors"
"net/http"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/middleware"
svc_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/collection"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// SoftDeleteCollectionHTTPHandler serves requests that soft-delete a
// collection (recoverable via the restore endpoint).
type SoftDeleteCollectionHTTPHandler struct {
	config     *config.Configuration                      // application configuration
	logger     *zap.Logger                                // named logger for this handler
	service    svc_collection.SoftDeleteCollectionService // soft-delete business logic
	middleware middleware.Middleware                      // per-request middleware chain
}

// NewSoftDeleteCollectionHTTPHandler constructs the handler and scopes the
// logger to this handler's name.
func NewSoftDeleteCollectionHTTPHandler(
	config *config.Configuration,
	logger *zap.Logger,
	service svc_collection.SoftDeleteCollectionService,
	middleware middleware.Middleware,
) *SoftDeleteCollectionHTTPHandler {
	logger = logger.Named("SoftDeleteCollectionHTTPHandler")
	return &SoftDeleteCollectionHTTPHandler{
		config:     config,
		logger:     logger,
		service:    service,
		middleware: middleware,
	}
}

// Pattern returns the net/http (Go 1.22+) method-and-path route this handler
// registers under.
func (*SoftDeleteCollectionHTTPHandler) Pattern() string {
	return "DELETE /api/v1/collections/{id}"
}

// ServeHTTP satisfies http.Handler: it runs the middleware chain and then
// delegates to Execute.
func (h *SoftDeleteCollectionHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	// Apply middleware before handling the request
	h.middleware.Attach(h.Execute)(w, req)
}
// Execute soft-deletes the collection identified by the {id} path parameter
// and writes the service response as JSON.
func (h *SoftDeleteCollectionHTTPHandler) Execute(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	ctx := r.Context()

	// The collection to delete is identified solely by the URL path.
	rawID := r.PathValue("id")
	if rawID == "" {
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("collection_id", "Collection ID is required"))
		return
	}

	// Parse the textual ID into a gocql UUID.
	collectionID, err := gocql.ParseUUID(rawID)
	if err != nil {
		h.logger.Error("invalid collection ID format",
			zap.String("collection_id", rawID),
			zap.Error(err))
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("collection_id", "Invalid collection ID format"))
		return
	}

	resp, err := h.service.Execute(ctx, &svc_collection.SoftDeleteCollectionRequestDTO{
		ID: collectionID,
	})
	if err != nil {
		httperror.RespondWithError(w, r, err)
		return
	}

	// Guard clause: a successful call must yield a response body.
	if resp == nil {
		httperror.RespondWithError(w, r, errors.New("no result"))
		return
	}

	if err := json.NewEncoder(w).Encode(resp); err != nil {
		h.logger.Error("failed to encode response",
			zap.Any("error", err))
		httperror.RespondWithError(w, r, err)
		return
	}
}

View file

@ -0,0 +1,127 @@
// monorepo/cloud/backend/internal/maplefile/interface/http/collection/sync.go
package collection
import (
"encoding/json"
"net/http"
"strconv"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
dom_sync "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/middleware"
svc_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/collection"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// CollectionSyncHTTPHandler serves paginated collection-sync data for the
// authenticated user (offline-first client synchronization).
type CollectionSyncHTTPHandler struct {
	config     *config.Configuration                        // application configuration
	logger     *zap.Logger                                  // named logger for this handler
	service    svc_collection.GetCollectionSyncDataService  // sync-data business logic
	middleware middleware.Middleware                        // per-request middleware chain
}

// NewCollectionSyncHTTPHandler constructs the handler and scopes the logger
// to this handler's name.
func NewCollectionSyncHTTPHandler(
	config *config.Configuration,
	logger *zap.Logger,
	service svc_collection.GetCollectionSyncDataService,
	middleware middleware.Middleware,
) *CollectionSyncHTTPHandler {
	logger = logger.Named("CollectionSyncHTTPHandler")
	return &CollectionSyncHTTPHandler{
		config:     config,
		logger:     logger,
		service:    service,
		middleware: middleware,
	}
}

// Pattern returns the net/http (Go 1.22+) method-and-path route this handler
// registers under.
// NOTE(review): registered as POST, yet Execute reads all its inputs from
// the query string and no body — confirm whether GET was intended.
func (*CollectionSyncHTTPHandler) Pattern() string {
	return "POST /api/v1/collections/sync"
}

// ServeHTTP satisfies http.Handler: it runs the middleware chain and then
// delegates to Execute.
func (h *CollectionSyncHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	// Apply middleware before handling the request
	h.middleware.Attach(h.Execute)(w, req)
}
// Execute streams one page of collection sync data for the authenticated
// user. Pagination is controlled by the "limit" query parameter (default
// 1000, hard cap 5000) and an optional JSON-encoded "cursor" parameter.
func (h *CollectionSyncHTTPHandler) Execute(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	ctx := r.Context()

	// The authenticated user is injected into the context by middleware.
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		h.logger.Error("Failed getting user ID from context")
		httperror.RespondWithError(w, r, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error"))
		return
	}

	queryParams := r.URL.Query()

	// Page size: anything unparsable or out of range (<=0 or >5000) falls
	// back to the default with a warning rather than failing the request.
	limit := int64(1000)
	if limitStr := queryParams.Get("limit"); limitStr != "" {
		parsedLimit, parseErr := strconv.ParseInt(limitStr, 10, 64)
		switch {
		case parseErr != nil:
			h.logger.Warn("Failed to parse limit parameter, using default",
				zap.String("limit", limitStr),
				zap.Error(parseErr))
		case parsedLimit > 0 && parsedLimit <= 5000:
			limit = parsedLimit
		default:
			h.logger.Warn("Invalid limit parameter, using default",
				zap.String("limit", limitStr),
				zap.Int64("default", limit))
		}
	}

	// Optional pagination cursor, passed as a JSON document in the query string.
	var cursor *dom_sync.CollectionSyncCursor
	if cursorStr := queryParams.Get("cursor"); cursorStr != "" {
		parsedCursor := new(dom_sync.CollectionSyncCursor)
		if err := json.Unmarshal([]byte(cursorStr), parsedCursor); err != nil {
			h.logger.Error("Failed to parse cursor parameter",
				zap.String("cursor", cursorStr),
				zap.Error(err))
			httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("cursor", "Invalid cursor format"))
			return
		}
		cursor = parsedCursor
	}

	h.logger.Debug("Processing collection sync request",
		zap.Any("user_id", userID),
		zap.Int64("limit", limit),
		zap.Any("cursor", cursor))

	// Fetch one page of sync data ("all" = no state filter).
	response, err := h.service.Execute(ctx, userID, cursor, limit, "all")
	if err != nil {
		h.logger.Error("Failed to get collection sync data",
			zap.Any("user_id", userID),
			zap.Error(err))
		httperror.RespondWithError(w, r, err)
		return
	}

	if err := json.NewEncoder(w).Encode(response); err != nil {
		h.logger.Error("Failed to encode collection sync response",
			zap.Error(err))
		httperror.RespondWithError(w, r, err)
		return
	}

	h.logger.Info("Successfully served collection sync data",
		zap.Any("user_id", userID),
		zap.Int("collections_count", len(response.Collections)),
		zap.Bool("has_more", response.HasMore))
}

View file

@ -0,0 +1,136 @@
// monorepo/cloud/backend/internal/maplefile/interface/http/collection/update.go
package collection
import (
"bytes"
"context"
"encoding/json"
"errors"
"io"
"net/http"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/middleware"
svc_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/collection"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// UpdateCollectionHTTPHandler serves requests that update an existing
// collection's metadata.
type UpdateCollectionHTTPHandler struct {
	config     *config.Configuration                   // application configuration
	logger     *zap.Logger                             // named logger for this handler
	service    svc_collection.UpdateCollectionService  // update business logic
	middleware middleware.Middleware                   // per-request middleware chain
}

// NewUpdateCollectionHTTPHandler constructs the handler and scopes the
// logger to this handler's name.
func NewUpdateCollectionHTTPHandler(
	config *config.Configuration,
	logger *zap.Logger,
	service svc_collection.UpdateCollectionService,
	middleware middleware.Middleware,
) *UpdateCollectionHTTPHandler {
	logger = logger.Named("UpdateCollectionHTTPHandler")
	return &UpdateCollectionHTTPHandler{
		config:     config,
		logger:     logger,
		service:    service,
		middleware: middleware,
	}
}

// Pattern returns the net/http (Go 1.22+) method-and-path route this handler
// registers under.
func (*UpdateCollectionHTTPHandler) Pattern() string {
	return "PUT /api/v1/collections/{id}"
}

// ServeHTTP satisfies http.Handler: it runs the middleware chain and then
// delegates to Execute.
func (h *UpdateCollectionHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	// Apply middleware before handling the request
	h.middleware.Attach(h.Execute)(w, req)
}
// unmarshalRequest decodes the update-collection JSON body and stamps the
// collection ID taken from the URL path onto the resulting DTO. ctx is
// currently unused but kept for signature consistency with the other
// handlers in this package.
func (h *UpdateCollectionHTTPHandler) unmarshalRequest(
	ctx context.Context,
	r *http.Request,
	collectionID gocql.UUID,
) (*svc_collection.UpdateCollectionRequestDTO, error) {
	defer r.Body.Close()

	// Capture the raw payload while decoding so it can be logged on failure.
	var raw bytes.Buffer
	decoder := json.NewDecoder(io.TeeReader(r.Body, &raw))

	var dto svc_collection.UpdateCollectionRequestDTO
	if err := decoder.Decode(&dto); err != nil {
		h.logger.Error("decoding error",
			zap.Any("err", err),
			zap.String("json", raw.String()),
		)
		return nil, httperror.NewForSingleField(http.StatusBadRequest, "non_field_error", "payload structure is wrong")
	}

	// The URL path is authoritative for the collection ID.
	dto.ID = collectionID
	return &dto, nil
}
// Execute updates the collection identified by the {id} path parameter with
// the fields supplied in the JSON body, then writes the service response as
// JSON.
func (h *UpdateCollectionHTTPHandler) Execute(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	ctx := r.Context()

	// Requires a net/http (Go 1.22+) route registered as "PUT /path/{id}".
	rawID := r.PathValue("id")
	if rawID == "" {
		h.logger.Warn("collection_id not found in path parameters or is empty",
			zap.String("path", r.URL.Path),
			zap.String("method", r.Method),
		)
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("collection_id", "Collection ID is required"))
		return
	}

	collectionID, err := gocql.ParseUUID(rawID)
	if err != nil {
		h.logger.Error("invalid collection ID format",
			zap.String("collection_id", rawID),
			zap.Error(err))
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("collection_id", "Invalid collection ID format"))
		return
	}

	req, err := h.unmarshalRequest(ctx, r, collectionID)
	if err != nil {
		httperror.RespondWithError(w, r, err)
		return
	}

	resp, err := h.service.Execute(ctx, req)
	if err != nil {
		httperror.RespondWithError(w, r, err)
		return
	}

	// Guard clause: a successful update must yield a response body.
	if resp == nil {
		h.logger.Error("transaction completed with no result", zap.Any("request_payload", req))
		httperror.RespondWithError(w, r, errors.New("transaction completed with no result"))
		return
	}

	if err := json.NewEncoder(w).Encode(resp); err != nil {
		h.logger.Error("failed to encode response",
			zap.Any("error", err))
		httperror.RespondWithError(w, r, err)
		return
	}
}

View file

@ -0,0 +1,13 @@
package common
import (
"go.uber.org/zap"
)
// Wire providers for common HTTP handlers

// ProvideMapleFileVersionHTTPHandler is the dependency-injection (wire)
// provider for the version endpoint handler; it only needs a logger.
func ProvideMapleFileVersionHTTPHandler(
	logger *zap.Logger,
) *MapleFileVersionHTTPHandler {
	return NewMapleFileVersionHTTPHandler(logger)
}

View file

@ -0,0 +1,34 @@
package common
import (
"encoding/json"
"net/http"
"go.uber.org/zap"
)
// curl http://localhost:8000/maplefile/api/v1/version

// MapleFileVersionHTTPHandler serves the static API version string for the
// MapleFile backend. Unlike the other handlers in this package it takes no
// middleware and reads nothing from the request.
type MapleFileVersionHTTPHandler struct {
	log *zap.Logger // named logger for this handler
}

// NewMapleFileVersionHTTPHandler constructs the handler with a logger scoped
// to this handler's name.
func NewMapleFileVersionHTTPHandler(
	log *zap.Logger,
) *MapleFileVersionHTTPHandler {
	log = log.Named("MapleFileVersionHTTPHandler")
	return &MapleFileVersionHTTPHandler{log}
}

// MapleFileVersionResponseIDO is the JSON payload returned by the version
// endpoint.
type MapleFileVersionResponseIDO struct {
	Version string `json:"version"`
}

// ServeHTTP writes the version as JSON.
func (h *MapleFileVersionHTTPHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// Declare the payload type explicitly, matching every other JSON
	// endpoint in this API (previously the header was never set).
	w.Header().Set("Content-Type", "application/json")

	response := MapleFileVersionResponseIDO{Version: "v1.0.0"}
	if err := json.NewEncoder(w).Encode(response); err != nil {
		// Headers are already on the wire, so the best we can do is log the
		// failure instead of silently discarding it (previously ignored).
		h.log.Error("failed to encode version response", zap.Error(err))
	}
}

// Pattern returns the route this handler registers under (no HTTP method
// restriction, matching the original registration).
func (*MapleFileVersionHTTPHandler) Pattern() string {
	return "/maplefile/api/v1/version"
}

View file

@ -0,0 +1,85 @@
// cloud/maplefile-backend/internal/maplefile/interface/http/dashboard/get.go
package dashboard
import (
"encoding/json"
"net/http"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/middleware"
svc_dashboard "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/dashboard"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// GetDashboardHTTPHandler serves the authenticated user's dashboard data.
type GetDashboardHTTPHandler struct {
	config     *config.Configuration             // application configuration
	logger     *zap.Logger                       // module- and handler-scoped logger
	service    svc_dashboard.GetDashboardService // dashboard business logic
	middleware middleware.Middleware             // per-request middleware chain
}

// NewGetDashboardHTTPHandler constructs the handler, tagging the logger with
// the "maplefile" module field and this handler's name.
func NewGetDashboardHTTPHandler(
	config *config.Configuration,
	logger *zap.Logger,
	service svc_dashboard.GetDashboardService,
	middleware middleware.Middleware,
) *GetDashboardHTTPHandler {
	logger = logger.With(zap.String("module", "maplefile"))
	logger = logger.Named("GetDashboardHTTPHandler")
	return &GetDashboardHTTPHandler{
		config:     config,
		logger:     logger,
		service:    service,
		middleware: middleware,
	}
}

// Pattern returns the net/http (Go 1.22+) method-and-path route this handler
// registers under.
func (*GetDashboardHTTPHandler) Pattern() string {
	return "GET /api/v1/dashboard"
}

// ServeHTTP satisfies http.Handler: it runs the middleware chain and then
// delegates to Execute.
func (h *GetDashboardHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	// Apply middleware before handling the request
	h.middleware.Attach(h.Execute)(w, req)
}
// Execute fetches dashboard data for the current request context and writes
// it as JSON. Service errors are already RFC 9457 problem values and are
// forwarded verbatim; a nil response is converted into an
// internal-server-error problem. The header/WriteHeader/Encode ordering at
// the end is deliberate: once the 200 status line has been written, an
// encode failure can only be logged — headers are already on the wire.
func (h *GetDashboardHTTPHandler) Execute(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	//
	// STEP 1: Execute service
	//

	resp, err := h.service.Execute(ctx)
	if err != nil {
		h.logger.Error("Failed to get dashboard data",
			zap.Error(err))
		// Service returns RFC 9457 errors, use RespondWithError to handle them
		httperror.RespondWithError(w, r, err)
		return
	}

	//
	// STEP 2: Encode and return response
	//

	if resp == nil {
		h.logger.Error("No dashboard data returned from service")
		problem := httperror.NewInternalServerError("Failed to retrieve dashboard data. Please try again.").
			WithInstance(r.URL.Path).
			WithTraceID(httperror.ExtractRequestID(r))
		httperror.RespondWithProblem(w, problem)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)

	if err := json.NewEncoder(w).Encode(resp); err != nil {
		h.logger.Error("Failed to encode dashboard response",
			zap.Error(err))
		// At this point headers are already sent, log the error but can't send RFC 9457 response
		return
	}

	h.logger.Debug("Dashboard data successfully returned")
}

View file

@ -0,0 +1,20 @@
package dashboard
import (
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/middleware"
svc_dashboard "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/dashboard"
)
// Wire provider for dashboard HTTP handlers

// ProvideGetDashboardHTTPHandler is the dependency-injection (wire) provider
// for the dashboard handler: a thin wrapper forwarding shared dependencies
// into the constructor.
func ProvideGetDashboardHTTPHandler(
	cfg *config.Configuration,
	logger *zap.Logger,
	service svc_dashboard.GetDashboardService,
	mw middleware.Middleware,
) *GetDashboardHTTPHandler {
	return NewGetDashboardHTTPHandler(cfg, logger, service, mw)
}

View file

@ -0,0 +1,97 @@
// monorepo/cloud/backend/internal/maplefile/interface/http/file/archive.go
package file
import (
"encoding/json"
"errors"
"net/http"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/middleware"
svc_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/file"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// ArchiveFileHTTPHandler serves requests that archive a single file.
type ArchiveFileHTTPHandler struct {
	config     *config.Configuration       // application configuration
	logger     *zap.Logger                 // named logger for this handler
	service    svc_file.ArchiveFileService // archive business logic
	middleware middleware.Middleware       // per-request middleware chain
}

// NewArchiveFileHTTPHandler constructs the handler and scopes the logger to
// this handler's name.
func NewArchiveFileHTTPHandler(
	config *config.Configuration,
	logger *zap.Logger,
	service svc_file.ArchiveFileService,
	middleware middleware.Middleware,
) *ArchiveFileHTTPHandler {
	logger = logger.Named("ArchiveFileHTTPHandler")
	return &ArchiveFileHTTPHandler{
		config:     config,
		logger:     logger,
		service:    service,
		middleware: middleware,
	}
}

// Pattern returns the net/http (Go 1.22+) method-and-path route this handler
// registers under.
func (*ArchiveFileHTTPHandler) Pattern() string {
	return "PUT /api/v1/file/{id}/archive"
}

// ServeHTTP satisfies http.Handler: it runs the middleware chain and then
// delegates to Execute.
func (h *ArchiveFileHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	// Apply middleware before handling the request
	h.middleware.Attach(h.Execute)(w, req)
}
// Execute archives the file identified by the {id} path parameter and writes
// the service response as JSON.
func (h *ArchiveFileHTTPHandler) Execute(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	ctx := r.Context()

	// The file to archive is identified solely by the URL path.
	rawID := r.PathValue("id")
	if rawID == "" {
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("file_id", "File ID is required"))
		return
	}

	// Parse the textual ID into a gocql UUID.
	fileID, err := gocql.ParseUUID(rawID)
	if err != nil {
		h.logger.Error("invalid file ID format",
			zap.String("file_id", rawID),
			zap.Error(err))
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("file_id", "Invalid file ID format"))
		return
	}

	resp, err := h.service.Execute(ctx, &svc_file.ArchiveFileRequestDTO{
		FileID: fileID,
	})
	if err != nil {
		httperror.RespondWithError(w, r, err)
		return
	}

	// Guard clause: a successful call must yield a response body.
	if resp == nil {
		httperror.RespondWithError(w, r, errors.New("no result"))
		return
	}

	if err := json.NewEncoder(w).Encode(resp); err != nil {
		h.logger.Error("failed to encode response",
			zap.String("file_id", rawID),
			zap.Any("error", err))
		httperror.RespondWithError(w, r, err)
		return
	}
}

View file

@ -0,0 +1,129 @@
// monorepo/cloud/backend/internal/maplefile/interface/http/file/complete_file_upload.go
package file
import (
"bytes"
"context"
"encoding/json"
"errors"
"io"
"net/http"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/middleware"
svc_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/file"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// CompleteFileUploadHTTPHandler exposes the "complete file upload" service
// over HTTP, finalizing a previously created pending file upload.
type CompleteFileUploadHTTPHandler struct {
	config     *config.Configuration
	logger     *zap.Logger
	service    svc_file.CompleteFileUploadService
	middleware middleware.Middleware
}
// NewCompleteFileUploadHTTPHandler wires the handler with its configuration,
// a logger (namespaced to this handler), the service, and the middleware chain.
func NewCompleteFileUploadHTTPHandler(
	config *config.Configuration,
	logger *zap.Logger,
	service svc_file.CompleteFileUploadService,
	middleware middleware.Middleware,
) *CompleteFileUploadHTTPHandler {
	// Scope all log lines from this handler under its own name.
	logger = logger.Named("CompleteFileUploadHTTPHandler")
	return &CompleteFileUploadHTTPHandler{
		config:     config,
		logger:     logger,
		service:    service,
		middleware: middleware,
	}
}
// Pattern returns the method+path route this handler registers under
// (Go 1.22+ ServeMux pattern syntax with an {id} wildcard).
func (*CompleteFileUploadHTTPHandler) Pattern() string {
	return "POST /api/v1/file/{id}/complete"
}
// ServeHTTP applies the middleware chain before delegating to Execute.
func (h *CompleteFileUploadHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	// Apply middleware before handling the request
	h.middleware.Attach(h.Execute)(w, req)
}
// unmarshalRequest decodes the JSON body into a CompleteFileUploadRequestDTO
// and stamps it with the file ID taken from the URL path. Returns a
// bad-request httperror when the payload cannot be decoded.
func (h *CompleteFileUploadHTTPHandler) unmarshalRequest(
	ctx context.Context,
	r *http.Request,
	fileID gocql.UUID,
) (*svc_file.CompleteFileUploadRequestDTO, error) {
	// Initialize our structure which will store the parsed request data
	var requestData svc_file.CompleteFileUploadRequestDTO
	defer r.Body.Close()
	var rawJSON bytes.Buffer
	teeReader := io.TeeReader(r.Body, &rawJSON) // TeeReader allows you to read the JSON and capture it
	// Read the JSON string and convert it into our golang struct
	err := json.NewDecoder(teeReader).Decode(&requestData)
	if err != nil {
		h.logger.Error("decoding error",
			zap.Any("err", err))
		// Log raw JSON at debug level only to avoid PII exposure in production
		// logs (matches create_pending_file.go in this package).
		h.logger.Debug("raw request body for debugging",
			zap.String("json", rawJSON.String()))
		return nil, httperror.NewForSingleField(http.StatusBadRequest, "non_field_error", "payload structure is wrong")
	}
	// Set the file ID from the URL parameter
	requestData.FileID = fileID
	return &requestData, nil
}
// Execute handles POST /api/v1/file/{id}/complete: it validates the file ID,
// decodes the completion payload, invokes the service, and writes the result
// as JSON.
func (h *CompleteFileUploadHTTPHandler) Execute(w http.ResponseWriter, r *http.Request) {
	// Set response content type
	w.Header().Set("Content-Type", "application/json")
	ctx := r.Context()
	// Extract file ID from URL parameters
	fileIDStr := r.PathValue("id")
	if fileIDStr == "" {
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("file_id", "File ID is required"))
		return
	}
	// Convert the string ID into a gocql UUID.
	fileID, err := gocql.ParseUUID(fileIDStr)
	if err != nil {
		h.logger.Error("invalid file ID format",
			zap.String("file_id", fileIDStr),
			zap.Error(err))
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("file_id", "Invalid file ID format"))
		return
	}
	req, err := h.unmarshalRequest(ctx, r, fileID)
	if err != nil {
		httperror.RespondWithError(w, r, err)
		return
	}
	resp, err := h.service.Execute(ctx, req)
	if err != nil {
		httperror.RespondWithError(w, r, err)
		return
	}
	// A nil response with a nil error violates the service contract.
	if resp == nil {
		httperror.RespondWithError(w, r, errors.New("no result"))
		return
	}
	// Encode response. Use zap.Error (not zap.Any) for consistent error logging.
	if err := json.NewEncoder(w).Encode(resp); err != nil {
		h.logger.Error("failed to encode response",
			zap.Error(err))
		httperror.RespondWithError(w, r, err)
		return
	}
}

View file

@ -0,0 +1,108 @@
// monorepo/cloud/backend/internal/maplefile/interface/http/file/create_pending_file.go
package file
import (
"bytes"
"context"
"encoding/json"
"errors"
"io"
"net/http"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/middleware"
svc_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/file"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// CreatePendingFileHTTPHandler exposes the "create pending file" service over
// HTTP; a pending file is later finalized via the upload-completion endpoint.
type CreatePendingFileHTTPHandler struct {
	config     *config.Configuration
	logger     *zap.Logger
	service    svc_file.CreatePendingFileService
	middleware middleware.Middleware
}
// NewCreatePendingFileHTTPHandler wires the handler with its configuration,
// a logger (namespaced to this handler), the service, and the middleware chain.
func NewCreatePendingFileHTTPHandler(
	config *config.Configuration,
	logger *zap.Logger,
	service svc_file.CreatePendingFileService,
	middleware middleware.Middleware,
) *CreatePendingFileHTTPHandler {
	// Scope all log lines from this handler under its own name.
	logger = logger.Named("CreatePendingFileHTTPHandler")
	return &CreatePendingFileHTTPHandler{
		config:     config,
		logger:     logger,
		service:    service,
		middleware: middleware,
	}
}
// Pattern returns the method+path route this handler registers under.
func (*CreatePendingFileHTTPHandler) Pattern() string {
	return "POST /api/v1/files/pending"
}
// ServeHTTP applies the middleware chain before delegating to Execute.
func (h *CreatePendingFileHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	// Apply middleware before handling the request
	h.middleware.Attach(h.Execute)(w, req)
}
// unmarshalRequest parses the JSON request body into a
// CreatePendingFileRequestDTO, returning a bad-request error when the payload
// cannot be decoded.
func (h *CreatePendingFileHTTPHandler) unmarshalRequest(
	ctx context.Context,
	r *http.Request,
) (*svc_file.CreatePendingFileRequestDTO, error) {
	defer r.Body.Close()
	// Mirror the body into a buffer while decoding so the raw payload is
	// still available for debug logging if decoding fails.
	var bodyCopy bytes.Buffer
	decoder := json.NewDecoder(io.TeeReader(r.Body, &bodyCopy))
	requestData := new(svc_file.CreatePendingFileRequestDTO)
	if err := decoder.Decode(requestData); err != nil {
		h.logger.Error("decoding error",
			zap.Any("err", err))
		// Log raw JSON at debug level only to avoid PII exposure in production logs
		h.logger.Debug("raw request body for debugging",
			zap.String("json", bodyCopy.String()))
		return nil, httperror.NewForSingleField(http.StatusBadRequest, "non_field_error", "payload structure is wrong")
	}
	return requestData, nil
}
// Execute handles POST /api/v1/files/pending: it decodes the request payload,
// invokes the create-pending-file service, and writes the result as JSON.
func (h *CreatePendingFileHTTPHandler) Execute(w http.ResponseWriter, r *http.Request) {
	// Set response content type
	w.Header().Set("Content-Type", "application/json")
	ctx := r.Context()
	req, err := h.unmarshalRequest(ctx, r)
	if err != nil {
		httperror.RespondWithError(w, r, err)
		return
	}
	resp, err := h.service.Execute(ctx, req)
	if err != nil {
		httperror.RespondWithError(w, r, err)
		return
	}
	// A nil response with a nil error violates the service contract.
	if resp == nil {
		httperror.RespondWithError(w, r, errors.New("no result"))
		return
	}
	// Encode response. Use zap.Error (not zap.Any) for consistent error logging.
	if err := json.NewEncoder(w).Encode(resp); err != nil {
		h.logger.Error("failed to encode response",
			zap.Error(err))
		httperror.RespondWithError(w, r, err)
		return
	}
}

View file

@ -0,0 +1,91 @@
// monorepo/cloud/backend/internal/maplefile/interface/http/file/get.go
package file
import (
"encoding/json"
"errors"
"net/http"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/middleware"
svc_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/file"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// GetFileHTTPHandler exposes single-file retrieval over HTTP.
type GetFileHTTPHandler struct {
	config     *config.Configuration
	logger     *zap.Logger
	service    svc_file.GetFileService
	middleware middleware.Middleware
}
// NewGetFileHTTPHandler wires the handler with its configuration, a logger
// (namespaced to this handler), the service, and the middleware chain.
func NewGetFileHTTPHandler(
	config *config.Configuration,
	logger *zap.Logger,
	service svc_file.GetFileService,
	middleware middleware.Middleware,
) *GetFileHTTPHandler {
	// Scope all log lines from this handler under its own name.
	logger = logger.Named("GetFileHTTPHandler")
	return &GetFileHTTPHandler{
		config:     config,
		logger:     logger,
		service:    service,
		middleware: middleware,
	}
}
// Pattern returns the method+path route this handler registers under.
func (*GetFileHTTPHandler) Pattern() string {
	return "GET /api/v1/file/{id}"
}
// ServeHTTP applies the middleware chain before delegating to Execute.
func (h *GetFileHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	// Apply middleware before handling the request
	h.middleware.Attach(h.Execute)(w, req)
}
// Execute handles GET /api/v1/file/{id}: it parses the file ID from the URL
// path, fetches the file via the service, and writes it as JSON.
func (h *GetFileHTTPHandler) Execute(w http.ResponseWriter, r *http.Request) {
	// Set response content type
	w.Header().Set("Content-Type", "application/json")
	ctx := r.Context()
	// Extract file ID from URL parameters
	fileIDStr := r.PathValue("id")
	if fileIDStr == "" {
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("file_id", "File ID is required"))
		return
	}
	// Convert the string ID into a gocql UUID.
	fileID, err := gocql.ParseUUID(fileIDStr)
	if err != nil {
		h.logger.Error("invalid file ID format",
			zap.String("file_id", fileIDStr),
			zap.Error(err))
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("file_id", "Invalid file ID format"))
		return
	}
	resp, err := h.service.Execute(ctx, fileID)
	if err != nil {
		httperror.RespondWithError(w, r, err)
		return
	}
	// A nil response with a nil error violates the service contract.
	if resp == nil {
		httperror.RespondWithError(w, r, errors.New("no result"))
		return
	}
	// Encode response. Use zap.Error (not zap.Any) for consistent error logging.
	if err := json.NewEncoder(w).Encode(resp); err != nil {
		h.logger.Error("failed to encode response",
			zap.Error(err))
		httperror.RespondWithError(w, r, err)
		return
	}
}

View file

@ -0,0 +1,134 @@
// monorepo/cloud/backend/internal/maplefile/interface/http/file/get_presigned_download_url.go
package file
import (
"context"
"encoding/json"
"errors"
"net/http"
"strconv"
"time"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/middleware"
svc_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/file"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// GetPresignedDownloadURLHTTPRequestDTO is the HTTP-level request shape.
// NOTE(review): this handler reads url_duration from the query string in
// unmarshalRequest, so this body DTO appears unused here — confirm before removing.
type GetPresignedDownloadURLHTTPRequestDTO struct {
	URLDurationStr string `json:"url_duration,omitempty"` // Optional, duration as string of nanoseconds, defaults to 1 hour
}
// GetPresignedDownloadURLHTTPHandler exposes presigned download-URL
// generation over HTTP.
type GetPresignedDownloadURLHTTPHandler struct {
	config     *config.Configuration
	logger     *zap.Logger
	service    svc_file.GetPresignedDownloadURLService
	middleware middleware.Middleware
}
// NewGetPresignedDownloadURLHTTPHandler wires the handler with its
// configuration, a logger (namespaced to this handler), the service, and the
// middleware chain.
func NewGetPresignedDownloadURLHTTPHandler(
	config *config.Configuration,
	logger *zap.Logger,
	service svc_file.GetPresignedDownloadURLService,
	middleware middleware.Middleware,
) *GetPresignedDownloadURLHTTPHandler {
	// Scope all log lines from this handler under its own name.
	logger = logger.Named("GetPresignedDownloadURLHTTPHandler")
	return &GetPresignedDownloadURLHTTPHandler{
		config:     config,
		logger:     logger,
		service:    service,
		middleware: middleware,
	}
}
// Pattern returns the method+path route this handler registers under.
func (*GetPresignedDownloadURLHTTPHandler) Pattern() string {
	return "GET /api/v1/file/{id}/download-url"
}
// ServeHTTP applies the middleware chain before delegating to Execute.
func (h *GetPresignedDownloadURLHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	// Apply middleware before handling the request
	h.middleware.Attach(h.Execute)(w, req)
}
// unmarshalRequest builds the presigned-download-URL request DTO from the
// "url_duration" query parameter (an integer number of nanoseconds). A
// missing parameter defaults to one hour; non-numeric or non-positive values
// are rejected with a 400, since a zero or negative lifetime would yield an
// already-expired presigned URL.
func (h *GetPresignedDownloadURLHTTPHandler) unmarshalRequest(
	ctx context.Context,
	r *http.Request,
	fileID gocql.UUID,
) (*svc_file.GetPresignedDownloadURLRequestDTO, error) {
	// For GET requests, read from query parameters instead of body
	urlDurationStr := r.URL.Query().Get("url_duration")
	// Default URL lifetime when not provided.
	urlDuration := 1 * time.Hour
	if urlDurationStr != "" {
		// Parse the string to int64 (nanoseconds)
		durationNanos, err := strconv.ParseInt(urlDurationStr, 10, 64)
		if err != nil {
			return nil, httperror.NewForSingleField(http.StatusBadRequest, "url_duration", "Invalid duration format")
		}
		if durationNanos <= 0 {
			// Reject non-positive lifetimes instead of passing them to the
			// storage backend.
			return nil, httperror.NewForSingleField(http.StatusBadRequest, "url_duration", "Duration must be positive")
		}
		urlDuration = time.Duration(durationNanos)
	}
	// Convert to service DTO
	return &svc_file.GetPresignedDownloadURLRequestDTO{
		FileID:      fileID,
		URLDuration: urlDuration,
	}, nil
}
// Execute handles GET /api/v1/file/{id}/download-url: it validates the file
// ID, builds the request DTO from query parameters, invokes the service, and
// writes the presigned URL response as JSON.
func (h *GetPresignedDownloadURLHTTPHandler) Execute(w http.ResponseWriter, r *http.Request) {
	// Set response content type
	w.Header().Set("Content-Type", "application/json")
	ctx := r.Context()
	// Extract file ID from URL parameters
	fileIDStr := r.PathValue("id")
	if fileIDStr == "" {
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("file_id", "File ID is required"))
		return
	}
	// Convert the string ID into a gocql UUID.
	fileID, err := gocql.ParseUUID(fileIDStr)
	if err != nil {
		h.logger.Error("invalid file ID format",
			zap.String("file_id", fileIDStr),
			zap.Error(err))
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("file_id", "Invalid file ID format"))
		return
	}
	req, err := h.unmarshalRequest(ctx, r, fileID)
	if err != nil {
		httperror.RespondWithError(w, r, err)
		return
	}
	resp, err := h.service.Execute(ctx, req)
	if err != nil {
		httperror.RespondWithError(w, r, err)
		return
	}
	// A nil response with a nil error violates the service contract.
	if resp == nil {
		httperror.RespondWithError(w, r, errors.New("no result"))
		return
	}
	// Encode response. Use zap.Error (not zap.Any) for consistent error logging.
	if err := json.NewEncoder(w).Encode(resp); err != nil {
		h.logger.Error("failed to encode response",
			zap.Error(err))
		httperror.RespondWithError(w, r, err)
		return
	}
}

View file

@ -0,0 +1,152 @@
// monorepo/cloud/backend/internal/maplefile/interface/http/file/get_presigned_upload_url.go
package file
import (
"bytes"
"context"
"encoding/json"
"errors"
"io"
"net/http"
"strconv"
"time"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/middleware"
svc_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/file"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// GetPresignedUploadURLHTTPRequestDTO is the optional JSON body accepted by
// the upload-URL endpoint.
type GetPresignedUploadURLHTTPRequestDTO struct {
	URLDurationStr string `json:"url_duration,omitempty"` // Optional, duration as string of nanoseconds, defaults to 1 hour
}
// GetPresignedUploadURLHTTPHandler exposes presigned upload-URL generation
// over HTTP.
type GetPresignedUploadURLHTTPHandler struct {
	config     *config.Configuration
	logger     *zap.Logger
	service    svc_file.GetPresignedUploadURLService
	middleware middleware.Middleware
}
// NewGetPresignedUploadURLHTTPHandler wires the handler with its
// configuration, a logger (namespaced to this handler), the service, and the
// middleware chain.
func NewGetPresignedUploadURLHTTPHandler(
	config *config.Configuration,
	logger *zap.Logger,
	service svc_file.GetPresignedUploadURLService,
	middleware middleware.Middleware,
) *GetPresignedUploadURLHTTPHandler {
	// Scope all log lines from this handler under its own name.
	logger = logger.Named("GetPresignedUploadURLHTTPHandler")
	return &GetPresignedUploadURLHTTPHandler{
		config:     config,
		logger:     logger,
		service:    service,
		middleware: middleware,
	}
}
// Pattern returns the method+path route this handler registers under.
// NOTE(review): registered as GET but unmarshalRequest reads a JSON body —
// unusual for GET; confirm clients actually send a body here.
func (*GetPresignedUploadURLHTTPHandler) Pattern() string {
	return "GET /api/v1/file/{id}/upload-url"
}
// ServeHTTP applies the middleware chain before delegating to Execute.
func (h *GetPresignedUploadURLHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	// Apply middleware before handling the request
	h.middleware.Attach(h.Execute)(w, req)
}
// unmarshalRequest builds the presigned-upload-URL request DTO. The route is
// registered as GET (see Pattern), so an absent/empty body is legal and
// yields the default one-hour URL duration; a present body may override it
// with a nanosecond count in "url_duration". Non-numeric or non-positive
// durations are rejected with a 400.
func (h *GetPresignedUploadURLHTTPHandler) unmarshalRequest(
	ctx context.Context,
	r *http.Request,
	fileID gocql.UUID,
) (*svc_file.GetPresignedUploadURLRequestDTO, error) {
	// Initialize our structure which will store the parsed request data
	var httpRequestData GetPresignedUploadURLHTTPRequestDTO
	defer r.Body.Close()
	var rawJSON bytes.Buffer
	teeReader := io.TeeReader(r.Body, &rawJSON) // TeeReader allows you to read the JSON and capture it
	err := json.NewDecoder(teeReader).Decode(&httpRequestData)
	if err != nil && !errors.Is(err, io.EOF) {
		// io.EOF means "no body at all", which is expected for a GET request
		// and falls through to the defaults; anything else is malformed JSON.
		h.logger.Error("decoding error",
			zap.Any("err", err))
		// Log raw JSON at debug level only to avoid PII exposure in production
		// logs (matches create_pending_file.go in this package).
		h.logger.Debug("raw request body for debugging",
			zap.String("json", rawJSON.String()))
		return nil, httperror.NewForSingleField(http.StatusBadRequest, "non_field_error", "payload structure is wrong")
	}
	// Default URL lifetime when not provided.
	urlDuration := 1 * time.Hour
	if httpRequestData.URLDurationStr != "" {
		// Parse the string to int64 (nanoseconds)
		durationNanos, parseErr := strconv.ParseInt(httpRequestData.URLDurationStr, 10, 64)
		if parseErr != nil {
			return nil, httperror.NewForSingleField(http.StatusBadRequest, "url_duration", "Invalid duration format")
		}
		if durationNanos <= 0 {
			// A zero or negative lifetime would produce an unusable presigned URL.
			return nil, httperror.NewForSingleField(http.StatusBadRequest, "url_duration", "Duration must be positive")
		}
		urlDuration = time.Duration(durationNanos)
	}
	// Convert to service DTO
	return &svc_file.GetPresignedUploadURLRequestDTO{
		FileID:      fileID,
		URLDuration: urlDuration,
	}, nil
}
// Execute handles GET /api/v1/file/{id}/upload-url: it validates the file ID,
// builds the request DTO, invokes the service, and writes the presigned URL
// response as JSON.
func (h *GetPresignedUploadURLHTTPHandler) Execute(w http.ResponseWriter, r *http.Request) {
	// Set response content type
	w.Header().Set("Content-Type", "application/json")
	ctx := r.Context()
	// Extract file ID from URL parameters
	fileIDStr := r.PathValue("id")
	if fileIDStr == "" {
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("file_id", "File ID is required"))
		return
	}
	// Convert the string ID into a gocql UUID.
	fileID, err := gocql.ParseUUID(fileIDStr)
	if err != nil {
		h.logger.Error("invalid file ID format",
			zap.String("file_id", fileIDStr),
			zap.Error(err))
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("file_id", "Invalid file ID format"))
		return
	}
	req, err := h.unmarshalRequest(ctx, r, fileID)
	if err != nil {
		httperror.RespondWithError(w, r, err)
		return
	}
	// Call service
	resp, err := h.service.Execute(ctx, req)
	if err != nil {
		httperror.RespondWithError(w, r, err)
		return
	}
	// A nil response with a nil error violates the service contract.
	if resp == nil {
		httperror.RespondWithError(w, r, errors.New("no result"))
		return
	}
	// Encode response. Use zap.Error (not zap.Any) for consistent error logging.
	if err := json.NewEncoder(w).Encode(resp); err != nil {
		h.logger.Error("failed to encode response",
			zap.Error(err))
		httperror.RespondWithError(w, r, err)
		return
	}
}

View file

@ -0,0 +1,96 @@
// monorepo/cloud/backend/internal/maplefile/interface/http/file/list_by_collection.go
package file
import (
"encoding/json"
"errors"
"net/http"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/middleware"
svc_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/file"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// ListFilesByCollectionHTTPHandler exposes listing a collection's files over HTTP.
type ListFilesByCollectionHTTPHandler struct {
	config     *config.Configuration
	logger     *zap.Logger
	service    svc_file.ListFilesByCollectionService
	middleware middleware.Middleware
}
// NewListFilesByCollectionHTTPHandler wires the handler with its
// configuration, a logger (namespaced to this handler), the service, and the
// middleware chain.
func NewListFilesByCollectionHTTPHandler(
	config *config.Configuration,
	logger *zap.Logger,
	service svc_file.ListFilesByCollectionService,
	middleware middleware.Middleware,
) *ListFilesByCollectionHTTPHandler {
	// Scope all log lines from this handler under its own name.
	logger = logger.Named("ListFilesByCollectionHTTPHandler")
	return &ListFilesByCollectionHTTPHandler{
		config:     config,
		logger:     logger,
		service:    service,
		middleware: middleware,
	}
}
// Pattern returns the method+path route this handler registers under.
func (*ListFilesByCollectionHTTPHandler) Pattern() string {
	return "GET /api/v1/collection/{collection_id}/files"
}
// ServeHTTP applies the middleware chain before delegating to Execute.
func (h *ListFilesByCollectionHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	// Apply middleware before handling the request
	h.middleware.Attach(h.Execute)(w, req)
}
// Execute handles GET /api/v1/collection/{collection_id}/files: it validates
// the collection ID, invokes the listing service, and writes the result as
// JSON.
func (h *ListFilesByCollectionHTTPHandler) Execute(w http.ResponseWriter, r *http.Request) {
	// Set response content type
	w.Header().Set("Content-Type", "application/json")
	ctx := r.Context()
	// Extract collection ID from URL parameters
	collectionIDStr := r.PathValue("collection_id")
	if collectionIDStr == "" {
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("collection_id", "Collection ID is required"))
		return
	}
	// Convert the string ID into a gocql UUID.
	collectionID, err := gocql.ParseUUID(collectionIDStr)
	if err != nil {
		h.logger.Error("invalid collection ID format",
			zap.String("collection_id", collectionIDStr),
			zap.Error(err))
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("collection_id", "Invalid collection ID format"))
		return
	}
	// Create request DTO
	req := &svc_file.ListFilesByCollectionRequestDTO{
		CollectionID: collectionID,
	}
	resp, err := h.service.Execute(ctx, req)
	if err != nil {
		httperror.RespondWithError(w, r, err)
		return
	}
	// A nil response with a nil error violates the service contract.
	if resp == nil {
		httperror.RespondWithError(w, r, errors.New("no result"))
		return
	}
	// Encode response. Use zap.Error (not zap.Any) for consistent error logging.
	if err := json.NewEncoder(w).Encode(resp); err != nil {
		h.logger.Error("failed to encode response",
			zap.Error(err))
		httperror.RespondWithError(w, r, err)
		return
	}
}

View file

@ -0,0 +1,106 @@
// cloud/maplefile-backend/internal/maplefile/interface/http/file/list_recent_files.go
package file
import (
"encoding/json"
"net/http"
"strconv"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/middleware"
file_service "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/file"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// ListRecentFilesHTTPHandler exposes paginated listing of recently modified
// files over HTTP.
type ListRecentFilesHTTPHandler struct {
	config                 *config.Configuration
	logger                 *zap.Logger
	listRecentFilesService file_service.ListRecentFilesService
	middleware             middleware.Middleware
}
// NewListRecentFilesHTTPHandler wires the handler with its configuration, a
// logger (namespaced to this handler), the service, and the middleware chain.
func NewListRecentFilesHTTPHandler(
	config *config.Configuration,
	logger *zap.Logger,
	listRecentFilesService file_service.ListRecentFilesService,
	middleware middleware.Middleware,
) *ListRecentFilesHTTPHandler {
	// Scope all log lines from this handler under its own name.
	logger = logger.Named("ListRecentFilesHTTPHandler")
	return &ListRecentFilesHTTPHandler{
		config:                 config,
		logger:                 logger,
		listRecentFilesService: listRecentFilesService,
		middleware:             middleware,
	}
}
// Pattern returns the method+path route this handler registers under.
func (*ListRecentFilesHTTPHandler) Pattern() string {
	return "GET /api/v1/files/recent"
}
// ServeHTTP applies the middleware chain before delegating to Execute.
func (h *ListRecentFilesHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	// Apply middleware before handling the request
	h.middleware.Attach(h.Execute)(w, req)
}
// Execute handles GET /api/v1/files/recent. Query parameters: "limit"
// (default 30, accepted range 1..100 — anything else keeps the default with a
// warning) and an optional opaque "cursor" for pagination.
func (h *ListRecentFilesHTTPHandler) Execute(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	ctx := r.Context()
	query := r.URL.Query()
	// Resolve the page size, falling back to the default on any bad input.
	limit := int64(30)
	if rawLimit := query.Get("limit"); rawLimit != "" {
		parsed, parseErr := strconv.ParseInt(rawLimit, 10, 64)
		switch {
		case parseErr != nil:
			h.logger.Warn("Failed to parse limit parameter, using default",
				zap.String("limit", rawLimit),
				zap.Error(parseErr))
		case parsed > 0 && parsed <= 100:
			limit = parsed
		default:
			h.logger.Warn("Invalid limit parameter, using default",
				zap.String("limit", rawLimit),
				zap.Int64("default", limit))
		}
	}
	// An empty cursor means "first page"; the service receives nil in that case.
	var cursor *string
	if rawCursor := query.Get("cursor"); rawCursor != "" {
		cursor = &rawCursor
	}
	h.logger.Debug("Processing recent files request",
		zap.Int64("limit", limit),
		zap.Any("cursor", cursor))
	response, err := h.listRecentFilesService.Execute(ctx, cursor, limit)
	if err != nil {
		h.logger.Error("Failed to get recent files",
			zap.Error(err))
		httperror.RespondWithError(w, r, err)
		return
	}
	if encodeErr := json.NewEncoder(w).Encode(response); encodeErr != nil {
		h.logger.Error("Failed to encode recent files response",
			zap.Error(encodeErr))
		httperror.RespondWithError(w, r, encodeErr)
		return
	}
	h.logger.Info("Successfully served recent files",
		zap.Int("files_count", len(response.Files)),
		zap.Bool("has_more", response.HasMore),
		zap.Any("next_cursor", response.NextCursor))
}

View file

@ -0,0 +1,146 @@
// monorepo/cloud/backend/internal/maplefile/interface/http/file/list_sync.go
package file
import (
"encoding/json"
"net/http"
"strconv"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
dom_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/file"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/middleware"
file_service "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/file"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// FileSyncHTTPHandler exposes cursor-based file synchronization data for the
// authenticated user over HTTP.
type FileSyncHTTPHandler struct {
	config          *config.Configuration
	logger          *zap.Logger
	fileSyncService file_service.ListFileSyncDataService
	middleware      middleware.Middleware
}
// NewFileSyncHTTPHandler wires the handler with its configuration, a logger
// (namespaced to this handler), the sync service, and the middleware chain.
func NewFileSyncHTTPHandler(
	config *config.Configuration,
	logger *zap.Logger,
	fileSyncService file_service.ListFileSyncDataService,
	middleware middleware.Middleware,
) *FileSyncHTTPHandler {
	// Scope all log lines from this handler under its own name.
	logger = logger.Named("FileSyncHTTPHandler")
	return &FileSyncHTTPHandler{
		config:          config,
		logger:          logger,
		fileSyncService: fileSyncService,
		middleware:      middleware,
	}
}
// Pattern returns the method+path route this handler registers under.
func (*FileSyncHTTPHandler) Pattern() string {
	return "POST /api/v1/files/sync"
}
// ServeHTTP applies the middleware chain before delegating to Execute.
func (h *FileSyncHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	// Apply middleware before handling the request
	h.middleware.Attach(h.Execute)(w, req)
}
// Execute handles POST /api/v1/files/sync: it resolves the authenticated user
// from the request context, pages through file sync records using an optional
// JSON-encoded cursor and a "limit" query parameter (default 5000, max
// 10000), and writes the page as JSON.
func (h *FileSyncHTTPHandler) Execute(w http.ResponseWriter, r *http.Request) {
	// Set response content type
	w.Header().Set("Content-Type", "application/json")
	ctx := r.Context()
	// Get user ID from context (populated by the auth middleware).
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		h.logger.Error("Failed getting user ID from context")
		httperror.RespondWithError(w, r, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error"))
		return
	}
	// Parse query parameters
	queryParams := r.URL.Query()
	// Parse limit parameter (default: 5000, max: 10000); invalid values fall
	// back to the default with a warning rather than failing the request.
	limit := int64(5000)
	if limitStr := queryParams.Get("limit"); limitStr != "" {
		if parsedLimit, err := strconv.ParseInt(limitStr, 10, 64); err == nil {
			if parsedLimit > 0 && parsedLimit <= 10000 {
				limit = parsedLimit
			} else {
				h.logger.Warn("Invalid limit parameter, using default",
					zap.String("limit", limitStr),
					zap.Int64("default", limit))
			}
		} else {
			h.logger.Warn("Failed to parse limit parameter, using default",
				zap.String("limit", limitStr),
				zap.Error(err))
		}
	}
	// Parse the optional cursor: a JSON-encoded FileSyncCursor.
	var cursor *dom_file.FileSyncCursor
	if cursorStr := queryParams.Get("cursor"); cursorStr != "" {
		var parsedCursor dom_file.FileSyncCursor
		if err := json.Unmarshal([]byte(cursorStr), &parsedCursor); err != nil {
			h.logger.Error("Failed to parse cursor parameter",
				zap.String("cursor", cursorStr),
				zap.Error(err))
			httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("cursor", "Invalid cursor format"))
			return
		}
		cursor = &parsedCursor
	}
	h.logger.Debug("Processing file sync request",
		zap.Any("user_id", userID),
		zap.Int64("limit", limit),
		zap.Any("cursor", cursor))
	// Call service to get sync data
	response, err := h.fileSyncService.Execute(ctx, cursor, limit)
	if err != nil {
		h.logger.Error("Failed to get file sync data",
			zap.Any("user_id", userID),
			zap.Error(err))
		httperror.RespondWithError(w, r, err)
		return
	}
	// Per-item field dump for debugging. Guarded so the O(n) zap field
	// construction (up to `limit` items per request) is skipped entirely when
	// debug logging is disabled.
	if h.logger.Core().Enabled(zap.DebugLevel) {
		h.logger.Debug("File sync response validation",
			zap.Any("user_id", userID),
			zap.Int("files_count", len(response.Files)))
		for i, item := range response.Files {
			h.logger.Debug("File sync response item",
				zap.Int("index", i),
				zap.String("file_id", item.ID.String()),
				zap.String("collection_id", item.CollectionID.String()),
				zap.Uint64("version", item.Version),
				zap.Time("modified_at", item.ModifiedAt),
				zap.String("state", item.State),
				zap.Uint64("tombstone_version", item.TombstoneVersion),
				zap.Time("tombstone_expiry", item.TombstoneExpiry),
				zap.Int64("encrypted_file_size_in_bytes", item.EncryptedFileSizeInBytes))
		}
	}
	// Encode and return response
	if err := json.NewEncoder(w).Encode(response); err != nil {
		h.logger.Error("Failed to encode file sync response",
			zap.Error(err))
		httperror.RespondWithError(w, r, err)
		return
	}
	h.logger.Info("Successfully served file sync data",
		zap.Any("user_id", userID),
		zap.Int("files_count", len(response.Files)),
		zap.Bool("has_more", response.HasMore),
		zap.Any("next_cursor", response.NextCursor))
}

View file

@ -0,0 +1,136 @@
package file
import (
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/middleware"
svc_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/file"
)
// Wire providers for file HTTP handlers. Each provider is a thin factory that
// forwards its dependencies to the corresponding New* constructor, giving the
// DI container a uniform constructor signature per handler.
// ProvideCreatePendingFileHTTPHandler constructs the create-pending-file handler.
func ProvideCreatePendingFileHTTPHandler(
	cfg *config.Configuration,
	logger *zap.Logger,
	service svc_file.CreatePendingFileService,
	mw middleware.Middleware,
) *CreatePendingFileHTTPHandler {
	return NewCreatePendingFileHTTPHandler(cfg, logger, service, mw)
}
// ProvideGetPresignedUploadURLHTTPHandler constructs the upload-URL handler.
func ProvideGetPresignedUploadURLHTTPHandler(
	cfg *config.Configuration,
	logger *zap.Logger,
	service svc_file.GetPresignedUploadURLService,
	mw middleware.Middleware,
) *GetPresignedUploadURLHTTPHandler {
	return NewGetPresignedUploadURLHTTPHandler(cfg, logger, service, mw)
}
// ProvideCompleteFileUploadHTTPHandler constructs the upload-completion handler.
func ProvideCompleteFileUploadHTTPHandler(
	cfg *config.Configuration,
	logger *zap.Logger,
	service svc_file.CompleteFileUploadService,
	mw middleware.Middleware,
) *CompleteFileUploadHTTPHandler {
	return NewCompleteFileUploadHTTPHandler(cfg, logger, service, mw)
}
// ProvideGetFileHTTPHandler constructs the single-file retrieval handler.
func ProvideGetFileHTTPHandler(
	cfg *config.Configuration,
	logger *zap.Logger,
	service svc_file.GetFileService,
	mw middleware.Middleware,
) *GetFileHTTPHandler {
	return NewGetFileHTTPHandler(cfg, logger, service, mw)
}
// ProvideGetPresignedDownloadURLHTTPHandler constructs the download-URL handler.
func ProvideGetPresignedDownloadURLHTTPHandler(
	cfg *config.Configuration,
	logger *zap.Logger,
	service svc_file.GetPresignedDownloadURLService,
	mw middleware.Middleware,
) *GetPresignedDownloadURLHTTPHandler {
	return NewGetPresignedDownloadURLHTTPHandler(cfg, logger, service, mw)
}
// ProvideListFilesByCollectionHTTPHandler constructs the per-collection listing handler.
func ProvideListFilesByCollectionHTTPHandler(
	cfg *config.Configuration,
	logger *zap.Logger,
	service svc_file.ListFilesByCollectionService,
	mw middleware.Middleware,
) *ListFilesByCollectionHTTPHandler {
	return NewListFilesByCollectionHTTPHandler(cfg, logger, service, mw)
}
// ProvideListRecentFilesHTTPHandler constructs the recent-files listing handler.
func ProvideListRecentFilesHTTPHandler(
	cfg *config.Configuration,
	logger *zap.Logger,
	service svc_file.ListRecentFilesService,
	mw middleware.Middleware,
) *ListRecentFilesHTTPHandler {
	return NewListRecentFilesHTTPHandler(cfg, logger, service, mw)
}
// ProvideUpdateFileHTTPHandler constructs the file-update handler.
func ProvideUpdateFileHTTPHandler(
	cfg *config.Configuration,
	logger *zap.Logger,
	service svc_file.UpdateFileService,
	mw middleware.Middleware,
) *UpdateFileHTTPHandler {
	return NewUpdateFileHTTPHandler(cfg, logger, service, mw)
}
// ProvideSoftDeleteFileHTTPHandler constructs the soft-delete handler.
func ProvideSoftDeleteFileHTTPHandler(
	cfg *config.Configuration,
	logger *zap.Logger,
	service svc_file.SoftDeleteFileService,
	mw middleware.Middleware,
) *SoftDeleteFileHTTPHandler {
	return NewSoftDeleteFileHTTPHandler(cfg, logger, service, mw)
}
// ProvideArchiveFileHTTPHandler constructs the archive handler.
func ProvideArchiveFileHTTPHandler(
	cfg *config.Configuration,
	logger *zap.Logger,
	service svc_file.ArchiveFileService,
	mw middleware.Middleware,
) *ArchiveFileHTTPHandler {
	return NewArchiveFileHTTPHandler(cfg, logger, service, mw)
}
// ProvideRestoreFileHTTPHandler constructs the restore handler.
func ProvideRestoreFileHTTPHandler(
	cfg *config.Configuration,
	logger *zap.Logger,
	service svc_file.RestoreFileService,
	mw middleware.Middleware,
) *RestoreFileHTTPHandler {
	return NewRestoreFileHTTPHandler(cfg, logger, service, mw)
}
// ProvideDeleteMultipleFilesHTTPHandler constructs the bulk-delete handler.
func ProvideDeleteMultipleFilesHTTPHandler(
	cfg *config.Configuration,
	logger *zap.Logger,
	service svc_file.DeleteMultipleFilesService,
	mw middleware.Middleware,
) *DeleteMultipleFilesHTTPHandler {
	return NewDeleteMultipleFilesHTTPHandler(cfg, logger, service, mw)
}
// ProvideFileSyncHTTPHandler constructs the file-sync handler.
func ProvideFileSyncHTTPHandler(
	cfg *config.Configuration,
	logger *zap.Logger,
	service svc_file.ListFileSyncDataService,
	mw middleware.Middleware,
) *FileSyncHTTPHandler {
	return NewFileSyncHTTPHandler(cfg, logger, service, mw)
}
// ProvideReportDownloadCompletedHTTPHandler constructs the download-completed
// telemetry handler (no service dependency — see report_download_completed.go).
func ProvideReportDownloadCompletedHTTPHandler(
	cfg *config.Configuration,
	logger *zap.Logger,
	mw middleware.Middleware,
) *ReportDownloadCompletedHTTPHandler {
	return NewReportDownloadCompletedHTTPHandler(cfg, logger, mw)
}

View file

@ -0,0 +1,82 @@
// monorepo/cloud/backend/internal/maplefile/interface/http/file/report_download_completed.go
package file
import (
"encoding/json"
"net/http"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/middleware"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// ReportDownloadCompletedHTTPHandler accepts client-side download-completion
// reports. It has no service dependency; the event is only validated and logged.
type ReportDownloadCompletedHTTPHandler struct {
	config     *config.Configuration
	logger     *zap.Logger
	middleware middleware.Middleware
}
// NewReportDownloadCompletedHTTPHandler wires the handler with its
// configuration, a logger (namespaced to this handler), and the middleware chain.
func NewReportDownloadCompletedHTTPHandler(
	config *config.Configuration,
	logger *zap.Logger,
	middleware middleware.Middleware,
) *ReportDownloadCompletedHTTPHandler {
	// Scope all log lines from this handler under its own name.
	logger = logger.Named("ReportDownloadCompletedHTTPHandler")
	return &ReportDownloadCompletedHTTPHandler{
		config:     config,
		logger:     logger,
		middleware: middleware,
	}
}
// Pattern returns the method+path route this handler registers under.
func (*ReportDownloadCompletedHTTPHandler) Pattern() string {
	return "POST /api/v1/file/{id}/download-completed"
}
// ServeHTTP applies the middleware chain before delegating to Execute.
func (h *ReportDownloadCompletedHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	// Apply middleware before handling the request
	h.middleware.Attach(h.Execute)(w, req)
}
// Execute records a client-reported download completion. The file ID is
// validated for UUID format, the event is logged for analytics/telemetry, and
// a static success payload is returned.
func (h *ReportDownloadCompletedHTTPHandler) Execute(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	fileIDStr := r.PathValue("id")
	if fileIDStr == "" {
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("file_id", "File ID is required"))
		return
	}
	// Only the format matters here; the parsed UUID value itself is unused.
	if _, parseErr := gocql.ParseUUID(fileIDStr); parseErr != nil {
		h.logger.Error("invalid file ID format",
			zap.String("file_id", fileIDStr),
			zap.Error(parseErr))
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("file_id", "Invalid file ID format"))
		return
	}
	h.logger.Debug("download completed reported",
		zap.String("file_id", fileIDStr))
	payload := map[string]interface{}{
		"success": true,
		"message": "Download completion recorded",
	}
	if encodeErr := json.NewEncoder(w).Encode(payload); encodeErr != nil {
		h.logger.Error("failed to encode response",
			zap.String("file_id", fileIDStr),
			zap.Error(encodeErr))
		httperror.RespondWithError(w, r, encodeErr)
		return
	}
}

View file

@ -0,0 +1,97 @@
// monorepo/cloud/backend/internal/maplefile/interface/http/file/restore.go
package file
import (
"encoding/json"
"errors"
"net/http"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/middleware"
svc_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/file"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// RestoreFileHTTPHandler serves PUT /api/v1/file/{id}/restore by delegating
// to the restore-file service.
type RestoreFileHTTPHandler struct {
	config     *config.Configuration
	logger     *zap.Logger
	service    svc_file.RestoreFileService
	middleware middleware.Middleware
}

// NewRestoreFileHTTPHandler builds the handler, giving it a named
// sub-logger so its log lines are easy to attribute.
func NewRestoreFileHTTPHandler(
	config *config.Configuration,
	logger *zap.Logger,
	service svc_file.RestoreFileService,
	middleware middleware.Middleware,
) *RestoreFileHTTPHandler {
	h := &RestoreFileHTTPHandler{
		config:     config,
		logger:     logger.Named("RestoreFileHTTPHandler"),
		service:    service,
		middleware: middleware,
	}
	return h
}

// Pattern reports the HTTP method and route this handler is registered under.
func (*RestoreFileHTTPHandler) Pattern() string {
	return "PUT /api/v1/file/{id}/restore"
}

// ServeHTTP wraps Execute with the configured middleware chain, then
// dispatches the request to the wrapped handler.
func (h *RestoreFileHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	wrapped := h.middleware.Attach(h.Execute)
	wrapped(w, req)
}
// Execute restores the file identified by the {id} path parameter via the
// restore service and writes the service result as JSON. Validation and
// service errors are returned as structured error responses.
func (h *RestoreFileHTTPHandler) Execute(w http.ResponseWriter, r *http.Request) {
	// Set response content type
	w.Header().Set("Content-Type", "application/json")

	ctx := r.Context()

	// Extract file ID from the URL
	fileIDStr := r.PathValue("id")
	if fileIDStr == "" {
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("file_id", "File ID is required"))
		return
	}

	// Parse the path parameter into a Cassandra UUID
	// (previous comment said "ObjectID" — a MongoDB leftover).
	fileID, err := gocql.ParseUUID(fileIDStr)
	if err != nil {
		h.logger.Error("invalid file ID format",
			zap.String("file_id", fileIDStr),
			zap.Error(err))
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("file_id", "Invalid file ID format"))
		return
	}

	// Create request DTO
	dtoReq := &svc_file.RestoreFileRequestDTO{
		FileID: fileID,
	}

	resp, err := h.service.Execute(ctx, dtoReq)
	if err != nil {
		httperror.RespondWithError(w, r, err)
		return
	}

	// Guard against a nil result BEFORE writing anything, so the error
	// response is still deliverable.
	if resp == nil {
		httperror.RespondWithError(w, r, errors.New("no result"))
		return
	}

	if err := json.NewEncoder(w).Encode(resp); err != nil {
		// The 200 status and possibly part of the body have already been
		// written; RespondWithError would corrupt the reply, so log only.
		h.logger.Error("failed to encode response",
			zap.String("file_id", fileIDStr),
			zap.Error(err))
	}
}

View file

@ -0,0 +1,97 @@
// monorepo/cloud/backend/internal/maplefile/interface/http/file/softdelete.go
package file
import (
"encoding/json"
"errors"
"net/http"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/middleware"
svc_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/file"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// SoftDeleteFileHTTPHandler serves DELETE /api/v1/file/{id} by delegating
// to the soft-delete-file service.
type SoftDeleteFileHTTPHandler struct {
	config     *config.Configuration
	logger     *zap.Logger
	service    svc_file.SoftDeleteFileService
	middleware middleware.Middleware
}

// NewSoftDeleteFileHTTPHandler builds the handler, giving it a named
// sub-logger so its log lines are easy to attribute.
func NewSoftDeleteFileHTTPHandler(
	config *config.Configuration,
	logger *zap.Logger,
	service svc_file.SoftDeleteFileService,
	middleware middleware.Middleware,
) *SoftDeleteFileHTTPHandler {
	h := &SoftDeleteFileHTTPHandler{
		config:     config,
		logger:     logger.Named("SoftDeleteFileHTTPHandler"),
		service:    service,
		middleware: middleware,
	}
	return h
}

// Pattern reports the HTTP method and route this handler is registered under.
func (*SoftDeleteFileHTTPHandler) Pattern() string {
	return "DELETE /api/v1/file/{id}"
}

// ServeHTTP wraps Execute with the configured middleware chain, then
// dispatches the request to the wrapped handler.
func (h *SoftDeleteFileHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	wrapped := h.middleware.Attach(h.Execute)
	wrapped(w, req)
}
// Execute soft-deletes the file identified by the {id} path parameter via
// the soft-delete service and writes the service result as JSON. Validation
// and service errors are returned as structured error responses.
func (h *SoftDeleteFileHTTPHandler) Execute(w http.ResponseWriter, r *http.Request) {
	// Set response content type
	w.Header().Set("Content-Type", "application/json")

	ctx := r.Context()

	// Extract file ID from the URL
	fileIDStr := r.PathValue("id")
	if fileIDStr == "" {
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("file_id", "File ID is required"))
		return
	}

	// Parse the path parameter into a Cassandra UUID
	// (previous comment said "ObjectID" — a MongoDB leftover).
	fileID, err := gocql.ParseUUID(fileIDStr)
	if err != nil {
		h.logger.Error("invalid file ID format",
			zap.String("file_id", fileIDStr),
			zap.Error(err))
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("file_id", "Invalid file ID format"))
		return
	}

	// Create request DTO
	dtoReq := &svc_file.SoftDeleteFileRequestDTO{
		FileID: fileID,
	}

	resp, err := h.service.Execute(ctx, dtoReq)
	if err != nil {
		httperror.RespondWithError(w, r, err)
		return
	}

	// Guard against a nil result BEFORE writing anything, so the error
	// response is still deliverable.
	if resp == nil {
		httperror.RespondWithError(w, r, errors.New("no result"))
		return
	}

	if err := json.NewEncoder(w).Encode(resp); err != nil {
		// The 200 status and possibly part of the body have already been
		// written; RespondWithError would corrupt the reply, so log only.
		h.logger.Error("failed to encode response",
			zap.String("file_id", fileIDStr),
			zap.Error(err))
	}
}

View file

@ -0,0 +1,107 @@
// monorepo/cloud/backend/internal/maplefile/interface/http/file/delete_multiple.go
package file
import (
"bytes"
"context"
"encoding/json"
"errors"
"io"
"net/http"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/middleware"
svc_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/file"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// DeleteMultipleFilesHTTPHandler serves POST /api/v1/files/delete-multiple,
// deleting a batch of files in one request via the delete-multiple service.
type DeleteMultipleFilesHTTPHandler struct {
	config     *config.Configuration
	logger     *zap.Logger
	service    svc_file.DeleteMultipleFilesService
	middleware middleware.Middleware
}

// NewDeleteMultipleFilesHTTPHandler builds the handler, giving it a named
// sub-logger so its log lines are easy to attribute.
func NewDeleteMultipleFilesHTTPHandler(
	config *config.Configuration,
	logger *zap.Logger,
	service svc_file.DeleteMultipleFilesService,
	middleware middleware.Middleware,
) *DeleteMultipleFilesHTTPHandler {
	h := &DeleteMultipleFilesHTTPHandler{
		config:     config,
		logger:     logger.Named("DeleteMultipleFilesHTTPHandler"),
		service:    service,
		middleware: middleware,
	}
	return h
}

// Pattern reports the HTTP method and route this handler is registered under.
func (*DeleteMultipleFilesHTTPHandler) Pattern() string {
	return "POST /api/v1/files/delete-multiple"
}

// ServeHTTP wraps Execute with the configured middleware chain, then
// dispatches the request to the wrapped handler.
func (h *DeleteMultipleFilesHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	wrapped := h.middleware.Attach(h.Execute)
	wrapped(w, req)
}
// unmarshalRequest decodes the JSON request body into the batch-delete DTO.
// ctx is accepted for signature consistency with sibling handlers even
// though decoding itself does not use it.
func (h *DeleteMultipleFilesHTTPHandler) unmarshalRequest(
	ctx context.Context,
	r *http.Request,
) (*svc_file.DeleteMultipleFilesRequestDTO, error) {
	// Initialize our structure which will store the parsed request data
	var requestData svc_file.DeleteMultipleFilesRequestDTO

	defer r.Body.Close()

	// TeeReader captures the raw JSON while it is being decoded so the
	// payload can be examined when decoding fails.
	var rawJSON bytes.Buffer
	teeReader := io.TeeReader(r.Body, &rawJSON)

	// Read the JSON string and convert it into our golang struct
	if err := json.NewDecoder(teeReader).Decode(&requestData); err != nil {
		h.logger.Error("decoding error",
			zap.Any("err", err))
		// Log raw JSON at debug level only to avoid PII exposure in
		// production logs (was Error level; now matches update.go's policy).
		h.logger.Debug("raw request body for debugging",
			zap.String("json", rawJSON.String()))
		return nil, httperror.NewForSingleField(http.StatusBadRequest, "non_field_error", "payload structure is wrong")
	}
	return &requestData, nil
}
// Execute decodes the batch-delete request body, invokes the service, and
// writes the service response as JSON. Decode and service errors are
// returned as structured error responses.
func (h *DeleteMultipleFilesHTTPHandler) Execute(w http.ResponseWriter, r *http.Request) {
	// Set response content type
	w.Header().Set("Content-Type", "application/json")

	ctx := r.Context()

	req, err := h.unmarshalRequest(ctx, r)
	if err != nil {
		httperror.RespondWithError(w, r, err)
		return
	}

	resp, err := h.service.Execute(ctx, req)
	if err != nil {
		httperror.RespondWithError(w, r, err)
		return
	}

	// Guard against a nil result BEFORE writing anything, so the error
	// response is still deliverable.
	if resp == nil {
		httperror.RespondWithError(w, r, errors.New("no result"))
		return
	}

	if err := json.NewEncoder(w).Encode(resp); err != nil {
		// The 200 status and possibly part of the body have already been
		// written; RespondWithError would corrupt the reply, so log only.
		h.logger.Error("failed to encode response",
			zap.Error(err))
	}
}

View file

@ -0,0 +1,135 @@
// monorepo/cloud/backend/internal/maplefile/interface/http/file/update.go
package file
import (
"bytes"
"context"
"encoding/json"
"errors"
"io"
"net/http"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/middleware"
svc_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/file"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// UpdateFileHTTPHandler serves PUT /api/v1/file/{id} by delegating to the
// update-file service.
type UpdateFileHTTPHandler struct {
	config     *config.Configuration
	logger     *zap.Logger
	service    svc_file.UpdateFileService
	middleware middleware.Middleware
}

// NewUpdateFileHTTPHandler builds the handler, giving it a named
// sub-logger so its log lines are easy to attribute.
func NewUpdateFileHTTPHandler(
	config *config.Configuration,
	logger *zap.Logger,
	service svc_file.UpdateFileService,
	middleware middleware.Middleware,
) *UpdateFileHTTPHandler {
	h := &UpdateFileHTTPHandler{
		config:     config,
		logger:     logger.Named("UpdateFileHTTPHandler"),
		service:    service,
		middleware: middleware,
	}
	return h
}

// Pattern reports the HTTP method and route this handler is registered under.
func (*UpdateFileHTTPHandler) Pattern() string {
	return "PUT /api/v1/file/{id}"
}

// ServeHTTP wraps Execute with the configured middleware chain, then
// dispatches the request to the wrapped handler.
func (h *UpdateFileHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	wrapped := h.middleware.Attach(h.Execute)
	wrapped(w, req)
}
// unmarshalRequest decodes the JSON body into an UpdateFileRequestDTO and
// stamps it with the file ID taken from the URL path.
func (h *UpdateFileHTTPHandler) unmarshalRequest(
	ctx context.Context,
	r *http.Request,
	fileID gocql.UUID,
) (*svc_file.UpdateFileRequestDTO, error) {
	defer r.Body.Close()

	// Capture the raw bytes while decoding so the payload can be inspected
	// when the decode fails.
	var captured bytes.Buffer
	body := io.TeeReader(r.Body, &captured)

	var requestData svc_file.UpdateFileRequestDTO
	if decodeErr := json.NewDecoder(body).Decode(&requestData); decodeErr != nil {
		h.logger.Error("decoding error",
			zap.Any("err", decodeErr))
		// Log raw JSON at debug level only to avoid PII exposure in production logs
		h.logger.Debug("raw request body for debugging",
			zap.String("json", captured.String()))
		return nil, httperror.NewForSingleField(http.StatusBadRequest, "non_field_error", "payload structure is wrong")
	}

	// The ID comes from the route, not the request body.
	requestData.ID = fileID
	return &requestData, nil
}
// Execute handles PUT /api/v1/file/{id}: validates the path ID, decodes the
// body, runs the update service, and writes the result as JSON. Validation,
// decode, and service errors are returned as structured error responses.
func (h *UpdateFileHTTPHandler) Execute(w http.ResponseWriter, r *http.Request) {
	// Set response content type
	w.Header().Set("Content-Type", "application/json")

	ctx := r.Context()

	// Extract file ID from the URL path parameter
	fileIDStr := r.PathValue("id")
	if fileIDStr == "" {
		h.logger.Warn("file_id not found in path parameters or is empty",
			zap.String("path", r.URL.Path),
			zap.String("method", r.Method),
		)
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("file_id", "File ID is required"))
		return
	}

	// Parse the path parameter into a Cassandra UUID
	// (previous comment said "ObjectID" — a MongoDB leftover).
	fileID, err := gocql.ParseUUID(fileIDStr)
	if err != nil {
		h.logger.Error("invalid file ID format",
			zap.String("file_id", fileIDStr),
			zap.Error(err))
		httperror.RespondWithError(w, r, httperror.NewForBadRequestWithSingleField("file_id", "Invalid file ID format"))
		return
	}

	req, err := h.unmarshalRequest(ctx, r, fileID)
	if err != nil {
		httperror.RespondWithError(w, r, err)
		return
	}

	resp, err := h.service.Execute(ctx, req)
	if err != nil {
		httperror.RespondWithError(w, r, err)
		return
	}

	// Guard against a nil result BEFORE writing anything, so the error
	// response is still deliverable. Log only the file ID — the previous
	// code logged the whole request payload, contradicting this file's
	// debug-only-raw-payload PII policy.
	if resp == nil {
		err := errors.New("transaction completed with no result")
		h.logger.Error("transaction completed with no result",
			zap.String("file_id", fileIDStr))
		httperror.RespondWithError(w, r, err)
		return
	}

	if err := json.NewEncoder(w).Encode(resp); err != nil {
		// The 200 status and possibly part of the body have already been
		// written; RespondWithError would corrupt the reply, so log only.
		h.logger.Error("failed to encode response",
			zap.String("file_id", fileIDStr),
			zap.Error(err))
	}
}

View file

@ -0,0 +1,258 @@
package http
import (
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/blockedemail"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/collection"
commonhttp "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/common"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/dashboard"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/file"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/inviteemail"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/me"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/tag"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/interface/http/user"
)
// Handlers aggregates every HTTP handler exposed by the maplefile backend
// so the full set can be injected and registered as one unit. Each field is
// a fully-constructed handler supplied by NewHandlers; this struct adds no
// behavior of its own.
type Handlers struct {
	// Common handlers
	Version *commonhttp.MapleFileVersionHTTPHandler
	// Dashboard handlers
	GetDashboard *dashboard.GetDashboardHTTPHandler
	// Me handlers (operations on the authenticated user's own account)
	GetMe    *me.GetMeHTTPHandler
	UpdateMe *me.PutUpdateMeHTTPHandler
	DeleteMe *me.DeleteMeHTTPHandler
	// User handlers
	UserPublicLookup *user.UserPublicLookupHTTPHandler
	// Blocked Email handlers
	CreateBlockedEmail *blockedemail.CreateBlockedEmailHTTPHandler
	ListBlockedEmails  *blockedemail.ListBlockedEmailsHTTPHandler
	DeleteBlockedEmail *blockedemail.DeleteBlockedEmailHTTPHandler
	// Invite Email handlers
	SendInviteEmail *inviteemail.SendInviteEmailHTTPHandler
	// Collection handlers - Basic CRUD
	CreateCollection     *collection.CreateCollectionHTTPHandler
	GetCollection        *collection.GetCollectionHTTPHandler
	ListUserCollections  *collection.ListUserCollectionsHTTPHandler
	UpdateCollection     *collection.UpdateCollectionHTTPHandler
	SoftDeleteCollection *collection.SoftDeleteCollectionHTTPHandler
	ArchiveCollection    *collection.ArchiveCollectionHTTPHandler
	RestoreCollection    *collection.RestoreCollectionHTTPHandler
	// Collection handlers - Hierarchical operations
	FindCollectionsByParent *collection.FindCollectionsByParentHTTPHandler
	FindRootCollections     *collection.FindRootCollectionsHTTPHandler
	MoveCollection          *collection.MoveCollectionHTTPHandler
	// Collection handlers - Sharing
	ShareCollection       *collection.ShareCollectionHTTPHandler
	RemoveMember          *collection.RemoveMemberHTTPHandler
	ListSharedCollections *collection.ListSharedCollectionsHTTPHandler
	// Collection handlers - Filtered operations
	GetFilteredCollections *collection.GetFilteredCollectionsHTTPHandler
	// Collection Sync
	CollectionSync *collection.CollectionSyncHTTPHandler
	// File handlers - Basic CRUD
	SoftDeleteFile          *file.SoftDeleteFileHTTPHandler
	DeleteMultipleFiles     *file.DeleteMultipleFilesHTTPHandler
	GetFile                 *file.GetFileHTTPHandler
	ListFilesByCollection   *file.ListFilesByCollectionHTTPHandler
	UpdateFile              *file.UpdateFileHTTPHandler
	CreatePendingFile       *file.CreatePendingFileHTTPHandler
	CompleteFileUpload      *file.CompleteFileUploadHTTPHandler
	GetPresignedUploadURL   *file.GetPresignedUploadURLHTTPHandler
	GetPresignedDownloadURL *file.GetPresignedDownloadURLHTTPHandler
	ReportDownloadCompleted *file.ReportDownloadCompletedHTTPHandler
	ArchiveFile             *file.ArchiveFileHTTPHandler
	RestoreFile             *file.RestoreFileHTTPHandler
	ListRecentFiles         *file.ListRecentFilesHTTPHandler
	// File Sync
	FileSync *file.FileSyncHTTPHandler
	// Tag handlers
	CreateTag            *tag.CreateTagHTTPHandler
	ListTags             *tag.ListTagsHTTPHandler
	GetTag               *tag.GetTagHTTPHandler
	UpdateTag            *tag.UpdateTagHTTPHandler
	DeleteTag            *tag.DeleteTagHTTPHandler
	AssignTag            *tag.AssignTagHTTPHandler
	UnassignTag          *tag.UnassignTagHTTPHandler
	GetTagsForCollection *tag.GetTagsForCollectionHTTPHandler
	GetTagsForFile       *tag.GetTagsForFileHTTPHandler
	ListCollectionsByTag *tag.ListCollectionsByTagHandler
	ListFilesByTag       *tag.ListFilesByTagHandler
	SearchByTags         *tag.SearchByTagsHandler
}
// NewHandlers creates and wires all HTTP handlers into a single Handlers
// aggregate. This is pure assembly for dependency injection: every argument
// is stored on the corresponding field as-is, with no additional
// construction, validation, or side effects. Parameter order mirrors the
// field order of Handlers.
func NewHandlers(
	// Common
	versionHandler *commonhttp.MapleFileVersionHTTPHandler,
	// Dashboard
	getDashboard *dashboard.GetDashboardHTTPHandler,
	// Me
	getMe *me.GetMeHTTPHandler,
	updateMe *me.PutUpdateMeHTTPHandler,
	deleteMe *me.DeleteMeHTTPHandler,
	// User
	userPublicLookup *user.UserPublicLookupHTTPHandler,
	// Blocked Email
	createBlockedEmail *blockedemail.CreateBlockedEmailHTTPHandler,
	listBlockedEmails *blockedemail.ListBlockedEmailsHTTPHandler,
	deleteBlockedEmail *blockedemail.DeleteBlockedEmailHTTPHandler,
	// Invite Email
	sendInviteEmail *inviteemail.SendInviteEmailHTTPHandler,
	// Collection - Basic CRUD
	createCollection *collection.CreateCollectionHTTPHandler,
	getCollection *collection.GetCollectionHTTPHandler,
	listUserCollections *collection.ListUserCollectionsHTTPHandler,
	updateCollection *collection.UpdateCollectionHTTPHandler,
	softDeleteCollection *collection.SoftDeleteCollectionHTTPHandler,
	archiveCollection *collection.ArchiveCollectionHTTPHandler,
	restoreCollection *collection.RestoreCollectionHTTPHandler,
	// Collection - Hierarchical
	findCollectionsByParent *collection.FindCollectionsByParentHTTPHandler,
	findRootCollections *collection.FindRootCollectionsHTTPHandler,
	moveCollection *collection.MoveCollectionHTTPHandler,
	// Collection - Sharing
	shareCollection *collection.ShareCollectionHTTPHandler,
	removeMember *collection.RemoveMemberHTTPHandler,
	listSharedCollections *collection.ListSharedCollectionsHTTPHandler,
	// Collection - Filtered
	getFilteredCollections *collection.GetFilteredCollectionsHTTPHandler,
	// Collection - Sync
	collectionSync *collection.CollectionSyncHTTPHandler,
	// File - CRUD
	softDeleteFile *file.SoftDeleteFileHTTPHandler,
	deleteMultipleFiles *file.DeleteMultipleFilesHTTPHandler,
	getFile *file.GetFileHTTPHandler,
	listFilesByCollection *file.ListFilesByCollectionHTTPHandler,
	updateFile *file.UpdateFileHTTPHandler,
	createPendingFile *file.CreatePendingFileHTTPHandler,
	completeFileUpload *file.CompleteFileUploadHTTPHandler,
	getPresignedUploadURL *file.GetPresignedUploadURLHTTPHandler,
	getPresignedDownloadURL *file.GetPresignedDownloadURLHTTPHandler,
	reportDownloadCompleted *file.ReportDownloadCompletedHTTPHandler,
	archiveFile *file.ArchiveFileHTTPHandler,
	restoreFile *file.RestoreFileHTTPHandler,
	listRecentFiles *file.ListRecentFilesHTTPHandler,
	// File - Sync
	fileSync *file.FileSyncHTTPHandler,
	// Tag handlers
	createTag *tag.CreateTagHTTPHandler,
	listTags *tag.ListTagsHTTPHandler,
	getTag *tag.GetTagHTTPHandler,
	updateTag *tag.UpdateTagHTTPHandler,
	deleteTag *tag.DeleteTagHTTPHandler,
	assignTag *tag.AssignTagHTTPHandler,
	unassignTag *tag.UnassignTagHTTPHandler,
	getTagsForCollection *tag.GetTagsForCollectionHTTPHandler,
	getTagsForFile *tag.GetTagsForFileHTTPHandler,
	listCollectionsByTag *tag.ListCollectionsByTagHandler,
	listFilesByTag *tag.ListFilesByTagHandler,
	searchByTags *tag.SearchByTagsHandler,
) *Handlers {
	return &Handlers{
		// Common
		Version: versionHandler,
		// Dashboard
		GetDashboard: getDashboard,
		// Me
		GetMe:    getMe,
		UpdateMe: updateMe,
		DeleteMe: deleteMe,
		// User
		UserPublicLookup: userPublicLookup,
		// Blocked Email
		CreateBlockedEmail: createBlockedEmail,
		ListBlockedEmails:  listBlockedEmails,
		DeleteBlockedEmail: deleteBlockedEmail,
		// Invite Email
		SendInviteEmail: sendInviteEmail,
		// Collection - Basic CRUD
		CreateCollection:     createCollection,
		GetCollection:        getCollection,
		ListUserCollections:  listUserCollections,
		UpdateCollection:     updateCollection,
		SoftDeleteCollection: softDeleteCollection,
		ArchiveCollection:    archiveCollection,
		RestoreCollection:    restoreCollection,
		// Collection - Hierarchical
		FindCollectionsByParent: findCollectionsByParent,
		FindRootCollections:     findRootCollections,
		MoveCollection:          moveCollection,
		// Collection - Sharing
		ShareCollection:       shareCollection,
		RemoveMember:          removeMember,
		ListSharedCollections: listSharedCollections,
		// Collection - Filtered
		GetFilteredCollections: getFilteredCollections,
		// Collection Sync
		CollectionSync: collectionSync,
		// File - CRUD
		SoftDeleteFile:          softDeleteFile,
		DeleteMultipleFiles:     deleteMultipleFiles,
		GetFile:                 getFile,
		ListFilesByCollection:   listFilesByCollection,
		UpdateFile:              updateFile,
		CreatePendingFile:       createPendingFile,
		CompleteFileUpload:      completeFileUpload,
		GetPresignedUploadURL:   getPresignedUploadURL,
		GetPresignedDownloadURL: getPresignedDownloadURL,
		ReportDownloadCompleted: reportDownloadCompleted,
		ArchiveFile:             archiveFile,
		RestoreFile:             restoreFile,
		ListRecentFiles:         listRecentFiles,
		// File Sync
		FileSync: fileSync,
		// Tag handlers
		CreateTag:            createTag,
		ListTags:             listTags,
		GetTag:               getTag,
		UpdateTag:            updateTag,
		DeleteTag:            deleteTag,
		AssignTag:            assignTag,
		UnassignTag:          unassignTag,
		GetTagsForCollection: getTagsForCollection,
		GetTagsForFile:       getTagsForFile,
		ListCollectionsByTag: listCollectionsByTag,
		ListFilesByTag:       listFilesByTag,
		SearchByTags:         searchByTags,
	}
}

Some files were not shown because too many files have changed in this diff Show more