Initial commit: Open sourcing all of the Maple Open Technologies code.

This commit is contained in:
Bartlomiej Mika 2025-12-02 14:33:08 -05:00
commit 755d54a99d
2010 changed files with 448675 additions and 0 deletions

View file

@ -0,0 +1,222 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/auth/complete_login.go
package auth
import (
"bytes"
"context"
"encoding/base64"
"fmt"
"strings"
"time"
"github.com/awnumar/memguard"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
uc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/user"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/auditlog"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/security/hash"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/security/jwt"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/storage/cache/cassandracache"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/transaction"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/validation"
)
// CompleteLoginRequestDTO is the request payload for completing a
// challenge-based login.
type CompleteLoginRequestDTO struct {
	Email         string `json:"email"`         // account email; lowercased/trimmed by the service
	ChallengeID   string `json:"challengeId"`   // ID of the challenge issued earlier in the login flow
	DecryptedData string `json:"decryptedData"` // base64-encoded challenge bytes decrypted by the client
}

// CompleteLoginResponseDTO is returned on successful login and carries the
// issued JWT pair; expiry dates are formatted as RFC 3339 strings.
type CompleteLoginResponseDTO struct {
	Message                string `json:"message"`
	AccessToken            string `json:"access_token"`
	RefreshToken           string `json:"refresh_token"`
	AccessTokenExpiryDate  string `json:"access_token_expiry_date"`  // RFC 3339
	RefreshTokenExpiryDate string `json:"refresh_token_expiry_date"` // RFC 3339
	Username               string `json:"username"` // populated with the user's email
}

// CompleteLoginService completes the final step of the login flow.
type CompleteLoginService interface {
	Execute(ctx context.Context, req *CompleteLoginRequestDTO) (*CompleteLoginResponseDTO, error)
}

// completeLoginServiceImpl is the production implementation of
// CompleteLoginService.
type completeLoginServiceImpl struct {
	config           *config.Config
	logger           *zap.Logger
	auditLogger      auditlog.AuditLogger
	userGetByEmailUC uc_user.UserGetByEmailUseCase
	cache            cassandracache.CassandraCacher
	jwtProvider      jwt.JWTProvider
}
// NewCompleteLoginService constructs the production CompleteLoginService,
// namespacing the supplied logger so this service's log lines are
// identifiable.
func NewCompleteLoginService(
	config *config.Config,
	logger *zap.Logger,
	auditLogger auditlog.AuditLogger,
	userGetByEmailUC uc_user.UserGetByEmailUseCase,
	cache cassandracache.CassandraCacher,
	jwtProvider jwt.JWTProvider,
) CompleteLoginService {
	svc := completeLoginServiceImpl{
		config:           config,
		logger:           logger.Named("CompleteLoginService"),
		auditLogger:      auditLogger,
		userGetByEmailUC: userGetByEmailUC,
		cache:            cache,
		jwtProvider:      jwtProvider,
	}
	return &svc
}
// Execute completes the challenge-based login flow.
//
// Flow: validate input → load the one-time challenge from cache → verify the
// client's decrypted copy matches → load the user → issue a JWT pair → store
// the refresh token → consume the challenge. The refresh token is stored
// BEFORE the challenge is deleted so a cache failure cannot leave the user
// with a consumed challenge and no session; a saga records compensations so
// a later step's failure rolls back the earlier cache write.
//
// Returns RFC 9457 ProblemDetail errors. Secret byte slices are wiped from
// memory via memguard before the function returns.
func (s *completeLoginServiceImpl) Execute(ctx context.Context, req *CompleteLoginRequestDTO) (*CompleteLoginResponseDTO, error) {
	// Validate request shape first; returns field-level ProblemDetail.
	if err := s.validateCompleteLoginRequest(req); err != nil {
		return nil, err // Returns RFC 9457 ProblemDetail
	}

	// SAGA groups the cache writes below so they can be rolled back together.
	saga := transaction.NewSaga("complete-login", s.logger)
	s.logger.Info("starting login completion")

	// Step 1: Normalize email for case- and whitespace-insensitive lookup.
	email := strings.ToLower(strings.TrimSpace(req.Email))

	// Step 2: Get the original one-time challenge from cache.
	challengeKey := fmt.Sprintf("challenge:%s", req.ChallengeID)
	originalChallenge, err := s.cache.Get(ctx, challengeKey)
	if err != nil || originalChallenge == nil {
		s.logger.Warn("Challenge not found", zap.String("challenge_id", req.ChallengeID))
		s.auditLogger.LogAuth(ctx, auditlog.EventTypeLoginFailure, auditlog.OutcomeFailure,
			validation.MaskEmail(email), "", map[string]string{
				"reason": "challenge_expired",
			})
		return nil, httperror.NewUnauthorizedError("Invalid or expired login challenge. Please request a new login code.")
	}
	defer memguard.WipeBytes(originalChallenge) // SECURITY: Wipe challenge from memory

	// Step 3: Decode the client's decrypted data and verify it matches.
	decryptedData, err := base64.StdEncoding.DecodeString(req.DecryptedData)
	if err != nil {
		s.logger.Warn("Failed to decode decrypted data", zap.Error(err))
		return nil, httperror.NewBadRequestError("Invalid encrypted data format.")
	}
	defer memguard.WipeBytes(decryptedData) // SECURITY: Wipe decrypted data from memory

	// NOTE(review): bytes.Equal is not constant-time. Challenges are random,
	// single-use, and expiring, which limits practical timing attacks, but
	// crypto/subtle.ConstantTimeCompare would be the safer primitive here —
	// confirm with security review.
	if !bytes.Equal(decryptedData, originalChallenge) {
		s.logger.Warn("Challenge verification failed", zap.String("email", validation.MaskEmail(email)))
		s.auditLogger.LogAuth(ctx, auditlog.EventTypeLoginFailure, auditlog.OutcomeFailure,
			validation.MaskEmail(email), "", map[string]string{
				"reason": "challenge_verification_failed",
			})
		return nil, httperror.NewUnauthorizedError("Challenge verification failed. Incorrect password or encryption keys.")
	}

	// Step 4: Get user (read-only, no compensation needed).
	user, err := s.userGetByEmailUC.Execute(ctx, email)
	if err != nil || user == nil {
		s.logger.Error("User not found", zap.String("email", validation.MaskEmail(email)))
		return nil, httperror.NewUnauthorizedError("Invalid email or password.")
	}

	// Step 5: Generate JWT token pair with configured lifetimes.
	accessToken, accessExpiry, refreshToken, refreshExpiry, err := s.jwtProvider.GenerateJWTTokenPair(
		user.ID.String(),
		s.config.JWT.AccessTokenDuration,
		s.config.JWT.RefreshTokenDuration,
	)
	if err != nil {
		s.logger.Error("Failed to generate JWT tokens", zap.Error(err))
		return nil, httperror.NewInternalServerError("Failed to generate authentication tokens. Please try again.")
	}

	// Step 6: Store refresh token FIRST (compensate: delete refresh token).
	// CRITICAL: Store refresh token before deleting challenge to prevent login failure.
	// SECURITY: Hash refresh token to prevent token leakage via cache key inspection.
	refreshTokenHash := hash.HashToken(refreshToken)
	refreshKey := fmt.Sprintf("refresh:%s", refreshTokenHash)
	if err := s.cache.SetWithExpiry(ctx, refreshKey, []byte(user.ID.String()), s.config.JWT.RefreshTokenDuration); err != nil {
		s.logger.Error("Failed to store refresh token", zap.Error(err))
		return nil, httperror.NewInternalServerError("Failed to store authentication session. Please try again.")
	}

	// Register compensation: delete refresh token if challenge deletion fails.
	refreshKeyCaptured := refreshKey
	saga.AddCompensation(func(ctx context.Context) error {
		s.logger.Info("compensating: deleting refresh token",
			zap.String("refresh_key", refreshKeyCaptured))
		return s.cache.Delete(ctx, refreshKeyCaptured)
	})

	// Step 7: Clear challenge from cache (one-time use).
	challengeKeyCaptured := challengeKey
	originalChallengeCaptured := originalChallenge
	if err := s.cache.Delete(ctx, challengeKey); err != nil {
		s.logger.Error("Failed to delete challenge",
			zap.String("challenge_key", challengeKey),
			zap.Error(err))
		// Trigger compensation: delete the refresh token stored above.
		saga.Rollback(ctx)
		return nil, httperror.NewInternalServerError("Login failed. Please try again.")
	}

	// Register compensation: restore the challenge with a reduced TTL
	// (5 minutes) so the user could retry if a later saga step failed.
	saga.AddCompensation(func(ctx context.Context) error {
		s.logger.Info("compensating: restoring challenge",
			zap.String("challenge_key", challengeKeyCaptured))
		return s.cache.SetWithExpiry(ctx, challengeKeyCaptured, originalChallengeCaptured, 5*time.Minute)
	})

	// Log only a length-guarded prefix of the refresh token. (The previous
	// unconditional refreshToken[:16] would panic on a token shorter than
	// 16 bytes.)
	tokenPreview := refreshToken
	if len(tokenPreview) > 16 {
		tokenPreview = tokenPreview[:16] + "..."
	}
	s.logger.Info("Login completed successfully",
		zap.String("user_id", user.ID.String()),
		zap.String("email", validation.MaskEmail(email)),
		zap.String("refresh_token", tokenPreview))

	// Audit log successful login.
	s.auditLogger.LogAuth(ctx, auditlog.EventTypeLoginSuccess, auditlog.OutcomeSuccess,
		validation.MaskEmail(email), "", map[string]string{
			"user_id": user.ID.String(),
		})

	return &CompleteLoginResponseDTO{
		Message:                "Login successful",
		AccessToken:            accessToken,
		RefreshToken:           refreshToken,
		AccessTokenExpiryDate:  accessExpiry.Format(time.RFC3339),
		RefreshTokenExpiryDate: refreshExpiry.Format(time.RFC3339),
		Username:               user.Email,
	}, nil
}
// validateCompleteLoginRequest checks that all fields of the complete-login
// request are present and well-formed. On failure it returns an RFC 9457
// ProblemDetail error carrying one message per offending field.
func (s *completeLoginServiceImpl) validateCompleteLoginRequest(req *CompleteLoginRequestDTO) error {
	fieldErrors := map[string]string{}

	// Email format is checked by the shared validation utility.
	if msg := validation.ValidateEmail(req.Email); msg != "" {
		fieldErrors["email"] = msg
	}

	// Challenge ID and decrypted data must be non-blank.
	if strings.TrimSpace(req.ChallengeID) == "" {
		fieldErrors["challengeId"] = "Challenge ID is required"
	}
	if strings.TrimSpace(req.DecryptedData) == "" {
		fieldErrors["decryptedData"] = "Decrypted challenge data is required"
	}

	if len(fieldErrors) == 0 {
		return nil
	}
	return httperror.NewValidationError(fieldErrors)
}

View file

@ -0,0 +1,121 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/auth/provider.go
package auth
import (
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
uc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/user"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/auditlog"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/emailer/mailgun"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/security/jwt"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/storage/cache/cassandracache"
)
// ProvideRegisterService provides the register service.
// Thin dependency-injection provider: forwards every dependency to
// NewRegisterService unchanged.
func ProvideRegisterService(
	config *config.Config,
	logger *zap.Logger,
	auditLogger auditlog.AuditLogger,
	userCreateUC uc_user.UserCreateUseCase,
	userGetByEmailUC uc_user.UserGetByEmailUseCase,
	userDeleteByIDUC uc_user.UserDeleteByIDUseCase,
	emailer mailgun.Emailer,
) RegisterService {
	return NewRegisterService(config, logger, auditLogger, userCreateUC, userGetByEmailUC, userDeleteByIDUC, emailer)
}
// ProvideVerifyEmailService provides the verify email service.
// Thin dependency-injection provider around NewVerifyEmailService.
func ProvideVerifyEmailService(
	logger *zap.Logger,
	auditLogger auditlog.AuditLogger,
	userGetByVerificationCodeUC uc_user.UserGetByVerificationCodeUseCase,
	userUpdateUC uc_user.UserUpdateUseCase,
) VerifyEmailService {
	return NewVerifyEmailService(logger, auditLogger, userGetByVerificationCodeUC, userUpdateUC)
}
// ProvideResendVerificationService provides the resend verification service.
// Thin dependency-injection provider around NewResendVerificationService.
func ProvideResendVerificationService(
	config *config.Config,
	logger *zap.Logger,
	userGetByEmailUC uc_user.UserGetByEmailUseCase,
	userUpdateUC uc_user.UserUpdateUseCase,
	emailer mailgun.Emailer,
) ResendVerificationService {
	return NewResendVerificationService(config, logger, userGetByEmailUC, userUpdateUC, emailer)
}
// ProvideRequestOTTService provides the request OTT (one-time token) service.
// Thin dependency-injection provider around NewRequestOTTService.
func ProvideRequestOTTService(
	config *config.Config,
	logger *zap.Logger,
	userGetByEmailUC uc_user.UserGetByEmailUseCase,
	cache cassandracache.CassandraCacher,
	emailer mailgun.Emailer,
) RequestOTTService {
	return NewRequestOTTService(config, logger, userGetByEmailUC, cache, emailer)
}
// ProvideVerifyOTTService provides the verify OTT (one-time token) service.
// Thin dependency-injection provider around NewVerifyOTTService.
func ProvideVerifyOTTService(
	logger *zap.Logger,
	userGetByEmailUC uc_user.UserGetByEmailUseCase,
	cache cassandracache.CassandraCacher,
) VerifyOTTService {
	return NewVerifyOTTService(logger, userGetByEmailUC, cache)
}
// ProvideCompleteLoginService provides the complete login service.
// Thin dependency-injection provider around NewCompleteLoginService.
func ProvideCompleteLoginService(
	config *config.Config,
	logger *zap.Logger,
	auditLogger auditlog.AuditLogger,
	userGetByEmailUC uc_user.UserGetByEmailUseCase,
	cache cassandracache.CassandraCacher,
	jwtProvider jwt.JWTProvider,
) CompleteLoginService {
	return NewCompleteLoginService(config, logger, auditLogger, userGetByEmailUC, cache, jwtProvider)
}
// ProvideRefreshTokenService provides the refresh token service.
// Thin dependency-injection provider around NewRefreshTokenService.
func ProvideRefreshTokenService(
	cfg *config.Config,
	logger *zap.Logger,
	auditLogger auditlog.AuditLogger,
	cache cassandracache.CassandraCacher,
	jwtProvider jwt.JWTProvider,
	userGetByIDUC uc_user.UserGetByIDUseCase,
) RefreshTokenService {
	return NewRefreshTokenService(cfg, logger, auditLogger, cache, jwtProvider, userGetByIDUC)
}
// ProvideRecoveryInitiateService provides the recovery initiate service.
// Thin dependency-injection provider around NewRecoveryInitiateService.
func ProvideRecoveryInitiateService(
	logger *zap.Logger,
	auditLogger auditlog.AuditLogger,
	userGetByEmailUC uc_user.UserGetByEmailUseCase,
	cache cassandracache.CassandraCacher,
) RecoveryInitiateService {
	return NewRecoveryInitiateService(logger, auditLogger, userGetByEmailUC, cache)
}
// ProvideRecoveryVerifyService provides the recovery verify service.
// Thin dependency-injection provider around NewRecoveryVerifyService.
func ProvideRecoveryVerifyService(
	logger *zap.Logger,
	cache cassandracache.CassandraCacher,
	userGetByEmailUC uc_user.UserGetByEmailUseCase,
) RecoveryVerifyService {
	return NewRecoveryVerifyService(logger, cache, userGetByEmailUC)
}
// ProvideRecoveryCompleteService provides the recovery complete service.
// Thin dependency-injection provider around NewRecoveryCompleteService.
func ProvideRecoveryCompleteService(
	logger *zap.Logger,
	auditLogger auditlog.AuditLogger,
	userGetByEmailUC uc_user.UserGetByEmailUseCase,
	userUpdateUC uc_user.UserUpdateUseCase,
	cache cassandracache.CassandraCacher,
) RecoveryCompleteService {
	return NewRecoveryCompleteService(logger, auditLogger, userGetByEmailUC, userUpdateUC, cache)
}

View file

@ -0,0 +1,251 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/auth/recovery_complete.go
package auth
import (
"context"
"encoding/base64"
"fmt"
"time"
"github.com/awnumar/memguard"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/crypto"
dom_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/user"
uc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/user"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/auditlog"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/storage/cache/cassandracache"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/transaction"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/validation"
)
// RecoveryCompleteRequestDTO carries the replacement credential material for
// finishing account recovery. All fields are base64-encoded; the encrypted
// values are expected as nonce (24 bytes) || ciphertext.
type RecoveryCompleteRequestDTO struct {
	RecoveryToken                        string `json:"recovery_token"` // one-time token issued by the verify step
	NewSalt                              string `json:"new_salt"`
	NewPublicKey                         string `json:"new_public_key"`
	NewEncryptedMasterKey                string `json:"new_encrypted_master_key"`
	NewEncryptedPrivateKey               string `json:"new_encrypted_private_key"`
	NewEncryptedRecoveryKey              string `json:"new_encrypted_recovery_key"`
	NewMasterKeyEncryptedWithRecoveryKey string `json:"new_master_key_encrypted_with_recovery_key"`
}

// RecoveryCompleteResponseDTO reports the outcome of recovery completion.
type RecoveryCompleteResponseDTO struct {
	Message string `json:"message"`
	Success bool   `json:"success"`
}

// RecoveryCompleteService finishes the account recovery flow.
type RecoveryCompleteService interface {
	Execute(ctx context.Context, req *RecoveryCompleteRequestDTO) (*RecoveryCompleteResponseDTO, error)
}

// recoveryCompleteServiceImpl is the production implementation of
// RecoveryCompleteService.
type recoveryCompleteServiceImpl struct {
	logger           *zap.Logger
	auditLogger      auditlog.AuditLogger
	userGetByEmailUC uc_user.UserGetByEmailUseCase
	userUpdateUC     uc_user.UserUpdateUseCase
	cache            cassandracache.CassandraCacher
}
// NewRecoveryCompleteService builds the production RecoveryCompleteService,
// namespacing the logger so this service's log lines are identifiable.
func NewRecoveryCompleteService(
	logger *zap.Logger,
	auditLogger auditlog.AuditLogger,
	userGetByEmailUC uc_user.UserGetByEmailUseCase,
	userUpdateUC uc_user.UserUpdateUseCase,
	cache cassandracache.CassandraCacher,
) RecoveryCompleteService {
	impl := recoveryCompleteServiceImpl{
		logger:           logger.Named("RecoveryCompleteService"),
		auditLogger:      auditLogger,
		userGetByEmailUC: userGetByEmailUC,
		userUpdateUC:     userUpdateUC,
		cache:            cache,
	}
	return &impl
}
// Execute finalizes account recovery: it validates the one-time recovery
// token, replaces the user's password salt and encryption key material with
// the client-supplied values, and invalidates the token.
//
// Ordering is deliberate: the user record is updated BEFORE the recovery
// token is deleted; if the token deletion fails, a saga compensation
// restores the previous credentials so a still-live token cannot be replayed
// against half-updated state.
func (s *recoveryCompleteServiceImpl) Execute(ctx context.Context, req *RecoveryCompleteRequestDTO) (*RecoveryCompleteResponseDTO, error) {
	// Create SAGA for recovery completion workflow.
	saga := transaction.NewSaga("recovery-complete", s.logger)
	s.logger.Info("starting recovery completion")

	// Step 1: Validate recovery token from cache; the cached value is the
	// email the token was issued for (stored by the verify step).
	tokenKey := fmt.Sprintf("recovery_token:%s", req.RecoveryToken)
	emailBytes, err := s.cache.Get(ctx, tokenKey)
	if err != nil || emailBytes == nil {
		s.logger.Warn("Recovery token not found or expired")
		return nil, fmt.Errorf("invalid or expired recovery token")
	}
	email := string(emailBytes)

	// Step 2: Get user by email and backup current credentials.
	user, err := s.userGetByEmailUC.Execute(ctx, email)
	if err != nil || user == nil {
		s.logger.Error("User not found during recovery completion", zap.String("email", validation.MaskEmail(email)))
		return nil, fmt.Errorf("recovery completion failed")
	}

	// Backup current credentials for compensation (deep copy).
	// NOTE(review): only PasswordSalt is deep-copied; the other fields are
	// value copies whose byte slices share backing arrays with the current
	// data. That is safe as long as those slices are only replaced (as they
	// are below), never mutated in place — confirm this invariant holds.
	var oldSecurityData *dom_user.UserSecurityData
	if user.SecurityData != nil {
		// Create a deep copy of security data.
		oldSecurityData = &dom_user.UserSecurityData{
			PasswordSalt:                      make([]byte, len(user.SecurityData.PasswordSalt)),
			PublicKey:                         user.SecurityData.PublicKey,
			EncryptedMasterKey:                user.SecurityData.EncryptedMasterKey,
			EncryptedPrivateKey:               user.SecurityData.EncryptedPrivateKey,
			EncryptedRecoveryKey:              user.SecurityData.EncryptedRecoveryKey,
			MasterKeyEncryptedWithRecoveryKey: user.SecurityData.MasterKeyEncryptedWithRecoveryKey,
		}
		copy(oldSecurityData.PasswordSalt, user.SecurityData.PasswordSalt)
	}

	// Decode new encryption keys from base64.
	// SECURITY: All decoded key material is wiped from memory after use.
	newSalt, err := base64.StdEncoding.DecodeString(req.NewSalt)
	if err != nil {
		return nil, fmt.Errorf("invalid salt format")
	}
	defer memguard.WipeBytes(newSalt)
	newPublicKey, err := base64.StdEncoding.DecodeString(req.NewPublicKey)
	if err != nil {
		return nil, fmt.Errorf("invalid public key format")
	}
	defer memguard.WipeBytes(newPublicKey)
	newEncryptedMasterKey, err := base64.StdEncoding.DecodeString(req.NewEncryptedMasterKey)
	if err != nil {
		return nil, fmt.Errorf("invalid encrypted master key format")
	}
	defer memguard.WipeBytes(newEncryptedMasterKey)
	newEncryptedPrivateKey, err := base64.StdEncoding.DecodeString(req.NewEncryptedPrivateKey)
	if err != nil {
		return nil, fmt.Errorf("invalid encrypted private key format")
	}
	defer memguard.WipeBytes(newEncryptedPrivateKey)
	newEncryptedRecoveryKey, err := base64.StdEncoding.DecodeString(req.NewEncryptedRecoveryKey)
	if err != nil {
		return nil, fmt.Errorf("invalid encrypted recovery key format")
	}
	defer memguard.WipeBytes(newEncryptedRecoveryKey)
	newMasterKeyEncryptedWithRecovery, err := base64.StdEncoding.DecodeString(req.NewMasterKeyEncryptedWithRecoveryKey)
	if err != nil {
		return nil, fmt.Errorf("invalid master key encrypted with recovery format")
	}
	defer memguard.WipeBytes(newMasterKeyEncryptedWithRecovery)

	// Update user's encryption keys.
	if user.SecurityData == nil {
		user.SecurityData = &dom_user.UserSecurityData{}
	}

	// Parse the encrypted keys into their proper structures.
	// Format: nonce (24 bytes) + ciphertext (remaining bytes).

	// Update password salt.
	// NOTE(review): newSalt is also wiped by the deferred WipeBytes above.
	// The update use case persists the value before this function returns,
	// but the in-memory user object is left holding a zeroed salt afterwards
	// — confirm no caller reuses this user instance.
	user.SecurityData.PasswordSalt = newSalt

	// Update public key (critical for login challenge encryption).
	user.SecurityData.PublicKey = crypto.PublicKey{
		Key: newPublicKey,
	}

	// Update encrypted master key.
	// NOTE(review): payloads of 24 bytes or fewer are silently skipped here
	// and in the three updates below, leaving the previous key in place —
	// confirm this is intended rather than a validation error.
	if len(newEncryptedMasterKey) > 24 {
		user.SecurityData.EncryptedMasterKey = crypto.EncryptedMasterKey{
			Nonce:      newEncryptedMasterKey[:24],
			Ciphertext: newEncryptedMasterKey[24:],
			KeyVersion: 1,
		}
	}
	// Update encrypted private key.
	if len(newEncryptedPrivateKey) > 24 {
		user.SecurityData.EncryptedPrivateKey = crypto.EncryptedPrivateKey{
			Nonce:      newEncryptedPrivateKey[:24],
			Ciphertext: newEncryptedPrivateKey[24:],
		}
	}
	// Update encrypted recovery key.
	if len(newEncryptedRecoveryKey) > 24 {
		user.SecurityData.EncryptedRecoveryKey = crypto.EncryptedRecoveryKey{
			Nonce:      newEncryptedRecoveryKey[:24],
			Ciphertext: newEncryptedRecoveryKey[24:],
		}
	}
	// Update master key encrypted with recovery key.
	if len(newMasterKeyEncryptedWithRecovery) > 24 {
		user.SecurityData.MasterKeyEncryptedWithRecoveryKey = crypto.MasterKeyEncryptedWithRecoveryKey{
			Nonce:      newMasterKeyEncryptedWithRecovery[:24],
			Ciphertext: newMasterKeyEncryptedWithRecovery[24:],
		}
	}

	// Update user's modified timestamp.
	user.ModifiedAt = time.Now()

	// Step 3: Save updated user with new credentials (compensate: restore old credentials).
	// CRITICAL: This must succeed before token deletion to prevent account takeover.
	if err := s.userUpdateUC.Execute(ctx, user); err != nil {
		s.logger.Error("Failed to update user with new keys", zap.Error(err))
		return nil, fmt.Errorf("failed to complete recovery")
	}

	// Register compensation: restore old credentials if token deletion fails.
	userCaptured := user
	oldSecurityDataCaptured := oldSecurityData
	saga.AddCompensation(func(ctx context.Context) error {
		s.logger.Warn("compensating: restoring old credentials",
			zap.String("user_id", userCaptured.ID.String()))
		// Restore old security data.
		userCaptured.SecurityData = oldSecurityDataCaptured
		userCaptured.ModifiedAt = time.Now()
		if err := s.userUpdateUC.Execute(ctx, userCaptured); err != nil {
			s.logger.Error("Failed to restore old credentials during compensation",
				zap.String("user_id", userCaptured.ID.String()),
				zap.Error(err))
			return fmt.Errorf("compensation failed: %w", err)
		}
		s.logger.Info("old credentials restored successfully during compensation",
			zap.String("user_id", userCaptured.ID.String()))
		return nil
	})

	// Step 4: Clear recovery token (one-time use) - MUST succeed to prevent reuse.
	// CRITICAL: If this fails, recovery token could be reused for account takeover.
	tokenKeyCaptured := tokenKey
	if err := s.cache.Delete(ctx, tokenKeyCaptured); err != nil {
		s.logger.Error("Failed to delete recovery token - SECURITY RISK",
			zap.String("token_key", tokenKeyCaptured),
			zap.Error(err))
		// Trigger compensation: Restore old credentials.
		saga.Rollback(ctx)
		return nil, fmt.Errorf("failed to invalidate recovery token - please contact support")
	}

	s.logger.Info("Recovery completion successful",
		zap.String("email", validation.MaskEmail(email)),
		zap.String("user_id", user.ID.String()))

	// Audit log recovery completion.
	s.auditLogger.LogAuth(ctx, auditlog.EventTypeRecoveryCompleted, auditlog.OutcomeSuccess,
		validation.MaskEmail(email), "", map[string]string{
			"user_id": user.ID.String(),
		})

	return &RecoveryCompleteResponseDTO{
		Message: "Account recovery completed successfully. You can now log in with your new credentials.",
		Success: true,
	}, nil
}

View file

@ -0,0 +1,133 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/auth/recovery_initiate.go
package auth
import (
"context"
"crypto/rand"
"encoding/base64"
"fmt"
"strings"
"time"
"github.com/awnumar/memguard"
"github.com/gocql/gocql"
"go.uber.org/zap"
uc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/user"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/auditlog"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/storage/cache/cassandracache"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/validation"
)
// RecoveryInitiateRequestDTO starts an account recovery flow.
type RecoveryInitiateRequestDTO struct {
	Email  string `json:"email"`
	Method string `json:"method"` // "recovery_key"
}

// RecoveryInitiateResponseDTO carries the recovery session and challenge.
// The same shape is returned for unknown emails (with fake values) to avoid
// account enumeration.
type RecoveryInitiateResponseDTO struct {
	Message   string `json:"message"`
	SessionID string `json:"session_id"`
	// EncryptedChallenge is the base64-encoded challenge. Per the NOTE in
	// Execute, it is not yet actually encrypted with the recovery key.
	EncryptedChallenge string `json:"encrypted_challenge"`
}

// RecoveryInitiateService begins the recovery-key account recovery flow.
type RecoveryInitiateService interface {
	Execute(ctx context.Context, req *RecoveryInitiateRequestDTO) (*RecoveryInitiateResponseDTO, error)
}

// recoveryInitiateServiceImpl is the production implementation of
// RecoveryInitiateService.
type recoveryInitiateServiceImpl struct {
	logger           *zap.Logger
	auditLogger      auditlog.AuditLogger
	userGetByEmailUC uc_user.UserGetByEmailUseCase
	cache            cassandracache.CassandraCacher
}
// NewRecoveryInitiateService builds the production RecoveryInitiateService,
// namespacing the logger so this service's log lines are identifiable.
func NewRecoveryInitiateService(
	logger *zap.Logger,
	auditLogger auditlog.AuditLogger,
	userGetByEmailUC uc_user.UserGetByEmailUseCase,
	cache cassandracache.CassandraCacher,
) RecoveryInitiateService {
	impl := recoveryInitiateServiceImpl{
		logger:           logger.Named("RecoveryInitiateService"),
		auditLogger:      auditLogger,
		userGetByEmailUC: userGetByEmailUC,
		cache:            cache,
	}
	return &impl
}
// Execute starts account recovery for the given email. For known users it
// generates a random 32-byte challenge, caches it for 30 minutes alongside
// the email (keyed by a fresh session ID), and returns both. For unknown
// emails it returns a structurally identical fake response to avoid account
// enumeration.
func (s *recoveryInitiateServiceImpl) Execute(ctx context.Context, req *RecoveryInitiateRequestDTO) (*RecoveryInitiateResponseDTO, error) {
	// Normalize email for case- and whitespace-insensitive lookup.
	email := strings.ToLower(strings.TrimSpace(req.Email))

	// Verify user exists.
	user, err := s.userGetByEmailUC.Execute(ctx, email)
	if err != nil || user == nil {
		// For security, don't reveal if user exists or not.
		s.logger.Warn("User not found for recovery", zap.String("email", validation.MaskEmail(email)))
		// Generate fake session ID and challenge to prevent timing attacks and enumeration.
		// This ensures the response looks identical whether the user exists or not.
		// NOTE(review): this path performs no cache writes, so it likely
		// returns faster than the real path — confirm the residual timing
		// difference is acceptable for the anti-enumeration goal.
		fakeSessionID := gocql.TimeUUID().String()
		fakeChallenge := make([]byte, 32)
		if _, err := rand.Read(fakeChallenge); err != nil {
			// Fallback to zeros if random fails (extremely unlikely); the
			// fake challenge is never validated, so this is cosmetic only.
			fakeChallenge = make([]byte, 32)
		}
		defer memguard.WipeBytes(fakeChallenge) // SECURITY: Wipe fake challenge from memory
		fakeEncryptedChallenge := base64.StdEncoding.EncodeToString(fakeChallenge)
		return &RecoveryInitiateResponseDTO{
			Message:            "Recovery initiated. Please decrypt the challenge with your recovery key.",
			SessionID:          fakeSessionID,
			EncryptedChallenge: fakeEncryptedChallenge,
		}, nil
	}

	// Generate recovery session ID (time-based UUID).
	sessionID := gocql.TimeUUID().String()

	// Generate random challenge (32 bytes).
	challenge := make([]byte, 32)
	if _, err := rand.Read(challenge); err != nil {
		s.logger.Error("Failed to generate recovery challenge", zap.Error(err))
		return nil, fmt.Errorf("failed to initiate recovery")
	}
	defer memguard.WipeBytes(challenge) // SECURITY: Wipe challenge from memory after use

	// Store recovery challenge in cache (30 minute expiry).
	challengeKey := fmt.Sprintf("recovery_challenge:%s", sessionID)
	if err := s.cache.SetWithExpiry(ctx, challengeKey, challenge, 30*time.Minute); err != nil {
		s.logger.Error("Failed to store recovery challenge", zap.Error(err))
		return nil, fmt.Errorf("failed to initiate recovery")
	}

	// Store email associated with recovery session.
	emailKey := fmt.Sprintf("recovery_email:%s", sessionID)
	if err := s.cache.SetWithExpiry(ctx, emailKey, []byte(email), 30*time.Minute); err != nil {
		s.logger.Error("Failed to store recovery email", zap.Error(err))
		// Continue anyway.
		// NOTE(review): without this mapping the verify step cannot resolve
		// the email, so recovery will fail later — consider failing fast.
	}

	// NOTE: In a real implementation with recovery key encryption:
	// - We would retrieve the user's encrypted recovery key
	// - Encrypt the challenge with it
	// - The client would decrypt with their recovery key
	// For now, return base64-encoded challenge (frontend will handle encryption).
	encryptedChallenge := base64.StdEncoding.EncodeToString(challenge)

	s.logger.Info("Recovery initiated successfully",
		zap.String("email", validation.MaskEmail(email)),
		zap.String("session_id", sessionID))

	// Audit log recovery initiation.
	s.auditLogger.LogAuth(ctx, auditlog.EventTypeRecoveryInitiated, auditlog.OutcomeSuccess,
		validation.MaskEmail(email), "", map[string]string{
			"session_id": sessionID,
		})

	return &RecoveryInitiateResponseDTO{
		Message:            "Recovery initiated. Please decrypt the challenge with your recovery key.",
		SessionID:          sessionID,
		EncryptedChallenge: encryptedChallenge,
	}, nil
}

View file

@ -0,0 +1,177 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/auth/recovery_verify.go
package auth
import (
"bytes"
"context"
"crypto/rand"
"encoding/base64"
"fmt"
"time"
"github.com/awnumar/memguard"
"go.uber.org/zap"
uc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/user"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/storage/cache/cassandracache"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/transaction"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/validation"
)
// RecoveryVerifyRequestDTO proves possession of the recovery key by
// returning the decrypted recovery challenge for a session.
type RecoveryVerifyRequestDTO struct {
	SessionID          string `json:"session_id"`          // recovery session from the initiate step
	DecryptedChallenge string `json:"decrypted_challenge"` // base64-encoded decrypted challenge bytes
}

// RecoveryVerifyResponseDTO is returned when the challenge matches; the
// recovery token authorizes the credential-reset (complete) step.
type RecoveryVerifyResponseDTO struct {
	Message             string `json:"message"`
	RecoveryToken       string `json:"recovery_token"`
	CanResetCredentials bool   `json:"can_reset_credentials"`
	// MasterKeyEncryptedWithRecoveryKey is base64(nonce || ciphertext) so
	// the client can recover the master key with its recovery key.
	MasterKeyEncryptedWithRecoveryKey string `json:"master_key_encrypted_with_recovery_key"`
}

// RecoveryVerifyService verifies the recovery challenge for a session.
type RecoveryVerifyService interface {
	Execute(ctx context.Context, req *RecoveryVerifyRequestDTO) (*RecoveryVerifyResponseDTO, error)
}

// recoveryVerifyServiceImpl is the production implementation of
// RecoveryVerifyService.
type recoveryVerifyServiceImpl struct {
	logger           *zap.Logger
	cache            cassandracache.CassandraCacher
	userGetByEmailUC uc_user.UserGetByEmailUseCase
}
// NewRecoveryVerifyService builds the production RecoveryVerifyService,
// namespacing the logger so this service's log lines are identifiable.
func NewRecoveryVerifyService(
	logger *zap.Logger,
	cache cassandracache.CassandraCacher,
	userGetByEmailUC uc_user.UserGetByEmailUseCase,
) RecoveryVerifyService {
	impl := recoveryVerifyServiceImpl{
		logger:           logger.Named("RecoveryVerifyService"),
		cache:            cache,
		userGetByEmailUC: userGetByEmailUC,
	}
	return &impl
}
// Execute verifies a recovery challenge and, on success, issues a 15-minute
// one-time recovery token plus the user's master key encrypted with their
// recovery key (as base64(nonce || ciphertext)).
//
// Ordering is deliberate: the recovery token is stored BEFORE the challenge
// is deleted, with saga compensations registered so a failure in the later
// step rolls back the earlier cache write.
func (s *recoveryVerifyServiceImpl) Execute(ctx context.Context, req *RecoveryVerifyRequestDTO) (*RecoveryVerifyResponseDTO, error) {
	// Create SAGA for recovery verify workflow.
	saga := transaction.NewSaga("recovery-verify", s.logger)
	s.logger.Info("starting recovery verification")

	// Step 1: Get the original challenge from cache.
	challengeKey := fmt.Sprintf("recovery_challenge:%s", req.SessionID)
	originalChallenge, err := s.cache.Get(ctx, challengeKey)
	if err != nil || originalChallenge == nil {
		s.logger.Warn("Recovery challenge not found or expired", zap.String("session_id", req.SessionID))
		return nil, fmt.Errorf("invalid or expired recovery session")
	}
	defer memguard.WipeBytes(originalChallenge) // SECURITY: Wipe challenge from memory

	// Step 2: Decode the decrypted challenge from base64.
	decryptedChallenge, err := base64.StdEncoding.DecodeString(req.DecryptedChallenge)
	if err != nil {
		s.logger.Warn("Failed to decode decrypted challenge", zap.Error(err))
		return nil, fmt.Errorf("invalid decrypted challenge format")
	}
	defer memguard.WipeBytes(decryptedChallenge) // SECURITY: Wipe decrypted challenge from memory

	// Step 3: Verify that decrypted challenge matches original.
	// NOTE(review): bytes.Equal is not constant-time; consider
	// crypto/subtle.ConstantTimeCompare — confirm with security review.
	if !bytes.Equal(decryptedChallenge, originalChallenge) {
		s.logger.Warn("Recovery challenge verification failed", zap.String("session_id", req.SessionID))
		return nil, fmt.Errorf("challenge verification failed")
	}

	// Step 4: Generate recovery token (random secure token).
	tokenBytes := make([]byte, 32)
	if _, err := rand.Read(tokenBytes); err != nil {
		s.logger.Error("Failed to generate recovery token", zap.Error(err))
		return nil, fmt.Errorf("failed to generate recovery token")
	}
	defer memguard.WipeBytes(tokenBytes) // SECURITY: Wipe token bytes from memory
	recoveryToken := base64.URLEncoding.EncodeToString(tokenBytes)

	// Step 5: Get email associated with recovery session (read-only, no compensation).
	emailKey := fmt.Sprintf("recovery_email:%s", req.SessionID)
	email, err := s.cache.Get(ctx, emailKey)
	if err != nil || email == nil {
		s.logger.Error("Recovery email not found", zap.String("session_id", req.SessionID))
		return nil, fmt.Errorf("recovery session invalid")
	}

	// Step 5b: Fetch user to get their encrypted master key with recovery key.
	user, err := s.userGetByEmailUC.Execute(ctx, string(email))
	if err != nil || user == nil {
		s.logger.Error("User not found for recovery", zap.String("email", validation.MaskEmail(string(email))))
		return nil, fmt.Errorf("user not found")
	}

	// Validate user has the required key data.
	if user.SecurityData == nil ||
		user.SecurityData.MasterKeyEncryptedWithRecoveryKey.Ciphertext == nil ||
		user.SecurityData.MasterKeyEncryptedWithRecoveryKey.Nonce == nil {
		s.logger.Error("User missing master key encrypted with recovery key",
			zap.String("email", validation.MaskEmail(string(email))))
		return nil, fmt.Errorf("account recovery data not available")
	}

	// Combine nonce + ciphertext for transmission (matches frontend expectation).
	// Format: nonce (24 bytes) || ciphertext (variable length).
	nonce := user.SecurityData.MasterKeyEncryptedWithRecoveryKey.Nonce
	ciphertext := user.SecurityData.MasterKeyEncryptedWithRecoveryKey.Ciphertext
	combined := make([]byte, len(nonce)+len(ciphertext))
	copy(combined[:len(nonce)], nonce)
	copy(combined[len(nonce):], ciphertext)
	defer memguard.WipeBytes(combined) // SECURITY: Wipe combined key data from memory

	// Encode the combined data to base64 for transmission.
	masterKeyEncryptedWithRecoveryKeyBase64 := base64.StdEncoding.EncodeToString(combined)

	// Step 6: Store recovery token FIRST (compensate: delete recovery token).
	// CRITICAL: Store recovery token before deleting challenge to prevent flow interruption.
	tokenKey := fmt.Sprintf("recovery_token:%s", recoveryToken)
	if err := s.cache.SetWithExpiry(ctx, tokenKey, email, 15*time.Minute); err != nil {
		s.logger.Error("Failed to store recovery token", zap.Error(err))
		return nil, fmt.Errorf("failed to complete recovery verification")
	}

	// Register compensation: delete recovery token if challenge deletion fails.
	tokenKeyCaptured := tokenKey
	saga.AddCompensation(func(ctx context.Context) error {
		s.logger.Info("compensating: deleting recovery token",
			zap.String("token_key", tokenKeyCaptured))
		return s.cache.Delete(ctx, tokenKeyCaptured)
	})

	// Step 7: Clear recovery challenge (one-time use) (compensate: restore challenge).
	challengeKeyCaptured := challengeKey
	originalChallengeCaptured := originalChallenge
	if err := s.cache.Delete(ctx, challengeKey); err != nil {
		s.logger.Error("Failed to delete recovery challenge",
			zap.String("challenge_key", challengeKey),
			zap.Error(err))
		// Trigger compensation: Delete recovery token.
		saga.Rollback(ctx)
		// NOTE(review): unlike the sibling auth services, this wraps the raw
		// cache error into the caller-visible message via %w — confirm that
		// exposing internals here is intended.
		return nil, fmt.Errorf("failed to delete recovery challenge: %w", err)
	}

	// Register compensation: restore challenge with reduced TTL (15 minutes
	// for retry; the initiate step originally stored it for 30 minutes).
	saga.AddCompensation(func(ctx context.Context) error {
		s.logger.Info("compensating: restoring recovery challenge",
			zap.String("challenge_key", challengeKeyCaptured))
		// Restore with reduced TTL (15 minutes) to allow user retry.
		return s.cache.SetWithExpiry(ctx, challengeKeyCaptured, originalChallengeCaptured, 15*time.Minute)
	})

	s.logger.Info("Recovery verification successful",
		zap.String("session_id", req.SessionID),
		zap.String("email", validation.MaskEmail(string(email))),
		zap.String("recovery_token", recoveryToken[:16]+"...")) // Log prefix only; token is 44 base64 chars, so the slice is safe

	return &RecoveryVerifyResponseDTO{
		Message:                           "Recovery challenge verified successfully. You can now reset your credentials.",
		RecoveryToken:                     recoveryToken,
		CanResetCredentials:               true,
		MasterKeyEncryptedWithRecoveryKey: masterKeyEncryptedWithRecoveryKeyBase64,
	}, nil
}

View file

@ -0,0 +1,177 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/auth/refresh_token.go
package auth
import (
"context"
"fmt"
"time"
"github.com/gocql/gocql"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
uc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/user"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/auditlog"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/security/hash"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/security/jwt"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/storage/cache/cassandracache"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/transaction"
)
// RefreshTokenRequestDTO carries the refresh token presented by the client
// when requesting a new token pair.
//
// NOTE(review): the JSON key is "value", not "refresh_token" — this appears
// deliberate (client contract); confirm before renaming.
type RefreshTokenRequestDTO struct {
	RefreshToken string `json:"value"`
}
// RefreshTokenResponseDTO is returned after a successful token rotation.
// Expiry dates are RFC 3339 strings (see Execute). Username is populated
// with the user's email address.
type RefreshTokenResponseDTO struct {
	Message                string `json:"message"`
	AccessToken            string `json:"access_token"`
	RefreshToken           string `json:"refresh_token"`
	AccessTokenExpiryDate  string `json:"access_token_expiry_date"`
	RefreshTokenExpiryDate string `json:"refresh_token_expiry_date"`
	Username               string `json:"username"`
}
// RefreshTokenService rotates an access/refresh token pair given a valid,
// unexpired refresh token.
type RefreshTokenService interface {
	Execute(ctx context.Context, req *RefreshTokenRequestDTO) (*RefreshTokenResponseDTO, error)
}
// refreshTokenServiceImpl implements RefreshTokenService using a Cassandra
// cache for refresh-token storage and a JWT provider for token issuance.
type refreshTokenServiceImpl struct {
	config        *config.Config
	logger        *zap.Logger
	auditLogger   auditlog.AuditLogger
	cache         cassandracache.CassandraCacher
	jwtProvider   jwt.JWTProvider
	userGetByIDUC uc_user.UserGetByIDUseCase
}
// NewRefreshTokenService constructs a RefreshTokenService with its config,
// logging, audit, cache, JWT, and user-lookup dependencies. The logger is
// scoped with the service name.
func NewRefreshTokenService(
	config *config.Config,
	logger *zap.Logger,
	auditLogger auditlog.AuditLogger,
	cache cassandracache.CassandraCacher,
	jwtProvider jwt.JWTProvider,
	userGetByIDUC uc_user.UserGetByIDUseCase,
) RefreshTokenService {
	svc := &refreshTokenServiceImpl{
		config:        config,
		logger:        logger.Named("RefreshTokenService"),
		auditLogger:   auditLogger,
		cache:         cache,
		jwtProvider:   jwtProvider,
		userGetByIDUC: userGetByIDUC,
	}
	return svc
}
// Execute rotates a refresh token: it validates the presented token, verifies
// it against the cache (tokens are stored hashed), issues a fresh token pair,
// stores the new refresh token, and deletes the old one. Cache writes are
// ordered "new first, then delete old" and coordinated through a saga so a
// midway failure cannot leave the user with zero valid refresh tokens.
func (s *refreshTokenServiceImpl) Execute(ctx context.Context, req *RefreshTokenRequestDTO) (*RefreshTokenResponseDTO, error) {
	// Create SAGA for token refresh workflow
	saga := transaction.NewSaga("refresh-token", s.logger)
	s.logger.Info("starting token refresh")
	// Step 1: Validate refresh token JWT; yields the user ID claim.
	userID, err := s.jwtProvider.ProcessJWTToken(req.RefreshToken)
	if err != nil {
		s.logger.Warn("Invalid refresh token JWT", zap.Error(err))
		return nil, fmt.Errorf("invalid refresh token")
	}
	// Step 2: Check if refresh token exists in cache
	// SECURITY: Hash refresh token to match how it was stored (prevents token leakage via cache keys)
	refreshTokenHash := hash.HashToken(req.RefreshToken)
	refreshKey := fmt.Sprintf("refresh:%s", refreshTokenHash)
	cachedUserID, err := s.cache.Get(ctx, refreshKey)
	if err != nil || cachedUserID == nil {
		s.logger.Warn("Refresh token not found in cache", zap.String("user_id", userID))
		return nil, fmt.Errorf("refresh token not found or expired")
	}
	// Step 3: Verify user IDs match (JWT claim vs. cached owner of the token).
	if string(cachedUserID) != userID {
		s.logger.Warn("User ID mismatch", zap.String("jwt_user_id", userID), zap.String("cached_user_id", string(cachedUserID)))
		return nil, fmt.Errorf("invalid refresh token")
	}
	// Step 4: Generate new token pair (token rotation for security)
	newAccessToken, accessExpiry, newRefreshToken, refreshExpiry, err := s.jwtProvider.GenerateJWTTokenPair(
		userID,
		s.config.JWT.AccessTokenDuration,
		s.config.JWT.RefreshTokenDuration,
	)
	if err != nil {
		s.logger.Error("Failed to generate new tokens", zap.Error(err))
		return nil, fmt.Errorf("failed to generate new tokens")
	}
	// Step 5: Store NEW refresh token FIRST (compensate: delete new token)
	// CRITICAL: Store new token before deleting old token to prevent lockout
	// SECURITY: Hash new refresh token to prevent token leakage via cache key inspection
	newRefreshTokenHash := hash.HashToken(newRefreshToken)
	newRefreshKey := fmt.Sprintf("refresh:%s", newRefreshTokenHash)
	if err := s.cache.SetWithExpiry(ctx, newRefreshKey, []byte(userID), s.config.JWT.RefreshTokenDuration); err != nil {
		s.logger.Error("Failed to store new refresh token", zap.Error(err))
		return nil, fmt.Errorf("failed to store new refresh token")
	}
	// Register compensation: if deletion of old token fails, delete new token
	newRefreshKeyCaptured := newRefreshKey
	saga.AddCompensation(func(ctx context.Context) error {
		s.logger.Info("compensating: deleting new refresh token",
			zap.String("new_refresh_key", newRefreshKeyCaptured))
		return s.cache.Delete(ctx, newRefreshKeyCaptured)
	})
	// Step 6: Delete old refresh token from cache (compensate: restore old token)
	oldRefreshKeyCaptured := refreshKey
	oldUserIDCaptured := userID
	if err := s.cache.Delete(ctx, refreshKey); err != nil {
		s.logger.Error("Failed to delete old refresh token",
			zap.String("refresh_key", refreshKey),
			zap.Error(err))
		// Trigger compensation: Delete new token (restore consistency)
		saga.Rollback(ctx)
		return nil, fmt.Errorf("failed to delete old refresh token: %w", err)
	}
	// Register compensation: restore old token with reduced TTL (1 hour grace period).
	// NOTE(review): no later step calls saga.Rollback, so this compensation is
	// currently unreachable — it only matters if a fallible step is added below.
	saga.AddCompensation(func(ctx context.Context) error {
		s.logger.Info("compensating: restoring old refresh token",
			zap.String("old_refresh_key", oldRefreshKeyCaptured))
		// Restore with reduced TTL (1 hour) to allow user retry without long-lived old token
		return s.cache.SetWithExpiry(ctx, oldRefreshKeyCaptured, []byte(oldUserIDCaptured), 1*time.Hour)
	})
	// Step 7: Get user to retrieve username/email (read-only, no compensation needed)
	userUUID, err := gocql.ParseUUID(userID)
	if err != nil {
		s.logger.Error("Invalid user ID", zap.Error(err))
		// No rollback needed for UUID parsing error (tokens already rotated successfully)
		return nil, fmt.Errorf("invalid user ID")
	}
	user, err := s.userGetByIDUC.Execute(ctx, userUUID)
	if err != nil || user == nil {
		s.logger.Error("User not found", zap.String("user_id", userID), zap.Error(err))
		// No rollback needed for user lookup error (tokens already rotated successfully)
		return nil, fmt.Errorf("user not found")
	}
	// NOTE(review): the slice below assumes the generated token is at least 16
	// characters; JWTs are in practice, but a length guard would be safer.
	s.logger.Info("Token refreshed successfully",
		zap.String("user_id", userID),
		zap.String("new_refresh_token", newRefreshToken[:16]+"...")) // Log prefix only for security
	// Audit log token refresh
	s.auditLogger.LogAuth(ctx, auditlog.EventTypeTokenRefresh, auditlog.OutcomeSuccess,
		"", "", map[string]string{
			"user_id": userID,
		})
	return &RefreshTokenResponseDTO{
		Message:                "Token refreshed successfully",
		AccessToken:            newAccessToken,
		RefreshToken:           newRefreshToken,
		AccessTokenExpiryDate:  accessExpiry.Format(time.RFC3339),
		RefreshTokenExpiryDate: refreshExpiry.Format(time.RFC3339),
		Username:               user.Email,
	}, nil
}

View file

@ -0,0 +1,390 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/auth/register.go
package auth
import (
"context"
"crypto/rand"
"encoding/base64"
"encoding/hex"
"fmt"
"html"
"net/mail"
"strings"
"time"
"github.com/awnumar/memguard"
"github.com/gocql/gocql"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/crypto"
dom_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/user"
uc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/user"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/auditlog"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/emailer/mailgun"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/transaction"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/validation"
)
// RegisterRequestDTO is the payload for account registration. All key
// material arrives pre-encrypted from the client (end-to-end encryption:
// the server never sees plaintext keys or the password itself).
//
// NOTE(review): field tags mix snake_case and camelCase (e.g. "first_name"
// vs "encryptedMasterKey") — presumably matching the existing client;
// confirm before normalizing.
type RegisterRequestDTO struct {
	BetaAccessCode string `json:"beta_access_code"`
	Email          string `json:"email"`
	FirstName      string `json:"first_name"`
	LastName       string `json:"last_name"`
	Phone          string `json:"phone"`
	Country        string `json:"country"`
	Timezone       string `json:"timezone"`
	// Key-derivation parameters used by the client to derive the key
	// encryption key from the password (PBKDF2-SHA256 or argon2id).
	PasswordSalt   string `json:"salt"`
	KDFAlgorithm   string `json:"kdf_algorithm"`
	KDFIterations  int    `json:"kdf_iterations"`
	KDFMemory      int    `json:"kdf_memory"`
	KDFParallelism int    `json:"kdf_parallelism"`
	KDFSaltLength  int    `json:"kdf_salt_length"`
	KDFKeyLength   int    `json:"kdf_key_length"`
	// E2EE payloads, base64- (or hex-) encoded; the encrypted blobs are
	// expected to be nonce-prefixed (24-byte nonce || ciphertext).
	EncryptedMasterKey                string `json:"encryptedMasterKey"`
	PublicKey                         string `json:"publicKey"`
	EncryptedPrivateKey               string `json:"encryptedPrivateKey"`
	EncryptedRecoveryKey              string `json:"encryptedRecoveryKey"`
	MasterKeyEncryptedWithRecoveryKey string `json:"masterKeyEncryptedWithRecoveryKey"`
	// Consent flags.
	AgreeTermsOfService                            bool `json:"agree_terms_of_service"`
	AgreePromotions                                bool `json:"agree_promotions"`
	AgreeToTrackingAcrossThirdPartyAppsAndServices bool `json:"agree_to_tracking_across_third_party_apps_and_services"`
}
// RegisterResponseDTO is returned after a successful registration; UserID is
// the new user's UUID in string form.
type RegisterResponseDTO struct {
	Message string `json:"message"`
	UserID  string `json:"user_id"`
}
// RegisterService creates a new user account and sends an email verification
// code.
type RegisterService interface {
	Execute(ctx context.Context, req *RegisterRequestDTO) (*RegisterResponseDTO, error)
}
// registerServiceImpl implements RegisterService. The delete use case exists
// solely for saga compensation (roll back user creation if the verification
// email cannot be sent).
type registerServiceImpl struct {
	config           *config.Config
	logger           *zap.Logger
	auditLogger      auditlog.AuditLogger
	userCreateUC     uc_user.UserCreateUseCase
	userGetByEmailUC uc_user.UserGetByEmailUseCase
	userDeleteByIDUC uc_user.UserDeleteByIDUseCase
	emailer          mailgun.Emailer
}
// NewRegisterService constructs a RegisterService with its persistence,
// audit, and email dependencies. The logger is scoped with the service name.
func NewRegisterService(
	config *config.Config,
	logger *zap.Logger,
	auditLogger auditlog.AuditLogger,
	userCreateUC uc_user.UserCreateUseCase,
	userGetByEmailUC uc_user.UserGetByEmailUseCase,
	userDeleteByIDUC uc_user.UserDeleteByIDUseCase,
	emailer mailgun.Emailer,
) RegisterService {
	svc := &registerServiceImpl{
		config:           config,
		logger:           logger.Named("RegisterService"),
		auditLogger:      auditLogger,
		userCreateUC:     userCreateUC,
		userGetByEmailUC: userGetByEmailUC,
		userDeleteByIDUC: userDeleteByIDUC,
		emailer:          emailer,
	}
	return svc
}
// Execute registers a new user with end-to-end-encryption key material.
//
// Saga-coordinated workflow:
//  1. Reject duplicate emails.
//  2. Generate an email verification code (24h expiry).
//  3. Decode and length-check the client-supplied E2EE payloads.
//  4. Build the user aggregate.
//  5. Persist the user (compensated by deletion).
//  6. Send the verification email; on failure the saga deletes the user.
//
// Returns an RFC 9457 ProblemDetail for validation failures, a conflict
// error for duplicate emails, and wrapped errors otherwise.
func (s *registerServiceImpl) Execute(ctx context.Context, req *RegisterRequestDTO) (*RegisterResponseDTO, error) {
	// Validate request first - backend is the single source of truth for validation
	if err := s.validateRegisterRequest(req); err != nil {
		return nil, err // Returns RFC 9457 ProblemDetail
	}
	// Create SAGA for user registration workflow
	saga := transaction.NewSaga("register", s.logger)
	s.logger.Info("starting user registration")
	// Step 1: Check if user already exists (read-only, no compensation)
	existingUser, err := s.userGetByEmailUC.Execute(ctx, req.Email)
	if err == nil && existingUser != nil {
		s.logger.Warn("User already exists", zap.String("email", validation.MaskEmail(req.Email)))
		return nil, httperror.NewConflictError("User with this email already exists")
	}
	// Step 2: Generate verification code
	verificationCode := s.generateVerificationCode()
	verificationExpiry := time.Now().Add(24 * time.Hour)
	// Step 3: Parse E2EE keys from base64 (with hex fallback in decodeBase64)
	passwordSalt, err := s.decodeBase64(req.PasswordSalt)
	if err != nil {
		return nil, fmt.Errorf("invalid password salt: %w", err)
	}
	encryptedMasterKey, err := s.decodeBase64(req.EncryptedMasterKey)
	if err != nil {
		return nil, fmt.Errorf("invalid encrypted master key: %w", err)
	}
	publicKey, err := s.decodeBase64(req.PublicKey)
	if err != nil {
		return nil, fmt.Errorf("invalid public key: %w", err)
	}
	encryptedPrivateKey, err := s.decodeBase64(req.EncryptedPrivateKey)
	if err != nil {
		return nil, fmt.Errorf("invalid encrypted private key: %w", err)
	}
	encryptedRecoveryKey, err := s.decodeBase64(req.EncryptedRecoveryKey)
	if err != nil {
		return nil, fmt.Errorf("invalid encrypted recovery key: %w", err)
	}
	masterKeyEncryptedWithRecoveryKey, err := s.decodeBase64(req.MasterKeyEncryptedWithRecoveryKey)
	if err != nil {
		return nil, fmt.Errorf("invalid master key encrypted with recovery key: %w", err)
	}
	// FIX: every nonce-prefixed payload below is sliced as data[:24]/data[24:].
	// Previously a decoded payload shorter than 24 bytes caused a
	// slice-out-of-range panic on attacker-controlled input. Validate lengths
	// up front so malformed input returns an error instead of crashing.
	const nonceSize = 24 // 24-byte nonce prefix (XChaCha20-style) — TODO confirm against client crypto
	for _, p := range []struct {
		name string
		data []byte
	}{
		{"encrypted master key", encryptedMasterKey},
		{"encrypted private key", encryptedPrivateKey},
		{"encrypted recovery key", encryptedRecoveryKey},
		{"master key encrypted with recovery key", masterKeyEncryptedWithRecoveryKey},
	} {
		if len(p.data) <= nonceSize {
			return nil, fmt.Errorf("invalid %s: expected nonce-prefixed payload longer than %d bytes, got %d", p.name, nonceSize, len(p.data))
		}
	}
	// Step 4: Create user object
	user := &dom_user.User{
		ID:          gocql.TimeUUID(),
		Email:       req.Email,
		FirstName:   req.FirstName,
		LastName:    req.LastName,
		Name:        req.FirstName + " " + req.LastName,
		LexicalName: req.LastName + ", " + req.FirstName,
		Role:        dom_user.UserRoleIndividual,
		Status:      dom_user.UserStatusActive,
		Timezone:    req.Timezone,
		ProfileData: &dom_user.UserProfileData{
			Phone:               req.Phone,
			Country:             req.Country,
			Timezone:            req.Timezone,
			AgreeTermsOfService: req.AgreeTermsOfService,
			AgreePromotions:     req.AgreePromotions,
			AgreeToTrackingAcrossThirdPartyAppsAndServices: req.AgreeToTrackingAcrossThirdPartyAppsAndServices,
		},
		SecurityData: &dom_user.UserSecurityData{
			WasEmailVerified: false,
			Code:             verificationCode,
			CodeType:         dom_user.UserCodeTypeEmailVerification,
			CodeExpiry:       verificationExpiry,
			PasswordSalt:     passwordSalt,
			KDFParams: crypto.KDFParams{
				Algorithm:   req.KDFAlgorithm, // Use the algorithm from the request (PBKDF2-SHA256 or argon2id)
				Iterations:  uint32(req.KDFIterations),
				Memory:      uint32(req.KDFMemory),
				Parallelism: uint8(req.KDFParallelism),
				SaltLength:  uint32(req.KDFSaltLength),
				KeyLength:   uint32(req.KDFKeyLength),
			},
			EncryptedMasterKey: crypto.EncryptedMasterKey{
				Nonce:      encryptedMasterKey[:nonceSize],
				Ciphertext: encryptedMasterKey[nonceSize:],
				KeyVersion: 1,
			},
			PublicKey: crypto.PublicKey{
				Key: publicKey,
			},
			EncryptedPrivateKey: crypto.EncryptedPrivateKey{
				Nonce:      encryptedPrivateKey[:nonceSize],
				Ciphertext: encryptedPrivateKey[nonceSize:],
			},
			EncryptedRecoveryKey: crypto.EncryptedRecoveryKey{
				Nonce:      encryptedRecoveryKey[:nonceSize],
				Ciphertext: encryptedRecoveryKey[nonceSize:],
			},
			MasterKeyEncryptedWithRecoveryKey: crypto.MasterKeyEncryptedWithRecoveryKey{
				Nonce:      masterKeyEncryptedWithRecoveryKey[:nonceSize],
				Ciphertext: masterKeyEncryptedWithRecoveryKey[nonceSize:],
			},
		},
		CreatedAt:  time.Now(),
		ModifiedAt: time.Now(),
	}
	// Step 5: Save user to database FIRST (compensate: delete user if email fails)
	// CRITICAL: Create user before sending email to enable rollback if email fails
	if err := s.userCreateUC.Execute(ctx, user); err != nil {
		s.logger.Error("Failed to create user", zap.Error(err))
		return nil, fmt.Errorf("failed to create user: %w", err)
	}
	// Register compensation: delete user if email sending fails
	userIDCaptured := user.ID
	saga.AddCompensation(func(ctx context.Context) error {
		s.logger.Info("compensating: deleting user due to email failure",
			zap.String("user_id", userIDCaptured.String()),
			zap.String("email", validation.MaskEmail(req.Email)))
		return s.userDeleteByIDUC.Execute(ctx, userIDCaptured)
	})
	// Step 6: Send verification email - MUST succeed or rollback
	// NOTE: Default tags are NOT created server-side due to E2EE
	// The client must create default tags after first login using the user's master key
	if err := s.sendVerificationEmail(ctx, req.Email, req.FirstName, verificationCode); err != nil {
		s.logger.Error("Failed to send verification email",
			zap.String("email", validation.MaskEmail(req.Email)),
			zap.Error(err))
		// Trigger compensation: Delete user from database
		saga.Rollback(ctx)
		return nil, fmt.Errorf("failed to send verification email, please try again later")
	}
	s.logger.Info("User registered successfully",
		zap.String("user_id", user.ID.String()),
		zap.String("email", validation.MaskEmail(req.Email)))
	// Audit log successful registration
	s.auditLogger.LogAuth(ctx, auditlog.EventTypeAccountCreated, auditlog.OutcomeSuccess,
		validation.MaskEmail(req.Email), "", map[string]string{
			"user_id": user.ID.String(),
		})
	return &RegisterResponseDTO{
		Message: "Registration successful. Please check your email to verify your account.",
		UserID:  user.ID.String(),
	}, nil
}
// generateVerificationCode returns a cryptographically random 8-digit code
// (10000000-99999999) as a string.
//
// 8 digits = 90,000,000 combinations vs 6 digits = 900,000. A slight modulo
// bias remains (2^32 is not a multiple of 9e7), acceptable for a short-lived
// email code.
func (s *registerServiceImpl) generateVerificationCode() string {
	b := make([]byte, 4)
	// FIX: the error from crypto/rand.Read was silently ignored. A failing
	// CSPRNG must never silently yield a predictable code; failing hard is
	// the only safe option here.
	if _, err := rand.Read(b); err != nil {
		panic(fmt.Sprintf("crypto/rand failure while generating verification code: %v", err))
	}
	defer memguard.WipeBytes(b) // SECURITY: Wipe random bytes after use
	n := uint32(b[0])<<24 | uint32(b[1])<<16 | uint32(b[2])<<8 | uint32(b[3])
	code := (n % 90000000) + 10000000
	return fmt.Sprintf("%d", code)
}
// decodeBase64 decodes a client-supplied binary payload. Standard base64 is
// attempted first; on failure the value is retried as hex, since some clients
// transmit hex-encoded material. If neither encoding matches, the original
// base64 error is returned.
func (s *registerServiceImpl) decodeBase64(encoded string) ([]byte, error) {
	decoded, b64Err := base64.StdEncoding.DecodeString(encoded)
	if b64Err == nil {
		return decoded, nil
	}
	// Hex fallback for clients that send hex-encoded key material.
	hexDecoded, hexErr := hex.DecodeString(encoded)
	if hexErr == nil {
		return hexDecoded, nil
	}
	return nil, b64Err
}
// sendVerificationEmail delivers the email-verification code to the new user.
// The first name is HTML-escaped before interpolation to prevent HTML
// injection into the message body.
func (s *registerServiceImpl) sendVerificationEmail(ctx context.Context, email, firstName, code string) error {
	const subject = "Verify Your MapleFile Account"
	htmlContent := fmt.Sprintf(`
	<html>
	<body>
		<h2>Welcome to MapleFile, %s!</h2>
		<p>Thank you for registering. Please verify your email address by entering this code:</p>
		<h1 style="color: #4CAF50; font-size: 32px; letter-spacing: 5px;">%s</h1>
		<p>This code will expire in 24 hours.</p>
		<p>If you didn't create this account, please ignore this email.</p>
	</body>
	</html>
	`, html.EscapeString(firstName), code)
	return s.emailer.Send(ctx, s.emailer.GetSenderEmail(), subject, email, htmlContent)
}
// validateRegisterRequest validates all registration fields and returns an
// RFC 9457 ProblemDetail error carrying field-specific messages, or nil when
// the request is acceptable.
//
// All E2EE/KDF inputs collapse into a single user-facing "password" error:
// they are all produced by the client's encryption setup, so from the user's
// perspective the only actionable problem is the master password step.
func (s *registerServiceImpl) validateRegisterRequest(req *RegisterRequestDTO) error {
	fieldErrors := make(map[string]string)

	// Beta gate.
	if strings.TrimSpace(req.BetaAccessCode) == "" {
		fieldErrors["beta_access_code"] = "Beta access code is required"
	}

	// Names: required, capped at 100 (bytes — matches existing behavior).
	switch {
	case strings.TrimSpace(req.FirstName) == "":
		fieldErrors["first_name"] = "First name is required"
	case len(req.FirstName) > 100:
		fieldErrors["first_name"] = "First name must be less than 100 characters"
	}
	switch {
	case strings.TrimSpace(req.LastName) == "":
		fieldErrors["last_name"] = "Last name is required"
	case len(req.LastName) > 100:
		fieldErrors["last_name"] = "Last name must be less than 100 characters"
	}

	// Email: required and parseable per RFC 5322 (net/mail).
	if email := strings.TrimSpace(req.Email); email == "" {
		fieldErrors["email"] = "Email is required"
	} else if _, err := mail.ParseAddress(email); err != nil {
		fieldErrors["email"] = "Please enter a valid email address"
	}

	if strings.TrimSpace(req.Phone) == "" {
		fieldErrors["phone"] = "Phone number is required"
	}
	if strings.TrimSpace(req.Timezone) == "" {
		fieldErrors["timezone"] = "Timezone is required"
	}

	// E2EE payloads: all required; any absence maps onto the "password" key.
	for _, payload := range []string{
		req.PasswordSalt,
		req.EncryptedMasterKey,
		req.PublicKey,
		req.EncryptedPrivateKey,
		req.EncryptedRecoveryKey,
		req.MasterKeyEncryptedWithRecoveryKey,
	} {
		if strings.TrimSpace(payload) == "" {
			fieldErrors["password"] = "Master password is required for encryption setup"
		}
	}
	// KDF parameters must be present and positive.
	if req.KDFAlgorithm == "" || req.KDFIterations <= 0 {
		fieldErrors["password"] = "Master password is required for encryption setup"
	}

	if !req.AgreeTermsOfService {
		fieldErrors["agree_terms_of_service"] = "You must agree to the terms of service to register"
	}

	if len(fieldErrors) > 0 {
		return httperror.NewValidationError(fieldErrors)
	}
	return nil
}

View file

@ -0,0 +1,184 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/auth/request_ott.go
package auth
import (
"context"
"crypto/rand"
"fmt"
"html"
"strings"
"time"
"github.com/awnumar/memguard"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
uc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/user"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/emailer/mailgun"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/storage/cache/cassandracache"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/transaction"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/validation"
)
// RequestOTTRequestDTO identifies the account requesting a one-time login
// token (OTT) by email.
type RequestOTTRequestDTO struct {
	Email string `json:"email"`
}
// RequestOTTResponseDTO acknowledges an OTT request. Success is true even
// when the account does not exist, to avoid revealing account existence.
type RequestOTTResponseDTO struct {
	Message string `json:"message"`
	Success bool   `json:"success"`
}
// RequestOTTService generates a one-time login token and emails it to the
// account holder.
type RequestOTTService interface {
	Execute(ctx context.Context, req *RequestOTTRequestDTO) (*RequestOTTResponseDTO, error)
}
// requestOTTServiceImpl implements RequestOTTService, storing OTT codes in a
// Cassandra-backed cache and delivering them via the configured emailer.
type requestOTTServiceImpl struct {
	config           *config.Config
	logger           *zap.Logger
	userGetByEmailUC uc_user.UserGetByEmailUseCase
	cache            cassandracache.CassandraCacher
	emailer          mailgun.Emailer
}
// NewRequestOTTService constructs a RequestOTTService with its lookup, cache,
// and email dependencies. The logger is scoped with the service name.
func NewRequestOTTService(
	config *config.Config,
	logger *zap.Logger,
	userGetByEmailUC uc_user.UserGetByEmailUseCase,
	cache cassandracache.CassandraCacher,
	emailer mailgun.Emailer,
) RequestOTTService {
	svc := &requestOTTServiceImpl{
		config:           config,
		logger:           logger.Named("RequestOTTService"),
		userGetByEmailUC: userGetByEmailUC,
		cache:            cache,
		emailer:          emailer,
	}
	return svc
}
// Execute generates an 8-digit one-time token for the given email, stores it
// in the cache (10-minute TTL), and emails it to the user. A saga deletes the
// cached OTT if the email cannot be sent. To avoid revealing whether an
// account exists, unknown emails receive the same success response.
func (s *requestOTTServiceImpl) Execute(ctx context.Context, req *RequestOTTRequestDTO) (*RequestOTTResponseDTO, error) {
	// Validate request
	if err := s.validateRequestOTTRequest(req); err != nil {
		return nil, err // Returns RFC 9457 ProblemDetail
	}
	// Create SAGA for OTT request workflow
	saga := transaction.NewSaga("request-ott", s.logger)
	s.logger.Info("starting OTT request")
	// Step 1: Normalize email
	email := strings.ToLower(strings.TrimSpace(req.Email))
	// Step 2: Check if user exists and is verified (read-only, no compensation)
	user, err := s.userGetByEmailUC.Execute(ctx, email)
	if err != nil || user == nil {
		s.logger.Warn("User not found", zap.String("email", validation.MaskEmail(email)))
		// For security, don't reveal if user exists
		return &RequestOTTResponseDTO{
			Message: "If an account exists with this email, you will receive an OTT code shortly.",
			Success: true,
		}, nil
	}
	// Step 3: Check if email is verified
	if user.SecurityData == nil || !user.SecurityData.WasEmailVerified {
		s.logger.Warn("User email not verified", zap.String("email", validation.MaskEmail(email)))
		return nil, httperror.NewBadRequestError("Email address not verified. Please verify your email before logging in.")
	}
	// Step 4: Generate 8-digit OTT code
	ottCode := s.generateOTTCode()
	ottCodeBytes := []byte(ottCode)
	defer memguard.WipeBytes(ottCodeBytes) // SECURITY: Wipe OTT code from memory after use
	// Step 5: Store OTT in cache FIRST (compensate: delete OTT if email fails)
	// CRITICAL: Store OTT before sending email to enable rollback if email fails
	cacheKey := fmt.Sprintf("ott:%s", email)
	if err := s.cache.SetWithExpiry(ctx, cacheKey, []byte(ottCode), 10*time.Minute); err != nil {
		s.logger.Error("Failed to store OTT in cache", zap.Error(err))
		return nil, httperror.NewInternalServerError("Failed to generate login code. Please try again later.")
	}
	// Register compensation: delete OTT if email sending fails
	cacheKeyCaptured := cacheKey
	saga.AddCompensation(func(ctx context.Context) error {
		s.logger.Info("compensating: deleting OTT due to email failure",
			zap.String("cache_key", cacheKeyCaptured))
		return s.cache.Delete(ctx, cacheKeyCaptured)
	})
	// Step 6: Send OTT email - MUST succeed or rollback
	if err := s.sendOTTEmail(ctx, email, user.FirstName, ottCode); err != nil {
		s.logger.Error("Failed to send OTT email",
			zap.String("email", validation.MaskEmail(email)),
			zap.Error(err))
		// Trigger compensation: Delete OTT from cache
		saga.Rollback(ctx)
		return nil, httperror.NewInternalServerError("Failed to send login code email. Please try again later.")
	}
	// FIX: the old `cacheKey[:16]` slice panicked for emails shorter than 12
	// characters (cacheKey is "ott:"+email, so e.g. "a@b.co" yields a 10-byte
	// key). Guard the truncation instead of slicing unconditionally.
	// NOTE(review): the key embeds the raw email while other logs mask it —
	// consider dropping this field entirely for consistency.
	keyPreview := cacheKey
	if len(keyPreview) > 16 {
		keyPreview = keyPreview[:16] + "..."
	}
	s.logger.Info("OTT generated and sent successfully",
		zap.String("email", validation.MaskEmail(email)),
		zap.String("cache_key", keyPreview)) // Log prefix for security
	return &RequestOTTResponseDTO{
		Message: "OTT code sent to your email. Please check your inbox.",
		Success: true,
	}, nil
}
// generateOTTCode returns a cryptographically random 8-digit one-time token
// (10000000-99999999) as a string.
//
// 8 digits = 90,000,000 combinations vs 6 digits = 900,000. A slight modulo
// bias remains (2^32 is not a multiple of 9e7), acceptable for a 10-minute
// login code.
func (s *requestOTTServiceImpl) generateOTTCode() string {
	b := make([]byte, 4)
	// FIX: the error from crypto/rand.Read was silently ignored. A failing
	// CSPRNG must never silently yield a predictable login code.
	if _, err := rand.Read(b); err != nil {
		panic(fmt.Sprintf("crypto/rand failure while generating OTT code: %v", err))
	}
	defer memguard.WipeBytes(b) // SECURITY: Wipe random bytes after use
	n := uint32(b[0])<<24 | uint32(b[1])<<16 | uint32(b[2])<<8 | uint32(b[3])
	code := (n % 90000000) + 10000000
	return fmt.Sprintf("%d", code)
}
// sendOTTEmail delivers the one-time login code to the user. The first name
// is HTML-escaped before interpolation to prevent HTML injection into the
// message body.
func (s *requestOTTServiceImpl) sendOTTEmail(ctx context.Context, email, firstName, code string) error {
	const subject = "Your MapleFile Login Code"
	htmlContent := fmt.Sprintf(`
	<html>
	<body>
		<h2>Hello %s,</h2>
		<p>Here is your one-time login code for MapleFile:</p>
		<h1 style="color: #4CAF50; font-size: 32px; letter-spacing: 5px;">%s</h1>
		<p>This code will expire in 10 minutes.</p>
		<p>If you didn't request this code, please ignore this email.</p>
	</body>
	</html>
	`, html.EscapeString(firstName), code)
	return s.emailer.Send(ctx, s.emailer.GetSenderEmail(), subject, email, htmlContent)
}
// validateRequestOTTRequest validates the OTT request, returning an RFC 9457
// ProblemDetail error with field-specific messages, or nil when valid.
func (s *requestOTTServiceImpl) validateRequestOTTRequest(req *RequestOTTRequestDTO) error {
	fieldErrors := make(map[string]string)
	// Email is the only field; delegate to the shared validation utility.
	if msg := validation.ValidateEmail(req.Email); msg != "" {
		fieldErrors["email"] = msg
	}
	if len(fieldErrors) == 0 {
		return nil
	}
	return httperror.NewValidationError(fieldErrors)
}

View file

@ -0,0 +1,199 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/auth/resend_verification.go
package auth
import (
"context"
"crypto/rand"
"fmt"
"html"
"time"
"github.com/awnumar/memguard"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
dom_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/user"
uc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/user"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/emailer/mailgun"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/transaction"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/validation"
)
// ResendVerificationRequestDTO identifies, by email, the account requesting a
// fresh email-verification code.
type ResendVerificationRequestDTO struct {
	Email string `json:"email"`
}
// ResendVerificationResponseDTO acknowledges a resend request. The message is
// deliberately identical whether or not the account exists or is already
// verified, to avoid revealing account state.
type ResendVerificationResponseDTO struct {
	Message string `json:"message"`
}
// ResendVerificationService issues a new email-verification code for an
// unverified account and emails it to the user.
type ResendVerificationService interface {
	Execute(ctx context.Context, req *ResendVerificationRequestDTO) (*ResendVerificationResponseDTO, error)
}
// resendVerificationServiceImpl implements ResendVerificationService. The
// update use case both persists the new code and (via saga compensation)
// restores the previous code if the email cannot be sent.
type resendVerificationServiceImpl struct {
	config           *config.Config
	logger           *zap.Logger
	userGetByEmailUC uc_user.UserGetByEmailUseCase
	userUpdateUC     uc_user.UserUpdateUseCase
	emailer          mailgun.Emailer
}
// NewResendVerificationService constructs a ResendVerificationService with
// its lookup, update, and email dependencies. The logger is scoped with the
// service name.
func NewResendVerificationService(
	config *config.Config,
	logger *zap.Logger,
	userGetByEmailUC uc_user.UserGetByEmailUseCase,
	userUpdateUC uc_user.UserUpdateUseCase,
	emailer mailgun.Emailer,
) ResendVerificationService {
	svc := &resendVerificationServiceImpl{
		config:           config,
		logger:           logger.Named("ResendVerificationService"),
		userGetByEmailUC: userGetByEmailUC,
		userUpdateUC:     userUpdateUC,
		emailer:          emailer,
	}
	return svc
}
// Execute generates and emails a fresh verification code for an unverified
// account. The new code is persisted before the email is sent; a saga
// compensation restores the previous code if the email fails. Responses are
// deliberately uniform so callers cannot probe whether an account exists or
// is already verified.
func (s *resendVerificationServiceImpl) Execute(ctx context.Context, req *ResendVerificationRequestDTO) (*ResendVerificationResponseDTO, error) {
	// Validate request
	if err := s.validateResendVerificationRequest(req); err != nil {
		return nil, err // Returns RFC 9457 ProblemDetail
	}
	// Create SAGA for resend verification workflow
	saga := transaction.NewSaga("resend-verification", s.logger)
	s.logger.Info("starting resend verification")
	// Step 1: Get user by email (read-only, no compensation)
	user, err := s.userGetByEmailUC.Execute(ctx, req.Email)
	if err != nil || user == nil {
		s.logger.Warn("User not found for resend verification", zap.String("email", validation.MaskEmail(req.Email)))
		// Don't reveal if user exists or not for security
		return &ResendVerificationResponseDTO{
			Message: "If the email exists and is unverified, a new verification code has been sent.",
		}, nil
	}
	// Step 2: Check if email is already verified
	if user.SecurityData != nil && user.SecurityData.WasEmailVerified {
		s.logger.Info("Email already verified", zap.String("email", validation.MaskEmail(req.Email)))
		// Don't reveal that email is already verified for security
		return &ResendVerificationResponseDTO{
			Message: "If the email exists and is unverified, a new verification code has been sent.",
		}, nil
	}
	// Step 3: Backup old verification data for compensation
	var oldCode string
	var oldCodeExpiry time.Time
	if user.SecurityData != nil {
		oldCode = user.SecurityData.Code
		oldCodeExpiry = user.SecurityData.CodeExpiry
	}
	// Step 4: Generate new verification code (random 8-digit, 24h expiry)
	verificationCode := s.generateVerificationCode()
	verificationExpiry := time.Now().Add(24 * time.Hour)
	// Step 5: Update user with new code (in memory only at this point)
	if user.SecurityData == nil {
		user.SecurityData = &dom_user.UserSecurityData{}
	}
	user.SecurityData.Code = verificationCode
	user.SecurityData.CodeType = dom_user.UserCodeTypeEmailVerification
	user.SecurityData.CodeExpiry = verificationExpiry
	user.ModifiedAt = time.Now()
	// Step 6: Save updated user FIRST (compensate: restore old code if email fails)
	// CRITICAL: Save new code before sending email to enable rollback if email fails
	if err := s.userUpdateUC.Execute(ctx, user); err != nil {
		s.logger.Error("Failed to update user with new verification code", zap.Error(err))
		return nil, httperror.NewInternalServerError("Failed to update verification code. Please try again later.")
	}
	// Register compensation: restore old verification code if email fails.
	// NOTE(review): userCaptured aliases the same *User the method mutated
	// above; the compensation rewrites its Code/CodeExpiry in place and
	// persists it. Safe today because Execute returns immediately after
	// Rollback, but fragile if the object is reused later.
	userCaptured := user
	oldCodeCaptured := oldCode
	oldCodeExpiryCaptured := oldCodeExpiry
	saga.AddCompensation(func(ctx context.Context) error {
		s.logger.Info("compensating: restoring old verification code due to email failure",
			zap.String("email", validation.MaskEmail(userCaptured.Email)))
		userCaptured.SecurityData.Code = oldCodeCaptured
		userCaptured.SecurityData.CodeExpiry = oldCodeExpiryCaptured
		userCaptured.ModifiedAt = time.Now()
		return s.userUpdateUC.Execute(ctx, userCaptured)
	})
	// Step 7: Send verification email - MUST succeed or rollback
	if err := s.sendVerificationEmail(ctx, user.Email, user.FirstName, verificationCode); err != nil {
		s.logger.Error("Failed to send verification email",
			zap.String("email", validation.MaskEmail(user.Email)),
			zap.Error(err))
		// Trigger compensation: Restore old verification code
		saga.Rollback(ctx)
		return nil, httperror.NewInternalServerError("Failed to send verification email. Please try again later.")
	}
	s.logger.Info("Verification code resent successfully",
		zap.String("email", validation.MaskEmail(req.Email)),
		zap.String("user_id", user.ID.String()))
	return &ResendVerificationResponseDTO{
		Message: "If the email exists and is unverified, a new verification code has been sent.",
	}, nil
}
// generateVerificationCode returns a fresh random verification code as an
// 8-digit decimal string in the range 10000000-99999999.
//
// 8 digits = 90,000,000 combinations vs 6 digits = 900,000, raising the
// cost of online brute-force guessing.
func (s *resendVerificationServiceImpl) generateVerificationCode() string {
	b := make([]byte, 4)
	if _, err := rand.Read(b); err != nil {
		// A failing CSPRNG means the OS entropy source is broken; there is
		// no safe fallback for a security code, so fail loudly instead of
		// silently returning a code derived from zeroed bytes.
		panic(fmt.Sprintf("verification code generation: rand.Read: %v", err))
	}
	defer memguard.WipeBytes(b) // SECURITY: Wipe random bytes after use
	// Assemble the 4 random bytes as an unsigned value first: on platforms
	// where int is 32 bits, int(b[0])<<24 could overflow to a negative
	// number, and (negative % 90000000) + 10000000 would produce a value
	// outside the intended 8-digit range.
	// NOTE: the modulo introduces a tiny bias (2^32 is not a multiple of
	// 90000000), which is acceptable for a short-lived email code.
	n := uint32(b[0])<<24 | uint32(b[1])<<16 | uint32(b[2])<<8 | uint32(b[3])
	code := int(n%90000000) + 10000000
	return fmt.Sprintf("%d", code)
}
// sendVerificationEmail delivers the verification code to the given address
// via the configured emailer. firstName is HTML-escaped before being embedded
// in the message body; code is rendered prominently for manual entry.
// The "24 hours" wording must stay in sync with the expiry set by the caller.
func (s *resendVerificationServiceImpl) sendVerificationEmail(ctx context.Context, email, firstName, code string) error {
	subject := "Verify Your MapleFile Account"
	sender := s.emailer.GetSenderEmail()
	// Escape user input to prevent HTML injection
	safeFirstName := html.EscapeString(firstName)
	htmlContent := fmt.Sprintf(`
	<html>
	<body>
	<h2>Welcome to MapleFile, %s!</h2>
	<p>You requested a new verification code. Please verify your email address by entering this code:</p>
	<h1 style="color: #4CAF50; font-size: 32px; letter-spacing: 5px;">%s</h1>
	<p>This code will expire in 24 hours.</p>
	<p>If you didn't request this code, please ignore this email.</p>
	</body>
	</html>
	`, safeFirstName, code)
	// Send is assumed to be synchronous; a non-nil error lets the caller
	// roll back the stored code via its saga compensation.
	return s.emailer.Send(ctx, sender, subject, email, htmlContent)
}
// validateResendVerificationRequest checks the resend-verification payload.
// It returns an RFC 9457 ProblemDetail error carrying field-specific
// messages, or nil when the request is valid.
func (s *resendVerificationServiceImpl) validateResendVerificationRequest(req *ResendVerificationRequestDTO) error {
	fieldErrors := map[string]string{}
	// Delegate email syntax checking to the shared validation helper.
	if msg := validation.ValidateEmail(req.Email); msg != "" {
		fieldErrors["email"] = msg
	}
	if len(fieldErrors) == 0 {
		return nil
	}
	return httperror.NewValidationError(fieldErrors)
}

View file

@ -0,0 +1,127 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/auth/verify_email.go
package auth
import (
"context"
"fmt"
"strings"
"time"
"go.uber.org/zap"
uc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/user"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/auditlog"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/validation"
)
// VerifyEmailRequestDTO is the inbound payload for the email-verification
// endpoint.
type VerifyEmailRequestDTO struct {
	Code string `json:"code"` // 8-digit numeric code (enforced by validateVerifyEmailRequest)
}
// VerifyEmailResponseDTO reports the outcome of an email-verification attempt.
type VerifyEmailResponseDTO struct {
	Message string `json:"message"`
	Success bool `json:"success"`
	UserRole int8 `json:"user_role"` // role of the verified user (copied from user.Role)
}
// VerifyEmailService verifies a user's email address from a verification
// code previously sent by email.
type VerifyEmailService interface {
	Execute(ctx context.Context, req *VerifyEmailRequestDTO) (*VerifyEmailResponseDTO, error)
}
// verifyEmailServiceImpl is the default VerifyEmailService implementation;
// its dependencies are injected by NewVerifyEmailService.
type verifyEmailServiceImpl struct {
	logger *zap.Logger // named "VerifyEmailService" by the constructor
	auditLogger auditlog.AuditLogger
	userGetByVerificationCodeUC uc_user.UserGetByVerificationCodeUseCase // resolves the user owning a code
	userUpdateUC uc_user.UserUpdateUseCase // persists the verified state
}
// NewVerifyEmailService wires up a VerifyEmailService with its logging,
// auditing, and user use-case dependencies.
func NewVerifyEmailService(
	logger *zap.Logger,
	auditLogger auditlog.AuditLogger,
	userGetByVerificationCodeUC uc_user.UserGetByVerificationCodeUseCase,
	userUpdateUC uc_user.UserUpdateUseCase,
) VerifyEmailService {
	svc := &verifyEmailServiceImpl{
		logger:                      logger.Named("VerifyEmailService"),
		auditLogger:                 auditLogger,
		userGetByVerificationCodeUC: userGetByVerificationCodeUC,
		userUpdateUC:                userUpdateUC,
	}
	return svc
}
// Execute verifies a user's email address using the code they received.
// On success it marks the account verified, clears the single-use code,
// persists the user, and writes an audit log entry.
func (s *verifyEmailServiceImpl) Execute(ctx context.Context, req *VerifyEmailRequestDTO) (*VerifyEmailResponseDTO, error) {
	// Validate request shape first.
	if err := s.validateVerifyEmailRequest(req); err != nil {
		return nil, err // Returns RFC 9457 ProblemDetail
	}
	// Get user by verification code
	user, err := s.userGetByVerificationCodeUC.Execute(ctx, req.Code)
	if err != nil || user == nil {
		s.logger.Warn("Invalid verification code attempted")
		return nil, httperror.NewNotFoundError("Verification code not found or has already been used")
	}
	// BUGFIX: guard against a nil SecurityData before dereferencing it.
	// Other auth flows treat this field as optional (they check
	// `user.SecurityData != nil`), so a nil value here would panic on the
	// expiry check below.
	if user.SecurityData == nil {
		s.logger.Warn("User has no security data during email verification",
			zap.String("user_id", user.ID.String()))
		return nil, httperror.NewNotFoundError("Verification code not found or has already been used")
	}
	// Check if code has expired
	if time.Now().After(user.SecurityData.CodeExpiry) {
		s.logger.Warn("Verification code expired",
			zap.String("user_id", user.ID.String()),
			zap.Time("expiry", user.SecurityData.CodeExpiry))
		return nil, httperror.NewBadRequestError("Verification code has expired. Please request a new verification email.")
	}
	// Mark the account verified and clear the single-use code so it cannot
	// be replayed.
	user.SecurityData.WasEmailVerified = true
	user.SecurityData.Code = ""
	user.SecurityData.CodeExpiry = time.Time{}
	user.ModifiedAt = time.Now()
	if err := s.userUpdateUC.Execute(ctx, user); err != nil {
		s.logger.Error("Failed to update user", zap.Error(err))
		// NOTE(review): this echoes the internal error to the client; other
		// handlers in this package return a generic message instead —
		// consider aligning to avoid leaking implementation details.
		return nil, httperror.NewInternalServerError(fmt.Sprintf("Failed to verify email: %v", err))
	}
	s.logger.Info("Email verified successfully", zap.String("user_id", user.ID.String()))
	// Audit log email verification
	s.auditLogger.LogAuth(ctx, auditlog.EventTypeEmailVerified, auditlog.OutcomeSuccess,
		validation.MaskEmail(user.Email), "", map[string]string{
			"user_id": user.ID.String(),
		})
	return &VerifyEmailResponseDTO{
		Message:  "Email verified successfully. You can now log in.",
		Success:  true,
		UserRole: user.Role,
	}, nil
}
// validateVerifyEmailRequest checks the verify-email payload: the code must
// be present and consist of exactly 8 decimal digits. Returns an RFC 9457
// ProblemDetail error with field-specific messages, or nil when valid.
func (s *verifyEmailServiceImpl) validateVerifyEmailRequest(req *VerifyEmailRequestDTO) error {
	fieldErrors := map[string]string{}
	switch code := strings.TrimSpace(req.Code); {
	case code == "":
		fieldErrors["code"] = "Verification code is required"
	case len(code) != 8:
		fieldErrors["code"] = "Verification code must be 8 digits"
	default:
		// Reject any non-digit character.
		for _, r := range code {
			if r < '0' || r > '9' {
				fieldErrors["code"] = "Verification code must contain only numbers"
				break
			}
		}
	}
	if len(fieldErrors) > 0 {
		return httperror.NewValidationError(fieldErrors)
	}
	return nil
}

View file

@ -0,0 +1,221 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/auth/verify_ott.go
package auth
import (
"context"
"crypto/rand"
"crypto/subtle"
"encoding/base64"
"fmt"
"strings"
"time"
"github.com/awnumar/memguard"
"github.com/gocql/gocql"
"go.uber.org/zap"
uc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/user"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/security/crypto"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/storage/cache/cassandracache"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/transaction"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/validation"
)
// VerifyOTTRequestDTO is the inbound payload for one-time-token (OTT)
// verification during login.
type VerifyOTTRequestDTO struct {
	Email string `json:"email"`
	OTT string `json:"ott"` // 8-digit numeric one-time token sent to the user
}
// VerifyOTTResponseDTO carries the login challenge plus the user's encrypted
// key material needed by the client to derive keys and decrypt the challenge.
// All binary fields are base64 (std) encoded; the master/private key fields
// are nonce||ciphertext concatenations.
type VerifyOTTResponseDTO struct {
	Message string `json:"message"`
	ChallengeID string `json:"challengeId"` // references the cached challenge (5-minute TTL)
	EncryptedChallenge string `json:"encryptedChallenge"` // sealed to the user's public key
	Salt string `json:"salt"` // password KDF salt
	EncryptedMasterKey string `json:"encryptedMasterKey"`
	EncryptedPrivateKey string `json:"encryptedPrivateKey"`
	PublicKey string `json:"publicKey"`
	// KDFAlgorithm specifies which key derivation algorithm to use.
	// Values: "PBKDF2-SHA256" (web frontend) or "argon2id" (native app legacy)
	KDFAlgorithm string `json:"kdfAlgorithm"`
}
// VerifyOTTService verifies a login one-time token and issues an encrypted
// challenge for the next authentication step.
type VerifyOTTService interface {
	Execute(ctx context.Context, req *VerifyOTTRequestDTO) (*VerifyOTTResponseDTO, error)
}
// verifyOTTServiceImpl is the default VerifyOTTService implementation;
// dependencies are injected by NewVerifyOTTService.
type verifyOTTServiceImpl struct {
	logger *zap.Logger // named "VerifyOTTService" by the constructor
	userGetByEmailUC uc_user.UserGetByEmailUseCase // loads the user's encrypted key material
	cache cassandracache.CassandraCacher // holds OTTs and login challenges
}
// NewVerifyOTTService constructs a VerifyOTTService with its logger, user
// lookup use case, and cache dependencies.
func NewVerifyOTTService(
	logger *zap.Logger,
	userGetByEmailUC uc_user.UserGetByEmailUseCase,
	cache cassandracache.CassandraCacher,
) VerifyOTTService {
	svc := &verifyOTTServiceImpl{
		logger:           logger.Named("VerifyOTTService"),
		userGetByEmailUC: userGetByEmailUC,
		cache:            cache,
	}
	return svc
}
// Execute verifies a one-time token (OTT) for the supplied email and, on
// success, consumes the OTT and issues an encrypted login challenge the
// client must decrypt with its master key to complete authentication.
//
// Cache mutations are coordinated with a SAGA: the challenge is stored
// BEFORE the OTT is deleted so a mid-flow failure can never leave the user
// with neither credential, and each destructive step registers a
// compensation that undoes it if a later step fails.
func (s *verifyOTTServiceImpl) Execute(ctx context.Context, req *VerifyOTTRequestDTO) (*VerifyOTTResponseDTO, error) {
	// Validate request
	if err := s.validateVerifyOTTRequest(req); err != nil {
		return nil, err // Returns RFC 9457 ProblemDetail
	}
	// Create SAGA for OTT verification workflow
	saga := transaction.NewSaga("verify-ott", s.logger)
	s.logger.Info("starting OTT verification")
	// Step 1: Normalize email
	email := strings.ToLower(strings.TrimSpace(req.Email))
	// Step 2: Get OTT from cache
	cacheKey := fmt.Sprintf("ott:%s", email)
	cachedOTT, err := s.cache.Get(ctx, cacheKey)
	if err != nil || cachedOTT == nil {
		s.logger.Warn("OTT not found in cache", zap.String("email", validation.MaskEmail(email)))
		return nil, httperror.NewUnauthorizedError("Invalid or expired verification code. Please request a new code.")
	}
	defer memguard.WipeBytes(cachedOTT) // SECURITY: Wipe OTT from memory after use
	// Step 3: Verify OTT matches using constant-time comparison
	// CWE-208: Prevents timing attacks by ensuring comparison takes same time regardless of match
	if subtle.ConstantTimeCompare(cachedOTT, []byte(req.OTT)) != 1 {
		s.logger.Warn("OTT mismatch", zap.String("email", validation.MaskEmail(email)))
		return nil, httperror.NewUnauthorizedError("Incorrect verification code. Please check the code and try again.")
	}
	// Step 4: Get user to retrieve encrypted keys (read-only, no compensation)
	user, err := s.userGetByEmailUC.Execute(ctx, email)
	if err != nil || user == nil {
		s.logger.Error("User not found after OTT verification", zap.String("email", validation.MaskEmail(email)), zap.Error(err))
		return nil, httperror.NewUnauthorizedError("User account not found. Please contact support.")
	}
	// Step 5: Generate random challenge (32 bytes)
	challenge := make([]byte, 32)
	if _, err := rand.Read(challenge); err != nil {
		s.logger.Error("Failed to generate challenge", zap.Error(err))
		return nil, httperror.NewInternalServerError("Failed to generate security challenge. Please try again.")
	}
	defer memguard.WipeBytes(challenge) // SECURITY: Wipe challenge from memory after use
	// Step 6: Generate challenge ID
	challengeID := gocql.TimeUUID().String()
	// Step 7: Store challenge in cache FIRST (compensate: delete challenge)
	// CRITICAL: Store challenge before deleting OTT to prevent lockout
	challengeKey := fmt.Sprintf("challenge:%s", challengeID)
	if err := s.cache.SetWithExpiry(ctx, challengeKey, challenge, 5*time.Minute); err != nil {
		s.logger.Error("Failed to store challenge", zap.Error(err))
		return nil, httperror.NewInternalServerError("Failed to store security challenge. Please try again.")
	}
	// Register compensation: delete challenge if a later step fails
	challengeKeyCaptured := challengeKey
	saga.AddCompensation(func(ctx context.Context) error {
		s.logger.Info("compensating: deleting challenge",
			zap.String("challenge_key", challengeKeyCaptured))
		return s.cache.Delete(ctx, challengeKeyCaptured)
	})
	// Step 8: Delete OTT from cache (one-time use) (compensate: restore OTT)
	cacheKeyCaptured := cacheKey
	cachedOTTCaptured := cachedOTT
	if err := s.cache.Delete(ctx, cacheKey); err != nil {
		s.logger.Error("Failed to delete OTT",
			zap.String("cache_key", cacheKey),
			zap.Error(err))
		// Trigger compensation: Delete challenge
		saga.Rollback(ctx)
		return nil, httperror.NewInternalServerError("Verification failed. Please try again.")
	}
	// Register compensation: restore OTT with reduced TTL (5 minutes for retry)
	saga.AddCompensation(func(ctx context.Context) error {
		s.logger.Info("compensating: restoring OTT",
			zap.String("cache_key", cacheKeyCaptured))
		// Restore with reduced TTL (5 minutes) to allow user retry
		return s.cache.SetWithExpiry(ctx, cacheKeyCaptured, cachedOTTCaptured, 5*time.Minute)
	})
	// Encrypt the challenge with the user's public key using NaCl sealed box
	encryptedChallengeBytes, err := crypto.EncryptWithPublicKey(challenge, user.SecurityData.PublicKey.Key)
	if err != nil {
		s.logger.Error("Failed to encrypt challenge", zap.Error(err))
		// BUGFIX: roll back here as well. Previously this path returned
		// without compensating, leaving the OTT consumed while the stored
		// challenge was unusable — exactly the lockout the
		// "store challenge before deleting OTT" ordering is meant to avoid.
		saga.Rollback(ctx)
		return nil, httperror.NewInternalServerError("Failed to encrypt security challenge. Please try again.")
	}
	defer memguard.WipeBytes(encryptedChallengeBytes) // SECURITY: Wipe encrypted challenge after encoding
	encryptedChallenge := base64.StdEncoding.EncodeToString(encryptedChallengeBytes)
	s.logger.Info("OTT verified successfully",
		zap.String("email", validation.MaskEmail(email)),
		zap.String("challenge_id", challengeID),
		zap.String("challenge_key", challengeKey[:16]+"...")) // Log prefix for security
	// Prepare user's encrypted keys for the frontend. The master/private
	// key blobs are encoded as nonce||ciphertext concatenations.
	salt := base64.StdEncoding.EncodeToString(user.SecurityData.PasswordSalt)
	encryptedMasterKey := base64.StdEncoding.EncodeToString(append(user.SecurityData.EncryptedMasterKey.Nonce, user.SecurityData.EncryptedMasterKey.Ciphertext...))
	encryptedPrivateKey := base64.StdEncoding.EncodeToString(append(user.SecurityData.EncryptedPrivateKey.Nonce, user.SecurityData.EncryptedPrivateKey.Ciphertext...))
	publicKey := base64.StdEncoding.EncodeToString(user.SecurityData.PublicKey.Key)
	// Get KDF algorithm from user's security data
	kdfAlgorithm := user.SecurityData.KDFParams.Algorithm
	if kdfAlgorithm == "" {
		// Default to argon2id for backward compatibility with old accounts
		kdfAlgorithm = "argon2id"
	}
	return &VerifyOTTResponseDTO{
		Message:             "OTT verified. Please decrypt the challenge with your master key.",
		ChallengeID:         challengeID,
		EncryptedChallenge:  encryptedChallenge,
		Salt:                salt,
		EncryptedMasterKey:  encryptedMasterKey,
		EncryptedPrivateKey: encryptedPrivateKey,
		PublicKey:           publicKey,
		KDFAlgorithm:        kdfAlgorithm,
	}, nil
}
// validateVerifyOTTRequest checks the verify-OTT payload: email syntax is
// delegated to the shared validation helper, and the OTT must be exactly 8
// decimal digits. Returns an RFC 9457 ProblemDetail error with
// field-specific messages, or nil when valid.
func (s *verifyOTTServiceImpl) validateVerifyOTTRequest(req *VerifyOTTRequestDTO) error {
	fieldErrors := map[string]string{}
	if msg := validation.ValidateEmail(req.Email); msg != "" {
		fieldErrors["email"] = msg
	}
	switch ott := strings.TrimSpace(req.OTT); {
	case ott == "":
		fieldErrors["ott"] = "Verification code is required"
	case len(ott) != 8:
		fieldErrors["ott"] = "Verification code must be 8 digits"
	default:
		// Reject any non-digit character.
		for _, r := range ott {
			if r < '0' || r > '9' {
				fieldErrors["ott"] = "Verification code must contain only numbers"
				break
			}
		}
	}
	if len(fieldErrors) > 0 {
		return httperror.NewValidationError(fieldErrors)
	}
	return nil
}

View file

@ -0,0 +1,112 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/blockedemail/create.go
package blockedemail
import (
"context"
"strings"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
uc_blockedemail "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/blockedemail"
uc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/user"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/validation"
)
// CreateBlockedEmailService adds an email address to the authenticated
// user's block list.
type CreateBlockedEmailService interface {
	Execute(ctx context.Context, req *CreateBlockedEmailRequestDTO) (*BlockedEmailResponseDTO, error)
}
// createBlockedEmailServiceImpl is the default CreateBlockedEmailService
// implementation; dependencies are injected by NewCreateBlockedEmailService.
type createBlockedEmailServiceImpl struct {
	config *config.Configuration
	logger *zap.Logger // named "CreateBlockedEmailService" by the constructor
	createBlockedEmailUseCase uc_blockedemail.CreateBlockedEmailUseCase
	userGetByEmailUseCase uc_user.UserGetByEmailUseCase // used to link a block to an existing account
}
// NewCreateBlockedEmailService constructs a CreateBlockedEmailService with
// its configuration, logger, and use-case dependencies.
func NewCreateBlockedEmailService(
	config *config.Configuration,
	logger *zap.Logger,
	createBlockedEmailUseCase uc_blockedemail.CreateBlockedEmailUseCase,
	userGetByEmailUseCase uc_user.UserGetByEmailUseCase,
) CreateBlockedEmailService {
	return &createBlockedEmailServiceImpl{
		config:                    config,
		logger:                    logger.Named("CreateBlockedEmailService"),
		createBlockedEmailUseCase: createBlockedEmailUseCase,
		userGetByEmailUseCase:     userGetByEmailUseCase,
	}
}
// Execute adds req.Email to the authenticated user's block list. If the
// blocked address belongs to a registered account, that account's ID is
// recorded alongside the entry; non-registered addresses can be blocked too.
func (svc *createBlockedEmailServiceImpl) Execute(ctx context.Context, req *CreateBlockedEmailRequestDTO) (*BlockedEmailResponseDTO, error) {
	// STEP 1: Validation.
	if req == nil {
		svc.logger.Warn("Failed validation with nil request")
		return nil, httperror.NewBadRequestError("Request is required")
	}
	fieldErrors := map[string]string{}
	if strings.TrimSpace(req.Email) == "" {
		fieldErrors["email"] = "Email is required"
	}
	if len(fieldErrors) > 0 {
		svc.logger.Warn("Failed validation",
			zap.Any("error", fieldErrors))
		return nil, httperror.NewValidationError(fieldErrors)
	}
	// STEP 2: Resolve the caller from the session context.
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewInternalServerError("Authentication context error")
	}
	// STEP 3: Link the block to an existing account when one matches; a
	// lookup miss is fine — blocking non-existent emails is allowed.
	var blockedUserID gocql.UUID
	if blockedUser, lookupErr := svc.userGetByEmailUseCase.Execute(ctx, req.Email); lookupErr != nil {
		svc.logger.Debug("Blocked email user not found, continuing without user ID",
			zap.String("email", validation.MaskEmail(req.Email)),
			zap.Any("error", lookupErr))
	} else if blockedUser != nil {
		blockedUserID = blockedUser.ID
	}
	// STEP 4: Persist the block entry via the use case layer.
	blockedEmail, err := svc.createBlockedEmailUseCase.Execute(ctx, userID, req.Email, blockedUserID, req.Reason)
	if err != nil {
		svc.logger.Error("Failed to create blocked email",
			zap.Any("error", err),
			zap.Any("user_id", userID),
			zap.String("email", validation.MaskEmail(req.Email)))
		return nil, err
	}
	// STEP 5: Map the persisted record onto the response DTO.
	return &BlockedEmailResponseDTO{
		UserID:        blockedEmail.UserID,
		BlockedEmail:  blockedEmail.BlockedEmail,
		BlockedUserID: blockedEmail.BlockedUserID,
		Reason:        blockedEmail.Reason,
		CreatedAt:     blockedEmail.CreatedAt,
	}, nil
}

View file

@ -0,0 +1,80 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/blockedemail/delete.go
package blockedemail
import (
"context"
"strings"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
uc_blockedemail "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/blockedemail"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/validation"
)
// DeleteBlockedEmailService removes an email address from the authenticated
// user's block list.
type DeleteBlockedEmailService interface {
	Execute(ctx context.Context, email string) (*DeleteBlockedEmailResponseDTO, error)
}
// deleteBlockedEmailServiceImpl is the default DeleteBlockedEmailService
// implementation; dependencies are injected by NewDeleteBlockedEmailService.
type deleteBlockedEmailServiceImpl struct {
	config *config.Configuration
	logger *zap.Logger // named "DeleteBlockedEmailService" by the constructor
	deleteBlockedEmailUseCase uc_blockedemail.DeleteBlockedEmailUseCase
}
// NewDeleteBlockedEmailService constructs a DeleteBlockedEmailService with
// its configuration, logger, and use-case dependencies.
func NewDeleteBlockedEmailService(
	config *config.Configuration,
	logger *zap.Logger,
	deleteBlockedEmailUseCase uc_blockedemail.DeleteBlockedEmailUseCase,
) DeleteBlockedEmailService {
	return &deleteBlockedEmailServiceImpl{
		config:                    config,
		logger:                    logger.Named("DeleteBlockedEmailService"),
		deleteBlockedEmailUseCase: deleteBlockedEmailUseCase,
	}
}
// Execute removes the given email from the authenticated user's block list
// and reports success.
func (svc *deleteBlockedEmailServiceImpl) Execute(ctx context.Context, email string) (*DeleteBlockedEmailResponseDTO, error) {
	// STEP 1: Validation — a blank email cannot identify a block entry.
	if strings.TrimSpace(email) == "" {
		svc.logger.Warn("Failed validation with empty email")
		return nil, httperror.NewValidationError(map[string]string{"email": "Email is required"})
	}
	// STEP 2: Resolve the caller from the session context.
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewInternalServerError("Authentication context error")
	}
	// STEP 3: Delegate the deletion to the use case layer.
	if err := svc.deleteBlockedEmailUseCase.Execute(ctx, userID, email); err != nil {
		svc.logger.Error("Failed to delete blocked email",
			zap.Any("error", err),
			zap.Any("user_id", userID),
			zap.String("email", validation.MaskEmail(email)))
		return nil, err
	}
	// STEP 4: Success response.
	return &DeleteBlockedEmailResponseDTO{
		Success: true,
		Message: "Email unblocked successfully",
	}, nil
}

View file

@ -0,0 +1,35 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/blockedemail/dto.go
package blockedemail
import (
"time"
"github.com/gocql/gocql"
)
// CreateBlockedEmailRequestDTO represents the request to add a blocked email.
type CreateBlockedEmailRequestDTO struct {
	Email string `json:"email"` // address to block; need not belong to a registered user
	Reason string `json:"reason,omitempty"` // optional free-text reason
}
// BlockedEmailResponseDTO represents a blocked email in the response.
type BlockedEmailResponseDTO struct {
	UserID gocql.UUID `json:"user_id"` // owner of the block entry
	BlockedEmail string `json:"blocked_email"`
	BlockedUserID gocql.UUID `json:"blocked_user_id,omitempty"` // set when the blocked address matches a registered account
	Reason string `json:"reason,omitempty"`
	CreatedAt time.Time `json:"created_at"`
}
// ListBlockedEmailsResponseDTO represents the response for listing blocked emails.
type ListBlockedEmailsResponseDTO struct {
	BlockedEmails []*BlockedEmailResponseDTO `json:"blocked_emails"`
	Count int `json:"count"` // convenience total; equals len(BlockedEmails)
}
// DeleteBlockedEmailResponseDTO represents the response for deleting a blocked email.
type DeleteBlockedEmailResponseDTO struct {
	Success bool `json:"success"`
	Message string `json:"message"`
}

View file

@ -0,0 +1,80 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/blockedemail/list.go
package blockedemail
import (
"context"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
uc_blockedemail "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/blockedemail"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// ListBlockedEmailsService returns all blocked-email entries belonging to
// the authenticated user.
type ListBlockedEmailsService interface {
	Execute(ctx context.Context) (*ListBlockedEmailsResponseDTO, error)
}
// listBlockedEmailsServiceImpl is the default ListBlockedEmailsService
// implementation; dependencies are injected by NewListBlockedEmailsService.
type listBlockedEmailsServiceImpl struct {
	config *config.Configuration
	logger *zap.Logger // named "ListBlockedEmailsService" by the constructor
	listBlockedEmailsUseCase uc_blockedemail.ListBlockedEmailsUseCase
}
// NewListBlockedEmailsService constructs a ListBlockedEmailsService with
// its configuration, logger, and use-case dependencies.
func NewListBlockedEmailsService(
	config *config.Configuration,
	logger *zap.Logger,
	listBlockedEmailsUseCase uc_blockedemail.ListBlockedEmailsUseCase,
) ListBlockedEmailsService {
	return &listBlockedEmailsServiceImpl{
		config:                   config,
		logger:                   logger.Named("ListBlockedEmailsService"),
		listBlockedEmailsUseCase: listBlockedEmailsUseCase,
	}
}
// Execute returns every blocked-email entry belonging to the authenticated
// user, together with a total count.
func (svc *listBlockedEmailsServiceImpl) Execute(ctx context.Context) (*ListBlockedEmailsResponseDTO, error) {
	// STEP 1: Resolve the caller from the session context.
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewInternalServerError("Authentication context error")
	}
	// STEP 2: Fetch the caller's block list.
	blockedEmails, err := svc.listBlockedEmailsUseCase.Execute(ctx, userID)
	if err != nil {
		svc.logger.Error("Failed to list blocked emails",
			zap.Any("error", err),
			zap.Any("user_id", userID))
		return nil, err
	}
	// STEP 3: Map each domain record onto a response DTO (pre-sized slice).
	items := make([]*BlockedEmailResponseDTO, 0, len(blockedEmails))
	for _, be := range blockedEmails {
		items = append(items, &BlockedEmailResponseDTO{
			UserID:        be.UserID,
			BlockedEmail:  be.BlockedEmail,
			BlockedUserID: be.BlockedUserID,
			Reason:        be.Reason,
			CreatedAt:     be.CreatedAt,
		})
	}
	return &ListBlockedEmailsResponseDTO{
		BlockedEmails: items,
		Count:         len(items),
	}, nil
}

View file

@ -0,0 +1,35 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/blockedemail/provider.go
package blockedemail
import (
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
uc_blockedemail "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/blockedemail"
uc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/user"
)
// ProvideCreateBlockedEmailService is a dependency-injection provider that
// delegates to NewCreateBlockedEmailService.
func ProvideCreateBlockedEmailService(
	cfg *config.Configuration,
	logger *zap.Logger,
	createBlockedEmailUseCase uc_blockedemail.CreateBlockedEmailUseCase,
	userGetByEmailUseCase uc_user.UserGetByEmailUseCase,
) CreateBlockedEmailService {
	return NewCreateBlockedEmailService(cfg, logger, createBlockedEmailUseCase, userGetByEmailUseCase)
}
// ProvideListBlockedEmailsService is a dependency-injection provider that
// delegates to NewListBlockedEmailsService.
func ProvideListBlockedEmailsService(
	cfg *config.Configuration,
	logger *zap.Logger,
	listBlockedEmailsUseCase uc_blockedemail.ListBlockedEmailsUseCase,
) ListBlockedEmailsService {
	return NewListBlockedEmailsService(cfg, logger, listBlockedEmailsUseCase)
}
// ProvideDeleteBlockedEmailService is a dependency-injection provider that
// delegates to NewDeleteBlockedEmailService.
func ProvideDeleteBlockedEmailService(
	cfg *config.Configuration,
	logger *zap.Logger,
	deleteBlockedEmailUseCase uc_blockedemail.DeleteBlockedEmailUseCase,
) DeleteBlockedEmailService {
	return NewDeleteBlockedEmailService(cfg, logger, deleteBlockedEmailUseCase)
}

View file

@ -0,0 +1,135 @@
// monorepo/cloud/backend/internal/maplefile/service/collection/archive.go
package collection
import (
"context"
"time"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// ArchiveCollectionRequestDTO identifies the collection to archive.
type ArchiveCollectionRequestDTO struct {
	ID gocql.UUID `json:"id"`
}
// ArchiveCollectionResponseDTO reports the outcome of an archive request.
type ArchiveCollectionResponseDTO struct {
	Success bool `json:"success"`
	Message string `json:"message"`
}
// ArchiveCollectionService transitions a collection to the archived state
// on behalf of its owner.
type ArchiveCollectionService interface {
	Execute(ctx context.Context, req *ArchiveCollectionRequestDTO) (*ArchiveCollectionResponseDTO, error)
}
// archiveCollectionServiceImpl is the default ArchiveCollectionService
// implementation; dependencies are injected by NewArchiveCollectionService.
type archiveCollectionServiceImpl struct {
	config *config.Configuration
	logger *zap.Logger // named "ArchiveCollectionService" by the constructor
	repo dom_collection.CollectionRepository // reads and persists collections
}
// NewArchiveCollectionService constructs an ArchiveCollectionService backed
// by the given configuration, logger, and collection repository.
func NewArchiveCollectionService(
	config *config.Configuration,
	logger *zap.Logger,
	repo dom_collection.CollectionRepository,
) ArchiveCollectionService {
	return &archiveCollectionServiceImpl{
		config: config,
		logger: logger.Named("ArchiveCollectionService"),
		repo:   repo,
	}
}
// Execute archives the collection identified by req.ID on behalf of the
// authenticated user. Only the collection owner may archive, and the
// collection's current state must allow a transition to the archived state.
func (svc *archiveCollectionServiceImpl) Execute(ctx context.Context, req *ArchiveCollectionRequestDTO) (*ArchiveCollectionResponseDTO, error) {
	//
	// STEP 1: Validation
	//
	if req == nil {
		svc.logger.Warn("Failed validation with nil request")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "Collection ID is required")
	}
	// BUGFIX: gocql.UUID.String() always renders a 36-character string, so
	// the previous `req.ID.String() == ""` check could never fire. Compare
	// against the zero UUID instead to catch an omitted/empty ID.
	if req.ID == (gocql.UUID{}) {
		svc.logger.Warn("Empty collection ID")
		return nil, httperror.NewForBadRequestWithSingleField("id", "Collection ID is required")
	}
	//
	// STEP 2: Get user ID from context
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 3: Retrieve existing collection (including non-active states for archiving)
	//
	collection, err := svc.repo.Get(ctx, req.ID)
	if err != nil {
		svc.logger.Error("Failed to get collection",
			zap.Any("error", err),
			zap.Any("collection_id", req.ID))
		return nil, err
	}
	if collection == nil {
		svc.logger.Debug("Collection not found",
			zap.Any("collection_id", req.ID))
		return nil, httperror.NewForNotFoundWithSingleField("message", "Collection not found")
	}
	//
	// STEP 4: Only the owner may archive.
	//
	if collection.OwnerID != userID {
		svc.logger.Warn("Unauthorized collection archive attempt",
			zap.Any("user_id", userID),
			zap.Any("collection_id", req.ID))
		return nil, httperror.NewForForbiddenWithSingleField("message", "Only the collection owner can archive a collection")
	}
	//
	// STEP 5: Validate the state transition before mutating anything.
	//
	err = dom_collection.IsValidStateTransition(collection.State, dom_collection.CollectionStateArchived)
	if err != nil {
		svc.logger.Warn("Invalid state transition for collection archive",
			zap.Any("collection_id", req.ID),
			zap.String("current_state", collection.State),
			zap.String("target_state", dom_collection.CollectionStateArchived),
			zap.Error(err))
		return nil, httperror.NewForBadRequestWithSingleField("state", err.Error())
	}
	//
	// STEP 6: Archive the collection and record the mutation metadata.
	//
	collection.State = dom_collection.CollectionStateArchived
	collection.Version++ // Update mutation means we increment version.
	collection.ModifiedAt = time.Now()
	collection.ModifiedByUserID = userID
	err = svc.repo.Update(ctx, collection)
	if err != nil {
		svc.logger.Error("Failed to archive collection",
			zap.Any("error", err),
			zap.Any("collection_id", req.ID))
		return nil, err
	}
	svc.logger.Info("Collection archived successfully",
		zap.Any("collection_id", req.ID),
		zap.Any("user_id", userID))
	return &ArchiveCollectionResponseDTO{
		Success: true,
		Message: "Collection archived successfully",
	}, nil
}

View file

@ -0,0 +1,336 @@
// monorepo/cloud/backend/internal/maplefile/service/collection/create.go
package collection
import (
"context"
"fmt"
"time"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/crypto"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/tag"
uc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/user"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/validation"
)
// CreateCollectionRequestDTO represents a Data Transfer Object (DTO)
// used for transferring collection (folder or album) data between the local device and the cloud server.
// This data is end-to-end encrypted (E2EE) on the local device before transmission.
// The cloud server stores this encrypted data but cannot decrypt it.
// On the local device, this data is decrypted for use and storage (not stored in this encrypted DTO format locally).
// It can represent both root collections and embedded subcollections.
type CreateCollectionRequestDTO struct {
	ID gocql.UUID `bson:"_id" json:"id"`
	OwnerID gocql.UUID `bson:"owner_id" json:"owner_id"`
	EncryptedName string `bson:"encrypted_name" json:"encrypted_name"` // ciphertext; server cannot decrypt
	EncryptedCustomIcon string `bson:"encrypted_custom_icon" json:"encrypted_custom_icon"`
	CollectionType string `bson:"collection_type" json:"collection_type"`
	EncryptedCollectionKey *crypto.EncryptedCollectionKey `bson:"encrypted_collection_key" json:"encrypted_collection_key"`
	Members []*CollectionMembershipDTO `bson:"members" json:"members"`
	ParentID gocql.UUID `bson:"parent_id,omitempty" json:"parent_id,omitempty"` // zero UUID for root collections
	AncestorIDs []gocql.UUID `bson:"ancestor_ids,omitempty" json:"ancestor_ids,omitempty"`
	TagIDs []gocql.UUID `bson:"tag_ids,omitempty" json:"tag_ids,omitempty"` // Tag IDs to embed in collection
	Children []*CreateCollectionRequestDTO `bson:"children,omitempty" json:"children,omitempty"` // recursive: embedded subcollections
	CreatedAt time.Time `bson:"created_at" json:"created_at"`
	CreatedByUserID gocql.UUID `json:"created_by_user_id"`
	ModifiedAt time.Time `bson:"modified_at" json:"modified_at"`
	ModifiedByUserID gocql.UUID `json:"modified_by_user_id"`
}
// CollectionMembershipDTO carries a single member's access grant for a
// collection as supplied by the client during collection creation.
type CollectionMembershipDTO struct {
	ID                     gocql.UUID `bson:"_id" json:"id"`                          // Unique ID of this membership record
	CollectionID           gocql.UUID `bson:"collection_id" json:"collection_id"`     // Collection this membership belongs to
	RecipientID            gocql.UUID `bson:"recipient_id" json:"recipient_id"`       // User receiving access
	RecipientEmail         string     `bson:"recipient_email" json:"recipient_email"` // Recipient email for display purposes
	GrantedByID            gocql.UUID `bson:"granted_by_id" json:"granted_by_id"`     // User who granted the access
	EncryptedCollectionKey []byte     `bson:"encrypted_collection_key" json:"encrypted_collection_key"` // Collection key encrypted for the recipient; nil for the owner (owner uses their master key)
	PermissionLevel        string     `bson:"permission_level" json:"permission_level"` // Permission constant (e.g. read-only/admin from dom_collection)
	CreatedAt              time.Time  `bson:"created_at" json:"created_at"`             // When the membership was created
	IsInherited            bool       `bson:"is_inherited" json:"is_inherited"`         // True when access was inherited from a parent collection
	InheritedFromID        gocql.UUID `bson:"inherited_from_id,omitempty" json:"inherited_from_id,omitempty"` // Parent collection that granted inherited access (zero for direct grants)
}
// CollectionResponseDTO is the server's response representation of a
// collection. Encrypted fields remain opaque ciphertext; the server adds
// derived metadata such as owner email, file count and mutation version.
type CollectionResponseDTO struct {
	ID                     gocql.UUID                     `json:"id"`
	OwnerID                gocql.UUID                     `json:"owner_id"`
	OwnerEmail             string                         `json:"owner_email"` // Resolved server-side from the owner's user record
	EncryptedName          string                         `json:"encrypted_name"`
	EncryptedCustomIcon    string                         `json:"encrypted_custom_icon,omitempty"`
	CollectionType         string                         `json:"collection_type"` // "folder" or "album"
	ParentID               gocql.UUID                     `json:"parent_id,omitempty"`
	AncestorIDs            []gocql.UUID                   `json:"ancestor_ids,omitempty"`
	Tags                   []tag.EmbeddedTag              `json:"tags,omitempty"` // Tags embedded at creation/update time
	EncryptedCollectionKey *crypto.EncryptedCollectionKey `json:"encrypted_collection_key,omitempty"`
	Children               []*CollectionResponseDTO       `json:"children,omitempty"` // Recursively mapped subcollections
	CreatedAt              time.Time                      `json:"created_at"`
	ModifiedAt             time.Time                      `json:"modified_at"`
	Members                []MembershipResponseDTO        `json:"members"`
	FileCount              int                            `json:"file_count"` // Maintained by file add/remove operations
	Version                uint64                         `json:"version"`    // Mutation version; starts at 1 on creation
}
// MembershipResponseDTO is the response representation of a collection
// membership, including who granted it and whether it was inherited.
type MembershipResponseDTO struct {
	ID           gocql.UUID `bson:"_id" json:"id"`
	CollectionID gocql.UUID `bson:"collection_id" json:"collection_id"` // ID of the collection (redundant but helpful for queries)
	RecipientID  gocql.UUID `bson:"recipient_id" json:"recipient_id"`   // User receiving access
	RecipientEmail string   `bson:"recipient_email" json:"recipient_email"` // Email for display purposes
	GrantedByID  gocql.UUID `bson:"granted_by_id" json:"granted_by_id"` // User who shared the collection
	// Collection key encrypted with recipient's public key using box_seal. This matches the box_seal format which doesn't need a separate nonce.
	EncryptedCollectionKey []byte `bson:"encrypted_collection_key" json:"encrypted_collection_key"`
	// Access details
	PermissionLevel string    `bson:"permission_level" json:"permission_level"`
	CreatedAt       time.Time `bson:"created_at" json:"created_at"`
	// Sharing origin tracking
	IsInherited     bool       `bson:"is_inherited" json:"is_inherited"`                               // Tracks whether access was granted directly or inherited from a parent
	InheritedFromID gocql.UUID `bson:"inherited_from_id,omitempty" json:"inherited_from_id,omitempty"` // InheritedFromID identifies which parent collection granted this access
}
// CreateCollectionService creates a new end-to-end-encrypted collection on
// behalf of the authenticated user.
type CreateCollectionService interface {
	// Execute validates the request, applies server-authoritative overrides,
	// and persists the collection, returning its response representation.
	Execute(ctx context.Context, req *CreateCollectionRequestDTO) (*CollectionResponseDTO, error)
}
// createCollectionServiceImpl is the default CreateCollectionService,
// backed by the collection repository, the tag repository, and the
// user-lookup use case.
type createCollectionServiceImpl struct {
	config             *config.Configuration
	logger             *zap.Logger
	userGetByIDUseCase uc_user.UserGetByIDUseCase
	repo               dom_collection.CollectionRepository
	tagRepo            tag.Repository
}
// NewCreateCollectionService constructs a CreateCollectionService with its
// configuration, a service-scoped logger, and the repositories and use case
// it depends on.
func NewCreateCollectionService(
	config *config.Configuration,
	logger *zap.Logger,
	userGetByIDUseCase uc_user.UserGetByIDUseCase,
	repo dom_collection.CollectionRepository,
	tagRepo tag.Repository,
) CreateCollectionService {
	return &createCollectionServiceImpl{
		config:             config,
		logger:             logger.Named("CreateCollectionService"),
		userGetByIDUseCase: userGetByIDUseCase,
		repo:               repo,
		tagRepo:            tagRepo,
	}
}
// Execute creates a new end-to-end-encrypted collection for the authenticated user.
//
// The server never decrypts collection data: it validates the encrypted
// payload, overrides server-authoritative fields (ID, owner, timestamps,
// version, state), guarantees an owner membership with Admin permission,
// embeds any requested tags, and persists the collection via the repository.
//
// Returns the created collection mapped to a response DTO, or an httperror
// for validation/authentication failures.
func (svc *createCollectionServiceImpl) Execute(ctx context.Context, req *CreateCollectionRequestDTO) (*CollectionResponseDTO, error) {
	//
	// STEP 1: Validation
	//
	if req == nil {
		svc.logger.Warn("Failed validation with nil request")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "Collection details are required")
	}
	e := make(map[string]string)
	// BUGFIX: gocql.UUID.String() always formats 16 bytes and never returns "",
	// so the old `req.ID.String() == ""` check could never fire (and reported
	// under the wrong "encrypted_name" key). Compare with the zero UUID instead.
	if req.ID == (gocql.UUID{}) {
		e["id"] = "Client-side generated ID is required"
	}
	if req.EncryptedName == "" {
		e["encrypted_name"] = "Collection name is required"
	}
	if req.CollectionType == "" {
		e["collection_type"] = "Collection type is required"
	} else if req.CollectionType != dom_collection.CollectionTypeFolder && req.CollectionType != dom_collection.CollectionTypeAlbum {
		e["collection_type"] = "Collection type must be either 'folder' or 'album'"
	}
	// Check the pointer before dereferencing. len() of a nil slice is 0, so a
	// separate nil check on the slice itself is redundant.
	if req.EncryptedCollectionKey == nil || len(req.EncryptedCollectionKey.Ciphertext) == 0 {
		e["encrypted_collection_key"] = "Encrypted collection key ciphertext is required"
	}
	if req.EncryptedCollectionKey == nil || len(req.EncryptedCollectionKey.Nonce) == 0 {
		e["encrypted_collection_key"] = "Encrypted collection key nonce is required"
	}
	if len(e) != 0 {
		svc.logger.Warn("Failed validation",
			zap.Any("error", e))
		return nil, httperror.NewForBadRequest(&e)
	}
	//
	// STEP 2: Get user ID from context
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	federateduser, err := svc.userGetByIDUseCase.Execute(ctx, userID)
	if err != nil {
		// Wrap with %w so callers can unwrap; error text lowercase per Go convention.
		return nil, fmt.Errorf("getting user from database: %w", err)
	}
	if federateduser == nil {
		return nil, fmt.Errorf("user does not exist for user id: %v", userID.String())
	}
	//
	// STEP 3: Create collection object by mapping DTO and applying server-side logic
	//
	now := time.Now()
	// Map all fields from the request DTO to the domain object.
	// This copies client-provided values including potential ID, OwnerID, timestamps, etc.
	collection := mapCollectionDTOToDomain(req, userID, now)
	// Apply server-side mandatory fields/overrides for the top-level collection.
	// These values are managed by the backend regardless of what the client provides in the DTO.
	// This ensures data integrity and reflects the server's perspective of the creation event.
	collection.ID = gocql.TimeUUID()                        // Always generate a new ID on the server for a new creation
	collection.OwnerID = userID                             // The authenticated user is the authoritative owner
	collection.CreatedAt = now                              // Server timestamp for creation
	collection.ModifiedAt = now                             // Server timestamp for modification
	collection.CreatedByUserID = userID                     // The authenticated user is the creator
	collection.ModifiedByUserID = userID                    // The authenticated user is the initial modifier
	collection.Version = 1                                  // Collection creation **always** starts mutation version at 1.
	collection.State = dom_collection.CollectionStateActive // Collection creation **always** starts in active state.
	// Ensure owner membership exists with Admin permissions.
	// Check if the owner is already present in the members list copied from the DTO.
	ownerAlreadyMember := false
	for i := range collection.Members { // Iterate by index to allow modification if needed
		if collection.Members[i].RecipientID == userID {
			// Owner is found. Ensure they have Admin permission and correct granted_by/is_inherited status.
			collection.Members[i].RecipientEmail = federateduser.Email
			collection.Members[i].PermissionLevel = dom_collection.CollectionPermissionAdmin
			collection.Members[i].GrantedByID = userID
			collection.Members[i].IsInherited = false
			// NOTE: We intentionally do NOT set EncryptedCollectionKey here for the owner.
			// The owner accesses the collection key through their master key, not through
			// the encrypted member key. This is validated in the repository layer.
			collection.Members[i].EncryptedCollectionKey = nil
			// Optionally update membership CreatedAt here if server should control it, otherwise keep DTO value.
			// collection.Members[i].CreatedAt = now
			ownerAlreadyMember = true
			svc.logger.Debug("✅ Owner membership updated with Admin permissions (no encrypted key needed)")
			break
		}
	}
	// If owner is not in the members list, add their mandatory membership.
	if !ownerAlreadyMember {
		svc.logger.Debug("☑️ Owner is not in the members list, add their mandatory membership now")
		ownerMembership := dom_collection.CollectionMembership{
			ID:              gocql.TimeUUID(), // Unique ID for this specific membership record
			RecipientID:     userID,
			RecipientEmail:  federateduser.Email,
			CollectionID:    collection.ID,                             // Link to the newly created collection ID
			PermissionLevel: dom_collection.CollectionPermissionAdmin,  // Owner must have Admin
			GrantedByID:     userID,                                    // Owner implicitly grants themselves permission
			IsInherited:     false,                                     // Owner membership is never inherited
			CreatedAt:       now,                                       // Server timestamp for membership creation
			// NOTE: EncryptedCollectionKey is intentionally nil for owner memberships.
			// The owner has access to the collection key through their master key.
			// This is validated in the repository layer which allows nil encrypted keys for owners.
			EncryptedCollectionKey: nil,
			// InheritedFromID stays the zero UUID for direct membership.
		}
		// Append the mandatory owner membership. If req.Members was empty, this initializes the slice.
		collection.Members = append(collection.Members, ownerMembership)
		svc.logger.Debug("✅ Owner membership added with Admin permissions (no encrypted key needed)")
	}
	svc.logger.Debug("🔍 Collection debugging info",
		zap.String("collectionID", collection.ID.String()),
		zap.String("collectionOwnerID", collection.OwnerID.String()),
		zap.String("currentUserID", userID.String()),
		zap.Int("totalMembers", len(collection.Members)),
		zap.String("encryptedName", collection.EncryptedName))
	for i, memberDTO := range collection.Members {
		isOwner := memberDTO.RecipientID == collection.OwnerID
		svc.logger.Debug("🔍 Cloud collection member DTO",
			zap.Int("memberIndex", i),
			zap.String("memberID", memberDTO.ID.String()),
			zap.String("recipientID", memberDTO.RecipientID.String()),
			zap.String("recipientEmail", validation.MaskEmail(memberDTO.RecipientEmail)),
			zap.String("permissionLevel", memberDTO.PermissionLevel),
			zap.Bool("isInherited", memberDTO.IsInherited),
			zap.Bool("isOwner", isOwner),
			zap.Int("encryptedKeyLength", len(memberDTO.EncryptedCollectionKey)))
	}
	// ENHANCED DEBUGGING: Log current user info for comparison
	svc.logger.Debug("🔍 Current user info for comparison",
		zap.String("currentUserID", federateduser.ID.String()),
		zap.String("currentUserEmail", validation.MaskEmail(federateduser.Email)),
		zap.String("currentUserName", federateduser.Name))
	// Note: Fields like ParentID, AncestorIDs, EncryptedCollectionKey,
	// EncryptedName, CollectionType, and recursively mapped Children are copied directly from the DTO
	// by the mapCollectionDTOToDomain function before server overrides.
	//
	// STEP 3.5: Look up and embed tags if TagIDs were provided
	//
	if len(req.TagIDs) > 0 {
		svc.logger.Debug("🏷️ Looking up tags to embed in collection",
			zap.Int("tagCount", len(req.TagIDs)))
		var embeddedTags []tag.EmbeddedTag
		for _, tagID := range req.TagIDs {
			tagObj, err := svc.tagRepo.GetByID(ctx, tagID)
			if err != nil {
				// Best-effort: a missing/broken tag must not block collection creation.
				svc.logger.Warn("Failed to get tag for embedding, skipping",
					zap.String("tagID", tagID.String()),
					zap.Error(err))
				continue
			}
			if tagObj == nil {
				svc.logger.Warn("Tag not found for embedding, skipping",
					zap.String("tagID", tagID.String()))
				continue
			}
			// Convert Tag to EmbeddedTag
			embedded := tagObj.ToEmbeddedTag()
			if embedded != nil {
				embeddedTags = append(embeddedTags, *embedded)
				svc.logger.Debug("🏷️ Tag embedded successfully",
					zap.String("tagID", tagID.String()))
			}
		}
		collection.Tags = embeddedTags
		svc.logger.Debug("🏷️ Tags embedded in collection",
			zap.Int("embeddedCount", len(embeddedTags)))
	}
	//
	// STEP 4: Create collection in repository
	//
	if err := svc.repo.Create(ctx, collection); err != nil {
		svc.logger.Error("Failed to create collection",
			zap.Any("error", err),
			zap.Any("owner_id", collection.OwnerID),
			zap.String("name", collection.EncryptedName))
		return nil, err
	}
	//
	// STEP 5: Map domain model to response DTO
	//
	// The mapCollectionToDTO helper converts the created domain object back
	// into the response DTO format, potentially excluding sensitive fields like keys
	// or specific membership details not meant for the general response.
	response := mapCollectionToDTO(collection, 0, federateduser.Email)
	svc.logger.Debug("Collection created successfully",
		zap.Any("collection_id", collection.ID),
		zap.Any("owner_id", collection.OwnerID))
	return response, nil
}

View file

@ -0,0 +1,113 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/collection/find_by_parent.go
package collection
import (
"context"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// FindByParentRequestDTO identifies the parent collection whose direct
// children should be listed.
type FindByParentRequestDTO struct {
	ParentID gocql.UUID `json:"parent_id"` // Required; must be non-zero
}
// FindCollectionsByParentService lists the child collections of a parent
// collection the authenticated user has read access to.
type FindCollectionsByParentService interface {
	Execute(ctx context.Context, req *FindByParentRequestDTO) (*CollectionsResponseDTO, error)
}
// findCollectionsByParentServiceImpl is the default
// FindCollectionsByParentService backed by the collection repository.
type findCollectionsByParentServiceImpl struct {
	config *config.Configuration
	logger *zap.Logger
	repo   dom_collection.CollectionRepository
}
// NewFindCollectionsByParentService constructs a FindCollectionsByParentService
// with its configuration, a service-scoped logger, and the collection repository.
func NewFindCollectionsByParentService(
	config *config.Configuration,
	logger *zap.Logger,
	repo dom_collection.CollectionRepository,
) FindCollectionsByParentService {
	return &findCollectionsByParentServiceImpl{
		config: config,
		logger: logger.Named("FindCollectionsByParentService"),
		repo:   repo,
	}
}
// Execute lists the direct children of the given parent collection.
//
// The caller must hold at least read-only access on the parent; otherwise a
// forbidden httperror is returned. Results are mapped to response DTOs.
func (svc *findCollectionsByParentServiceImpl) Execute(ctx context.Context, req *FindByParentRequestDTO) (*CollectionsResponseDTO, error) {
	//
	// STEP 1: Validation
	//
	if req == nil {
		svc.logger.Warn("Failed validation with nil request")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "Parent ID is required")
	}
	// BUGFIX: gocql.UUID.String() never returns "", so the old
	// `req.ParentID.String() == ""` check could never fire. Compare against
	// the zero UUID to detect a missing parent ID.
	if req.ParentID == (gocql.UUID{}) {
		svc.logger.Warn("Empty parent ID provided")
		return nil, httperror.NewForBadRequestWithSingleField("parent_id", "Parent ID is required")
	}
	//
	// STEP 2: Get user ID from context
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 3: Check if user has access to the parent collection
	//
	hasAccess, err := svc.repo.CheckAccess(ctx, req.ParentID, userID, dom_collection.CollectionPermissionReadOnly)
	if err != nil {
		svc.logger.Error("Failed to check access",
			zap.Any("error", err),
			zap.Any("parent_id", req.ParentID),
			zap.Any("user_id", userID))
		return nil, err
	}
	if !hasAccess {
		svc.logger.Warn("Unauthorized parent collection access attempt",
			zap.Any("user_id", userID),
			zap.Any("parent_id", req.ParentID))
		return nil, httperror.NewForForbiddenWithSingleField("message", "You don't have access to this parent collection")
	}
	//
	// STEP 4: Find collections by parent
	//
	collections, err := svc.repo.FindByParent(ctx, req.ParentID)
	if err != nil {
		svc.logger.Error("Failed to find collections by parent",
			zap.Any("error", err),
			zap.Any("parent_id", req.ParentID))
		return nil, err
	}
	//
	// STEP 5: Map domain models to response DTOs
	//
	response := &CollectionsResponseDTO{
		Collections: make([]*CollectionResponseDTO, len(collections)),
	}
	for i, collection := range collections {
		ownerEmail := getOwnerEmailFromMembers(collection)
		response.Collections[i] = mapCollectionToDTO(collection, 0, ownerEmail)
	}
	svc.logger.Debug("Found collections by parent",
		zap.Int("count", len(collections)),
		zap.Any("parent_id", req.ParentID))
	return response, nil
}

View file

@ -0,0 +1,96 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/collection/find_root_collections.go
package collection
import (
	"context"

	"github.com/gocql/gocql"
	"go.uber.org/zap"

	"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
	"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
	dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
	"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// FindRootCollectionsService lists the authenticated user's root-level
// (parentless) collections that they can read.
type FindRootCollectionsService interface {
	Execute(ctx context.Context) (*CollectionsResponseDTO, error)
}
// findRootCollectionsServiceImpl is the default FindRootCollectionsService
// backed by the collection repository.
type findRootCollectionsServiceImpl struct {
	config *config.Configuration
	logger *zap.Logger
	repo   dom_collection.CollectionRepository
}
// NewFindRootCollectionsService constructs a FindRootCollectionsService with
// its configuration, a service-scoped logger, and the collection repository.
func NewFindRootCollectionsService(
	config *config.Configuration,
	logger *zap.Logger,
	repo dom_collection.CollectionRepository,
) FindRootCollectionsService {
	return &findRootCollectionsServiceImpl{
		config: config,
		logger: logger.Named("FindRootCollectionsService"),
		repo:   repo,
	}
}
// Execute returns the authenticated user's root collections, filtered down to
// those the user holds at least read-only access on.
//
// Collections whose access check errors are skipped rather than failing the
// whole request (best-effort listing).
func (svc *findRootCollectionsServiceImpl) Execute(ctx context.Context) (*CollectionsResponseDTO, error) {
	//
	// STEP 1: Get user ID from context
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		// BUGFIX: previously returned (nil, nil), which callers would treat as
		// success and then dereference a nil response. Return an explicit
		// error, consistent with the other collection services.
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 2: Find root collections for the user
	//
	collections, err := svc.repo.FindRootCollections(ctx, userID)
	if err != nil {
		svc.logger.Error("Failed to find root collections",
			zap.Any("error", err),
			zap.Any("user_id", userID))
		return nil, err
	}
	//
	// STEP 3: Filter collections based on permission levels and map to DTOs
	//
	// Filter out collections where the user doesn't have at least read_only permission.
	collectionsWithPermission := make([]*CollectionResponseDTO, 0, len(collections))
	for _, collection := range collections {
		// Check if user has at least read_only permission for this collection.
		hasAccess, err := svc.repo.CheckAccess(ctx, collection.ID, userID, dom_collection.CollectionPermissionReadOnly)
		if err != nil {
			svc.logger.Warn("Failed to check collection access for root collection, skipping",
				zap.Error(err),
				zap.Any("collection_id", collection.ID),
				zap.Any("user_id", userID))
			continue // Skip collections where we can't verify access
		}
		if hasAccess {
			ownerEmail := getOwnerEmailFromMembers(collection)
			collectionsWithPermission = append(collectionsWithPermission,
				mapCollectionToDTO(collection, 0, ownerEmail))
		} else {
			svc.logger.Debug("User lacks permission for root collection, filtering out",
				zap.Any("collection_id", collection.ID),
				zap.Any("user_id", userID))
		}
	}
	response := &CollectionsResponseDTO{
		Collections: collectionsWithPermission,
	}
	svc.logger.Debug("Found root collections",
		zap.Int("count", len(collections)),
		zap.Int("accessible_count", len(collectionsWithPermission)),
		zap.Any("user_id", userID))
	return response, nil
}

View file

@ -0,0 +1,199 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/collection/get.go
package collection
import (
"context"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
uc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/user"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/ratelimit"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/validation"
)
// GetCollectionService fetches a single collection by ID, enforcing read
// access and authorization-failure rate limiting.
type GetCollectionService interface {
	Execute(ctx context.Context, collectionID gocql.UUID) (*CollectionResponseDTO, error)
}
// getCollectionServiceImpl is the default GetCollectionService. It combines
// the collection repository with a user lookup (for the owner's email) and an
// optional auth-failure rate limiter (nil disables rate limiting).
type getCollectionServiceImpl struct {
	config                 *config.Configuration
	logger                 *zap.Logger
	repo                   dom_collection.CollectionRepository
	userGetByIDUseCase     uc_user.UserGetByIDUseCase
	authFailureRateLimiter ratelimit.AuthFailureRateLimiter
}
// NewGetCollectionService constructs a GetCollectionService with its
// configuration, a service-scoped logger, the collection repository, the
// user-lookup use case, and the auth-failure rate limiter.
func NewGetCollectionService(
	config *config.Configuration,
	logger *zap.Logger,
	repo dom_collection.CollectionRepository,
	userGetByIDUseCase uc_user.UserGetByIDUseCase,
	authFailureRateLimiter ratelimit.AuthFailureRateLimiter,
) GetCollectionService {
	return &getCollectionServiceImpl{
		config:                 config,
		logger:                 logger.Named("GetCollectionService"),
		repo:                   repo,
		userGetByIDUseCase:     userGetByIDUseCase,
		authFailureRateLimiter: authFailureRateLimiter,
	}
}
// Execute fetches a collection by ID for the authenticated user.
//
// Flow: validate the ID, load the collection, apply authorization-failure
// rate limiting, verify read access (recording failures/successes with the
// rate limiter), resolve the owner's email (best-effort), and map the domain
// model — including its live file count — to a response DTO.
func (svc *getCollectionServiceImpl) Execute(ctx context.Context, collectionID gocql.UUID) (*CollectionResponseDTO, error) {
	//
	// STEP 1: Validation
	//
	// BUGFIX: gocql.UUID.String() never returns "", so the old
	// `collectionID.String() == ""` check could never fire. Compare against
	// the zero UUID to detect a missing collection ID.
	if collectionID == (gocql.UUID{}) {
		svc.logger.Warn("Empty collection ID provided")
		return nil, httperror.NewForBadRequestWithSingleField("collection_id", "Collection ID is required")
	}
	//
	// STEP 2: Get user ID from context
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 3: Get collection from repository
	//
	collection, err := svc.repo.Get(ctx, collectionID)
	if err != nil {
		svc.logger.Error("Failed to get collection",
			zap.Any("error", err),
			zap.Any("collection_id", collectionID))
		return nil, err
	}
	if collection == nil {
		svc.logger.Debug("Collection not found",
			zap.Any("collection_id", collectionID))
		return nil, httperror.NewForNotFoundWithSingleField("message", "Collection not found")
	}
	//
	// STEP 4: Check rate limiting for authorization failures
	//
	// Check if user has exceeded authorization failure limits before checking access.
	// This helps prevent privilege escalation attempts.
	if svc.authFailureRateLimiter != nil {
		allowed, remainingAttempts, resetTime, err := svc.authFailureRateLimiter.CheckAuthFailure(
			ctx,
			userID.String(),
			collectionID.String(),
			"collection:get")
		if err != nil {
			// Log error but continue - fail open for availability
			svc.logger.Error("Failed to check auth failure rate limit",
				zap.Error(err),
				zap.Any("user_id", userID),
				zap.Any("collection_id", collectionID))
		} else if !allowed {
			svc.logger.Warn("User blocked due to excessive authorization failures",
				zap.Any("user_id", userID),
				zap.Any("collection_id", collectionID),
				zap.Int("remaining_attempts", remainingAttempts),
				zap.Time("reset_time", resetTime))
			return nil, httperror.NewTooManyRequestsError(
				"Too many authorization failures. Please try again later")
		}
	}
	//
	// STEP 5: Check if the user has access to this collection
	//
	// Use CheckAccess to verify both access and permission level.
	// For GET operations, read_only permission is sufficient.
	hasAccess, err := svc.repo.CheckAccess(ctx, collectionID, userID, dom_collection.CollectionPermissionReadOnly)
	if err != nil {
		svc.logger.Error("Failed to check collection access",
			zap.Error(err),
			zap.Any("user_id", userID),
			zap.Any("collection_id", collectionID))
		return nil, httperror.NewInternalServerError("Failed to check collection access")
	}
	if !hasAccess {
		// Record authorization failure for rate limiting
		if svc.authFailureRateLimiter != nil {
			if err := svc.authFailureRateLimiter.RecordAuthFailure(
				ctx,
				userID.String(),
				collectionID.String(),
				"collection:get",
				"insufficient_permission"); err != nil {
				svc.logger.Error("Failed to record auth failure",
					zap.Error(err),
					zap.Any("user_id", userID),
					zap.Any("collection_id", collectionID))
			}
		}
		svc.logger.Warn("Unauthorized collection access attempt",
			zap.Any("user_id", userID),
			zap.Any("collection_id", collectionID),
			zap.String("required_permission", dom_collection.CollectionPermissionReadOnly))
		return nil, httperror.NewForForbiddenWithSingleField("message", "You don't have access to this collection")
	}
	// Record successful authorization
	if svc.authFailureRateLimiter != nil {
		if err := svc.authFailureRateLimiter.RecordAuthSuccess(
			ctx,
			userID.String(),
			collectionID.String(),
			"collection:get"); err != nil {
			svc.logger.Debug("Failed to record auth success",
				zap.Error(err),
				zap.Any("user_id", userID),
				zap.Any("collection_id", collectionID))
		}
	}
	//
	// STEP 6: Get owner's email (was mislabeled as a second "STEP 5")
	//
	var ownerEmail string
	svc.logger.Info("🔍 GetCollectionService: Looking up owner email",
		zap.String("collection_id", collectionID.String()),
		zap.String("owner_id", collection.OwnerID.String()))
	owner, err := svc.userGetByIDUseCase.Execute(ctx, collection.OwnerID)
	if err != nil {
		svc.logger.Warn("Failed to get owner email, continuing without it",
			zap.Any("error", err),
			zap.Any("owner_id", collection.OwnerID))
		// Don't fail the request, just continue without the owner email
	} else if owner != nil {
		ownerEmail = owner.Email
		svc.logger.Info("🔍 GetCollectionService: Found owner email",
			zap.String("owner_email", validation.MaskEmail(ownerEmail)))
	} else {
		svc.logger.Warn("🔍 GetCollectionService: Owner user not found",
			zap.String("owner_id", collection.OwnerID.String()))
	}
	//
	// STEP 7: Map domain model to response DTO
	//
	// Note: We pass collection.FileCount (not 0) to include the actual file count
	// in the response. This field is maintained by IncrementFileCount/DecrementFileCount
	// calls when files are added/removed from the collection.
	//
	svc.logger.Info("🔍 GetCollectionService: Mapping to DTO with owner_email",
		zap.String("owner_email", validation.MaskEmail(ownerEmail)))
	response := mapCollectionToDTO(collection, int(collection.FileCount), ownerEmail)
	return response, nil
}

View file

@ -0,0 +1,148 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/collection/get_filtered.go
package collection
import (
"context"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// GetFilteredCollectionsRequestDTO selects which collection categories to
// return. At least one of the two flags must be true.
type GetFilteredCollectionsRequestDTO struct {
	IncludeOwned  bool `json:"include_owned"`  // Include collections owned by the caller
	IncludeShared bool `json:"include_shared"` // Include collections shared with the caller
}
// FilteredCollectionsResponseDTO groups the caller's collections by ownership.
type FilteredCollectionsResponseDTO struct {
	OwnedCollections  []*CollectionResponseDTO `json:"owned_collections"`
	SharedCollections []*CollectionResponseDTO `json:"shared_collections"`
	TotalCount        int                      `json:"total_count"` // len(owned) + len(shared) after permission filtering
}
// GetFilteredCollectionsService returns the caller's owned and/or shared
// collections according to the request flags.
type GetFilteredCollectionsService interface {
	Execute(ctx context.Context, req *GetFilteredCollectionsRequestDTO) (*FilteredCollectionsResponseDTO, error)
}
// getFilteredCollectionsServiceImpl is the default
// GetFilteredCollectionsService backed by the collection repository.
type getFilteredCollectionsServiceImpl struct {
	config *config.Configuration
	logger *zap.Logger
	repo   dom_collection.CollectionRepository
}
// NewGetFilteredCollectionsService constructs a GetFilteredCollectionsService
// with its configuration, a service-scoped logger, and the collection repository.
func NewGetFilteredCollectionsService(
	config *config.Configuration,
	logger *zap.Logger,
	repo dom_collection.CollectionRepository,
) GetFilteredCollectionsService {
	return &getFilteredCollectionsServiceImpl{
		config: config,
		logger: logger.Named("GetFilteredCollectionsService"),
		repo:   repo,
	}
}
// Execute returns the authenticated user's collections filtered by ownership
// category. Owned collections are returned as-is (the owner implicitly holds
// admin permission); shared collections are re-verified against read-only
// access, and any that fail or error the check are silently dropped
// (best-effort listing).
func (svc *getFilteredCollectionsServiceImpl) Execute(ctx context.Context, req *GetFilteredCollectionsRequestDTO) (*FilteredCollectionsResponseDTO, error) {
	//
	// STEP 1: Validation
	//
	if req == nil {
		svc.logger.Warn("Failed validation with nil request")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "Request details are required")
	}
	e := make(map[string]string)
	// Requesting neither category would always yield an empty result, so it
	// is rejected up front.
	if !req.IncludeOwned && !req.IncludeShared {
		e["filter_options"] = "At least one filter option (include_owned or include_shared) must be enabled"
	}
	if len(e) != 0 {
		svc.logger.Warn("Failed validation",
			zap.Any("error", e))
		return nil, httperror.NewForBadRequest(&e)
	}
	//
	// STEP 2: Get user ID from context
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 3: Create filter options
	//
	filterOptions := dom_collection.CollectionFilterOptions{
		IncludeOwned:  req.IncludeOwned,
		IncludeShared: req.IncludeShared,
		UserID:        userID,
	}
	//
	// STEP 4: Get filtered collections from repository
	//
	result, err := svc.repo.GetCollectionsWithFilter(ctx, filterOptions)
	if err != nil {
		svc.logger.Error("Failed to get filtered collections",
			zap.Any("error", err),
			zap.Any("user_id", userID),
			zap.Any("filter_options", filterOptions))
		return nil, err
	}
	//
	// STEP 5: Filter collections based on permission levels and map to DTOs
	//
	// For owned collections, the owner always has admin permission
	ownedCollectionsWithPermission := make([]*CollectionResponseDTO, 0, len(result.OwnedCollections))
	for _, collection := range result.OwnedCollections {
		// Owner always has full access, no need to check permission
		ownerEmail := getOwnerEmailFromMembers(collection)
		ownedCollectionsWithPermission = append(ownedCollectionsWithPermission,
			mapCollectionToDTO(collection, int(collection.FileCount), ownerEmail))
	}
	// For shared collections, verify the user has at least read_only permission
	sharedCollectionsWithPermission := make([]*CollectionResponseDTO, 0, len(result.SharedCollections))
	for _, collection := range result.SharedCollections {
		// Check if user has at least read_only permission for this shared collection
		hasAccess, err := svc.repo.CheckAccess(ctx, collection.ID, userID, dom_collection.CollectionPermissionReadOnly)
		if err != nil {
			svc.logger.Warn("Failed to check collection access, skipping collection",
				zap.Error(err),
				zap.Any("collection_id", collection.ID),
				zap.Any("user_id", userID))
			continue // Skip collections where we can't verify access
		}
		if hasAccess {
			ownerEmail := getOwnerEmailFromMembers(collection)
			sharedCollectionsWithPermission = append(sharedCollectionsWithPermission,
				mapCollectionToDTO(collection, int(collection.FileCount), ownerEmail))
		} else {
			svc.logger.Debug("User lacks permission for shared collection, filtering out",
				zap.Any("collection_id", collection.ID),
				zap.Any("user_id", userID))
		}
	}
	response := &FilteredCollectionsResponseDTO{
		OwnedCollections:  ownedCollectionsWithPermission,
		SharedCollections: sharedCollectionsWithPermission,
		TotalCount:        len(ownedCollectionsWithPermission) + len(sharedCollectionsWithPermission),
	}
	svc.logger.Debug("Retrieved filtered collections successfully",
		zap.Int("owned_count", len(response.OwnedCollections)),
		zap.Int("shared_count", len(response.SharedCollections)),
		zap.Int("total_count", response.TotalCount),
		zap.Any("user_id", userID))
	return response, nil
}

View file

@ -0,0 +1,94 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/collection/get_sync_data.go
package collection
import (
"context"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
uc_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/collection"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// GetCollectionSyncDataService returns paginated collection sync data for a
// user, used by devices to reconcile local state with the cloud.
type GetCollectionSyncDataService interface {
	// Execute pages through the user's accessible collections starting at
	// cursor, returning at most limit entries filtered by accessType.
	Execute(ctx context.Context, userID gocql.UUID, cursor *dom_collection.CollectionSyncCursor, limit int64, accessType string) (*dom_collection.CollectionSyncResponse, error)
}
// getCollectionSyncDataServiceImpl is the default GetCollectionSyncDataService,
// delegating the actual data retrieval and access filtering to the use case.
type getCollectionSyncDataServiceImpl struct {
	config                       *config.Configuration
	logger                       *zap.Logger
	getCollectionSyncDataUseCase uc_collection.GetCollectionSyncDataUseCase
}
// NewGetCollectionSyncDataService constructs a GetCollectionSyncDataService
// with its configuration, a service-scoped logger, and the sync-data use case.
func NewGetCollectionSyncDataService(
	config *config.Configuration,
	logger *zap.Logger,
	getCollectionSyncDataUseCase uc_collection.GetCollectionSyncDataUseCase,
) GetCollectionSyncDataService {
	return &getCollectionSyncDataServiceImpl{
		config:                       config,
		logger:                       logger.Named("GetCollectionSyncDataService"),
		getCollectionSyncDataUseCase: getCollectionSyncDataUseCase,
	}
}
// Execute returns collection sync data for userID, enforcing that the
// session user can only request their own data. Access filtering of the
// returned collections is delegated to the use case.
func (svc *getCollectionSyncDataServiceImpl) Execute(ctx context.Context, userID gocql.UUID, cursor *dom_collection.CollectionSyncCursor, limit int64, accessType string) (*dom_collection.CollectionSyncResponse, error) {
	//
	// STEP 1: Validation
	//
	// BUGFIX: gocql.UUID.String() never returns "", so the old
	// `userID.String() == ""` check could never fire. Compare against the
	// zero UUID to detect a missing user ID.
	if userID == (gocql.UUID{}) {
		svc.logger.Warn("Empty user ID provided")
		return nil, httperror.NewForBadRequestWithSingleField("user_id", "User ID is required")
	}
	//
	// STEP 2: Verify user ID from context matches the parameter
	//
	sessionUserID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	// Ensure the user can only get their own sync data
	if sessionUserID != userID {
		svc.logger.Warn("User trying to access another user's sync data",
			zap.Any("session_user_id", sessionUserID),
			zap.Any("requested_user_id", userID))
		return nil, httperror.NewForForbiddenWithSingleField("message", "Cannot access other user's sync data")
	}
	//
	// STEP 3: Get sync data based on access type
	//
	// Note: The use case will handle filtering collections based on the user's access.
	// It returns only collections the user owns or has been granted access to.
	syncData, err := svc.getCollectionSyncDataUseCase.Execute(ctx, userID, cursor, limit, accessType)
	if err != nil {
		svc.logger.Error("Failed to get collection sync data",
			zap.Any("error", err),
			zap.Any("user_id", userID))
		return nil, err
	}
	if syncData == nil {
		svc.logger.Debug("Collection sync data not found",
			zap.Any("user_id", userID))
		return nil, httperror.NewForNotFoundWithSingleField("message", "Collection sync results not found")
	}
	// Note: Access control is already handled by the use case.
	// It only returns collections the user has access to, so no per-collection
	// access check is needed here.
	svc.logger.Debug("Collection sync data successfully retrieved",
		zap.Any("user_id", userID),
		zap.Int("collections_returned", len(syncData.Collections)))
	return syncData, nil
}

View file

@ -0,0 +1,106 @@
// monorepo/cloud/backend/internal/maplefile/service/collection/list_by_user.go
package collection
import (
"context"
"errors"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
dom_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/file"
)
// CollectionsResponseDTO is the common list-response envelope shared by the
// "list my collections" and "list shared collections" endpoints.
type CollectionsResponseDTO struct {
	Collections []*CollectionResponseDTO `json:"collections"`
}

// ListUserCollectionsService lists the collections OWNED by the authenticated
// user (the "My Folders" view); shared collections are excluded.
type ListUserCollectionsService interface {
	Execute(ctx context.Context) (*CollectionsResponseDTO, error)
}

// listUserCollectionsServiceImpl is the default implementation.
// fileRepo is injected for parity with sibling services, though the current
// Execute reads file counts straight off the collection record.
type listUserCollectionsServiceImpl struct {
	config   *config.Configuration
	logger   *zap.Logger
	repo     dom_collection.CollectionRepository
	fileRepo dom_file.FileMetadataRepository
}

// NewListUserCollectionsService constructs the service with a named logger.
func NewListUserCollectionsService(
	config *config.Configuration,
	logger *zap.Logger,
	repo dom_collection.CollectionRepository,
	fileRepo dom_file.FileMetadataRepository,
) ListUserCollectionsService {
	logger = logger.Named("ListUserCollectionsService")
	return &listUserCollectionsServiceImpl{
		config:   config,
		logger:   logger,
		repo:     repo,
		fileRepo: fileRepo,
	}
}
// Execute returns the collections owned by the authenticated user, after
// de-duplicating entries where the user appears both as owner and as member.
func (svc *listUserCollectionsServiceImpl) Execute(ctx context.Context) (*CollectionsResponseDTO, error) {
	//
	// STEP 1: Get user ID from context
	//
	sessionUserID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, errors.New("user ID not found in context")
	}
	//
	// STEP 2: Get user's owned collections from repository
	//
	filterResult, err := svc.repo.GetCollectionsWithFilter(ctx, dom_collection.CollectionFilterOptions{
		UserID:        sessionUserID,
		IncludeOwned:  true,
		IncludeShared: false, // Only include owned collections for "My Folders"
	})
	if err != nil {
		svc.logger.Error("Failed to get user collections",
			zap.Any("error", err),
			zap.Any("user_id", sessionUserID))
		return nil, err
	}
	all := filterResult.GetAllCollections()
	//
	// STEP 3: Deduplicate collections (user might be both owner and member)
	//
	dedup := make(map[string]struct{}, len(all))
	unique := make([]*dom_collection.Collection, 0, len(all))
	for _, c := range all {
		key := c.ID.String()
		if _, seen := dedup[key]; seen {
			continue
		}
		dedup[key] = struct{}{}
		unique = append(unique, c)
	}
	//
	// STEP 4: Map domain models to response DTOs with file counts
	//
	out := &CollectionsResponseDTO{
		Collections: make([]*CollectionResponseDTO, len(unique)),
	}
	for idx, c := range unique {
		// Use the file count stored in the collection itself (no N+1 query)
		out.Collections[idx] = mapCollectionToDTO(c, int(c.FileCount), getOwnerEmailFromMembers(c))
	}
	svc.logger.Debug("Retrieved user collections",
		zap.Int("total_count", len(all)),
		zap.Int("unique_count", len(unique)),
		zap.Any("user_id", sessionUserID))
	return out, nil
}

View file

@ -0,0 +1,111 @@
// monorepo/cloud/backend/internal/maplefile/service/collection/list_shared_with_user.go
package collection
import (
	"context"
	"errors"

	"github.com/gocql/gocql"
	"go.uber.org/zap"

	"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
	"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
	dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
	dom_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/file"
)
// ListSharedCollectionsService lists the collections that OTHER users have
// shared with the authenticated user; self-owned collections are excluded.
type ListSharedCollectionsService interface {
	Execute(ctx context.Context) (*CollectionsResponseDTO, error)
}

// listSharedCollectionsServiceImpl is the default implementation.
// fileRepo is injected for parity with sibling services, though the current
// Execute reads file counts straight off the collection record.
type listSharedCollectionsServiceImpl struct {
	config   *config.Configuration
	logger   *zap.Logger
	repo     dom_collection.CollectionRepository
	fileRepo dom_file.FileMetadataRepository
}

// NewListSharedCollectionsService constructs the service with a named logger.
func NewListSharedCollectionsService(
	config *config.Configuration,
	logger *zap.Logger,
	repo dom_collection.CollectionRepository,
	fileRepo dom_file.FileMetadataRepository,
) ListSharedCollectionsService {
	logger = logger.Named("ListSharedCollectionsService")
	return &listSharedCollectionsServiceImpl{
		config:   config,
		logger:   logger,
		repo:     repo,
		fileRepo: fileRepo,
	}
}
// Execute returns the collections shared with the authenticated user by
// OTHER users; collections the user owns are filtered out of the result.
func (svc *listSharedCollectionsServiceImpl) Execute(ctx context.Context) (*CollectionsResponseDTO, error) {
	//
	// STEP 1: Get user ID from context
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		// BUGFIX: this previously returned (nil, nil), which made callers
		// treat a missing auth context as success with a nil response (a
		// likely nil-pointer dereference downstream). Return an explicit
		// error, matching the sibling ListUserCollectionsService.
		return nil, errors.New("user ID not found in context")
	}
	svc.logger.Info("🔍 LIST SHARED COLLECTIONS: Starting",
		zap.String("user_id", userID.String()))
	//
	// STEP 2: Get collections shared with the user
	//
	collections, err := svc.repo.GetCollectionsSharedWithUser(ctx, userID)
	if err != nil {
		svc.logger.Error("🔍 LIST SHARED COLLECTIONS: Failed to get shared collections",
			zap.Any("error", err),
			zap.Any("user_id", userID))
		return nil, err
	}
	svc.logger.Info("🔍 LIST SHARED COLLECTIONS: Query completed",
		zap.String("user_id", userID.String()),
		zap.Int("collections_found", len(collections)))
	//
	// STEP 3: Filter out collections where user is the owner
	// (Only show collections shared BY others, not collections user owns and shared with themselves)
	//
	var sharedByOthers []*dom_collection.Collection
	for _, collection := range collections {
		if collection.OwnerID != userID {
			sharedByOthers = append(sharedByOthers, collection)
			svc.logger.Debug("🔍 LIST SHARED COLLECTIONS: Including collection shared by another user",
				zap.String("collection_id", collection.ID.String()),
				zap.String("owner_id", collection.OwnerID.String()))
		} else {
			svc.logger.Debug("🔍 LIST SHARED COLLECTIONS: Excluding self-owned collection",
				zap.String("collection_id", collection.ID.String()),
				zap.String("owner_id", collection.OwnerID.String()))
		}
	}
	svc.logger.Info("🔍 LIST SHARED COLLECTIONS: Filtered collections",
		zap.Int("total_collections", len(collections)),
		zap.Int("shared_by_others", len(sharedByOthers)),
		zap.Int("excluded_self_owned", len(collections)-len(sharedByOthers)))
	//
	// STEP 4: Map domain models to response DTOs
	//
	response := &CollectionsResponseDTO{
		Collections: make([]*CollectionResponseDTO, len(sharedByOthers)),
	}
	for i, collection := range sharedByOthers {
		// Use the file count stored in the collection itself (no N+1 query)
		ownerEmail := getOwnerEmailFromMembers(collection)
		response.Collections[i] = mapCollectionToDTO(collection, int(collection.FileCount), ownerEmail)
	}
	svc.logger.Info("🔍 LIST SHARED COLLECTIONS: Completed successfully",
		zap.Int("count", len(sharedByOthers)),
		zap.String("user_id", userID.String()))
	return response, nil
}

View file

@ -0,0 +1,153 @@
// monorepo/cloud/backend/internal/maplefile/service/collection/move_collection.go
package collection
import (
"context"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// MoveCollectionRequestDTO carries a client-computed move: the new parent
// plus the re-derived ancestor chain and encrypted path segments (the server
// cannot recompute these because collection metadata is end-to-end encrypted).
type MoveCollectionRequestDTO struct {
	CollectionID        gocql.UUID   `json:"collection_id"`
	NewParentID         gocql.UUID   `json:"new_parent_id"`
	UpdatedAncestors    []gocql.UUID `json:"updated_ancestors"`
	UpdatedPathSegments []string     `json:"updated_path_segments"`
}

// MoveCollectionResponseDTO reports the outcome of a move request.
type MoveCollectionResponseDTO struct {
	Success bool   `json:"success"`
	Message string `json:"message"`
}

// MoveCollectionService relocates a collection under a new parent, after
// verifying the caller has write access to both source and destination.
type MoveCollectionService interface {
	Execute(ctx context.Context, req *MoveCollectionRequestDTO) (*MoveCollectionResponseDTO, error)
}

// moveCollectionServiceImpl is the default implementation.
type moveCollectionServiceImpl struct {
	config *config.Configuration
	logger *zap.Logger
	repo   dom_collection.CollectionRepository
}

// NewMoveCollectionService constructs the service with a named logger.
func NewMoveCollectionService(
	config *config.Configuration,
	logger *zap.Logger,
	repo dom_collection.CollectionRepository,
) MoveCollectionService {
	logger = logger.Named("MoveCollectionService")
	return &moveCollectionServiceImpl{
		config: config,
		logger: logger,
		repo:   repo,
	}
}
// Execute moves a collection under a new parent.
//
// It validates the request, requires read-write access to BOTH the moved
// collection and the destination parent, then delegates the hierarchy update
// to the repository. Errors are returned as httperror values suitable for
// direct HTTP responses.
func (svc *moveCollectionServiceImpl) Execute(ctx context.Context, req *MoveCollectionRequestDTO) (*MoveCollectionResponseDTO, error) {
	//
	// STEP 1: Validation
	//
	if req == nil {
		svc.logger.Warn("Failed validation with nil request")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "Move details are required")
	}
	e := make(map[string]string)
	// BUGFIX: gocql.UUID.String() always yields a 36-character string, so the
	// old `String() == ""` checks could never fire. Compare against the
	// zero-value UUID to detect missing IDs.
	if req.CollectionID == (gocql.UUID{}) {
		e["collection_id"] = "Collection ID is required"
	}
	if req.NewParentID == (gocql.UUID{}) {
		e["new_parent_id"] = "New parent ID is required"
	}
	// A collection can never be its own parent; reject the cycle up front.
	if req.NewParentID == req.CollectionID {
		e["new_parent_id"] = "A collection cannot be moved into itself"
	}
	if len(req.UpdatedAncestors) == 0 {
		e["updated_ancestors"] = "Updated ancestors are required"
	}
	if len(req.UpdatedPathSegments) == 0 {
		e["updated_path_segments"] = "Updated path segments are required"
	}
	if len(e) != 0 {
		svc.logger.Warn("Failed validation",
			zap.Any("error", e))
		return nil, httperror.NewForBadRequest(&e)
	}
	//
	// STEP 2: Get user ID from context
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 3: Check if user has write access to the collection
	//
	hasAccess, err := svc.repo.CheckAccess(ctx, req.CollectionID, userID, dom_collection.CollectionPermissionReadWrite)
	if err != nil {
		svc.logger.Error("Failed to check access",
			zap.Any("error", err),
			zap.Any("collection_id", req.CollectionID),
			zap.Any("user_id", userID))
		return nil, err
	}
	if !hasAccess {
		svc.logger.Warn("Unauthorized collection move attempt",
			zap.Any("user_id", userID),
			zap.Any("collection_id", req.CollectionID))
		return nil, httperror.NewForForbiddenWithSingleField("message", "You don't have permission to move this collection")
	}
	//
	// STEP 4: Check if user has write access to the new parent
	//
	hasParentAccess, err := svc.repo.CheckAccess(ctx, req.NewParentID, userID, dom_collection.CollectionPermissionReadWrite)
	if err != nil {
		svc.logger.Error("Failed to check access to new parent",
			zap.Any("error", err),
			zap.Any("new_parent_id", req.NewParentID),
			zap.Any("user_id", userID))
		return nil, err
	}
	if !hasParentAccess {
		svc.logger.Warn("Unauthorized destination parent access",
			zap.Any("user_id", userID),
			zap.Any("new_parent_id", req.NewParentID))
		return nil, httperror.NewForForbiddenWithSingleField("message", "You don't have permission to move to this destination")
	}
	//
	// STEP 5: Move the collection
	//
	err = svc.repo.MoveCollection(
		ctx,
		req.CollectionID,
		req.NewParentID,
		req.UpdatedAncestors,
		req.UpdatedPathSegments,
	)
	if err != nil {
		svc.logger.Error("Failed to move collection",
			zap.Any("error", err),
			zap.Any("collection_id", req.CollectionID),
			zap.Any("new_parent_id", req.NewParentID))
		return nil, err
	}
	svc.logger.Info("Collection moved successfully",
		zap.Any("collection_id", req.CollectionID),
		zap.Any("new_parent_id", req.NewParentID))
	return &MoveCollectionResponseDTO{
		Success: true,
		Message: "Collection moved successfully",
	}, nil
}

View file

@ -0,0 +1,170 @@
package collection
import (
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
dom_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/file"
dom_tag "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/tag"
uc_blockedemail "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/blockedemail"
uc_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/collection"
uc_fileobjectstorage "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/fileobjectstorage"
uc_storagedailyusage "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/storagedailyusage"
uc_storageusageevent "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/storageusageevent"
uc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/user"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/emailer/mailgun"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/ratelimit"
)
// Wire providers for collection services.
//
// Each provider is a thin constructor adapter used by the dependency-injection
// wiring; they contain no logic beyond forwarding arguments to the matching
// NewXxxService constructor.

// ProvideCreateCollectionService wires CreateCollectionService.
func ProvideCreateCollectionService(
	cfg *config.Configuration,
	logger *zap.Logger,
	userGetByIDUseCase uc_user.UserGetByIDUseCase,
	repo dom_collection.CollectionRepository,
	tagRepo dom_tag.Repository,
) CreateCollectionService {
	return NewCreateCollectionService(cfg, logger, userGetByIDUseCase, repo, tagRepo)
}

// ProvideGetCollectionService wires GetCollectionService.
func ProvideGetCollectionService(
	cfg *config.Configuration,
	logger *zap.Logger,
	repo dom_collection.CollectionRepository,
	userGetByIDUseCase uc_user.UserGetByIDUseCase,
	authFailureRateLimiter ratelimit.AuthFailureRateLimiter,
) GetCollectionService {
	return NewGetCollectionService(cfg, logger, repo, userGetByIDUseCase, authFailureRateLimiter)
}

// ProvideUpdateCollectionService wires UpdateCollectionService.
func ProvideUpdateCollectionService(
	cfg *config.Configuration,
	logger *zap.Logger,
	repo dom_collection.CollectionRepository,
	authFailureRateLimiter ratelimit.AuthFailureRateLimiter,
) UpdateCollectionService {
	return NewUpdateCollectionService(cfg, logger, repo, authFailureRateLimiter)
}

// ProvideSoftDeleteCollectionService wires SoftDeleteCollectionService with
// its storage-accounting and object-storage collaborators.
func ProvideSoftDeleteCollectionService(
	cfg *config.Configuration,
	logger *zap.Logger,
	repo dom_collection.CollectionRepository,
	fileRepo dom_file.FileMetadataRepository,
	getCollectionUseCase uc_collection.GetCollectionUseCase,
	updateCollectionUseCase uc_collection.UpdateCollectionUseCase,
	hardDeleteCollectionUseCase uc_collection.HardDeleteCollectionUseCase,
	deleteMultipleDataUseCase uc_fileobjectstorage.DeleteMultipleEncryptedDataUseCase,
	storageQuotaHelperUseCase uc_user.UserStorageQuotaHelperUseCase,
	createStorageUsageEventUseCase uc_storageusageevent.CreateStorageUsageEventUseCase,
	updateStorageUsageUseCase uc_storagedailyusage.UpdateStorageUsageUseCase,
) SoftDeleteCollectionService {
	return NewSoftDeleteCollectionService(
		cfg,
		logger,
		repo,
		fileRepo,
		getCollectionUseCase,
		updateCollectionUseCase,
		hardDeleteCollectionUseCase,
		deleteMultipleDataUseCase,
		storageQuotaHelperUseCase,
		createStorageUsageEventUseCase,
		updateStorageUsageUseCase,
	)
}

// ProvideArchiveCollectionService wires ArchiveCollectionService.
func ProvideArchiveCollectionService(
	cfg *config.Configuration,
	logger *zap.Logger,
	repo dom_collection.CollectionRepository,
) ArchiveCollectionService {
	return NewArchiveCollectionService(cfg, logger, repo)
}

// ProvideRestoreCollectionService wires RestoreCollectionService.
func ProvideRestoreCollectionService(
	cfg *config.Configuration,
	logger *zap.Logger,
	repo dom_collection.CollectionRepository,
) RestoreCollectionService {
	return NewRestoreCollectionService(cfg, logger, repo)
}

// ProvideListUserCollectionsService wires ListUserCollectionsService.
func ProvideListUserCollectionsService(
	cfg *config.Configuration,
	logger *zap.Logger,
	repo dom_collection.CollectionRepository,
	fileRepo dom_file.FileMetadataRepository,
) ListUserCollectionsService {
	return NewListUserCollectionsService(cfg, logger, repo, fileRepo)
}

// ProvideListSharedCollectionsService wires ListSharedCollectionsService.
func ProvideListSharedCollectionsService(
	cfg *config.Configuration,
	logger *zap.Logger,
	repo dom_collection.CollectionRepository,
	fileRepo dom_file.FileMetadataRepository,
) ListSharedCollectionsService {
	return NewListSharedCollectionsService(cfg, logger, repo, fileRepo)
}

// ProvideFindRootCollectionsService wires FindRootCollectionsService.
func ProvideFindRootCollectionsService(
	cfg *config.Configuration,
	logger *zap.Logger,
	repo dom_collection.CollectionRepository,
) FindRootCollectionsService {
	return NewFindRootCollectionsService(cfg, logger, repo)
}

// ProvideFindCollectionsByParentService wires FindCollectionsByParentService.
func ProvideFindCollectionsByParentService(
	cfg *config.Configuration,
	logger *zap.Logger,
	repo dom_collection.CollectionRepository,
) FindCollectionsByParentService {
	return NewFindCollectionsByParentService(cfg, logger, repo)
}

// ProvideGetCollectionSyncDataService wires GetCollectionSyncDataService.
func ProvideGetCollectionSyncDataService(
	cfg *config.Configuration,
	logger *zap.Logger,
	getCollectionSyncDataUseCase uc_collection.GetCollectionSyncDataUseCase,
) GetCollectionSyncDataService {
	return NewGetCollectionSyncDataService(cfg, logger, getCollectionSyncDataUseCase)
}

// ProvideMoveCollectionService wires MoveCollectionService.
func ProvideMoveCollectionService(
	cfg *config.Configuration,
	logger *zap.Logger,
	repo dom_collection.CollectionRepository,
) MoveCollectionService {
	return NewMoveCollectionService(cfg, logger, repo)
}

// ProvideGetFilteredCollectionsService wires GetFilteredCollectionsService.
func ProvideGetFilteredCollectionsService(
	cfg *config.Configuration,
	logger *zap.Logger,
	repo dom_collection.CollectionRepository,
) GetFilteredCollectionsService {
	return NewGetFilteredCollectionsService(cfg, logger, repo)
}

// ProvideShareCollectionService wires ShareCollectionService with its
// blocked-email check, user lookup, and notification emailer.
func ProvideShareCollectionService(
	cfg *config.Configuration,
	logger *zap.Logger,
	repo dom_collection.CollectionRepository,
	checkBlockedEmailUC uc_blockedemail.CheckBlockedEmailUseCase,
	userGetByIDUC uc_user.UserGetByIDUseCase,
	emailer mailgun.Emailer,
) ShareCollectionService {
	return NewShareCollectionService(cfg, logger, repo, checkBlockedEmailUC, userGetByIDUC, emailer)
}

// ProvideRemoveMemberService wires RemoveMemberService.
func ProvideRemoveMemberService(
	cfg *config.Configuration,
	logger *zap.Logger,
	repo dom_collection.CollectionRepository,
) RemoveMemberService {
	return NewRemoveMemberService(cfg, logger, repo)
}

View file

@ -0,0 +1,183 @@
// monorepo/cloud/backend/internal/maplefile/service/collection/remove_member.go
package collection
import (
"context"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/transaction"
"github.com/gocql/gocql"
)
// RemoveMemberRequestDTO identifies the member to remove and whether the
// removal should cascade through descendant collections.
type RemoveMemberRequestDTO struct {
	CollectionID          gocql.UUID `json:"collection_id"`
	RecipientID           gocql.UUID `json:"recipient_id"`
	RemoveFromDescendants bool       `json:"remove_from_descendants"`
}

// RemoveMemberResponseDTO reports the outcome of a member-removal request.
type RemoveMemberResponseDTO struct {
	Success bool   `json:"success"`
	Message string `json:"message"`
}

// RemoveMemberService removes a member from a collection (and optionally its
// descendants). Only the collection owner or an admin member may do so.
type RemoveMemberService interface {
	Execute(ctx context.Context, req *RemoveMemberRequestDTO) (*RemoveMemberResponseDTO, error)
}

// removeMemberServiceImpl is the default implementation.
type removeMemberServiceImpl struct {
	config *config.Configuration
	logger *zap.Logger
	repo   dom_collection.CollectionRepository
}

// NewRemoveMemberService constructs the service with a named logger.
func NewRemoveMemberService(
	config *config.Configuration,
	logger *zap.Logger,
	repo dom_collection.CollectionRepository,
) RemoveMemberService {
	logger = logger.Named("RemoveMemberService")
	return &removeMemberServiceImpl{
		config: config,
		logger: logger,
		repo:   repo,
	}
}
// Execute removes RecipientID from CollectionID, optionally cascading through
// descendant collections. The caller must be the collection owner or hold
// admin permission on the collection.
func (svc *removeMemberServiceImpl) Execute(ctx context.Context, req *RemoveMemberRequestDTO) (*RemoveMemberResponseDTO, error) {
	//
	// STEP 1: Validation
	//
	if req == nil {
		svc.logger.Warn("Failed validation with nil request")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "Remove member details are required")
	}
	e := make(map[string]string)
	// BUGFIX: gocql.UUID.String() always yields a 36-character string, so the
	// old `String() == ""` checks could never fire. Compare against the
	// zero-value UUID to detect missing IDs.
	if req.CollectionID == (gocql.UUID{}) {
		e["collection_id"] = "Collection ID is required"
	}
	if req.RecipientID == (gocql.UUID{}) {
		e["recipient_id"] = "Recipient ID is required"
	}
	if len(e) != 0 {
		svc.logger.Warn("Failed validation",
			zap.Any("error", e))
		return nil, httperror.NewForBadRequest(&e)
	}
	//
	// STEP 2: Get user ID from context
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 3: Check if user has admin access to the collection
	//
	hasAccess, err := svc.repo.CheckAccess(ctx, req.CollectionID, userID, dom_collection.CollectionPermissionAdmin)
	if err != nil {
		svc.logger.Error("Failed to check access",
			zap.Any("error", err),
			zap.Any("collection_id", req.CollectionID),
			zap.Any("user_id", userID))
		return nil, err
	}
	// Collection owners and admin members can remove members
	if !hasAccess {
		isOwner, ownerErr := svc.repo.IsCollectionOwner(ctx, req.CollectionID, userID)
		if ownerErr != nil {
			// BUGFIX: the ownership-lookup error was silently discarded; log
			// it so a storage failure here is visible. We still deny access
			// below, which is the safe default.
			svc.logger.Error("Failed to check collection ownership",
				zap.Any("error", ownerErr),
				zap.Any("collection_id", req.CollectionID),
				zap.Any("user_id", userID))
		}
		if !isOwner {
			svc.logger.Warn("Unauthorized member removal attempt",
				zap.Any("user_id", userID),
				zap.Any("collection_id", req.CollectionID))
			return nil, httperror.NewForForbiddenWithSingleField("message", "You don't have permission to remove members from this collection")
		}
	}
	//
	// SAGA: Initialize distributed transaction manager
	//
	// NOTE(review): as written the saga never compensates — the only
	// compensation is registered AFTER the last fallible operation, and the
	// Rollback call fires before any compensation exists. It is kept so the
	// compensation is in place if further fallible steps are added after it.
	saga := transaction.NewSaga("remove-member", svc.logger)
	//
	// STEP 4: Retrieve the membership before removing (needed for compensation)
	//
	existingMembership, err := svc.repo.GetCollectionMembership(ctx, req.CollectionID, req.RecipientID)
	if err != nil {
		svc.logger.Error("Failed to get collection membership",
			zap.Any("error", err),
			zap.Any("collection_id", req.CollectionID),
			zap.Any("recipient_id", req.RecipientID))
		return nil, err
	}
	if existingMembership == nil {
		svc.logger.Debug("Member not found in collection",
			zap.Any("collection_id", req.CollectionID),
			zap.Any("recipient_id", req.RecipientID))
		return nil, httperror.NewForNotFoundWithSingleField("message", "Member not found in this collection")
	}
	//
	// STEP 5: Remove the member
	//
	var err2 error
	if req.RemoveFromDescendants {
		err2 = svc.repo.RemoveMemberFromHierarchy(ctx, req.CollectionID, req.RecipientID)
	} else {
		err2 = svc.repo.RemoveMember(ctx, req.CollectionID, req.RecipientID)
	}
	if err2 != nil {
		svc.logger.Error("Failed to remove member",
			zap.Any("error", err2),
			zap.Any("collection_id", req.CollectionID),
			zap.Any("recipient_id", req.RecipientID),
			zap.Bool("remove_from_descendants", req.RemoveFromDescendants))
		saga.Rollback(ctx) // Rollback any previous operations
		return nil, err2
	}
	//
	// SAGA: Register compensation to re-add the member if needed
	// IMPORTANT: Capture by value for closure
	//
	membershipCaptured := existingMembership
	collectionIDCaptured := req.CollectionID
	removeFromDescendantsCaptured := req.RemoveFromDescendants
	saga.AddCompensation(func(ctx context.Context) error {
		svc.logger.Warn("SAGA compensation: re-adding member to collection",
			zap.String("collection_id", collectionIDCaptured.String()),
			zap.String("recipient_id", membershipCaptured.RecipientID.String()),
			zap.Bool("add_to_descendants", removeFromDescendantsCaptured))
		if removeFromDescendantsCaptured {
			// Re-add to hierarchy if it was removed from hierarchy
			return svc.repo.AddMemberToHierarchy(ctx, collectionIDCaptured, membershipCaptured)
		}
		// Re-add to single collection if it was removed from single collection
		return svc.repo.AddMember(ctx, collectionIDCaptured, membershipCaptured)
	})
	svc.logger.Info("Member removed successfully",
		zap.Any("collection_id", req.CollectionID),
		zap.Any("recipient_id", req.RecipientID),
		zap.Bool("removed_from_descendants", req.RemoveFromDescendants))
	return &RemoveMemberResponseDTO{
		Success: true,
		Message: "Member removed successfully",
	}, nil
}

View file

@ -0,0 +1,135 @@
// monorepo/cloud/backend/internal/maplefile/service/collection/restore.go
package collection
import (
"context"
"time"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// RestoreCollectionRequestDTO identifies the collection to restore.
type RestoreCollectionRequestDTO struct {
	ID gocql.UUID `json:"id"`
}

// RestoreCollectionResponseDTO reports the outcome of a restore request.
type RestoreCollectionResponseDTO struct {
	Success bool   `json:"success"`
	Message string `json:"message"`
}

// RestoreCollectionService returns a soft-deleted or archived collection to
// the active state. Only the collection owner may restore.
type RestoreCollectionService interface {
	Execute(ctx context.Context, req *RestoreCollectionRequestDTO) (*RestoreCollectionResponseDTO, error)
}

// restoreCollectionServiceImpl is the default implementation.
type restoreCollectionServiceImpl struct {
	config *config.Configuration
	logger *zap.Logger
	repo   dom_collection.CollectionRepository
}

// NewRestoreCollectionService constructs the service with a named logger.
func NewRestoreCollectionService(
	config *config.Configuration,
	logger *zap.Logger,
	repo dom_collection.CollectionRepository,
) RestoreCollectionService {
	logger = logger.Named("RestoreCollectionService")
	return &restoreCollectionServiceImpl{
		config: config,
		logger: logger,
		repo:   repo,
	}
}
// Execute restores a collection to the active state.
//
// Only the owner may restore, and the collection's current state must permit
// a transition back to active (validated via IsValidStateTransition). On
// success the version counter is bumped and modification metadata recorded.
func (svc *restoreCollectionServiceImpl) Execute(ctx context.Context, req *RestoreCollectionRequestDTO) (*RestoreCollectionResponseDTO, error) {
	//
	// STEP 1: Validation
	//
	if req == nil {
		svc.logger.Warn("Failed validation with nil request")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "Collection ID is required")
	}
	// BUGFIX: gocql.UUID.String() always yields a 36-character string, so the
	// old `req.ID.String() == ""` check could never fire. Compare against the
	// zero-value UUID instead.
	if req.ID == (gocql.UUID{}) {
		svc.logger.Warn("Empty collection ID")
		return nil, httperror.NewForBadRequestWithSingleField("id", "Collection ID is required")
	}
	//
	// STEP 2: Get user ID from context
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 3: Retrieve existing collection (including non-active states for restoration)
	//
	collection, err := svc.repo.Get(ctx, req.ID)
	if err != nil {
		svc.logger.Error("Failed to get collection",
			zap.Any("error", err),
			zap.Any("collection_id", req.ID))
		return nil, err
	}
	if collection == nil {
		svc.logger.Debug("Collection not found",
			zap.Any("collection_id", req.ID))
		return nil, httperror.NewForNotFoundWithSingleField("message", "Collection not found")
	}
	//
	// STEP 4: Check if user has rights to restore this collection
	//
	if collection.OwnerID != userID {
		svc.logger.Warn("Unauthorized collection restore attempt",
			zap.Any("user_id", userID),
			zap.Any("collection_id", req.ID))
		return nil, httperror.NewForForbiddenWithSingleField("message", "Only the collection owner can restore a collection")
	}
	//
	// STEP 5: Validate state transition
	//
	err = dom_collection.IsValidStateTransition(collection.State, dom_collection.CollectionStateActive)
	if err != nil {
		svc.logger.Warn("Invalid state transition for collection restore",
			zap.Any("collection_id", req.ID),
			zap.String("current_state", collection.State),
			zap.String("target_state", dom_collection.CollectionStateActive),
			zap.Error(err))
		return nil, httperror.NewForBadRequestWithSingleField("state", err.Error())
	}
	//
	// STEP 6: Restore the collection
	//
	collection.State = dom_collection.CollectionStateActive
	collection.Version++ // Update mutation means we increment version.
	collection.ModifiedAt = time.Now()
	collection.ModifiedByUserID = userID
	err = svc.repo.Update(ctx, collection)
	if err != nil {
		svc.logger.Error("Failed to restore collection",
			zap.Any("error", err),
			zap.Any("collection_id", req.ID))
		return nil, err
	}
	svc.logger.Info("Collection restored successfully",
		zap.Any("collection_id", req.ID),
		zap.Any("user_id", userID))
	return &RestoreCollectionResponseDTO{
		Success: true,
		Message: "Collection restored successfully",
	}, nil
}

View file

@ -0,0 +1,406 @@
// monorepo/cloud/backend/internal/maplefile/service/collection/share_collection.go
package collection
import (
"context"
"fmt"
"time"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
uc_blockedemail "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/blockedemail"
uc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/user"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/emailer/mailgun"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/transaction"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/validation"
"github.com/gocql/gocql"
)
// ShareCollectionRequestDTO carries a sharing grant. EncryptedCollectionKey
// is the collection key wrapped for the recipient client-side; the server
// never sees the plaintext key (end-to-end encryption).
type ShareCollectionRequestDTO struct {
	CollectionID           gocql.UUID `json:"collection_id"`
	RecipientID            gocql.UUID `json:"recipient_id"`
	RecipientEmail         string     `json:"recipient_email"`
	PermissionLevel        string     `json:"permission_level"`
	EncryptedCollectionKey []byte     `json:"encrypted_collection_key"`
	ShareWithDescendants   bool       `json:"share_with_descendants"`
}

// ShareCollectionResponseDTO reports the outcome of a share request, including
// how many memberships were created when sharing cascades to descendants.
type ShareCollectionResponseDTO struct {
	Success            bool   `json:"success"`
	Message            string `json:"message"`
	MembershipsCreated int    `json:"memberships_created,omitempty"`
}

// ShareCollectionService grants another user access to a collection (and
// optionally its descendants) and notifies the recipient by email.
type ShareCollectionService interface {
	Execute(ctx context.Context, req *ShareCollectionRequestDTO) (*ShareCollectionResponseDTO, error)
}

// shareCollectionServiceImpl is the default implementation.
type shareCollectionServiceImpl struct {
	config              *config.Configuration
	logger              *zap.Logger
	repo                dom_collection.CollectionRepository
	checkBlockedEmailUC uc_blockedemail.CheckBlockedEmailUseCase
	userGetByIDUC       uc_user.UserGetByIDUseCase
	emailer             mailgun.Emailer
}

// NewShareCollectionService constructs the service with a named logger.
func NewShareCollectionService(
	config *config.Configuration,
	logger *zap.Logger,
	repo dom_collection.CollectionRepository,
	checkBlockedEmailUC uc_blockedemail.CheckBlockedEmailUseCase,
	userGetByIDUC uc_user.UserGetByIDUseCase,
	emailer mailgun.Emailer,
) ShareCollectionService {
	logger = logger.Named("ShareCollectionService")
	return &shareCollectionServiceImpl{
		config:              config,
		logger:              logger,
		repo:                repo,
		checkBlockedEmailUC: checkBlockedEmailUC,
		userGetByIDUC:       userGetByIDUC,
		emailer:             emailer,
	}
}
// Execute shares a collection with a recipient by recording a CollectionMembership
// that carries the recipient's encrypted copy of the collection key (the server
// never handles the plaintext key). Only the collection owner or an admin member
// may share. When req.ShareWithDescendants is set, the membership is propagated
// to every descendant collection as well. On success a best-effort email
// notification is dispatched asynchronously.
//
// Returns a validation, forbidden, or not-found httperror on failure.
func (svc *shareCollectionServiceImpl) Execute(ctx context.Context, req *ShareCollectionRequestDTO) (*ShareCollectionResponseDTO, error) {
	//
	// STEP 1: Enhanced Validation with Detailed Logging
	//
	if req == nil {
		svc.logger.Warn("Failed validation with nil request")
		return nil, httperror.NewBadRequestError("Share details are required")
	}
	// Log the incoming request for debugging (PII masked for security)
	svc.logger.Debug("received share collection request",
		zap.String("collection_id", req.CollectionID.String()),
		zap.String("recipient_id", req.RecipientID.String()),
		zap.String("recipient_email", validation.MaskEmail(req.RecipientEmail)),
		zap.String("permission_level", req.PermissionLevel),
		zap.Int("encrypted_key_length", len(req.EncryptedCollectionKey)),
		zap.Bool("share_with_descendants", req.ShareWithDescendants))
	e := make(map[string]string)
	// BUGFIX: gocql.UUID.String() never returns "" (a zero UUID formats as
	// "00000000-0000-0000-0000-000000000000"), so the previous
	// `String() == ""` checks were dead code and a missing ID was never
	// rejected. Compare against the zero value instead.
	if req.CollectionID == (gocql.UUID{}) {
		e["collection_id"] = "Collection ID is required"
	}
	if req.RecipientID == (gocql.UUID{}) {
		e["recipient_id"] = "Recipient ID is required"
	}
	if req.RecipientEmail == "" {
		e["recipient_email"] = "Recipient email is required"
	}
	if req.PermissionLevel == "" {
		// Will default to read-only in repository
	} else if req.PermissionLevel != dom_collection.CollectionPermissionReadOnly &&
		req.PermissionLevel != dom_collection.CollectionPermissionReadWrite &&
		req.PermissionLevel != dom_collection.CollectionPermissionAdmin {
		e["permission_level"] = "Invalid permission level"
	}
	// CRITICAL: Validate encrypted collection key is present and has valid format
	// Note: We use generic error messages to avoid revealing cryptographic implementation details
	const (
		minEncryptedKeySize = 32   // Minimum expected size for encrypted key
		maxEncryptedKeySize = 1024 // Maximum reasonable size to prevent abuse
	)
	if len(req.EncryptedCollectionKey) == 0 {
		svc.logger.Error("encrypted collection key validation failed",
			zap.String("collection_id", req.CollectionID.String()),
			zap.String("recipient_id", req.RecipientID.String()),
			zap.Int("encrypted_key_length", len(req.EncryptedCollectionKey)))
		e["encrypted_collection_key"] = "Encrypted collection key is required"
	} else if len(req.EncryptedCollectionKey) < minEncryptedKeySize || len(req.EncryptedCollectionKey) > maxEncryptedKeySize {
		// Generic error message - don't reveal size expectations
		svc.logger.Error("encrypted collection key has invalid size",
			zap.String("collection_id", req.CollectionID.String()),
			zap.String("recipient_id", req.RecipientID.String()),
			zap.Int("encrypted_key_length", len(req.EncryptedCollectionKey)))
		e["encrypted_collection_key"] = "Encrypted collection key is invalid"
	}
	if len(e) != 0 {
		svc.logger.Warn("Failed validation",
			zap.Any("error", e))
		return nil, httperror.NewValidationError(e)
	}
	//
	// STEP 2: Get user ID from context
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewInternalServerError("Authentication context error")
	}
	//
	// STEP 3: Retrieve existing collection
	//
	collection, err := svc.repo.Get(ctx, req.CollectionID)
	if err != nil {
		svc.logger.Error("Failed to get collection",
			zap.Any("error", err),
			zap.Any("collection_id", req.CollectionID))
		return nil, err
	}
	if collection == nil {
		svc.logger.Debug("Collection not found",
			zap.Any("collection_id", req.CollectionID))
		return nil, httperror.NewNotFoundError("Collection")
	}
	//
	// STEP 4: Check if user has rights to share this collection
	//
	hasSharePermission := false
	// Owner always has share permission
	if collection.OwnerID == userID {
		hasSharePermission = true
	} else {
		// Check if user is an admin member
		for _, member := range collection.Members {
			if member.RecipientID == userID && member.PermissionLevel == dom_collection.CollectionPermissionAdmin {
				hasSharePermission = true
				break
			}
		}
	}
	if !hasSharePermission {
		svc.logger.Warn("Unauthorized collection sharing attempt",
			zap.Any("user_id", userID),
			zap.Any("collection_id", req.CollectionID))
		return nil, httperror.NewForbiddenError("You don't have permission to share this collection")
	}
	//
	// STEP 5: Validate that we're not sharing with the owner (redundant)
	//
	if req.RecipientID == collection.OwnerID {
		svc.logger.Warn("Attempt to share collection with its owner",
			zap.String("collection_id", req.CollectionID.String()),
			zap.String("owner_id", collection.OwnerID.String()),
			zap.String("recipient_id", req.RecipientID.String()))
		return nil, httperror.NewValidationError(map[string]string{"recipient_id": "Cannot share collection with its owner"})
	}
	//
	// STEP 5.5: Check if the recipient has blocked the sender
	//
	// Get the sender's email by looking up the user
	sender, err := svc.userGetByIDUC.Execute(ctx, userID)
	if err != nil {
		svc.logger.Error("Failed to get sender user info",
			zap.Any("error", err),
			zap.String("user_id", userID.String()))
		// Don't block the sharing if we can't get user info - continue without check
	} else if sender != nil && sender.Email != "" {
		isBlocked, err := svc.checkBlockedEmailUC.Execute(ctx, req.RecipientID, sender.Email)
		if err != nil {
			svc.logger.Error("Failed to check blocked email status",
				zap.Any("error", err),
				zap.String("recipient_id", req.RecipientID.String()),
				zap.String("sender_email", validation.MaskEmail(sender.Email)))
			// Don't block the sharing if we can't check - log and continue
		} else if isBlocked {
			svc.logger.Info("Sharing blocked by recipient",
				zap.String("collection_id", req.CollectionID.String()),
				zap.String("recipient_id", req.RecipientID.String()),
				zap.String("sender_email", validation.MaskEmail(sender.Email)))
			return nil, httperror.NewForbiddenError("Unable to share with this user. You may have been blocked.")
		}
	}
	//
	// STEP 6: Create membership with EXPLICIT validation
	//
	svc.logger.Info("creating membership with validated encrypted key",
		zap.String("collection_id", req.CollectionID.String()),
		zap.String("recipient_id", req.RecipientID.String()),
		zap.Int("encrypted_key_length", len(req.EncryptedCollectionKey)),
		zap.String("permission_level", req.PermissionLevel))
	membership := &dom_collection.CollectionMembership{
		ID:                     gocql.TimeUUID(),
		CollectionID:           req.CollectionID,
		RecipientID:            req.RecipientID,
		RecipientEmail:         req.RecipientEmail,
		GrantedByID:            userID,
		EncryptedCollectionKey: req.EncryptedCollectionKey, // This should NEVER be nil for shared members
		PermissionLevel:        req.PermissionLevel,
		CreatedAt:              time.Now(),
		IsInherited:            false,
	}
	// DOUBLE-CHECK: Verify the membership has the encrypted key before proceeding
	if len(membership.EncryptedCollectionKey) == 0 {
		svc.logger.Error("CRITICAL: Membership created without encrypted collection key",
			zap.String("collection_id", req.CollectionID.String()),
			zap.String("recipient_id", req.RecipientID.String()),
			zap.String("membership_id", membership.ID.String()))
		return nil, httperror.NewInternalServerError("Failed to create membership with encrypted key")
	}
	svc.logger.Info("membership created successfully with encrypted key",
		zap.String("collection_id", req.CollectionID.String()),
		zap.String("recipient_id", req.RecipientID.String()),
		zap.String("membership_id", membership.ID.String()),
		zap.Int("encrypted_key_length", len(membership.EncryptedCollectionKey)))
	//
	// SAGA: Initialize distributed transaction manager
	//
	saga := transaction.NewSaga("share-collection", svc.logger)
	//
	// STEP 7: Add membership to collection
	//
	membershipsCreated := 1
	if req.ShareWithDescendants {
		// Add member to collection and all descendants
		err = svc.repo.AddMemberToHierarchy(ctx, req.CollectionID, membership)
		if err != nil {
			svc.logger.Error("Failed to add member to collection hierarchy",
				zap.Any("error", err),
				zap.Any("collection_id", req.CollectionID),
				zap.Any("recipient_id", req.RecipientID))
			saga.Rollback(ctx) // Rollback any previous operations
			return nil, err
		}
		// SAGA: Register compensation for hierarchical membership addition
		// IMPORTANT: Capture by value for closure
		collectionIDCaptured := req.CollectionID
		recipientIDCaptured := req.RecipientID
		saga.AddCompensation(func(ctx context.Context) error {
			svc.logger.Warn("SAGA compensation: removing member from collection hierarchy",
				zap.String("collection_id", collectionIDCaptured.String()),
				zap.String("recipient_id", recipientIDCaptured.String()))
			return svc.repo.RemoveMemberFromHierarchy(ctx, collectionIDCaptured, recipientIDCaptured)
		})
		// Get the number of descendants to report how many memberships were created.
		// Best effort: if this lookup fails, the response under-reports the count
		// but the share itself has already succeeded.
		descendants, err := svc.repo.FindDescendants(ctx, req.CollectionID)
		if err == nil {
			membershipsCreated += len(descendants)
		}
	} else {
		// Add member just to this collection
		err = svc.repo.AddMember(ctx, req.CollectionID, membership)
		if err != nil {
			svc.logger.Error("Failed to add member to collection",
				zap.Any("error", err),
				zap.Any("collection_id", req.CollectionID),
				zap.Any("recipient_id", req.RecipientID))
			saga.Rollback(ctx) // Rollback any previous operations
			return nil, err
		}
		// SAGA: Register compensation for single membership addition
		// IMPORTANT: Capture by value for closure
		collectionIDCaptured := req.CollectionID
		recipientIDCaptured := req.RecipientID
		saga.AddCompensation(func(ctx context.Context) error {
			svc.logger.Warn("SAGA compensation: removing member from collection",
				zap.String("collection_id", collectionIDCaptured.String()),
				zap.String("recipient_id", recipientIDCaptured.String()))
			return svc.repo.RemoveMember(ctx, collectionIDCaptured, recipientIDCaptured)
		})
	}
	svc.logger.Info("Collection shared successfully",
		zap.Any("collection_id", req.CollectionID),
		zap.Any("recipient_id", req.RecipientID),
		zap.Any("granted_by", userID),
		zap.String("permission_level", req.PermissionLevel),
		zap.Bool("shared_with_descendants", req.ShareWithDescendants),
		zap.Int("memberships_created", membershipsCreated))
	//
	// STEP 8: Send email notification to recipient (best effort)
	//
	// Note: sendShareNotificationEmail ignores the passed context and creates
	// its own background context, so request cancellation does not abort it.
	go svc.sendShareNotificationEmail(ctx, req.RecipientID, req.RecipientEmail)
	return &ShareCollectionResponseDTO{
		Success:            true,
		Message:            "Collection shared successfully",
		MembershipsCreated: membershipsCreated,
	}, nil
}
// sendShareNotificationEmail notifies the recipient by email that a collection
// has been shared with them. It is strictly best effort: every failure is
// logged and swallowed so the share operation's outcome is never affected.
// The incoming request context is deliberately ignored — this runs in a
// goroutine after the HTTP response is written (by which point the request
// context is canceled), so a fresh background context with a timeout is used.
func (svc *shareCollectionServiceImpl) sendShareNotificationEmail(_ context.Context, recipientID gocql.UUID, recipientEmail string) {
	// One independent 30-second deadline covers the lookup and the send.
	sendCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Fetch the recipient so their notification preferences can be honoured.
	recipient, err := svc.userGetByIDUC.Execute(sendCtx, recipientID)
	switch {
	case err != nil:
		svc.logger.Warn("Failed to get recipient for email notification",
			zap.Error(err),
			zap.String("recipient_id", recipientID.String()))
		return
	case recipient == nil:
		svc.logger.Warn("Recipient not found for email notification",
			zap.String("recipient_id", recipientID.String()))
		return
	}

	// Share notifications default to enabled; skip only when explicitly disabled.
	if prefs := recipient.ProfileData; prefs != nil &&
		prefs.ShareNotificationsEnabled != nil &&
		!*prefs.ShareNotificationsEnabled {
		svc.logger.Debug("Recipient has disabled share notifications",
			zap.String("recipient_id", recipientID.String()),
			zap.String("recipient_email", validation.MaskEmail(recipientEmail)))
		return
	}

	// Assemble the message.
	subject := "You have a new shared collection on MapleFile"
	fromAddr := svc.emailer.GetSenderEmail()
	frontendURL := svc.emailer.GetFrontendDomainName()
	htmlContent := fmt.Sprintf(`
	<html>
	<body>
	<h2>Hello,</h2>
	<p>Someone has shared a collection with you on MapleFile.</p>
	<p><a href="https://%s" style="color: #4CAF50;">Log in to view it</a></p>
	<br>
	<p style="font-size: 12px; color: #666;">
	You can disable these notifications in your profile settings.
	</p>
	</body>
	</html>
	`, frontendURL)

	// Dispatch the email; a failure here is logged but never propagated.
	if sendErr := svc.emailer.Send(sendCtx, fromAddr, subject, recipientEmail, htmlContent); sendErr != nil {
		svc.logger.Warn("Failed to send share notification email",
			zap.Error(sendErr),
			zap.String("recipient_email", validation.MaskEmail(recipientEmail)))
		return
	}
	svc.logger.Debug("Share notification email sent",
		zap.String("recipient_email", validation.MaskEmail(recipientEmail)))
}

View file

@ -0,0 +1,488 @@
// monorepo/cloud/backend/internal/maplefile/service/collection/softdelete.go
package collection
import (
"context"
"time"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
dom_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/file"
uc_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/collection"
uc_fileobjectstorage "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/fileobjectstorage"
uc_storagedailyusage "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/storagedailyusage"
uc_storageusageevent "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/storageusageevent"
uc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/user"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/transaction"
"github.com/gocql/gocql"
)
// SoftDeleteCollectionRequestDTO is the input for SoftDeleteCollectionService.Execute.
// ID identifies the collection to delete; the caller must be its owner.
type SoftDeleteCollectionRequestDTO struct {
	ID gocql.UUID `json:"id"`
	ForceHardDelete bool `json:"force_hard_delete"` // Skip tombstone for GDPR right-to-be-forgotten
}
// SoftDeleteCollectionResponseDTO is the result of SoftDeleteCollectionService.Execute.
// Success is true when the collection hierarchy was deleted; Message carries a
// human-readable summary for the API response.
type SoftDeleteCollectionResponseDTO struct {
	Success bool `json:"success"`
	Message string `json:"message"`
}
// SoftDeleteCollectionService deletes a collection, its descendant collections,
// and all files they contain, either as a soft delete (tombstoned, restorable)
// or a hard delete (permanent, GDPR right-to-be-forgotten).
type SoftDeleteCollectionService interface {
	Execute(ctx context.Context, req *SoftDeleteCollectionRequestDTO) (*SoftDeleteCollectionResponseDTO, error)
}
// softDeleteCollectionServiceImpl implements SoftDeleteCollectionService by
// coordinating collection/file repositories, object-storage cleanup, and
// storage-quota bookkeeping under a saga for rollback on partial failure.
type softDeleteCollectionServiceImpl struct {
	config *config.Configuration
	logger *zap.Logger
	repo dom_collection.CollectionRepository
	fileRepo dom_file.FileMetadataRepository
	getCollectionUseCase uc_collection.GetCollectionUseCase
	updateCollectionUseCase uc_collection.UpdateCollectionUseCase
	hardDeleteCollectionUseCase uc_collection.HardDeleteCollectionUseCase
	deleteMultipleDataUseCase uc_fileobjectstorage.DeleteMultipleEncryptedDataUseCase
	// Storage quota management
	storageQuotaHelperUseCase uc_user.UserStorageQuotaHelperUseCase
	createStorageUsageEventUseCase uc_storageusageevent.CreateStorageUsageEventUseCase
	updateStorageUsageUseCase uc_storagedailyusage.UpdateStorageUsageUseCase
}
// NewSoftDeleteCollectionService wires up a SoftDeleteCollectionService with
// its repositories and use cases, scoping the supplied logger to this service.
func NewSoftDeleteCollectionService(
	config *config.Configuration,
	logger *zap.Logger,
	repo dom_collection.CollectionRepository,
	fileRepo dom_file.FileMetadataRepository,
	getCollectionUseCase uc_collection.GetCollectionUseCase,
	updateCollectionUseCase uc_collection.UpdateCollectionUseCase,
	hardDeleteCollectionUseCase uc_collection.HardDeleteCollectionUseCase,
	deleteMultipleDataUseCase uc_fileobjectstorage.DeleteMultipleEncryptedDataUseCase,
	storageQuotaHelperUseCase uc_user.UserStorageQuotaHelperUseCase,
	createStorageUsageEventUseCase uc_storageusageevent.CreateStorageUsageEventUseCase,
	updateStorageUsageUseCase uc_storagedailyusage.UpdateStorageUsageUseCase,
) SoftDeleteCollectionService {
	svc := &softDeleteCollectionServiceImpl{
		config:                         config,
		logger:                         logger.Named("SoftDeleteCollectionService"),
		repo:                           repo,
		fileRepo:                       fileRepo,
		getCollectionUseCase:           getCollectionUseCase,
		updateCollectionUseCase:        updateCollectionUseCase,
		hardDeleteCollectionUseCase:    hardDeleteCollectionUseCase,
		deleteMultipleDataUseCase:      deleteMultipleDataUseCase,
		storageQuotaHelperUseCase:      storageQuotaHelperUseCase,
		createStorageUsageEventUseCase: createStorageUsageEventUseCase,
		updateStorageUsageUseCase:      updateStorageUsageUseCase,
	}
	return svc
}
// Execute deletes a collection hierarchy: the target collection, every
// descendant collection, all file metadata they contain, their S3 objects,
// and the owner's storage-quota accounting. With req.ForceHardDelete the
// deletions are permanent (GDPR right-to-be-forgotten); otherwise metadata is
// soft-deleted (tombstoned) and each step registers a saga compensation so a
// later failure rolls back earlier steps. Only the collection owner may delete.
func (svc *softDeleteCollectionServiceImpl) Execute(ctx context.Context, req *SoftDeleteCollectionRequestDTO) (*SoftDeleteCollectionResponseDTO, error) {
	//
	// STEP 1: Validation
	//
	if req == nil {
		svc.logger.Warn("Failed validation with nil request")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "Collection ID is required")
	}
	// BUGFIX: gocql.UUID.String() never returns "" (a zero UUID formats as
	// "00000000-0000-0000-0000-000000000000"), so the previous
	// `req.ID.String() == ""` check was dead code. Compare to the zero value.
	if req.ID == (gocql.UUID{}) {
		svc.logger.Warn("Empty collection ID")
		return nil, httperror.NewForBadRequestWithSingleField("id", "Collection ID is required")
	}
	//
	// STEP 2: Get user ID from context
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 3: Retrieve related records
	//
	collection, err := svc.getCollectionUseCase.Execute(ctx, req.ID)
	if err != nil {
		svc.logger.Error("Failed to get collection",
			zap.Any("error", err),
			zap.Any("collection_id", req.ID))
		return nil, err
	}
	if collection == nil {
		svc.logger.Debug("Collection not found",
			zap.Any("collection_id", req.ID))
		return nil, httperror.NewForNotFoundWithSingleField("message", "Collection not found")
	}
	//
	// STEP 4: Check if user has rights to delete this collection
	//
	if collection.OwnerID != userID {
		svc.logger.Warn("Unauthorized collection deletion attempt",
			zap.Any("user_id", userID),
			zap.Any("collection_id", req.ID))
		return nil, httperror.NewForForbiddenWithSingleField("message", "Only the collection owner can delete a collection")
	}
	// Check valid transitions.
	if err := dom_collection.IsValidStateTransition(collection.State, dom_collection.CollectionStateDeleted); err != nil {
		svc.logger.Warn("Invalid collection state transition",
			zap.Any("user_id", userID),
			zap.Error(err))
		return nil, err
	}
	svc.logger.Info("Starting soft delete of collection hierarchy",
		zap.String("collection_id", collection.ID.String()),
		zap.Int("member_count", len(collection.Members)))
	//
	// SAGA: Initialize distributed transaction manager
	//
	saga := transaction.NewSaga("soft-delete-collection", svc.logger)
	//
	// STEP 5: Find all descendant collections
	//
	descendants, err := svc.repo.FindDescendants(ctx, req.ID)
	if err != nil {
		svc.logger.Error("Failed to check for descendant collections",
			zap.Any("error", err),
			zap.Any("collection_id", req.ID))
		return nil, err
	}
	svc.logger.Info("Found descendant collections for deletion",
		zap.Any("collection_id", req.ID),
		zap.Int("descendants_count", len(descendants)))
	//
	// STEP 6: Delete all files in the parent collection
	//
	parentFiles, err := svc.fileRepo.GetByCollection(req.ID)
	if err != nil {
		svc.logger.Error("Failed to get files for parent collection",
			zap.Any("error", err),
			zap.Any("collection_id", req.ID))
		return nil, err
	}
	// Collect all S3 storage paths to delete and calculate total storage to release
	var allStoragePaths []string
	var totalStorageToRelease int64
	if len(parentFiles) > 0 {
		parentFileIDs := make([]gocql.UUID, len(parentFiles))
		for i, file := range parentFiles {
			parentFileIDs[i] = file.ID
			// Collect S3 paths for deletion
			allStoragePaths = append(allStoragePaths, file.EncryptedFileObjectKey)
			if file.EncryptedThumbnailObjectKey != "" {
				allStoragePaths = append(allStoragePaths, file.EncryptedThumbnailObjectKey)
			}
			// Calculate storage to release (only for active files)
			if file.State == dom_file.FileStateActive {
				totalStorageToRelease += file.EncryptedFileSizeInBytes + file.EncryptedThumbnailSizeInBytes
			}
		}
		// Execute parent file deletion (hard or soft based on flag)
		if req.ForceHardDelete {
			svc.logger.Info("Hard deleting parent collection files (GDPR mode)",
				zap.Int("file_count", len(parentFileIDs)))
			if err := svc.fileRepo.HardDeleteMany(parentFileIDs); err != nil {
				svc.logger.Error("Failed to hard-delete files in parent collection",
					zap.Any("error", err),
					zap.Any("collection_id", req.ID),
					zap.Int("file_count", len(parentFileIDs)))
				saga.Rollback(ctx)
				return nil, err
			}
			// No compensation for hard delete - GDPR requires permanent deletion
		} else {
			if err := svc.fileRepo.SoftDeleteMany(parentFileIDs); err != nil {
				svc.logger.Error("Failed to soft-delete files in parent collection",
					zap.Any("error", err),
					zap.Any("collection_id", req.ID),
					zap.Int("file_count", len(parentFileIDs)))
				saga.Rollback(ctx) // Rollback any previous operations
				return nil, err
			}
			// SAGA: Register compensation for parent files deletion
			// IMPORTANT: Capture parentFileIDs by value for closure
			parentFileIDsCaptured := parentFileIDs
			saga.AddCompensation(func(ctx context.Context) error {
				svc.logger.Warn("SAGA compensation: restoring parent collection files",
					zap.String("collection_id", req.ID.String()),
					zap.Int("file_count", len(parentFileIDsCaptured)))
				return svc.fileRepo.RestoreMany(parentFileIDsCaptured)
			})
		}
		svc.logger.Info("Deleted files in parent collection",
			zap.Any("collection_id", req.ID),
			zap.Int("file_count", len(parentFileIDs)))
	}
	//
	// STEP 7: Delete all files in descendant collections
	//
	totalDescendantFiles := 0
	for _, descendant := range descendants {
		descendantFiles, err := svc.fileRepo.GetByCollection(descendant.ID)
		if err != nil {
			svc.logger.Error("Failed to get files for descendant collection",
				zap.Any("error", err),
				zap.Any("descendant_id", descendant.ID))
			saga.Rollback(ctx) // Rollback all previous operations
			return nil, err
		}
		if len(descendantFiles) > 0 {
			descendantFileIDs := make([]gocql.UUID, len(descendantFiles))
			for i, file := range descendantFiles {
				descendantFileIDs[i] = file.ID
				// Collect S3 paths for deletion
				allStoragePaths = append(allStoragePaths, file.EncryptedFileObjectKey)
				if file.EncryptedThumbnailObjectKey != "" {
					allStoragePaths = append(allStoragePaths, file.EncryptedThumbnailObjectKey)
				}
				// Calculate storage to release (only for active files)
				if file.State == dom_file.FileStateActive {
					totalStorageToRelease += file.EncryptedFileSizeInBytes + file.EncryptedThumbnailSizeInBytes
				}
			}
			// Execute descendant file deletion (hard or soft based on flag)
			if req.ForceHardDelete {
				if err := svc.fileRepo.HardDeleteMany(descendantFileIDs); err != nil {
					svc.logger.Error("Failed to hard-delete files in descendant collection",
						zap.Any("error", err),
						zap.Any("descendant_id", descendant.ID),
						zap.Int("file_count", len(descendantFileIDs)))
					saga.Rollback(ctx)
					return nil, err
				}
				// No compensation for hard delete - GDPR requires permanent deletion
			} else {
				if err := svc.fileRepo.SoftDeleteMany(descendantFileIDs); err != nil {
					svc.logger.Error("Failed to soft-delete files in descendant collection",
						zap.Any("error", err),
						zap.Any("descendant_id", descendant.ID),
						zap.Int("file_count", len(descendantFileIDs)))
					saga.Rollback(ctx) // Rollback all previous operations
					return nil, err
				}
				// SAGA: Register compensation for this batch of descendant files
				// IMPORTANT: Capture by value for closure
				descendantFileIDsCaptured := descendantFileIDs
				descendantIDCaptured := descendant.ID
				saga.AddCompensation(func(ctx context.Context) error {
					svc.logger.Warn("SAGA compensation: restoring descendant collection files",
						zap.String("descendant_id", descendantIDCaptured.String()),
						zap.Int("file_count", len(descendantFileIDsCaptured)))
					return svc.fileRepo.RestoreMany(descendantFileIDsCaptured)
				})
			}
			totalDescendantFiles += len(descendantFileIDs)
			svc.logger.Debug("Deleted files in descendant collection",
				zap.Any("descendant_id", descendant.ID),
				zap.Int("file_count", len(descendantFileIDs)))
		}
	}
	svc.logger.Info("Soft-deleted all files in descendant collections",
		zap.Int("total_descendant_files", totalDescendantFiles),
		zap.Int("descendants_count", len(descendants)))
	//
	// STEP 8: Delete all descendant collections
	//
	for _, descendant := range descendants {
		// Execute descendant collection deletion (hard or soft based on flag)
		if req.ForceHardDelete {
			if err := svc.hardDeleteCollectionUseCase.Execute(ctx, descendant.ID); err != nil {
				svc.logger.Error("Failed to hard-delete descendant collection",
					zap.Any("error", err),
					zap.Any("descendant_id", descendant.ID))
				saga.Rollback(ctx)
				return nil, err
			}
			// No compensation for hard delete - GDPR requires permanent deletion
		} else {
			if err := svc.repo.SoftDelete(ctx, descendant.ID); err != nil {
				svc.logger.Error("Failed to soft-delete descendant collection",
					zap.Any("error", err),
					zap.Any("descendant_id", descendant.ID))
				saga.Rollback(ctx) // Rollback all previous operations
				return nil, err
			}
			// SAGA: Register compensation for this descendant collection
			// IMPORTANT: Capture by value for closure
			descendantIDCaptured := descendant.ID
			saga.AddCompensation(func(ctx context.Context) error {
				svc.logger.Warn("SAGA compensation: restoring descendant collection",
					zap.String("descendant_id", descendantIDCaptured.String()))
				return svc.repo.Restore(ctx, descendantIDCaptured)
			})
		}
		svc.logger.Debug("Deleted descendant collection",
			zap.Any("descendant_id", descendant.ID),
			zap.String("descendant_name", descendant.EncryptedName))
	}
	svc.logger.Info("Deleted all descendant collections",
		zap.Int("descendants_count", len(descendants)))
	//
	// STEP 9: Finally, delete the parent collection
	//
	if req.ForceHardDelete {
		svc.logger.Info("Hard deleting parent collection (GDPR mode)",
			zap.String("collection_id", req.ID.String()))
		if err := svc.hardDeleteCollectionUseCase.Execute(ctx, req.ID); err != nil {
			svc.logger.Error("Failed to hard-delete parent collection",
				zap.Any("error", err),
				zap.Any("collection_id", req.ID))
			saga.Rollback(ctx)
			return nil, err
		}
		// No compensation for hard delete - GDPR requires permanent deletion
	} else {
		if err := svc.repo.SoftDelete(ctx, req.ID); err != nil {
			svc.logger.Error("Failed to soft-delete parent collection",
				zap.Any("error", err),
				zap.Any("collection_id", req.ID))
			saga.Rollback(ctx) // Rollback all previous operations
			return nil, err
		}
		// SAGA: Register compensation for parent collection deletion
		// IMPORTANT: Capture by value for closure
		parentCollectionIDCaptured := req.ID
		saga.AddCompensation(func(ctx context.Context) error {
			svc.logger.Warn("SAGA compensation: restoring parent collection",
				zap.String("collection_id", parentCollectionIDCaptured.String()))
			return svc.repo.Restore(ctx, parentCollectionIDCaptured)
		})
	}
	//
	// STEP 10: Update storage tracking (quota, events, daily usage)
	//
	if totalStorageToRelease > 0 {
		svc.logger.Info("Releasing storage quota for collection deletion",
			zap.String("collection_id", req.ID.String()),
			zap.Int64("total_storage_to_release", totalStorageToRelease))
		// Release storage quota
		err = svc.storageQuotaHelperUseCase.OnFileDeleted(ctx, userID, totalStorageToRelease)
		if err != nil {
			svc.logger.Error("Failed to release storage quota after collection deletion",
				zap.Error(err))
			saga.Rollback(ctx)
			return nil, err
		}
		// Register compensation: re-reserve the released quota
		totalStorageCaptured := totalStorageToRelease
		userIDCaptured := userID
		saga.AddCompensation(func(ctx context.Context) error {
			svc.logger.Warn("SAGA compensation: re-reserving released storage quota",
				zap.Int64("size", totalStorageCaptured))
			return svc.storageQuotaHelperUseCase.CheckAndReserveQuota(ctx, userIDCaptured, totalStorageCaptured)
		})
		// Create storage usage event
		err = svc.createStorageUsageEventUseCase.Execute(ctx, userID, totalStorageToRelease, "remove")
		if err != nil {
			svc.logger.Error("Failed to create storage usage event for collection deletion",
				zap.Error(err))
			saga.Rollback(ctx)
			return nil, err
		}
		// Register compensation: create compensating "add" event
		saga.AddCompensation(func(ctx context.Context) error {
			svc.logger.Warn("SAGA compensation: creating compensating usage event")
			return svc.createStorageUsageEventUseCase.Execute(ctx, userIDCaptured, totalStorageCaptured, "add")
		})
		// Update daily storage usage
		today := time.Now().Truncate(24 * time.Hour)
		updateReq := &uc_storagedailyusage.UpdateStorageUsageRequest{
			UserID:      userID,
			UsageDay:    &today,
			TotalBytes:  -totalStorageToRelease,
			AddBytes:    0,
			RemoveBytes: totalStorageToRelease,
			IsIncrement: true,
		}
		err = svc.updateStorageUsageUseCase.Execute(ctx, updateReq)
		if err != nil {
			svc.logger.Error("Failed to update daily storage usage for collection deletion",
				zap.Error(err))
			saga.Rollback(ctx)
			return nil, err
		}
		// Register compensation: reverse the usage update
		saga.AddCompensation(func(ctx context.Context) error {
			svc.logger.Warn("SAGA compensation: reversing daily usage update")
			compensateReq := &uc_storagedailyusage.UpdateStorageUsageRequest{
				UserID:      userIDCaptured,
				UsageDay:    &today,
				TotalBytes:  totalStorageCaptured, // Positive to reverse
				AddBytes:    totalStorageCaptured,
				RemoveBytes: 0,
				IsIncrement: true,
			}
			return svc.updateStorageUsageUseCase.Execute(ctx, compensateReq)
		})
		svc.logger.Info("Storage quota released successfully",
			zap.Int64("released_bytes", totalStorageToRelease))
	}
	//
	// STEP 11: Delete all S3 objects
	//
	// NOTE(review): S3 objects are deleted even on the soft-delete path. If a
	// later saga compensation restores the file metadata, the ciphertext will
	// already be gone — confirm that soft delete is intended to remove object
	// data permanently (tombstone kept only for sync), or gate this on
	// req.ForceHardDelete.
	if len(allStoragePaths) > 0 {
		svc.logger.Info("Deleting S3 objects for collection",
			zap.Any("collection_id", req.ID),
			zap.Int("s3_objects_count", len(allStoragePaths)))
		if err := svc.deleteMultipleDataUseCase.Execute(allStoragePaths); err != nil {
			// Log but don't fail - S3 deletion is best effort after metadata is deleted
			svc.logger.Error("Failed to delete some S3 objects (continuing anyway)",
				zap.Any("error", err),
				zap.Int("s3_objects_count", len(allStoragePaths)))
		} else {
			svc.logger.Info("Successfully deleted all S3 objects",
				zap.Int("s3_objects_deleted", len(allStoragePaths)))
		}
	}
	svc.logger.Info("Collection hierarchy deleted successfully",
		zap.Any("collection_id", req.ID),
		zap.Int("parent_files_deleted", len(parentFiles)),
		zap.Int("descendant_files_deleted", totalDescendantFiles),
		zap.Int("descendants_deleted", len(descendants)),
		zap.Int("total_files_deleted", len(parentFiles)+totalDescendantFiles),
		zap.Int("s3_objects_deleted", len(allStoragePaths)))
	return &SoftDeleteCollectionResponseDTO{
		Success: true,
		Message: "Collection, descendants, and all associated files deleted successfully",
	}, nil
}

View file

@ -0,0 +1,240 @@
// monorepo/cloud/backend/internal/maplefile/service/collection/update.go
package collection
import (
"context"
"time"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/crypto"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/ratelimit"
)
// UpdateCollectionRequestDTO is the input for UpdateCollectionService.Execute.
// The name and collection key arrive already encrypted client-side (E2EE);
// the server stores them opaquely.
type UpdateCollectionRequestDTO struct {
	ID gocql.UUID `json:"id"`
	EncryptedName string `json:"encrypted_name"`
	CollectionType string `json:"collection_type,omitempty"` // "folder" or "album" when present.
	EncryptedCollectionKey *crypto.EncryptedCollectionKey `json:"encrypted_collection_key,omitempty"`
	// Version is the client's known collection version; presumably used for
	// optimistic-concurrency checks downstream — confirm against repository.
	Version uint64 `json:"version,omitempty"`
}
// UpdateCollectionService updates a collection's encrypted name, type, and
// encrypted key on behalf of the owner or an admin member.
type UpdateCollectionService interface {
	Execute(ctx context.Context, req *UpdateCollectionRequestDTO) (*CollectionResponseDTO, error)
}
// updateCollectionServiceImpl implements UpdateCollectionService, combining the
// collection repository with a rate limiter that throttles repeated
// authorization failures.
type updateCollectionServiceImpl struct {
	config *config.Configuration
	logger *zap.Logger
	repo dom_collection.CollectionRepository
	authFailureRateLimiter ratelimit.AuthFailureRateLimiter
}
// NewUpdateCollectionService constructs an UpdateCollectionService backed by
// the given repository and auth-failure rate limiter, with a service-scoped logger.
func NewUpdateCollectionService(
	config *config.Configuration,
	logger *zap.Logger,
	repo dom_collection.CollectionRepository,
	authFailureRateLimiter ratelimit.AuthFailureRateLimiter,
) UpdateCollectionService {
	svc := &updateCollectionServiceImpl{
		config:                 config,
		logger:                 logger.Named("UpdateCollectionService"),
		repo:                   repo,
		authFailureRateLimiter: authFailureRateLimiter,
	}
	return svc
}
// Execute updates an existing collection on behalf of the authenticated user.
//
// Flow: validate the request -> resolve the caller from the request context ->
// load the collection -> rate-limit authorization failures -> authorize the
// caller (owner or admin member) -> optimistic version check -> apply the
// changes -> persist -> map the domain model to a response DTO.
func (svc *updateCollectionServiceImpl) Execute(ctx context.Context, req *UpdateCollectionRequestDTO) (*CollectionResponseDTO, error) {
	//
	// STEP 1: Validation
	//
	if req == nil {
		svc.logger.Warn("Failed validation with nil request")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "Collection details are required")
	}
	e := make(map[string]string)
	// BUGFIX: gocql.UUID.String() never returns "" (a zero UUID formats as
	// "00000000-0000-..."), so the previous `String() == ""` check could
	// never fire. Compare against the zero value instead.
	if req.ID == (gocql.UUID{}) {
		e["id"] = "Collection ID is required"
	}
	if req.EncryptedName == "" {
		e["encrypted_name"] = "Collection name is required"
	}
	if req.CollectionType != "" && req.CollectionType != dom_collection.CollectionTypeFolder && req.CollectionType != dom_collection.CollectionTypeAlbum {
		e["collection_type"] = "Collection type must be either 'folder' or 'album'"
	}
	if req.EncryptedCollectionKey == nil {
		e["encrypted_collection_key"] = "Encrypted collection key is required"
	}
	if len(e) != 0 {
		svc.logger.Warn("Failed validation",
			zap.Any("error", e))
		return nil, httperror.NewForBadRequest(&e)
	}
	//
	// STEP 2: Get user ID from context
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 3: Retrieve existing collection
	//
	collection, err := svc.repo.Get(ctx, req.ID)
	if err != nil {
		svc.logger.Error("Failed to get collection",
			zap.Any("error", err),
			zap.Any("collection_id", req.ID))
		return nil, err
	}
	if collection == nil {
		svc.logger.Debug("Collection not found",
			zap.Any("collection_id", req.ID))
		return nil, httperror.NewForNotFoundWithSingleField("message", "Collection not found")
	}
	//
	// STEP 4: Check rate limiting for authorization failures
	//
	// Block callers who have recently exceeded the authorization failure
	// limit before evaluating their access at all.
	if svc.authFailureRateLimiter != nil {
		allowed, remainingAttempts, resetTime, err := svc.authFailureRateLimiter.CheckAuthFailure(
			ctx,
			userID.String(),
			req.ID.String(),
			"collection:update")
		if err != nil {
			// Log error but continue - fail open for availability.
			svc.logger.Error("Failed to check auth failure rate limit",
				zap.Error(err),
				zap.Any("user_id", userID),
				zap.Any("collection_id", req.ID))
		} else if !allowed {
			svc.logger.Warn("User blocked due to excessive authorization failures",
				zap.Any("user_id", userID),
				zap.Any("collection_id", req.ID),
				zap.Int("remaining_attempts", remainingAttempts),
				zap.Time("reset_time", resetTime))
			return nil, httperror.NewTooManyRequestsError(
				"Too many authorization failures. Please try again later")
		}
	}
	//
	// STEP 5: Check if user has rights to update this collection
	//
	if collection.OwnerID != userID {
		// Non-owners may still update if they are a member with admin permission.
		isAdmin := false
		for _, member := range collection.Members {
			if member.RecipientID == userID && member.PermissionLevel == dom_collection.CollectionPermissionAdmin {
				isAdmin = true
				break
			}
		}
		if !isAdmin {
			// Record authorization failure for rate limiting (best-effort).
			if svc.authFailureRateLimiter != nil {
				if err := svc.authFailureRateLimiter.RecordAuthFailure(
					ctx,
					userID.String(),
					req.ID.String(),
					"collection:update",
					"insufficient_permission"); err != nil {
					svc.logger.Error("Failed to record auth failure",
						zap.Error(err),
						zap.Any("user_id", userID),
						zap.Any("collection_id", req.ID))
				}
			}
			svc.logger.Warn("Unauthorized collection update attempt",
				zap.Any("user_id", userID),
				zap.Any("collection_id", req.ID))
			return nil, httperror.NewForForbiddenWithSingleField("message", "You don't have permission to update this collection")
		}
	}
	// Record successful authorization (best-effort; failure only logged).
	if svc.authFailureRateLimiter != nil {
		if err := svc.authFailureRateLimiter.RecordAuthSuccess(
			ctx,
			userID.String(),
			req.ID.String(),
			"collection:update"); err != nil {
			svc.logger.Debug("Failed to record auth success",
				zap.Error(err),
				zap.Any("user_id", userID),
				zap.Any("collection_id", req.ID))
		}
	}
	//
	// STEP 6: Check if submitted collection request is in-sync with our backend's collection copy.
	//
	// Developers note:
	// Multiple clients share this data, so the backend must ensure the
	// submitted update is based on the most recent version (optimistic
	// concurrency control).
	if collection.Version != req.Version {
		svc.logger.Warn("Outdated collection update attempt",
			zap.Any("user_id", userID),
			zap.Any("collection_id", req.ID),
			zap.Any("submitted_version", req.Version),
			zap.Any("current_version", collection.Version))
		return nil, httperror.NewForBadRequestWithSingleField("message", "Collection has been updated since you last fetched it")
	}
	//
	// STEP 7: Update collection
	// (Was mislabeled as a second "STEP 6"; renumbered for clarity.)
	//
	collection.EncryptedName = req.EncryptedName
	collection.ModifiedAt = time.Now()
	collection.ModifiedByUserID = userID
	collection.Version++ // Update mutation means we increment version.
	// Only update optional fields if they are provided.
	if req.CollectionType != "" {
		collection.CollectionType = req.CollectionType
	}
	// len() on a nil slice is 0, so the previous explicit nil checks were
	// redundant (staticcheck S1009).
	if len(req.EncryptedCollectionKey.Ciphertext) > 0 && len(req.EncryptedCollectionKey.Nonce) > 0 {
		collection.EncryptedCollectionKey = req.EncryptedCollectionKey
	}
	//
	// STEP 8: Save updated collection
	//
	err = svc.repo.Update(ctx, collection)
	if err != nil {
		svc.logger.Error("Failed to update collection",
			zap.Any("error", err),
			zap.Any("collection_id", collection.ID))
		return nil, err
	}
	//
	// STEP 9: Map domain model to response DTO
	//
	ownerEmail := getOwnerEmailFromMembers(collection)
	response := mapCollectionToDTO(collection, 0, ownerEmail)
	svc.logger.Debug("Collection updated successfully",
		zap.Any("collection_id", collection.ID))
	return response, nil
}

View file

@ -0,0 +1,158 @@
// monorepo/cloud/backend/internal/maplefile/service/collection/utils.go
package collection
import (
"time"
"github.com/gocql/gocql"
"go.uber.org/zap"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/validation"
)
// getOwnerEmailFromMembers returns the owner's email address by scanning the
// collection's membership list (the owner is always present as a member).
// It returns "" when the collection is nil or no matching member exists.
func getOwnerEmailFromMembers(collection *dom_collection.Collection) string {
	if collection == nil {
		return ""
	}
	for i := range collection.Members {
		if collection.Members[i].RecipientID == collection.OwnerID {
			return collection.Members[i].RecipientEmail
		}
	}
	return ""
}
// mapMembershipDTOToDomain maps a CollectionMembershipDTO to its
// CollectionMembership domain counterpart via a direct field-by-field copy.
//
// ROBUSTNESS: a nil DTO yields the zero-value CollectionMembership instead of
// panicking, since callers iterate over slices of *CollectionMembershipDTO.
func mapMembershipDTOToDomain(dto *CollectionMembershipDTO) dom_collection.CollectionMembership {
	if dto == nil {
		return dom_collection.CollectionMembership{}
	}
	return dom_collection.CollectionMembership{
		ID:                     dto.ID,
		CollectionID:           dto.CollectionID,
		RecipientID:            dto.RecipientID,
		RecipientEmail:         dto.RecipientEmail,
		GrantedByID:            dto.GrantedByID,
		EncryptedCollectionKey: dto.EncryptedCollectionKey,
		PermissionLevel:        dto.PermissionLevel,
		CreatedAt:              dto.CreatedAt,
		IsInherited:            dto.IsInherited,
		InheritedFromID:        dto.InheritedFromID,
		// Note: ModifiedAt/By and Version are not part of the membership
		// DTO/domain pair, so there is nothing further to copy.
	}
}
// mapCollectionDTOToDomain converts a CreateCollectionRequestDTO into a
// Collection domain model, copying every field (including nested members)
// straight from the DTO. The DTO's ID, OwnerID, timestamps, and version
// represent the client's proposed state; the caller's Execute method applies
// server-managed overrides afterwards. userID and now are accepted for
// potential use by recursive calls but are not consumed here.
func mapCollectionDTOToDomain(dto *CreateCollectionRequestDTO, userID gocql.UUID, now time.Time) *dom_collection.Collection {
	if dto == nil {
		return nil
	}
	out := &dom_collection.Collection{
		ID:                     dto.ID,
		OwnerID:                dto.OwnerID,
		EncryptedName:          dto.EncryptedName,
		EncryptedCustomIcon:    dto.EncryptedCustomIcon,
		CollectionType:         dto.CollectionType,
		EncryptedCollectionKey: dto.EncryptedCollectionKey,
		ParentID:               dto.ParentID,
		AncestorIDs:            dto.AncestorIDs,
		CreatedAt:              dto.CreatedAt,
		CreatedByUserID:        dto.CreatedByUserID,
		ModifiedAt:             dto.ModifiedAt,
		ModifiedByUserID:       dto.ModifiedByUserID,
	}
	// Convert the member DTOs, if any, into domain memberships.
	if n := len(dto.Members); n > 0 {
		out.Members = make([]dom_collection.CollectionMembership, 0, n)
		for _, memberDTO := range dto.Members {
			out.Members = append(out.Members, mapMembershipDTOToDomain(memberDTO))
		}
	}
	return out
}
// mapCollectionToDTO maps a Collection domain model to a CollectionResponseDTO.
// fileCount is the number of active files in this collection (pass 0 if not known).
// ownerEmail is the email address of the collection owner (pass "" if not known).
// Returns nil when the input collection is nil.
func mapCollectionToDTO(collection *dom_collection.Collection, fileCount int, ownerEmail string) *CollectionResponseDTO {
	if collection == nil {
		return nil
	}
	responseDTO := &CollectionResponseDTO{
		ID:                  collection.ID,
		OwnerID:             collection.OwnerID,
		OwnerEmail:          ownerEmail,
		EncryptedName:       collection.EncryptedName,
		EncryptedCustomIcon: collection.EncryptedCustomIcon,
		CollectionType:      collection.CollectionType,
		ParentID:            collection.ParentID,
		AncestorIDs:         collection.AncestorIDs,
		Tags:                collection.Tags,
		// The domain model's EncryptedCollectionKey is the owner's key.
		// NOTE(review): it is copied into this general response DTO as-is;
		// confirm callers only return this DTO to recipients entitled to
		// that ciphertext (e.g. the owner).
		EncryptedCollectionKey: collection.EncryptedCollectionKey,
		CreatedAt:              collection.CreatedAt,
		ModifiedAt:             collection.ModifiedAt,
		FileCount:              fileCount,
		Version:                collection.Version,
		// Members are mapped element-by-element into MembershipResponseDTO below.
		Members: make([]MembershipResponseDTO, len(collection.Members)),
	}
	// Map members
	for i, member := range collection.Members {
		responseDTO.Members[i] = MembershipResponseDTO{
			ID:              member.ID,
			RecipientID:     member.RecipientID,
			RecipientEmail:  member.RecipientEmail, // Email for display
			PermissionLevel: member.PermissionLevel,
			GrantedByID:     member.GrantedByID,
			CollectionID:    member.CollectionID, // Redundant but useful
			IsInherited:     member.IsInherited,
			InheritedFromID: member.InheritedFromID,
			CreatedAt:       member.CreatedAt,
			// NOTE(review): this is the recipient-specific encrypted key.
			// The previous comment claimed MembershipResponseDTO has no
			// field for it, yet one is populated right here — confirm
			// responses are filtered so each member only ever receives
			// their own entry's ciphertext.
			EncryptedCollectionKey: member.EncryptedCollectionKey,
		}
	}
	// Debug: dump what we're sending in the DTO.
	// NOTE(review): this constructs a fresh zap development logger on every
	// call (constructor error ignored, logger never synced) and emits at
	// Info level — looks like a debug leftover; consider injecting the
	// service logger instead.
	logger, _ := zap.NewDevelopment()
	logger.Info("🔍 mapCollectionToDTO: Mapping collection to DTO",
		zap.String("collection_id", collection.ID.String()),
		zap.Int("domain_members_count", len(collection.Members)),
		zap.Int("dto_members_count", len(responseDTO.Members)),
		zap.Int("domain_tags_count", len(collection.Tags)),
		zap.Int("dto_tags_count", len(responseDTO.Tags)))
	for i, member := range responseDTO.Members {
		logger.Info("🔍 mapCollectionToDTO: DTO member",
			zap.Int("index", i),
			zap.String("recipient_email", validation.MaskEmail(member.RecipientEmail)),
			zap.String("recipient_id", member.RecipientID.String()),
			zap.Int("encrypted_key_length", len(member.EncryptedCollectionKey)))
	}
	return responseDTO
}

View file

@ -0,0 +1,56 @@
// cloud/maplefile-backend/internal/maplefile/service/dashboard/dto.go
package dashboard
import (
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/file"
)
// GetDashboardResponseDTO represents the complete dashboard response
// returned to the client.
type GetDashboardResponseDTO struct {
	Dashboard *DashboardDataDTO `json:"dashboard"` // Assembled dashboard payload.
	Success   bool              `json:"success"`   // True when the dashboard was built successfully.
	Message   string            `json:"message"`   // Human-readable status message.
}
// DashboardDataDTO contains all the dashboard information: headline summary
// statistics, storage-trend chart data, the most recent files, and
// (optionally) the encrypted collection keys those files reference so
// clients can decrypt their metadata without extra API calls.
type DashboardDataDTO struct {
	Summary           SummaryDTO                   `json:"summary"`
	StorageUsageTrend StorageUsageTrendDTO         `json:"storage_usage_trend"`
	RecentFiles       []file.RecentFileResponseDTO `json:"recent_files"`
	CollectionKeys    []CollectionKeyDTO           `json:"collection_keys,omitempty"` // Omitted when no keys were resolved.
}
// CollectionKeyDTO contains the encrypted collection key for client-side decryption.
// This is safe to include because the collection key is encrypted with the user's master key.
type CollectionKeyDTO struct {
	CollectionID                string `json:"collection_id"`
	EncryptedCollectionKey      string `json:"encrypted_collection_key"`       // Base64 (StdEncoding) ciphertext.
	EncryptedCollectionKeyNonce string `json:"encrypted_collection_key_nonce"` // Base64 (StdEncoding) nonce.
}
// SummaryDTO contains the main dashboard statistics.
type SummaryDTO struct {
	TotalFiles             int              `json:"total_files"`
	TotalFolders           int              `json:"total_folders"` // Folders only; albums are excluded.
	StorageUsed            StorageAmountDTO `json:"storage_used"`
	StorageLimit           StorageAmountDTO `json:"storage_limit"`
	StorageUsagePercentage int              `json:"storage_usage_percentage"` // Rounded; reported as at least 1 when any storage is used.
}
// StorageAmountDTO represents a storage value with its unit (e.g. 1.5 "GB").
type StorageAmountDTO struct {
	Value float64 `json:"value"`
	Unit  string  `json:"unit"` // One of "B", "KB", "MB", "GB", "TB".
}
// StorageUsageTrendDTO contains the trend chart data for a period.
type StorageUsageTrendDTO struct {
	Period     string         `json:"period"` // Human-readable label, e.g. "Last 7 days".
	DataPoints []DataPointDTO `json:"data_points"`
}
// DataPointDTO represents a single point in the storage usage trend.
type DataPointDTO struct {
	Date  string           `json:"date"` // Day formatted "2006-01-02" (YYYY-MM-DD).
	Usage StorageAmountDTO `json:"usage"`
}

View file

@ -0,0 +1,372 @@
// cloud/maplefile-backend/internal/maplefile/service/dashboard/get_dashboard.go
package dashboard
import (
"context"
"encoding/base64"
"math"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/storagedailyusage"
dom_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/user"
file_service "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/file"
uc_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/collection"
uc_filemetadata "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/filemetadata"
uc_storagedailyusage "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/storagedailyusage"
uc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/user"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
"github.com/gocql/gocql"
)
// GetDashboardService assembles the authenticated user's dashboard payload
// (summary statistics, storage trend, recent files, and collection keys).
// The user is resolved from the request context, not from parameters.
type GetDashboardService interface {
	Execute(ctx context.Context) (*GetDashboardResponseDTO, error)
}
// getDashboardServiceImpl implements GetDashboardService by aggregating the
// use cases each dashboard panel needs.
type getDashboardServiceImpl struct {
	config                  *config.Configuration
	logger                  *zap.Logger
	listRecentFilesService  file_service.ListRecentFilesService // Supplies the "recent files" panel.
	userGetByIDUseCase      uc_user.UserGetByIDUseCase
	countUserFilesUseCase   uc_filemetadata.CountUserFilesUseCase
	countUserFoldersUseCase uc_collection.CountUserFoldersUseCase // Counts folders only, not albums.
	getStorageTrendUseCase  uc_storagedailyusage.GetStorageDailyUsageTrendUseCase
	getCollectionUseCase    uc_collection.GetCollectionUseCase // Resolves encrypted keys for recent files' collections.
}
// NewGetDashboardService constructs a GetDashboardService with a named
// sub-logger and the supplied use-case dependencies.
func NewGetDashboardService(
	config *config.Configuration,
	logger *zap.Logger,
	listRecentFilesService file_service.ListRecentFilesService,
	userGetByIDUseCase uc_user.UserGetByIDUseCase,
	countUserFilesUseCase uc_filemetadata.CountUserFilesUseCase,
	countUserFoldersUseCase uc_collection.CountUserFoldersUseCase,
	getStorageTrendUseCase uc_storagedailyusage.GetStorageDailyUsageTrendUseCase,
	getCollectionUseCase uc_collection.GetCollectionUseCase,
) GetDashboardService {
	svc := &getDashboardServiceImpl{
		config:                  config,
		logger:                  logger.Named("GetDashboardService"),
		listRecentFilesService:  listRecentFilesService,
		userGetByIDUseCase:      userGetByIDUseCase,
		countUserFilesUseCase:   countUserFilesUseCase,
		countUserFoldersUseCase: countUserFoldersUseCase,
		getStorageTrendUseCase:  getStorageTrendUseCase,
		getCollectionUseCase:    getCollectionUseCase,
	}
	return svc
}
// Execute assembles the dashboard payload for the authenticated user:
// headline summary statistics, a 7-day storage-usage trend, the five most
// recent files, and the encrypted collection keys those files reference so
// clients can decrypt file metadata without extra API calls.
//
// Trend and recent-file lookups degrade gracefully (empty data) rather than
// failing the whole dashboard; user/count lookups are fatal.
func (svc *getDashboardServiceImpl) Execute(ctx context.Context) (*GetDashboardResponseDTO, error) {
	//
	// STEP 1: Get user ID from context
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 2: Validation
	//
	// BUGFIX: gocql.UUID.String() never returns "" (a zero UUID formats as
	// "00000000-0000-..."), so the previous `String() == ""` check could
	// never fire. Compare against the zero value instead.
	e := make(map[string]string)
	if userID == (gocql.UUID{}) {
		e["user_id"] = "User ID is required"
	}
	if len(e) != 0 {
		svc.logger.Warn("Failed validating get dashboard",
			zap.Any("error", e))
		return nil, httperror.NewForBadRequest(&e)
	}
	//
	// STEP 3: Get user information for storage data
	//
	user, err := svc.userGetByIDUseCase.Execute(ctx, userID)
	if err != nil {
		svc.logger.Error("Failed to get user for dashboard",
			zap.String("user_id", userID.String()),
			zap.Error(err))
		return nil, err
	}
	if user == nil {
		svc.logger.Warn("User not found for dashboard",
			zap.String("user_id", userID.String()))
		return nil, httperror.NewForNotFoundWithSingleField("user_id", "User not found")
	}
	//
	// STEP 4: Get file count
	//
	fileCountResp, err := svc.countUserFilesUseCase.Execute(ctx, userID)
	if err != nil {
		svc.logger.Error("Failed to count user files for dashboard",
			zap.String("user_id", userID.String()),
			zap.Error(err))
		return nil, err
	}
	//
	// STEP 5: Get folder count (folders only, not albums)
	//
	folderCountResp, err := svc.countUserFoldersUseCase.Execute(ctx, userID)
	if err != nil {
		svc.logger.Error("Failed to count user folders for dashboard",
			zap.String("user_id", userID.String()),
			zap.Error(err))
		return nil, err
	}
	svc.logger.Debug("Folder count debug info",
		zap.String("user_id", userID.String()),
		zap.Int("total_folders_returned", folderCountResp.TotalFolders))
	//
	// STEP 6: Get storage usage trend (last 7 days)
	//
	trendReq := &uc_storagedailyusage.GetStorageDailyUsageTrendRequest{
		UserID:      userID,
		TrendPeriod: "7days",
	}
	storageTrend, err := svc.getStorageTrendUseCase.Execute(ctx, trendReq)
	if err != nil {
		svc.logger.Warn("Failed to get storage trend for dashboard, using empty trend",
			zap.String("user_id", userID.String()),
			zap.Error(err))
		// Don't fail the entire dashboard for trend data.
		storageTrend = nil
	}
	//
	// STEP 7: Get recent files (latest five)
	//
	var recentFiles []file_service.RecentFileResponseDTO
	recentFilesResp, err := svc.listRecentFilesService.Execute(ctx, nil, 5)
	if err != nil {
		svc.logger.Warn("Failed to get recent files for dashboard, using empty list",
			zap.String("user_id", userID.String()),
			zap.Error(err))
		// Don't fail the entire dashboard for recent files.
		recentFiles = []file_service.RecentFileResponseDTO{}
	} else {
		recentFiles = recentFilesResp.Files
	}
	//
	// STEP 8: Fetch collection keys for the recent files so clients can
	// decrypt file metadata without additional API calls.
	//
	collectionKeys := svc.fetchCollectionKeysForFiles(ctx, recentFiles)
	//
	// STEP 9: Build dashboard response
	//
	dashboard := &DashboardDataDTO{
		// storageTrend doubles as the source for the "storage used" figure.
		Summary:           svc.buildSummary(user, fileCountResp.TotalFiles, folderCountResp.TotalFolders, storageTrend),
		StorageUsageTrend: svc.buildStorageUsageTrend(storageTrend),
		RecentFiles:       recentFiles,
		CollectionKeys:    collectionKeys,
	}
	response := &GetDashboardResponseDTO{
		Dashboard: dashboard,
		Success:   true,
		Message:   "Dashboard data retrieved successfully",
	}
	svc.logger.Info("Dashboard data retrieved successfully",
		zap.String("user_id", userID.String()),
		zap.Int("total_files", fileCountResp.TotalFiles),
		zap.Int("total_folders", folderCountResp.TotalFolders),
		zap.Int("recent_files_count", len(recentFiles)))
	return response, nil
}
// buildSummary assembles the dashboard summary: file/folder counts plus
// storage used, the storage limit, and a rounded usage percentage.
// Storage used is taken from the most recent day of the supplied trend; a
// nil or empty trend yields 0 bytes used.
//
// NOTE(review): the user parameter is currently unused and the storage limit
// is hardcoded to 10GB — presumably the limit should come from the user's
// account/plan; confirm and wire it through.
func (svc *getDashboardServiceImpl) buildSummary(user *dom_user.User, totalFiles, totalFolders int, storageTrend *storagedailyusage.StorageUsageTrend) SummaryDTO {
	// Calculate storage from the most recent daily usage data.
	var storageUsedBytes int64 = 0
	// Debug logging for storage trend
	if storageTrend != nil {
		svc.logger.Debug("Storage trend received in buildSummary",
			zap.Int("daily_usages_count", len(storageTrend.DailyUsages)),
			zap.Int64("total_added", storageTrend.TotalAdded),
			zap.Int64("net_change", storageTrend.NetChange))
		if len(storageTrend.DailyUsages) > 0 {
			// Get the most recent day's total bytes (last element in the sorted array).
			mostRecentDay := storageTrend.DailyUsages[len(storageTrend.DailyUsages)-1]
			storageUsedBytes = mostRecentDay.TotalBytes
			// BUGFIX: Ensure storage never goes negative.
			// This can happen if deletion events exceed actual storage
			// (edge case with storage tracking).
			if storageUsedBytes < 0 {
				svc.logger.Warn("Storage used bytes is negative, resetting to 0",
					zap.Int64("negative_value", storageUsedBytes),
					zap.Time("usage_day", mostRecentDay.UsageDay))
				storageUsedBytes = 0
			}
			svc.logger.Debug("Using storage from most recent day",
				zap.Time("usage_day", mostRecentDay.UsageDay),
				zap.Int64("total_bytes", mostRecentDay.TotalBytes),
				zap.Int64("total_add_bytes", mostRecentDay.TotalAddBytes),
				zap.Int64("total_remove_bytes", mostRecentDay.TotalRemoveBytes))
		} else {
			svc.logger.Debug("No daily usage entries found in storage trend")
		}
	} else {
		svc.logger.Debug("Storage trend is nil")
	}
	var storageLimitBytes int64 = 10 * 1024 * 1024 * 1024 // 10GB default limit
	// Convert storage used/limit to human-readable value+unit pairs.
	storageUsed := svc.convertBytesToStorageAmount(storageUsedBytes)
	storageLimit := svc.convertBytesToStorageAmount(storageLimitBytes)
	// Calculate storage percentage with proper rounding.
	storagePercentage := 0
	if storageLimitBytes > 0 {
		percentage := (float64(storageUsedBytes) / float64(storageLimitBytes)) * 100
		// Use math.Round for proper rounding instead of truncation.
		storagePercentage = int(math.Round(percentage))
		// If there's actual usage but percentage rounds to 0, show at least 1%.
		if storagePercentage == 0 && storageUsedBytes > 0 {
			storagePercentage = 1
		}
	}
	// Debug logging for storage calculation
	svc.logger.Debug("Storage calculation debug",
		zap.Int64("storage_used_bytes", storageUsedBytes),
		zap.Int64("storage_limit_bytes", storageLimitBytes),
		zap.Int("calculated_percentage", storagePercentage))
	return SummaryDTO{
		TotalFiles:             totalFiles,
		TotalFolders:           totalFolders, // Actual folders only (albums excluded upstream).
		StorageUsed:            storageUsed,
		StorageLimit:           storageLimit,
		StorageUsagePercentage: storagePercentage,
	}
}
// buildStorageUsageTrend converts the domain storage trend into its DTO
// form. A nil trend or one without daily entries yields an empty (non-nil)
// data-point slice under the "Last 7 days" label.
func (svc *getDashboardServiceImpl) buildStorageUsageTrend(trend *storagedailyusage.StorageUsageTrend) StorageUsageTrendDTO {
	out := StorageUsageTrendDTO{
		Period:     "Last 7 days",
		DataPoints: []DataPointDTO{},
	}
	if trend == nil {
		return out
	}
	for _, daily := range trend.DailyUsages {
		out.DataPoints = append(out.DataPoints, DataPointDTO{
			Date:  daily.UsageDay.Format("2006-01-02"),
			Usage: svc.convertBytesToStorageAmount(daily.TotalBytes),
		})
	}
	return out
}
// convertBytesToStorageAmount renders a raw byte count as a value/unit pair,
// choosing the largest binary (1024-based) unit the count reaches. Counts
// below 1 KB (including zero and negatives) are reported in plain bytes.
func (svc *getDashboardServiceImpl) convertBytesToStorageAmount(bytes int64) StorageAmountDTO {
	const step = int64(1024)
	units := []struct {
		size int64
		name string
	}{
		{step * step * step * step, "TB"},
		{step * step * step, "GB"},
		{step * step, "MB"},
		{step, "KB"},
	}
	for _, u := range units {
		if bytes >= u.size {
			return StorageAmountDTO{
				Value: float64(bytes) / float64(u.size),
				Unit:  u.name,
			}
		}
	}
	return StorageAmountDTO{
		Value: float64(bytes),
		Unit:  "B",
	}
}
// fetchCollectionKeysForFiles loads the encrypted collection key (ciphertext
// and nonce, base64-encoded) for every unique collection referenced by the
// given recent files, so clients can decrypt file metadata without making
// additional per-collection API calls. Collections that fail to load, are
// missing, or lack a key are skipped with a warning.
func (svc *getDashboardServiceImpl) fetchCollectionKeysForFiles(ctx context.Context, files []file_service.RecentFileResponseDTO) []CollectionKeyDTO {
	if len(files) == 0 {
		return nil
	}
	// Deduplicate collection IDs. A map write is idempotent, so no
	// existence check is needed before assigning.
	uniqueCollections := make(map[string]gocql.UUID)
	for _, f := range files {
		uniqueCollections[f.CollectionID.String()] = f.CollectionID
	}
	// Fetch each unique collection and extract its encrypted key.
	keys := make([]CollectionKeyDTO, 0, len(uniqueCollections))
	for idStr, id := range uniqueCollections {
		collection, err := svc.getCollectionUseCase.Execute(ctx, id)
		if err != nil {
			svc.logger.Warn("Failed to fetch collection for dashboard collection keys",
				zap.String("collection_id", idStr),
				zap.Error(err))
			continue
		}
		if collection == nil {
			svc.logger.Warn("Collection not found for dashboard collection keys",
				zap.String("collection_id", idStr))
			continue
		}
		// Only emit an entry when an encrypted key is actually present.
		if collection.EncryptedCollectionKey != nil && len(collection.EncryptedCollectionKey.Ciphertext) > 0 {
			keys = append(keys, CollectionKeyDTO{
				CollectionID:                idStr,
				EncryptedCollectionKey:      base64.StdEncoding.EncodeToString(collection.EncryptedCollectionKey.Ciphertext),
				EncryptedCollectionKeyNonce: base64.StdEncoding.EncodeToString(collection.EncryptedCollectionKey.Nonce),
			})
		}
	}
	svc.logger.Debug("Fetched collection keys for dashboard",
		zap.Int("unique_collections", len(uniqueCollections)),
		zap.Int("keys_returned", len(keys)))
	return keys
}

View file

@ -0,0 +1,27 @@
package dashboard
import (
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
file_service "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/file"
uc_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/collection"
uc_filemetadata "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/filemetadata"
uc_storagedailyusage "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/storagedailyusage"
uc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/user"
)
// ProvideGetDashboardService is the wire provider for GetDashboardService.
// It is a thin pass-through that forwards every dependency, unchanged, to
// NewGetDashboardService.
func ProvideGetDashboardService(
	cfg *config.Configuration,
	logger *zap.Logger,
	listRecentFilesService file_service.ListRecentFilesService,
	userGetByIDUseCase uc_user.UserGetByIDUseCase,
	countUserFilesUseCase uc_filemetadata.CountUserFilesUseCase,
	countUserFoldersUseCase uc_collection.CountUserFoldersUseCase,
	getStorageTrendUseCase uc_storagedailyusage.GetStorageDailyUsageTrendUseCase,
	getCollectionUseCase uc_collection.GetCollectionUseCase,
) GetDashboardService {
	return NewGetDashboardService(
		cfg,
		logger,
		listRecentFilesService,
		userGetByIDUseCase,
		countUserFilesUseCase,
		countUserFoldersUseCase,
		getStorageTrendUseCase,
		getCollectionUseCase,
	)
}

View file

@ -0,0 +1,148 @@
// monorepo/cloud/backend/internal/maplefile/service/file/archive.go
package file
import (
"context"
"time"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
dom_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/file"
uc_filemetadata "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/filemetadata"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// ArchiveFileRequestDTO is the input for ArchiveFileService: the ID of the
// file to archive.
type ArchiveFileRequestDTO struct {
	FileID gocql.UUID `json:"file_id"`
}
// ArchiveFileResponseDTO reports the outcome of an archive request.
type ArchiveFileResponseDTO struct {
	Success bool   `json:"success"` // True when the file was archived.
	Message string `json:"message"` // Human-readable status message.
}
// ArchiveFileService transitions a file into the archived state on behalf of
// the authenticated user (resolved from the request context).
type ArchiveFileService interface {
	Execute(ctx context.Context, req *ArchiveFileRequestDTO) (*ArchiveFileResponseDTO, error)
}
// archiveFileServiceImpl implements ArchiveFileService.
type archiveFileServiceImpl struct {
	config                *config.Configuration
	logger                *zap.Logger
	collectionRepo        dom_collection.CollectionRepository // Used for write-access checks on the file's collection.
	getMetadataUseCase    uc_filemetadata.GetFileMetadataUseCase
	updateMetadataUseCase uc_filemetadata.UpdateFileMetadataUseCase
}
// NewArchiveFileService constructs an ArchiveFileService with a named
// sub-logger and the supplied repository and use-case dependencies.
func NewArchiveFileService(
	config *config.Configuration,
	logger *zap.Logger,
	collectionRepo dom_collection.CollectionRepository,
	getMetadataUseCase uc_filemetadata.GetFileMetadataUseCase,
	updateMetadataUseCase uc_filemetadata.UpdateFileMetadataUseCase,
) ArchiveFileService {
	svc := &archiveFileServiceImpl{
		config:                config,
		logger:                logger.Named("ArchiveFileService"),
		collectionRepo:        collectionRepo,
		getMetadataUseCase:    getMetadataUseCase,
		updateMetadataUseCase: updateMetadataUseCase,
	}
	return svc
}
// Execute archives a file on behalf of the authenticated user.
//
// Flow: validate input -> resolve caller from context -> load file metadata
// -> verify write access on the owning collection -> validate the state
// transition -> persist the archived state (bumping version and modified
// audit fields).
func (svc *archiveFileServiceImpl) Execute(ctx context.Context, req *ArchiveFileRequestDTO) (*ArchiveFileResponseDTO, error) {
	//
	// STEP 1: Validation
	//
	if req == nil {
		svc.logger.Warn("Failed validation with nil request")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "File ID is required")
	}
	// BUGFIX: gocql.UUID.String() never returns "" (a zero UUID formats as
	// "00000000-0000-..."), so the previous `String() == ""` check could
	// never fire. Compare against the zero value instead.
	if req.FileID == (gocql.UUID{}) {
		svc.logger.Warn("Empty file ID provided")
		return nil, httperror.NewForBadRequestWithSingleField("file_id", "File ID is required")
	}
	//
	// STEP 2: Get user ID from context
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 3: Get file metadata
	//
	file, err := svc.getMetadataUseCase.Execute(req.FileID)
	if err != nil {
		svc.logger.Error("Failed to get file metadata",
			zap.Any("error", err),
			zap.Any("file_id", req.FileID))
		return nil, err
	}
	// ROBUSTNESS: guard a nil result without an error before dereferencing
	// (mirrors the nil handling in the collection services).
	if file == nil {
		svc.logger.Debug("File not found",
			zap.Any("file_id", req.FileID))
		return nil, httperror.NewForNotFoundWithSingleField("message", "File not found")
	}
	//
	// STEP 4: Check if user has write access to the file's collection
	//
	hasAccess, err := svc.collectionRepo.CheckAccess(ctx, file.CollectionID, userID, dom_collection.CollectionPermissionReadWrite)
	if err != nil {
		svc.logger.Error("Failed to check collection access",
			zap.Any("error", err),
			zap.Any("collection_id", file.CollectionID),
			zap.Any("user_id", userID))
		return nil, err
	}
	if !hasAccess {
		svc.logger.Warn("Unauthorized file archive attempt",
			zap.Any("user_id", userID),
			zap.Any("file_id", req.FileID),
			zap.Any("collection_id", file.CollectionID))
		return nil, httperror.NewForForbiddenWithSingleField("message", "You don't have permission to archive this file")
	}
	//
	// STEP 5: Validate state transition (current state -> archived)
	//
	err = dom_file.IsValidStateTransition(file.State, dom_file.FileStateArchived)
	if err != nil {
		svc.logger.Warn("Invalid state transition for file archive",
			zap.Any("file_id", req.FileID),
			zap.String("current_state", file.State),
			zap.String("target_state", dom_file.FileStateArchived),
			zap.Error(err))
		return nil, httperror.NewForBadRequestWithSingleField("state", err.Error())
	}
	//
	// STEP 6: Archive the file
	//
	file.State = dom_file.FileStateArchived
	file.Version++ // Mutation means we increment version.
	file.ModifiedAt = time.Now()
	file.ModifiedByUserID = userID
	err = svc.updateMetadataUseCase.Execute(ctx, file)
	if err != nil {
		svc.logger.Error("Failed to archive file",
			zap.Any("error", err),
			zap.Any("file_id", req.FileID))
		return nil, err
	}
	svc.logger.Info("File archived successfully",
		zap.Any("file_id", req.FileID),
		zap.Any("collection_id", file.CollectionID),
		zap.Any("user_id", userID))
	return &ArchiveFileResponseDTO{
		Success: true,
		Message: "File archived successfully",
	}, nil
}

View file

@ -0,0 +1,442 @@
// monorepo/cloud/backend/internal/maplefile/service/file/complete_file_upload.go
package file
import (
"context"
"fmt"
"time"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
uc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/user"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
dom_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/file"
uc_filemetadata "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/filemetadata"
uc_fileobjectstorage "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/fileobjectstorage"
uc_storagedailyusage "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/storagedailyusage"
uc_storageusageevent "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/storageusageevent"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/transaction"
)
// CompleteFileUploadRequestDTO finalizes a previously-initiated file upload,
// identifying the file and optionally carrying client-reported sizes and
// upload confirmations for server-side validation.
type CompleteFileUploadRequestDTO struct {
	FileID gocql.UUID `json:"file_id"`
	// Optional: Client can provide actual file size for validation
	ActualFileSizeInBytes int64 `json:"actual_file_size_in_bytes,omitempty"`
	// Optional: Client can provide actual thumbnail size for validation
	ActualThumbnailSizeInBytes int64 `json:"actual_thumbnail_size_in_bytes,omitempty"`
	// Optional: Client can confirm successful upload of the file and thumbnail
	UploadConfirmed          bool `json:"upload_confirmed,omitempty"`
	ThumbnailUploadConfirmed bool `json:"thumbnail_upload_confirmed,omitempty"`
}
// CompleteFileUploadResponseDTO reports the outcome of completing an upload.
// The sizes are the ones measured in object storage, which the service treats
// as authoritative over any client-reported sizes.
type CompleteFileUploadResponseDTO struct {
	File                *FileResponseDTO `json:"file"`
	Success             bool             `json:"success"`
	Message             string           `json:"message"`
	ActualFileSize      int64            `json:"actual_file_size"`
	ActualThumbnailSize int64            `json:"actual_thumbnail_size"`
	UploadVerified      bool             `json:"upload_verified"`
	ThumbnailVerified   bool             `json:"thumbnail_verified"`
	StorageAdjustment   int64            `json:"storage_adjustment"` // Positive if more space used, negative if less
}
// CompleteFileUploadService finalizes a pending file upload: it verifies the
// encrypted objects exist in storage, reconciles the user's storage quota,
// and flips the file metadata from pending to active.
type CompleteFileUploadService interface {
	Execute(ctx context.Context, req *CompleteFileUploadRequestDTO) (*CompleteFileUploadResponseDTO, error)
}
// completeFileUploadServiceImpl is the default CompleteFileUploadService
// implementation, assembled from the use cases it orchestrates.
type completeFileUploadServiceImpl struct {
	config                    *config.Configuration
	logger                    *zap.Logger
	collectionRepo            dom_collection.CollectionRepository
	getMetadataUseCase        uc_filemetadata.GetFileMetadataUseCase
	updateMetadataUseCase     uc_filemetadata.UpdateFileMetadataUseCase
	verifyObjectExistsUseCase uc_fileobjectstorage.VerifyObjectExistsUseCase
	getObjectSizeUseCase      uc_fileobjectstorage.GetObjectSizeUseCase
	deleteDataUseCase         uc_fileobjectstorage.DeleteEncryptedDataUseCase
	storageQuotaHelperUseCase uc_user.UserStorageQuotaHelperUseCase
	// Storage usage tracking use cases (per-event audit trail + daily rollups).
	createStorageUsageEventUseCase uc_storageusageevent.CreateStorageUsageEventUseCase
	updateStorageUsageUseCase      uc_storagedailyusage.UpdateStorageUsageUseCase
}
// NewCompleteFileUploadService wires up the default CompleteFileUploadService
// implementation, namespacing the supplied logger so log lines are
// attributable to this service.
func NewCompleteFileUploadService(
	config *config.Configuration,
	logger *zap.Logger,
	collectionRepo dom_collection.CollectionRepository,
	getMetadataUseCase uc_filemetadata.GetFileMetadataUseCase,
	updateMetadataUseCase uc_filemetadata.UpdateFileMetadataUseCase,
	verifyObjectExistsUseCase uc_fileobjectstorage.VerifyObjectExistsUseCase,
	getObjectSizeUseCase uc_fileobjectstorage.GetObjectSizeUseCase,
	deleteDataUseCase uc_fileobjectstorage.DeleteEncryptedDataUseCase,
	storageQuotaHelperUseCase uc_user.UserStorageQuotaHelperUseCase,
	createStorageUsageEventUseCase uc_storageusageevent.CreateStorageUsageEventUseCase,
	updateStorageUsageUseCase uc_storagedailyusage.UpdateStorageUsageUseCase,
) CompleteFileUploadService {
	svc := &completeFileUploadServiceImpl{
		config:                         config,
		logger:                         logger.Named("CompleteFileUploadService"),
		collectionRepo:                 collectionRepo,
		getMetadataUseCase:             getMetadataUseCase,
		updateMetadataUseCase:          updateMetadataUseCase,
		verifyObjectExistsUseCase:      verifyObjectExistsUseCase,
		getObjectSizeUseCase:           getObjectSizeUseCase,
		deleteDataUseCase:              deleteDataUseCase,
		storageQuotaHelperUseCase:      storageQuotaHelperUseCase,
		createStorageUsageEventUseCase: createStorageUsageEventUseCase,
		updateStorageUsageUseCase:      updateStorageUsageUseCase,
	}
	return svc
}
// Execute finalizes a pending file upload.
//
// Flow: validate the request, confirm the caller has read-write access to the
// file's collection, verify the encrypted object (and optional thumbnail)
// actually exists in object storage, reconcile the storage quota against the
// sizes reserved at creation time, flip the metadata from pending to active,
// then record storage-usage accounting. Mutating steps are SAGA-protected:
// each success registers a compensation that is replayed on a later failure.
//
// Sizes measured in object storage are authoritative; a client-reported size
// that disagrees is only logged.
func (svc *completeFileUploadServiceImpl) Execute(ctx context.Context, req *CompleteFileUploadRequestDTO) (*CompleteFileUploadResponseDTO, error) {
	//
	// STEP 1: Validation
	//
	if req == nil {
		svc.logger.Warn("⚠️ Failed validation with nil request")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "File completion details are required")
	}
	// BUGFIX: gocql.UUID.String() never returns "" (a zero UUID renders as
	// "00000000-0000-0000-0000-000000000000"), so the previous
	// `req.FileID.String() == ""` check could never fire. Compare against the
	// zero value instead.
	if req.FileID == (gocql.UUID{}) {
		svc.logger.Warn("⚠️ Empty file ID provided")
		return nil, httperror.NewForBadRequestWithSingleField("file_id", "File ID is required")
	}
	//
	// STEP 2: Get user ID from context
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("🔴 Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 3: Get file metadata
	//
	// Developers note: Use `ExecuteWithAnyState` because initially created `FileMetadata` object has state set to `pending`.
	file, err := svc.getMetadataUseCase.Execute(req.FileID)
	if err != nil {
		svc.logger.Error("🔴 Failed to get file metadata",
			zap.Any("error", err),
			zap.Any("file_id", req.FileID))
		return nil, err
	}
	//
	// STEP 4: Verify user has write access to the file's collection
	//
	hasAccess, err := svc.collectionRepo.CheckAccess(ctx, file.CollectionID, userID, dom_collection.CollectionPermissionReadWrite)
	if err != nil {
		svc.logger.Error("🔴 Failed to check collection access",
			zap.Any("error", err),
			zap.Any("collection_id", file.CollectionID),
			zap.Any("user_id", userID))
		return nil, err
	}
	if !hasAccess {
		svc.logger.Warn("⚠️ Unauthorized file completion attempt",
			zap.Any("user_id", userID),
			zap.Any("file_id", req.FileID),
			zap.Any("collection_id", file.CollectionID))
		return nil, httperror.NewForForbiddenWithSingleField("message", "You don't have permission to complete this file upload")
	}
	//
	// STEP 5: Verify file is in pending state
	//
	if file.State != dom_file.FileStatePending {
		svc.logger.Warn("⚠️ File is not in pending state",
			zap.Any("file_id", req.FileID),
			zap.String("current_state", file.State))
		return nil, httperror.NewForBadRequestWithSingleField("file_id", fmt.Sprintf("File is not in pending state (current state: %s)", file.State))
	}
	//
	// STEP 6: Verify file exists in object storage and get actual size
	//
	fileExists, err := svc.verifyObjectExistsUseCase.Execute(file.EncryptedFileObjectKey)
	if err != nil {
		svc.logger.Error("🔴 Failed to verify file exists in storage",
			zap.Any("error", err),
			zap.Any("file_id", req.FileID),
			zap.String("storage_path", file.EncryptedFileObjectKey))
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Failed to verify file upload")
	}
	if !fileExists {
		svc.logger.Warn("⚠️ File does not exist in storage",
			zap.Any("file_id", req.FileID),
			zap.String("storage_path", file.EncryptedFileObjectKey))
		return nil, httperror.NewForBadRequestWithSingleField("file_id", "File has not been uploaded yet")
	}
	// Get actual file size from storage
	actualFileSize, err := svc.getObjectSizeUseCase.Execute(file.EncryptedFileObjectKey)
	if err != nil {
		svc.logger.Error("🔴 Failed to get file size from storage",
			zap.Any("error", err),
			zap.Any("file_id", req.FileID),
			zap.String("storage_path", file.EncryptedFileObjectKey))
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Failed to verify file size")
	}
	//
	// STEP 7: Verify thumbnail if expected
	//
	// Thumbnail verification is best-effort: any failure here downgrades
	// thumbnailVerified rather than aborting the completion.
	var actualThumbnailSize int64 = 0
	var thumbnailVerified bool = true
	if file.EncryptedThumbnailObjectKey != "" {
		thumbnailExists, err := svc.verifyObjectExistsUseCase.Execute(file.EncryptedThumbnailObjectKey)
		if err != nil {
			svc.logger.Warn("⚠️ Failed to verify thumbnail exists, continuing without it",
				zap.Any("error", err),
				zap.Any("file_id", req.FileID),
				zap.String("thumbnail_storage_path", file.EncryptedThumbnailObjectKey))
			thumbnailVerified = false
		} else if thumbnailExists {
			actualThumbnailSize, err = svc.getObjectSizeUseCase.Execute(file.EncryptedThumbnailObjectKey)
			if err != nil {
				svc.logger.Warn("⚠️ Failed to get thumbnail size, continuing without it",
					zap.Any("error", err),
					zap.Any("file_id", req.FileID),
					zap.String("thumbnail_storage_path", file.EncryptedThumbnailObjectKey))
				thumbnailVerified = false
			}
		} else {
			// Thumbnail was expected but not uploaded - clear the path
			file.EncryptedThumbnailObjectKey = ""
			thumbnailVerified = false
		}
	}
	//
	// SAGA: Initialize distributed transaction manager
	//
	saga := transaction.NewSaga("complete-file-upload", svc.logger)
	//
	// STEP 8: Calculate storage adjustment and update quota
	//
	expectedTotalSize := file.EncryptedFileSizeInBytes + file.EncryptedThumbnailSizeInBytes
	actualTotalSize := actualFileSize + actualThumbnailSize
	storageAdjustment := actualTotalSize - expectedTotalSize
	svc.logger.Info("Starting file upload completion with SAGA protection",
		zap.String("file_id", req.FileID.String()),
		zap.Int64("expected_file_size", file.EncryptedFileSizeInBytes),
		zap.Int64("actual_file_size", actualFileSize),
		zap.Int64("expected_thumbnail_size", file.EncryptedThumbnailSizeInBytes),
		zap.Int64("actual_thumbnail_size", actualThumbnailSize),
		zap.Int64("expected_total", expectedTotalSize),
		zap.Int64("actual_total", actualTotalSize),
		zap.Int64("adjustment", storageAdjustment))
	// Handle storage quota adjustment (SAGA protected)
	if storageAdjustment != 0 {
		if storageAdjustment > 0 {
			// Need more quota than originally reserved
			err = svc.storageQuotaHelperUseCase.CheckAndReserveQuota(ctx, userID, storageAdjustment)
			if err != nil {
				svc.logger.Error("Failed to reserve additional storage quota",
					zap.String("user_id", userID.String()),
					zap.Int64("additional_size", storageAdjustment),
					zap.Error(err))
				// Clean up the uploaded file since we can't complete due to quota
				// Note: This is an exceptional case - quota exceeded before any SAGA operations
				if deleteErr := svc.deleteDataUseCase.Execute(file.EncryptedFileObjectKey); deleteErr != nil {
					svc.logger.Error("Failed to clean up file after quota exceeded", zap.Error(deleteErr))
				}
				if file.EncryptedThumbnailObjectKey != "" {
					if deleteErr := svc.deleteDataUseCase.Execute(file.EncryptedThumbnailObjectKey); deleteErr != nil {
						svc.logger.Error("Failed to clean up thumbnail after quota exceeded", zap.Error(deleteErr))
					}
				}
				saga.Rollback(ctx)
				return nil, err
			}
			// Register compensation: release the additional quota if later steps fail
			storageAdjustmentCaptured := storageAdjustment
			userIDCaptured := userID
			saga.AddCompensation(func(ctx context.Context) error {
				svc.logger.Warn("SAGA compensation: releasing additional reserved quota",
					zap.Int64("size", storageAdjustmentCaptured))
				return svc.storageQuotaHelperUseCase.ReleaseQuota(ctx, userIDCaptured, storageAdjustmentCaptured)
			})
		} else {
			// Used less quota than originally reserved, release the difference
			err = svc.storageQuotaHelperUseCase.ReleaseQuota(ctx, userID, -storageAdjustment)
			if err != nil {
				svc.logger.Error("Failed to release excess quota",
					zap.String("user_id", userID.String()),
					zap.Int64("excess_size", -storageAdjustment),
					zap.Error(err))
				saga.Rollback(ctx)
				return nil, err
			}
			// Register compensation: re-reserve the released quota if later steps fail
			excessQuotaCaptured := -storageAdjustment
			userIDCaptured := userID
			saga.AddCompensation(func(ctx context.Context) error {
				svc.logger.Warn("SAGA compensation: re-reserving released excess quota",
					zap.Int64("size", excessQuotaCaptured))
				return svc.storageQuotaHelperUseCase.CheckAndReserveQuota(ctx, userIDCaptured, excessQuotaCaptured)
			})
		}
	}
	//
	// STEP 9: Validate file size if client provided it
	//
	if req.ActualFileSizeInBytes > 0 && req.ActualFileSizeInBytes != actualFileSize {
		svc.logger.Warn("⚠️ File size mismatch between client and storage",
			zap.Any("file_id", req.FileID),
			zap.Int64("client_reported_size", req.ActualFileSizeInBytes),
			zap.Int64("storage_actual_size", actualFileSize))
		// Continue with storage size as authoritative
	}
	//
	// STEP 10: Update file metadata to active state (SAGA protected)
	//
	// Snapshot the pre-mutation values so the compensation can restore them.
	originalState := file.State
	originalFileSizeInBytes := file.EncryptedFileSizeInBytes
	originalThumbnailSizeInBytes := file.EncryptedThumbnailSizeInBytes
	originalVersion := file.Version
	file.EncryptedFileSizeInBytes = actualFileSize
	file.EncryptedThumbnailSizeInBytes = actualThumbnailSize
	file.State = dom_file.FileStateActive
	file.ModifiedAt = time.Now()
	file.ModifiedByUserID = userID
	file.Version++ // Every mutation we need to keep a track of.
	err = svc.updateMetadataUseCase.Execute(ctx, file)
	if err != nil {
		svc.logger.Error("Failed to update file metadata to active state",
			zap.Error(err),
			zap.String("file_id", req.FileID.String()))
		saga.Rollback(ctx)
		return nil, err
	}
	// Register compensation: restore original metadata state
	fileIDCaptured := file.ID
	originalStateCaptured := originalState
	originalFileSizeCaptured := originalFileSizeInBytes
	originalThumbnailSizeCaptured := originalThumbnailSizeInBytes
	originalVersionCaptured := originalVersion
	saga.AddCompensation(func(ctx context.Context) error {
		svc.logger.Warn("SAGA compensation: restoring file metadata to pending state",
			zap.String("file_id", fileIDCaptured.String()))
		restoredFile, err := svc.getMetadataUseCase.Execute(fileIDCaptured)
		if err != nil {
			return err
		}
		restoredFile.State = originalStateCaptured
		restoredFile.EncryptedFileSizeInBytes = originalFileSizeCaptured
		restoredFile.EncryptedThumbnailSizeInBytes = originalThumbnailSizeCaptured
		restoredFile.Version = originalVersionCaptured
		restoredFile.ModifiedAt = time.Now()
		// Note: The repository Update method handles file count adjustments based on state changes,
		// so restoring to pending state will automatically decrement the file count
		return svc.updateMetadataUseCase.Execute(ctx, restoredFile)
	})
	// Note: File count increment is handled by the repository's Update method when state changes
	// from pending to active. No explicit increment needed here to avoid double counting.
	//
	// STEP 11: Create storage usage event (SAGA protected)
	//
	err = svc.createStorageUsageEventUseCase.Execute(ctx, file.OwnerID, actualTotalSize, "add")
	if err != nil {
		svc.logger.Error("Failed to create storage usage event",
			zap.String("owner_id", file.OwnerID.String()),
			zap.Int64("file_size", actualTotalSize),
			zap.Error(err))
		saga.Rollback(ctx)
		return nil, err
	}
	// Register compensation: create compensating "remove" event
	ownerIDCaptured := file.OwnerID
	actualTotalSizeCaptured := actualTotalSize
	saga.AddCompensation(func(ctx context.Context) error {
		svc.logger.Warn("SAGA compensation: creating compensating usage event")
		return svc.createStorageUsageEventUseCase.Execute(ctx, ownerIDCaptured, actualTotalSizeCaptured, "remove")
	})
	//
	// STEP 12: Update daily storage usage (SAGA protected)
	//
	today := time.Now().Truncate(24 * time.Hour)
	updateReq := &uc_storagedailyusage.UpdateStorageUsageRequest{
		UserID:      file.OwnerID,
		UsageDay:    &today,
		TotalBytes:  actualTotalSize,
		AddBytes:    actualTotalSize,
		RemoveBytes: 0,
		IsIncrement: true, // Increment the existing values
	}
	err = svc.updateStorageUsageUseCase.Execute(ctx, updateReq)
	if err != nil {
		svc.logger.Error("Failed to update daily storage usage",
			zap.String("owner_id", file.OwnerID.String()),
			zap.Int64("file_size", actualTotalSize),
			zap.Error(err))
		saga.Rollback(ctx)
		return nil, err
	}
	// Register compensation: reverse the usage update
	saga.AddCompensation(func(ctx context.Context) error {
		svc.logger.Warn("SAGA compensation: reversing daily usage update")
		compensateReq := &uc_storagedailyusage.UpdateStorageUsageRequest{
			UserID:      ownerIDCaptured,
			UsageDay:    &today,
			TotalBytes:  -actualTotalSizeCaptured, // Negative to reverse
			AddBytes:    0,
			RemoveBytes: actualTotalSizeCaptured,
			IsIncrement: true,
		}
		return svc.updateStorageUsageUseCase.Execute(ctx, compensateReq)
	})
	//
	// SUCCESS: All operations completed with SAGA protection
	//
	svc.logger.Info("File upload completed successfully with SAGA protection",
		zap.String("file_id", req.FileID.String()),
		zap.String("collection_id", file.CollectionID.String()),
		zap.String("owner_id", file.OwnerID.String()),
		zap.Int64("actual_file_size", actualFileSize),
		zap.Int64("actual_thumbnail_size", actualThumbnailSize),
		zap.Int64("storage_adjustment", storageAdjustment))
	return &CompleteFileUploadResponseDTO{
		File:                mapFileToDTO(file),
		Success:             true,
		Message:             "File upload completed successfully with storage quota updated",
		ActualFileSize:      actualFileSize,
		ActualThumbnailSize: actualThumbnailSize,
		UploadVerified:      true,
		ThumbnailVerified:   thumbnailVerified,
		StorageAdjustment:   storageAdjustment,
	}, nil
}

View file

@ -0,0 +1,395 @@
// monorepo/cloud/backend/internal/maplefile/service/file/create_pending_file.go
package file
import (
"context"
"fmt"
"time"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/crypto"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
dom_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/file"
dom_tag "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/tag"
uc_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/collection"
uc_filemetadata "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/filemetadata"
uc_fileobjectstorage "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/fileobjectstorage"
uc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/user"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// CreatePendingFileRequestDTO carries everything needed to register a new
// end-to-end-encrypted file in the "pending upload" state. The ID is
// generated client-side; the server validates it for uniqueness before use.
type CreatePendingFileRequestDTO struct {
	ID                gocql.UUID              `json:"id"`
	CollectionID      gocql.UUID              `json:"collection_id"`
	EncryptedMetadata string                  `json:"encrypted_metadata"`
	EncryptedFileKey  crypto.EncryptedFileKey `json:"encrypted_file_key"`
	EncryptionVersion string                  `json:"encryption_version"`
	EncryptedHash     string                  `json:"encrypted_hash"`
	// Optional: expected file size for validation (in bytes)
	ExpectedFileSizeInBytes int64 `json:"expected_file_size_in_bytes,omitempty"`
	// Optional: expected thumbnail size for validation (in bytes)
	ExpectedThumbnailSizeInBytes int64 `json:"expected_thumbnail_size_in_bytes,omitempty"`
	// Optional: content type for file upload validation (e.g., "image/jpeg", "video/mp4")
	// Required for album uploads to enforce photo/video restrictions
	ContentType string `json:"content_type,omitempty"`
	// Optional: tag IDs to embed in file at creation time
	TagIDs []gocql.UUID `json:"tag_ids,omitempty"`
}
// FileResponseDTO is the wire representation of a file's metadata returned by
// the file service endpoints. All content fields remain encrypted; only
// bookkeeping fields (sizes, timestamps, version, state) are plaintext.
type FileResponseDTO struct {
	ID                            gocql.UUID              `json:"id"`
	CollectionID                  gocql.UUID              `json:"collection_id"`
	OwnerID                       gocql.UUID              `json:"owner_id"`
	EncryptedMetadata             string                  `json:"encrypted_metadata"`
	EncryptedFileKey              crypto.EncryptedFileKey `json:"encrypted_file_key"`
	EncryptionVersion             string                  `json:"encryption_version"`
	EncryptedHash                 string                  `json:"encrypted_hash"`
	EncryptedFileSizeInBytes      int64                   `json:"encrypted_file_size_in_bytes"`
	EncryptedThumbnailSizeInBytes int64                   `json:"encrypted_thumbnail_size_in_bytes"`
	Tags                          []dom_tag.EmbeddedTag   `json:"tags"`
	CreatedAt                     time.Time               `json:"created_at"`
	ModifiedAt                    time.Time               `json:"modified_at"`
	Version                       uint64                  `json:"version"`
	State                         string                  `json:"state"`
	TombstoneVersion              uint64                  `json:"tombstone_version"`
	TombstoneExpiry               time.Time               `json:"tombstone_expiry"`
}
// CreatePendingFileResponseDTO returns the newly created pending file along
// with the presigned URL(s) the client must use to upload the encrypted bytes
// before the shared expiration time.
type CreatePendingFileResponseDTO struct {
	File                    *FileResponseDTO `json:"file"`
	PresignedUploadURL      string           `json:"presigned_upload_url"`
	PresignedThumbnailURL   string           `json:"presigned_thumbnail_url,omitempty"`
	UploadURLExpirationTime time.Time        `json:"upload_url_expiration_time"`
	Success                 bool             `json:"success"`
	Message                 string           `json:"message"`
}
// CreatePendingFileService registers a new file in the pending state,
// reserving storage quota and issuing presigned upload URLs.
type CreatePendingFileService interface {
	Execute(ctx context.Context, req *CreatePendingFileRequestDTO) (*CreatePendingFileResponseDTO, error)
}
// createPendingFileServiceImpl is the default CreatePendingFileService
// implementation, assembled from the use cases it orchestrates.
type createPendingFileServiceImpl struct {
	config                            *config.Configuration
	logger                            *zap.Logger
	getCollectionUseCase              uc_collection.GetCollectionUseCase
	checkCollectionAccessUseCase      uc_collection.CheckCollectionAccessUseCase
	checkFileExistsUseCase            uc_filemetadata.CheckFileExistsUseCase
	createMetadataUseCase             uc_filemetadata.CreateFileMetadataUseCase
	generatePresignedUploadURLUseCase uc_fileobjectstorage.GeneratePresignedUploadURLUseCase
	storageQuotaHelperUseCase         uc_user.UserStorageQuotaHelperUseCase
	tagRepo                           dom_tag.Repository
	fileValidator                     *FileValidator
}
// NewCreatePendingFileService wires up the default CreatePendingFileService
// implementation, namespacing the supplied logger so log lines are
// attributable to this service.
func NewCreatePendingFileService(
	config *config.Configuration,
	logger *zap.Logger,
	getCollectionUseCase uc_collection.GetCollectionUseCase,
	checkCollectionAccessUseCase uc_collection.CheckCollectionAccessUseCase,
	checkFileExistsUseCase uc_filemetadata.CheckFileExistsUseCase,
	createMetadataUseCase uc_filemetadata.CreateFileMetadataUseCase,
	generatePresignedUploadURLUseCase uc_fileobjectstorage.GeneratePresignedUploadURLUseCase,
	storageQuotaHelperUseCase uc_user.UserStorageQuotaHelperUseCase,
	tagRepo dom_tag.Repository,
) CreatePendingFileService {
	svc := &createPendingFileServiceImpl{
		config:                            config,
		logger:                            logger.Named("CreatePendingFileService"),
		getCollectionUseCase:              getCollectionUseCase,
		checkCollectionAccessUseCase:      checkCollectionAccessUseCase,
		checkFileExistsUseCase:            checkFileExistsUseCase,
		createMetadataUseCase:             createMetadataUseCase,
		generatePresignedUploadURLUseCase: generatePresignedUploadURLUseCase,
		storageQuotaHelperUseCase:         storageQuotaHelperUseCase,
		tagRepo:                           tagRepo,
		fileValidator:                     NewFileValidator(),
	}
	return svc
}
// Execute registers a new end-to-end-encrypted file in the pending state and
// returns presigned URLs the client uses to upload the encrypted bytes
// directly to object storage.
//
// Order of operations matters: storage quota is reserved BEFORE any other
// side effect, so every subsequent failure path must release the reservation
// (see releaseReserved below). The file only transitions out of pending via
// CompleteFileUploadService.
func (svc *createPendingFileServiceImpl) Execute(ctx context.Context, req *CreatePendingFileRequestDTO) (*CreatePendingFileResponseDTO, error) {
	//
	// STEP 1: Validation
	//
	if req == nil {
		svc.logger.Warn("⚠️ Failed validation with nil request")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "File creation details are required")
	}
	e := make(map[string]string)
	// BUGFIX: gocql.UUID.String() never returns "" (a zero UUID renders as
	// "00000000-..."), so the previous String()=="" checks could never fire.
	// Compare against the zero value instead.
	if req.ID == (gocql.UUID{}) {
		e["id"] = "Client-side generated ID is required"
	}
	doesExist, err := svc.checkFileExistsUseCase.Execute(req.ID)
	if err != nil {
		e["id"] = fmt.Sprintf("Client-side generated ID causes error: %v", req.ID)
	}
	if doesExist {
		e["id"] = "Client-side generated ID already exists"
	}
	if req.CollectionID == (gocql.UUID{}) {
		e["collection_id"] = "Collection ID is required"
	}
	if req.EncryptedMetadata == "" {
		e["encrypted_metadata"] = "Encrypted metadata is required"
	}
	// len() of a nil slice is 0, so a single length check covers nil and empty.
	if len(req.EncryptedFileKey.Ciphertext) == 0 {
		e["encrypted_file_key"] = "Encrypted file key is required"
	}
	if req.EncryptionVersion == "" {
		e["encryption_version"] = "Encryption version is required"
	}
	if req.EncryptedHash == "" {
		e["encrypted_hash"] = "Encrypted hash is required"
	}
	if req.ExpectedFileSizeInBytes <= 0 {
		e["expected_file_size_in_bytes"] = "Expected file size must be greater than 0"
	}
	if len(e) != 0 {
		svc.logger.Warn("⚠️ Failed validation",
			zap.Any("error", e))
		return nil, httperror.NewForBadRequest(&e)
	}
	//
	// STEP 2: Get user ID from context
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("❌ Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 3: Check storage quota BEFORE creating file
	//
	totalExpectedSize := req.ExpectedFileSizeInBytes + req.ExpectedThumbnailSizeInBytes
	err = svc.storageQuotaHelperUseCase.CheckAndReserveQuota(ctx, userID, totalExpectedSize)
	if err != nil {
		svc.logger.Warn("⚠️ Storage quota check failed",
			zap.String("user_id", userID.String()),
			zap.Int64("requested_size", totalExpectedSize),
			zap.Error(err))
		return nil, err // This will be a proper HTTP error from the quota helper
	}
	svc.logger.Info("✅ Storage quota reserved successfully",
		zap.String("user_id", userID.String()),
		zap.Int64("reserved_size", totalExpectedSize))
	// releaseReserved undoes the quota reservation on a failure path. The
	// reason string is interpolated into the error log so each call site keeps
	// its original, distinguishable message. (Replaces six copies of the same
	// boilerplate.)
	releaseReserved := func(reason string) {
		if releaseErr := svc.storageQuotaHelperUseCase.ReleaseQuota(ctx, userID, totalExpectedSize); releaseErr != nil {
			svc.logger.Error("❌ Failed to release quota after "+reason, zap.Error(releaseErr))
		}
	}
	//
	// STEP 4: Check if user has write access to the collection
	//
	hasAccess, err := svc.checkCollectionAccessUseCase.Execute(ctx, req.CollectionID, userID, dom_collection.CollectionPermissionReadWrite)
	if err != nil {
		// Release reserved quota on error
		releaseReserved("collection access check error")
		svc.logger.Error("❌ Failed to check collection access",
			zap.Any("error", err),
			zap.Any("collection_id", req.CollectionID),
			zap.Any("user_id", userID))
		return nil, err
	}
	if !hasAccess {
		// Release reserved quota on access denied
		releaseReserved("access denied")
		svc.logger.Warn("⚠️ Unauthorized file creation attempt",
			zap.Any("user_id", userID),
			zap.Any("collection_id", req.CollectionID))
		return nil, httperror.NewForForbiddenWithSingleField("message", "You don't have permission to create files in this collection")
	}
	//
	// STEP 5: Get collection details and validate file upload
	//
	// CWE-434: Unrestricted Upload of File with Dangerous Type
	// OWASP A04:2021: Insecure Design - File upload validation
	collection, err := svc.getCollectionUseCase.Execute(ctx, req.CollectionID)
	if err != nil {
		releaseReserved("collection retrieval error")
		svc.logger.Error("❌ Failed to get collection details",
			zap.Error(err),
			zap.Any("collection_id", req.CollectionID))
		return nil, err
	}
	// Validate file upload based on collection type
	if err := svc.fileValidator.ValidateFileUpload(
		collection.CollectionType,
		req.ExpectedFileSizeInBytes,
		req.ExpectedThumbnailSizeInBytes,
		req.ContentType,
	); err != nil {
		releaseReserved("validation error")
		svc.logger.Warn("⚠️ File upload validation failed",
			zap.Error(err),
			zap.String("collection_type", collection.CollectionType),
			zap.Int64("file_size", req.ExpectedFileSizeInBytes),
			zap.String("content_type", req.ContentType))
		return nil, httperror.NewForBadRequestWithSingleField("file", err.Error())
	}
	svc.logger.Info("✅ File upload validated successfully",
		zap.String("collection_type", collection.CollectionType),
		zap.Int64("file_size", req.ExpectedFileSizeInBytes),
		zap.String("content_type", req.ContentType))
	//
	// STEP 6: Generate storage paths.
	//
	storagePath := generateStoragePath(userID.String(), req.ID.String())
	thumbnailStoragePath := generateThumbnailStoragePath(userID.String(), req.ID.String())
	//
	// STEP 7: Generate presigned upload URLs
	// (was mislabeled as a duplicate "STEP 6"; subsequent steps renumbered)
	//
	uploadURLDuration := 1 * time.Hour // URLs valid for 1 hour
	expirationTime := time.Now().Add(uploadURLDuration)
	presignedUploadURL, err := svc.generatePresignedUploadURLUseCase.Execute(ctx, storagePath, uploadURLDuration)
	if err != nil {
		releaseReserved("presigned URL generation error")
		svc.logger.Error("❌ Failed to generate presigned upload URL",
			zap.Any("error", err),
			zap.Any("file_id", req.ID),
			zap.String("storage_path", storagePath))
		return nil, err
	}
	// Generate thumbnail upload URL (optional)
	var presignedThumbnailURL string
	if req.ExpectedThumbnailSizeInBytes > 0 {
		presignedThumbnailURL, err = svc.generatePresignedUploadURLUseCase.Execute(ctx, thumbnailStoragePath, uploadURLDuration)
		if err != nil {
			// Best-effort: the thumbnail is optional, so log and continue.
			svc.logger.Warn("⚠️ Failed to generate thumbnail presigned upload URL, continuing without it",
				zap.Any("error", err),
				zap.Any("file_id", req.ID),
				zap.String("thumbnail_storage_path", thumbnailStoragePath))
		}
	}
	//
	// STEP 8: Look up and embed tags if TagIDs were provided
	//
	var embeddedTags []dom_tag.EmbeddedTag
	if len(req.TagIDs) > 0 {
		svc.logger.Debug("🏷️ Looking up tags to embed in file",
			zap.Int("tagCount", len(req.TagIDs)))
		for _, tagID := range req.TagIDs {
			tagObj, err := svc.tagRepo.GetByID(ctx, tagID)
			if err != nil {
				// Missing/broken tags are skipped rather than failing the upload.
				svc.logger.Warn("Failed to get tag for embedding, skipping",
					zap.String("tagID", tagID.String()),
					zap.Error(err))
				continue
			}
			// Verify tag belongs to the user
			if tagObj.UserID != userID {
				svc.logger.Warn("Tag does not belong to user, skipping",
					zap.String("tagID", tagID.String()),
					zap.String("userID", userID.String()))
				continue
			}
			embeddedTags = append(embeddedTags, *tagObj.ToEmbeddedTag())
		}
		svc.logger.Info("✅ Tags embedded in file",
			zap.Int("embeddedCount", len(embeddedTags)),
			zap.Int("requestedCount", len(req.TagIDs)))
	}
	//
	// STEP 9: Create pending file metadata record
	//
	now := time.Now()
	file := &dom_file.File{
		ID:                            req.ID,
		CollectionID:                  req.CollectionID,
		OwnerID:                       userID,
		EncryptedMetadata:             req.EncryptedMetadata,
		EncryptedFileKey:              req.EncryptedFileKey,
		EncryptionVersion:             req.EncryptionVersion,
		EncryptedHash:                 req.EncryptedHash,
		EncryptedFileObjectKey:        storagePath,
		EncryptedFileSizeInBytes:      req.ExpectedFileSizeInBytes, // Will be updated when upload completes
		EncryptedThumbnailObjectKey:   thumbnailStoragePath,
		EncryptedThumbnailSizeInBytes: req.ExpectedThumbnailSizeInBytes, // Will be updated when upload completes
		Tags:                          embeddedTags,
		CreatedAt:                     now,
		CreatedByUserID:               userID,
		ModifiedAt:                    now,
		ModifiedByUserID:              userID,
		Version:                       1,                         // File creation always starts mutation version at 1.
		State:                         dom_file.FileStatePending, // File creation always starts state in a pending upload.
	}
	err = svc.createMetadataUseCase.Execute(file)
	if err != nil {
		releaseReserved("metadata creation error")
		svc.logger.Error("❌ Failed to create pending file metadata",
			zap.Any("error", err),
			zap.Any("file_id", req.ID))
		return nil, err
	}
	//
	// STEP 10: Prepare response
	//
	response := &CreatePendingFileResponseDTO{
		File:                    mapFileToDTO(file),
		PresignedUploadURL:      presignedUploadURL,
		PresignedThumbnailURL:   presignedThumbnailURL,
		UploadURLExpirationTime: expirationTime,
		Success:                 true,
		Message:                 "Pending file created successfully. Storage quota reserved. Use the presigned URL to upload your file.",
	}
	svc.logger.Info("✅ Pending file created successfully with quota reservation",
		zap.Any("file_id", req.ID),
		zap.Any("collection_id", req.CollectionID),
		zap.Any("owner_id", userID),
		zap.String("storage_path", storagePath),
		zap.Int64("reserved_size", totalExpectedSize),
		zap.Time("url_expiration", expirationTime))
	return response, nil
}
// generateStoragePath builds the canonical object-storage key for a user's
// encrypted file: "users/<ownerID>/files/<fileID>".
func generateStoragePath(ownerID, fileID string) string {
	return "users/" + ownerID + "/files/" + fileID
}
// generateThumbnailStoragePath builds the canonical object-storage key for a
// file's encrypted thumbnail: "users/<ownerID>/files/<fileID>_thumb".
func generateThumbnailStoragePath(ownerID, fileID string) string {
	return "users/" + ownerID + "/files/" + fileID + "_thumb"
}

View file

@ -0,0 +1,386 @@
// monorepo/cloud/backend/internal/maplefile/service/file/delete_multiple.go
package file
import (
"context"
"fmt"
"time"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
dom_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/file"
uc_filemetadata "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/filemetadata"
uc_fileobjectstorage "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/fileobjectstorage"
uc_storagedailyusage "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/storagedailyusage"
uc_storageusageevent "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/storageusageevent"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/transaction"
)
// DeleteMultipleFilesRequestDTO identifies the batch of files to delete.
type DeleteMultipleFilesRequestDTO struct {
	FileIDs []gocql.UUID `json:"file_ids"`
}
// DeleteMultipleFilesResponseDTO summarizes a batch deletion: how many files
// were deleted versus skipped, out of the total requested.
type DeleteMultipleFilesResponseDTO struct {
	Success        bool   `json:"success"`
	Message        string `json:"message"`
	DeletedCount   int    `json:"deleted_count"`
	SkippedCount   int    `json:"skipped_count"`
	TotalRequested int    `json:"total_requested"`
}
// DeleteMultipleFilesService deletes a batch of files in a single request.
type DeleteMultipleFilesService interface {
	Execute(ctx context.Context, req *DeleteMultipleFilesRequestDTO) (*DeleteMultipleFilesResponseDTO, error)
}
// deleteMultipleFilesServiceImpl is the default DeleteMultipleFilesService
// implementation, assembled from the use cases it orchestrates.
type deleteMultipleFilesServiceImpl struct {
	config                    *config.Configuration
	logger                    *zap.Logger
	collectionRepo            dom_collection.CollectionRepository
	getMetadataByIDsUseCase   uc_filemetadata.GetFileMetadataByIDsUseCase
	deleteMetadataManyUseCase uc_filemetadata.DeleteManyFileMetadataUseCase
	deleteMultipleDataUseCase uc_fileobjectstorage.DeleteMultipleEncryptedDataUseCase
	// Storage usage tracking use cases (per-event audit trail + daily rollups).
	createStorageUsageEventUseCase uc_storageusageevent.CreateStorageUsageEventUseCase
	updateStorageUsageUseCase      uc_storagedailyusage.UpdateStorageUsageUseCase
}
// NewDeleteMultipleFilesService wires a DeleteMultipleFilesService together
// with its repository and use-case dependencies. The logger is namespaced so
// log lines from this service are easy to filter.
func NewDeleteMultipleFilesService(
	config *config.Configuration,
	logger *zap.Logger,
	collectionRepo dom_collection.CollectionRepository,
	getMetadataByIDsUseCase uc_filemetadata.GetFileMetadataByIDsUseCase,
	deleteMetadataManyUseCase uc_filemetadata.DeleteManyFileMetadataUseCase,
	deleteMultipleDataUseCase uc_fileobjectstorage.DeleteMultipleEncryptedDataUseCase,
	createStorageUsageEventUseCase uc_storageusageevent.CreateStorageUsageEventUseCase,
	updateStorageUsageUseCase uc_storagedailyusage.UpdateStorageUsageUseCase,
) DeleteMultipleFilesService {
	svc := &deleteMultipleFilesServiceImpl{
		config:                         config,
		logger:                         logger.Named("DeleteMultipleFilesService"),
		collectionRepo:                 collectionRepo,
		getMetadataByIDsUseCase:        getMetadataByIDsUseCase,
		deleteMetadataManyUseCase:      deleteMetadataManyUseCase,
		deleteMultipleDataUseCase:      deleteMultipleDataUseCase,
		createStorageUsageEventUseCase: createStorageUsageEventUseCase,
		updateStorageUsageUseCase:      updateStorageUsageUseCase,
	}
	return svc
}
// Execute performs a bulk hard delete of the requested files.
//
// Per-file failures (no read-write access on the owning collection, or an
// invalid state transition) cause that file to be skipped rather than failing
// the whole request. Metadata deletion, collection file-count decrements and
// storage-usage accounting are coordinated through a SAGA so earlier steps
// are compensated if a later step fails; S3 object removal happens last and
// is best-effort. Returns counts of deleted vs skipped files.
func (svc *deleteMultipleFilesServiceImpl) Execute(ctx context.Context, req *DeleteMultipleFilesRequestDTO) (*DeleteMultipleFilesResponseDTO, error) {
	//
	// STEP 1: Validation
	//
	if req == nil {
		svc.logger.Warn("Failed validation with nil request")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "File IDs are required")
	}
	// len() of a nil slice is 0, so a single length check covers both the nil
	// and the empty case (the former `req.FileIDs == nil || len(...) == 0`
	// was redundant; staticcheck S1009).
	if len(req.FileIDs) == 0 {
		svc.logger.Warn("Empty file IDs provided")
		return nil, httperror.NewForBadRequestWithSingleField("file_ids", "File IDs are required")
	}
	// Validate individual file IDs.
	// BUGFIX: the previous guard `fileID.String() == ""` was dead code — a
	// gocql.UUID always formats as 36 characters — so zero-valued IDs slipped
	// through validation. Compare against the zero UUID instead.
	e := make(map[string]string)
	for i, fileID := range req.FileIDs {
		if fileID == (gocql.UUID{}) {
			e[fmt.Sprintf("file_ids[%d]", i)] = "File ID is required"
		}
	}
	if len(e) != 0 {
		svc.logger.Warn("Failed validation",
			zap.Any("error", e))
		return nil, httperror.NewForBadRequest(&e)
	}
	//
	// STEP 2: Get user ID from context
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 3: Get file metadata for all files
	//
	files, err := svc.getMetadataByIDsUseCase.Execute(req.FileIDs)
	if err != nil {
		svc.logger.Error("Failed to get file metadata",
			zap.Any("error", err),
			zap.Any("file_ids", req.FileIDs))
		return nil, err
	}
	//
	// STEP 4: Group files by collection to optimize permission checks
	//
	filesByCollection := make(map[gocql.UUID][]*dom_file.File)
	for _, file := range files {
		filesByCollection[file.CollectionID] = append(filesByCollection[file.CollectionID], file)
	}
	//
	// STEP 5: Pre-fetch collection access permissions (eliminates N+1 query)
	//
	// A failed access lookup is treated as "no access" so one bad collection
	// cannot fail the whole batch.
	collectionAccess := make(map[gocql.UUID]bool)
	for collectionID := range filesByCollection {
		hasAccess, err := svc.collectionRepo.CheckAccess(ctx, collectionID, userID, dom_collection.CollectionPermissionReadWrite)
		if err != nil {
			svc.logger.Warn("Failed to check access for collection",
				zap.Any("error", err),
				zap.Any("collection_id", collectionID))
			collectionAccess[collectionID] = false
			continue
		}
		collectionAccess[collectionID] = hasAccess
	}
	//
	// STEP 6: Filter files that the user has permission to delete and track storage by owner
	//
	var deletableFiles []*dom_file.File
	var storagePaths []string
	skippedCount := 0
	storageByOwner := make(map[gocql.UUID]int64)   // Track total storage to release per owner
	filesPerCollection := make(map[gocql.UUID]int) // Track files to delete per collection for count updates
	for _, file := range files {
		// Use pre-fetched access permission
		hasAccess := collectionAccess[file.CollectionID]
		if !hasAccess {
			svc.logger.Warn("User doesn't have permission to delete file, skipping",
				zap.Any("user_id", userID),
				zap.Any("file_id", file.ID),
				zap.Any("collection_id", file.CollectionID))
			skippedCount++
			continue
		}
		// Check valid transitions.
		if err := dom_collection.IsValidStateTransition(file.State, dom_file.FileStateDeleted); err != nil {
			svc.logger.Warn("Invalid file state transition",
				zap.Any("user_id", userID),
				zap.Error(err))
			skippedCount++
			continue
		}
		deletableFiles = append(deletableFiles, file)
		storagePaths = append(storagePaths, file.EncryptedFileObjectKey)
		// Add thumbnail paths if they exist
		if file.EncryptedThumbnailObjectKey != "" {
			storagePaths = append(storagePaths, file.EncryptedThumbnailObjectKey)
		}
		// Track storage by owner for active files only — non-active files were
		// already accounted for when they left the active state.
		if file.State == dom_file.FileStateActive {
			totalFileSize := file.EncryptedFileSizeInBytes + file.EncryptedThumbnailSizeInBytes
			storageByOwner[file.OwnerID] += totalFileSize
			// Track files per collection for count updates
			filesPerCollection[file.CollectionID]++
		}
	}
	if len(deletableFiles) == 0 {
		return &DeleteMultipleFilesResponseDTO{
			Success:        true,
			Message:        "No files could be deleted due to permission restrictions",
			DeletedCount:   0,
			SkippedCount:   len(req.FileIDs),
			TotalRequested: len(req.FileIDs),
		}, nil
	}
	//
	// SAGA: Initialize distributed transaction manager
	//
	saga := transaction.NewSaga("delete-multiple-files", svc.logger)
	svc.logger.Info("Starting multiple file deletion with SAGA protection",
		zap.Int("deletable_files_count", len(deletableFiles)),
		zap.Int("skipped_count", skippedCount),
		zap.Int("total_requested", len(req.FileIDs)))
	// Note: Version tracking is not needed for hard delete since the file is being
	// completely removed. Version tracking is handled in SoftDeleteFileService for
	// soft deletes where tombstone records are maintained.
	//
	// STEP 7: Delete file metadata (SAGA protected)
	//
	deletableFileIDs := make([]gocql.UUID, len(deletableFiles))
	deletableFilesCaptured := make([]*dom_file.File, len(deletableFiles))
	for i, file := range deletableFiles {
		deletableFileIDs[i] = file.ID
		// Deep copy for compensation
		fileCopy := *file
		deletableFilesCaptured[i] = &fileCopy
	}
	err = svc.deleteMetadataManyUseCase.Execute(deletableFileIDs)
	if err != nil {
		svc.logger.Error("Failed to delete file metadata",
			zap.Error(err),
			zap.Int("file_count", len(deletableFileIDs)))
		saga.Rollback(ctx)
		return nil, err
	}
	// Register compensation: This is a hard delete, so we cannot easily restore
	// The compensation logs the failure - manual intervention may be required
	saga.AddCompensation(func(ctx context.Context) error {
		svc.logger.Warn("SAGA compensation: hard delete cannot be automatically reversed",
			zap.Int("deleted_file_count", len(deletableFilesCaptured)),
			zap.String("note", "Manual restoration from backup may be required"))
		// For hard delete, we can't restore deleted metadata without backup
		// This compensation serves as an audit trail
		return nil
	})
	//
	// STEP 8: Update file counts for affected collections (SAGA protected)
	//
	for collectionID, fileCount := range filesPerCollection {
		if fileCount > 0 {
			// Decrement the file count for this collection
			for i := 0; i < fileCount; i++ {
				err = svc.collectionRepo.DecrementFileCount(ctx, collectionID)
				if err != nil {
					svc.logger.Error("Failed to decrement file count for collection",
						zap.String("collection_id", collectionID.String()),
						zap.Int("file_count", fileCount),
						zap.Error(err))
					saga.Rollback(ctx)
					return nil, err
				}
			}
			// Register compensation: increment the count back.
			// Capture loop variables so the closure sees this iteration's values.
			collectionIDCaptured := collectionID
			fileCountCaptured := fileCount
			saga.AddCompensation(func(ctx context.Context) error {
				svc.logger.Warn("SAGA compensation: restoring file count",
					zap.String("collection_id", collectionIDCaptured.String()),
					zap.Int("file_count", fileCountCaptured))
				for i := 0; i < fileCountCaptured; i++ {
					if err := svc.collectionRepo.IncrementFileCount(ctx, collectionIDCaptured); err != nil {
						svc.logger.Error("Failed to restore file count during compensation",
							zap.String("collection_id", collectionIDCaptured.String()),
							zap.Error(err))
						return err
					}
				}
				return nil
			})
		}
	}
	//
	// STEP 9: Create storage usage events and update daily usage for each owner (SAGA protected)
	//
	today := time.Now().Truncate(24 * time.Hour)
	for ownerID, totalSize := range storageByOwner {
		if totalSize > 0 {
			// Create storage usage event (SAGA protected)
			err = svc.createStorageUsageEventUseCase.Execute(ctx, ownerID, totalSize, "remove")
			if err != nil {
				svc.logger.Error("Failed to create storage usage event for bulk deletion",
					zap.String("owner_id", ownerID.String()),
					zap.Int64("total_size", totalSize),
					zap.Error(err))
				saga.Rollback(ctx)
				return nil, err
			}
			// Register compensation: create compensating "add" event
			ownerIDCaptured := ownerID
			totalSizeCaptured := totalSize
			saga.AddCompensation(func(ctx context.Context) error {
				svc.logger.Warn("SAGA compensation: creating compensating usage event",
					zap.String("owner_id", ownerIDCaptured.String()))
				return svc.createStorageUsageEventUseCase.Execute(ctx, ownerIDCaptured, totalSizeCaptured, "add")
			})
			// Update daily storage usage (SAGA protected)
			updateReq := &uc_storagedailyusage.UpdateStorageUsageRequest{
				UserID:      ownerID,
				UsageDay:    &today,
				TotalBytes:  -totalSize, // Negative because we're removing
				AddBytes:    0,
				RemoveBytes: totalSize,
				IsIncrement: true, // Increment the existing values
			}
			err = svc.updateStorageUsageUseCase.Execute(ctx, updateReq)
			if err != nil {
				svc.logger.Error("Failed to update daily storage usage for bulk deletion",
					zap.String("owner_id", ownerID.String()),
					zap.Int64("total_size", totalSize),
					zap.Error(err))
				saga.Rollback(ctx)
				return nil, err
			}
			// Register compensation: reverse the usage update
			saga.AddCompensation(func(ctx context.Context) error {
				svc.logger.Warn("SAGA compensation: reversing daily usage update",
					zap.String("owner_id", ownerIDCaptured.String()))
				compensateReq := &uc_storagedailyusage.UpdateStorageUsageRequest{
					UserID:      ownerIDCaptured,
					UsageDay:    &today,
					TotalBytes:  totalSizeCaptured, // Positive to reverse
					AddBytes:    totalSizeCaptured,
					RemoveBytes: 0,
					IsIncrement: true,
				}
				return svc.updateStorageUsageUseCase.Execute(ctx, compensateReq)
			})
		}
	}
	//
	// STEP 10: Delete S3 objects
	//
	if len(storagePaths) > 0 {
		svc.logger.Info("Deleting S3 objects for multiple files",
			zap.Int("s3_objects_count", len(storagePaths)))
		if err := svc.deleteMultipleDataUseCase.Execute(storagePaths); err != nil {
			// Log but don't fail - S3 deletion is best effort after metadata is deleted
			svc.logger.Error("Failed to delete some S3 objects (continuing anyway)",
				zap.Error(err),
				zap.Int("s3_objects_count", len(storagePaths)))
		} else {
			svc.logger.Info("Successfully deleted all S3 objects",
				zap.Int("s3_objects_deleted", len(storagePaths)))
		}
	}
	//
	// SUCCESS: All operations completed with SAGA protection
	//
	svc.logger.Info("Multiple files deleted successfully with SAGA protection",
		zap.Int("deleted_count", len(deletableFiles)),
		zap.Int("skipped_count", skippedCount),
		zap.Int("total_requested", len(req.FileIDs)),
		zap.String("user_id", userID.String()),
		zap.Int("affected_owners", len(storageByOwner)),
		zap.Int("s3_objects_deleted", len(storagePaths)))
	return &DeleteMultipleFilesResponseDTO{
		Success:        true,
		Message:        fmt.Sprintf("Successfully deleted %d files", len(deletableFiles)),
		DeletedCount:   len(deletableFiles),
		SkippedCount:   skippedCount,
		TotalRequested: len(req.FileIDs),
	}, nil
}

View file

@ -0,0 +1,188 @@
package file
import (
"fmt"
"strings"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
)
// File size limits (in bytes). These are untyped integer constants so they
// compare and assign cleanly against the int64 sizes used by the validator.
const (
	// MaxFileSizeGeneral is the maximum file size for general folders (500MB)
	MaxFileSizeGeneral = 500 * 1024 * 1024
	// MaxFileSizeAlbum is the maximum file size for album uploads (100MB)
	// Albums are for photos/videos, so we use a more restrictive limit
	MaxFileSizeAlbum = 100 * 1024 * 1024
	// MaxThumbnailSize is the maximum thumbnail size (10MB)
	MaxThumbnailSize = 10 * 1024 * 1024
)
// Allowed content types for albums (photos and videos only).
// Entries must be lower-case: validateContentType lower-cases and trims the
// incoming content type before comparing against this list.
var AlbumAllowedContentTypes = []string{
	// Image formats
	"image/jpeg",
	"image/jpg",
	"image/png",
	"image/gif",
	"image/webp",
	"image/heic",
	"image/heif",
	"image/bmp",
	"image/tiff",
	"image/svg+xml",
	// Video formats
	"video/mp4",
	"video/mpeg",
	"video/quicktime",  // .mov files
	"video/x-msvideo",  // .avi files
	"video/x-matroska", // .mkv files
	"video/webm",
	"video/3gpp",
	"video/x-flv",
}
// FileValidator provides file upload validation based on collection type.
// It is stateless; the zero value is ready to use.
type FileValidator struct{}
// NewFileValidator creates a new file validator ready for use.
func NewFileValidator() *FileValidator {
	return new(FileValidator)
}
// ValidateFileUpload validates a file upload based on collection type and
// file properties: size limits depend on the collection type, the thumbnail
// (when present) has its own cap, and albums additionally restrict content
// types to photos and videos.
// CWE-434: Unrestricted Upload of File with Dangerous Type
// OWASP A01:2021: Broken Access Control - File upload restrictions
func (v *FileValidator) ValidateFileUpload(
	collectionType string,
	fileSize int64,
	thumbnailSize int64,
	contentType string,
) error {
	// Size limit depends on whether the target collection is an album.
	if err := v.validateFileSize(collectionType, fileSize); err != nil {
		return err
	}
	// Thumbnail is optional; only check it when one was supplied.
	if thumbnailSize > 0 {
		if err := v.validateThumbnailSize(thumbnailSize); err != nil {
			return err
		}
	}
	// Folders (non-albums) place no restriction on content type:
	// users can upload any file type to regular folders.
	if collectionType != dom_collection.CollectionTypeAlbum {
		return nil
	}
	// Albums accept photos/videos only.
	return v.validateContentType(contentType)
}
// validateFileSize checks that the file size is positive and within the limit
// for the given collection type (albums are capped lower than folders).
func (v *FileValidator) validateFileSize(collectionType string, fileSize int64) error {
	if fileSize <= 0 {
		return fmt.Errorf("file size must be greater than 0")
	}
	// Pick the cap and the human-readable label for the error message.
	limit, label := int64(MaxFileSizeGeneral), "folder"
	if collectionType == dom_collection.CollectionTypeAlbum {
		limit, label = MaxFileSizeAlbum, "album"
	}
	if fileSize <= limit {
		return nil
	}
	return fmt.Errorf(
		"file size (%s) exceeds maximum allowed size for %s (%s)",
		formatBytes(fileSize),
		label,
		formatBytes(limit),
	)
}
// validateThumbnailSize checks that a supplied thumbnail does not exceed
// MaxThumbnailSize. A non-positive size is accepted: thumbnails are optional.
func (v *FileValidator) validateThumbnailSize(thumbnailSize int64) error {
	switch {
	case thumbnailSize <= 0:
		// No thumbnail supplied — nothing to validate.
		return nil
	case thumbnailSize > MaxThumbnailSize:
		return fmt.Errorf(
			"thumbnail size (%s) exceeds maximum allowed size (%s)",
			formatBytes(thumbnailSize),
			formatBytes(MaxThumbnailSize),
		)
	default:
		return nil
	}
}
// validateContentType checks whether the content type is on the album
// allow-list. Matching is case-insensitive and ignores surrounding whitespace.
func (v *FileValidator) validateContentType(contentType string) error {
	if contentType == "" {
		return fmt.Errorf("content type is required for album uploads")
	}
	// Normalize before comparing: the allow-list entries are lower-case.
	want := strings.ToLower(strings.TrimSpace(contentType))
	for _, candidate := range AlbumAllowedContentTypes {
		if candidate == want {
			return nil
		}
	}
	return fmt.Errorf(
		"content type '%s' is not allowed in albums. Only photos and videos are permitted",
		contentType,
	)
}
// GetAllowedContentTypes returns the list of allowed content types for albums.
// A copy is returned so callers cannot mutate the package-level allow-list
// (the previous implementation handed out the shared slice directly).
func (v *FileValidator) GetAllowedContentTypes() []string {
	return append([]string(nil), AlbumAllowedContentTypes...)
}
// GetMaxFileSize returns the maximum file size (in bytes) permitted for the
// given collection type.
func (v *FileValidator) GetMaxFileSize(collectionType string) int64 {
	switch collectionType {
	case dom_collection.CollectionTypeAlbum:
		return MaxFileSizeAlbum
	default:
		return MaxFileSizeGeneral
	}
}
// GetMaxThumbnailSize returns the maximum thumbnail size in bytes,
// independent of collection type.
func (v *FileValidator) GetMaxThumbnailSize() int64 {
	return MaxThumbnailSize
}
// formatBytes renders a byte count in human-readable form using binary
// (1024-based) units: "512 B", "1.5 KB", "500.0 MB", "1.0 GB", …
func formatBytes(size int64) string {
	const step = 1024
	// Below one kilobyte there is nothing to scale.
	if size < step {
		return fmt.Sprintf("%d B", size)
	}
	// Find the largest unit that keeps the mantissa >= 1.
	divisor := int64(step)
	idx := 0
	for remaining := size / step; remaining >= step; remaining /= step {
		divisor *= step
		idx++
	}
	return fmt.Sprintf("%.1f %cB", float64(size)/float64(divisor), "KMGTPE"[idx])
}

View file

@ -0,0 +1,436 @@
package file
import (
"strings"
"testing"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
)
// TestValidateFileUpload_FolderValidCases tests valid folder uploads:
// any content type is acceptable as long as sizes are within folder limits.
func TestValidateFileUpload_FolderValidCases(t *testing.T) {
	v := NewFileValidator()
	cases := []struct {
		name          string
		fileSize      int64
		thumbnailSize int64
		contentType   string
	}{
		{
			name:          "valid document upload to folder",
			fileSize:      10 * 1024 * 1024, // 10MB
			thumbnailSize: 0,
			contentType:   "application/pdf",
		},
		{
			name:          "valid large file to folder",
			fileSize:      500 * 1024 * 1024, // 500MB (max)
			thumbnailSize: 0,
			contentType:   "application/zip",
		},
		{
			name:          "valid executable to folder",
			fileSize:      50 * 1024 * 1024, // 50MB
			thumbnailSize: 0,
			contentType:   "application/x-executable",
		},
		{
			name:          "valid image with thumbnail to folder",
			fileSize:      20 * 1024 * 1024,    // 20MB
			thumbnailSize: 5 * 1024 * 1024,     // 5MB
			contentType:   "image/png",
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if err := v.ValidateFileUpload(dom_collection.CollectionTypeFolder, tc.fileSize, tc.thumbnailSize, tc.contentType); err != nil {
				t.Errorf("Expected valid folder upload, got error: %v", err)
			}
		})
	}
}
// TestValidateFileUpload_AlbumValidCases tests valid album uploads:
// photo/video content types within album size limits.
func TestValidateFileUpload_AlbumValidCases(t *testing.T) {
	v := NewFileValidator()
	cases := []struct {
		name          string
		fileSize      int64
		thumbnailSize int64
		contentType   string
	}{
		{
			name:          "valid JPEG image to album",
			fileSize:      10 * 1024 * 1024, // 10MB
			thumbnailSize: 1 * 1024 * 1024,  // 1MB
			contentType:   "image/jpeg",
		},
		{
			name:          "valid PNG image to album",
			fileSize:      20 * 1024 * 1024, // 20MB
			thumbnailSize: 2 * 1024 * 1024,  // 2MB
			contentType:   "image/png",
		},
		{
			name:          "valid MP4 video to album",
			fileSize:      100 * 1024 * 1024, // 100MB (max)
			thumbnailSize: 5 * 1024 * 1024,   // 5MB
			contentType:   "video/mp4",
		},
		{
			name:          "valid HEIC image to album",
			fileSize:      15 * 1024 * 1024, // 15MB
			thumbnailSize: 0,
			contentType:   "image/heic",
		},
		{
			name:          "valid WebP image to album",
			fileSize:      8 * 1024 * 1024, // 8MB
			thumbnailSize: 500 * 1024,      // 500KB
			contentType:   "image/webp",
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if err := v.ValidateFileUpload(dom_collection.CollectionTypeAlbum, tc.fileSize, tc.thumbnailSize, tc.contentType); err != nil {
				t.Errorf("Expected valid album upload, got error: %v", err)
			}
		})
	}
}
// TestValidateFileUpload_FolderSizeLimits tests folder size limit enforcement
// for both the main file (500MB cap, inclusive) and the thumbnail (10MB cap).
func TestValidateFileUpload_FolderSizeLimits(t *testing.T) {
	v := NewFileValidator()
	cases := []struct {
		name          string
		fileSize      int64
		thumbnailSize int64
		expectError   bool
		errorContains string
	}{
		{
			name:          "file exceeds folder limit",
			fileSize:      501 * 1024 * 1024, // 501MB (over limit)
			thumbnailSize: 0,
			expectError:   true,
			errorContains: "exceeds maximum allowed size for folder",
		},
		{
			name:          "file at folder limit",
			fileSize:      500 * 1024 * 1024, // 500MB (exact limit)
			thumbnailSize: 0,
			expectError:   false,
		},
		{
			name:          "thumbnail exceeds limit",
			fileSize:      10 * 1024 * 1024, // 10MB
			thumbnailSize: 11 * 1024 * 1024, // 11MB (over limit)
			expectError:   true,
			errorContains: "thumbnail size",
		},
		{
			name:          "zero file size",
			fileSize:      0,
			thumbnailSize: 0,
			expectError:   true,
			errorContains: "must be greater than 0",
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			err := v.ValidateFileUpload(dom_collection.CollectionTypeFolder, tc.fileSize, tc.thumbnailSize, "application/pdf")
			if tc.expectError {
				if err == nil {
					t.Error("Expected error but got none")
				} else if !strings.Contains(err.Error(), tc.errorContains) {
					t.Errorf("Expected error containing '%s', got: %v", tc.errorContains, err)
				}
				return
			}
			if err != nil {
				t.Errorf("Expected no error, got: %v", err)
			}
		})
	}
}
// TestValidateFileUpload_AlbumSizeLimits tests album size limit enforcement:
// the 100MB album cap is inclusive.
func TestValidateFileUpload_AlbumSizeLimits(t *testing.T) {
	v := NewFileValidator()
	cases := []struct {
		name          string
		fileSize      int64
		expectError   bool
		errorContains string
	}{
		{
			name:          "file exceeds album limit",
			fileSize:      101 * 1024 * 1024, // 101MB (over limit)
			expectError:   true,
			errorContains: "exceeds maximum allowed size for album",
		},
		{
			name:        "file at album limit",
			fileSize:    100 * 1024 * 1024, // 100MB (exact limit)
			expectError: false,
		},
		{
			name:        "file below album limit",
			fileSize:    50 * 1024 * 1024, // 50MB
			expectError: false,
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			err := v.ValidateFileUpload(dom_collection.CollectionTypeAlbum, tc.fileSize, 0, "image/jpeg")
			if tc.expectError {
				if err == nil {
					t.Error("Expected error but got none")
				} else if !strings.Contains(err.Error(), tc.errorContains) {
					t.Errorf("Expected error containing '%s', got: %v", tc.errorContains, err)
				}
				return
			}
			if err != nil {
				t.Errorf("Expected no error, got: %v", err)
			}
		})
	}
}
// TestValidateFileUpload_AlbumContentTypeRestrictions tests album content type
// validation: only photos/videos allowed, matching is case-insensitive and
// whitespace-tolerant, and an empty content type is rejected.
func TestValidateFileUpload_AlbumContentTypeRestrictions(t *testing.T) {
	v := NewFileValidator()
	cases := []struct {
		name          string
		contentType   string
		expectError   bool
		errorContains string
	}{
		{
			name:        "valid JPEG",
			contentType: "image/jpeg",
			expectError: false,
		},
		{
			name:        "valid PNG",
			contentType: "image/png",
			expectError: false,
		},
		{
			name:        "valid MP4",
			contentType: "video/mp4",
			expectError: false,
		},
		{
			name:          "invalid PDF",
			contentType:   "application/pdf",
			expectError:   true,
			errorContains: "not allowed in albums",
		},
		{
			name:          "invalid ZIP",
			contentType:   "application/zip",
			expectError:   true,
			errorContains: "not allowed in albums",
		},
		{
			name:          "invalid executable",
			contentType:   "application/x-executable",
			expectError:   true,
			errorContains: "not allowed in albums",
		},
		{
			name:          "empty content type",
			contentType:   "",
			expectError:   true,
			errorContains: "content type is required",
		},
		{
			name:        "case insensitive IMAGE/JPEG",
			contentType: "IMAGE/JPEG",
			expectError: false,
		},
		{
			name:        "content type with extra spaces",
			contentType: "  image/png  ",
			expectError: false,
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			err := v.ValidateFileUpload(dom_collection.CollectionTypeAlbum, 10*1024*1024, 0, tc.contentType)
			if tc.expectError {
				if err == nil {
					t.Error("Expected error but got none")
				} else if !strings.Contains(err.Error(), tc.errorContains) {
					t.Errorf("Expected error containing '%s', got: %v", tc.errorContains, err)
				}
				return
			}
			if err != nil {
				t.Errorf("Expected no error, got: %v", err)
			}
		})
	}
}
// TestValidateFileUpload_FolderNoContentTypeRestrictions verifies that
// folders accept any content type, including an empty one.
func TestValidateFileUpload_FolderNoContentTypeRestrictions(t *testing.T) {
	v := NewFileValidator()
	for _, ct := range []string{
		"application/pdf",
		"application/zip",
		"application/x-executable",
		"text/plain",
		"application/json",
		"application/octet-stream",
		"image/jpeg", // Photos are also allowed in folders
		"video/mp4",  // Videos are also allowed in folders
		"",           // Even empty content type is OK for folders
	} {
		t.Run("folder allows "+ct, func(t *testing.T) {
			if err := v.ValidateFileUpload(dom_collection.CollectionTypeFolder, 10*1024*1024, 0, ct); err != nil {
				t.Errorf("Expected folder to allow content type '%s', got error: %v", ct, err)
			}
		})
	}
}
// TestGetMaxFileSize checks that GetMaxFileSize reports the folder and album
// caps matching the package constants.
func TestGetMaxFileSize(t *testing.T) {
	v := NewFileValidator()
	if got := v.GetMaxFileSize(dom_collection.CollectionTypeFolder); got != MaxFileSizeGeneral {
		t.Errorf("Expected folder max size %d, got %d", MaxFileSizeGeneral, got)
	}
	if got := v.GetMaxFileSize(dom_collection.CollectionTypeAlbum); got != MaxFileSizeAlbum {
		t.Errorf("Expected album max size %d, got %d", MaxFileSizeAlbum, got)
	}
}
// TestGetMaxThumbnailSize checks that GetMaxThumbnailSize matches the
// MaxThumbnailSize package constant.
func TestGetMaxThumbnailSize(t *testing.T) {
	v := NewFileValidator()
	if got := v.GetMaxThumbnailSize(); got != MaxThumbnailSize {
		t.Errorf("Expected max thumbnail size %d, got %d", MaxThumbnailSize, got)
	}
}
// TestGetAllowedContentTypes checks that the allow-list is non-empty and
// contains the common photo/video types.
func TestGetAllowedContentTypes(t *testing.T) {
	v := NewFileValidator()
	got := v.GetAllowedContentTypes()
	if len(got) == 0 {
		t.Error("Expected non-empty list of allowed content types")
	}
	// Index the returned list so each membership check is O(1).
	set := make(map[string]bool, len(got))
	for _, ct := range got {
		set[ct] = true
	}
	for _, want := range []string{"image/jpeg", "image/png", "video/mp4"} {
		if !set[want] {
			t.Errorf("Expected allowed type '%s' not found in list", want)
		}
	}
}
// TestFormatBytes checks formatBytes at unit boundaries (B/KB/MB/GB).
func TestFormatBytes(t *testing.T) {
	for _, tc := range []struct {
		bytes    int64
		expected string
	}{
		{bytes: 0, expected: "0 B"},
		{bytes: 1023, expected: "1023 B"},
		{bytes: 1024, expected: "1.0 KB"},
		{bytes: 1024 * 1024, expected: "1.0 MB"},
		{bytes: 500 * 1024 * 1024, expected: "500.0 MB"},
		{bytes: 1024 * 1024 * 1024, expected: "1.0 GB"},
	} {
		if got := formatBytes(tc.bytes); got != tc.expected {
			t.Errorf("formatBytes(%d) = %s, expected %s", tc.bytes, got, tc.expected)
		}
	}
}
// TestValidateFileUpload_AllAllowedAlbumContentTypes verifies every entry of
// the AlbumAllowedContentTypes allow-list is accepted for album uploads.
func TestValidateFileUpload_AllAllowedAlbumContentTypes(t *testing.T) {
	v := NewFileValidator()
	for _, ct := range AlbumAllowedContentTypes {
		t.Run("album allows "+ct, func(t *testing.T) {
			if err := v.ValidateFileUpload(dom_collection.CollectionTypeAlbum, 10*1024*1024, 0, ct); err != nil {
				t.Errorf("Expected album to allow content type '%s', got error: %v", ct, err)
			}
		})
	}
}

View file

@ -0,0 +1,98 @@
// monorepo/cloud/backend/internal/maplefile/service/file/get.go
package file
import (
"context"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
uc_filemetadata "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/filemetadata"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// GetFileService returns the metadata DTO for a single file, enforcing that
// the requesting user has at least read access to the file's collection.
type GetFileService interface {
	Execute(ctx context.Context, fileID gocql.UUID) (*FileResponseDTO, error)
}
// getFileServiceImpl is the concrete GetFileService; dependencies are
// injected via NewGetFileService.
type getFileServiceImpl struct {
	config             *config.Configuration
	logger             *zap.Logger
	collectionRepo     dom_collection.CollectionRepository
	getMetadataUseCase uc_filemetadata.GetFileMetadataUseCase
}
// NewGetFileService wires a GetFileService together with its dependencies.
// The logger is namespaced for easy filtering.
func NewGetFileService(
	config *config.Configuration,
	logger *zap.Logger,
	collectionRepo dom_collection.CollectionRepository,
	getMetadataUseCase uc_filemetadata.GetFileMetadataUseCase,
) GetFileService {
	svc := &getFileServiceImpl{
		config:             config,
		logger:             logger.Named("GetFileService"),
		collectionRepo:     collectionRepo,
		getMetadataUseCase: getMetadataUseCase,
	}
	return svc
}
// Execute returns the metadata DTO for a single file after verifying the
// caller (taken from the request context) has at least read access to the
// file's collection. Returns 400 on a zero file ID, 403 when access is
// denied, and propagates repository/use-case errors otherwise.
func (svc *getFileServiceImpl) Execute(ctx context.Context, fileID gocql.UUID) (*FileResponseDTO, error) {
	//
	// STEP 1: Validation
	//
	// BUGFIX: the previous guard `fileID.String() == ""` was dead code — a
	// gocql.UUID always formats as 36 characters — so a zero-valued ID was
	// never rejected. Compare against the zero UUID instead.
	if fileID == (gocql.UUID{}) {
		svc.logger.Warn("Empty file ID provided")
		return nil, httperror.NewForBadRequestWithSingleField("file_id", "File ID is required")
	}
	//
	// STEP 2: Get user ID from context
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 3: Get file metadata
	//
	file, err := svc.getMetadataUseCase.Execute(fileID)
	if err != nil {
		svc.logger.Error("Failed to get file metadata",
			zap.Any("error", err),
			zap.Any("file_id", fileID))
		return nil, err
	}
	//
	// STEP 4: Check if user has access to the file's collection
	//
	hasAccess, err := svc.collectionRepo.CheckAccess(ctx, file.CollectionID, userID, dom_collection.CollectionPermissionReadOnly)
	if err != nil {
		svc.logger.Error("Failed to check collection access",
			zap.Any("error", err),
			zap.Any("collection_id", file.CollectionID),
			zap.Any("user_id", userID))
		return nil, err
	}
	if !hasAccess {
		svc.logger.Warn("Unauthorized file access attempt",
			zap.Any("user_id", userID),
			zap.Any("file_id", fileID),
			zap.Any("collection_id", file.CollectionID))
		return nil, httperror.NewForForbiddenWithSingleField("message", "You don't have permission to access this file")
	}
	//
	// STEP 5: Map domain model to response DTO
	//
	response := mapFileToDTO(file)
	return response, nil
}

View file

@ -0,0 +1,165 @@
// monorepo/cloud/backend/internal/maplefile/service/file/get_presigned_download_url.go
package file
import (
"context"
"time"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
uc_filemetadata "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/filemetadata"
uc_fileobjectstorage "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/fileobjectstorage"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// GetPresignedDownloadURLRequestDTO is the inbound payload for requesting a
// time-limited download URL for an encrypted file.
type GetPresignedDownloadURLRequestDTO struct {
	FileID      gocql.UUID    `json:"file_id"`
	URLDuration time.Duration `json:"url_duration,omitempty"` // Optional, defaults to 1 hour
}
// GetPresignedDownloadURLResponseDTO carries the file metadata plus the
// presigned URL(s) for downloading its encrypted content.
type GetPresignedDownloadURLResponseDTO struct {
	File                      *FileResponseDTO `json:"file"`
	PresignedDownloadURL      string           `json:"presigned_download_url"`
	PresignedThumbnailURL     string           `json:"presigned_thumbnail_url,omitempty"` // empty when the file has no thumbnail or thumbnail URL generation failed
	DownloadURLExpirationTime time.Time        `json:"download_url_expiration_time"`
	Success                   bool             `json:"success"`
	Message                   string           `json:"message"`
}
// GetPresignedDownloadURLService generates presigned download URLs for a
// file's encrypted object (and thumbnail, when present) after verifying the
// requester has read access to the file's collection.
type GetPresignedDownloadURLService interface {
	Execute(ctx context.Context, req *GetPresignedDownloadURLRequestDTO) (*GetPresignedDownloadURLResponseDTO, error)
}
// getPresignedDownloadURLServiceImpl is the concrete
// GetPresignedDownloadURLService; dependencies are injected via
// NewGetPresignedDownloadURLService.
type getPresignedDownloadURLServiceImpl struct {
	config                              *config.Configuration
	logger                              *zap.Logger
	collectionRepo                      dom_collection.CollectionRepository
	getMetadataUseCase                  uc_filemetadata.GetFileMetadataUseCase
	generatePresignedDownloadURLUseCase uc_fileobjectstorage.GeneratePresignedDownloadURLUseCase
}
// NewGetPresignedDownloadURLService wires a GetPresignedDownloadURLService
// together with its dependencies. The logger is namespaced for filtering.
func NewGetPresignedDownloadURLService(
	config *config.Configuration,
	logger *zap.Logger,
	collectionRepo dom_collection.CollectionRepository,
	getMetadataUseCase uc_filemetadata.GetFileMetadataUseCase,
	generatePresignedDownloadURLUseCase uc_fileobjectstorage.GeneratePresignedDownloadURLUseCase,
) GetPresignedDownloadURLService {
	svc := &getPresignedDownloadURLServiceImpl{
		config:                              config,
		logger:                              logger.Named("GetPresignedDownloadURLService"),
		collectionRepo:                      collectionRepo,
		getMetadataUseCase:                  getMetadataUseCase,
		generatePresignedDownloadURLUseCase: generatePresignedDownloadURLUseCase,
	}
	return svc
}
// Execute generates time-limited presigned download URLs for a file's
// encrypted content and, when a thumbnail object key exists, its encrypted
// thumbnail.
//
// Flow: validate the request, resolve the authenticated user from ctx, load
// the file's metadata, require at least read-only access to the file's
// collection, then generate the URL(s). A thumbnail URL failure is logged
// and tolerated; a failure on the primary URL aborts the call.
func (svc *getPresignedDownloadURLServiceImpl) Execute(ctx context.Context, req *GetPresignedDownloadURLRequestDTO) (*GetPresignedDownloadURLResponseDTO, error) {
	//
	// STEP 1: Validation
	//
	if req == nil {
		svc.logger.Warn("⚠️ Failed validation with nil request")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "Request details are required")
	}
	// BUGFIX: gocql.UUID.String() never returns "" (a zero UUID formats as
	// "00000000-0000-0000-0000-000000000000"), so the previous
	// `req.FileID.String() == ""` check could never fire. Compare against
	// the zero value instead.
	if req.FileID == (gocql.UUID{}) {
		svc.logger.Warn("⚠️ Empty file ID provided")
		return nil, httperror.NewForBadRequestWithSingleField("file_id", "File ID is required")
	}
	// Set default URL duration if not provided.
	if req.URLDuration == 0 {
		req.URLDuration = 1 * time.Hour
	}
	//
	// STEP 2: Get user ID from context
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("🔴 Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 3: Get file metadata
	//
	file, err := svc.getMetadataUseCase.Execute(req.FileID)
	if err != nil {
		svc.logger.Error("🔴 Failed to get file metadata",
			zap.Any("error", err),
			zap.Any("file_id", req.FileID))
		return nil, err
	}
	//
	// STEP 4: Check if user has read access to the file's collection
	//
	hasAccess, err := svc.collectionRepo.CheckAccess(ctx, file.CollectionID, userID, dom_collection.CollectionPermissionReadOnly)
	if err != nil {
		svc.logger.Error("🔴 Failed to check collection access",
			zap.Any("error", err),
			zap.Any("collection_id", file.CollectionID),
			zap.Any("user_id", userID))
		return nil, err
	}
	if !hasAccess {
		svc.logger.Warn("⚠️ Unauthorized presigned download URL request",
			zap.Any("user_id", userID),
			zap.Any("file_id", req.FileID),
			zap.Any("collection_id", file.CollectionID))
		return nil, httperror.NewForForbiddenWithSingleField("message", "You don't have permission to download this file")
	}
	//
	// STEP 5: Generate presigned download URLs
	//
	expirationTime := time.Now().Add(req.URLDuration)
	presignedDownloadURL, err := svc.generatePresignedDownloadURLUseCase.Execute(ctx, file.EncryptedFileObjectKey, req.URLDuration)
	if err != nil {
		svc.logger.Error("🔴 Failed to generate presigned download URL",
			zap.Any("error", err),
			zap.Any("file_id", req.FileID),
			zap.String("storage_path", file.EncryptedFileObjectKey))
		return nil, err
	}
	// Generate thumbnail download URL if a thumbnail object exists; failures
	// here are deliberately non-fatal so the primary download still succeeds.
	var presignedThumbnailURL string
	if file.EncryptedThumbnailObjectKey != "" {
		presignedThumbnailURL, err = svc.generatePresignedDownloadURLUseCase.Execute(ctx, file.EncryptedThumbnailObjectKey, req.URLDuration)
		if err != nil {
			svc.logger.Warn("⚠️ Failed to generate thumbnail presigned download URL, continuing without it",
				zap.Any("error", err),
				zap.Any("file_id", req.FileID),
				zap.String("thumbnail_storage_path", file.EncryptedThumbnailObjectKey))
		}
	}
	//
	// STEP 6: Prepare response
	//
	response := &GetPresignedDownloadURLResponseDTO{
		File:                      mapFileToDTO(file),
		PresignedDownloadURL:      presignedDownloadURL,
		PresignedThumbnailURL:     presignedThumbnailURL,
		DownloadURLExpirationTime: expirationTime,
		Success:                   true,
		Message:                   "Presigned download URLs generated successfully",
	}
	svc.logger.Info("✅ Presigned download URLs generated successfully",
		zap.Any("file_id", req.FileID),
		zap.Any("user_id", userID),
		zap.Time("url_expiration", expirationTime))
	return response, nil
}

View file

@ -0,0 +1,165 @@
// monorepo/cloud/backend/internal/maplefile/service/file/get_presigned_upload_url.go
package file
import (
"context"
"time"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
uc_filemetadata "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/filemetadata"
uc_fileobjectstorage "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/fileobjectstorage"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// GetPresignedUploadURLRequestDTO identifies the file to upload into and,
// optionally, how long the issued URLs remain valid.
type GetPresignedUploadURLRequestDTO struct {
	FileID      gocql.UUID    `json:"file_id"`
	URLDuration time.Duration `json:"url_duration,omitempty"` // Optional, defaults to 1 hour
}

// GetPresignedUploadURLResponseDTO carries the file metadata plus the
// generated upload URL(s) and their shared expiration time.
type GetPresignedUploadURLResponseDTO struct {
	File                    *FileResponseDTO `json:"file"`
	PresignedUploadURL      string           `json:"presigned_upload_url"`
	PresignedThumbnailURL   string           `json:"presigned_thumbnail_url,omitempty"` // empty when the file has no thumbnail object
	UploadURLExpirationTime time.Time        `json:"upload_url_expiration_time"`
	Success                 bool             `json:"success"`
	Message                 string           `json:"message"`
}

// GetPresignedUploadURLService turns a file ID into time-limited presigned
// upload URLs for the file's encrypted content and optional thumbnail.
type GetPresignedUploadURLService interface {
	Execute(ctx context.Context, req *GetPresignedUploadURLRequestDTO) (*GetPresignedUploadURLResponseDTO, error)
}

// getPresignedUploadURLServiceImpl is the concrete implementation, backed by
// the collection repository (for write-access checks) and the file-metadata
// and object-storage use cases.
type getPresignedUploadURLServiceImpl struct {
	config                            *config.Configuration
	logger                            *zap.Logger
	collectionRepo                    dom_collection.CollectionRepository
	getMetadataUseCase                uc_filemetadata.GetFileMetadataUseCase
	generatePresignedUploadURLUseCase uc_fileobjectstorage.GeneratePresignedUploadURLUseCase
}
// NewGetPresignedUploadURLService constructs the service with all of its
// collaborators and scopes log output under a dedicated logger name.
func NewGetPresignedUploadURLService(
	config *config.Configuration,
	logger *zap.Logger,
	collectionRepo dom_collection.CollectionRepository,
	getMetadataUseCase uc_filemetadata.GetFileMetadataUseCase,
	generatePresignedUploadURLUseCase uc_fileobjectstorage.GeneratePresignedUploadURLUseCase,
) GetPresignedUploadURLService {
	namedLogger := logger.Named("GetPresignedUploadURLService")
	svc := &getPresignedUploadURLServiceImpl{
		config:                            config,
		logger:                            namedLogger,
		collectionRepo:                    collectionRepo,
		getMetadataUseCase:                getMetadataUseCase,
		generatePresignedUploadURLUseCase: generatePresignedUploadURLUseCase,
	}
	return svc
}
// Execute generates time-limited presigned upload URLs for a file's
// encrypted content and, when a thumbnail object key exists, its encrypted
// thumbnail.
//
// Flow: validate the request, resolve the authenticated user from ctx, load
// the file's metadata, require read-write access to the file's collection,
// then generate the URL(s). A thumbnail URL failure is logged and tolerated;
// a failure on the primary URL aborts the call.
func (svc *getPresignedUploadURLServiceImpl) Execute(ctx context.Context, req *GetPresignedUploadURLRequestDTO) (*GetPresignedUploadURLResponseDTO, error) {
	//
	// STEP 1: Validation
	//
	if req == nil {
		svc.logger.Warn("Failed validation with nil request")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "Request details are required")
	}
	// BUGFIX: gocql.UUID.String() never returns "" (a zero UUID formats as
	// "00000000-..."), so the previous `req.FileID.String() == ""` check was
	// dead code. Compare against the zero value instead.
	if req.FileID == (gocql.UUID{}) {
		svc.logger.Warn("Empty file ID provided")
		return nil, httperror.NewForBadRequestWithSingleField("file_id", "File ID is required")
	}
	// Set default URL duration if not provided.
	if req.URLDuration == 0 {
		req.URLDuration = 1 * time.Hour
	}
	//
	// STEP 2: Get user ID from context
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 3: Get file metadata
	//
	file, err := svc.getMetadataUseCase.Execute(req.FileID)
	if err != nil {
		svc.logger.Error("Failed to get file metadata",
			zap.Any("error", err),
			zap.Any("file_id", req.FileID))
		return nil, err
	}
	//
	// STEP 4: Check if user has write access to the file's collection
	//
	hasAccess, err := svc.collectionRepo.CheckAccess(ctx, file.CollectionID, userID, dom_collection.CollectionPermissionReadWrite)
	if err != nil {
		svc.logger.Error("Failed to check collection access",
			zap.Any("error", err),
			zap.Any("collection_id", file.CollectionID),
			zap.Any("user_id", userID))
		return nil, err
	}
	if !hasAccess {
		svc.logger.Warn("Unauthorized presigned URL request",
			zap.Any("user_id", userID),
			zap.Any("file_id", req.FileID),
			zap.Any("collection_id", file.CollectionID))
		return nil, httperror.NewForForbiddenWithSingleField("message", "You don't have permission to upload to this file")
	}
	//
	// STEP 5: Generate presigned upload URLs
	//
	expirationTime := time.Now().Add(req.URLDuration)
	presignedUploadURL, err := svc.generatePresignedUploadURLUseCase.Execute(ctx, file.EncryptedFileObjectKey, req.URLDuration)
	if err != nil {
		svc.logger.Error("Failed to generate presigned upload URL",
			zap.Any("error", err),
			zap.Any("file_id", req.FileID),
			zap.String("storage_path", file.EncryptedFileObjectKey))
		return nil, err
	}
	// Generate thumbnail upload URL if a thumbnail object exists; failures
	// here are deliberately non-fatal so the primary upload still succeeds.
	var presignedThumbnailURL string
	if file.EncryptedThumbnailObjectKey != "" {
		presignedThumbnailURL, err = svc.generatePresignedUploadURLUseCase.Execute(ctx, file.EncryptedThumbnailObjectKey, req.URLDuration)
		if err != nil {
			svc.logger.Warn("Failed to generate thumbnail presigned upload URL, continuing without it",
				zap.Any("error", err),
				zap.Any("file_id", req.FileID),
				zap.String("thumbnail_storage_path", file.EncryptedThumbnailObjectKey))
		}
	}
	//
	// STEP 6: Prepare response
	//
	response := &GetPresignedUploadURLResponseDTO{
		File:                    mapFileToDTO(file),
		PresignedUploadURL:      presignedUploadURL,
		PresignedThumbnailURL:   presignedThumbnailURL,
		UploadURLExpirationTime: expirationTime,
		Success:                 true,
		Message:                 "Presigned upload URLs generated successfully",
	}
	svc.logger.Info("Presigned upload URLs generated successfully",
		zap.Any("file_id", req.FileID),
		zap.Any("user_id", userID),
		zap.Time("url_expiration", expirationTime))
	return response, nil
}

View file

@ -0,0 +1,120 @@
// monorepo/cloud/backend/internal/maplefile/service/file/list_by_collection.go
package file
import (
"context"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
uc_filemetadata "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/filemetadata"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// ListFilesByCollectionRequestDTO identifies the collection whose files
// should be listed.
type ListFilesByCollectionRequestDTO struct {
	CollectionID gocql.UUID `json:"collection_id"`
}

// FilesResponseDTO is the shared list-shaped response used by the various
// file-listing services in this package.
type FilesResponseDTO struct {
	Files []*FileResponseDTO `json:"files"`
}

// ListFilesByCollectionService lists all file metadata in a collection the
// authenticated user can read.
type ListFilesByCollectionService interface {
	Execute(ctx context.Context, req *ListFilesByCollectionRequestDTO) (*FilesResponseDTO, error)
}

// listFilesByCollectionServiceImpl is the concrete implementation, backed by
// the collection repository (for access checks) and the file-metadata use case.
type listFilesByCollectionServiceImpl struct {
	config                      *config.Configuration
	logger                      *zap.Logger
	collectionRepo              dom_collection.CollectionRepository
	getFilesByCollectionUseCase uc_filemetadata.GetFileMetadataByCollectionUseCase
}
// NewListFilesByCollectionService constructs the service with its
// collaborators and scopes log output under a dedicated logger name.
func NewListFilesByCollectionService(
	config *config.Configuration,
	logger *zap.Logger,
	collectionRepo dom_collection.CollectionRepository,
	getFilesByCollectionUseCase uc_filemetadata.GetFileMetadataByCollectionUseCase,
) ListFilesByCollectionService {
	namedLogger := logger.Named("ListFilesByCollectionService")
	svc := &listFilesByCollectionServiceImpl{
		config:                      config,
		logger:                      namedLogger,
		collectionRepo:              collectionRepo,
		getFilesByCollectionUseCase: getFilesByCollectionUseCase,
	}
	return svc
}
// Execute lists all file metadata in the requested collection after
// verifying the caller has at least read-only access to it.
//
// Returns an httperror for validation/authorization failures; repository
// and use-case errors are propagated unchanged.
func (svc *listFilesByCollectionServiceImpl) Execute(ctx context.Context, req *ListFilesByCollectionRequestDTO) (*FilesResponseDTO, error) {
	//
	// STEP 1: Validation
	//
	if req == nil {
		svc.logger.Warn("Failed validation with nil request")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "Collection ID is required")
	}
	// BUGFIX: gocql.UUID.String() never returns "" (a zero UUID formats as
	// "00000000-..."), so the previous `req.CollectionID.String() == ""`
	// check was dead code. Compare against the zero value instead.
	if req.CollectionID == (gocql.UUID{}) {
		svc.logger.Warn("Empty collection ID provided")
		return nil, httperror.NewForBadRequestWithSingleField("collection_id", "Collection ID is required")
	}
	//
	// STEP 2: Get user ID from context
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 3: Check if user has access to the collection
	//
	hasAccess, err := svc.collectionRepo.CheckAccess(ctx, req.CollectionID, userID, dom_collection.CollectionPermissionReadOnly)
	if err != nil {
		svc.logger.Error("Failed to check collection access",
			zap.Any("error", err),
			zap.Any("collection_id", req.CollectionID),
			zap.Any("user_id", userID))
		return nil, err
	}
	if !hasAccess {
		svc.logger.Warn("Unauthorized collection access attempt",
			zap.Any("user_id", userID),
			zap.Any("collection_id", req.CollectionID))
		return nil, httperror.NewForForbiddenWithSingleField("message", "You don't have permission to access this collection")
	}
	//
	// STEP 4: Get files by collection
	//
	files, err := svc.getFilesByCollectionUseCase.Execute(req.CollectionID)
	if err != nil {
		svc.logger.Error("Failed to get files by collection",
			zap.Any("error", err),
			zap.Any("collection_id", req.CollectionID))
		return nil, err
	}
	//
	// STEP 5: Map domain models to response DTOs
	//
	response := &FilesResponseDTO{
		Files: make([]*FileResponseDTO, len(files)),
	}
	for i, file := range files {
		response.Files[i] = mapFileToDTO(file)
	}
	svc.logger.Debug("Found files by collection",
		zap.Int("count", len(files)),
		zap.Any("collection_id", req.CollectionID))
	return response, nil
}

View file

@ -0,0 +1,104 @@
// monorepo/cloud/backend/internal/maplefile/service/file/list_by_created_by_user_id.go
package file
import (
"context"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
uc_filemetadata "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/filemetadata"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// ListFilesByCreatedByUserIDRequestDTO identifies the creator whose files
// should be listed. The caller may only request their own user ID.
type ListFilesByCreatedByUserIDRequestDTO struct {
	CreatedByUserID gocql.UUID `json:"created_by_user_id"`
}

// ListFilesByCreatedByUserIDService lists file metadata created by a user;
// access is restricted to the requesting user's own files.
type ListFilesByCreatedByUserIDService interface {
	Execute(ctx context.Context, req *ListFilesByCreatedByUserIDRequestDTO) (*FilesResponseDTO, error)
}

// listFilesByCreatedByUserIDServiceImpl is the concrete implementation,
// backed by the file-metadata use case.
type listFilesByCreatedByUserIDServiceImpl struct {
	config                           *config.Configuration
	logger                           *zap.Logger
	getFilesByCreatedByUserIDUseCase uc_filemetadata.GetFileMetadataByCreatedByUserIDUseCase
}
// NewListFilesByCreatedByUserIDService constructs the service with its
// collaborators and scopes log output under a dedicated logger name.
func NewListFilesByCreatedByUserIDService(
	config *config.Configuration,
	logger *zap.Logger,
	getFilesByCreatedByUserIDUseCase uc_filemetadata.GetFileMetadataByCreatedByUserIDUseCase,
) ListFilesByCreatedByUserIDService {
	namedLogger := logger.Named("ListFilesByCreatedByUserIDService")
	svc := &listFilesByCreatedByUserIDServiceImpl{
		config:                           config,
		logger:                           namedLogger,
		getFilesByCreatedByUserIDUseCase: getFilesByCreatedByUserIDUseCase,
	}
	return svc
}
// Execute lists file metadata created by the requested user. For privacy,
// callers may only list files they created themselves — requesting another
// user's ID is rejected with a 403.
func (svc *listFilesByCreatedByUserIDServiceImpl) Execute(ctx context.Context, req *ListFilesByCreatedByUserIDRequestDTO) (*FilesResponseDTO, error) {
	//
	// STEP 1: Validation
	//
	if req == nil {
		svc.logger.Warn("Failed validation with nil request")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "Created by user ID is required")
	}
	// BUGFIX: gocql.UUID.String() never returns "" (a zero UUID formats as
	// "00000000-..."), so the previous `.String() == ""` check was dead
	// code. Compare against the zero value instead.
	if req.CreatedByUserID == (gocql.UUID{}) {
		svc.logger.Warn("Empty created by user ID provided")
		return nil, httperror.NewForBadRequestWithSingleField("created_by_user_id", "Created by user ID is required")
	}
	//
	// STEP 2: Get user ID from context (for authorization)
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 3: Check if the requesting user can access files created by the specified user
	// Only allow users to see their own created files for privacy
	//
	if userID != req.CreatedByUserID {
		svc.logger.Warn("Unauthorized attempt to list files created by another user",
			zap.Any("requesting_user_id", userID),
			zap.Any("created_by_user_id", req.CreatedByUserID))
		return nil, httperror.NewForForbiddenWithSingleField("message", "You can only view files you have created")
	}
	//
	// STEP 4: Get files by created_by_user_id
	//
	files, err := svc.getFilesByCreatedByUserIDUseCase.Execute(req.CreatedByUserID)
	if err != nil {
		svc.logger.Error("Failed to get files by created_by_user_id",
			zap.Any("error", err),
			zap.Any("created_by_user_id", req.CreatedByUserID))
		return nil, err
	}
	//
	// STEP 5: Map domain models to response DTOs
	//
	response := &FilesResponseDTO{
		Files: make([]*FileResponseDTO, len(files)),
	}
	for i, file := range files {
		response.Files[i] = mapFileToDTO(file)
	}
	svc.logger.Debug("Found files by created_by_user_id",
		zap.Int("count", len(files)),
		zap.Any("created_by_user_id", req.CreatedByUserID))
	return response, nil
}

View file

@ -0,0 +1,104 @@
// monorepo/cloud/backend/internal/maplefile/service/file/list_by_owner_id.go
package file
import (
"context"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
uc_filemetadata "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/filemetadata"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// ListFilesByOwnerIDRequestDTO identifies the owner whose files should be
// listed. The caller may only request their own user ID.
type ListFilesByOwnerIDRequestDTO struct {
	OwnerID gocql.UUID `json:"owner_id"`
}

// ListFilesByOwnerIDService lists file metadata owned by a user; access is
// restricted to the requesting user's own files.
type ListFilesByOwnerIDService interface {
	Execute(ctx context.Context, req *ListFilesByOwnerIDRequestDTO) (*FilesResponseDTO, error)
}

// listFilesByOwnerIDServiceImpl is the concrete implementation, backed by
// the file-metadata use case.
type listFilesByOwnerIDServiceImpl struct {
	config                   *config.Configuration
	logger                   *zap.Logger
	getFilesByOwnerIDUseCase uc_filemetadata.GetFileMetadataByOwnerIDUseCase
}
// NewListFilesByOwnerIDService constructs the service with its collaborators
// and scopes log output under a dedicated logger name.
func NewListFilesByOwnerIDService(
	config *config.Configuration,
	logger *zap.Logger,
	getFilesByOwnerIDUseCase uc_filemetadata.GetFileMetadataByOwnerIDUseCase,
) ListFilesByOwnerIDService {
	namedLogger := logger.Named("ListFilesByOwnerIDService")
	svc := &listFilesByOwnerIDServiceImpl{
		config:                   config,
		logger:                   namedLogger,
		getFilesByOwnerIDUseCase: getFilesByOwnerIDUseCase,
	}
	return svc
}
// Execute lists file metadata owned by the requested user. For privacy,
// callers may only list files they own themselves — requesting another
// user's ID is rejected with a 403.
func (svc *listFilesByOwnerIDServiceImpl) Execute(ctx context.Context, req *ListFilesByOwnerIDRequestDTO) (*FilesResponseDTO, error) {
	//
	// STEP 1: Validation
	//
	if req == nil {
		svc.logger.Warn("Failed validation with nil request")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "Owner ID is required")
	}
	// BUGFIX: gocql.UUID.String() never returns "" (a zero UUID formats as
	// "00000000-..."), so the previous `req.OwnerID.String() == ""` check
	// was dead code. Compare against the zero value instead.
	if req.OwnerID == (gocql.UUID{}) {
		svc.logger.Warn("Empty owner ID provided")
		return nil, httperror.NewForBadRequestWithSingleField("owner_id", "Owner ID is required")
	}
	//
	// STEP 2: Get user ID from context (for authorization)
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 3: Check if the requesting user can access files owned by the specified user
	// Only allow users to see their own files for privacy
	//
	if userID != req.OwnerID {
		// FIX: log/error text previously said "created" — copy-paste from the
		// created-by service; this check is about ownership.
		svc.logger.Warn("Unauthorized attempt to list files owned by another user",
			zap.Any("requesting_user_id", userID),
			zap.Any("owner_id", req.OwnerID))
		return nil, httperror.NewForForbiddenWithSingleField("message", "You can only view files you own")
	}
	//
	// STEP 4: Get files by owner_id
	//
	files, err := svc.getFilesByOwnerIDUseCase.Execute(req.OwnerID)
	if err != nil {
		svc.logger.Error("Failed to get files by owner_id",
			zap.Any("error", err),
			zap.Any("owner_id", req.OwnerID))
		return nil, err
	}
	//
	// STEP 5: Map domain models to response DTOs
	//
	response := &FilesResponseDTO{
		Files: make([]*FileResponseDTO, len(files)),
	}
	for i, file := range files {
		response.Files[i] = mapFileToDTO(file)
	}
	svc.logger.Debug("Found files by owner_id",
		zap.Int("count", len(files)),
		zap.Any("owner_id", req.OwnerID))
	return response, nil
}

View file

@ -0,0 +1,225 @@
// cloud/maplefile-backend/internal/maplefile/service/file/list_recent_files.go
package file
import (
"context"
"encoding/base64"
"encoding/json"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/crypto"
dom_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/file"
dom_tag "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/tag"
uc_filemetadata "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/filemetadata"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// RecentFileResponseDTO represents a recent file in the response. Timestamps
// are RFC 3339-formatted strings; the encrypted file key is returned in its
// deserialized structured form.
type RecentFileResponseDTO struct {
	ID                            gocql.UUID              `json:"id"`
	CollectionID                  gocql.UUID              `json:"collection_id"`
	OwnerID                       gocql.UUID              `json:"owner_id"`
	EncryptedMetadata             string                  `json:"encrypted_metadata"`
	EncryptedFileKey              crypto.EncryptedFileKey `json:"encrypted_file_key"` // zero value when the stored key could not be deserialized
	EncryptionVersion             string                  `json:"encryption_version"`
	EncryptedHash                 string                  `json:"encrypted_hash"`
	EncryptedFileSizeInBytes      int64                   `json:"encrypted_file_size_in_bytes"`
	EncryptedThumbnailSizeInBytes int64                   `json:"encrypted_thumbnail_size_in_bytes"`
	Tags                          []dom_tag.EmbeddedTag   `json:"tags"`
	CreatedAt                     string                  `json:"created_at"`
	ModifiedAt                    string                  `json:"modified_at"`
	Version                       uint64                  `json:"version"`
	State                         string                  `json:"state"`
}

// ListRecentFilesResponseDTO represents the response for listing recent files.
// NextCursor is an opaque base64 token to pass back for the next page.
type ListRecentFilesResponseDTO struct {
	Files      []RecentFileResponseDTO `json:"files"`
	NextCursor *string                 `json:"next_cursor,omitempty"`
	HasMore    bool                    `json:"has_more"`
	TotalCount int                     `json:"total_count"` // count of files in this page (see Execute)
}

// ListRecentFilesService returns a cursor-paginated listing of the
// authenticated user's recent files.
type ListRecentFilesService interface {
	Execute(ctx context.Context, cursor *string, limit int64) (*ListRecentFilesResponseDTO, error)
}

// listRecentFilesServiceImpl is the concrete implementation, backed by the
// file-metadata use case.
type listRecentFilesServiceImpl struct {
	config                 *config.Configuration
	logger                 *zap.Logger
	listRecentFilesUseCase uc_filemetadata.ListRecentFilesUseCase
}
// NewListRecentFilesService constructs the service with its collaborators
// and scopes log output under a dedicated logger name.
func NewListRecentFilesService(
	config *config.Configuration,
	logger *zap.Logger,
	listRecentFilesUseCase uc_filemetadata.ListRecentFilesUseCase,
) ListRecentFilesService {
	namedLogger := logger.Named("ListRecentFilesService")
	svc := &listRecentFilesServiceImpl{
		config:                 config,
		logger:                 namedLogger,
		listRecentFilesUseCase: listRecentFilesUseCase,
	}
	return svc
}
// Execute returns one page of the authenticated user's recent files.
//
// The opaque cursor is base64-encoded JSON of dom_file.RecentFilesCursor;
// limit defaults to 30 and is capped at 100. Each file's stored
// EncryptedFileKey JSON is deserialized defensively: on any failure the file
// is still returned, just with a zero-value key, so one corrupt row cannot
// break the whole listing.
func (svc *listRecentFilesServiceImpl) Execute(ctx context.Context, cursor *string, limit int64) (*ListRecentFilesResponseDTO, error) {
	//
	// STEP 1: Get user ID from context
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 2: Parse cursor if provided
	//
	var parsedCursor *dom_file.RecentFilesCursor
	if cursor != nil && *cursor != "" {
		// Decode base64 cursor
		cursorBytes, err := base64.StdEncoding.DecodeString(*cursor)
		if err != nil {
			svc.logger.Error("Failed to decode cursor",
				zap.String("cursor", *cursor),
				zap.Error(err))
			return nil, httperror.NewForBadRequestWithSingleField("cursor", "Invalid cursor format")
		}
		// Parse JSON cursor
		var cursorData dom_file.RecentFilesCursor
		if err := json.Unmarshal(cursorBytes, &cursorData); err != nil {
			svc.logger.Error("Failed to parse cursor",
				zap.String("cursor", *cursor),
				zap.Error(err))
			return nil, httperror.NewForBadRequestWithSingleField("cursor", "Invalid cursor format")
		}
		parsedCursor = &cursorData
	}
	//
	// STEP 3: Set default limit if not provided
	//
	if limit <= 0 {
		limit = 30 // Default limit
	}
	if limit > 100 {
		limit = 100 // Max limit
	}
	svc.logger.Debug("Processing recent files request",
		zap.Any("user_id", userID),
		zap.Int64("limit", limit),
		zap.Any("cursor", parsedCursor))
	//
	// STEP 4: Call use case to get recent files
	//
	response, err := svc.listRecentFilesUseCase.Execute(ctx, userID, parsedCursor, limit)
	if err != nil {
		svc.logger.Error("Failed to get recent files",
			zap.Any("user_id", userID),
			zap.Error(err))
		return nil, err
	}
	//
	// STEP 5: Convert domain response to service DTO
	//
	// The branches below are diagnostic only: whatever happens, the file is
	// emitted, at worst with a zero-value encryptedFileKey.
	files := make([]RecentFileResponseDTO, len(response.Files))
	for i, file := range response.Files {
		// Deserialize encrypted file key
		var encryptedFileKey crypto.EncryptedFileKey
		if file.EncryptedFileKey == "" {
			svc.logger.Warn("Encrypted file key is empty in database for file",
				zap.String("file_id", file.ID.String()))
			// Continue with empty key rather than failing entirely
		} else if err := json.Unmarshal([]byte(file.EncryptedFileKey), &encryptedFileKey); err != nil {
			svc.logger.Warn("Failed to deserialize encrypted file key for file",
				zap.String("file_id", file.ID.String()),
				zap.Int("encrypted_key_length", len(file.EncryptedFileKey)),
				zap.String("encrypted_key_preview", truncateString(file.EncryptedFileKey, 100)),
				zap.Error(err))
			// Continue with empty key rather than failing entirely
		} else if len(encryptedFileKey.Ciphertext) == 0 || len(encryptedFileKey.Nonce) == 0 {
			// Deserialization succeeded but resulted in empty ciphertext/nonce
			// This can happen if the base64 decoding in custom UnmarshalJSON fails silently
			svc.logger.Warn("Encrypted file key deserialized but has empty ciphertext or nonce",
				zap.String("file_id", file.ID.String()),
				zap.Int("ciphertext_len", len(encryptedFileKey.Ciphertext)),
				zap.Int("nonce_len", len(encryptedFileKey.Nonce)),
				zap.String("encrypted_key_preview", truncateString(file.EncryptedFileKey, 200)))
		} else {
			// Successfully deserialized - log for debugging
			svc.logger.Debug("Successfully deserialized encrypted file key",
				zap.String("file_id", file.ID.String()),
				zap.Int("ciphertext_len", len(encryptedFileKey.Ciphertext)),
				zap.Int("nonce_len", len(encryptedFileKey.Nonce)),
				zap.Int("key_version", encryptedFileKey.KeyVersion))
		}
		files[i] = RecentFileResponseDTO{
			ID:                            file.ID,
			CollectionID:                  file.CollectionID,
			OwnerID:                       file.OwnerID,
			EncryptedMetadata:             file.EncryptedMetadata,
			EncryptedFileKey:              encryptedFileKey,
			EncryptionVersion:             file.EncryptionVersion,
			EncryptedHash:                 file.EncryptedHash,
			EncryptedFileSizeInBytes:      file.EncryptedFileSizeInBytes,
			EncryptedThumbnailSizeInBytes: file.EncryptedThumbnailSizeInBytes,
			Tags:                          file.Tags,
			// "2006-01-02T15:04:05Z07:00" is the RFC 3339 reference layout
			// (identical to time.RFC3339).
			CreatedAt:  file.CreatedAt.Format("2006-01-02T15:04:05Z07:00"),
			ModifiedAt: file.ModifiedAt.Format("2006-01-02T15:04:05Z07:00"),
			Version:    file.Version,
			State:      file.State,
		}
	}
	//
	// STEP 6: Encode next cursor if present
	//
	// NOTE(review): a cursor marshal failure is logged and swallowed — the
	// client then receives no next_cursor even though HasMore may be true.
	var encodedNextCursor *string
	if response.NextCursor != nil {
		cursorBytes, err := json.Marshal(response.NextCursor)
		if err != nil {
			svc.logger.Error("Failed to marshal next cursor",
				zap.Any("cursor", response.NextCursor),
				zap.Error(err))
		} else {
			cursorStr := base64.StdEncoding.EncodeToString(cursorBytes)
			encodedNextCursor = &cursorStr
		}
	}
	//
	// STEP 7: Prepare response
	//
	serviceResponse := &ListRecentFilesResponseDTO{
		Files:      files,
		NextCursor: encodedNextCursor,
		HasMore:    response.HasMore,
		// NOTE(review): this is the size of the returned page, not an
		// overall total across all pages.
		TotalCount: len(files),
	}
	svc.logger.Info("Successfully served recent files",
		zap.Any("user_id", userID),
		zap.Int("files_count", len(files)),
		zap.Bool("has_more", response.HasMore),
		zap.Any("next_cursor", encodedNextCursor))
	return serviceResponse, nil
}
// truncateString shortens s to at most maxLen bytes for log previews,
// appending "..." when anything was cut off.
//
// FIX: the previous naive s[:maxLen] could split a multi-byte UTF-8 rune,
// producing an invalid-UTF-8 preview in logs; the cut position is now backed
// up to the nearest rune boundary. A non-positive maxLen (which previously
// could panic on a negative slice index) now yields just "...".
func truncateString(s string, maxLen int) string {
	if len(s) <= maxLen {
		return s
	}
	if maxLen <= 0 {
		return "..."
	}
	cut := maxLen
	// Bytes of the form 0b10xxxxxx are UTF-8 continuation bytes; never cut
	// immediately before one.
	for cut > 0 && s[cut]&0xC0 == 0x80 {
		cut--
	}
	return s[:cut] + "..."
}

View file

@ -0,0 +1,143 @@
// monorepo/cloud/backend/internal/maplefile/service/file/list_sync_data.go
package file
import (
"context"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
dom_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/file"
uc_filemetadata "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/filemetadata"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// ListFileSyncDataService returns cursor-paginated file sync data (compact
// id/version/state records) limited to collections the authenticated user
// can access.
type ListFileSyncDataService interface {
	Execute(ctx context.Context, cursor *dom_file.FileSyncCursor, limit int64) (*dom_file.FileSyncResponse, error)
}

// listFileSyncDataServiceImpl is the concrete implementation, backed by the
// collection repository (to compute the accessible-collection set) and the
// file-metadata sync use case.
type listFileSyncDataServiceImpl struct {
	config                  *config.Configuration
	logger                  *zap.Logger
	listFileSyncDataUseCase uc_filemetadata.ListFileMetadataSyncDataUseCase
	collectionRepository    dom_collection.CollectionRepository
}
// NewListFileSyncDataService constructs the service with its collaborators
// and scopes log output under a dedicated logger name.
func NewListFileSyncDataService(
	config *config.Configuration,
	logger *zap.Logger,
	listFileSyncDataUseCase uc_filemetadata.ListFileMetadataSyncDataUseCase,
	collectionRepository dom_collection.CollectionRepository,
) ListFileSyncDataService {
	namedLogger := logger.Named("ListFileSyncDataService")
	svc := &listFileSyncDataServiceImpl{
		config:                  config,
		logger:                  namedLogger,
		listFileSyncDataUseCase: listFileSyncDataUseCase,
		collectionRepository:    collectionRepository,
	}
	return svc
}
// Execute returns one page of file sync data restricted to collections the
// user can see.
//
// Accessible collections = active collections the user owns plus active
// collections shared with them; if that set is empty, an empty page is
// returned without querying the file store. The caller-supplied cursor and
// limit are passed through to the use case untouched.
func (svc *listFileSyncDataServiceImpl) Execute(ctx context.Context, cursor *dom_file.FileSyncCursor, limit int64) (*dom_file.FileSyncResponse, error) {
	//
	// STEP 1: Get user ID from context
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 2: Get accessible collections for the user
	//
	svc.logger.Debug("Getting accessible collections for file sync",
		zap.String("user_id", userID.String()))
	// Get collections where user is owner
	ownedCollections, err := svc.collectionRepository.GetAllByUserID(ctx, userID)
	if err != nil {
		svc.logger.Error("Failed to get owned collections",
			zap.String("user_id", userID.String()),
			zap.Error(err))
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Failed to get accessible collections")
	}
	// Get collections shared with user
	sharedCollections, err := svc.collectionRepository.GetCollectionsSharedWithUser(ctx, userID)
	if err != nil {
		svc.logger.Error("Failed to get shared collections",
			zap.String("user_id", userID.String()),
			zap.Error(err))
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Failed to get accessible collections")
	}
	// Combine owned and shared collections.
	// NOTE(review): the two lists are concatenated without de-duplication —
	// presumably a collection cannot appear in both; confirm against the
	// repository's contract.
	var accessibleCollectionIDs []gocql.UUID
	for _, coll := range ownedCollections {
		if coll.State == "active" { // Only include active collections
			accessibleCollectionIDs = append(accessibleCollectionIDs, coll.ID)
		}
	}
	for _, coll := range sharedCollections {
		if coll.State == "active" { // Only include active collections
			accessibleCollectionIDs = append(accessibleCollectionIDs, coll.ID)
		}
	}
	svc.logger.Debug("Found accessible collections for file sync",
		zap.String("user_id", userID.String()),
		zap.Int("owned_count", len(ownedCollections)),
		zap.Int("shared_count", len(sharedCollections)),
		zap.Int("total_accessible", len(accessibleCollectionIDs)))
	// If no accessible collections, return empty response
	if len(accessibleCollectionIDs) == 0 {
		svc.logger.Info("User has no accessible collections for file sync",
			zap.String("user_id", userID.String()))
		return &dom_file.FileSyncResponse{
			Files:      []dom_file.FileSyncItem{},
			NextCursor: nil,
			HasMore:    false,
		}, nil
	}
	//
	// STEP 3: List file sync data for accessible collections
	//
	syncData, err := svc.listFileSyncDataUseCase.Execute(ctx, userID, cursor, limit, accessibleCollectionIDs)
	if err != nil {
		svc.logger.Error("Failed to list file sync data",
			zap.Any("error", err),
			zap.String("user_id", userID.String()))
		return nil, err
	}
	if syncData == nil {
		svc.logger.Debug("File sync data not found",
			zap.String("user_id", userID.String()))
		return nil, httperror.NewForNotFoundWithSingleField("message", "File sync results not found")
	}
	// Log sync data with all fields including EncryptedFileSizeInBytes
	svc.logger.Debug("File sync data successfully retrieved",
		zap.String("user_id", userID.String()),
		zap.Any("next_cursor", syncData.NextCursor),
		zap.Int("files_count", len(syncData.Files)))
	// Verify each item has all fields populated including EncryptedFileSizeInBytes.
	// NOTE(review): this per-item Debug loop runs on every request; it is
	// diagnostic only and could be removed once the field is trusted.
	for i, item := range syncData.Files {
		svc.logger.Debug("Returning file sync item",
			zap.Int("index", i),
			zap.String("file_id", item.ID.String()),
			zap.String("collection_id", item.CollectionID.String()),
			zap.Uint64("version", item.Version),
			zap.String("state", item.State),
			zap.Int64("encrypted_file_size_in_bytes", item.EncryptedFileSizeInBytes))
	}
	return syncData, nil
}

View file

@ -0,0 +1,178 @@
package file
import (
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
dom_tag "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/tag"
uc_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/collection"
uc_filemetadata "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/filemetadata"
uc_fileobjectstorage "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/fileobjectstorage"
uc_storagedailyusage "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/storagedailyusage"
uc_storageusageevent "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/storageusageevent"
uc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/user"
)
// Wire providers for file services
// ProvideGetFileService is the Wire provider that assembles GetFileService
// from its dependencies.
func ProvideGetFileService(
	cfg *config.Configuration,
	logger *zap.Logger,
	collectionRepo dom_collection.CollectionRepository,
	getMetadataUseCase uc_filemetadata.GetFileMetadataUseCase,
) GetFileService {
	service := NewGetFileService(cfg, logger, collectionRepo, getMetadataUseCase)
	return service
}
// ProvideUpdateFileService constructs the UpdateFileService for Wire dependency injection.
func ProvideUpdateFileService(
	cfg *config.Configuration,
	logger *zap.Logger,
	collectionRepo dom_collection.CollectionRepository,
	getMetadataUseCase uc_filemetadata.GetFileMetadataUseCase,
	updateMetadataUseCase uc_filemetadata.UpdateFileMetadataUseCase,
) UpdateFileService {
	return NewUpdateFileService(cfg, logger, collectionRepo, getMetadataUseCase, updateMetadataUseCase)
}
// ProvideSoftDeleteFileService constructs the SoftDeleteFileService for Wire
// dependency injection, wiring in metadata, object-storage, quota and
// usage-tracking collaborators.
func ProvideSoftDeleteFileService(
	cfg *config.Configuration,
	logger *zap.Logger,
	collectionRepo dom_collection.CollectionRepository,
	getMetadataUseCase uc_filemetadata.GetFileMetadataUseCase,
	updateFileMetadataUseCase uc_filemetadata.UpdateFileMetadataUseCase,
	softDeleteMetadataUseCase uc_filemetadata.SoftDeleteFileMetadataUseCase,
	hardDeleteMetadataUseCase uc_filemetadata.HardDeleteFileMetadataUseCase,
	deleteDataUseCase uc_fileobjectstorage.DeleteEncryptedDataUseCase,
	listFilesByOwnerIDService ListFilesByOwnerIDService,
	storageQuotaHelperUseCase uc_user.UserStorageQuotaHelperUseCase,
	createStorageUsageEventUseCase uc_storageusageevent.CreateStorageUsageEventUseCase,
	updateStorageUsageUseCase uc_storagedailyusage.UpdateStorageUsageUseCase,
) SoftDeleteFileService {
	return NewSoftDeleteFileService(cfg, logger, collectionRepo, getMetadataUseCase, updateFileMetadataUseCase, softDeleteMetadataUseCase, hardDeleteMetadataUseCase, deleteDataUseCase, listFilesByOwnerIDService, storageQuotaHelperUseCase, createStorageUsageEventUseCase, updateStorageUsageUseCase)
}
// ProvideDeleteMultipleFilesService constructs the DeleteMultipleFilesService
// for Wire dependency injection.
func ProvideDeleteMultipleFilesService(
	cfg *config.Configuration,
	logger *zap.Logger,
	collectionRepo dom_collection.CollectionRepository,
	getMetadataByIDsUseCase uc_filemetadata.GetFileMetadataByIDsUseCase,
	deleteMetadataManyUseCase uc_filemetadata.DeleteManyFileMetadataUseCase,
	deleteMultipleDataUseCase uc_fileobjectstorage.DeleteMultipleEncryptedDataUseCase,
	createStorageUsageEventUseCase uc_storageusageevent.CreateStorageUsageEventUseCase,
	updateStorageUsageUseCase uc_storagedailyusage.UpdateStorageUsageUseCase,
) DeleteMultipleFilesService {
	return NewDeleteMultipleFilesService(cfg, logger, collectionRepo, getMetadataByIDsUseCase, deleteMetadataManyUseCase, deleteMultipleDataUseCase, createStorageUsageEventUseCase, updateStorageUsageUseCase)
}
// ProvideListFilesByCollectionService constructs the ListFilesByCollectionService
// for Wire dependency injection.
func ProvideListFilesByCollectionService(
	cfg *config.Configuration,
	logger *zap.Logger,
	collectionRepo dom_collection.CollectionRepository,
	getFilesByCollectionUseCase uc_filemetadata.GetFileMetadataByCollectionUseCase,
) ListFilesByCollectionService {
	return NewListFilesByCollectionService(cfg, logger, collectionRepo, getFilesByCollectionUseCase)
}
// ProvideListFilesByCreatedByUserIDService constructs the
// ListFilesByCreatedByUserIDService for Wire dependency injection.
func ProvideListFilesByCreatedByUserIDService(
	cfg *config.Configuration,
	logger *zap.Logger,
	getFilesByCreatedByUserIDUseCase uc_filemetadata.GetFileMetadataByCreatedByUserIDUseCase,
) ListFilesByCreatedByUserIDService {
	return NewListFilesByCreatedByUserIDService(cfg, logger, getFilesByCreatedByUserIDUseCase)
}
// ProvideListFilesByOwnerIDService constructs the ListFilesByOwnerIDService
// for Wire dependency injection.
func ProvideListFilesByOwnerIDService(
	cfg *config.Configuration,
	logger *zap.Logger,
	getFilesByOwnerIDUseCase uc_filemetadata.GetFileMetadataByOwnerIDUseCase,
) ListFilesByOwnerIDService {
	return NewListFilesByOwnerIDService(cfg, logger, getFilesByOwnerIDUseCase)
}
// ProvideListRecentFilesService constructs the ListRecentFilesService for
// Wire dependency injection.
func ProvideListRecentFilesService(
	cfg *config.Configuration,
	logger *zap.Logger,
	listRecentFilesUseCase uc_filemetadata.ListRecentFilesUseCase,
) ListRecentFilesService {
	return NewListRecentFilesService(cfg, logger, listRecentFilesUseCase)
}
// ProvideListFileSyncDataService constructs the ListFileSyncDataService for
// Wire dependency injection.
func ProvideListFileSyncDataService(
	cfg *config.Configuration,
	logger *zap.Logger,
	listSyncDataUseCase uc_filemetadata.ListFileMetadataSyncDataUseCase,
	collectionRepo dom_collection.CollectionRepository,
) ListFileSyncDataService {
	return NewListFileSyncDataService(cfg, logger, listSyncDataUseCase, collectionRepo)
}
// ProvideCreatePendingFileService constructs the CreatePendingFileService for
// Wire dependency injection.
func ProvideCreatePendingFileService(
	cfg *config.Configuration,
	logger *zap.Logger,
	getCollectionUseCase uc_collection.GetCollectionUseCase,
	checkCollectionAccessUseCase uc_collection.CheckCollectionAccessUseCase,
	checkFileExistsUseCase uc_filemetadata.CheckFileExistsUseCase,
	createMetadataUseCase uc_filemetadata.CreateFileMetadataUseCase,
	generatePresignedUploadURLUseCase uc_fileobjectstorage.GeneratePresignedUploadURLUseCase,
	storageQuotaHelperUseCase uc_user.UserStorageQuotaHelperUseCase,
	tagRepo dom_tag.Repository,
) CreatePendingFileService {
	return NewCreatePendingFileService(cfg, logger, getCollectionUseCase, checkCollectionAccessUseCase, checkFileExistsUseCase, createMetadataUseCase, generatePresignedUploadURLUseCase, storageQuotaHelperUseCase, tagRepo)
}
// ProvideCompleteFileUploadService constructs the CompleteFileUploadService
// for Wire dependency injection.
func ProvideCompleteFileUploadService(
	cfg *config.Configuration,
	logger *zap.Logger,
	collectionRepo dom_collection.CollectionRepository,
	getMetadataUseCase uc_filemetadata.GetFileMetadataUseCase,
	updateMetadataUseCase uc_filemetadata.UpdateFileMetadataUseCase,
	verifyObjectExistsUseCase uc_fileobjectstorage.VerifyObjectExistsUseCase,
	getObjectSizeUseCase uc_fileobjectstorage.GetObjectSizeUseCase,
	deleteDataUseCase uc_fileobjectstorage.DeleteEncryptedDataUseCase,
	storageQuotaHelperUseCase uc_user.UserStorageQuotaHelperUseCase,
	createStorageUsageEventUseCase uc_storageusageevent.CreateStorageUsageEventUseCase,
	updateStorageUsageUseCase uc_storagedailyusage.UpdateStorageUsageUseCase,
) CompleteFileUploadService {
	return NewCompleteFileUploadService(cfg, logger, collectionRepo, getMetadataUseCase, updateMetadataUseCase, verifyObjectExistsUseCase, getObjectSizeUseCase, deleteDataUseCase, storageQuotaHelperUseCase, createStorageUsageEventUseCase, updateStorageUsageUseCase)
}
// ProvideGetPresignedUploadURLService constructs the
// GetPresignedUploadURLService for Wire dependency injection.
func ProvideGetPresignedUploadURLService(
	cfg *config.Configuration,
	logger *zap.Logger,
	collectionRepo dom_collection.CollectionRepository,
	getMetadataUseCase uc_filemetadata.GetFileMetadataUseCase,
	generatePresignedUploadURLUseCase uc_fileobjectstorage.GeneratePresignedUploadURLUseCase,
) GetPresignedUploadURLService {
	return NewGetPresignedUploadURLService(cfg, logger, collectionRepo, getMetadataUseCase, generatePresignedUploadURLUseCase)
}
// ProvideGetPresignedDownloadURLService constructs the
// GetPresignedDownloadURLService for Wire dependency injection.
func ProvideGetPresignedDownloadURLService(
	cfg *config.Configuration,
	logger *zap.Logger,
	collectionRepo dom_collection.CollectionRepository,
	getMetadataUseCase uc_filemetadata.GetFileMetadataUseCase,
	generatePresignedDownloadURLUseCase uc_fileobjectstorage.GeneratePresignedDownloadURLUseCase,
) GetPresignedDownloadURLService {
	return NewGetPresignedDownloadURLService(cfg, logger, collectionRepo, getMetadataUseCase, generatePresignedDownloadURLUseCase)
}
// ProvideArchiveFileService constructs the ArchiveFileService for Wire
// dependency injection.
func ProvideArchiveFileService(
	cfg *config.Configuration,
	logger *zap.Logger,
	collectionRepo dom_collection.CollectionRepository,
	getMetadataUseCase uc_filemetadata.GetFileMetadataUseCase,
	updateMetadataUseCase uc_filemetadata.UpdateFileMetadataUseCase,
) ArchiveFileService {
	return NewArchiveFileService(cfg, logger, collectionRepo, getMetadataUseCase, updateMetadataUseCase)
}
// ProvideRestoreFileService constructs the RestoreFileService for Wire
// dependency injection.
func ProvideRestoreFileService(
	cfg *config.Configuration,
	logger *zap.Logger,
	collectionRepo dom_collection.CollectionRepository,
	getMetadataUseCase uc_filemetadata.GetFileMetadataUseCase,
	updateMetadataUseCase uc_filemetadata.UpdateFileMetadataUseCase,
) RestoreFileService {
	return NewRestoreFileService(cfg, logger, collectionRepo, getMetadataUseCase, updateMetadataUseCase)
}

View file

@ -0,0 +1,148 @@
// monorepo/cloud/backend/internal/maplefile/service/file/restore.go
package file
import (
"context"
"time"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
dom_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/file"
uc_filemetadata "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/filemetadata"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// RestoreFileRequestDTO is the request payload for restoring a soft-deleted
// or archived file back to the active state.
type RestoreFileRequestDTO struct {
	FileID gocql.UUID `json:"file_id"` // ID of the file to restore
}
// RestoreFileResponseDTO is the response payload returned after a restore
// attempt.
type RestoreFileResponseDTO struct {
	Success bool   `json:"success"` // true when the restore completed
	Message string `json:"message"` // human-readable outcome description
}
// RestoreFileService restores a soft-deleted file to the active state on
// behalf of the authenticated user in ctx.
type RestoreFileService interface {
	Execute(ctx context.Context, req *RestoreFileRequestDTO) (*RestoreFileResponseDTO, error)
}
// restoreFileServiceImpl is the default RestoreFileService implementation.
// It checks collection-level access via collectionRepo and reads/writes file
// metadata through the filemetadata use cases.
type restoreFileServiceImpl struct {
	config                *config.Configuration
	logger                *zap.Logger
	collectionRepo        dom_collection.CollectionRepository
	getMetadataUseCase    uc_filemetadata.GetFileMetadataUseCase
	updateMetadataUseCase uc_filemetadata.UpdateFileMetadataUseCase
}
// NewRestoreFileService constructs the RestoreFileService implementation,
// attaching a service-scoped named logger.
func NewRestoreFileService(
	config *config.Configuration,
	logger *zap.Logger,
	collectionRepo dom_collection.CollectionRepository,
	getMetadataUseCase uc_filemetadata.GetFileMetadataUseCase,
	updateMetadataUseCase uc_filemetadata.UpdateFileMetadataUseCase,
) RestoreFileService {
	svc := &restoreFileServiceImpl{
		logger:                logger.Named("RestoreFileService"),
		config:                config,
		collectionRepo:        collectionRepo,
		getMetadataUseCase:    getMetadataUseCase,
		updateMetadataUseCase: updateMetadataUseCase,
	}
	return svc
}
// Execute restores a file to the active state. It validates the request,
// resolves the caller from ctx, verifies read-write access to the file's
// collection, checks the state transition is legal, then persists the file
// with State=Active, a bumped version, and updated modification audit fields.
func (svc *restoreFileServiceImpl) Execute(ctx context.Context, req *RestoreFileRequestDTO) (*RestoreFileResponseDTO, error) {
	//
	// STEP 1: Validation
	//
	if req == nil {
		svc.logger.Warn("Failed validation with nil request")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "File ID is required")
	}
	// BUGFIX: gocql.UUID.String() never returns "" (a zero UUID renders as
	// "00000000-..."), so the previous `req.FileID.String() == ""` check was
	// dead code. Compare against the zero UUID value instead.
	if req.FileID == (gocql.UUID{}) {
		svc.logger.Warn("Empty file ID provided")
		return nil, httperror.NewForBadRequestWithSingleField("file_id", "File ID is required")
	}
	//
	// STEP 2: Get user ID from context
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 3: Get file metadata (including any state for restoration)
	//
	file, err := svc.getMetadataUseCase.Execute(req.FileID)
	if err != nil {
		svc.logger.Error("Failed to get file metadata",
			zap.Any("error", err),
			zap.Any("file_id", req.FileID))
		return nil, err
	}
	//
	// STEP 4: Check if user has write access to the file's collection
	//
	hasAccess, err := svc.collectionRepo.CheckAccess(ctx, file.CollectionID, userID, dom_collection.CollectionPermissionReadWrite)
	if err != nil {
		svc.logger.Error("Failed to check collection access",
			zap.Any("error", err),
			zap.Any("collection_id", file.CollectionID),
			zap.Any("user_id", userID))
		return nil, err
	}
	if !hasAccess {
		svc.logger.Warn("Unauthorized file restore attempt",
			zap.Any("user_id", userID),
			zap.Any("file_id", req.FileID),
			zap.Any("collection_id", file.CollectionID))
		return nil, httperror.NewForForbiddenWithSingleField("message", "You don't have permission to restore this file")
	}
	//
	// STEP 5: Validate state transition (e.g. Deleted -> Active must be legal)
	//
	err = dom_file.IsValidStateTransition(file.State, dom_file.FileStateActive)
	if err != nil {
		svc.logger.Warn("Invalid state transition for file restore",
			zap.Any("file_id", req.FileID),
			zap.String("current_state", file.State),
			zap.String("target_state", dom_file.FileStateActive),
			zap.Error(err))
		return nil, httperror.NewForBadRequestWithSingleField("state", err.Error())
	}
	//
	// STEP 6: Restore the file
	//
	file.State = dom_file.FileStateActive
	file.Version++ // Mutation means we increment version.
	file.ModifiedAt = time.Now()
	file.ModifiedByUserID = userID
	err = svc.updateMetadataUseCase.Execute(ctx, file)
	if err != nil {
		svc.logger.Error("Failed to restore file",
			zap.Any("error", err),
			zap.Any("file_id", req.FileID))
		return nil, err
	}
	svc.logger.Info("File restored successfully",
		zap.Any("file_id", req.FileID),
		zap.Any("collection_id", file.CollectionID),
		zap.Any("user_id", userID))
	return &RestoreFileResponseDTO{
		Success: true,
		Message: "File restored successfully",
	}, nil
}

View file

@ -0,0 +1,429 @@
// monorepo/cloud/backend/internal/maplefile/service/file/softdelete.go
package file
import (
"context"
"time"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
uc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/user"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
dom_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/file"
uc_filemetadata "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/filemetadata"
uc_fileobjectstorage "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/fileobjectstorage"
uc_storagedailyusage "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/storagedailyusage"
uc_storageusageevent "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/storageusageevent"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/transaction"
)
// SoftDeleteFileRequestDTO is the request payload for deleting a file.
type SoftDeleteFileRequestDTO struct {
	FileID          gocql.UUID `json:"file_id"`           // ID of the file to delete
	ForceHardDelete bool       `json:"force_hard_delete"` // Skip tombstone for GDPR right-to-be-forgotten
}
// SoftDeleteFileResponseDTO is the response payload returned after a delete
// attempt.
type SoftDeleteFileResponseDTO struct {
	Success       bool   `json:"success"`        // true when the deletion completed
	Message       string `json:"message"`        // human-readable outcome description
	ReleasedBytes int64  `json:"released_bytes"` // Amount of storage quota released
}
// SoftDeleteFileService deletes a file (tombstoned by default, permanently
// when ForceHardDelete is set) on behalf of the authenticated user in ctx.
type SoftDeleteFileService interface {
	Execute(ctx context.Context, req *SoftDeleteFileRequestDTO) (*SoftDeleteFileResponseDTO, error)
}
// softDeleteFileServiceImpl is the default SoftDeleteFileService
// implementation. Besides metadata and object-storage use cases it carries
// quota and usage-tracking collaborators so deletions keep the owner's
// storage accounting consistent.
type softDeleteFileServiceImpl struct {
	config                    *config.Configuration
	logger                    *zap.Logger
	collectionRepo            dom_collection.CollectionRepository
	getMetadataUseCase        uc_filemetadata.GetFileMetadataUseCase
	updateFileMetadataUseCase uc_filemetadata.UpdateFileMetadataUseCase
	softDeleteMetadataUseCase uc_filemetadata.SoftDeleteFileMetadataUseCase
	hardDeleteMetadataUseCase uc_filemetadata.HardDeleteFileMetadataUseCase
	deleteDataUseCase         uc_fileobjectstorage.DeleteEncryptedDataUseCase
	listFilesByOwnerIDService ListFilesByOwnerIDService // used only for debug listings on lookup failure
	// Storage quota management
	storageQuotaHelperUseCase uc_user.UserStorageQuotaHelperUseCase
	// Storage usage tracking (events + daily aggregates)
	createStorageUsageEventUseCase uc_storageusageevent.CreateStorageUsageEventUseCase
	updateStorageUsageUseCase      uc_storagedailyusage.UpdateStorageUsageUseCase
}
// NewSoftDeleteFileService constructs the SoftDeleteFileService
// implementation with its metadata, object-storage, quota and usage-tracking
// collaborators, attaching a service-scoped named logger.
func NewSoftDeleteFileService(
	config *config.Configuration,
	logger *zap.Logger,
	collectionRepo dom_collection.CollectionRepository,
	getMetadataUseCase uc_filemetadata.GetFileMetadataUseCase,
	updateFileMetadataUseCase uc_filemetadata.UpdateFileMetadataUseCase,
	softDeleteMetadataUseCase uc_filemetadata.SoftDeleteFileMetadataUseCase,
	hardDeleteMetadataUseCase uc_filemetadata.HardDeleteFileMetadataUseCase,
	deleteDataUseCase uc_fileobjectstorage.DeleteEncryptedDataUseCase,
	listFilesByOwnerIDService ListFilesByOwnerIDService,
	storageQuotaHelperUseCase uc_user.UserStorageQuotaHelperUseCase,
	createStorageUsageEventUseCase uc_storageusageevent.CreateStorageUsageEventUseCase,
	updateStorageUsageUseCase uc_storagedailyusage.UpdateStorageUsageUseCase,
) SoftDeleteFileService {
	svc := &softDeleteFileServiceImpl{
		logger:                         logger.Named("SoftDeleteFileService"),
		config:                         config,
		collectionRepo:                 collectionRepo,
		getMetadataUseCase:             getMetadataUseCase,
		updateFileMetadataUseCase:      updateFileMetadataUseCase,
		softDeleteMetadataUseCase:      softDeleteMetadataUseCase,
		hardDeleteMetadataUseCase:      hardDeleteMetadataUseCase,
		deleteDataUseCase:              deleteDataUseCase,
		listFilesByOwnerIDService:      listFilesByOwnerIDService,
		storageQuotaHelperUseCase:      storageQuotaHelperUseCase,
		createStorageUsageEventUseCase: createStorageUsageEventUseCase,
		updateStorageUsageUseCase:      updateStorageUsageUseCase,
	}
	return svc
}
// Execute deletes a file: soft delete (30-day tombstone) by default, or
// permanent hard delete when ForceHardDelete is set (GDPR right-to-be-
// forgotten). Each mutating step registers a SAGA compensation so a failure
// part-way through rolls earlier steps back. Quota release and usage events
// apply only to files that were Active (or reserved quota for Pending files).
// Final S3 object deletion is best-effort and never fails the request.
func (svc *softDeleteFileServiceImpl) Execute(ctx context.Context, req *SoftDeleteFileRequestDTO) (*SoftDeleteFileResponseDTO, error) {
	//
	// STEP 1: Validation
	//
	if req == nil {
		svc.logger.Warn("Failed validation with nil request")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "File ID is required")
	}
	// BUGFIX: gocql.UUID.String() never returns "", so the previous
	// `req.FileID.String() == ""` check could never trigger. Compare against
	// the zero UUID value instead.
	if req.FileID == (gocql.UUID{}) {
		svc.logger.Warn("Empty file ID provided")
		return nil, httperror.NewForBadRequestWithSingleField("file_id", "File ID is required")
	}
	//
	// STEP 2: Get user ID from context
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 3: Get file metadata
	//
	file, err := svc.getMetadataUseCase.Execute(req.FileID)
	if err != nil {
		svc.logger.Error("Failed to get file metadata",
			zap.Any("error", err),
			zap.Any("file_id", req.FileID))
		svc.logger.Debug("Debugging started, will list all files that belong to the authenticated user")
		// BUGFIX: this debug listing previously shadowed `err` with `:=`; when
		// the listing succeeded, the function returned (nil, nil) — a nil
		// response with no error. Use a distinct variable and always return
		// the original metadata-lookup error.
		currentFiles, listErr := svc.listFilesByOwnerIDService.Execute(ctx, &ListFilesByOwnerIDRequestDTO{OwnerID: userID})
		if listErr != nil {
			svc.logger.Error("Failed to list files by owner ID",
				zap.Any("error", listErr),
				zap.Any("user_id", userID))
			return nil, err
		}
		for _, f := range currentFiles.Files {
			svc.logger.Debug("File",
				zap.Any("id", f.ID))
		}
		return nil, err
	}
	//
	// STEP 4: Check if user has write access to the file's collection
	//
	hasAccess, err := svc.collectionRepo.CheckAccess(ctx, file.CollectionID, userID, dom_collection.CollectionPermissionReadWrite)
	if err != nil {
		svc.logger.Error("Failed to check collection access",
			zap.Any("error", err),
			zap.Any("collection_id", file.CollectionID),
			zap.Any("user_id", userID))
		return nil, err
	}
	if !hasAccess {
		svc.logger.Warn("Unauthorized file deletion attempt",
			zap.Any("user_id", userID),
			zap.Any("file_id", req.FileID),
			zap.Any("collection_id", file.CollectionID))
		return nil, httperror.NewForForbiddenWithSingleField("message", "You don't have permission to delete this file")
	}
	// Only states that may legally transition to Deleted are accepted.
	if err := dom_file.IsValidStateTransition(file.State, dom_file.FileStateDeleted); err != nil {
		svc.logger.Warn("Invalid file state transition",
			zap.Any("user_id", userID),
			zap.Error(err))
		return nil, err
	}
	//
	// SAGA: Initialize distributed transaction manager
	//
	saga := transaction.NewSaga("soft-delete-file", svc.logger)
	//
	// STEP 5: Calculate storage space to be released
	//
	totalFileSize := file.EncryptedFileSizeInBytes + file.EncryptedThumbnailSizeInBytes
	svc.logger.Info("Starting file soft-delete with SAGA protection",
		zap.String("file_id", req.FileID.String()),
		zap.Int64("file_size", file.EncryptedFileSizeInBytes),
		zap.Int64("thumbnail_size", file.EncryptedThumbnailSizeInBytes),
		zap.Int64("total_size_to_release", totalFileSize))
	//
	// STEP 6: Update file metadata with tombstone (SAGA protected)
	//
	originalState := file.State
	originalTombstoneVersion := file.TombstoneVersion
	originalTombstoneExpiry := file.TombstoneExpiry
	file.State = dom_file.FileStateDeleted
	file.Version++
	file.ModifiedAt = time.Now()
	file.ModifiedByUserID = userID
	file.TombstoneVersion = file.Version
	file.TombstoneExpiry = time.Now().Add(30 * 24 * time.Hour) // 30-day tombstone window
	if err := svc.updateFileMetadataUseCase.Execute(ctx, file); err != nil {
		svc.logger.Error("Failed to update file metadata with tombstone",
			zap.Error(err))
		saga.Rollback(ctx)
		return nil, err
	}
	// Register compensation: restore original metadata
	fileIDCaptured := file.ID
	originalStateCaptured := originalState
	originalTombstoneVersionCaptured := originalTombstoneVersion
	originalTombstoneExpiryCaptured := originalTombstoneExpiry
	saga.AddCompensation(func(ctx context.Context) error {
		svc.logger.Warn("SAGA compensation: restoring file metadata",
			zap.String("file_id", fileIDCaptured.String()))
		restoredFile, err := svc.getMetadataUseCase.Execute(fileIDCaptured)
		if err != nil {
			return err
		}
		restoredFile.State = originalStateCaptured
		restoredFile.TombstoneVersion = originalTombstoneVersionCaptured
		restoredFile.TombstoneExpiry = originalTombstoneExpiryCaptured
		restoredFile.ModifiedAt = time.Now()
		return svc.updateFileMetadataUseCase.Execute(ctx, restoredFile)
	})
	//
	// STEP 7: Delete file metadata record (SAGA protected)
	//
	if req.ForceHardDelete {
		// Hard delete - permanent removal for GDPR right-to-be-forgotten
		svc.logger.Info("Performing hard delete (GDPR mode) - no tombstone",
			zap.String("file_id", req.FileID.String()))
		err = svc.hardDeleteMetadataUseCase.Execute(req.FileID)
		if err != nil {
			svc.logger.Error("Failed to hard-delete file metadata",
				zap.Error(err))
			saga.Rollback(ctx) // Restores tombstone metadata
			return nil, err
		}
		// No compensation for hard delete - GDPR compliance requires permanent deletion
	} else {
		// Soft delete - 30-day tombstone (standard deletion)
		err = svc.softDeleteMetadataUseCase.Execute(req.FileID)
		if err != nil {
			svc.logger.Error("Failed to soft-delete file metadata",
				zap.Error(err))
			saga.Rollback(ctx) // Restores tombstone metadata
			return nil, err
		}
		// Register compensation: restore metadata record to active state
		saga.AddCompensation(func(ctx context.Context) error {
			svc.logger.Warn("SAGA compensation: restoring file metadata record to active state",
				zap.String("file_id", fileIDCaptured.String()))
			// Get the soft-deleted file
			deletedFile, err := svc.getMetadataUseCase.Execute(fileIDCaptured)
			if err != nil {
				return err
			}
			// Restore to active state
			deletedFile.State = dom_file.FileStateActive
			deletedFile.ModifiedAt = time.Now()
			deletedFile.Version++
			deletedFile.TombstoneVersion = 0
			deletedFile.TombstoneExpiry = time.Time{}
			return svc.updateFileMetadataUseCase.Execute(ctx, deletedFile)
		})
	}
	//
	// STEP 8: Update collection file count (SAGA protected)
	//
	if originalState == dom_file.FileStateActive {
		err = svc.collectionRepo.DecrementFileCount(ctx, file.CollectionID)
		if err != nil {
			svc.logger.Error("Failed to decrement file count for collection",
				zap.String("collection_id", file.CollectionID.String()),
				zap.Error(err))
			saga.Rollback(ctx)
			return nil, err
		}
		// Register compensation: increment the count back
		collectionIDCaptured := file.CollectionID
		saga.AddCompensation(func(ctx context.Context) error {
			svc.logger.Warn("SAGA compensation: restoring file count",
				zap.String("collection_id", collectionIDCaptured.String()))
			return svc.collectionRepo.IncrementFileCount(ctx, collectionIDCaptured)
		})
	}
	//
	// STEP 9: Release storage quota (SAGA protected)
	//
	var releasedBytes int64 = 0
	if originalState == dom_file.FileStateActive && totalFileSize > 0 {
		err = svc.storageQuotaHelperUseCase.OnFileDeleted(ctx, userID, totalFileSize)
		if err != nil {
			svc.logger.Error("Failed to release storage quota after file deletion",
				zap.Error(err))
			saga.Rollback(ctx) // Restores metadata + tombstone
			return nil, err
		}
		// Register compensation: re-reserve the released quota
		totalFileSizeCaptured := totalFileSize
		userIDCaptured := userID
		saga.AddCompensation(func(ctx context.Context) error {
			svc.logger.Warn("SAGA compensation: re-reserving released storage quota",
				zap.Int64("size", totalFileSizeCaptured))
			return svc.storageQuotaHelperUseCase.CheckAndReserveQuota(ctx, userIDCaptured, totalFileSizeCaptured)
		})
		releasedBytes = totalFileSize
		svc.logger.Info("Storage quota released successfully",
			zap.Int64("released_bytes", releasedBytes))
		//
		// STEP 10: Create storage usage event (SAGA protected)
		//
		err = svc.createStorageUsageEventUseCase.Execute(ctx, file.OwnerID, totalFileSize, "remove")
		if err != nil {
			svc.logger.Error("Failed to create storage usage event for deletion",
				zap.Error(err))
			saga.Rollback(ctx) // Restores quota + metadata
			return nil, err
		}
		// Register compensation: create compensating "add" event
		ownerIDCaptured := file.OwnerID
		saga.AddCompensation(func(ctx context.Context) error {
			svc.logger.Warn("SAGA compensation: creating compensating usage event")
			return svc.createStorageUsageEventUseCase.Execute(ctx, ownerIDCaptured, totalFileSizeCaptured, "add")
		})
		//
		// STEP 11: Update daily storage usage (SAGA protected)
		//
		today := time.Now().Truncate(24 * time.Hour)
		updateReq := &uc_storagedailyusage.UpdateStorageUsageRequest{
			UserID:      file.OwnerID,
			UsageDay:    &today,
			TotalBytes:  -totalFileSize,
			AddBytes:    0,
			RemoveBytes: totalFileSize,
			IsIncrement: true,
		}
		err = svc.updateStorageUsageUseCase.Execute(ctx, updateReq)
		if err != nil {
			svc.logger.Error("Failed to update daily storage usage for deletion",
				zap.Error(err))
			saga.Rollback(ctx) // Restores everything
			return nil, err
		}
		// Register compensation: reverse the usage update
		saga.AddCompensation(func(ctx context.Context) error {
			svc.logger.Warn("SAGA compensation: reversing daily usage update")
			compensateReq := &uc_storagedailyusage.UpdateStorageUsageRequest{
				UserID:      ownerIDCaptured,
				UsageDay:    &today,
				TotalBytes:  totalFileSizeCaptured, // Positive to reverse
				AddBytes:    totalFileSizeCaptured,
				RemoveBytes: 0,
				IsIncrement: true,
			}
			return svc.updateStorageUsageUseCase.Execute(ctx, compensateReq)
		})
	} else if originalState == dom_file.FileStatePending {
		// For pending files, release the reserved quota (SAGA protected)
		err = svc.storageQuotaHelperUseCase.ReleaseQuota(ctx, userID, totalFileSize)
		if err != nil {
			svc.logger.Error("Failed to release reserved storage quota for pending file",
				zap.Error(err))
			saga.Rollback(ctx) // Restores metadata + tombstone
			return nil, err
		}
		// Register compensation: re-reserve the released quota
		totalFileSizeCaptured := totalFileSize
		userIDCaptured := userID
		saga.AddCompensation(func(ctx context.Context) error {
			svc.logger.Warn("SAGA compensation: re-reserving pending file quota")
			return svc.storageQuotaHelperUseCase.CheckAndReserveQuota(ctx, userIDCaptured, totalFileSizeCaptured)
		})
		releasedBytes = totalFileSize
		svc.logger.Info("Reserved storage quota released for pending file",
			zap.Int64("released_bytes", releasedBytes))
	}
	//
	// STEP 12: Delete S3 objects (best effort — metadata is already deleted)
	//
	var storagePaths []string
	storagePaths = append(storagePaths, file.EncryptedFileObjectKey)
	if file.EncryptedThumbnailObjectKey != "" {
		storagePaths = append(storagePaths, file.EncryptedThumbnailObjectKey)
	}
	svc.logger.Info("Deleting S3 objects for file",
		zap.String("file_id", req.FileID.String()),
		zap.Int("s3_objects_count", len(storagePaths)))
	for _, storagePath := range storagePaths {
		if err := svc.deleteDataUseCase.Execute(storagePath); err != nil {
			// Log but don't fail - S3 deletion is best effort after metadata is deleted
			svc.logger.Error("Failed to delete S3 object (continuing anyway)",
				zap.String("storage_path", storagePath),
				zap.Error(err))
		}
	}
	//
	// SUCCESS: All operations completed with SAGA protection
	//
	svc.logger.Info("File deleted successfully with SAGA protection",
		zap.String("file_id", req.FileID.String()),
		zap.String("collection_id", file.CollectionID.String()),
		zap.Int64("released_bytes", releasedBytes),
		zap.Int("s3_objects_deleted", len(storagePaths)))
	return &SoftDeleteFileResponseDTO{
		Success:       true,
		Message:       "File deleted successfully",
		ReleasedBytes: releasedBytes,
	}, nil
}

View file

@ -0,0 +1,178 @@
// monorepo/cloud/backend/internal/maplefile/service/file/update.go
package file
import (
"context"
"time"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/crypto"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
uc_filemetadata "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/filemetadata"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// UpdateFileRequestDTO carries a partial update to a file's encrypted
// metadata. Version must echo the version the client last fetched
// (optimistic concurrency); the other fields are optional and only applied
// when non-zero.
type UpdateFileRequestDTO struct {
	ID                gocql.UUID              `json:"id"`
	EncryptedMetadata string                  `json:"encrypted_metadata,omitempty"`
	EncryptedFileKey  crypto.EncryptedFileKey `json:"encrypted_file_key,omitempty"`
	EncryptionVersion string                  `json:"encryption_version,omitempty"`
	EncryptedHash     string                  `json:"encrypted_hash,omitempty"`
	Version           uint64                  `json:"version,omitempty"`
}
// UpdateFileService applies a version-checked partial update to a file's
// encrypted metadata on behalf of the authenticated user in ctx.
type UpdateFileService interface {
	Execute(ctx context.Context, req *UpdateFileRequestDTO) (*FileResponseDTO, error)
}
// updateFileServiceImpl is the default UpdateFileService implementation. It
// checks collection-level access via collectionRepo and reads/writes file
// metadata through the filemetadata use cases.
type updateFileServiceImpl struct {
	config                *config.Configuration
	logger                *zap.Logger
	collectionRepo        dom_collection.CollectionRepository
	getMetadataUseCase    uc_filemetadata.GetFileMetadataUseCase
	updateMetadataUseCase uc_filemetadata.UpdateFileMetadataUseCase
}
// NewUpdateFileService constructs the UpdateFileService implementation,
// attaching a service-scoped named logger.
func NewUpdateFileService(
	config *config.Configuration,
	logger *zap.Logger,
	collectionRepo dom_collection.CollectionRepository,
	getMetadataUseCase uc_filemetadata.GetFileMetadataUseCase,
	updateMetadataUseCase uc_filemetadata.UpdateFileMetadataUseCase,
) UpdateFileService {
	svc := &updateFileServiceImpl{
		logger:                logger.Named("UpdateFileService"),
		config:                config,
		collectionRepo:        collectionRepo,
		getMetadataUseCase:    getMetadataUseCase,
		updateMetadataUseCase: updateMetadataUseCase,
	}
	return svc
}
// Execute applies a partial, optimistically-versioned update to a file's
// encrypted metadata fields. The caller must have read-write access to the
// file's collection and must submit the file version it last fetched; a
// version mismatch is rejected so concurrent clients cannot clobber each
// other's changes. At least one updatable field must be supplied.
func (svc *updateFileServiceImpl) Execute(ctx context.Context, req *UpdateFileRequestDTO) (*FileResponseDTO, error) {
	//
	// STEP 1: Validation
	//
	if req == nil {
		svc.logger.Warn("Failed validation with nil request")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "File update details are required")
	}
	// BUGFIX: gocql.UUID.String() never returns "", so the previous
	// `req.ID.String() == ""` check was dead code. Compare against the zero
	// UUID value to actually catch a missing ID.
	if req.ID == (gocql.UUID{}) {
		svc.logger.Warn("Empty file ID provided")
		return nil, httperror.NewForBadRequestWithSingleField("id", "File ID is required")
	}
	//
	// STEP 2: Get user ID from context
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 3: Get existing file metadata
	//
	file, err := svc.getMetadataUseCase.Execute(req.ID)
	if err != nil {
		svc.logger.Error("Failed to get file metadata",
			zap.Any("error", err),
			zap.Any("file_id", req.ID))
		return nil, err
	}
	//
	// STEP 4: Check if user has write access to the file's collection
	//
	hasAccess, err := svc.collectionRepo.CheckAccess(ctx, file.CollectionID, userID, dom_collection.CollectionPermissionReadWrite)
	if err != nil {
		svc.logger.Error("Failed to check collection access",
			zap.Any("error", err),
			zap.Any("collection_id", file.CollectionID),
			zap.Any("user_id", userID))
		return nil, err
	}
	if !hasAccess {
		svc.logger.Warn("Unauthorized file update attempt",
			zap.Any("user_id", userID),
			zap.Any("file_id", req.ID),
			zap.Any("collection_id", file.CollectionID))
		return nil, httperror.NewForForbiddenWithSingleField("message", "You don't have permission to update this file")
	}
	//
	// STEP 5: Optimistic concurrency check.
	//
	// Developers note:
	// Our server has multiple clients sharing data, so the backend must ensure
	// the file being updated is based on its most recent version.
	// BUGFIX: the log and user-facing messages previously said "collection"
	// (copy-paste from the collection service) even though this service
	// updates files.
	if file.Version != req.Version {
		svc.logger.Warn("Outdated file update attempt",
			zap.Any("user_id", userID),
			zap.Any("file_id", req.ID),
			zap.Any("submitted_version", req.Version),
			zap.Any("current_version", file.Version))
		return nil, httperror.NewForBadRequestWithSingleField("message", "File has been updated since you last fetched it")
	}
	//
	// STEP 6: Update file metadata (apply only the supplied fields)
	//
	updated := false
	if req.EncryptedMetadata != "" {
		file.EncryptedMetadata = req.EncryptedMetadata
		updated = true
	}
	// len() of a nil slice is 0, so the former extra nil check was redundant.
	if len(req.EncryptedFileKey.Ciphertext) > 0 {
		file.EncryptedFileKey = req.EncryptedFileKey
		updated = true
	}
	if req.EncryptionVersion != "" {
		file.EncryptionVersion = req.EncryptionVersion
		updated = true
	}
	if req.EncryptedHash != "" {
		file.EncryptedHash = req.EncryptedHash
		updated = true
	}
	if !updated {
		svc.logger.Warn("No fields to update provided")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "At least one field must be provided for update")
	}
	file.Version++ // Mutation means we increment version.
	file.ModifiedAt = time.Now()
	file.ModifiedByUserID = userID
	//
	// STEP 7: Save updated file
	//
	err = svc.updateMetadataUseCase.Execute(ctx, file)
	if err != nil {
		svc.logger.Error("Failed to update file metadata",
			zap.Any("error", err),
			zap.Any("file_id", file.ID))
		return nil, err
	}
	//
	// STEP 8: Map domain model to response DTO
	//
	response := mapFileToDTO(file)
	svc.logger.Debug("File updated successfully",
		zap.Any("file_id", file.ID))
	return response, nil
}

View file

@ -0,0 +1,28 @@
// monorepo/cloud/backend/internal/maplefile/service/file/utils.go
package file
import (
dom_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/file"
)
// Helper function to map a File domain model to a FileResponseDTO
// mapFileToDTO converts a File domain model into the API-facing
// FileResponseDTO, copying every exposed field one-to-one.
func mapFileToDTO(f *dom_file.File) *FileResponseDTO {
	dto := &FileResponseDTO{
		ID:                            f.ID,
		CollectionID:                  f.CollectionID,
		OwnerID:                       f.OwnerID,
		EncryptedMetadata:             f.EncryptedMetadata,
		EncryptedFileKey:              f.EncryptedFileKey,
		EncryptionVersion:             f.EncryptionVersion,
		EncryptedHash:                 f.EncryptedHash,
		EncryptedFileSizeInBytes:      f.EncryptedFileSizeInBytes,
		EncryptedThumbnailSizeInBytes: f.EncryptedThumbnailSizeInBytes,
		Tags:                          f.Tags,
		CreatedAt:                     f.CreatedAt,
		ModifiedAt:                    f.ModifiedAt,
		Version:                       f.Version,
		State:                         f.State,
		TombstoneVersion:              f.TombstoneVersion,
		TombstoneExpiry:               f.TombstoneExpiry,
	}
	return dto
}

View file

@ -0,0 +1,21 @@
package inviteemail
import (
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
dom_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/user"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/repo/inviteemailratelimit"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/emailer/mailgun"
)
// ProvideSendInviteEmailService provides the send invite email service for Wire DI
// ProvideSendInviteEmailService provides the send invite email service for Wire DI.
// It is a thin factory that forwards all dependencies to the constructor.
func ProvideSendInviteEmailService(
	conf *config.Config,
	lg *zap.Logger,
	users dom_user.Repository,
	rateLimits inviteemailratelimit.Repository,
	mailer mailgun.Emailer,
) SendInviteEmailService {
	return NewSendInviteEmailService(conf, lg, users, rateLimits, mailer)
}

View file

@ -0,0 +1,234 @@
// Package inviteemail provides services for sending invitation emails
// to non-registered users when someone wants to share a collection with them.
package inviteemail
import (
"context"
"fmt"
"strings"
"time"
"github.com/gocql/gocql"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
dom_inviteemail "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/inviteemail"
dom_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/user"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/repo/inviteemailratelimit"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/emailer/mailgun"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/validation"
)
// SendInviteEmailRequestDTO represents the request to send an invitation email.
type SendInviteEmailRequestDTO struct {
	// Email is the recipient's address; it is lowercased and trimmed by the service.
	Email string `json:"email"`
}

// SendInviteEmailResponseDTO represents the response after sending an invitation email.
type SendInviteEmailResponseDTO struct {
	// Success is false for soft outcomes (rate-limited, recipient already registered).
	Success bool `json:"success"`
	// RemainingToday is how many more invites the inviter may send today.
	RemainingToday int `json:"remaining_invites_today"`
	Message        string `json:"message"`
}

// SendInviteEmailService defines the interface for sending invitation emails.
type SendInviteEmailService interface {
	Execute(ctx context.Context, inviterID gocql.UUID, req *SendInviteEmailRequestDTO) (*SendInviteEmailResponseDTO, error)
}

// sendInviteEmailServiceImpl implements SendInviteEmailService using the user
// repository (existence checks), a per-day rate-limit repository, and Mailgun.
type sendInviteEmailServiceImpl struct {
	config        *config.Config
	logger        *zap.Logger
	userRepo      dom_user.Repository
	rateLimitRepo inviteemailratelimit.Repository
	emailer       mailgun.Emailer
	// maxEmailsPerDay is resolved once at construction from config with a
	// domain-level default fallback.
	maxEmailsPerDay int
}
// NewSendInviteEmailService creates a new instance of the send invite email
// service, resolving the daily send cap from configuration (falling back to
// the domain default when the configured value is zero or negative).
func NewSendInviteEmailService(
	cfg *config.Config,
	logger *zap.Logger,
	userRepo dom_user.Repository,
	rateLimitRepo inviteemailratelimit.Repository,
	emailer mailgun.Emailer,
) SendInviteEmailService {
	// Determine the per-day cap up front so Execute never re-reads config.
	limit := cfg.InviteEmail.MaxEmailsPerDay
	if limit <= 0 {
		limit = dom_inviteemail.DefaultMaxInviteEmailsPerDay
	}
	return &sendInviteEmailServiceImpl{
		config:          cfg,
		logger:          logger.Named("SendInviteEmailService"),
		userRepo:        userRepo,
		rateLimitRepo:   rateLimitRepo,
		emailer:         emailer,
		maxEmailsPerDay: limit,
	}
}
// Execute sends one invitation email on behalf of inviterID to the address in
// req, enforcing a per-inviter daily cap. Rate-limit hits and recipients who
// already have an account are reported as Success=false responses rather than
// errors; only validation and infrastructure failures return a non-nil error.
func (svc *sendInviteEmailServiceImpl) Execute(ctx context.Context, inviterID gocql.UUID, req *SendInviteEmailRequestDTO) (*SendInviteEmailResponseDTO, error) {
	//
	// STEP 1: Sanitize input
	//
	// Lowercase + trim so later comparisons and lookups are case- and
	// whitespace-insensitive.
	req.Email = strings.ToLower(strings.TrimSpace(req.Email))
	svc.logger.Debug("Processing invite email request",
		zap.String("inviter_id", inviterID.String()),
		zap.String("invited_email", validation.MaskEmail(req.Email)))
	//
	// STEP 2: Validate input
	//
	e := make(map[string]string)
	if req.Email == "" {
		e["email"] = "Email is required"
	} else if !validation.IsValidEmail(req.Email) {
		e["email"] = "Invalid email format"
	} else if len(req.Email) > 255 {
		e["email"] = "Email is too long"
	}
	if len(e) != 0 {
		svc.logger.Warn("Validation failed", zap.Any("errors", e))
		return nil, httperror.NewForBadRequest(&e)
	}
	//
	// STEP 3: Get inviter info
	//
	inviter, err := svc.userRepo.GetByID(ctx, inviterID)
	if err != nil {
		svc.logger.Error("Failed to get inviter info",
			zap.String("inviter_id", inviterID.String()),
			zap.Error(err))
		return nil, httperror.NewForInternalServerError("Failed to process request")
	}
	if inviter == nil {
		svc.logger.Error("Inviter not found",
			zap.String("inviter_id", inviterID.String()))
		return nil, httperror.NewForUnauthorizedWithSingleField("user", "User not found")
	}
	//
	// STEP 4: Check rate limit
	//
	// Counts are bucketed by UTC day (midnight truncation).
	today := time.Now().UTC().Truncate(24 * time.Hour)
	dailyCount, err := svc.rateLimitRepo.GetDailyEmailCount(ctx, inviterID, today)
	if err != nil {
		svc.logger.Warn("Failed to get rate limit count, proceeding with caution",
			zap.String("inviter_id", inviterID.String()),
			zap.Error(err))
		// Fail open but log - don't block users due to rate limit DB issues
		dailyCount = 0
	}
	if dailyCount >= svc.maxEmailsPerDay {
		svc.logger.Warn("Rate limit exceeded",
			zap.String("inviter_id", inviterID.String()),
			zap.Int("daily_count", dailyCount),
			zap.Int("max_per_day", svc.maxEmailsPerDay))
		// Soft failure: caller gets a friendly message, no error is returned.
		return &SendInviteEmailResponseDTO{
			Success:        false,
			RemainingToday: 0,
			Message:        "Daily invitation limit reached. You can send more invitations tomorrow.",
		}, nil
	}
	//
	// STEP 5: Check if recipient already has an account
	//
	exists, err := svc.userRepo.CheckIfExistsByEmail(ctx, req.Email)
	if err != nil {
		svc.logger.Error("Failed to check if user exists",
			zap.String("email", validation.MaskEmail(req.Email)),
			zap.Error(err))
		return nil, httperror.NewForInternalServerError("Failed to process request")
	}
	if exists {
		svc.logger.Debug("User already has account",
			zap.String("email", validation.MaskEmail(req.Email)))
		// Soft failure: no email sent, so the daily counter is not consumed.
		return &SendInviteEmailResponseDTO{
			Success:        false,
			RemainingToday: svc.maxEmailsPerDay - dailyCount,
			Message:        "This user already has an account. You can share with them directly.",
		}, nil
	}
	//
	// STEP 6: Send invitation email
	//
	if err := svc.sendInvitationEmail(ctx, inviter.Email, req.Email); err != nil {
		svc.logger.Error("Failed to send invitation email",
			zap.String("invited_email", validation.MaskEmail(req.Email)),
			zap.Error(err))
		return nil, httperror.NewForInternalServerError("Failed to send invitation email. Please try again.")
	}
	//
	// STEP 7: Increment rate limit counter
	//
	if err := svc.rateLimitRepo.IncrementDailyEmailCount(ctx, inviterID, today); err != nil {
		svc.logger.Warn("Failed to increment rate limit counter",
			zap.String("inviter_id", inviterID.String()),
			zap.Error(err))
		// Don't fail the request, email was already sent
	}
	// -1 accounts for the invitation just sent (the counter increment above
	// is not re-read).
	remaining := svc.maxEmailsPerDay - dailyCount - 1
	svc.logger.Info("Invitation email sent successfully",
		zap.String("inviter_id", inviterID.String()),
		zap.String("invited_email", validation.MaskEmail(req.Email)),
		zap.Int("remaining_today", remaining))
	return &SendInviteEmailResponseDTO{
		Success:        true,
		RemainingToday: remaining,
		Message:        fmt.Sprintf("Invitation sent to %s", req.Email),
	}, nil
}
// sendInvitationEmail renders the invitation HTML and dispatches it via the
// configured emailer. The inviter's email address is interpolated into both
// the subject and the body; the register link points at the frontend's
// /register route.
// NOTE(review): inviterEmail is inserted into HTML without escaping — assumed
// safe because it is a validated account email, but confirm.
func (svc *sendInviteEmailServiceImpl) sendInvitationEmail(ctx context.Context, inviterEmail, recipientEmail string) error {
	frontendURL := svc.emailer.GetFrontendDomainName()
	registerLink := fmt.Sprintf("%s/register", frontendURL)
	subject := fmt.Sprintf("%s wants to share files with you on MapleFile", inviterEmail)
	htmlContent := fmt.Sprintf(`
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
</head>
<body style="font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Helvetica, Arial, sans-serif; line-height: 1.6; color: #333; max-width: 600px; margin: 0 auto; padding: 20px;">
<h2 style="color: #1e40af;">You've been invited to MapleFile!</h2>
<p><strong>%s</strong> wants to share encrypted files with you.</p>
<p>MapleFile is a secure, end-to-end encrypted file storage service. To receive the shared files, you'll need to create a free account.</p>
<p style="margin: 30px 0;">
<a href="%s" style="background-color: #1e40af; color: white; padding: 12px 24px; text-decoration: none; border-radius: 6px; display: inline-block; font-weight: 500;">
Create Your Account
</a>
</p>
<p>Once you've registered, let <strong>%s</strong> know and they can share their files with you.</p>
<hr style="border: none; border-top: 1px solid #eee; margin: 30px 0;">
<p style="color: #666; font-size: 14px;">If you didn't expect this invitation, you can safely ignore this email.</p>
</body>
</html>
`, inviterEmail, registerLink, inviterEmail)
	return svc.emailer.Send(ctx, svc.emailer.GetSenderEmail(), subject, recipientEmail, htmlContent)
}

View file

@ -0,0 +1,99 @@
package ipanonymization
import (
"context"
"time"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
uc_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/collection"
uc_filemetadata "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/filemetadata"
uc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/user"
)
// AnonymizeOldIPsService handles the business logic for anonymizing old IP addresses.
type AnonymizeOldIPsService interface {
	Execute(ctx context.Context) error
}

// anonymizeOldIPsServiceImpl implements AnonymizeOldIPsService by fanning out
// to one anonymization use-case per table family (users, collections, file
// metadata).
type anonymizeOldIPsServiceImpl struct {
	config                       *config.Config
	logger                       *zap.Logger
	userAnonymizeUseCase         uc_user.AnonymizeOldIPsUseCase
	collectionAnonymizeUseCase   uc_collection.AnonymizeOldIPsUseCase
	fileMetadataAnonymizeUseCase uc_filemetadata.AnonymizeOldIPsUseCase
}
// NewAnonymizeOldIPsService creates a new service for anonymizing old IP
// addresses, wiring in one use-case per table family.
func NewAnonymizeOldIPsService(
	cfg *config.Config,
	logger *zap.Logger,
	userAnonymizeUseCase uc_user.AnonymizeOldIPsUseCase,
	collectionAnonymizeUseCase uc_collection.AnonymizeOldIPsUseCase,
	fileMetadataAnonymizeUseCase uc_filemetadata.AnonymizeOldIPsUseCase,
) AnonymizeOldIPsService {
	return &anonymizeOldIPsServiceImpl{
		config:                       cfg,
		logger:                       logger.Named("AnonymizeOldIPsService"),
		userAnonymizeUseCase:         userAnonymizeUseCase,
		collectionAnonymizeUseCase:   collectionAnonymizeUseCase,
		fileMetadataAnonymizeUseCase: fileMetadataAnonymizeUseCase,
	}
}
// Execute runs the IP anonymization process for all tables (users,
// collections, file metadata). It is a no-op when anonymization is disabled
// in configuration. Rows older than the configured retention window are
// anonymized; on the first failing step the error is logged (with the count
// of rows anonymized so far) and returned.
func (s *anonymizeOldIPsServiceImpl) Execute(ctx context.Context) error {
	if !s.config.Security.IPAnonymizationEnabled {
		s.logger.Info("IP anonymization is disabled, skipping")
		return nil
	}

	retentionDays := s.config.Security.IPAnonymizationRetentionDays
	cutoffDate := time.Now().AddDate(0, 0, -retentionDays)
	s.logger.Info("Starting IP anonymization process",
		zap.Int("retention_days", retentionDays),
		zap.Time("cutoff_date", cutoffDate))

	totalAnonymized := 0

	// runStep executes one anonymization use-case against the cutoff date and
	// folds its row count into the running total. It centralizes the per-step
	// error logging that was previously triplicated.
	runStep := func(failMsg string, run func(context.Context, time.Time) (int, error)) (int, error) {
		count, err := run(ctx, cutoffDate)
		if err != nil {
			s.logger.Error(failMsg,
				zap.Error(err),
				zap.Int("records_anonymized_before_error", totalAnonymized))
			return 0, err
		}
		totalAnonymized += count
		return count, nil
	}

	// Anonymize user tables using use-case
	userCount, err := runStep("Failed to anonymize user tables", s.userAnonymizeUseCase.Execute)
	if err != nil {
		return err
	}
	// Anonymize collection tables using use-case
	collectionCount, err := runStep("Failed to anonymize collection tables", s.collectionAnonymizeUseCase.Execute)
	if err != nil {
		return err
	}
	// Anonymize file tables using use-case
	fileCount, err := runStep("Failed to anonymize file tables", s.fileMetadataAnonymizeUseCase.Execute)
	if err != nil {
		return err
	}

	s.logger.Info("IP anonymization process completed successfully",
		zap.Int("total_rows_anonymized", totalAnonymized),
		zap.Int("user_rows", userCount),
		zap.Int("collection_rows", collectionCount),
		zap.Int("file_rows", fileCount))
	return nil
}

View file

@ -0,0 +1,22 @@
package ipanonymization
import (
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
uc_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/collection"
uc_filemetadata "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/filemetadata"
uc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/user"
)
// ProvideAnonymizeOldIPsService is the Wire DI provider for the IP
// anonymization service; it simply delegates to the constructor.
func ProvideAnonymizeOldIPsService(
	conf *config.Config,
	lg *zap.Logger,
	userUC uc_user.AnonymizeOldIPsUseCase,
	collectionUC uc_collection.AnonymizeOldIPsUseCase,
	fileMetadataUC uc_filemetadata.AnonymizeOldIPsUseCase,
) AnonymizeOldIPsService {
	return NewAnonymizeOldIPsService(conf, lg, userUC, collectionUC, fileMetadataUC)
}

View file

@ -0,0 +1,146 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/me/delete.go
package me
import (
"context"
"errors"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
dom_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/user"
svc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/user"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
sstring "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/security/securestring"
)
// DeleteMeRequestDTO carries the caller-supplied password used to confirm the
// account-deletion request. The password is an intent confirmation only; the
// server does not verify it cryptographically (see Execute).
type DeleteMeRequestDTO struct {
	Password string `json:"password"`
}

// DeleteMeService permanently deletes the authenticated user's account and
// all associated data (GDPR right-to-be-forgotten).
type DeleteMeService interface {
	Execute(sessCtx context.Context, req *DeleteMeRequestDTO) error
}

// deleteMeServiceImpl implements DeleteMeService by delegating the actual
// data removal to the shared complete-user-deletion service.
type deleteMeServiceImpl struct {
	config                      *config.Configuration
	logger                      *zap.Logger
	completeUserDeletionService svc_user.CompleteUserDeletionService
}
// NewDeleteMeService constructs the account-deletion service with a named
// logger and the complete-user-deletion dependency.
func NewDeleteMeService(
	config *config.Configuration,
	logger *zap.Logger,
	completeUserDeletionService svc_user.CompleteUserDeletionService,
) DeleteMeService {
	return &deleteMeServiceImpl{
		config:                      config,
		logger:                      logger.Named("DeleteMeService"),
		completeUserDeletionService: completeUserDeletionService,
	}
}
// Execute permanently deletes the authenticated user's account and all of
// their data (GDPR right-to-be-forgotten). The supplied password serves only
// as an intent confirmation — in this E2EE design the server holds no
// password hash to verify against. Root/admin users are refused.
func (svc *deleteMeServiceImpl) Execute(sessCtx context.Context, req *DeleteMeRequestDTO) error {
	//
	// STEP 1: Validation
	//
	if req == nil {
		svc.logger.Warn("Failed validation with nil request")
		return httperror.NewForBadRequestWithSingleField("non_field_error", "Password is required")
	}
	e := make(map[string]string)
	if req.Password == "" {
		e["password"] = "Password is required"
	}
	if len(e) != 0 {
		svc.logger.Warn("Failed validation",
			zap.Any("error", e))
		return httperror.NewForBadRequest(&e)
	}
	//
	// STEP 2: Get required from context.
	//
	sessionUserID, ok := sessCtx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting local user id",
			zap.Any("error", "Not found in context: user_id"))
		return errors.New("user id not found in context")
	}
	// Defend against admin deleting themselves
	// NOTE(review): the ok result is ignored, so a missing/mistyped role in
	// context defaults to 0 and passes this guard — confirm that is intended.
	sessionUserRole, _ := sessCtx.Value(constants.SessionUserRole).(int8)
	if sessionUserRole == dom_user.UserRoleRoot {
		svc.logger.Warn("admin is not allowed to delete themselves",
			zap.Any("error", ""))
		return httperror.NewForForbiddenWithSingleField("message", "admins do not have permission to delete themselves")
	}
	//
	// STEP 3: Verify password (intent confirmation).
	//
	// Wrap the password in guarded memory so it is wiped on return even
	// though it is never checked server-side.
	securePassword, err := sstring.NewSecureString(req.Password)
	if err != nil {
		svc.logger.Error("Failed to create secure string", zap.Any("error", err))
		return err
	}
	defer securePassword.Wipe()
	// NOTE: In this E2EE architecture, the server does not store password hashes.
	// Password verification happens client-side during key derivation.
	// The frontend must verify the password locally before calling this endpoint
	// by successfully deriving the KEK and decrypting the master key.
	// If the password is wrong, the client-side decryption will fail.
	//
	// The password field in the request serves as a confirmation that the user
	// intentionally wants to delete their account (not cryptographic verification).
	_ = securePassword // Password used for user intent confirmation
	//
	// STEP 4: Execute GDPR right-to-be-forgotten complete deletion
	//
	svc.logger.Info("Starting GDPR right-to-be-forgotten complete user deletion",
		zap.String("user_id", sessionUserID.String()))
	deletionReq := &svc_user.CompleteUserDeletionRequest{
		UserID:   sessionUserID,
		Password: req.Password,
	}
	result, err := svc.completeUserDeletionService.Execute(sessCtx, deletionReq)
	if err != nil {
		svc.logger.Error("Failed to complete user deletion",
			zap.Error(err),
			zap.String("user_id", sessionUserID.String()))
		return err
	}
	//
	// SUCCESS: User account and all data permanently deleted (GDPR compliant)
	//
	svc.logger.Info("User account successfully deleted (GDPR right-to-be-forgotten)",
		zap.String("user_id", sessionUserID.String()),
		zap.Int("files_deleted", result.FilesDeleted),
		zap.Int("collections_deleted", result.CollectionsDeleted),
		zap.Int("s3_objects_deleted", result.S3ObjectsDeleted),
		zap.Int("memberships_removed", result.MembershipsRemoved),
		zap.Int64("data_size_bytes", result.TotalDataSizeBytes),
		zap.Int("non_fatal_errors", len(result.Errors)))
	// Non-fatal cleanup errors are surfaced in logs but do not fail the call.
	if len(result.Errors) > 0 {
		svc.logger.Warn("Deletion completed with non-fatal errors",
			zap.Strings("errors", result.Errors))
	}
	return nil
}

View file

@ -0,0 +1,159 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/me/get.go
package me
import (
"context"
"errors"
"fmt"
"time"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
uc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/user"
)
// MeResponseDTO is the JSON payload returned for the authenticated user's own
// profile. The commented-out fields exist on the domain model but are
// deliberately not exposed through this endpoint.
type MeResponseDTO struct {
	ID          gocql.UUID `bson:"_id" json:"id"`
	Email       string     `bson:"email" json:"email"`
	FirstName   string     `bson:"first_name" json:"first_name"`
	LastName    string     `bson:"last_name" json:"last_name"`
	Name        string     `bson:"name" json:"name"`
	LexicalName string     `bson:"lexical_name" json:"lexical_name"`
	Role        int8       `bson:"role" json:"role"`
	// WasEmailVerified bool `bson:"was_email_verified" json:"was_email_verified,omitempty"`
	// EmailVerificationCode string `bson:"email_verification_code,omitempty" json:"email_verification_code,omitempty"`
	// EmailVerificationExpiry time.Time `bson:"email_verification_expiry,omitempty" json:"email_verification_expiry,omitempty"`
	Phone        string `bson:"phone" json:"phone,omitempty"`
	Country      string `bson:"country" json:"country,omitempty"`
	Timezone     string `bson:"timezone" json:"timezone"`
	Region       string `bson:"region" json:"region,omitempty"`
	City         string `bson:"city" json:"city,omitempty"`
	PostalCode   string `bson:"postal_code" json:"postal_code,omitempty"`
	AddressLine1 string `bson:"address_line1" json:"address_line1,omitempty"`
	AddressLine2 string `bson:"address_line2" json:"address_line2,omitempty"`
	// HasShippingAddress bool `bson:"has_shipping_address" json:"has_shipping_address,omitempty"`
	// ShippingName string `bson:"shipping_name" json:"shipping_name,omitempty"`
	// ShippingPhone string `bson:"shipping_phone" json:"shipping_phone,omitempty"`
	// ShippingCountry string `bson:"shipping_country" json:"shipping_country,omitempty"`
	// ShippingRegion string `bson:"shipping_region" json:"shipping_region,omitempty"`
	// ShippingCity string `bson:"shipping_city" json:"shipping_city,omitempty"`
	// ShippingPostalCode string `bson:"shipping_postal_code" json:"shipping_postal_code,omitempty"`
	// ShippingAddressLine1 string `bson:"shipping_address_line1" json:"shipping_address_line1,omitempty"`
	// ShippingAddressLine2 string `bson:"shipping_address_line2" json:"shipping_address_line2,omitempty"`
	// HowDidYouHearAboutUs int8 `bson:"how_did_you_hear_about_us" json:"how_did_you_hear_about_us,omitempty"`
	// HowDidYouHearAboutUsOther string `bson:"how_did_you_hear_about_us_other" json:"how_did_you_hear_about_us_other,omitempty"`
	// AgreeTermsOfService bool `bson:"agree_terms_of_service" json:"agree_terms_of_service,omitempty"`
	AgreePromotions                                bool  `bson:"agree_promotions" json:"agree_promotions,omitempty"`
	AgreeToTrackingAcrossThirdPartyAppsAndServices bool  `bson:"agree_to_tracking_across_third_party_apps_and_services" json:"agree_to_tracking_across_third_party_apps_and_services,omitempty"`
	ShareNotificationsEnabled                      *bool `bson:"share_notifications_enabled" json:"share_notifications_enabled,omitempty"`
	// CreatedFromIPAddress string `bson:"created_from_ip_address" json:"created_from_ip_address"`
	// CreatedByFederatedIdentityID gocql.UUID `bson:"created_by_federatedidentity_id" json:"created_by_federatedidentity_id"`
	CreatedAt time.Time `bson:"created_at" json:"created_at,omitempty"`
	// CreatedByName string `bson:"created_by_name" json:"created_by_name"`
	// ModifiedFromIPAddress string `bson:"modified_from_ip_address" json:"modified_from_ip_address"`
	// ModifiedByFederatedIdentityID gocql.UUID `bson:"modified_by_federatedidentity_id" json:"modified_by_federatedidentity_id"`
	// ModifiedAt time.Time `bson:"modified_at" json:"modified_at,omitempty"`
	// ModifiedByName string `bson:"modified_by_name" json:"modified_by_name"`
	Status int8 `bson:"status" json:"status"`
	// PaymentProcessorName string `bson:"payment_processor_name" json:"payment_processor_name"`
	// PaymentProcessorCustomerID string `bson:"payment_processor_customer_id" json:"payment_processor_customer_id"`
	// OTPEnabled bool `bson:"otp_enabled" json:"otp_enabled"`
	// OTPVerified bool `bson:"otp_verified" json:"otp_verified"`
	// OTPValidated bool `bson:"otp_validated" json:"otp_validated"`
	// OTPSecret string `bson:"otp_secret" json:"-"`
	// OTPAuthURL string `bson:"otp_auth_url" json:"-"`
	// OTPBackupCodeHash string `bson:"otp_backup_code_hash" json:"-"`
	// OTPBackupCodeHashAlgorithm string `bson:"otp_backup_code_hash_algorithm" json:"-"`
	// HowLongCollectingComicBooksForGrading int8 `bson:"how_long_collecting_comic_books_for_grading" json:"how_long_collecting_comic_books_for_grading"`
	// HasPreviouslySubmittedComicBookForGrading int8 `bson:"has_previously_submitted_comic_book_for_grading" json:"has_previously_submitted_comic_book_for_grading"`
	// HasOwnedGradedComicBooks int8 `bson:"has_owned_graded_comic_books" json:"has_owned_graded_comic_books"`
	// HasRegularComicBookShop int8 `bson:"has_regular_comic_book_shop" json:"has_regular_comic_book_shop"`
	// HasPreviouslyPurchasedFromAuctionSite int8 `bson:"has_previously_purchased_from_auction_site" json:"has_previously_purchased_from_auction_site"`
	// HasPreviouslyPurchasedFromFacebookMarketplace int8 `bson:"has_previously_purchased_from_facebook_marketplace" json:"has_previously_purchased_from_facebook_marketplace"`
	// HasRegularlyAttendedComicConsOrCollectibleShows int8 `bson:"has_regularly_attended_comic_cons_or_collectible_shows" json:"has_regularly_attended_comic_cons_or_collectible_shows"`
	ProfileVerificationStatus int8   `bson:"profile_verification_status" json:"profile_verification_status,omitempty"`
	WebsiteURL                string `bson:"website_url" json:"website_url"`
	Description               string `bson:"description" json:"description"`
	ComicBookStoreName        string `bson:"comic_book_store_name" json:"comic_book_store_name,omitempty"`
}

// GetMeService returns the authenticated user's own profile.
type GetMeService interface {
	Execute(sessCtx context.Context) (*MeResponseDTO, error)
}

// getMeServiceImpl implements GetMeService. Only the get-by-ID use-case is
// exercised by Execute; the create/update use-cases are injected but unused
// in the visible code.
type getMeServiceImpl struct {
	config             *config.Configuration
	logger             *zap.Logger
	userGetByIDUseCase uc_user.UserGetByIDUseCase
	userCreateUseCase  uc_user.UserCreateUseCase
	userUpdateUseCase  uc_user.UserUpdateUseCase
}
// NewGetMeService constructs the "get me" service with a named logger.
func NewGetMeService(
	config *config.Configuration,
	logger *zap.Logger,
	userGetByIDUseCase uc_user.UserGetByIDUseCase,
	userCreateUseCase uc_user.UserCreateUseCase,
	userUpdateUseCase uc_user.UserUpdateUseCase,
) GetMeService {
	return &getMeServiceImpl{
		config:             config,
		logger:             logger.Named("GetMeService"),
		userGetByIDUseCase: userGetByIDUseCase,
		userCreateUseCase:  userCreateUseCase,
		userUpdateUseCase:  userUpdateUseCase,
	}
}
// Execute resolves the authenticated user from the session context and maps
// their account record onto a MeResponseDTO.
func (svc *getMeServiceImpl) Execute(sessCtx context.Context) (*MeResponseDTO, error) {
	// The session middleware stores the caller's ID in the context; without
	// it we cannot identify "me".
	uid, ok := sessCtx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting local user id",
			zap.Any("error", "Not found in context: user_id"))
		return nil, errors.New("user id not found in context")
	}

	// Fetch the account; a missing record is treated as an error, never an
	// empty response.
	u, err := svc.userGetByIDUseCase.Execute(sessCtx, uid)
	if err != nil {
		svc.logger.Error("Failed getting me", zap.Any("error", err))
		return nil, err
	}
	if u == nil {
		err := fmt.Errorf("User does not exist for user id: %v", uid.String())
		svc.logger.Error("Failed getting me", zap.Any("error", err))
		return nil, err
	}

	// Copy only the client-visible fields onto the response DTO.
	return &MeResponseDTO{
		ID:              u.ID,
		Email:           u.Email,
		FirstName:       u.FirstName,
		LastName:        u.LastName,
		Name:            u.Name,
		LexicalName:     u.LexicalName,
		Role:            u.Role,
		Phone:           u.ProfileData.Phone,
		Country:         u.ProfileData.Country,
		Timezone:        u.Timezone,
		Region:          u.ProfileData.Region,
		City:            u.ProfileData.City,
		PostalCode:      u.ProfileData.PostalCode,
		AddressLine1:    u.ProfileData.AddressLine1,
		AddressLine2:    u.ProfileData.AddressLine2,
		AgreePromotions: u.ProfileData.AgreePromotions,
		AgreeToTrackingAcrossThirdPartyAppsAndServices: u.ProfileData.AgreeToTrackingAcrossThirdPartyAppsAndServices,
		ShareNotificationsEnabled:                      u.ProfileData.ShareNotificationsEnabled,
		CreatedAt:                                      u.CreatedAt,
		Status:                                         u.Status,
	}, nil
}

View file

@ -0,0 +1,52 @@
package me
import (
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
uc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/user"
svc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/user"
)
// ProvideGetMeService is the Wire DI provider for GetMeService.
func ProvideGetMeService(
	conf *config.Configuration,
	lg *zap.Logger,
	getByID uc_user.UserGetByIDUseCase,
	create uc_user.UserCreateUseCase,
	update uc_user.UserUpdateUseCase,
) GetMeService {
	return NewGetMeService(conf, lg, getByID, create, update)
}
// ProvideUpdateMeService is the Wire DI provider for UpdateMeService.
func ProvideUpdateMeService(
	conf *config.Configuration,
	lg *zap.Logger,
	getByID uc_user.UserGetByIDUseCase,
	getByEmail uc_user.UserGetByEmailUseCase,
	update uc_user.UserUpdateUseCase,
) UpdateMeService {
	return NewUpdateMeService(conf, lg, getByID, getByEmail, update)
}
// ProvideDeleteMeService is the Wire DI provider for DeleteMeService.
func ProvideDeleteMeService(
	conf *config.Configuration,
	lg *zap.Logger,
	deletion svc_user.CompleteUserDeletionService,
) DeleteMeService {
	return NewDeleteMeService(conf, lg, deletion)
}
// ProvideVerifyProfileService is the Wire DI provider for VerifyProfileService.
func ProvideVerifyProfileService(
	conf *config.Configuration,
	lg *zap.Logger,
	getByID uc_user.UserGetByIDUseCase,
	update uc_user.UserUpdateUseCase,
) VerifyProfileService {
	return NewVerifyProfileService(conf, lg, getByID, update)
}

View file

@ -0,0 +1,201 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/me/update.go
package me
import (
"context"
"errors"
"fmt"
"strings"
"github.com/gocql/gocql"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
uc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/user"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/validation"
)
// UpdateMeRequestDTO carries the editable profile fields for the
// authenticated user. ShareNotificationsEnabled is a pointer so that an
// absent field can be distinguished from an explicit false.
type UpdateMeRequestDTO struct {
	Email                                          string `bson:"email" json:"email"`
	FirstName                                      string `bson:"first_name" json:"first_name"`
	LastName                                       string `bson:"last_name" json:"last_name"`
	Phone                                          string `bson:"phone" json:"phone,omitempty"`
	Country                                        string `bson:"country" json:"country,omitempty"`
	Region                                         string `bson:"region" json:"region,omitempty"`
	Timezone                                       string `bson:"timezone" json:"timezone"`
	AgreePromotions                                bool   `bson:"agree_promotions" json:"agree_promotions,omitempty"`
	AgreeToTrackingAcrossThirdPartyAppsAndServices bool   `bson:"agree_to_tracking_across_third_party_apps_and_services" json:"agree_to_tracking_across_third_party_apps_and_services,omitempty"`
	ShareNotificationsEnabled                      *bool  `bson:"share_notifications_enabled" json:"share_notifications_enabled,omitempty"`
}

// UpdateMeService updates the authenticated user's own profile and returns
// the refreshed profile DTO.
type UpdateMeService interface {
	Execute(sessCtx context.Context, req *UpdateMeRequestDTO) (*MeResponseDTO, error)
}

// updateMeServiceImpl implements UpdateMeService; the get-by-email use-case
// is used to detect email-address collisions with other accounts.
type updateMeServiceImpl struct {
	config                *config.Configuration
	logger                *zap.Logger
	userGetByIDUseCase    uc_user.UserGetByIDUseCase
	userGetByEmailUseCase uc_user.UserGetByEmailUseCase
	userUpdateUseCase     uc_user.UserUpdateUseCase
}
// NewUpdateMeService constructs the profile-update service with a named logger.
func NewUpdateMeService(
	config *config.Configuration,
	logger *zap.Logger,
	userGetByIDUseCase uc_user.UserGetByIDUseCase,
	userGetByEmailUseCase uc_user.UserGetByEmailUseCase,
	userUpdateUseCase uc_user.UserUpdateUseCase,
) UpdateMeService {
	return &updateMeServiceImpl{
		config:                config,
		logger:                logger.Named("UpdateMeService"),
		userGetByIDUseCase:    userGetByIDUseCase,
		userGetByEmailUseCase: userGetByEmailUseCase,
		userUpdateUseCase:     userUpdateUseCase,
	}
}
// Execute applies a profile-update request to the authenticated user.
//
// Flow: resolve the user ID from the session context, validate the request,
// verify the new email (if changed) is not already used by another account,
// apply the changes to the user record, and persist it. Returns the refreshed
// profile on success or an httperror describing validation failures.
func (svc *updateMeServiceImpl) Execute(sessCtx context.Context, req *UpdateMeRequestDTO) (*MeResponseDTO, error) {
	//
	// STEP 1: Get required from context.
	//
	userID, ok := sessCtx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting local user id",
			zap.Any("error", "Not found in context: user_id"))
		return nil, errors.New("user id not found in context")
	}
	//
	// STEP 2: Validation
	//
	if req == nil {
		svc.logger.Warn("Failed validation with nothing received")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "Request is required in submission")
	}
	// Sanitization: emails are stored and compared in lowercase.
	req.Email = strings.ToLower(req.Email)
	e := make(map[string]string)
	if req.FirstName == "" {
		e["first_name"] = "First name is required"
	}
	if req.LastName == "" {
		e["last_name"] = "Last name is required"
	}
	if req.Email == "" {
		e["email"] = "Email is required"
	} else if len(req.Email) > 255 {
		// else-if so the length check cannot clobber the "required" message.
		e["email"] = "Email is too long"
	}
	if req.Phone == "" {
		// FIX: message previously read "Phone confirm is required" — a
		// copy-paste from a confirmation field; this validates the phone itself.
		e["phone"] = "Phone is required"
	}
	if req.Country == "" {
		e["country"] = "Country is required"
	}
	if req.Timezone == "" {
		e["timezone"] = "Timezone is required"
	}
	if len(e) != 0 {
		svc.logger.Warn("Failed validation",
			zap.Any("error", e))
		return nil, httperror.NewForBadRequest(&e)
	}
	//
	// STEP 3: Get related records.
	//
	// Get the user account (aka "Me").
	user, err := svc.userGetByIDUseCase.Execute(sessCtx, userID)
	if err != nil {
		// Handle other potential errors during fetch.
		svc.logger.Error("Failed getting user by ID", zap.Any("error", err))
		return nil, err
	}
	// Defensive check, though GetByID should surface an error if not found.
	if user == nil {
		err := fmt.Errorf("user is nil after lookup for id: %v", userID.String())
		svc.logger.Error("Failed getting user", zap.Any("error", err))
		return nil, err
	}
	//
	// STEP 4: Check if the requested email is already taken by another user.
	//
	if req.Email != user.Email {
		existingUser, err := svc.userGetByEmailUseCase.Execute(sessCtx, req.Email)
		if err != nil {
			svc.logger.Error("Failed checking existing email", zap.String("email", validation.MaskEmail(req.Email)), zap.Any("error", err))
			return nil, err // Internal Server Error
		}
		if existingUser != nil {
			// Email exists and belongs to another user.
			svc.logger.Warn("Attempted to update to an email already in use",
				zap.String("user_id", userID.String()),
				zap.String("existing_user_id", existingUser.ID.String()),
				zap.String("email", validation.MaskEmail(req.Email)))
			e["email"] = "This email address is already in use."
			return nil, httperror.NewForBadRequest(&e)
		}
		// A nil existingUser with no error means the email is available.
	}
	//
	// STEP 5: Update local database.
	//
	// Apply changes from request DTO to the user object.
	user.Email = req.Email
	user.FirstName = req.FirstName
	user.LastName = req.LastName
	user.Name = fmt.Sprintf("%s %s", req.FirstName, req.LastName)
	user.LexicalName = fmt.Sprintf("%s, %s", req.LastName, req.FirstName)
	user.ProfileData.Phone = req.Phone
	user.ProfileData.Country = req.Country
	user.ProfileData.Region = req.Region
	user.Timezone = req.Timezone
	user.ProfileData.AgreePromotions = req.AgreePromotions
	user.ProfileData.AgreeToTrackingAcrossThirdPartyAppsAndServices = req.AgreeToTrackingAcrossThirdPartyAppsAndServices
	// Pointer field: only overwrite when the caller explicitly provided a value.
	if req.ShareNotificationsEnabled != nil {
		user.ProfileData.ShareNotificationsEnabled = req.ShareNotificationsEnabled
	}
	// Persist changes.
	if err := svc.userUpdateUseCase.Execute(sessCtx, user); err != nil {
		svc.logger.Error("Failed updating user", zap.Any("error", err), zap.String("user_id", user.ID.String()))
		// Consider mapping specific DB errors (like constraint violations) to HTTP errors if applicable
		return nil, err
	}
	svc.logger.Debug("User updated successfully",
		zap.String("user_id", user.ID.String()))
	// Return updated user details.
	return &MeResponseDTO{
		ID:          user.ID,
		Email:       user.Email,
		FirstName:   user.FirstName,
		LastName:    user.LastName,
		Name:        user.Name,
		LexicalName: user.LexicalName,
		Phone:       user.ProfileData.Phone,
		Country:     user.ProfileData.Country,
		Region:      user.ProfileData.Region,
		Timezone:    user.Timezone,
		AgreePromotions: user.ProfileData.AgreePromotions,
		AgreeToTrackingAcrossThirdPartyAppsAndServices: user.ProfileData.AgreeToTrackingAcrossThirdPartyAppsAndServices,
		ShareNotificationsEnabled:                      user.ProfileData.ShareNotificationsEnabled,
	}, nil
}

View file

@ -0,0 +1,314 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/me/verifyprofile.go
package me
import (
"context"
"errors"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
domain "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/user"
uc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/user"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// VerifyProfileRequestDTO carries every field a client may submit for profile
// verification. The customer- and retailer-specific sections are validated
// according to the user's role; int8 fields use 0 to mean "not provided".
type VerifyProfileRequestDTO struct {
	// Common fields
	Country string `json:"country,omitempty"`
	Region string `json:"region,omitempty"`
	City string `json:"city,omitempty"`
	PostalCode string `json:"postal_code,omitempty"`
	AddressLine1 string `json:"address_line1,omitempty"`
	AddressLine2 string `json:"address_line2,omitempty"`
	HasShippingAddress bool `json:"has_shipping_address,omitempty"`
	ShippingName string `json:"shipping_name,omitempty"`
	ShippingPhone string `json:"shipping_phone,omitempty"`
	ShippingCountry string `json:"shipping_country,omitempty"`
	ShippingRegion string `json:"shipping_region,omitempty"`
	ShippingCity string `json:"shipping_city,omitempty"`
	ShippingPostalCode string `json:"shipping_postal_code,omitempty"`
	ShippingAddressLine1 string `json:"shipping_address_line1,omitempty"`
	ShippingAddressLine2 string `json:"shipping_address_line2,omitempty"`
	HowDidYouHearAboutUs int8 `json:"how_did_you_hear_about_us,omitempty"`
	HowDidYouHearAboutUsOther string `json:"how_did_you_hear_about_us_other,omitempty"`
	WebsiteURL string `json:"website_url,omitempty"`
	// NOTE(review): the only field in this struct with a bson tag, in a
	// gocql/Cassandra-backed service — presumably a leftover from a MongoDB
	// port; confirm whether the bson tag can be dropped.
	Description string `bson:"description" json:"description"`
	// Customer specific fields
	HowLongCollectingComicBooksForGrading int8 `json:"how_long_collecting_comic_books_for_grading,omitempty"`
	HasPreviouslySubmittedComicBookForGrading int8 `json:"has_previously_submitted_comic_book_for_grading,omitempty"`
	HasOwnedGradedComicBooks int8 `json:"has_owned_graded_comic_books,omitempty"`
	HasRegularComicBookShop int8 `json:"has_regular_comic_book_shop,omitempty"`
	HasPreviouslyPurchasedFromAuctionSite int8 `json:"has_previously_purchased_from_auction_site,omitempty"`
	HasPreviouslyPurchasedFromFacebookMarketplace int8 `json:"has_previously_purchased_from_facebook_marketplace,omitempty"`
	HasRegularlyAttendedComicConsOrCollectibleShows int8 `json:"has_regularly_attended_comic_cons_or_collectible_shows,omitempty"`
	// Retailer specific fields
	ComicBookStoreName string `json:"comic_book_store_name,omitempty"`
	StoreLogo string `json:"store_logo,omitempty"`
	HowLongStoreOperating int8 `json:"how_long_store_operating,omitempty"`
	GradingComicsExperience string `json:"grading_comics_experience,omitempty"`
	RetailPartnershipReason string `json:"retail_partnership_reason,omitempty"`
	ComicCoinPartnershipReason string `json:"comic_coin_partnership_reason,omitempty"`
	EstimatedSubmissionsPerMonth int8 `json:"estimated_submissions_per_month,omitempty"`
	HasOtherGradingService int8 `json:"has_other_grading_service,omitempty"`
	OtherGradingServiceName string `json:"other_grading_service_name,omitempty"`
	RequestWelcomePackage int8 `json:"request_welcome_package,omitempty"`
	// Explicitly specify user role if needed (overrides the user's current role)
	UserRole int8 `json:"user_role,omitempty"`
}

// VerifyProfileResponseDTO is the result of a verification submission.
type VerifyProfileResponseDTO struct {
	// Message is a human-readable confirmation tailored to the user's role.
	Message string `json:"message"`
	UserRole int8 `json:"user_role"`
	// Status reports the profile verification state.
	// NOTE(review): Execute never assigns this field, so it always serializes
	// as 0 — confirm whether it should be populated.
	Status int8 `json:"profile_verification_status"`
}

// VerifyProfileService submits a user's profile details for verification.
type VerifyProfileService interface {
	Execute(sessCtx context.Context, req *VerifyProfileRequestDTO) (*VerifyProfileResponseDTO, error)
}

// verifyProfileServiceImpl implements VerifyProfileService backed by the user
// use cases for lookup and persistence.
type verifyProfileServiceImpl struct {
	config *config.Configuration
	logger *zap.Logger
	userGetByIDUseCase uc_user.UserGetByIDUseCase
	userUpdateUseCase uc_user.UserUpdateUseCase
}
// NewVerifyProfileService wires a VerifyProfileService with its configuration,
// logger, and user use-case dependencies.
func NewVerifyProfileService(
	config *config.Configuration,
	logger *zap.Logger,
	userGetByIDUseCase uc_user.UserGetByIDUseCase,
	userUpdateUseCase uc_user.UserUpdateUseCase,
) VerifyProfileService {
	svc := &verifyProfileServiceImpl{
		config:             config,
		logger:             logger,
		userGetByIDUseCase: userGetByIDUseCase,
		userUpdateUseCase:  userUpdateUseCase,
	}
	return svc
}
// Execute validates and records a profile-verification submission for the
// authenticated user. Validation rules depend on the user's role (individual
// vs. company); the request may explicitly override the role.
//
// Returns a role-specific confirmation message, or an httperror with
// per-field validation messages.
func (s *verifyProfileServiceImpl) Execute(
	sessCtx context.Context,
	req *VerifyProfileRequestDTO,
) (*VerifyProfileResponseDTO, error) {
	//
	// STEP 1: Get required from context.
	//
	userID, ok := sessCtx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		s.logger.Error("Failed getting local user id",
			zap.Any("error", "Not found in context: user_id"))
		return nil, errors.New("user id not found in context")
	}
	// FIX: guard against a nil request before any field access below —
	// previously req.UserRole was dereferenced without this check, so a nil
	// request would panic. Matches the nil-request handling of the sibling
	// services in this package.
	if req == nil {
		s.logger.Warn("Failed validation with nil request")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "Request details are required")
	}
	//
	// STEP 2: Retrieve user from database
	//
	user, err := s.userGetByIDUseCase.Execute(sessCtx, userID)
	if err != nil {
		s.logger.Error("Failed retrieving user", zap.Any("error", err))
		return nil, err
	}
	if user == nil {
		s.logger.Error("User not found", zap.Any("userID", userID))
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "User not found")
	}
	// Allow the request to override the stored role, but only to one of the
	// two recognized roles.
	if req.UserRole != 0 && (req.UserRole == domain.UserRoleIndividual || req.UserRole == domain.UserRoleCompany) {
		s.logger.Info("Setting user role based on request",
			zap.Int("original_role", int(user.Role)),
			zap.Int("new_role", int(req.UserRole)))
		user.Role = req.UserRole
	}
	//
	// STEP 3: Validate request based on user role
	//
	e := make(map[string]string)
	// Validate common fields regardless of role
	s.validateCommonFields(req, e)
	// Role-specific validation
	if user.Role == domain.UserRoleIndividual {
		s.validateCustomerFields(req, e)
	} else if user.Role == domain.UserRoleCompany {
		s.validateRetailerFields(req, e)
	} else {
		s.logger.Warn("Unrecognized user role", zap.Int("role", int(user.Role)))
		e["user_role"] = "Invalid user role. Must be either customer or retailer."
	}
	// Return validation errors if any
	if len(e) != 0 {
		s.logger.Warn("Failed validation", zap.Any("errors", e))
		return nil, httperror.NewForBadRequest(&e)
	}
	//
	// STEP 4: Update user profile based on role
	//
	// Update common fields
	s.updateCommonFields(user, req)
	//
	// STEP 5: Save updated user to database
	//
	if err := s.userUpdateUseCase.Execute(sessCtx, user); err != nil {
		s.logger.Error("Failed to update user", zap.Any("error", err))
		return nil, err
	}
	//
	// STEP 6: Generate appropriate response
	//
	var responseMessage string
	if user.Role == domain.UserRoleIndividual {
		responseMessage = "Your profile has been submitted for verification. You'll be notified once it's been reviewed."
	} else if user.Role == domain.UserRoleCompany {
		responseMessage = "Your retailer profile has been submitted for verification. Our team will review your application and contact you soon."
	} else {
		responseMessage = "Your profile has been submitted for verification."
	}
	// NOTE(review): the response DTO's Status field is intentionally left at
	// its zero value here (it was never set in the original either) — confirm
	// whether a verification status should be assigned and returned.
	return &VerifyProfileResponseDTO{
		Message:  responseMessage,
		UserRole: user.Role,
	}, nil
}
// validateCommonFields records validation errors for fields shared by every
// user role into e. Map keys match the JSON field names of the request DTO.
// Shipping fields are only required when HasShippingAddress is set.
func (s *verifyProfileServiceImpl) validateCommonFields(req *VerifyProfileRequestDTO, e map[string]string) {
	// Plain required-string checks, table-driven.
	required := []struct {
		key, value, message string
	}{
		{"country", req.Country, "Country is required"},
		{"city", req.City, "City is required"},
		{"address_line1", req.AddressLine1, "Address is required"},
		{"postal_code", req.PostalCode, "Postal code is required"},
		{"website_url", req.WebsiteURL, "Website URL is required"},
		{"description", req.Description, "Description is required"},
	}
	for _, f := range required {
		if f.value == "" {
			e[f.key] = f.message
		}
	}
	if req.HowDidYouHearAboutUs == 0 {
		e["how_did_you_hear_about_us"] = "How did you hear about us is required"
	}
	// Value 7 is assumed to be the "Other" option and needs free-text detail.
	if req.HowDidYouHearAboutUs == 7 && req.HowDidYouHearAboutUsOther == "" {
		e["how_did_you_hear_about_us_other"] = "Please specify how you heard about us"
	}
	// Shipping address block is only validated when the user opted in.
	if req.HasShippingAddress {
		shipping := []struct {
			key, value, message string
		}{
			{"shipping_name", req.ShippingName, "Shipping name is required"},
			{"shipping_phone", req.ShippingPhone, "Shipping phone is required"},
			{"shipping_country", req.ShippingCountry, "Shipping country is required"},
			{"shipping_city", req.ShippingCity, "Shipping city is required"},
			{"shipping_address_line1", req.ShippingAddressLine1, "Shipping address is required"},
			{"shipping_postal_code", req.ShippingPostalCode, "Shipping postal code is required"},
		}
		for _, f := range shipping {
			if f.value == "" {
				e[f.key] = f.message
			}
		}
	}
}
// validateCustomerFields records validation errors for customer-specific
// questionnaire answers into e. Each answer is an int8 where 0 means
// "not provided".
func (s *verifyProfileServiceImpl) validateCustomerFields(req *VerifyProfileRequestDTO, e map[string]string) {
	checks := []struct {
		key     string
		value   int8
		message string
	}{
		{"how_long_collecting_comic_books_for_grading", req.HowLongCollectingComicBooksForGrading, "How long you've been collecting comic books for grading is required"},
		{"has_previously_submitted_comic_book_for_grading", req.HasPreviouslySubmittedComicBookForGrading, "Previous submission information is required"},
		{"has_owned_graded_comic_books", req.HasOwnedGradedComicBooks, "Information about owning graded comic books is required"},
		{"has_regular_comic_book_shop", req.HasRegularComicBookShop, "Regular comic book shop information is required"},
		{"has_previously_purchased_from_auction_site", req.HasPreviouslyPurchasedFromAuctionSite, "Auction site purchase information is required"},
		{"has_previously_purchased_from_facebook_marketplace", req.HasPreviouslyPurchasedFromFacebookMarketplace, "Facebook Marketplace purchase information is required"},
		{"has_regularly_attended_comic_cons_or_collectible_shows", req.HasRegularlyAttendedComicConsOrCollectibleShows, "Comic convention attendance information is required"},
	}
	for _, c := range checks {
		if c.value == 0 {
			e[c.key] = c.message
		}
	}
}
// validateRetailerFields records validation errors for retailer-specific
// fields into e.
//
// NOTE(review): ComicCoinPartnershipReason exists on the request DTO but is
// never validated here — confirm whether it is optional.
func (s *verifyProfileServiceImpl) validateRetailerFields(req *VerifyProfileRequestDTO, e map[string]string) {
	// FIX: ComicBookStoreName was previously checked twice with conflicting
	// messages ("Store name is required" then "Comic book store name is
	// required"); the second assignment always won, so only that message is
	// kept here. Behavior is unchanged.
	if req.ComicBookStoreName == "" {
		e["comic_book_store_name"] = "Comic book store name is required"
	}
	if req.HowLongStoreOperating == 0 {
		e["how_long_store_operating"] = "Store operation duration is required"
	}
	if req.GradingComicsExperience == "" {
		e["grading_comics_experience"] = "Grading comics experience is required"
	}
	if req.RetailPartnershipReason == "" {
		e["retail_partnership_reason"] = "Retail partnership reason is required"
	}
	if req.EstimatedSubmissionsPerMonth == 0 {
		e["estimated_submissions_per_month"] = "Estimated submissions per month is required"
	}
	if req.HasOtherGradingService == 0 {
		e["has_other_grading_service"] = "Other grading service information is required"
	}
	// If they answered "yes" (1), the service name is mandatory.
	if req.HasOtherGradingService == 1 && req.OtherGradingServiceName == "" {
		e["other_grading_service_name"] = "Please specify the grading service"
	}
	if req.RequestWelcomePackage == 0 {
		e["request_welcome_package"] = "Welcome package request information is required"
	}
}
// updateCommonFields copies the common (address and shipping) fields from the
// request onto the user's embedded profile data. It mutates user in place;
// persistence is handled by the caller (Execute).
//
// NOTE(review): several fields that validateCommonFields requires —
// HowDidYouHearAboutUs, WebsiteURL, Description — are not copied here;
// confirm they are persisted elsewhere or intentionally dropped.
func (s *verifyProfileServiceImpl) updateCommonFields(user *domain.User, req *VerifyProfileRequestDTO) {
	user.ProfileData.Country = req.Country
	user.ProfileData.Region = req.Region
	user.ProfileData.City = req.City
	user.ProfileData.PostalCode = req.PostalCode
	user.ProfileData.AddressLine1 = req.AddressLine1
	user.ProfileData.AddressLine2 = req.AddressLine2
	user.ProfileData.HasShippingAddress = req.HasShippingAddress
	user.ProfileData.ShippingName = req.ShippingName
	user.ProfileData.ShippingPhone = req.ShippingPhone
	user.ProfileData.ShippingCountry = req.ShippingCountry
	user.ProfileData.ShippingRegion = req.ShippingRegion
	user.ProfileData.ShippingCity = req.ShippingCity
	user.ProfileData.ShippingPostalCode = req.ShippingPostalCode
	user.ProfileData.ShippingAddressLine1 = req.ShippingAddressLine1
	user.ProfileData.ShippingAddressLine2 = req.ShippingAddressLine2
}

View file

@ -0,0 +1,155 @@
// monorepo/cloud/maplefile-backend/internal/maplefile/service/storagedailyusage/get_trend.go
package storagedailyusage
import (
"context"
"time"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
uc_storagedailyusage "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/storagedailyusage"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// GetStorageDailyUsageTrendRequestDTO selects which trend window to compute.
type GetStorageDailyUsageTrendRequestDTO struct {
	TrendPeriod string `json:"trend_period"` // "7days", "monthly", "yearly"
	Year *int `json:"year,omitempty"` // Optional: scopes "monthly"/"yearly" trends.
	Month *time.Month `json:"month,omitempty"` // Optional: scopes "monthly" trends.
}

// StorageDailyUsageResponseDTO is one day's storage-usage data point.
type StorageDailyUsageResponseDTO struct {
	UserID gocql.UUID `json:"user_id"`
	UsageDay time.Time `json:"usage_day"`
	TotalBytes int64 `json:"total_bytes"`
	TotalAddBytes int64 `json:"total_add_bytes"`
	TotalRemoveBytes int64 `json:"total_remove_bytes"`
}

// StorageUsageTrendResponseDTO aggregates per-day usage over a date range,
// with roll-up statistics computed by the use case layer.
type StorageUsageTrendResponseDTO struct {
	UserID gocql.UUID `json:"user_id"`
	StartDate time.Time `json:"start_date"`
	EndDate time.Time `json:"end_date"`
	DailyUsages []*StorageDailyUsageResponseDTO `json:"daily_usages"`
	TotalAdded int64 `json:"total_added"`
	TotalRemoved int64 `json:"total_removed"`
	NetChange int64 `json:"net_change"`
	AverageDailyAdd int64 `json:"average_daily_add"`
	PeakUsageDay *time.Time `json:"peak_usage_day,omitempty"` // nil when no data in range.
	PeakUsageBytes int64 `json:"peak_usage_bytes"`
}

// GetStorageDailyUsageTrendResponseDTO is the service-level response envelope.
type GetStorageDailyUsageTrendResponseDTO struct {
	TrendPeriod string `json:"trend_period"`
	Trend *StorageUsageTrendResponseDTO `json:"trend"`
	Success bool `json:"success"`
	Message string `json:"message"`
}

// GetStorageDailyUsageTrendService retrieves a storage-usage trend for the
// authenticated user.
type GetStorageDailyUsageTrendService interface {
	Execute(ctx context.Context, req *GetStorageDailyUsageTrendRequestDTO) (*GetStorageDailyUsageTrendResponseDTO, error)
}

// getStorageDailyUsageTrendServiceImpl implements
// GetStorageDailyUsageTrendService by delegating to the trend use case.
type getStorageDailyUsageTrendServiceImpl struct {
	config *config.Configuration
	logger *zap.Logger
	getStorageDailyUsageTrendUseCase uc_storagedailyusage.GetStorageDailyUsageTrendUseCase
}
// NewGetStorageDailyUsageTrendService constructs the trend service with a
// named logger scope.
func NewGetStorageDailyUsageTrendService(
	config *config.Configuration,
	logger *zap.Logger,
	getStorageDailyUsageTrendUseCase uc_storagedailyusage.GetStorageDailyUsageTrendUseCase,
) GetStorageDailyUsageTrendService {
	return &getStorageDailyUsageTrendServiceImpl{
		config:                           config,
		logger:                           logger.Named("GetStorageDailyUsageTrendService"),
		getStorageDailyUsageTrendUseCase: getStorageDailyUsageTrendUseCase,
	}
}
// Execute returns the authenticated user's storage-usage trend for the
// requested period.
//
// NOTE(review): TrendPeriod is passed through without validation here —
// presumably the use case rejects values outside "7days"/"monthly"/"yearly";
// confirm.
func (svc *getStorageDailyUsageTrendServiceImpl) Execute(ctx context.Context, req *GetStorageDailyUsageTrendRequestDTO) (*GetStorageDailyUsageTrendResponseDTO, error) {
	//
	// STEP 1: Validation
	//
	if req == nil {
		svc.logger.Warn("Failed validation with nil request")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "Request details are required")
	}
	//
	// STEP 2: Get user ID from context (placed there by auth middleware).
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 3: Build use case request
	//
	useCaseReq := &uc_storagedailyusage.GetStorageDailyUsageTrendRequest{
		UserID: userID,
		TrendPeriod: req.TrendPeriod,
		Year: req.Year,
		Month: req.Month,
	}
	//
	// STEP 4: Execute use case
	//
	trend, err := svc.getStorageDailyUsageTrendUseCase.Execute(ctx, useCaseReq)
	if err != nil {
		svc.logger.Error("Failed to get storage daily usage trend",
			zap.String("user_id", userID.String()),
			zap.String("trend_period", req.TrendPeriod),
			zap.Error(err))
		return nil, err
	}
	//
	// STEP 5: Map domain models to response DTOs
	//
	dailyUsages := make([]*StorageDailyUsageResponseDTO, len(trend.DailyUsages))
	for i, usage := range trend.DailyUsages {
		dailyUsages[i] = &StorageDailyUsageResponseDTO{
			UserID: usage.UserID,
			UsageDay: usage.UsageDay,
			TotalBytes: usage.TotalBytes,
			TotalAddBytes: usage.TotalAddBytes,
			TotalRemoveBytes: usage.TotalRemoveBytes,
		}
	}
	trendResponse := &StorageUsageTrendResponseDTO{
		UserID: trend.UserID,
		StartDate: trend.StartDate,
		EndDate: trend.EndDate,
		DailyUsages: dailyUsages,
		TotalAdded: trend.TotalAdded,
		TotalRemoved: trend.TotalRemoved,
		NetChange: trend.NetChange,
		AverageDailyAdd: trend.AverageDailyAdd,
		PeakUsageDay: trend.PeakUsageDay,
		PeakUsageBytes: trend.PeakUsageBytes,
	}
	response := &GetStorageDailyUsageTrendResponseDTO{
		TrendPeriod: req.TrendPeriod,
		Trend: trendResponse,
		Success: true,
		Message: "Storage daily usage trend retrieved successfully",
	}
	svc.logger.Debug("Storage daily usage trend retrieved successfully",
		zap.String("user_id", userID.String()),
		zap.String("trend_period", req.TrendPeriod),
		zap.Int("daily_usages_count", len(dailyUsages)),
		zap.Int64("net_change", trend.NetChange))
	return response, nil
}

View file

@ -0,0 +1,153 @@
// monorepo/cloud/maplefile-backend/internal/maplefile/service/storagedailyusage/get_usage_by_date_range.go
package storagedailyusage
import (
"context"
"time"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
uc_storagedailyusage "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/storagedailyusage"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// GetStorageUsageByDateRangeRequestDTO bounds the usage query to a date range
// (inclusive semantics are determined by the use case).
type GetStorageUsageByDateRangeRequestDTO struct {
	StartDate time.Time `json:"start_date"`
	EndDate time.Time `json:"end_date"`
}

// DateRangeSummaryResponseDTO carries roll-up statistics for a queried range.
type DateRangeSummaryResponseDTO struct {
	TotalDays int `json:"total_days"`
	DaysWithData int `json:"days_with_data"`
	TotalAdded int64 `json:"total_added"`
	TotalRemoved int64 `json:"total_removed"`
	NetChange int64 `json:"net_change"`
	AverageDailyAdd float64 `json:"average_daily_add"`
	PeakUsageDay *time.Time `json:"peak_usage_day,omitempty"` // nil when no data in range.
	PeakUsageBytes int64 `json:"peak_usage_bytes"`
	LowestUsageDay *time.Time `json:"lowest_usage_day,omitempty"` // nil when no data in range.
	LowestUsageBytes int64 `json:"lowest_usage_bytes"`
}

// GetStorageUsageByDateRangeResponseDTO is the service-level response
// envelope: per-day data points plus the range summary.
type GetStorageUsageByDateRangeResponseDTO struct {
	UserID gocql.UUID `json:"user_id"`
	StartDate time.Time `json:"start_date"`
	EndDate time.Time `json:"end_date"`
	DailyUsages []*StorageDailyUsageResponseDTO `json:"daily_usages"`
	Summary *DateRangeSummaryResponseDTO `json:"summary"`
	Success bool `json:"success"`
	Message string `json:"message"`
}

// GetStorageUsageByDateRangeService retrieves per-day storage usage for the
// authenticated user within a date range.
type GetStorageUsageByDateRangeService interface {
	Execute(ctx context.Context, req *GetStorageUsageByDateRangeRequestDTO) (*GetStorageUsageByDateRangeResponseDTO, error)
}

// getStorageUsageByDateRangeServiceImpl implements
// GetStorageUsageByDateRangeService by delegating to the date-range use case.
type getStorageUsageByDateRangeServiceImpl struct {
	config *config.Configuration
	logger *zap.Logger
	getStorageUsageByDateRangeUseCase uc_storagedailyusage.GetStorageUsageByDateRangeUseCase
}
// NewGetStorageUsageByDateRangeService constructs the date-range service with
// a named logger scope.
func NewGetStorageUsageByDateRangeService(
	config *config.Configuration,
	logger *zap.Logger,
	getStorageUsageByDateRangeUseCase uc_storagedailyusage.GetStorageUsageByDateRangeUseCase,
) GetStorageUsageByDateRangeService {
	return &getStorageUsageByDateRangeServiceImpl{
		config:                            config,
		logger:                            logger.Named("GetStorageUsageByDateRangeService"),
		getStorageUsageByDateRangeUseCase: getStorageUsageByDateRangeUseCase,
	}
}
// Execute returns per-day storage usage plus summary statistics for the
// authenticated user over the requested date range.
//
// NOTE(review): StartDate/EndDate are not sanity-checked here (zero values,
// start after end) — presumably the use case validates the range; confirm.
func (svc *getStorageUsageByDateRangeServiceImpl) Execute(ctx context.Context, req *GetStorageUsageByDateRangeRequestDTO) (*GetStorageUsageByDateRangeResponseDTO, error) {
	//
	// STEP 1: Validation
	//
	if req == nil {
		svc.logger.Warn("Failed validation with nil request")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "Request details are required")
	}
	//
	// STEP 2: Get user ID from context (placed there by auth middleware).
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 3: Build use case request
	//
	useCaseReq := &uc_storagedailyusage.GetStorageUsageByDateRangeRequest{
		UserID: userID,
		StartDate: req.StartDate,
		EndDate: req.EndDate,
	}
	//
	// STEP 4: Execute use case
	//
	useCaseResp, err := svc.getStorageUsageByDateRangeUseCase.Execute(ctx, useCaseReq)
	if err != nil {
		svc.logger.Error("Failed to get storage usage by date range",
			zap.String("user_id", userID.String()),
			zap.Time("start_date", req.StartDate),
			zap.Time("end_date", req.EndDate),
			zap.Error(err))
		return nil, err
	}
	//
	// STEP 5: Map domain models to response DTOs
	//
	dailyUsages := make([]*StorageDailyUsageResponseDTO, len(useCaseResp.DailyUsages))
	for i, usage := range useCaseResp.DailyUsages {
		dailyUsages[i] = &StorageDailyUsageResponseDTO{
			UserID: usage.UserID,
			UsageDay: usage.UsageDay,
			TotalBytes: usage.TotalBytes,
			TotalAddBytes: usage.TotalAddBytes,
			TotalRemoveBytes: usage.TotalRemoveBytes,
		}
	}
	summaryResponse := &DateRangeSummaryResponseDTO{
		TotalDays: useCaseResp.Summary.TotalDays,
		DaysWithData: useCaseResp.Summary.DaysWithData,
		TotalAdded: useCaseResp.Summary.TotalAdded,
		TotalRemoved: useCaseResp.Summary.TotalRemoved,
		NetChange: useCaseResp.Summary.NetChange,
		AverageDailyAdd: useCaseResp.Summary.AverageDailyAdd,
		PeakUsageDay: useCaseResp.Summary.PeakUsageDay,
		PeakUsageBytes: useCaseResp.Summary.PeakUsageBytes,
		LowestUsageDay: useCaseResp.Summary.LowestUsageDay,
		LowestUsageBytes: useCaseResp.Summary.LowestUsageBytes,
	}
	response := &GetStorageUsageByDateRangeResponseDTO{
		UserID: useCaseResp.UserID,
		StartDate: useCaseResp.StartDate,
		EndDate: useCaseResp.EndDate,
		DailyUsages: dailyUsages,
		Summary: summaryResponse,
		Success: true,
		Message: "Storage usage by date range retrieved successfully",
	}
	svc.logger.Debug("Storage usage by date range retrieved successfully",
		zap.String("user_id", userID.String()),
		zap.Time("start_date", req.StartDate),
		zap.Time("end_date", req.EndDate),
		zap.Int("daily_usages_count", len(dailyUsages)),
		zap.Int64("net_change", useCaseResp.Summary.NetChange))
	return response, nil
}

View file

@ -0,0 +1,129 @@
// monorepo/cloud/maplefile-backend/internal/maplefile/service/storagedailyusage/get_usage_summary.go
package storagedailyusage
import (
"context"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
uc_storagedailyusage "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/storagedailyusage"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// GetStorageUsageSummaryRequestDTO selects the aggregation period.
type GetStorageUsageSummaryRequestDTO struct {
	SummaryType string `json:"summary_type"` // "current_month", "current_year"
}

// StorageUsageSummaryResponseDTO is an aggregated usage summary over the
// selected period. Dates are serialized as calendar days ("YYYY-MM-DD").
type StorageUsageSummaryResponseDTO struct {
	UserID gocql.UUID `json:"user_id"`
	Period string `json:"period"`
	StartDate string `json:"start_date"`
	EndDate string `json:"end_date"`
	CurrentUsage int64 `json:"current_usage_bytes"`
	TotalAdded int64 `json:"total_added_bytes"`
	TotalRemoved int64 `json:"total_removed_bytes"`
	NetChange int64 `json:"net_change_bytes"`
	DaysWithData int `json:"days_with_data"`
}

// GetStorageUsageSummaryResponseDTO is the service-level response envelope.
type GetStorageUsageSummaryResponseDTO struct {
	SummaryType string `json:"summary_type"`
	Summary *StorageUsageSummaryResponseDTO `json:"summary"`
	Success bool `json:"success"`
	Message string `json:"message"`
}

// GetStorageUsageSummaryService retrieves a storage-usage summary for the
// authenticated user.
type GetStorageUsageSummaryService interface {
	Execute(ctx context.Context, req *GetStorageUsageSummaryRequestDTO) (*GetStorageUsageSummaryResponseDTO, error)
}

// getStorageUsageSummaryServiceImpl implements GetStorageUsageSummaryService
// by delegating to the summary use case.
type getStorageUsageSummaryServiceImpl struct {
	config *config.Configuration
	logger *zap.Logger
	getStorageUsageSummaryUseCase uc_storagedailyusage.GetStorageUsageSummaryUseCase
}
// NewGetStorageUsageSummaryService constructs the summary service with a
// named logger scope.
func NewGetStorageUsageSummaryService(
	config *config.Configuration,
	logger *zap.Logger,
	getStorageUsageSummaryUseCase uc_storagedailyusage.GetStorageUsageSummaryUseCase,
) GetStorageUsageSummaryService {
	return &getStorageUsageSummaryServiceImpl{
		config:                        config,
		logger:                        logger.Named("GetStorageUsageSummaryService"),
		getStorageUsageSummaryUseCase: getStorageUsageSummaryUseCase,
	}
}
// Execute returns an aggregated storage-usage summary for the authenticated
// user, scoped by req.SummaryType ("current_month" or "current_year").
func (svc *getStorageUsageSummaryServiceImpl) Execute(ctx context.Context, req *GetStorageUsageSummaryRequestDTO) (*GetStorageUsageSummaryResponseDTO, error) {
	// Reject empty submissions up front.
	if req == nil {
		svc.logger.Warn("Failed validation with nil request")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "Request details are required")
	}

	// The authenticated user is carried in the request context by middleware.
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}

	// Delegate the aggregation to the use case layer.
	summary, err := svc.getStorageUsageSummaryUseCase.Execute(ctx, &uc_storagedailyusage.GetStorageUsageSummaryRequest{
		UserID:      userID,
		SummaryType: req.SummaryType,
	})
	if err != nil {
		svc.logger.Error("Failed to get storage usage summary",
			zap.String("user_id", userID.String()),
			zap.String("summary_type", req.SummaryType),
			zap.Error(err))
		return nil, err
	}

	svc.logger.Debug("Storage usage summary retrieved successfully",
		zap.String("user_id", userID.String()),
		zap.String("summary_type", req.SummaryType),
		zap.Int64("current_usage", summary.CurrentUsage),
		zap.Int64("net_change", summary.NetChange))

	// Map the domain summary to the transport DTO; dates are serialized as
	// plain calendar days (YYYY-MM-DD).
	return &GetStorageUsageSummaryResponseDTO{
		SummaryType: req.SummaryType,
		Summary: &StorageUsageSummaryResponseDTO{
			UserID:       summary.UserID,
			Period:       summary.Period,
			StartDate:    summary.StartDate.Format("2006-01-02"),
			EndDate:      summary.EndDate.Format("2006-01-02"),
			CurrentUsage: summary.CurrentUsage,
			TotalAdded:   summary.TotalAdded,
			TotalRemoved: summary.TotalRemoved,
			NetChange:    summary.NetChange,
			DaysWithData: summary.DaysWithData,
		},
		Success: true,
		Message: "Storage usage summary retrieved successfully",
	}, nil
}

View file

@ -0,0 +1,42 @@
package storagedailyusage
import (
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
uc_storagedailyusage "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/storagedailyusage"
)
// Wire providers for storage daily usage services. Each provider is a thin
// constructor wrapper so the dependency-injection graph can reference these
// services by function.

// ProvideGetStorageDailyUsageTrendService constructs the trend service.
func ProvideGetStorageDailyUsageTrendService(
	cfg *config.Configuration,
	logger *zap.Logger,
	getStorageDailyUsageTrendUseCase uc_storagedailyusage.GetStorageDailyUsageTrendUseCase,
) GetStorageDailyUsageTrendService {
	return NewGetStorageDailyUsageTrendService(cfg, logger, getStorageDailyUsageTrendUseCase)
}

// ProvideGetStorageUsageSummaryService constructs the summary service.
func ProvideGetStorageUsageSummaryService(
	cfg *config.Configuration,
	logger *zap.Logger,
	getStorageUsageSummaryUseCase uc_storagedailyusage.GetStorageUsageSummaryUseCase,
) GetStorageUsageSummaryService {
	return NewGetStorageUsageSummaryService(cfg, logger, getStorageUsageSummaryUseCase)
}

// ProvideGetStorageUsageByDateRangeService constructs the date-range service.
func ProvideGetStorageUsageByDateRangeService(
	cfg *config.Configuration,
	logger *zap.Logger,
	getStorageUsageByDateRangeUseCase uc_storagedailyusage.GetStorageUsageByDateRangeUseCase,
) GetStorageUsageByDateRangeService {
	return NewGetStorageUsageByDateRangeService(cfg, logger, getStorageUsageByDateRangeUseCase)
}

// ProvideUpdateStorageUsageService constructs the update service.
func ProvideUpdateStorageUsageService(
	cfg *config.Configuration,
	logger *zap.Logger,
	updateStorageUsageUseCase uc_storagedailyusage.UpdateStorageUsageUseCase,
) UpdateStorageUsageService {
	return NewUpdateStorageUsageService(cfg, logger, updateStorageUsageUseCase)
}

View file

@ -0,0 +1,111 @@
// monorepo/cloud/maplefile-backend/internal/maplefile/service/storagedailyusage/update_usage.go
package storagedailyusage
import (
"context"
"time"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
uc_storagedailyusage "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/storagedailyusage"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// UpdateStorageUsageRequestDTO describes a storage-usage mutation for the
// authenticated user's daily-usage record.
type UpdateStorageUsageRequestDTO struct {
	UsageDay *time.Time `json:"usage_day,omitempty"` // Optional, defaults to today
	TotalBytes int64 `json:"total_bytes"`
	AddBytes int64 `json:"add_bytes"`
	RemoveBytes int64 `json:"remove_bytes"`
	IsIncrement bool `json:"is_increment"` // If true, increment existing values; if false, set absolute values
}

// UpdateStorageUsageResponseDTO acknowledges an update.
type UpdateStorageUsageResponseDTO struct {
	Success bool `json:"success"`
	Message string `json:"message"`
}

// UpdateStorageUsageService applies a storage-usage mutation for the
// authenticated user.
type UpdateStorageUsageService interface {
	Execute(ctx context.Context, req *UpdateStorageUsageRequestDTO) (*UpdateStorageUsageResponseDTO, error)
}

// updateStorageUsageServiceImpl implements UpdateStorageUsageService by
// delegating to the update use case.
type updateStorageUsageServiceImpl struct {
	config *config.Configuration
	logger *zap.Logger
	updateStorageUsageUseCase uc_storagedailyusage.UpdateStorageUsageUseCase
}
// NewUpdateStorageUsageService constructs the update service with a named
// logger scope.
func NewUpdateStorageUsageService(
	config *config.Configuration,
	logger *zap.Logger,
	updateStorageUsageUseCase uc_storagedailyusage.UpdateStorageUsageUseCase,
) UpdateStorageUsageService {
	return &updateStorageUsageServiceImpl{
		config:                    config,
		logger:                    logger.Named("UpdateStorageUsageService"),
		updateStorageUsageUseCase: updateStorageUsageUseCase,
	}
}
// Execute applies the requested storage-usage change (absolute set or
// increment, per req.IsIncrement) for the authenticated user and acknowledges
// success.
func (svc *updateStorageUsageServiceImpl) Execute(ctx context.Context, req *UpdateStorageUsageRequestDTO) (*UpdateStorageUsageResponseDTO, error) {
	// Reject empty submissions up front.
	if req == nil {
		svc.logger.Warn("Failed validation with nil request")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "Update details are required")
	}

	// The authenticated user is carried in the request context by middleware.
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}

	// Delegate the mutation to the use case layer.
	if err := svc.updateStorageUsageUseCase.Execute(ctx, &uc_storagedailyusage.UpdateStorageUsageRequest{
		UserID:      userID,
		UsageDay:    req.UsageDay,
		TotalBytes:  req.TotalBytes,
		AddBytes:    req.AddBytes,
		RemoveBytes: req.RemoveBytes,
		IsIncrement: req.IsIncrement,
	}); err != nil {
		svc.logger.Error("Failed to update storage usage",
			zap.String("user_id", userID.String()),
			zap.Int64("total_bytes", req.TotalBytes),
			zap.Int64("add_bytes", req.AddBytes),
			zap.Int64("remove_bytes", req.RemoveBytes),
			zap.Bool("is_increment", req.IsIncrement),
			zap.Error(err))
		return nil, err
	}

	svc.logger.Debug("Storage usage updated successfully",
		zap.String("user_id", userID.String()),
		zap.Int64("total_bytes", req.TotalBytes),
		zap.Int64("add_bytes", req.AddBytes),
		zap.Int64("remove_bytes", req.RemoveBytes),
		zap.Bool("is_increment", req.IsIncrement))

	return &UpdateStorageUsageResponseDTO{
		Success: true,
		Message: "Storage usage updated successfully",
	}, nil
}

View file

@ -0,0 +1,91 @@
// monorepo/cloud/maplefile-backend/internal/maplefile/service/storageusageevent/create_event.go
package storageusageevent
import (
"context"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
uc_storageusageevent "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/storageusageevent"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// CreateStorageUsageEventRequestDTO is the transport-layer payload recording a
// single storage mutation for the authenticated user.
type CreateStorageUsageEventRequestDTO struct {
	FileSize  int64  `json:"file_size"`
	Operation string `json:"operation"` // "add" or "remove"
}

// CreateStorageUsageEventResponseDTO reports the outcome of event creation.
type CreateStorageUsageEventResponseDTO struct {
	Success bool   `json:"success"`
	Message string `json:"message"`
}

// CreateStorageUsageEventService records a storage-usage event on behalf of
// the user identified by the request context.
type CreateStorageUsageEventService interface {
	Execute(ctx context.Context, req *CreateStorageUsageEventRequestDTO) (*CreateStorageUsageEventResponseDTO, error)
}

// createStorageUsageEventServiceImpl is the default implementation of
// CreateStorageUsageEventService.
type createStorageUsageEventServiceImpl struct {
	config                         *config.Configuration
	logger                         *zap.Logger
	createStorageUsageEventUseCase uc_storageusageevent.CreateStorageUsageEventUseCase
}
// NewCreateStorageUsageEventService constructs the default
// CreateStorageUsageEventService with a service-scoped named logger.
func NewCreateStorageUsageEventService(
	config *config.Configuration,
	logger *zap.Logger,
	createStorageUsageEventUseCase uc_storageusageevent.CreateStorageUsageEventUseCase,
) CreateStorageUsageEventService {
	svc := &createStorageUsageEventServiceImpl{
		config:                         config,
		logger:                         logger.Named("CreateStorageUsageEventService"),
		createStorageUsageEventUseCase: createStorageUsageEventUseCase,
	}
	return svc
}
// Execute validates req, resolves the authenticated user from ctx, and records
// a single storage-usage event through the use case layer.
//
// Returns a bad-request error when the payload is missing or malformed, an
// internal error when the auth context is broken, or the use case error.
func (svc *createStorageUsageEventServiceImpl) Execute(ctx context.Context, req *CreateStorageUsageEventRequestDTO) (*CreateStorageUsageEventResponseDTO, error) {
	//
	// STEP 1: Validation
	//
	if req == nil {
		svc.logger.Warn("Failed validation with nil request")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "Event details are required")
	}
	e := make(map[string]string)
	// The DTO documents exactly two operations; reject anything else before it
	// reaches the use case instead of passing an arbitrary string downstream.
	if req.Operation != "add" && req.Operation != "remove" {
		e["operation"] = "Operation must be either 'add' or 'remove'"
	}
	// A negative size would silently corrupt usage accounting.
	if req.FileSize < 0 {
		e["file_size"] = "File size cannot be negative"
	}
	if len(e) != 0 {
		svc.logger.Warn("Failed validating storage usage event",
			zap.Any("error", e))
		return nil, httperror.NewForBadRequest(&e)
	}
	//
	// STEP 2: Get user ID from context
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 3: Execute use case
	//
	err := svc.createStorageUsageEventUseCase.Execute(ctx, userID, req.FileSize, req.Operation)
	if err != nil {
		svc.logger.Error("Failed to create storage usage event",
			zap.String("user_id", userID.String()),
			zap.Int64("file_size", req.FileSize),
			zap.String("operation", req.Operation),
			zap.Error(err))
		return nil, err
	}
	response := &CreateStorageUsageEventResponseDTO{
		Success: true,
		Message: "Storage usage event created successfully",
	}
	svc.logger.Debug("Storage usage event created successfully",
		zap.String("user_id", userID.String()),
		zap.Int64("file_size", req.FileSize),
		zap.String("operation", req.Operation))
	return response, nil
}

View file

@ -0,0 +1,138 @@
// monorepo/cloud/maplefile-backend/internal/maplefile/service/storageusageevent/get_events.go
package storageusageevent
import (
"context"
"time"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
uc_storageusageevent "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/storageusageevent"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// GetStorageUsageEventsRequestDTO selects which window of storage-usage events
// to return for the authenticated user.
type GetStorageUsageEventsRequestDTO struct {
	TrendPeriod string      `json:"trend_period"` // "7days", "monthly", "yearly", "custom"
	Year        *int        `json:"year,omitempty"`
	Month       *time.Month `json:"month,omitempty"`
	Days        *int        `json:"days,omitempty"` // For custom day ranges
}

// StorageUsageEventResponseDTO is the transport form of one usage event.
type StorageUsageEventResponseDTO struct {
	UserID    gocql.UUID `json:"user_id"`
	EventDay  time.Time  `json:"event_day"`
	EventTime time.Time  `json:"event_time"`
	FileSize  int64      `json:"file_size"`
	Operation string     `json:"operation"`
}

// GetStorageUsageEventsResponseDTO bundles the resolved date window together
// with the events that fall inside it.
type GetStorageUsageEventsResponseDTO struct {
	UserID      gocql.UUID                      `json:"user_id"`
	TrendPeriod string                          `json:"trend_period"`
	StartDate   time.Time                       `json:"start_date"`
	EndDate     time.Time                       `json:"end_date"`
	Events      []*StorageUsageEventResponseDTO `json:"events"`
	EventCount  int                             `json:"event_count"`
	Success     bool                            `json:"success"`
	Message     string                          `json:"message"`
}

// GetStorageUsageEventsService lists storage-usage events for the user taken
// from the request context.
type GetStorageUsageEventsService interface {
	Execute(ctx context.Context, req *GetStorageUsageEventsRequestDTO) (*GetStorageUsageEventsResponseDTO, error)
}

// getStorageUsageEventsServiceImpl is the default implementation of
// GetStorageUsageEventsService.
type getStorageUsageEventsServiceImpl struct {
	config                       *config.Configuration
	logger                       *zap.Logger
	getStorageUsageEventsUseCase uc_storageusageevent.GetStorageUsageEventsUseCase
}
// NewGetStorageUsageEventsService constructs the default
// GetStorageUsageEventsService with a service-scoped named logger.
func NewGetStorageUsageEventsService(
	config *config.Configuration,
	logger *zap.Logger,
	getStorageUsageEventsUseCase uc_storageusageevent.GetStorageUsageEventsUseCase,
) GetStorageUsageEventsService {
	svc := &getStorageUsageEventsServiceImpl{
		config:                       config,
		logger:                       logger.Named("GetStorageUsageEventsService"),
		getStorageUsageEventsUseCase: getStorageUsageEventsUseCase,
	}
	return svc
}
// Execute validates req, resolves the authenticated user from ctx, fetches the
// matching storage-usage events, and maps them into response DTOs.
func (svc *getStorageUsageEventsServiceImpl) Execute(ctx context.Context, req *GetStorageUsageEventsRequestDTO) (*GetStorageUsageEventsResponseDTO, error) {
	// Reject a missing payload outright.
	if req == nil {
		svc.logger.Warn("Failed validation with nil request")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "Request details are required")
	}

	// The authenticated user ID is injected into the context by middleware.
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}

	// Fetch events via the use case layer.
	ucResp, err := svc.getStorageUsageEventsUseCase.Execute(ctx, &uc_storageusageevent.GetStorageUsageEventsRequest{
		UserID:      userID,
		TrendPeriod: req.TrendPeriod,
		Year:        req.Year,
		Month:       req.Month,
		Days:        req.Days,
	})
	if err != nil {
		svc.logger.Error("Failed to get storage usage events",
			zap.String("user_id", userID.String()),
			zap.String("trend_period", req.TrendPeriod),
			zap.Error(err))
		return nil, err
	}

	// Map domain events into transport DTOs.
	events := make([]*StorageUsageEventResponseDTO, 0, len(ucResp.Events))
	for _, ev := range ucResp.Events {
		events = append(events, &StorageUsageEventResponseDTO{
			UserID:    ev.UserID,
			EventDay:  ev.EventDay,
			EventTime: ev.EventTime,
			FileSize:  ev.FileSize,
			Operation: ev.Operation,
		})
	}

	svc.logger.Debug("Storage usage events retrieved successfully",
		zap.String("user_id", userID.String()),
		zap.String("trend_period", req.TrendPeriod),
		zap.Int("event_count", len(events)))

	return &GetStorageUsageEventsResponseDTO{
		UserID:      ucResp.UserID,
		TrendPeriod: ucResp.TrendPeriod,
		StartDate:   ucResp.StartDate,
		EndDate:     ucResp.EndDate,
		Events:      events,
		EventCount:  ucResp.EventCount,
		Success:     true,
		Message:     "Storage usage events retrieved successfully",
	}, nil
}

View file

@ -0,0 +1,159 @@
// monorepo/cloud/maplefile-backend/internal/maplefile/service/storageusageevent/get_trend_analysis.go
package storageusageevent
import (
"context"
"time"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
uc_storageusageevent "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/storageusageevent"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// GetStorageUsageEventsTrendAnalysisRequestDTO selects the window over which
// to compute a storage-usage trend analysis for the authenticated user.
type GetStorageUsageEventsTrendAnalysisRequestDTO struct {
	TrendPeriod string      `json:"trend_period"` // "7days", "monthly", "yearly", "custom"
	Year        *int        `json:"year,omitempty"`
	Month       *time.Month `json:"month,omitempty"`
	Days        *int        `json:"days,omitempty"` // For custom day ranges
}

// DailyStatsResponseDTO summarizes one day's worth of add/remove activity.
type DailyStatsResponseDTO struct {
	Date         time.Time `json:"date"`
	AddEvents    int       `json:"add_events"`
	RemoveEvents int       `json:"remove_events"`
	BytesAdded   int64     `json:"bytes_added"`
	BytesRemoved int64     `json:"bytes_removed"`
	NetChange    int64     `json:"net_change"`
}

// GetStorageUsageEventsTrendAnalysisResponseDTO carries aggregate statistics
// for the requested window plus an optional per-day breakdown.
type GetStorageUsageEventsTrendAnalysisResponseDTO struct {
	UserID                gocql.UUID               `json:"user_id"`
	TrendPeriod           string                   `json:"trend_period"`
	StartDate             time.Time                `json:"start_date"`
	EndDate               time.Time                `json:"end_date"`
	TotalEvents           int                      `json:"total_events"`
	AddEvents             int                      `json:"add_events"`
	RemoveEvents          int                      `json:"remove_events"`
	TotalBytesAdded       int64                    `json:"total_bytes_added"`
	TotalBytesRemoved     int64                    `json:"total_bytes_removed"`
	NetBytesChange        int64                    `json:"net_bytes_change"`
	AverageBytesPerAdd    float64                  `json:"average_bytes_per_add"`
	AverageBytesPerRemove float64                  `json:"average_bytes_per_remove"`
	LargestAddEvent       int64                    `json:"largest_add_event"`
	LargestRemoveEvent    int64                    `json:"largest_remove_event"`
	DailyBreakdown        []*DailyStatsResponseDTO `json:"daily_breakdown,omitempty"`
	Success               bool                     `json:"success"`
	Message               string                   `json:"message"`
}

// GetStorageUsageEventsTrendAnalysisService computes trend statistics over the
// storage-usage events of the user taken from the request context.
type GetStorageUsageEventsTrendAnalysisService interface {
	Execute(ctx context.Context, req *GetStorageUsageEventsTrendAnalysisRequestDTO) (*GetStorageUsageEventsTrendAnalysisResponseDTO, error)
}

// getStorageUsageEventsTrendAnalysisServiceImpl is the default implementation
// of GetStorageUsageEventsTrendAnalysisService.
type getStorageUsageEventsTrendAnalysisServiceImpl struct {
	config                                    *config.Configuration
	logger                                    *zap.Logger
	getStorageUsageEventsTrendAnalysisUseCase uc_storageusageevent.GetStorageUsageEventsTrendAnalysisUseCase
}
// NewGetStorageUsageEventsTrendAnalysisService constructs the default
// GetStorageUsageEventsTrendAnalysisService with a service-scoped named logger.
func NewGetStorageUsageEventsTrendAnalysisService(
	config *config.Configuration,
	logger *zap.Logger,
	getStorageUsageEventsTrendAnalysisUseCase uc_storageusageevent.GetStorageUsageEventsTrendAnalysisUseCase,
) GetStorageUsageEventsTrendAnalysisService {
	svc := &getStorageUsageEventsTrendAnalysisServiceImpl{
		config: config,
		logger: logger.Named("GetStorageUsageEventsTrendAnalysisService"),
		getStorageUsageEventsTrendAnalysisUseCase: getStorageUsageEventsTrendAnalysisUseCase,
	}
	return svc
}
// Execute validates req, resolves the authenticated user from ctx, runs the
// trend-analysis use case, and maps the domain result into response DTOs.
func (svc *getStorageUsageEventsTrendAnalysisServiceImpl) Execute(ctx context.Context, req *GetStorageUsageEventsTrendAnalysisRequestDTO) (*GetStorageUsageEventsTrendAnalysisResponseDTO, error) {
	// Reject a missing payload outright.
	if req == nil {
		svc.logger.Warn("Failed validation with nil request")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "Request details are required")
	}

	// The authenticated user ID is injected into the context by middleware.
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}

	// Run the analysis through the use case layer.
	analysis, err := svc.getStorageUsageEventsTrendAnalysisUseCase.Execute(ctx, &uc_storageusageevent.GetStorageUsageEventsRequest{
		UserID:      userID,
		TrendPeriod: req.TrendPeriod,
		Year:        req.Year,
		Month:       req.Month,
		Days:        req.Days,
	})
	if err != nil {
		svc.logger.Error("Failed to get storage usage events trend analysis",
			zap.String("user_id", userID.String()),
			zap.String("trend_period", req.TrendPeriod),
			zap.Error(err))
		return nil, err
	}

	// Map the per-day domain stats into transport DTOs.
	dailyBreakdown := make([]*DailyStatsResponseDTO, 0, len(analysis.DailyBreakdown))
	for _, day := range analysis.DailyBreakdown {
		dailyBreakdown = append(dailyBreakdown, &DailyStatsResponseDTO{
			Date:         day.Date,
			AddEvents:    day.AddEvents,
			RemoveEvents: day.RemoveEvents,
			BytesAdded:   day.BytesAdded,
			BytesRemoved: day.BytesRemoved,
			NetChange:    day.NetChange,
		})
	}

	svc.logger.Debug("Storage usage events trend analysis completed successfully",
		zap.String("user_id", userID.String()),
		zap.String("trend_period", req.TrendPeriod),
		zap.Int("total_events", analysis.TotalEvents),
		zap.Int64("net_bytes_change", analysis.NetBytesChange))

	return &GetStorageUsageEventsTrendAnalysisResponseDTO{
		UserID:                analysis.UserID,
		TrendPeriod:           analysis.TrendPeriod,
		StartDate:             analysis.StartDate,
		EndDate:               analysis.EndDate,
		TotalEvents:           analysis.TotalEvents,
		AddEvents:             analysis.AddEvents,
		RemoveEvents:          analysis.RemoveEvents,
		TotalBytesAdded:       analysis.TotalBytesAdded,
		TotalBytesRemoved:     analysis.TotalBytesRemoved,
		NetBytesChange:        analysis.NetBytesChange,
		AverageBytesPerAdd:    analysis.AverageBytesPerAdd,
		AverageBytesPerRemove: analysis.AverageBytesPerRemove,
		LargestAddEvent:       analysis.LargestAddEvent,
		LargestRemoveEvent:    analysis.LargestRemoveEvent,
		DailyBreakdown:        dailyBreakdown,
		Success:               true,
		Message:               "Storage usage events trend analysis completed successfully",
	}, nil
}

View file

@ -0,0 +1,43 @@
package tag
import (
"go.uber.org/zap"
uc_tag "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/tag"
)
// ProvideTagService provides the tag service for Wire DI.
// It is a pure pass-through constructor: all arguments are forwarded to
// NewTagService unchanged and no additional wiring logic happens here.
func ProvideTagService(
	createTagUC *uc_tag.CreateTagUseCase,
	getTagByIDUC *uc_tag.GetTagByIDUseCase,
	listTagsByUserUC *uc_tag.ListTagsByUserUseCase,
	updateTagUC *uc_tag.UpdateTagUseCase,
	deleteTagUC *uc_tag.DeleteTagUseCase,
	assignTagUC *uc_tag.AssignTagUseCase,
	unassignTagUC *uc_tag.UnassignTagUseCase,
	getTagsForEntityUC *uc_tag.GetTagsForEntityUseCase,
) *TagService {
	return NewTagService(
		createTagUC,
		getTagByIDUC,
		listTagsByUserUC,
		updateTagUC,
		deleteTagUC,
		assignTagUC,
		unassignTagUC,
		getTagsForEntityUC,
	)
}
// ProvideSearchByTagsService provides the search by tags service for Wire DI.
// Pure pass-through to NewSearchByTagsService with no additional logic.
func ProvideSearchByTagsService(
	logger *zap.Logger,
	listCollectionsUC *uc_tag.ListCollectionsByTagUseCase,
	listFilesUC *uc_tag.ListFilesByTagUseCase,
) *SearchByTagsService {
	return NewSearchByTagsService(
		logger,
		listCollectionsUC,
		listFilesUC,
	)
}

View file

@ -0,0 +1,148 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/tag/search_by_tags.go
package tag
import (
"context"
"sync"
"github.com/gocql/gocql"
"go.uber.org/zap"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
dom_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/file"
uc_tag "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/tag"
)
// SearchByTagsService orchestrates searching for both collections and files by
// tags, fanning the two lookups out in parallel (see Execute).
type SearchByTagsService struct {
	logger            *zap.Logger
	listCollectionsUC *uc_tag.ListCollectionsByTagUseCase
	listFilesUC       *uc_tag.ListFilesByTagUseCase
}
// NewSearchByTagsService creates a new search by tags service with a
// service-scoped named logger.
func NewSearchByTagsService(
	logger *zap.Logger,
	listCollectionsUC *uc_tag.ListCollectionsByTagUseCase,
	listFilesUC *uc_tag.ListFilesByTagUseCase,
) *SearchByTagsService {
	svc := &SearchByTagsService{
		logger:            logger.Named("SearchByTagsService"),
		listCollectionsUC: listCollectionsUC,
		listFilesUC:       listFilesUC,
	}
	return svc
}
// SearchByTagsRequest represents the input for searching by tags.
type SearchByTagsRequest struct {
	UserID gocql.UUID
	TagIDs []gocql.UUID
	Limit  int // Total results limit (split between collections and files)
}

// SearchByTagsResponse represents the unified search results across both
// collections and files, with the per-kind counts precomputed.
type SearchByTagsResponse struct {
	Collections     []*dom_collection.Collection `json:"collections"`
	Files           []*dom_file.File             `json:"files"`
	TagCount        int                          `json:"tag_count"`
	CollectionCount int                          `json:"collection_count"`
	FileCount       int                          `json:"file_count"`
}
// Execute performs the unified tag search, querying both collections and files
// in parallel and tolerating a single-sided failure (partial results are
// returned when only one of the two lookups errors).
//
// A nil request or an empty tag list yields an empty, non-nil response with a
// nil error, so callers never need to nil-check the response on success.
func (s *SearchByTagsService) Execute(ctx context.Context, req *SearchByTagsRequest) (*SearchByTagsResponse, error) {
	// FIX: the original returned (nil, nil) for a nil request, which forced
	// every caller to nil-check the response even on a nil error. Treat a nil
	// request like an empty tag list instead.
	if req == nil || len(req.TagIDs) == 0 {
		return &SearchByTagsResponse{
			Collections:     []*dom_collection.Collection{},
			Files:           []*dom_file.File{},
			TagCount:        0,
			CollectionCount: 0,
			FileCount:       0,
		}, nil
	}
	// Clamp the requested limit into [1, 100], defaulting to 50.
	limit := req.Limit
	if limit <= 0 {
		limit = 50
	}
	if limit > 100 {
		limit = 100
	}
	// Split the budget between collections and files. Files get the remainder
	// so an odd limit is not silently reduced by one (the original did
	// limit/2 + limit/2).
	collectionsLimit := limit / 2
	filesLimit := limit - collectionsLimit
	// Run both lookups in parallel; each goroutine writes only its own
	// variables, which are read after wg.Wait(), so no extra locking is needed.
	var wg sync.WaitGroup
	wg.Add(2)
	var collections []*dom_collection.Collection
	var files []*dom_file.File
	var collectionsErr error
	var filesErr error
	go func() {
		defer wg.Done()
		collections, _, collectionsErr = s.listCollectionsUC.Execute(ctx, req.UserID, req.TagIDs, collectionsLimit, "")
		if collectionsErr != nil {
			s.logger.Warn("Failed to list collections by tags",
				zap.Error(collectionsErr),
				zap.Int("tag_count", len(req.TagIDs)))
		}
	}()
	go func() {
		defer wg.Done()
		files, _, filesErr = s.listFilesUC.Execute(ctx, req.UserID, req.TagIDs, filesLimit, "")
		if filesErr != nil {
			s.logger.Warn("Failed to list files by tags",
				zap.Error(filesErr),
				zap.Int("tag_count", len(req.TagIDs)))
		}
	}()
	wg.Wait()
	// Only fail the whole search when both sides failed; a single-sided
	// failure degrades to partial results (logged above).
	if collectionsErr != nil && filesErr != nil {
		s.logger.Error("Both collection and file queries failed",
			zap.Error(collectionsErr),
			zap.NamedError("files_error", filesErr))
		return nil, collectionsErr // Return first error
	}
	// Normalize nil slices so the JSON response always carries arrays.
	if collections == nil {
		collections = []*dom_collection.Collection{}
	}
	if files == nil {
		files = []*dom_file.File{}
	}
	response := &SearchByTagsResponse{
		Collections:     collections,
		Files:           files,
		TagCount:        len(req.TagIDs),
		CollectionCount: len(collections),
		FileCount:       len(files),
	}
	s.logger.Info("Tag search completed",
		zap.Int("tag_count", len(req.TagIDs)),
		zap.Int("collections_found", len(collections)),
		zap.Int("files_found", len(files)))
	return response, nil
}

View file

@ -0,0 +1,95 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/tag/tag.go
package tag
import (
"context"
"github.com/gocql/gocql"
dom_tag "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/tag"
uc_tag "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/tag"
)
// TagService provides business logic for tag operations.
// Note: With E2EE, the service layer primarily validates encrypted data exists
// and passes it through to use cases. Plaintext validation happens client-side.
type TagService struct {
	createUC           *uc_tag.CreateTagUseCase
	getByIDUC          *uc_tag.GetTagByIDUseCase
	listByUserUC       *uc_tag.ListTagsByUserUseCase
	updateUC           *uc_tag.UpdateTagUseCase
	deleteUC           *uc_tag.DeleteTagUseCase
	assignTagUC        *uc_tag.AssignTagUseCase
	unassignTagUC      *uc_tag.UnassignTagUseCase
	getTagsForEntityUC *uc_tag.GetTagsForEntityUseCase
}
// NewTagService assembles a TagService from its constituent use cases.
func NewTagService(
	createUC *uc_tag.CreateTagUseCase,
	getByIDUC *uc_tag.GetTagByIDUseCase,
	listByUserUC *uc_tag.ListTagsByUserUseCase,
	updateUC *uc_tag.UpdateTagUseCase,
	deleteUC *uc_tag.DeleteTagUseCase,
	assignTagUC *uc_tag.AssignTagUseCase,
	unassignTagUC *uc_tag.UnassignTagUseCase,
	getTagsForEntityUC *uc_tag.GetTagsForEntityUseCase,
) *TagService {
	svc := &TagService{
		createUC:           createUC,
		getByIDUC:          getByIDUC,
		listByUserUC:       listByUserUC,
		updateUC:           updateUC,
		deleteUC:           deleteUC,
		assignTagUC:        assignTagUC,
		unassignTagUC:      unassignTagUC,
		getTagsForEntityUC: getTagsForEntityUC,
	}
	return svc
}
// NOTE: Plaintext validation methods removed - validation happens client-side with E2EE.
// The backend only validates that encrypted data exists and is properly formatted.

// CreateTag creates a new tag with encrypted data (E2EE).
// The client must send a complete Tag object with encrypted fields.
func (s *TagService) CreateTag(ctx context.Context, tag *dom_tag.Tag) error {
	return s.createUC.Execute(ctx, tag)
}

// GetTag retrieves a tag by its ID.
func (s *TagService) GetTag(ctx context.Context, id gocql.UUID) (*dom_tag.Tag, error) {
	return s.getByIDUC.Execute(ctx, id)
}

// ListUserTags lists all tags for a user.
func (s *TagService) ListUserTags(ctx context.Context, userID gocql.UUID) ([]*dom_tag.Tag, error) {
	return s.listByUserUC.Execute(ctx, userID)
}

// UpdateTag updates a tag with new encrypted data (E2EE).
// The client must send a complete updated Tag object with encrypted fields.
func (s *TagService) UpdateTag(ctx context.Context, tag *dom_tag.Tag) error {
	return s.updateUC.Execute(ctx, tag)
}

// DeleteTag deletes the tag with the given ID on behalf of userID.
func (s *TagService) DeleteTag(ctx context.Context, userID, id gocql.UUID) error {
	return s.deleteUC.Execute(ctx, userID, id)
}

// AssignTag assigns a tag to an entity (collection or file).
func (s *TagService) AssignTag(ctx context.Context, userID, tagID, entityID gocql.UUID, entityType string) error {
	return s.assignTagUC.Execute(ctx, userID, tagID, entityID, entityType)
}

// UnassignTag removes a tag from an entity.
// NOTE(review): unlike AssignTag, no userID is threaded through here —
// presumably the use case does its own ownership/authorization check; confirm.
func (s *TagService) UnassignTag(ctx context.Context, tagID, entityID gocql.UUID, entityType string) error {
	return s.unassignTagUC.Execute(ctx, tagID, entityID, entityType)
}

// GetTagsForEntity retrieves all tags assigned to an entity.
func (s *TagService) GetTagsForEntity(ctx context.Context, entityID gocql.UUID, entityType string) ([]*dom_tag.Tag, error) {
	return s.getTagsForEntityUC.Execute(ctx, entityID, entityType)
}

// CreateDefaultTags has been removed - default tags must be created client-side
// due to E2EE. The client creates default tags after first login.

View file

@ -0,0 +1,348 @@
// monorepo/cloud/backend/internal/service/user/complete_deletion.go
package user
import (
"context"
"fmt"
"time"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
svc_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/collection"
svc_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/file"
uc_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/collection"
uc_filemetadata "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/filemetadata"
uc_storagedailyusage "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/storagedailyusage"
uc_storageusageevent "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/storageusageevent"
uc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/user"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// CompleteUserDeletionRequest represents a GDPR right-to-be-forgotten deletion request.
type CompleteUserDeletionRequest struct {
	UserID   gocql.UUID `json:"user_id"`
	Password string     `json:"password"` // For authentication
}

// DeletionResult contains comprehensive information about the deletion
// operation, including a tally of everything removed and any non-fatal errors
// accumulated along the way.
type DeletionResult struct {
	UserID             gocql.UUID `json:"user_id"`
	FilesDeleted       int        `json:"files_deleted"`
	CollectionsDeleted int        `json:"collections_deleted"`
	S3ObjectsDeleted   int        `json:"s3_objects_deleted"`
	TotalDataSizeBytes int64      `json:"total_data_size_bytes"`
	MembershipsRemoved int        `json:"memberships_removed"`
	DeletedAt          time.Time  `json:"deleted_at"`
	Success            bool       `json:"success"`
	Errors             []string   `json:"errors,omitempty"` // Non-fatal errors
}

// CompleteUserDeletionService orchestrates complete GDPR-compliant user deletion.
type CompleteUserDeletionService interface {
	Execute(ctx context.Context, req *CompleteUserDeletionRequest) (*DeletionResult, error)
}
// completeUserDeletionServiceImpl is the default implementation of
// CompleteUserDeletionService. It aggregates every use case and service needed
// to erase a user's footprint: file and collection deletion, shared-membership
// removal, storage-accounting cleanup, IP/reference anonymization, and cache
// clearing.
type completeUserDeletionServiceImpl struct {
	config                                   *config.Configuration
	logger                                   *zap.Logger
	getUserUseCase                           uc_user.UserGetByIDUseCase
	deleteUserByIDUseCase                    uc_user.UserDeleteByIDUseCase
	listFilesByOwnerIDService                svc_file.ListFilesByOwnerIDService
	softDeleteFileService                    svc_file.SoftDeleteFileService
	listCollectionsByUserUseCase             uc_collection.ListCollectionsByUserUseCase
	softDeleteCollectionService              svc_collection.SoftDeleteCollectionService
	removeUserFromAllCollectionsUseCase      uc_collection.RemoveUserFromAllCollectionsUseCase
	deleteStorageDailyUsageUseCase           uc_storagedailyusage.DeleteByUserUseCase
	deleteStorageUsageEventUseCase           uc_storageusageevent.DeleteByUserUseCase
	anonymizeUserIPsImmediatelyUseCase       uc_user.AnonymizeUserIPsImmediatelyUseCase
	clearUserCacheUseCase                    uc_user.ClearUserCacheUseCase
	anonymizeFileUserReferencesUseCase       uc_filemetadata.AnonymizeUserReferencesUseCase
	anonymizeCollectionUserReferencesUseCase uc_collection.AnonymizeUserReferencesUseCase
}
// NewCompleteUserDeletionService wires together every use case and service
// needed for a GDPR-compliant complete account deletion, with a
// service-scoped named logger.
func NewCompleteUserDeletionService(
	config *config.Configuration,
	logger *zap.Logger,
	getUserUseCase uc_user.UserGetByIDUseCase,
	deleteUserByIDUseCase uc_user.UserDeleteByIDUseCase,
	listFilesByOwnerIDService svc_file.ListFilesByOwnerIDService,
	softDeleteFileService svc_file.SoftDeleteFileService,
	listCollectionsByUserUseCase uc_collection.ListCollectionsByUserUseCase,
	softDeleteCollectionService svc_collection.SoftDeleteCollectionService,
	removeUserFromAllCollectionsUseCase uc_collection.RemoveUserFromAllCollectionsUseCase,
	deleteStorageDailyUsageUseCase uc_storagedailyusage.DeleteByUserUseCase,
	deleteStorageUsageEventUseCase uc_storageusageevent.DeleteByUserUseCase,
	anonymizeUserIPsImmediatelyUseCase uc_user.AnonymizeUserIPsImmediatelyUseCase,
	clearUserCacheUseCase uc_user.ClearUserCacheUseCase,
	anonymizeFileUserReferencesUseCase uc_filemetadata.AnonymizeUserReferencesUseCase,
	anonymizeCollectionUserReferencesUseCase uc_collection.AnonymizeUserReferencesUseCase,
) CompleteUserDeletionService {
	svc := &completeUserDeletionServiceImpl{
		config:                                   config,
		logger:                                   logger.Named("CompleteUserDeletionService"),
		getUserUseCase:                           getUserUseCase,
		deleteUserByIDUseCase:                    deleteUserByIDUseCase,
		listFilesByOwnerIDService:                listFilesByOwnerIDService,
		softDeleteFileService:                    softDeleteFileService,
		listCollectionsByUserUseCase:             listCollectionsByUserUseCase,
		softDeleteCollectionService:              softDeleteCollectionService,
		removeUserFromAllCollectionsUseCase:      removeUserFromAllCollectionsUseCase,
		deleteStorageDailyUsageUseCase:           deleteStorageDailyUsageUseCase,
		deleteStorageUsageEventUseCase:           deleteStorageUsageEventUseCase,
		anonymizeUserIPsImmediatelyUseCase:       anonymizeUserIPsImmediatelyUseCase,
		clearUserCacheUseCase:                    clearUserCacheUseCase,
		anonymizeFileUserReferencesUseCase:       anonymizeFileUserReferencesUseCase,
		anonymizeCollectionUserReferencesUseCase: anonymizeCollectionUserReferencesUseCase,
	}
	return svc
}
func (svc *completeUserDeletionServiceImpl) Execute(ctx context.Context, req *CompleteUserDeletionRequest) (*DeletionResult, error) {
//
// STEP 0: Validation
//
if req == nil {
svc.logger.Warn("Failed validation with nil request")
return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "Request is required")
}
e := make(map[string]string)
if req.UserID.String() == "" {
e["user_id"] = "User ID is required"
}
if len(e) != 0 {
svc.logger.Warn("Failed validating complete user deletion",
zap.Any("error", e))
return nil, httperror.NewForBadRequest(&e)
}
result := &DeletionResult{
UserID: req.UserID,
DeletedAt: time.Now(),
Errors: []string{},
}
svc.logger.Info("🚨 Starting GDPR right-to-be-forgotten complete user deletion",
zap.String("user_id", req.UserID.String()))
//
// STEP 1: Verify user exists
//
user, err := svc.getUserUseCase.Execute(ctx, req.UserID)
if err != nil {
svc.logger.Error("User not found for deletion",
zap.String("user_id", req.UserID.String()),
zap.Error(err))
return nil, err
}
svc.logger.Info("User verified for deletion",
zap.String("user_id", req.UserID.String()),
zap.String("email", user.Email))
//
// STEP 2: List and hard delete all user files
//
svc.logger.Info("Step 2/11: Deleting user files...")
listFilesReq := &svc_file.ListFilesByOwnerIDRequestDTO{OwnerID: req.UserID}
filesResp, err := svc.listFilesByOwnerIDService.Execute(ctx, listFilesReq)
if err != nil {
svc.logger.Error("Failed to list user files",
zap.String("user_id", req.UserID.String()),
zap.Error(err))
result.Errors = append(result.Errors, fmt.Sprintf("List files: %v", err))
} else {
result.FilesDeleted = len(filesResp.Files)
svc.logger.Info("Found files to delete",
zap.Int("file_count", result.FilesDeleted))
// Hard delete each file (no tombstone - GDPR mode)
for _, file := range filesResp.Files {
deleteFileReq := &svc_file.SoftDeleteFileRequestDTO{
FileID: file.ID,
ForceHardDelete: true, // GDPR mode - immediate permanent deletion
}
deleteResp, err := svc.softDeleteFileService.Execute(ctx, deleteFileReq)
if err != nil {
svc.logger.Error("Failed to delete file",
zap.String("file_id", file.ID.String()),
zap.Error(err))
result.Errors = append(result.Errors, fmt.Sprintf("File %s: %v", file.ID, err))
} else {
result.S3ObjectsDeleted++
result.TotalDataSizeBytes += deleteResp.ReleasedBytes
}
}
}
//
// STEP 3: List and hard delete all user collections
//
svc.logger.Info("Step 3/11: Deleting user collections...")
collections, err := svc.listCollectionsByUserUseCase.Execute(ctx, req.UserID)
if err != nil {
svc.logger.Error("Failed to list user collections",
zap.String("user_id", req.UserID.String()),
zap.Error(err))
result.Errors = append(result.Errors, fmt.Sprintf("List collections: %v", err))
} else {
result.CollectionsDeleted = len(collections)
svc.logger.Info("Found collections to delete",
zap.Int("collection_count", result.CollectionsDeleted))
// Hard delete each collection (no tombstone - GDPR mode)
for _, collection := range collections {
deleteColReq := &svc_collection.SoftDeleteCollectionRequestDTO{
ID: collection.ID,
ForceHardDelete: true, // GDPR mode - immediate permanent deletion
}
_, err := svc.softDeleteCollectionService.Execute(ctx, deleteColReq)
if err != nil {
svc.logger.Error("Failed to delete collection",
zap.String("collection_id", collection.ID.String()),
zap.Error(err))
result.Errors = append(result.Errors, fmt.Sprintf("Collection %s: %v", collection.ID, err))
}
}
}
//
// STEP 4: Remove user from shared collections
//
svc.logger.Info("Step 4/11: Removing user from shared collections...")
removedCount, err := svc.removeUserFromAllCollectionsUseCase.Execute(ctx, req.UserID, user.Email)
if err != nil {
svc.logger.Error("Failed to remove user from shared collections",
zap.String("user_id", req.UserID.String()),
zap.Error(err))
result.Errors = append(result.Errors, fmt.Sprintf("Membership cleanup: %v", err))
} else {
result.MembershipsRemoved = removedCount
svc.logger.Info("Removed user from shared collections",
zap.Int("memberships_removed", removedCount))
}
//
// STEP 5: Delete storage daily usage data
//
svc.logger.Info("Step 5/11: Deleting storage daily usage data...")
err = svc.deleteStorageDailyUsageUseCase.Execute(ctx, req.UserID)
if err != nil {
svc.logger.Error("Failed to delete storage daily usage",
zap.String("user_id", req.UserID.String()),
zap.Error(err))
result.Errors = append(result.Errors, fmt.Sprintf("Storage daily usage: %v", err))
}
//
// STEP 6: Delete storage usage events
//
svc.logger.Info("Step 6/11: Deleting storage usage events...")
err = svc.deleteStorageUsageEventUseCase.Execute(ctx, req.UserID)
if err != nil {
svc.logger.Error("Failed to delete storage usage events",
zap.String("user_id", req.UserID.String()),
zap.Error(err))
result.Errors = append(result.Errors, fmt.Sprintf("Storage usage events: %v", err))
}
//
// STEP 7: Anonymize all IP addresses
//
svc.logger.Info("Step 7/11: Anonymizing IP addresses...")
err = svc.anonymizeUserIPsImmediatelyUseCase.Execute(ctx, req.UserID)
if err != nil {
svc.logger.Error("Failed to anonymize IP addresses",
zap.String("user_id", req.UserID.String()),
zap.Error(err))
result.Errors = append(result.Errors, fmt.Sprintf("IP anonymization: %v", err))
}
//
// STEP 8: Anonymize user references in files (CreatedByUserID/ModifiedByUserID)
//
svc.logger.Info("Step 8/11: Anonymizing user references in files...")
filesUpdated, err := svc.anonymizeFileUserReferencesUseCase.Execute(ctx, req.UserID)
if err != nil {
svc.logger.Error("Failed to anonymize user references in files",
zap.String("user_id", req.UserID.String()),
zap.Error(err))
result.Errors = append(result.Errors, fmt.Sprintf("File user references: %v", err))
} else {
svc.logger.Info("Anonymized user references in files",
zap.Int("files_updated", filesUpdated))
}
//
// STEP 9: Anonymize user references in collections (CreatedByUserID/ModifiedByUserID/GrantedByID)
//
svc.logger.Info("Step 9/11: Anonymizing user references in collections...")
collectionsUpdated, err := svc.anonymizeCollectionUserReferencesUseCase.Execute(ctx, req.UserID)
if err != nil {
svc.logger.Error("Failed to anonymize user references in collections",
zap.String("user_id", req.UserID.String()),
zap.Error(err))
result.Errors = append(result.Errors, fmt.Sprintf("Collection user references: %v", err))
} else {
svc.logger.Info("Anonymized user references in collections",
zap.Int("collections_updated", collectionsUpdated))
}
//
// STEP 10: Clear cache and session data
//
svc.logger.Info("Step 10/11: Clearing cache and session data...")
err = svc.clearUserCacheUseCase.Execute(ctx, req.UserID, user.Email)
if err != nil {
svc.logger.Error("Failed to clear user cache",
zap.String("user_id", req.UserID.String()),
zap.Error(err))
result.Errors = append(result.Errors, fmt.Sprintf("Cache cleanup: %v", err))
}
//
// STEP 11: Delete user account (final step - point of no return)
//
svc.logger.Info("Step 11/11: Deleting user account (final step)...")
err = svc.deleteUserByIDUseCase.Execute(ctx, req.UserID)
if err != nil {
svc.logger.Error("CRITICAL: User account deletion failed",
zap.String("user_id", req.UserID.String()),
zap.Error(err))
return nil, fmt.Errorf("CRITICAL: User account deletion failed: %w", err)
}
//
// SUCCESS
//
result.Success = true
svc.logger.Info("✅ GDPR right-to-be-forgotten complete user deletion SUCCEEDED",
zap.String("user_id", req.UserID.String()),
zap.String("email", user.Email),
zap.Int("files_deleted", result.FilesDeleted),
zap.Int("collections_deleted", result.CollectionsDeleted),
zap.Int("s3_objects_deleted", result.S3ObjectsDeleted),
zap.Int("memberships_removed", result.MembershipsRemoved),
zap.Int64("data_size_bytes", result.TotalDataSizeBytes),
zap.Int("non_fatal_errors", len(result.Errors)))
if len(result.Errors) > 0 {
svc.logger.Warn("Deletion completed with non-fatal errors",
zap.Strings("errors", result.Errors))
}
return result, nil
}

View file

@ -0,0 +1,41 @@
package user
import (
"testing"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
)
// NOTE: Unit tests for CompleteUserDeletionService would require mocks.
// For now, this service will be tested via integration tests.
// See Task 1.10 in RIGHT_TO_BE_FORGOTTEN_IMPLEMENTATION.md
func TestCompleteUserDeletionService_Constructor(t *testing.T) {
	// Smoke test: the constructor must return a non-nil service even when
	// every collaborator is nil. Full wiring is covered by integration tests.
	var (
		cfg    = &config.Configuration{}
		logger = zap.NewNop()
	)
	svc := NewCompleteUserDeletionService(
		cfg, logger,
		nil, nil, // getUserUseCase, deleteUserByIDUseCase
		nil, nil, // listFilesByOwnerIDService, softDeleteFileService
		nil, nil, // listCollectionsByUserUseCase, softDeleteCollectionService
		nil,      // removeUserFromAllCollectionsUseCase
		nil, nil, // deleteStorageDailyUsageUseCase, deleteStorageUsageEventUseCase
		nil,      // anonymizeUserIPsImmediatelyUseCase
		nil,      // clearUserCacheUseCase
		nil, nil, // anonymizeFileUserReferencesUseCase, anonymizeCollectionUserReferencesUseCase
	)
	if svc == nil {
		t.Error("Expected service to be created, got nil")
	}
}

View file

@ -0,0 +1,61 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/user/provider.go
package user
import (
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
svc_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/collection"
svc_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/file"
uc_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/collection"
uc_filemetadata "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/filemetadata"
uc_storagedailyusage "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/storagedailyusage"
uc_storageusageevent "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/storageusageevent"
uc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/user"
)
// ProvideUserPublicLookupService wires up the service that resolves a user's
// public identity material (public key, verification ID) by email.
//
// The first parameter is named cfg rather than config so it does not shadow
// the imported "config" package, and for consistency with
// ProvideCompleteUserDeletionService below. Go call sites are positional, so
// the rename is backward-compatible.
func ProvideUserPublicLookupService(
	cfg *config.Config,
	logger *zap.Logger,
	userGetByEmailUC uc_user.UserGetByEmailUseCase,
) UserPublicLookupService {
	return NewUserPublicLookupService(cfg, logger, userGetByEmailUC)
}
// ProvideCompleteUserDeletionService wires up the GDPR right-to-be-forgotten
// deletion service with every use case and sub-service it orchestrates.
// It is a pure pass-through: argument order matches the constructor exactly.
func ProvideCompleteUserDeletionService(
	cfg *config.Configuration,
	logger *zap.Logger,
	getUserUC uc_user.UserGetByIDUseCase,
	deleteUserUC uc_user.UserDeleteByIDUseCase,
	listFilesByOwnerSvc svc_file.ListFilesByOwnerIDService,
	softDeleteFileSvc svc_file.SoftDeleteFileService,
	listCollectionsUC uc_collection.ListCollectionsByUserUseCase,
	softDeleteCollectionSvc svc_collection.SoftDeleteCollectionService,
	removeMembershipsUC uc_collection.RemoveUserFromAllCollectionsUseCase,
	deleteDailyUsageUC uc_storagedailyusage.DeleteByUserUseCase,
	deleteUsageEventUC uc_storageusageevent.DeleteByUserUseCase,
	anonymizeIPsUC uc_user.AnonymizeUserIPsImmediatelyUseCase,
	clearCacheUC uc_user.ClearUserCacheUseCase,
	anonymizeFileRefsUC uc_filemetadata.AnonymizeUserReferencesUseCase,
	anonymizeCollectionRefsUC uc_collection.AnonymizeUserReferencesUseCase,
) CompleteUserDeletionService {
	return NewCompleteUserDeletionService(
		cfg, logger,
		getUserUC, deleteUserUC,
		listFilesByOwnerSvc, softDeleteFileSvc,
		listCollectionsUC, softDeleteCollectionSvc,
		removeMembershipsUC,
		deleteDailyUsageUC, deleteUsageEventUC,
		anonymizeIPsUC,
		clearCacheUC,
		anonymizeFileRefsUC, anonymizeCollectionRefsUC,
	)
}

View file

@ -0,0 +1,109 @@
// codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/user/publiclookup.go
package user
import (
"context"
"encoding/base64"
"strings"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
uc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/user"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/validation"
)
// UserPublicLookupRequestDTO is the inbound request payload for a public
// user lookup by email.
type UserPublicLookupRequestDTO struct {
	Email string `json:"email"`
}

// UserPublicLookupResponseDTO carries the publicly shareable identity
// material returned for a successful lookup: the user's ID, email, display
// name, base64-encoded public key, and verification ID.
type UserPublicLookupResponseDTO struct {
	UserID            string `json:"user_id"`
	Email             string `json:"email"`
	Name              string `json:"name"`                 // Optional: for display
	PublicKeyInBase64 string `json:"public_key_in_base64"` // Base64 encoded
	VerificationID    string `json:"verification_id"`
}

// UserPublicLookupService resolves a user's public identity material by email.
type UserPublicLookupService interface {
	Execute(ctx context.Context, req *UserPublicLookupRequestDTO) (*UserPublicLookupResponseDTO, error)
}

// userPublicLookupServiceImpl implements UserPublicLookupService, backed by
// the user-by-email use case.
type userPublicLookupServiceImpl struct {
	config           *config.Config
	logger           *zap.Logger
	userGetByEmailUC uc_user.UserGetByEmailUseCase
}
// NewUserPublicLookupService constructs the public lookup service with a
// service-scoped named logger.
func NewUserPublicLookupService(
	cfg *config.Config,
	logger *zap.Logger,
	userGetByEmailUC uc_user.UserGetByEmailUseCase,
) UserPublicLookupService {
	logger = logger.Named("UserPublicLookupService")
	// Keyed composite literal: unlike the positional form, this stays correct
	// if fields of userPublicLookupServiceImpl are ever reordered or added.
	return &userPublicLookupServiceImpl{
		config:           cfg,
		logger:           logger,
		userGetByEmailUC: userGetByEmailUC,
	}
}
// Execute resolves a user's public identity material (public key,
// verification ID) by email. On both lookup failure and unknown email it
// returns the same generic 400 response so callers cannot distinguish the
// two cases (prevents user-enumeration attacks).
func (svc *userPublicLookupServiceImpl) Execute(ctx context.Context, req *UserPublicLookupRequestDTO) (*UserPublicLookupResponseDTO, error) {
	//
	// STEP 1: Sanitization of the input.
	//
	// Defensive Code: For security purposes we need to perform some sanitization on the inputs.
	req.Email = strings.ToLower(req.Email)
	req.Email = strings.ReplaceAll(req.Email, " ", "")
	req.Email = strings.ReplaceAll(req.Email, "\t", "")
	req.Email = strings.TrimSpace(req.Email)
	svc.logger.Debug("sanitized email",
		zap.String("email", validation.MaskEmail(req.Email)))
	//
	// STEP 2: Validation of input.
	//
	e := make(map[string]string)
	if req.Email == "" {
		e["email"] = "Email is required"
	}
	if len(req.Email) > 255 {
		e["email"] = "Email is too long"
	}
	if len(e) != 0 {
		svc.logger.Warn("failed validating",
			zap.Any("e", e))
		return nil, httperror.NewForBadRequest(&e)
	}
	//
	// STEP 3: Lookup user by email
	//
	// Lookup the user in our database, else return a `400 Bad Request` error.
	// Note: We return a generic error message to prevent user enumeration attacks.
	u, err := svc.userGetByEmailUC.Execute(ctx, req.Email)
	if err != nil {
		// zap.Error (not zap.Any) for structured error logging, consistent
		// with error logging elsewhere in this codebase.
		svc.logger.Error("failed getting user by email from database",
			zap.Error(err))
		return nil, httperror.NewForBadRequestWithSingleField("email", "Unable to complete lookup")
	}
	if u == nil {
		svc.logger.Warn("user lookup attempted for non-existent email",
			zap.String("email", validation.MaskEmail(req.Email)))
		// Return same error message as above to prevent user enumeration
		return nil, httperror.NewForBadRequestWithSingleField("email", "Unable to complete lookup")
	}
	// STEP 4: Build response DTO
	// NOTE(review): assumes u.SecurityData and u.SecurityData.PublicKey are
	// always populated for a persisted user — confirm; otherwise this line
	// can panic on a nil dereference.
	dto := &UserPublicLookupResponseDTO{
		UserID:            u.ID.String(),
		Email:             u.Email,
		Name:              u.Name,
		PublicKeyInBase64: base64.StdEncoding.EncodeToString(u.SecurityData.PublicKey.Key),
		VerificationID:    u.SecurityData.VerificationID,
	}
	return dto, nil
}