Initial commit: Open sourcing all of the Maple Open Technologies code.

This commit is contained in:
Bartlomiej Mika 2025-12-02 14:33:08 -05:00
commit 755d54a99d
2010 changed files with 448675 additions and 0 deletions

View file

@ -0,0 +1,148 @@
// monorepo/cloud/backend/internal/maplefile/service/file/archive.go
package file
import (
"context"
"time"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
dom_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/file"
uc_filemetadata "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/filemetadata"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// ArchiveFileRequestDTO is the request payload for archiving a single file.
type ArchiveFileRequestDTO struct {
	// FileID identifies the file to archive.
	FileID gocql.UUID `json:"file_id"`
}
// ArchiveFileResponseDTO reports the outcome of an archive operation.
type ArchiveFileResponseDTO struct {
	// Success is true when the file was transitioned to the archived state.
	Success bool `json:"success"`
	// Message is a human-readable status message.
	Message string `json:"message"`
}
// ArchiveFileService archives a file on behalf of the authenticated user
// (the user is resolved from the request context).
type ArchiveFileService interface {
	Execute(ctx context.Context, req *ArchiveFileRequestDTO) (*ArchiveFileResponseDTO, error)
}
// archiveFileServiceImpl is the default ArchiveFileService implementation.
// It depends on the collection repository for access checks and on the
// file-metadata use cases for reading/updating file state.
type archiveFileServiceImpl struct {
	config                *config.Configuration
	logger                *zap.Logger
	collectionRepo        dom_collection.CollectionRepository
	getMetadataUseCase    uc_filemetadata.GetFileMetadataUseCase
	updateMetadataUseCase uc_filemetadata.UpdateFileMetadataUseCase
}
// NewArchiveFileService wires together a ready-to-use ArchiveFileService,
// tagging the supplied logger with the service name.
func NewArchiveFileService(
	config *config.Configuration,
	logger *zap.Logger,
	collectionRepo dom_collection.CollectionRepository,
	getMetadataUseCase uc_filemetadata.GetFileMetadataUseCase,
	updateMetadataUseCase uc_filemetadata.UpdateFileMetadataUseCase,
) ArchiveFileService {
	svc := &archiveFileServiceImpl{
		config:                config,
		logger:                logger.Named("ArchiveFileService"),
		collectionRepo:        collectionRepo,
		getMetadataUseCase:    getMetadataUseCase,
		updateMetadataUseCase: updateMetadataUseCase,
	}
	return svc
}
// Execute archives a single file for the authenticated user.
//
// Flow: validate the request, resolve the caller from the session context,
// load the file metadata, confirm the caller has read-write access to the
// file's collection, validate the state transition, then persist the file
// in the archived state (bumping its version).
func (svc *archiveFileServiceImpl) Execute(ctx context.Context, req *ArchiveFileRequestDTO) (*ArchiveFileResponseDTO, error) {
	//
	// STEP 1: Validation
	//
	if req == nil {
		svc.logger.Warn("Failed validation with nil request")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "File ID is required")
	}
	// BUGFIX: gocql.UUID.String() always yields a 36-character string (a
	// zero UUID renders as "00000000-0000-0000-0000-000000000000"), so the
	// previous `req.FileID.String() == ""` check was dead code. Compare
	// against the zero UUID value instead.
	if req.FileID == (gocql.UUID{}) {
		svc.logger.Warn("Empty file ID provided")
		return nil, httperror.NewForBadRequestWithSingleField("file_id", "File ID is required")
	}

	//
	// STEP 2: Get user ID from context
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}

	//
	// STEP 3: Get file metadata (including any state for archiving)
	//
	file, err := svc.getMetadataUseCase.Execute(req.FileID)
	if err != nil {
		svc.logger.Error("Failed to get file metadata",
			zap.Any("error", err),
			zap.Any("file_id", req.FileID))
		return nil, err
	}

	//
	// STEP 4: Check if user has write access to the file's collection
	//
	hasAccess, err := svc.collectionRepo.CheckAccess(ctx, file.CollectionID, userID, dom_collection.CollectionPermissionReadWrite)
	if err != nil {
		svc.logger.Error("Failed to check collection access",
			zap.Any("error", err),
			zap.Any("collection_id", file.CollectionID),
			zap.Any("user_id", userID))
		return nil, err
	}
	if !hasAccess {
		svc.logger.Warn("Unauthorized file archive attempt",
			zap.Any("user_id", userID),
			zap.Any("file_id", req.FileID),
			zap.Any("collection_id", file.CollectionID))
		return nil, httperror.NewForForbiddenWithSingleField("message", "You don't have permission to archive this file")
	}

	//
	// STEP 5: Validate state transition (the domain layer decides which
	// current states may move to "archived").
	//
	err = dom_file.IsValidStateTransition(file.State, dom_file.FileStateArchived)
	if err != nil {
		svc.logger.Warn("Invalid state transition for file archive",
			zap.Any("file_id", req.FileID),
			zap.String("current_state", file.State),
			zap.String("target_state", dom_file.FileStateArchived),
			zap.Error(err))
		return nil, httperror.NewForBadRequestWithSingleField("state", err.Error())
	}

	//
	// STEP 6: Archive the file
	//
	file.State = dom_file.FileStateArchived
	file.Version++ // Mutation means we increment version.
	file.ModifiedAt = time.Now()
	file.ModifiedByUserID = userID
	err = svc.updateMetadataUseCase.Execute(ctx, file)
	if err != nil {
		svc.logger.Error("Failed to archive file",
			zap.Any("error", err),
			zap.Any("file_id", req.FileID))
		return nil, err
	}

	svc.logger.Info("File archived successfully",
		zap.Any("file_id", req.FileID),
		zap.Any("collection_id", file.CollectionID),
		zap.Any("user_id", userID))
	return &ArchiveFileResponseDTO{
		Success: true,
		Message: "File archived successfully",
	}, nil
}

View file

@ -0,0 +1,442 @@
// monorepo/cloud/backend/internal/maplefile/service/file/complete_file_upload.go
package file
import (
"context"
"fmt"
"time"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
uc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/user"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
dom_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/file"
uc_filemetadata "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/filemetadata"
uc_fileobjectstorage "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/fileobjectstorage"
uc_storagedailyusage "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/storagedailyusage"
uc_storageusageevent "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/storageusageevent"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/transaction"
)
// CompleteFileUploadRequestDTO is the payload a client sends to finalize a
// previously created pending file upload. Only FileID is mandatory; the
// remaining fields let the client cross-check what the server measures.
type CompleteFileUploadRequestDTO struct {
	FileID gocql.UUID `json:"file_id"`
	// Optional: Client can provide actual file size for validation
	ActualFileSizeInBytes int64 `json:"actual_file_size_in_bytes,omitempty"`
	// Optional: Client can provide actual thumbnail size for validation
	ActualThumbnailSizeInBytes int64 `json:"actual_thumbnail_size_in_bytes,omitempty"`
	// Optional: Client can confirm successful upload
	UploadConfirmed          bool `json:"upload_confirmed,omitempty"`
	ThumbnailUploadConfirmed bool `json:"thumbnail_upload_confirmed,omitempty"`
}
// CompleteFileUploadResponseDTO describes the finalized upload: the updated
// file record, the sizes measured in object storage, and how the storage
// quota was adjusted relative to what was originally reserved.
type CompleteFileUploadResponseDTO struct {
	File                *FileResponseDTO `json:"file"`
	Success             bool             `json:"success"`
	Message             string           `json:"message"`
	ActualFileSize      int64            `json:"actual_file_size"`
	ActualThumbnailSize int64            `json:"actual_thumbnail_size"`
	UploadVerified      bool             `json:"upload_verified"`
	ThumbnailVerified   bool             `json:"thumbnail_verified"`
	StorageAdjustment   int64            `json:"storage_adjustment"` // Positive if more space used, negative if less
}
// CompleteFileUploadService finalizes a pending file upload, verifying the
// object in storage and reconciling the owner's storage accounting.
type CompleteFileUploadService interface {
	Execute(ctx context.Context, req *CompleteFileUploadRequestDTO) (*CompleteFileUploadResponseDTO, error)
}
// completeFileUploadServiceImpl is the default CompleteFileUploadService
// implementation. It coordinates object-storage verification, metadata
// updates, quota adjustment, and storage-usage bookkeeping.
type completeFileUploadServiceImpl struct {
	config                    *config.Configuration
	logger                    *zap.Logger
	collectionRepo            dom_collection.CollectionRepository
	getMetadataUseCase        uc_filemetadata.GetFileMetadataUseCase
	updateMetadataUseCase     uc_filemetadata.UpdateFileMetadataUseCase
	verifyObjectExistsUseCase uc_fileobjectstorage.VerifyObjectExistsUseCase
	getObjectSizeUseCase      uc_fileobjectstorage.GetObjectSizeUseCase
	deleteDataUseCase         uc_fileobjectstorage.DeleteEncryptedDataUseCase
	storageQuotaHelperUseCase uc_user.UserStorageQuotaHelperUseCase
	// Add storage usage tracking use cases
	createStorageUsageEventUseCase uc_storageusageevent.CreateStorageUsageEventUseCase
	updateStorageUsageUseCase      uc_storagedailyusage.UpdateStorageUsageUseCase
}
// NewCompleteFileUploadService assembles a CompleteFileUploadService from
// its collaborators, tagging the supplied logger with the service name.
func NewCompleteFileUploadService(
	config *config.Configuration,
	logger *zap.Logger,
	collectionRepo dom_collection.CollectionRepository,
	getMetadataUseCase uc_filemetadata.GetFileMetadataUseCase,
	updateMetadataUseCase uc_filemetadata.UpdateFileMetadataUseCase,
	verifyObjectExistsUseCase uc_fileobjectstorage.VerifyObjectExistsUseCase,
	getObjectSizeUseCase uc_fileobjectstorage.GetObjectSizeUseCase,
	deleteDataUseCase uc_fileobjectstorage.DeleteEncryptedDataUseCase,
	storageQuotaHelperUseCase uc_user.UserStorageQuotaHelperUseCase,
	createStorageUsageEventUseCase uc_storageusageevent.CreateStorageUsageEventUseCase,
	updateStorageUsageUseCase uc_storagedailyusage.UpdateStorageUsageUseCase,
) CompleteFileUploadService {
	svc := &completeFileUploadServiceImpl{
		config:                         config,
		logger:                         logger.Named("CompleteFileUploadService"),
		collectionRepo:                 collectionRepo,
		getMetadataUseCase:             getMetadataUseCase,
		updateMetadataUseCase:          updateMetadataUseCase,
		verifyObjectExistsUseCase:      verifyObjectExistsUseCase,
		getObjectSizeUseCase:           getObjectSizeUseCase,
		deleteDataUseCase:              deleteDataUseCase,
		storageQuotaHelperUseCase:      storageQuotaHelperUseCase,
		createStorageUsageEventUseCase: createStorageUsageEventUseCase,
		updateStorageUsageUseCase:      updateStorageUsageUseCase,
	}
	return svc
}
// Execute finalizes a pending file upload. It verifies the encrypted
// payload (and optional thumbnail) actually landed in object storage,
// reconciles the owner's storage quota against the measured sizes, flips
// the file metadata from pending to active, and records storage-usage
// events — the quota, metadata, and usage steps run under a SAGA so a
// failure in any later step compensates the earlier ones.
func (svc *completeFileUploadServiceImpl) Execute(ctx context.Context, req *CompleteFileUploadRequestDTO) (*CompleteFileUploadResponseDTO, error) {
	//
	// STEP 1: Validation
	//
	if req == nil {
		svc.logger.Warn("⚠️ Failed validation with nil request")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "File completion details are required")
	}
	// BUGFIX: gocql.UUID.String() always yields a 36-character string (a
	// zero UUID renders as "00000000-0000-0000-0000-000000000000"), so the
	// previous `req.FileID.String() == ""` check was dead code. Compare
	// against the zero UUID value instead.
	if req.FileID == (gocql.UUID{}) {
		svc.logger.Warn("⚠️ Empty file ID provided")
		return nil, httperror.NewForBadRequestWithSingleField("file_id", "File ID is required")
	}

	//
	// STEP 2: Get user ID from context
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("🔴 Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}

	//
	// STEP 3: Get file metadata
	//
	// NOTE(review): the original note mentions `ExecuteWithAnyState` (the
	// freshly created FileMetadata is in the `pending` state) but the code
	// calls `Execute` — confirm the metadata use case returns pending files.
	file, err := svc.getMetadataUseCase.Execute(req.FileID)
	if err != nil {
		svc.logger.Error("🔴 Failed to get file metadata",
			zap.Any("error", err),
			zap.Any("file_id", req.FileID))
		return nil, err
	}

	//
	// STEP 4: Verify user has write access to the file's collection
	//
	hasAccess, err := svc.collectionRepo.CheckAccess(ctx, file.CollectionID, userID, dom_collection.CollectionPermissionReadWrite)
	if err != nil {
		svc.logger.Error("🔴 Failed to check collection access",
			zap.Any("error", err),
			zap.Any("collection_id", file.CollectionID),
			zap.Any("user_id", userID))
		return nil, err
	}
	if !hasAccess {
		svc.logger.Warn("⚠️ Unauthorized file completion attempt",
			zap.Any("user_id", userID),
			zap.Any("file_id", req.FileID),
			zap.Any("collection_id", file.CollectionID))
		return nil, httperror.NewForForbiddenWithSingleField("message", "You don't have permission to complete this file upload")
	}

	//
	// STEP 5: Verify file is in pending state
	//
	if file.State != dom_file.FileStatePending {
		svc.logger.Warn("⚠️ File is not in pending state",
			zap.Any("file_id", req.FileID),
			zap.String("current_state", file.State))
		return nil, httperror.NewForBadRequestWithSingleField("file_id", fmt.Sprintf("File is not in pending state (current state: %s)", file.State))
	}

	//
	// STEP 6: Verify file exists in object storage and get actual size
	//
	fileExists, err := svc.verifyObjectExistsUseCase.Execute(file.EncryptedFileObjectKey)
	if err != nil {
		svc.logger.Error("🔴 Failed to verify file exists in storage",
			zap.Any("error", err),
			zap.Any("file_id", req.FileID),
			zap.String("storage_path", file.EncryptedFileObjectKey))
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Failed to verify file upload")
	}
	if !fileExists {
		svc.logger.Warn("⚠️ File does not exist in storage",
			zap.Any("file_id", req.FileID),
			zap.String("storage_path", file.EncryptedFileObjectKey))
		return nil, httperror.NewForBadRequestWithSingleField("file_id", "File has not been uploaded yet")
	}
	// Get actual file size from storage (storage is authoritative, not the client).
	actualFileSize, err := svc.getObjectSizeUseCase.Execute(file.EncryptedFileObjectKey)
	if err != nil {
		svc.logger.Error("🔴 Failed to get file size from storage",
			zap.Any("error", err),
			zap.Any("file_id", req.FileID),
			zap.String("storage_path", file.EncryptedFileObjectKey))
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Failed to verify file size")
	}

	//
	// STEP 7: Verify thumbnail if expected. Thumbnail problems are
	// non-fatal: we log, mark it unverified, and continue.
	//
	var actualThumbnailSize int64
	thumbnailVerified := true
	if file.EncryptedThumbnailObjectKey != "" {
		thumbnailExists, err := svc.verifyObjectExistsUseCase.Execute(file.EncryptedThumbnailObjectKey)
		if err != nil {
			svc.logger.Warn("⚠️ Failed to verify thumbnail exists, continuing without it",
				zap.Any("error", err),
				zap.Any("file_id", req.FileID),
				zap.String("thumbnail_storage_path", file.EncryptedThumbnailObjectKey))
			thumbnailVerified = false
		} else if thumbnailExists {
			actualThumbnailSize, err = svc.getObjectSizeUseCase.Execute(file.EncryptedThumbnailObjectKey)
			if err != nil {
				svc.logger.Warn("⚠️ Failed to get thumbnail size, continuing without it",
					zap.Any("error", err),
					zap.Any("file_id", req.FileID),
					zap.String("thumbnail_storage_path", file.EncryptedThumbnailObjectKey))
				thumbnailVerified = false
			}
		} else {
			// Thumbnail was expected but not uploaded - clear the path
			file.EncryptedThumbnailObjectKey = ""
			thumbnailVerified = false
		}
	}

	//
	// SAGA: Initialize distributed transaction manager
	//
	saga := transaction.NewSaga("complete-file-upload", svc.logger)

	//
	// STEP 8: Calculate storage adjustment and update quota
	//
	expectedTotalSize := file.EncryptedFileSizeInBytes + file.EncryptedThumbnailSizeInBytes
	actualTotalSize := actualFileSize + actualThumbnailSize
	storageAdjustment := actualTotalSize - expectedTotalSize
	svc.logger.Info("Starting file upload completion with SAGA protection",
		zap.String("file_id", req.FileID.String()),
		zap.Int64("expected_file_size", file.EncryptedFileSizeInBytes),
		zap.Int64("actual_file_size", actualFileSize),
		zap.Int64("expected_thumbnail_size", file.EncryptedThumbnailSizeInBytes),
		zap.Int64("actual_thumbnail_size", actualThumbnailSize),
		zap.Int64("expected_total", expectedTotalSize),
		zap.Int64("actual_total", actualTotalSize),
		zap.Int64("adjustment", storageAdjustment))
	// Handle storage quota adjustment (SAGA protected)
	if storageAdjustment != 0 {
		if storageAdjustment > 0 {
			// Need more quota than originally reserved
			err = svc.storageQuotaHelperUseCase.CheckAndReserveQuota(ctx, userID, storageAdjustment)
			if err != nil {
				svc.logger.Error("Failed to reserve additional storage quota",
					zap.String("user_id", userID.String()),
					zap.Int64("additional_size", storageAdjustment),
					zap.Error(err))
				// Clean up the uploaded file since we can't complete due to quota
				// Note: This is an exceptional case - quota exceeded before any SAGA operations
				if deleteErr := svc.deleteDataUseCase.Execute(file.EncryptedFileObjectKey); deleteErr != nil {
					svc.logger.Error("Failed to clean up file after quota exceeded", zap.Error(deleteErr))
				}
				if file.EncryptedThumbnailObjectKey != "" {
					if deleteErr := svc.deleteDataUseCase.Execute(file.EncryptedThumbnailObjectKey); deleteErr != nil {
						svc.logger.Error("Failed to clean up thumbnail after quota exceeded", zap.Error(deleteErr))
					}
				}
				saga.Rollback(ctx)
				return nil, err
			}
			// Register compensation: release the additional quota if later steps fail
			storageAdjustmentCaptured := storageAdjustment
			userIDCaptured := userID
			saga.AddCompensation(func(ctx context.Context) error {
				svc.logger.Warn("SAGA compensation: releasing additional reserved quota",
					zap.Int64("size", storageAdjustmentCaptured))
				return svc.storageQuotaHelperUseCase.ReleaseQuota(ctx, userIDCaptured, storageAdjustmentCaptured)
			})
		} else {
			// Used less quota than originally reserved, release the difference
			err = svc.storageQuotaHelperUseCase.ReleaseQuota(ctx, userID, -storageAdjustment)
			if err != nil {
				svc.logger.Error("Failed to release excess quota",
					zap.String("user_id", userID.String()),
					zap.Int64("excess_size", -storageAdjustment),
					zap.Error(err))
				saga.Rollback(ctx)
				return nil, err
			}
			// Register compensation: re-reserve the released quota if later steps fail
			excessQuotaCaptured := -storageAdjustment
			userIDCaptured := userID
			saga.AddCompensation(func(ctx context.Context) error {
				svc.logger.Warn("SAGA compensation: re-reserving released excess quota",
					zap.Int64("size", excessQuotaCaptured))
				return svc.storageQuotaHelperUseCase.CheckAndReserveQuota(ctx, userIDCaptured, excessQuotaCaptured)
			})
		}
	}

	//
	// STEP 9: Validate file size if client provided it
	//
	if req.ActualFileSizeInBytes > 0 && req.ActualFileSizeInBytes != actualFileSize {
		svc.logger.Warn("⚠️ File size mismatch between client and storage",
			zap.Any("file_id", req.FileID),
			zap.Int64("client_reported_size", req.ActualFileSizeInBytes),
			zap.Int64("storage_actual_size", actualFileSize))
		// Continue with storage size as authoritative
	}

	//
	// STEP 10: Update file metadata to active state (SAGA protected)
	//
	// Snapshot the fields we mutate so the compensation can restore them.
	originalState := file.State
	originalFileSizeInBytes := file.EncryptedFileSizeInBytes
	originalThumbnailSizeInBytes := file.EncryptedThumbnailSizeInBytes
	originalVersion := file.Version
	file.EncryptedFileSizeInBytes = actualFileSize
	file.EncryptedThumbnailSizeInBytes = actualThumbnailSize
	file.State = dom_file.FileStateActive
	file.ModifiedAt = time.Now()
	file.ModifiedByUserID = userID
	file.Version++ // Every mutation we need to keep a track of.
	err = svc.updateMetadataUseCase.Execute(ctx, file)
	if err != nil {
		svc.logger.Error("Failed to update file metadata to active state",
			zap.Error(err),
			zap.String("file_id", req.FileID.String()))
		saga.Rollback(ctx)
		return nil, err
	}
	// Register compensation: restore original metadata state
	fileIDCaptured := file.ID
	originalStateCaptured := originalState
	originalFileSizeCaptured := originalFileSizeInBytes
	originalThumbnailSizeCaptured := originalThumbnailSizeInBytes
	originalVersionCaptured := originalVersion
	saga.AddCompensation(func(ctx context.Context) error {
		svc.logger.Warn("SAGA compensation: restoring file metadata to pending state",
			zap.String("file_id", fileIDCaptured.String()))
		restoredFile, err := svc.getMetadataUseCase.Execute(fileIDCaptured)
		if err != nil {
			return err
		}
		restoredFile.State = originalStateCaptured
		restoredFile.EncryptedFileSizeInBytes = originalFileSizeCaptured
		restoredFile.EncryptedThumbnailSizeInBytes = originalThumbnailSizeCaptured
		restoredFile.Version = originalVersionCaptured
		restoredFile.ModifiedAt = time.Now()
		// Note: The repository Update method handles file count adjustments based on state changes,
		// so restoring to pending state will automatically decrement the file count
		return svc.updateMetadataUseCase.Execute(ctx, restoredFile)
	})
	// Note: File count increment is handled by the repository's Update method when state changes
	// from pending to active. No explicit increment needed here to avoid double counting.

	//
	// STEP 11: Create storage usage event (SAGA protected)
	//
	err = svc.createStorageUsageEventUseCase.Execute(ctx, file.OwnerID, actualTotalSize, "add")
	if err != nil {
		svc.logger.Error("Failed to create storage usage event",
			zap.String("owner_id", file.OwnerID.String()),
			zap.Int64("file_size", actualTotalSize),
			zap.Error(err))
		saga.Rollback(ctx)
		return nil, err
	}
	// Register compensation: create compensating "remove" event
	ownerIDCaptured := file.OwnerID
	actualTotalSizeCaptured := actualTotalSize
	saga.AddCompensation(func(ctx context.Context) error {
		svc.logger.Warn("SAGA compensation: creating compensating usage event")
		return svc.createStorageUsageEventUseCase.Execute(ctx, ownerIDCaptured, actualTotalSizeCaptured, "remove")
	})

	//
	// STEP 12: Update daily storage usage (SAGA protected)
	// (was mislabeled STEP 13 — there is no step between 11 and this one)
	//
	today := time.Now().Truncate(24 * time.Hour)
	updateReq := &uc_storagedailyusage.UpdateStorageUsageRequest{
		UserID:      file.OwnerID,
		UsageDay:    &today,
		TotalBytes:  actualTotalSize,
		AddBytes:    actualTotalSize,
		RemoveBytes: 0,
		IsIncrement: true, // Increment the existing values
	}
	err = svc.updateStorageUsageUseCase.Execute(ctx, updateReq)
	if err != nil {
		svc.logger.Error("Failed to update daily storage usage",
			zap.String("owner_id", file.OwnerID.String()),
			zap.Int64("file_size", actualTotalSize),
			zap.Error(err))
		saga.Rollback(ctx)
		return nil, err
	}
	// Register compensation: reverse the usage update
	saga.AddCompensation(func(ctx context.Context) error {
		svc.logger.Warn("SAGA compensation: reversing daily usage update")
		compensateReq := &uc_storagedailyusage.UpdateStorageUsageRequest{
			UserID:      ownerIDCaptured,
			UsageDay:    &today,
			TotalBytes:  -actualTotalSizeCaptured, // Negative to reverse
			AddBytes:    0,
			RemoveBytes: actualTotalSizeCaptured,
			IsIncrement: true,
		}
		return svc.updateStorageUsageUseCase.Execute(ctx, compensateReq)
	})

	//
	// SUCCESS: All operations completed with SAGA protection
	//
	svc.logger.Info("File upload completed successfully with SAGA protection",
		zap.String("file_id", req.FileID.String()),
		zap.String("collection_id", file.CollectionID.String()),
		zap.String("owner_id", file.OwnerID.String()),
		zap.Int64("actual_file_size", actualFileSize),
		zap.Int64("actual_thumbnail_size", actualThumbnailSize),
		zap.Int64("storage_adjustment", storageAdjustment))
	return &CompleteFileUploadResponseDTO{
		File:                mapFileToDTO(file),
		Success:             true,
		Message:             "File upload completed successfully with storage quota updated",
		ActualFileSize:      actualFileSize,
		ActualThumbnailSize: actualThumbnailSize,
		UploadVerified:      true,
		ThumbnailVerified:   thumbnailVerified,
		StorageAdjustment:   storageAdjustment,
	}, nil
}

View file

@ -0,0 +1,395 @@
// monorepo/cloud/backend/internal/maplefile/service/file/create_pending_file.go
package file
import (
"context"
"fmt"
"time"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/crypto"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
dom_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/file"
dom_tag "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/tag"
uc_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/collection"
uc_filemetadata "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/filemetadata"
uc_fileobjectstorage "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/fileobjectstorage"
uc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/user"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// CreatePendingFileRequestDTO is the payload for registering a new file in
// the pending state ahead of the actual (presigned) upload. The ID is
// generated client-side; encrypted fields are opaque to the server (E2EE).
type CreatePendingFileRequestDTO struct {
	ID                gocql.UUID              `json:"id"`
	CollectionID      gocql.UUID              `json:"collection_id"`
	EncryptedMetadata string                  `json:"encrypted_metadata"`
	EncryptedFileKey  crypto.EncryptedFileKey `json:"encrypted_file_key"`
	EncryptionVersion string                  `json:"encryption_version"`
	EncryptedHash     string                  `json:"encrypted_hash"`
	// Optional: expected file size for validation (in bytes)
	ExpectedFileSizeInBytes int64 `json:"expected_file_size_in_bytes,omitempty"`
	// Optional: expected thumbnail size for validation (in bytes)
	ExpectedThumbnailSizeInBytes int64 `json:"expected_thumbnail_size_in_bytes,omitempty"`
	// Optional: content type for file upload validation (e.g., "image/jpeg", "video/mp4")
	// Required for album uploads to enforce photo/video restrictions
	ContentType string `json:"content_type,omitempty"`
	// Optional: tag IDs to embed in file at creation time
	TagIDs []gocql.UUID `json:"tag_ids,omitempty"`
}
// FileResponseDTO is the serialized representation of a file's metadata
// record returned by the file services (shared across this package).
type FileResponseDTO struct {
	ID                            gocql.UUID              `json:"id"`
	CollectionID                  gocql.UUID              `json:"collection_id"`
	OwnerID                       gocql.UUID              `json:"owner_id"`
	EncryptedMetadata             string                  `json:"encrypted_metadata"`
	EncryptedFileKey              crypto.EncryptedFileKey `json:"encrypted_file_key"`
	EncryptionVersion             string                  `json:"encryption_version"`
	EncryptedHash                 string                  `json:"encrypted_hash"`
	EncryptedFileSizeInBytes      int64                   `json:"encrypted_file_size_in_bytes"`
	EncryptedThumbnailSizeInBytes int64                   `json:"encrypted_thumbnail_size_in_bytes"`
	Tags                          []dom_tag.EmbeddedTag   `json:"tags"`
	CreatedAt                     time.Time               `json:"created_at"`
	ModifiedAt                    time.Time               `json:"modified_at"`
	// Version is bumped on every mutation; Tombstone* fields relate to
	// soft-deletion bookkeeping.
	Version          uint64    `json:"version"`
	State            string    `json:"state"`
	TombstoneVersion uint64    `json:"tombstone_version"`
	TombstoneExpiry  time.Time `json:"tombstone_expiry"`
}
// CreatePendingFileResponseDTO returns the created pending file plus the
// presigned URL(s) the client must use to upload the encrypted content
// before the given expiration time.
type CreatePendingFileResponseDTO struct {
	File                    *FileResponseDTO `json:"file"`
	PresignedUploadURL      string           `json:"presigned_upload_url"`
	PresignedThumbnailURL   string           `json:"presigned_thumbnail_url,omitempty"`
	UploadURLExpirationTime time.Time        `json:"upload_url_expiration_time"`
	Success                 bool             `json:"success"`
	Message                 string           `json:"message"`
}
// CreatePendingFileService creates a pending file record and hands back
// presigned upload URLs for its encrypted content.
type CreatePendingFileService interface {
	Execute(ctx context.Context, req *CreatePendingFileRequestDTO) (*CreatePendingFileResponseDTO, error)
}
// createPendingFileServiceImpl is the default CreatePendingFileService
// implementation. It validates quota and collection access, creates the
// pending metadata record, and generates presigned upload URLs.
type createPendingFileServiceImpl struct {
	config                            *config.Configuration
	logger                            *zap.Logger
	getCollectionUseCase              uc_collection.GetCollectionUseCase
	checkCollectionAccessUseCase      uc_collection.CheckCollectionAccessUseCase
	checkFileExistsUseCase            uc_filemetadata.CheckFileExistsUseCase
	createMetadataUseCase             uc_filemetadata.CreateFileMetadataUseCase
	generatePresignedUploadURLUseCase uc_fileobjectstorage.GeneratePresignedUploadURLUseCase
	storageQuotaHelperUseCase         uc_user.UserStorageQuotaHelperUseCase
	tagRepo                           dom_tag.Repository
	// fileValidator enforces per-collection-type upload rules (size, content type).
	fileValidator *FileValidator
}
// NewCreatePendingFileService assembles a CreatePendingFileService from its
// collaborators, tagging the logger with the service name and constructing
// a fresh FileValidator for upload validation.
func NewCreatePendingFileService(
	config *config.Configuration,
	logger *zap.Logger,
	getCollectionUseCase uc_collection.GetCollectionUseCase,
	checkCollectionAccessUseCase uc_collection.CheckCollectionAccessUseCase,
	checkFileExistsUseCase uc_filemetadata.CheckFileExistsUseCase,
	createMetadataUseCase uc_filemetadata.CreateFileMetadataUseCase,
	generatePresignedUploadURLUseCase uc_fileobjectstorage.GeneratePresignedUploadURLUseCase,
	storageQuotaHelperUseCase uc_user.UserStorageQuotaHelperUseCase,
	tagRepo dom_tag.Repository,
) CreatePendingFileService {
	svc := &createPendingFileServiceImpl{
		config:                            config,
		logger:                            logger.Named("CreatePendingFileService"),
		getCollectionUseCase:              getCollectionUseCase,
		checkCollectionAccessUseCase:      checkCollectionAccessUseCase,
		checkFileExistsUseCase:            checkFileExistsUseCase,
		createMetadataUseCase:             createMetadataUseCase,
		generatePresignedUploadURLUseCase: generatePresignedUploadURLUseCase,
		storageQuotaHelperUseCase:         storageQuotaHelperUseCase,
		tagRepo:                           tagRepo,
		fileValidator:                     NewFileValidator(),
	}
	return svc
}
func (svc *createPendingFileServiceImpl) Execute(ctx context.Context, req *CreatePendingFileRequestDTO) (*CreatePendingFileResponseDTO, error) {
//
// STEP 1: Validation
//
if req == nil {
svc.logger.Warn("⚠️ Failed validation with nil request")
return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "File creation details are required")
}
e := make(map[string]string)
if req.ID.String() == "" {
e["id"] = "Client-side generated ID is required"
}
doesExist, err := svc.checkFileExistsUseCase.Execute(req.ID)
if err != nil {
e["id"] = fmt.Sprintf("Client-side generated ID causes error: %v", req.ID)
}
if doesExist {
e["id"] = "Client-side generated ID already exists"
}
if req.CollectionID.String() == "" {
e["collection_id"] = "Collection ID is required"
}
if req.EncryptedMetadata == "" {
e["encrypted_metadata"] = "Encrypted metadata is required"
}
if req.EncryptedFileKey.Ciphertext == nil || len(req.EncryptedFileKey.Ciphertext) == 0 {
e["encrypted_file_key"] = "Encrypted file key is required"
}
if req.EncryptionVersion == "" {
e["encryption_version"] = "Encryption version is required"
}
if req.EncryptedHash == "" {
e["encrypted_hash"] = "Encrypted hash is required"
}
if req.ExpectedFileSizeInBytes <= 0 {
e["expected_file_size_in_bytes"] = "Expected file size must be greater than 0"
}
if len(e) != 0 {
svc.logger.Warn("⚠️ Failed validation",
zap.Any("error", e))
return nil, httperror.NewForBadRequest(&e)
}
//
// STEP 2: Get user ID from context
//
userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
if !ok {
svc.logger.Error("❌ Failed getting user ID from context")
return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
}
//
// STEP 3: Check storage quota BEFORE creating file
//
totalExpectedSize := req.ExpectedFileSizeInBytes + req.ExpectedThumbnailSizeInBytes
err = svc.storageQuotaHelperUseCase.CheckAndReserveQuota(ctx, userID, totalExpectedSize)
if err != nil {
svc.logger.Warn("⚠️ Storage quota check failed",
zap.String("user_id", userID.String()),
zap.Int64("requested_size", totalExpectedSize),
zap.Error(err))
return nil, err // This will be a proper HTTP error from the quota helper
}
svc.logger.Info("✅ Storage quota reserved successfully",
zap.String("user_id", userID.String()),
zap.Int64("reserved_size", totalExpectedSize))
//
// STEP 4: Check if user has write access to the collection
//
hasAccess, err := svc.checkCollectionAccessUseCase.Execute(ctx, req.CollectionID, userID, dom_collection.CollectionPermissionReadWrite)
if err != nil {
// Release reserved quota on error
if releaseErr := svc.storageQuotaHelperUseCase.ReleaseQuota(ctx, userID, totalExpectedSize); releaseErr != nil {
svc.logger.Error("❌ Failed to release quota after collection access check error", zap.Error(releaseErr))
}
svc.logger.Error("❌ Failed to check collection access",
zap.Any("error", err),
zap.Any("collection_id", req.CollectionID),
zap.Any("user_id", userID))
return nil, err
}
if !hasAccess {
// Release reserved quota on access denied
if releaseErr := svc.storageQuotaHelperUseCase.ReleaseQuota(ctx, userID, totalExpectedSize); releaseErr != nil {
svc.logger.Error("❌ Failed to release quota after access denied", zap.Error(releaseErr))
}
svc.logger.Warn("⚠️ Unauthorized file creation attempt",
zap.Any("user_id", userID),
zap.Any("collection_id", req.CollectionID))
return nil, httperror.NewForForbiddenWithSingleField("message", "You don't have permission to create files in this collection")
}
//
// STEP 5: Get collection details and validate file upload
//
// CWE-434: Unrestricted Upload of File with Dangerous Type
// OWASP A04:2021: Insecure Design - File upload validation
collection, err := svc.getCollectionUseCase.Execute(ctx, req.CollectionID)
if err != nil {
// Release reserved quota on error
if releaseErr := svc.storageQuotaHelperUseCase.ReleaseQuota(ctx, userID, totalExpectedSize); releaseErr != nil {
svc.logger.Error("❌ Failed to release quota after collection retrieval error", zap.Error(releaseErr))
}
svc.logger.Error("❌ Failed to get collection details",
zap.Error(err),
zap.Any("collection_id", req.CollectionID))
return nil, err
}
// Validate file upload based on collection type
if err := svc.fileValidator.ValidateFileUpload(
collection.CollectionType,
req.ExpectedFileSizeInBytes,
req.ExpectedThumbnailSizeInBytes,
req.ContentType,
); err != nil {
// Release reserved quota on validation error
if releaseErr := svc.storageQuotaHelperUseCase.ReleaseQuota(ctx, userID, totalExpectedSize); releaseErr != nil {
svc.logger.Error("❌ Failed to release quota after validation error", zap.Error(releaseErr))
}
svc.logger.Warn("⚠️ File upload validation failed",
zap.Error(err),
zap.String("collection_type", collection.CollectionType),
zap.Int64("file_size", req.ExpectedFileSizeInBytes),
zap.String("content_type", req.ContentType))
return nil, httperror.NewForBadRequestWithSingleField("file", err.Error())
}
svc.logger.Info("✅ File upload validated successfully",
zap.String("collection_type", collection.CollectionType),
zap.Int64("file_size", req.ExpectedFileSizeInBytes),
zap.String("content_type", req.ContentType))
//
// STEP 6: Generate storage paths.
//
storagePath := generateStoragePath(userID.String(), req.ID.String())
thumbnailStoragePath := generateThumbnailStoragePath(userID.String(), req.ID.String())
//
// STEP 6: Generate presigned upload URLs
//
uploadURLDuration := 1 * time.Hour // URLs valid for 1 hour
expirationTime := time.Now().Add(uploadURLDuration)
presignedUploadURL, err := svc.generatePresignedUploadURLUseCase.Execute(ctx, storagePath, uploadURLDuration)
if err != nil {
// Release reserved quota on error
if releaseErr := svc.storageQuotaHelperUseCase.ReleaseQuota(ctx, userID, totalExpectedSize); releaseErr != nil {
svc.logger.Error("❌ Failed to release quota after presigned URL generation error", zap.Error(releaseErr))
}
svc.logger.Error("❌ Failed to generate presigned upload URL",
zap.Any("error", err),
zap.Any("file_id", req.ID),
zap.String("storage_path", storagePath))
return nil, err
}
// Generate thumbnail upload URL (optional)
var presignedThumbnailURL string
if req.ExpectedThumbnailSizeInBytes > 0 {
presignedThumbnailURL, err = svc.generatePresignedUploadURLUseCase.Execute(ctx, thumbnailStoragePath, uploadURLDuration)
if err != nil {
svc.logger.Warn("⚠️ Failed to generate thumbnail presigned upload URL, continuing without it",
zap.Any("error", err),
zap.Any("file_id", req.ID),
zap.String("thumbnail_storage_path", thumbnailStoragePath))
}
}
//
// STEP 7: Look up and embed tags if TagIDs were provided
//
var embeddedTags []dom_tag.EmbeddedTag
if len(req.TagIDs) > 0 {
svc.logger.Debug("🏷️ Looking up tags to embed in file",
zap.Int("tagCount", len(req.TagIDs)))
for _, tagID := range req.TagIDs {
tagObj, err := svc.tagRepo.GetByID(ctx, tagID)
if err != nil {
svc.logger.Warn("Failed to get tag for embedding, skipping",
zap.String("tagID", tagID.String()),
zap.Error(err))
continue
}
// Verify tag belongs to the user
if tagObj.UserID != userID {
svc.logger.Warn("Tag does not belong to user, skipping",
zap.String("tagID", tagID.String()),
zap.String("userID", userID.String()))
continue
}
embeddedTags = append(embeddedTags, *tagObj.ToEmbeddedTag())
}
svc.logger.Info("✅ Tags embedded in file",
zap.Int("embeddedCount", len(embeddedTags)),
zap.Int("requestedCount", len(req.TagIDs)))
}
//
// STEP 8: Create pending file metadata record
//
now := time.Now()
file := &dom_file.File{
ID: req.ID,
CollectionID: req.CollectionID,
OwnerID: userID,
EncryptedMetadata: req.EncryptedMetadata,
EncryptedFileKey: req.EncryptedFileKey,
EncryptionVersion: req.EncryptionVersion,
EncryptedHash: req.EncryptedHash,
EncryptedFileObjectKey: storagePath,
EncryptedFileSizeInBytes: req.ExpectedFileSizeInBytes, // Will be updated when upload completes
EncryptedThumbnailObjectKey: thumbnailStoragePath,
EncryptedThumbnailSizeInBytes: req.ExpectedThumbnailSizeInBytes, // Will be updated when upload completes
Tags: embeddedTags,
CreatedAt: now,
CreatedByUserID: userID,
ModifiedAt: now,
ModifiedByUserID: userID,
Version: 1, // File creation always starts mutation version at 1.
State: dom_file.FileStatePending, // File creation always starts state in a pending upload.
}
err = svc.createMetadataUseCase.Execute(file)
if err != nil {
// Release reserved quota on error
if releaseErr := svc.storageQuotaHelperUseCase.ReleaseQuota(ctx, userID, totalExpectedSize); releaseErr != nil {
svc.logger.Error("❌ Failed to release quota after metadata creation error", zap.Error(releaseErr))
}
svc.logger.Error("❌ Failed to create pending file metadata",
zap.Any("error", err),
zap.Any("file_id", req.ID))
return nil, err
}
//
// STEP 9: Prepare response
//
response := &CreatePendingFileResponseDTO{
File: mapFileToDTO(file),
PresignedUploadURL: presignedUploadURL,
PresignedThumbnailURL: presignedThumbnailURL,
UploadURLExpirationTime: expirationTime,
Success: true,
Message: "Pending file created successfully. Storage quota reserved. Use the presigned URL to upload your file.",
}
svc.logger.Info("✅ Pending file created successfully with quota reservation",
zap.Any("file_id", req.ID),
zap.Any("collection_id", req.CollectionID),
zap.Any("owner_id", userID),
zap.String("storage_path", storagePath),
zap.Int64("reserved_size", totalExpectedSize),
zap.Time("url_expiration", expirationTime))
return response, nil
}
// generateStoragePath builds the canonical object-storage key for a file's
// encrypted content: users/<ownerID>/files/<fileID>. Both halves of the key
// are UUID strings supplied by the caller.
func generateStoragePath(ownerID, fileID string) string {
	const keyTemplate = "users/%s/files/%s"
	return fmt.Sprintf(keyTemplate, ownerID, fileID)
}
// generateThumbnailStoragePath builds the object-storage key for a file's
// encrypted thumbnail. It mirrors generateStoragePath with a "_thumb" suffix
// so the thumbnail always sits next to its parent object.
func generateThumbnailStoragePath(ownerID, fileID string) string {
	const keyTemplate = "users/%s/files/%s_thumb"
	return fmt.Sprintf(keyTemplate, ownerID, fileID)
}

View file

@ -0,0 +1,386 @@
// monorepo/cloud/backend/internal/maplefile/service/file/delete_multiple.go
package file
import (
"context"
"fmt"
"time"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
dom_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/file"
uc_filemetadata "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/filemetadata"
uc_fileobjectstorage "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/fileobjectstorage"
uc_storagedailyusage "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/storagedailyusage"
uc_storageusageevent "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/storageusageevent"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/transaction"
)
// DeleteMultipleFilesRequestDTO is the inbound payload for a bulk file
// deletion. Every entry must be a non-zero UUID; the list itself must be
// non-empty.
type DeleteMultipleFilesRequestDTO struct {
	FileIDs []gocql.UUID `json:"file_ids"`
}
// DeleteMultipleFilesResponseDTO summarizes a bulk deletion. SkippedCount
// counts files rejected for permission or state reasons.
// NOTE(review): DeletedCount + SkippedCount may be less than TotalRequested
// when some requested IDs were never found in metadata — confirm intended.
type DeleteMultipleFilesResponseDTO struct {
	Success        bool   `json:"success"`
	Message        string `json:"message"`
	DeletedCount   int    `json:"deleted_count"`
	SkippedCount   int    `json:"skipped_count"`
	TotalRequested int    `json:"total_requested"`
}
// DeleteMultipleFilesService hard-deletes a batch of files on behalf of the
// authenticated user (resolved from ctx), skipping files the user may not
// delete rather than failing the whole batch.
type DeleteMultipleFilesService interface {
	Execute(ctx context.Context, req *DeleteMultipleFilesRequestDTO) (*DeleteMultipleFilesResponseDTO, error)
}
// deleteMultipleFilesServiceImpl is the default DeleteMultipleFilesService
// implementation. All collaborators are injected via the constructor.
type deleteMultipleFilesServiceImpl struct {
	config                    *config.Configuration
	logger                    *zap.Logger
	collectionRepo            dom_collection.CollectionRepository
	getMetadataByIDsUseCase   uc_filemetadata.GetFileMetadataByIDsUseCase
	deleteMetadataManyUseCase uc_filemetadata.DeleteManyFileMetadataUseCase
	deleteMultipleDataUseCase uc_fileobjectstorage.DeleteMultipleEncryptedDataUseCase
	// Add storage usage tracking use cases
	createStorageUsageEventUseCase uc_storageusageevent.CreateStorageUsageEventUseCase
	updateStorageUsageUseCase      uc_storagedailyusage.UpdateStorageUsageUseCase
}
// NewDeleteMultipleFilesService wires the bulk-deletion service together with
// its repositories and use cases, attaching a named logger for traceability.
func NewDeleteMultipleFilesService(
	config *config.Configuration,
	logger *zap.Logger,
	collectionRepo dom_collection.CollectionRepository,
	getMetadataByIDsUseCase uc_filemetadata.GetFileMetadataByIDsUseCase,
	deleteMetadataManyUseCase uc_filemetadata.DeleteManyFileMetadataUseCase,
	deleteMultipleDataUseCase uc_fileobjectstorage.DeleteMultipleEncryptedDataUseCase,
	createStorageUsageEventUseCase uc_storageusageevent.CreateStorageUsageEventUseCase,
	updateStorageUsageUseCase uc_storagedailyusage.UpdateStorageUsageUseCase,
) DeleteMultipleFilesService {
	svc := &deleteMultipleFilesServiceImpl{
		config:                         config,
		logger:                         logger.Named("DeleteMultipleFilesService"),
		collectionRepo:                 collectionRepo,
		getMetadataByIDsUseCase:        getMetadataByIDsUseCase,
		deleteMetadataManyUseCase:      deleteMetadataManyUseCase,
		deleteMultipleDataUseCase:      deleteMultipleDataUseCase,
		createStorageUsageEventUseCase: createStorageUsageEventUseCase,
		updateStorageUsageUseCase:      updateStorageUsageUseCase,
	}
	return svc
}
// Execute hard-deletes a batch of files for the authenticated caller.
//
// Flow: validate the request, resolve the caller from ctx, load metadata for
// every requested ID, pre-fetch ReadWrite access per collection (avoids the
// N+1 query), then — under SAGA compensation — delete metadata, decrement
// collection file counts, record storage-usage events, and finally delete the
// S3 objects (best effort, after metadata is already gone).
//
// Files the caller cannot delete, or whose state cannot legally transition to
// deleted, are skipped and reported via SkippedCount rather than failing the
// whole batch.
func (svc *deleteMultipleFilesServiceImpl) Execute(ctx context.Context, req *DeleteMultipleFilesRequestDTO) (*DeleteMultipleFilesResponseDTO, error) {
	//
	// STEP 1: Validation
	//
	if req == nil {
		svc.logger.Warn("Failed validation with nil request")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "File IDs are required")
	}
	// len(nil slice) == 0, so a single length check covers both cases.
	if len(req.FileIDs) == 0 {
		svc.logger.Warn("Empty file IDs provided")
		return nil, httperror.NewForBadRequestWithSingleField("file_ids", "File IDs are required")
	}
	// Validate individual file IDs.
	// BUGFIX: gocql.UUID.String() never returns "" (a zero UUID formats as
	// "00000000-0000-..."), so the previous `fileID.String() == ""` check
	// could never fire. Compare against the zero-value UUID instead.
	e := make(map[string]string)
	for i, fileID := range req.FileIDs {
		if fileID == (gocql.UUID{}) {
			e[fmt.Sprintf("file_ids[%d]", i)] = "File ID is required"
		}
	}
	if len(e) != 0 {
		svc.logger.Warn("Failed validation",
			zap.Any("error", e))
		return nil, httperror.NewForBadRequest(&e)
	}
	//
	// STEP 2: Get user ID from context
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 3: Get file metadata for all files
	//
	files, err := svc.getMetadataByIDsUseCase.Execute(req.FileIDs)
	if err != nil {
		svc.logger.Error("Failed to get file metadata",
			zap.Any("error", err),
			zap.Any("file_ids", req.FileIDs))
		return nil, err
	}
	//
	// STEP 4: Group files by collection to optimize permission checks
	//
	filesByCollection := make(map[gocql.UUID][]*dom_file.File)
	for _, file := range files {
		filesByCollection[file.CollectionID] = append(filesByCollection[file.CollectionID], file)
	}
	//
	// STEP 5: Pre-fetch collection access permissions (eliminates N+1 query)
	//
	// A failed access check is treated as "no access" rather than aborting
	// the batch, so one flaky collection cannot block unrelated deletions.
	collectionAccess := make(map[gocql.UUID]bool)
	for collectionID := range filesByCollection {
		hasAccess, err := svc.collectionRepo.CheckAccess(ctx, collectionID, userID, dom_collection.CollectionPermissionReadWrite)
		if err != nil {
			svc.logger.Warn("Failed to check access for collection",
				zap.Any("error", err),
				zap.Any("collection_id", collectionID))
			collectionAccess[collectionID] = false
			continue
		}
		collectionAccess[collectionID] = hasAccess
	}
	//
	// STEP 6: Filter files that the user has permission to delete and track storage by owner
	//
	var deletableFiles []*dom_file.File
	var storagePaths []string
	skippedCount := 0
	storageByOwner := make(map[gocql.UUID]int64)   // Track total storage to release per owner
	filesPerCollection := make(map[gocql.UUID]int) // Track files to delete per collection for count updates
	for _, file := range files {
		// Use pre-fetched access permission
		hasAccess := collectionAccess[file.CollectionID]
		if !hasAccess {
			svc.logger.Warn("User doesn't have permission to delete file, skipping",
				zap.Any("user_id", userID),
				zap.Any("file_id", file.ID),
				zap.Any("collection_id", file.CollectionID))
			skippedCount++
			continue
		}
		// Check valid transitions.
		// NOTE(review): the transition rule for *file* states is looked up via
		// the collection domain helper — confirm dom_collection is the
		// intended owner of this rule (dom_file may define its own).
		if err := dom_collection.IsValidStateTransition(file.State, dom_file.FileStateDeleted); err != nil {
			svc.logger.Warn("Invalid file state transition",
				zap.Any("user_id", userID),
				zap.Error(err))
			skippedCount++
			continue
		}
		deletableFiles = append(deletableFiles, file)
		storagePaths = append(storagePaths, file.EncryptedFileObjectKey)
		// Add thumbnail paths if they exist
		if file.EncryptedThumbnailObjectKey != "" {
			storagePaths = append(storagePaths, file.EncryptedThumbnailObjectKey)
		}
		// Only active files count against quota/collection counts; pending or
		// already-deleted files have no accounted storage to release.
		if file.State == dom_file.FileStateActive {
			totalFileSize := file.EncryptedFileSizeInBytes + file.EncryptedThumbnailSizeInBytes
			storageByOwner[file.OwnerID] += totalFileSize
			// Track files per collection for count updates
			filesPerCollection[file.CollectionID]++
		}
	}
	if len(deletableFiles) == 0 {
		// Nothing deletable: report everything as skipped (this also covers
		// IDs that were never found in metadata).
		return &DeleteMultipleFilesResponseDTO{
			Success:        true,
			Message:        "No files could be deleted due to permission restrictions",
			DeletedCount:   0,
			SkippedCount:   len(req.FileIDs),
			TotalRequested: len(req.FileIDs),
		}, nil
	}
	//
	// SAGA: Initialize distributed transaction manager
	//
	saga := transaction.NewSaga("delete-multiple-files", svc.logger)
	svc.logger.Info("Starting multiple file deletion with SAGA protection",
		zap.Int("deletable_files_count", len(deletableFiles)),
		zap.Int("skipped_count", skippedCount),
		zap.Int("total_requested", len(req.FileIDs)))
	// Note: Version tracking is not needed for hard delete since the file is being
	// completely removed. Version tracking is handled in SoftDeleteFileService for
	// soft deletes where tombstone records are maintained.
	//
	// STEP 7: Delete file metadata (SAGA protected)
	//
	deletableFileIDs := make([]gocql.UUID, len(deletableFiles))
	deletableFilesCaptured := make([]*dom_file.File, len(deletableFiles))
	for i, file := range deletableFiles {
		deletableFileIDs[i] = file.ID
		// Deep copy for compensation
		fileCopy := *file
		deletableFilesCaptured[i] = &fileCopy
	}
	err = svc.deleteMetadataManyUseCase.Execute(deletableFileIDs)
	if err != nil {
		svc.logger.Error("Failed to delete file metadata",
			zap.Error(err),
			zap.Int("file_count", len(deletableFileIDs)))
		saga.Rollback(ctx)
		return nil, err
	}
	// Register compensation: This is a hard delete, so we cannot easily restore
	// The compensation logs the failure - manual intervention may be required
	saga.AddCompensation(func(ctx context.Context) error {
		svc.logger.Warn("SAGA compensation: hard delete cannot be automatically reversed",
			zap.Int("deleted_file_count", len(deletableFilesCaptured)),
			zap.String("note", "Manual restoration from backup may be required"))
		// For hard delete, we can't restore deleted metadata without backup
		// This compensation serves as an audit trail
		return nil
	})
	//
	// STEP 8: Update file counts for affected collections (SAGA protected)
	//
	for collectionID, fileCount := range filesPerCollection {
		if fileCount > 0 {
			// Decrement the file count for this collection, one per deleted
			// file (the repository exposes only a single-step decrement).
			for i := 0; i < fileCount; i++ {
				err = svc.collectionRepo.DecrementFileCount(ctx, collectionID)
				if err != nil {
					svc.logger.Error("Failed to decrement file count for collection",
						zap.String("collection_id", collectionID.String()),
						zap.Int("file_count", fileCount),
						zap.Error(err))
					saga.Rollback(ctx)
					return nil, err
				}
			}
			// Register compensation: increment the count back.
			// Captures guard against loop-variable aliasing on Go < 1.22.
			collectionIDCaptured := collectionID
			fileCountCaptured := fileCount
			saga.AddCompensation(func(ctx context.Context) error {
				svc.logger.Warn("SAGA compensation: restoring file count",
					zap.String("collection_id", collectionIDCaptured.String()),
					zap.Int("file_count", fileCountCaptured))
				for i := 0; i < fileCountCaptured; i++ {
					if err := svc.collectionRepo.IncrementFileCount(ctx, collectionIDCaptured); err != nil {
						svc.logger.Error("Failed to restore file count during compensation",
							zap.String("collection_id", collectionIDCaptured.String()),
							zap.Error(err))
						return err
					}
				}
				return nil
			})
		}
	}
	//
	// STEP 9: Create storage usage events and update daily usage for each owner (SAGA protected)
	//
	today := time.Now().Truncate(24 * time.Hour)
	for ownerID, totalSize := range storageByOwner {
		if totalSize > 0 {
			// Create storage usage event (SAGA protected)
			err = svc.createStorageUsageEventUseCase.Execute(ctx, ownerID, totalSize, "remove")
			if err != nil {
				svc.logger.Error("Failed to create storage usage event for bulk deletion",
					zap.String("owner_id", ownerID.String()),
					zap.Int64("total_size", totalSize),
					zap.Error(err))
				saga.Rollback(ctx)
				return nil, err
			}
			// Register compensation: create compensating "add" event
			ownerIDCaptured := ownerID
			totalSizeCaptured := totalSize
			saga.AddCompensation(func(ctx context.Context) error {
				svc.logger.Warn("SAGA compensation: creating compensating usage event",
					zap.String("owner_id", ownerIDCaptured.String()))
				return svc.createStorageUsageEventUseCase.Execute(ctx, ownerIDCaptured, totalSizeCaptured, "add")
			})
			// Update daily storage usage (SAGA protected)
			updateReq := &uc_storagedailyusage.UpdateStorageUsageRequest{
				UserID:       ownerID,
				UsageDay:     &today,
				TotalBytes:   -totalSize, // Negative because we're removing
				AddBytes:     0,
				RemoveBytes:  totalSize,
				IsIncrement:  true, // Increment the existing values
			}
			err = svc.updateStorageUsageUseCase.Execute(ctx, updateReq)
			if err != nil {
				svc.logger.Error("Failed to update daily storage usage for bulk deletion",
					zap.String("owner_id", ownerID.String()),
					zap.Int64("total_size", totalSize),
					zap.Error(err))
				saga.Rollback(ctx)
				return nil, err
			}
			// Register compensation: reverse the usage update
			saga.AddCompensation(func(ctx context.Context) error {
				svc.logger.Warn("SAGA compensation: reversing daily usage update",
					zap.String("owner_id", ownerIDCaptured.String()))
				compensateReq := &uc_storagedailyusage.UpdateStorageUsageRequest{
					UserID:       ownerIDCaptured,
					UsageDay:     &today,
					TotalBytes:   totalSizeCaptured, // Positive to reverse
					AddBytes:     totalSizeCaptured,
					RemoveBytes:  0,
					IsIncrement:  true,
				}
				return svc.updateStorageUsageUseCase.Execute(ctx, compensateReq)
			})
		}
	}
	//
	// STEP 10: Delete S3 objects
	//
	if len(storagePaths) > 0 {
		svc.logger.Info("Deleting S3 objects for multiple files",
			zap.Int("s3_objects_count", len(storagePaths)))
		if err := svc.deleteMultipleDataUseCase.Execute(storagePaths); err != nil {
			// Log but don't fail - S3 deletion is best effort after metadata is deleted
			svc.logger.Error("Failed to delete some S3 objects (continuing anyway)",
				zap.Error(err),
				zap.Int("s3_objects_count", len(storagePaths)))
		} else {
			svc.logger.Info("Successfully deleted all S3 objects",
				zap.Int("s3_objects_deleted", len(storagePaths)))
		}
	}
	//
	// SUCCESS: All operations completed with SAGA protection
	//
	svc.logger.Info("Multiple files deleted successfully with SAGA protection",
		zap.Int("deleted_count", len(deletableFiles)),
		zap.Int("skipped_count", skippedCount),
		zap.Int("total_requested", len(req.FileIDs)),
		zap.String("user_id", userID.String()),
		zap.Int("affected_owners", len(storageByOwner)),
		zap.Int("s3_objects_deleted", len(storagePaths)))
	return &DeleteMultipleFilesResponseDTO{
		Success:        true,
		Message:        fmt.Sprintf("Successfully deleted %d files", len(deletableFiles)),
		DeletedCount:   len(deletableFiles),
		SkippedCount:   skippedCount,
		TotalRequested: len(req.FileIDs),
	}, nil
}

View file

@ -0,0 +1,188 @@
package file
import (
"fmt"
"strings"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
)
// File size limits (in bytes)
// NOTE(review): these limits appear to apply to the *encrypted* payload sizes
// the client reports (Expected...SizeInBytes) — confirm whether plaintext or
// ciphertext size is intended.
const (
	// MaxFileSizeGeneral is the maximum file size for general folders (500MB)
	MaxFileSizeGeneral = 500 * 1024 * 1024
	// MaxFileSizeAlbum is the maximum file size for album uploads (100MB)
	// Albums are for photos/videos, so we use a more restrictive limit
	MaxFileSizeAlbum = 100 * 1024 * 1024
	// MaxThumbnailSize is the maximum thumbnail size (10MB)
	MaxThumbnailSize = 10 * 1024 * 1024
)
// Allowed content types for albums (photos and videos only).
// All entries must be lowercase: validateContentType lowercases the input
// before comparing against this list.
// NOTE(review): "image/jpg" is not a registered MIME type (the standard one
// is "image/jpeg") but is kept for compatibility with clients that send it.
// NOTE(review): "image/svg+xml" can embed scripts — verify SVGs are never
// served inline from the same origin, or consider removing it.
var AlbumAllowedContentTypes = []string{
	// Image formats
	"image/jpeg",
	"image/jpg",
	"image/png",
	"image/gif",
	"image/webp",
	"image/heic",
	"image/heif",
	"image/bmp",
	"image/tiff",
	"image/svg+xml",
	// Video formats
	"video/mp4",
	"video/mpeg",
	"video/quicktime",  // .mov files
	"video/x-msvideo",  // .avi files
	"video/x-matroska", // .mkv files
	"video/webm",
	"video/3gpp",
	"video/x-flv",
}
// FileValidator enforces upload size and content-type policy based on the
// collection type. It is stateless, so a single instance may be shared
// freely across goroutines.
type FileValidator struct{}

// NewFileValidator returns a ready-to-use validator.
func NewFileValidator() *FileValidator {
	return new(FileValidator)
}
// ValidateFileUpload validates a file upload against the policy for the given
// collection type: size limits always apply, thumbnail limits apply when a
// thumbnail is present, and albums additionally restrict content types to
// photos and videos. Regular folders accept any content type.
// CWE-434: Unrestricted Upload of File with Dangerous Type
// OWASP A01:2021: Broken Access Control - File upload restrictions
func (v *FileValidator) ValidateFileUpload(
	collectionType string,
	fileSize int64,
	thumbnailSize int64,
	contentType string,
) error {
	if err := v.validateFileSize(collectionType, fileSize); err != nil {
		return err
	}
	// validateThumbnailSize is a no-op for sizes <= 0, so it is safe to call
	// unconditionally (thumbnails are optional).
	if err := v.validateThumbnailSize(thumbnailSize); err != nil {
		return err
	}
	if collectionType != dom_collection.CollectionTypeAlbum {
		// Non-album collections (folders) place no content-type restrictions.
		return nil
	}
	return v.validateContentType(contentType)
}
// validateFileSize checks that fileSize is positive and within the limit for
// the given collection type (albums are capped lower than general folders).
func (v *FileValidator) validateFileSize(collectionType string, fileSize int64) error {
	if fileSize <= 0 {
		return fmt.Errorf("file size must be greater than 0")
	}
	maxSize, collectionTypeName := int64(MaxFileSizeGeneral), "folder"
	if collectionType == dom_collection.CollectionTypeAlbum {
		maxSize, collectionTypeName = MaxFileSizeAlbum, "album"
	}
	if fileSize <= maxSize {
		return nil
	}
	return fmt.Errorf(
		"file size (%s) exceeds maximum allowed size for %s (%s)",
		formatBytes(fileSize),
		collectionTypeName,
		formatBytes(maxSize),
	)
}
// validateThumbnailSize checks an optional thumbnail against MaxThumbnailSize.
// A zero or negative size means "no thumbnail" and is accepted as-is.
func (v *FileValidator) validateThumbnailSize(thumbnailSize int64) error {
	switch {
	case thumbnailSize <= 0:
		// Thumbnail is optional.
		return nil
	case thumbnailSize > MaxThumbnailSize:
		return fmt.Errorf(
			"thumbnail size (%s) exceeds maximum allowed size (%s)",
			formatBytes(thumbnailSize),
			formatBytes(MaxThumbnailSize),
		)
	default:
		return nil
	}
}
// validateContentType rejects content types that are not on the album
// allow-list (photos and videos only). Matching is case-insensitive and
// ignores surrounding whitespace.
func (v *FileValidator) validateContentType(contentType string) error {
	if contentType == "" {
		return fmt.Errorf("content type is required for album uploads")
	}
	want := strings.ToLower(strings.TrimSpace(contentType))
	for _, allowed := range AlbumAllowedContentTypes {
		if want == allowed {
			return nil
		}
	}
	// Report the caller's original (un-normalized) value for clarity.
	return fmt.Errorf(
		"content type '%s' is not allowed in albums. Only photos and videos are permitted",
		contentType,
	)
}
// GetAllowedContentTypes returns the content types permitted in albums.
// A defensive copy is returned so callers cannot mutate the package-level
// AlbumAllowedContentTypes policy list through the returned slice.
func (v *FileValidator) GetAllowedContentTypes() []string {
	out := make([]string, len(AlbumAllowedContentTypes))
	copy(out, AlbumAllowedContentTypes)
	return out
}
// GetMaxFileSize returns the maximum upload size in bytes for the given
// collection type: the album cap for albums, the general cap otherwise.
func (v *FileValidator) GetMaxFileSize(collectionType string) int64 {
	if collectionType != dom_collection.CollectionTypeAlbum {
		return MaxFileSizeGeneral
	}
	return MaxFileSizeAlbum
}
// GetMaxThumbnailSize returns the maximum thumbnail size in bytes
// (MaxThumbnailSize); exposed so callers can surface the limit to clients.
func (v *FileValidator) GetMaxThumbnailSize() int64 {
	return MaxThumbnailSize
}
// formatBytes renders a byte count as a human-readable string using binary
// (1024-based) units: "512 B", "1.0 KB", "500.0 MB", "1.0 GB", ...
// Division by 1024 is exact in binary floating point, so repeated scaling
// produces the same output as a single division.
func formatBytes(bytes int64) string {
	const k = 1024
	if bytes < k {
		return fmt.Sprintf("%d B", bytes)
	}
	value := float64(bytes)
	exp := -1
	for value >= k {
		value /= k
		exp++
	}
	return fmt.Sprintf("%.1f %cB", value, "KMGTPE"[exp])
}

View file

@ -0,0 +1,436 @@
package file
import (
"strings"
"testing"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
)
// TestValidateFileUpload_FolderValidCases tests valid folder uploads.
// Folders impose only the general 500MB size cap; content type is
// deliberately unrestricted, so even executables must pass.
func TestValidateFileUpload_FolderValidCases(t *testing.T) {
	validator := NewFileValidator()
	tests := []struct {
		name          string
		fileSize      int64
		thumbnailSize int64
		contentType   string
	}{
		{
			name:          "valid document upload to folder",
			fileSize:      10 * 1024 * 1024, // 10MB
			thumbnailSize: 0,
			contentType:   "application/pdf",
		},
		{
			name:          "valid large file to folder",
			fileSize:      500 * 1024 * 1024, // 500MB (max)
			thumbnailSize: 0,
			contentType:   "application/zip",
		},
		{
			name:          "valid executable to folder",
			fileSize:      50 * 1024 * 1024, // 50MB
			thumbnailSize: 0,
			contentType:   "application/x-executable",
		},
		{
			name:          "valid image with thumbnail to folder",
			fileSize:      20 * 1024 * 1024, // 20MB
			thumbnailSize: 5 * 1024 * 1024,  // 5MB
			contentType:   "image/png",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := validator.ValidateFileUpload(
				dom_collection.CollectionTypeFolder,
				tt.fileSize,
				tt.thumbnailSize,
				tt.contentType,
			)
			if err != nil {
				t.Errorf("Expected valid folder upload, got error: %v", err)
			}
		})
	}
}
// TestValidateFileUpload_AlbumValidCases tests valid album uploads:
// photo/video content types within the 100MB album cap, with and without
// optional thumbnails.
func TestValidateFileUpload_AlbumValidCases(t *testing.T) {
	validator := NewFileValidator()
	tests := []struct {
		name          string
		fileSize      int64
		thumbnailSize int64
		contentType   string
	}{
		{
			name:          "valid JPEG image to album",
			fileSize:      10 * 1024 * 1024, // 10MB
			thumbnailSize: 1 * 1024 * 1024,  // 1MB
			contentType:   "image/jpeg",
		},
		{
			name:          "valid PNG image to album",
			fileSize:      20 * 1024 * 1024, // 20MB
			thumbnailSize: 2 * 1024 * 1024,  // 2MB
			contentType:   "image/png",
		},
		{
			name:          "valid MP4 video to album",
			fileSize:      100 * 1024 * 1024, // 100MB (max)
			thumbnailSize: 5 * 1024 * 1024,   // 5MB
			contentType:   "video/mp4",
		},
		{
			name:          "valid HEIC image to album",
			fileSize:      15 * 1024 * 1024, // 15MB
			thumbnailSize: 0,
			contentType:   "image/heic",
		},
		{
			name:          "valid WebP image to album",
			fileSize:      8 * 1024 * 1024, // 8MB
			thumbnailSize: 500 * 1024,      // 500KB
			contentType:   "image/webp",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := validator.ValidateFileUpload(
				dom_collection.CollectionTypeAlbum,
				tt.fileSize,
				tt.thumbnailSize,
				tt.contentType,
			)
			if err != nil {
				t.Errorf("Expected valid album upload, got error: %v", err)
			}
		})
	}
}
// TestValidateFileUpload_FolderSizeLimits tests folder size limit enforcement,
// including the exact-limit boundary (inclusive), the over-limit rejection,
// the thumbnail cap, and the zero-size rejection.
func TestValidateFileUpload_FolderSizeLimits(t *testing.T) {
	validator := NewFileValidator()
	tests := []struct {
		name          string
		fileSize      int64
		thumbnailSize int64
		expectError   bool
		errorContains string
	}{
		{
			name:          "file exceeds folder limit",
			fileSize:      501 * 1024 * 1024, // 501MB (over limit)
			thumbnailSize: 0,
			expectError:   true,
			errorContains: "exceeds maximum allowed size for folder",
		},
		{
			name:          "file at folder limit",
			fileSize:      500 * 1024 * 1024, // 500MB (exact limit)
			thumbnailSize: 0,
			expectError:   false,
		},
		{
			name:          "thumbnail exceeds limit",
			fileSize:      10 * 1024 * 1024, // 10MB
			thumbnailSize: 11 * 1024 * 1024, // 11MB (over limit)
			expectError:   true,
			errorContains: "thumbnail size",
		},
		{
			name:          "zero file size",
			fileSize:      0,
			thumbnailSize: 0,
			expectError:   true,
			errorContains: "must be greater than 0",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := validator.ValidateFileUpload(
				dom_collection.CollectionTypeFolder,
				tt.fileSize,
				tt.thumbnailSize,
				"application/pdf",
			)
			if tt.expectError {
				if err == nil {
					t.Error("Expected error but got none")
				} else if !strings.Contains(err.Error(), tt.errorContains) {
					t.Errorf("Expected error containing '%s', got: %v", tt.errorContains, err)
				}
			} else {
				if err != nil {
					t.Errorf("Expected no error, got: %v", err)
				}
			}
		})
	}
}
// TestValidateFileUpload_AlbumSizeLimits tests album size limit enforcement:
// the 100MB album cap is inclusive at the boundary.
func TestValidateFileUpload_AlbumSizeLimits(t *testing.T) {
	validator := NewFileValidator()
	tests := []struct {
		name          string
		fileSize      int64
		expectError   bool
		errorContains string
	}{
		{
			name:          "file exceeds album limit",
			fileSize:      101 * 1024 * 1024, // 101MB (over limit)
			expectError:   true,
			errorContains: "exceeds maximum allowed size for album",
		},
		{
			name:        "file at album limit",
			fileSize:    100 * 1024 * 1024, // 100MB (exact limit)
			expectError: false,
		},
		{
			name:        "file below album limit",
			fileSize:    50 * 1024 * 1024, // 50MB
			expectError: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := validator.ValidateFileUpload(
				dom_collection.CollectionTypeAlbum,
				tt.fileSize,
				0,
				"image/jpeg",
			)
			if tt.expectError {
				if err == nil {
					t.Error("Expected error but got none")
				} else if !strings.Contains(err.Error(), tt.errorContains) {
					t.Errorf("Expected error containing '%s', got: %v", tt.errorContains, err)
				}
			} else {
				if err != nil {
					t.Errorf("Expected no error, got: %v", err)
				}
			}
		})
	}
}
// TestValidateFileUpload_AlbumContentTypeRestrictions tests album content type
// validation, including normalization (case-insensitivity and surrounding
// whitespace) and the empty-content-type rejection.
func TestValidateFileUpload_AlbumContentTypeRestrictions(t *testing.T) {
	validator := NewFileValidator()
	tests := []struct {
		name          string
		contentType   string
		expectError   bool
		errorContains string
	}{
		{
			name:        "valid JPEG",
			contentType: "image/jpeg",
			expectError: false,
		},
		{
			name:        "valid PNG",
			contentType: "image/png",
			expectError: false,
		},
		{
			name:        "valid MP4",
			contentType: "video/mp4",
			expectError: false,
		},
		{
			name:          "invalid PDF",
			contentType:   "application/pdf",
			expectError:   true,
			errorContains: "not allowed in albums",
		},
		{
			name:          "invalid ZIP",
			contentType:   "application/zip",
			expectError:   true,
			errorContains: "not allowed in albums",
		},
		{
			name:          "invalid executable",
			contentType:   "application/x-executable",
			expectError:   true,
			errorContains: "not allowed in albums",
		},
		{
			name:          "empty content type",
			contentType:   "",
			expectError:   true,
			errorContains: "content type is required",
		},
		{
			name:        "case insensitive IMAGE/JPEG",
			contentType: "IMAGE/JPEG",
			expectError: false,
		},
		{
			name:        "content type with extra spaces",
			contentType: " image/png ",
			expectError: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := validator.ValidateFileUpload(
				dom_collection.CollectionTypeAlbum,
				10*1024*1024, // 10MB
				0,
				tt.contentType,
			)
			if tt.expectError {
				if err == nil {
					t.Error("Expected error but got none")
				} else if !strings.Contains(err.Error(), tt.errorContains) {
					t.Errorf("Expected error containing '%s', got: %v", tt.errorContains, err)
				}
			} else {
				if err != nil {
					t.Errorf("Expected no error, got: %v", err)
				}
			}
		})
	}
}

// TestValidateFileUpload_FolderNoContentTypeRestrictions tests that folders
// allow any content type — including an empty one, since only albums require
// a content type.
func TestValidateFileUpload_FolderNoContentTypeRestrictions(t *testing.T) {
	validator := NewFileValidator()
	contentTypes := []string{
		"application/pdf",
		"application/zip",
		"application/x-executable",
		"text/plain",
		"application/json",
		"application/octet-stream",
		"image/jpeg", // Photos are also allowed in folders
		"video/mp4",  // Videos are also allowed in folders
		"",           // Even empty content type is OK for folders
	}
	for _, contentType := range contentTypes {
		t.Run("folder allows "+contentType, func(t *testing.T) {
			err := validator.ValidateFileUpload(
				dom_collection.CollectionTypeFolder,
				10*1024*1024, // 10MB
				0,
				contentType,
			)
			if err != nil {
				t.Errorf("Expected folder to allow content type '%s', got error: %v", contentType, err)
			}
		})
	}
}
// TestGetMaxFileSize tests the GetMaxFileSize helper method against the
// package-level limit constants for both collection types.
func TestGetMaxFileSize(t *testing.T) {
	validator := NewFileValidator()
	folderMax := validator.GetMaxFileSize(dom_collection.CollectionTypeFolder)
	if folderMax != MaxFileSizeGeneral {
		t.Errorf("Expected folder max size %d, got %d", MaxFileSizeGeneral, folderMax)
	}
	albumMax := validator.GetMaxFileSize(dom_collection.CollectionTypeAlbum)
	if albumMax != MaxFileSizeAlbum {
		t.Errorf("Expected album max size %d, got %d", MaxFileSizeAlbum, albumMax)
	}
}

// TestGetMaxThumbnailSize tests the GetMaxThumbnailSize helper method.
func TestGetMaxThumbnailSize(t *testing.T) {
	validator := NewFileValidator()
	maxThumb := validator.GetMaxThumbnailSize()
	if maxThumb != MaxThumbnailSize {
		t.Errorf("Expected max thumbnail size %d, got %d", MaxThumbnailSize, maxThumb)
	}
}

// TestGetAllowedContentTypes tests the GetAllowedContentTypes helper method:
// the list must be non-empty and include the common photo/video types.
func TestGetAllowedContentTypes(t *testing.T) {
	validator := NewFileValidator()
	allowedTypes := validator.GetAllowedContentTypes()
	if len(allowedTypes) == 0 {
		t.Error("Expected non-empty list of allowed content types")
	}
	// Check that common photo/video types are included
	expectedTypes := []string{"image/jpeg", "image/png", "video/mp4"}
	for _, expected := range expectedTypes {
		found := false
		for _, allowed := range allowedTypes {
			if allowed == expected {
				found = true
				break
			}
		}
		if !found {
			t.Errorf("Expected allowed type '%s' not found in list", expected)
		}
	}
}

// TestFormatBytes tests the formatBytes helper function, covering the plain
// byte range (< 1024) and each unit boundary up to GB.
func TestFormatBytes(t *testing.T) {
	tests := []struct {
		bytes    int64
		expected string
	}{
		{bytes: 0, expected: "0 B"},
		{bytes: 1023, expected: "1023 B"},
		{bytes: 1024, expected: "1.0 KB"},
		{bytes: 1024 * 1024, expected: "1.0 MB"},
		{bytes: 500 * 1024 * 1024, expected: "500.0 MB"},
		{bytes: 1024 * 1024 * 1024, expected: "1.0 GB"},
	}
	for _, tt := range tests {
		result := formatBytes(tt.bytes)
		if result != tt.expected {
			t.Errorf("formatBytes(%d) = %s, expected %s", tt.bytes, result, tt.expected)
		}
	}
}
// TestValidateFileUpload_AllAllowedAlbumContentTypes exhaustively verifies
// that every entry in AlbumAllowedContentTypes is accepted for album uploads,
// guarding against accidental list/normalization mismatches.
func TestValidateFileUpload_AllAllowedAlbumContentTypes(t *testing.T) {
	validator := NewFileValidator()
	for _, contentType := range AlbumAllowedContentTypes {
		t.Run("album allows "+contentType, func(t *testing.T) {
			err := validator.ValidateFileUpload(
				dom_collection.CollectionTypeAlbum,
				10*1024*1024, // 10MB
				0,
				contentType,
			)
			if err != nil {
				t.Errorf("Expected album to allow content type '%s', got error: %v", contentType, err)
			}
		})
	}
}

View file

@ -0,0 +1,98 @@
// monorepo/cloud/backend/internal/maplefile/service/file/get.go
package file
import (
"context"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
uc_filemetadata "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/filemetadata"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// GetFileService retrieves a single file's metadata on behalf of the
// authenticated user, enforcing read access on the file's collection.
type GetFileService interface {
	Execute(ctx context.Context, fileID gocql.UUID) (*FileResponseDTO, error)
}

type getFileServiceImpl struct {
	config             *config.Configuration
	logger             *zap.Logger
	collectionRepo     dom_collection.CollectionRepository // used to verify the caller's access to the file's collection
	getMetadataUseCase uc_filemetadata.GetFileMetadataUseCase
}

// NewGetFileService wires a GetFileService implementation with its
// dependencies and a named sub-logger so log entries are attributable.
func NewGetFileService(
	config *config.Configuration,
	logger *zap.Logger,
	collectionRepo dom_collection.CollectionRepository,
	getMetadataUseCase uc_filemetadata.GetFileMetadataUseCase,
) GetFileService {
	logger = logger.Named("GetFileService")
	return &getFileServiceImpl{
		config:             config,
		logger:             logger,
		collectionRepo:     collectionRepo,
		getMetadataUseCase: getMetadataUseCase,
	}
}
// Execute fetches a file's metadata by ID for the authenticated user.
//
// Steps: validate the ID, resolve the caller from the request context, load
// the metadata, verify the caller has at least read-only access to the
// file's collection, then map the domain model to a response DTO.
// Validation/authorization failures return httperror values; use-case and
// repository errors are propagated unchanged.
func (svc *getFileServiceImpl) Execute(ctx context.Context, fileID gocql.UUID) (*FileResponseDTO, error) {
	//
	// STEP 1: Validation
	//
	// NOTE: gocql.UUID.String() always renders 36 characters, so the old
	// comparison against "" could never fire; check the zero-value UUID.
	if fileID == (gocql.UUID{}) {
		svc.logger.Warn("Empty file ID provided")
		return nil, httperror.NewForBadRequestWithSingleField("file_id", "File ID is required")
	}
	//
	// STEP 2: Get user ID from context
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 3: Get file metadata
	//
	file, err := svc.getMetadataUseCase.Execute(fileID)
	if err != nil {
		svc.logger.Error("Failed to get file metadata",
			zap.Error(err),
			zap.Any("file_id", fileID))
		return nil, err
	}
	//
	// STEP 4: Check if user has access to the file's collection
	//
	hasAccess, err := svc.collectionRepo.CheckAccess(ctx, file.CollectionID, userID, dom_collection.CollectionPermissionReadOnly)
	if err != nil {
		svc.logger.Error("Failed to check collection access",
			zap.Error(err),
			zap.Any("collection_id", file.CollectionID),
			zap.Any("user_id", userID))
		return nil, err
	}
	if !hasAccess {
		svc.logger.Warn("Unauthorized file access attempt",
			zap.Any("user_id", userID),
			zap.Any("file_id", fileID),
			zap.Any("collection_id", file.CollectionID))
		return nil, httperror.NewForForbiddenWithSingleField("message", "You don't have permission to access this file")
	}
	//
	// STEP 5: Map domain model to response DTO
	//
	return mapFileToDTO(file), nil
}

View file

@ -0,0 +1,165 @@
// monorepo/cloud/backend/internal/maplefile/service/file/get_presigned_download_url.go
package file
import (
"context"
"time"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
uc_filemetadata "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/filemetadata"
uc_fileobjectstorage "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/fileobjectstorage"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// GetPresignedDownloadURLRequestDTO carries the target file ID and an
// optional lifetime for the generated URLs.
type GetPresignedDownloadURLRequestDTO struct {
	FileID      gocql.UUID    `json:"file_id"`
	URLDuration time.Duration `json:"url_duration,omitempty"` // Optional, defaults to 1 hour
}

// GetPresignedDownloadURLResponseDTO bundles the file metadata with the
// presigned URL(s) and their shared expiration time.
type GetPresignedDownloadURLResponseDTO struct {
	File                      *FileResponseDTO `json:"file"`
	PresignedDownloadURL      string           `json:"presigned_download_url"`
	PresignedThumbnailURL     string           `json:"presigned_thumbnail_url,omitempty"` // empty when the file has no thumbnail
	DownloadURLExpirationTime time.Time        `json:"download_url_expiration_time"`
	Success                   bool             `json:"success"`
	Message                   string           `json:"message"`
}

// GetPresignedDownloadURLService issues presigned download URLs for a file
// after verifying the caller's read access to its collection.
type GetPresignedDownloadURLService interface {
	Execute(ctx context.Context, req *GetPresignedDownloadURLRequestDTO) (*GetPresignedDownloadURLResponseDTO, error)
}

type getPresignedDownloadURLServiceImpl struct {
	config                              *config.Configuration
	logger                              *zap.Logger
	collectionRepo                      dom_collection.CollectionRepository // access checks on the file's collection
	getMetadataUseCase                  uc_filemetadata.GetFileMetadataUseCase
	generatePresignedDownloadURLUseCase uc_fileobjectstorage.GeneratePresignedDownloadURLUseCase
}

// NewGetPresignedDownloadURLService wires the service implementation with
// its dependencies and a named sub-logger.
func NewGetPresignedDownloadURLService(
	config *config.Configuration,
	logger *zap.Logger,
	collectionRepo dom_collection.CollectionRepository,
	getMetadataUseCase uc_filemetadata.GetFileMetadataUseCase,
	generatePresignedDownloadURLUseCase uc_fileobjectstorage.GeneratePresignedDownloadURLUseCase,
) GetPresignedDownloadURLService {
	logger = logger.Named("GetPresignedDownloadURLService")
	return &getPresignedDownloadURLServiceImpl{
		config:                              config,
		logger:                              logger,
		collectionRepo:                      collectionRepo,
		getMetadataUseCase:                  getMetadataUseCase,
		generatePresignedDownloadURLUseCase: generatePresignedDownloadURLUseCase,
	}
}
// Execute generates time-limited presigned download URLs for a file (and
// its thumbnail, when present) after verifying the caller has read access
// to the file's collection.
//
// The URL lifetime defaults to one hour when the request does not supply a
// positive duration. A thumbnail URL failure is logged and tolerated so the
// primary download is never blocked by the thumbnail.
func (svc *getPresignedDownloadURLServiceImpl) Execute(ctx context.Context, req *GetPresignedDownloadURLRequestDTO) (*GetPresignedDownloadURLResponseDTO, error) {
	//
	// STEP 1: Validation
	//
	if req == nil {
		svc.logger.Warn("⚠️ Failed validation with nil request")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "Request details are required")
	}
	// NOTE: gocql.UUID.String() always renders 36 characters, so the old
	// comparison against "" could never fire; check the zero-value UUID.
	if req.FileID == (gocql.UUID{}) {
		svc.logger.Warn("⚠️ Empty file ID provided")
		return nil, httperror.NewForBadRequestWithSingleField("file_id", "File ID is required")
	}
	// Default the URL duration; treat zero AND negative values as "not
	// provided" so callers cannot request an already-expired URL.
	if req.URLDuration <= 0 {
		req.URLDuration = 1 * time.Hour
	}
	//
	// STEP 2: Get user ID from context
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("🔴 Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 3: Get file metadata
	//
	file, err := svc.getMetadataUseCase.Execute(req.FileID)
	if err != nil {
		svc.logger.Error("🔴 Failed to get file metadata",
			zap.Error(err),
			zap.Any("file_id", req.FileID))
		return nil, err
	}
	//
	// STEP 4: Check if user has read access to the file's collection
	//
	hasAccess, err := svc.collectionRepo.CheckAccess(ctx, file.CollectionID, userID, dom_collection.CollectionPermissionReadOnly)
	if err != nil {
		svc.logger.Error("🔴 Failed to check collection access",
			zap.Error(err),
			zap.Any("collection_id", file.CollectionID),
			zap.Any("user_id", userID))
		return nil, err
	}
	if !hasAccess {
		svc.logger.Warn("⚠️ Unauthorized presigned download URL request",
			zap.Any("user_id", userID),
			zap.Any("file_id", req.FileID),
			zap.Any("collection_id", file.CollectionID))
		return nil, httperror.NewForForbiddenWithSingleField("message", "You don't have permission to download this file")
	}
	//
	// STEP 5: Generate presigned download URLs
	//
	expirationTime := time.Now().Add(req.URLDuration)
	presignedDownloadURL, err := svc.generatePresignedDownloadURLUseCase.Execute(ctx, file.EncryptedFileObjectKey, req.URLDuration)
	if err != nil {
		svc.logger.Error("🔴 Failed to generate presigned download URL",
			zap.Error(err),
			zap.Any("file_id", req.FileID),
			zap.String("storage_path", file.EncryptedFileObjectKey))
		return nil, err
	}
	// Generate thumbnail download URL if thumbnail path exists; failures here
	// are deliberately non-fatal.
	var presignedThumbnailURL string
	if file.EncryptedThumbnailObjectKey != "" {
		presignedThumbnailURL, err = svc.generatePresignedDownloadURLUseCase.Execute(ctx, file.EncryptedThumbnailObjectKey, req.URLDuration)
		if err != nil {
			svc.logger.Warn("⚠️ Failed to generate thumbnail presigned download URL, continuing without it",
				zap.Error(err),
				zap.Any("file_id", req.FileID),
				zap.String("thumbnail_storage_path", file.EncryptedThumbnailObjectKey))
		}
	}
	//
	// STEP 6: Prepare response
	//
	response := &GetPresignedDownloadURLResponseDTO{
		File:                      mapFileToDTO(file),
		PresignedDownloadURL:      presignedDownloadURL,
		PresignedThumbnailURL:     presignedThumbnailURL,
		DownloadURLExpirationTime: expirationTime,
		Success:                   true,
		Message:                   "Presigned download URLs generated successfully",
	}
	svc.logger.Info("✅ Presigned download URLs generated successfully",
		zap.Any("file_id", req.FileID),
		zap.Any("user_id", userID),
		zap.Time("url_expiration", expirationTime))
	return response, nil
}

View file

@ -0,0 +1,165 @@
// monorepo/cloud/backend/internal/maplefile/service/file/get_presigned_upload_url.go
package file
import (
"context"
"time"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
uc_filemetadata "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/filemetadata"
uc_fileobjectstorage "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/fileobjectstorage"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// GetPresignedUploadURLRequestDTO carries the target file ID and an
// optional lifetime for the generated upload URLs.
type GetPresignedUploadURLRequestDTO struct {
	FileID      gocql.UUID    `json:"file_id"`
	URLDuration time.Duration `json:"url_duration,omitempty"` // Optional, defaults to 1 hour
}

// GetPresignedUploadURLResponseDTO bundles the file metadata with the
// presigned upload URL(s) and their shared expiration time.
type GetPresignedUploadURLResponseDTO struct {
	File                    *FileResponseDTO `json:"file"`
	PresignedUploadURL      string           `json:"presigned_upload_url"`
	PresignedThumbnailURL   string           `json:"presigned_thumbnail_url,omitempty"` // empty when the file has no thumbnail
	UploadURLExpirationTime time.Time        `json:"upload_url_expiration_time"`
	Success                 bool             `json:"success"`
	Message                 string           `json:"message"`
}

// GetPresignedUploadURLService issues presigned upload URLs for a file
// after verifying the caller's read-write access to its collection.
type GetPresignedUploadURLService interface {
	Execute(ctx context.Context, req *GetPresignedUploadURLRequestDTO) (*GetPresignedUploadURLResponseDTO, error)
}

type getPresignedUploadURLServiceImpl struct {
	config                            *config.Configuration
	logger                            *zap.Logger
	collectionRepo                    dom_collection.CollectionRepository // access checks on the file's collection
	getMetadataUseCase                uc_filemetadata.GetFileMetadataUseCase
	generatePresignedUploadURLUseCase uc_fileobjectstorage.GeneratePresignedUploadURLUseCase
}

// NewGetPresignedUploadURLService wires the service implementation with its
// dependencies and a named sub-logger.
func NewGetPresignedUploadURLService(
	config *config.Configuration,
	logger *zap.Logger,
	collectionRepo dom_collection.CollectionRepository,
	getMetadataUseCase uc_filemetadata.GetFileMetadataUseCase,
	generatePresignedUploadURLUseCase uc_fileobjectstorage.GeneratePresignedUploadURLUseCase,
) GetPresignedUploadURLService {
	logger = logger.Named("GetPresignedUploadURLService")
	return &getPresignedUploadURLServiceImpl{
		config:                            config,
		logger:                            logger,
		collectionRepo:                    collectionRepo,
		getMetadataUseCase:                getMetadataUseCase,
		generatePresignedUploadURLUseCase: generatePresignedUploadURLUseCase,
	}
}
// Execute generates time-limited presigned upload URLs for a file (and its
// thumbnail, when present) after verifying the caller has read-write access
// to the file's collection.
//
// The URL lifetime defaults to one hour when the request does not supply a
// positive duration. A thumbnail URL failure is logged and tolerated so the
// primary upload is never blocked by the thumbnail.
func (svc *getPresignedUploadURLServiceImpl) Execute(ctx context.Context, req *GetPresignedUploadURLRequestDTO) (*GetPresignedUploadURLResponseDTO, error) {
	//
	// STEP 1: Validation
	//
	if req == nil {
		svc.logger.Warn("Failed validation with nil request")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "Request details are required")
	}
	// NOTE: gocql.UUID.String() always renders 36 characters, so the old
	// comparison against "" could never fire; check the zero-value UUID.
	if req.FileID == (gocql.UUID{}) {
		svc.logger.Warn("Empty file ID provided")
		return nil, httperror.NewForBadRequestWithSingleField("file_id", "File ID is required")
	}
	// Default the URL duration; treat zero AND negative values as "not
	// provided" so callers cannot request an already-expired URL.
	if req.URLDuration <= 0 {
		req.URLDuration = 1 * time.Hour
	}
	//
	// STEP 2: Get user ID from context
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 3: Get file metadata
	//
	file, err := svc.getMetadataUseCase.Execute(req.FileID)
	if err != nil {
		svc.logger.Error("Failed to get file metadata",
			zap.Error(err),
			zap.Any("file_id", req.FileID))
		return nil, err
	}
	//
	// STEP 4: Check if user has write access to the file's collection
	//
	hasAccess, err := svc.collectionRepo.CheckAccess(ctx, file.CollectionID, userID, dom_collection.CollectionPermissionReadWrite)
	if err != nil {
		svc.logger.Error("Failed to check collection access",
			zap.Error(err),
			zap.Any("collection_id", file.CollectionID),
			zap.Any("user_id", userID))
		return nil, err
	}
	if !hasAccess {
		svc.logger.Warn("Unauthorized presigned URL request",
			zap.Any("user_id", userID),
			zap.Any("file_id", req.FileID),
			zap.Any("collection_id", file.CollectionID))
		return nil, httperror.NewForForbiddenWithSingleField("message", "You don't have permission to upload to this file")
	}
	//
	// STEP 5: Generate presigned upload URLs
	//
	expirationTime := time.Now().Add(req.URLDuration)
	presignedUploadURL, err := svc.generatePresignedUploadURLUseCase.Execute(ctx, file.EncryptedFileObjectKey, req.URLDuration)
	if err != nil {
		svc.logger.Error("Failed to generate presigned upload URL",
			zap.Error(err),
			zap.Any("file_id", req.FileID),
			zap.String("storage_path", file.EncryptedFileObjectKey))
		return nil, err
	}
	// Generate thumbnail upload URL if thumbnail path exists; failures here
	// are deliberately non-fatal.
	var presignedThumbnailURL string
	if file.EncryptedThumbnailObjectKey != "" {
		presignedThumbnailURL, err = svc.generatePresignedUploadURLUseCase.Execute(ctx, file.EncryptedThumbnailObjectKey, req.URLDuration)
		if err != nil {
			svc.logger.Warn("Failed to generate thumbnail presigned upload URL, continuing without it",
				zap.Error(err),
				zap.Any("file_id", req.FileID),
				zap.String("thumbnail_storage_path", file.EncryptedThumbnailObjectKey))
		}
	}
	//
	// STEP 6: Prepare response
	//
	response := &GetPresignedUploadURLResponseDTO{
		File:                    mapFileToDTO(file),
		PresignedUploadURL:      presignedUploadURL,
		PresignedThumbnailURL:   presignedThumbnailURL,
		UploadURLExpirationTime: expirationTime,
		Success:                 true,
		Message:                 "Presigned upload URLs generated successfully",
	}
	svc.logger.Info("Presigned upload URLs generated successfully",
		zap.Any("file_id", req.FileID),
		zap.Any("user_id", userID),
		zap.Time("url_expiration", expirationTime))
	return response, nil
}

View file

@ -0,0 +1,120 @@
// monorepo/cloud/backend/internal/maplefile/service/file/list_by_collection.go
package file
import (
"context"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
uc_filemetadata "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/filemetadata"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// ListFilesByCollectionRequestDTO identifies the collection whose files
// should be listed.
type ListFilesByCollectionRequestDTO struct {
	CollectionID gocql.UUID `json:"collection_id"`
}

// FilesResponseDTO is the shared list-response shape used by the file
// listing services in this package.
type FilesResponseDTO struct {
	Files []*FileResponseDTO `json:"files"`
}

// ListFilesByCollectionService lists a collection's files for callers that
// have at least read access to that collection.
type ListFilesByCollectionService interface {
	Execute(ctx context.Context, req *ListFilesByCollectionRequestDTO) (*FilesResponseDTO, error)
}

type listFilesByCollectionServiceImpl struct {
	config                      *config.Configuration
	logger                      *zap.Logger
	collectionRepo              dom_collection.CollectionRepository // access checks on the requested collection
	getFilesByCollectionUseCase uc_filemetadata.GetFileMetadataByCollectionUseCase
}

// NewListFilesByCollectionService wires the service implementation with its
// dependencies and a named sub-logger.
func NewListFilesByCollectionService(
	config *config.Configuration,
	logger *zap.Logger,
	collectionRepo dom_collection.CollectionRepository,
	getFilesByCollectionUseCase uc_filemetadata.GetFileMetadataByCollectionUseCase,
) ListFilesByCollectionService {
	logger = logger.Named("ListFilesByCollectionService")
	return &listFilesByCollectionServiceImpl{
		config:                      config,
		logger:                      logger,
		collectionRepo:              collectionRepo,
		getFilesByCollectionUseCase: getFilesByCollectionUseCase,
	}
}
// Execute lists all files in a collection the authenticated user can read.
//
// Steps: validate the request, resolve the caller from the request context,
// verify read access to the collection, fetch its files, and map each
// domain model to a response DTO.
func (svc *listFilesByCollectionServiceImpl) Execute(ctx context.Context, req *ListFilesByCollectionRequestDTO) (*FilesResponseDTO, error) {
	//
	// STEP 1: Validation
	//
	if req == nil {
		svc.logger.Warn("Failed validation with nil request")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "Collection ID is required")
	}
	// NOTE: gocql.UUID.String() always renders 36 characters, so the old
	// comparison against "" could never fire; check the zero-value UUID.
	if req.CollectionID == (gocql.UUID{}) {
		svc.logger.Warn("Empty collection ID provided")
		return nil, httperror.NewForBadRequestWithSingleField("collection_id", "Collection ID is required")
	}
	//
	// STEP 2: Get user ID from context
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 3: Check if user has access to the collection
	//
	hasAccess, err := svc.collectionRepo.CheckAccess(ctx, req.CollectionID, userID, dom_collection.CollectionPermissionReadOnly)
	if err != nil {
		svc.logger.Error("Failed to check collection access",
			zap.Error(err),
			zap.Any("collection_id", req.CollectionID),
			zap.Any("user_id", userID))
		return nil, err
	}
	if !hasAccess {
		svc.logger.Warn("Unauthorized collection access attempt",
			zap.Any("user_id", userID),
			zap.Any("collection_id", req.CollectionID))
		return nil, httperror.NewForForbiddenWithSingleField("message", "You don't have permission to access this collection")
	}
	//
	// STEP 4: Get files by collection
	//
	files, err := svc.getFilesByCollectionUseCase.Execute(req.CollectionID)
	if err != nil {
		svc.logger.Error("Failed to get files by collection",
			zap.Error(err),
			zap.Any("collection_id", req.CollectionID))
		return nil, err
	}
	//
	// STEP 5: Map domain models to response DTOs
	//
	response := &FilesResponseDTO{
		Files: make([]*FileResponseDTO, len(files)),
	}
	for i, file := range files {
		response.Files[i] = mapFileToDTO(file)
	}
	svc.logger.Debug("Found files by collection",
		zap.Int("count", len(files)),
		zap.Any("collection_id", req.CollectionID))
	return response, nil
}

View file

@ -0,0 +1,104 @@
// monorepo/cloud/backend/internal/maplefile/service/file/list_by_created_by_user_id.go
package file
import (
"context"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
uc_filemetadata "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/filemetadata"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// ListFilesByCreatedByUserIDRequestDTO identifies the creator whose files
// should be listed.
type ListFilesByCreatedByUserIDRequestDTO struct {
	CreatedByUserID gocql.UUID `json:"created_by_user_id"`
}

// ListFilesByCreatedByUserIDService lists files created by a given user.
// Callers may only query their own created files.
type ListFilesByCreatedByUserIDService interface {
	Execute(ctx context.Context, req *ListFilesByCreatedByUserIDRequestDTO) (*FilesResponseDTO, error)
}

type listFilesByCreatedByUserIDServiceImpl struct {
	config                           *config.Configuration
	logger                           *zap.Logger
	getFilesByCreatedByUserIDUseCase uc_filemetadata.GetFileMetadataByCreatedByUserIDUseCase
}

// NewListFilesByCreatedByUserIDService wires the service implementation
// with its dependencies and a named sub-logger.
func NewListFilesByCreatedByUserIDService(
	config *config.Configuration,
	logger *zap.Logger,
	getFilesByCreatedByUserIDUseCase uc_filemetadata.GetFileMetadataByCreatedByUserIDUseCase,
) ListFilesByCreatedByUserIDService {
	logger = logger.Named("ListFilesByCreatedByUserIDService")
	return &listFilesByCreatedByUserIDServiceImpl{
		config:                           config,
		logger:                           logger,
		getFilesByCreatedByUserIDUseCase: getFilesByCreatedByUserIDUseCase,
	}
}
// Execute lists files created by the requested user. For privacy, the
// authenticated caller may only list files they created themselves.
func (svc *listFilesByCreatedByUserIDServiceImpl) Execute(ctx context.Context, req *ListFilesByCreatedByUserIDRequestDTO) (*FilesResponseDTO, error) {
	//
	// STEP 1: Validation
	//
	if req == nil {
		svc.logger.Warn("Failed validation with nil request")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "Created by user ID is required")
	}
	// NOTE: gocql.UUID.String() always renders 36 characters, so the old
	// comparison against "" could never fire; check the zero-value UUID.
	if req.CreatedByUserID == (gocql.UUID{}) {
		svc.logger.Warn("Empty created by user ID provided")
		return nil, httperror.NewForBadRequestWithSingleField("created_by_user_id", "Created by user ID is required")
	}
	//
	// STEP 2: Get user ID from context (for authorization)
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 3: Check if the requesting user can access files created by the specified user
	// Only allow users to see their own created files for privacy
	//
	if userID != req.CreatedByUserID {
		svc.logger.Warn("Unauthorized attempt to list files created by another user",
			zap.Any("requesting_user_id", userID),
			zap.Any("created_by_user_id", req.CreatedByUserID))
		return nil, httperror.NewForForbiddenWithSingleField("message", "You can only view files you have created")
	}
	//
	// STEP 4: Get files by created_by_user_id
	//
	files, err := svc.getFilesByCreatedByUserIDUseCase.Execute(req.CreatedByUserID)
	if err != nil {
		svc.logger.Error("Failed to get files by created_by_user_id",
			zap.Error(err),
			zap.Any("created_by_user_id", req.CreatedByUserID))
		return nil, err
	}
	//
	// STEP 5: Map domain models to response DTOs
	//
	response := &FilesResponseDTO{
		Files: make([]*FileResponseDTO, len(files)),
	}
	for i, file := range files {
		response.Files[i] = mapFileToDTO(file)
	}
	svc.logger.Debug("Found files by created_by_user_id",
		zap.Int("count", len(files)),
		zap.Any("created_by_user_id", req.CreatedByUserID))
	return response, nil
}

View file

@ -0,0 +1,104 @@
// monorepo/cloud/backend/internal/maplefile/service/file/list_by_owner_id.go
package file
import (
"context"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
uc_filemetadata "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/filemetadata"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// ListFilesByOwnerIDRequestDTO identifies the owner whose files should be
// listed.
type ListFilesByOwnerIDRequestDTO struct {
	OwnerID gocql.UUID `json:"owner_id"`
}

// ListFilesByOwnerIDService lists files owned by a given user. Callers may
// only query their own files.
type ListFilesByOwnerIDService interface {
	Execute(ctx context.Context, req *ListFilesByOwnerIDRequestDTO) (*FilesResponseDTO, error)
}

type listFilesByOwnerIDServiceImpl struct {
	config                   *config.Configuration
	logger                   *zap.Logger
	getFilesByOwnerIDUseCase uc_filemetadata.GetFileMetadataByOwnerIDUseCase
}

// NewListFilesByOwnerIDService wires the service implementation with its
// dependencies and a named sub-logger.
func NewListFilesByOwnerIDService(
	config *config.Configuration,
	logger *zap.Logger,
	getFilesByOwnerIDUseCase uc_filemetadata.GetFileMetadataByOwnerIDUseCase,
) ListFilesByOwnerIDService {
	logger = logger.Named("ListFilesByOwnerIDService")
	return &listFilesByOwnerIDServiceImpl{
		config:                   config,
		logger:                   logger,
		getFilesByOwnerIDUseCase: getFilesByOwnerIDUseCase,
	}
}
// Execute lists files owned by the requested user. For privacy, the
// authenticated caller may only list files they own themselves.
func (svc *listFilesByOwnerIDServiceImpl) Execute(ctx context.Context, req *ListFilesByOwnerIDRequestDTO) (*FilesResponseDTO, error) {
	//
	// STEP 1: Validation
	//
	if req == nil {
		svc.logger.Warn("Failed validation with nil request")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "Owner ID is required")
	}
	// NOTE: gocql.UUID.String() always renders 36 characters, so the old
	// comparison against "" could never fire; check the zero-value UUID.
	if req.OwnerID == (gocql.UUID{}) {
		svc.logger.Warn("Empty owner ID provided")
		return nil, httperror.NewForBadRequestWithSingleField("owner_id", "Owner ID is required")
	}
	//
	// STEP 2: Get user ID from context (for authorization)
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 3: Check if the requesting user can access files owned by the specified user
	// Only allow users to see their own files for privacy
	//
	if userID != req.OwnerID {
		// Fixed copy-paste from the created-by service: this query is keyed
		// by owner, so the log/error wording refers to ownership.
		svc.logger.Warn("Unauthorized attempt to list files owned by another user",
			zap.Any("requesting_user_id", userID),
			zap.Any("owner_id", req.OwnerID))
		return nil, httperror.NewForForbiddenWithSingleField("message", "You can only view files you own")
	}
	//
	// STEP 4: Get files by owner_id
	//
	files, err := svc.getFilesByOwnerIDUseCase.Execute(req.OwnerID)
	if err != nil {
		svc.logger.Error("Failed to get files by owner_id",
			zap.Error(err),
			zap.Any("owner_id", req.OwnerID))
		return nil, err
	}
	//
	// STEP 5: Map domain models to response DTOs
	//
	response := &FilesResponseDTO{
		Files: make([]*FileResponseDTO, len(files)),
	}
	for i, file := range files {
		response.Files[i] = mapFileToDTO(file)
	}
	svc.logger.Debug("Found files by owner_id",
		zap.Int("count", len(files)),
		zap.Any("owner_id", req.OwnerID))
	return response, nil
}

View file

@ -0,0 +1,225 @@
// cloud/maplefile-backend/internal/maplefile/service/file/list_recent_files.go
package file
import (
"context"
"encoding/base64"
"encoding/json"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/crypto"
dom_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/file"
dom_tag "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/tag"
uc_filemetadata "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/filemetadata"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// RecentFileResponseDTO represents a recent file in the response. The
// encrypted file key is returned in structured form (deserialized from the
// JSON stored in the database) while all other encrypted fields pass
// through as stored.
type RecentFileResponseDTO struct {
	ID                            gocql.UUID              `json:"id"`
	CollectionID                  gocql.UUID              `json:"collection_id"`
	OwnerID                       gocql.UUID              `json:"owner_id"`
	EncryptedMetadata             string                  `json:"encrypted_metadata"`
	EncryptedFileKey              crypto.EncryptedFileKey `json:"encrypted_file_key"`
	EncryptionVersion             string                  `json:"encryption_version"`
	EncryptedHash                 string                  `json:"encrypted_hash"`
	EncryptedFileSizeInBytes      int64                   `json:"encrypted_file_size_in_bytes"`
	EncryptedThumbnailSizeInBytes int64                   `json:"encrypted_thumbnail_size_in_bytes"`
	Tags                          []dom_tag.EmbeddedTag   `json:"tags"`
	CreatedAt                     string                  `json:"created_at"`  // RFC 3339 timestamp string
	ModifiedAt                    string                  `json:"modified_at"` // RFC 3339 timestamp string
	Version                       uint64                  `json:"version"`
	State                         string                  `json:"state"`
}

// ListRecentFilesResponseDTO represents the response for listing recent
// files, using cursor-based pagination: NextCursor is an opaque
// base64-encoded token to pass back for the next page.
type ListRecentFilesResponseDTO struct {
	Files      []RecentFileResponseDTO `json:"files"`
	NextCursor *string                 `json:"next_cursor,omitempty"` // nil on the last page
	HasMore    bool                    `json:"has_more"`
	TotalCount int                     `json:"total_count"` // count of files in THIS page
}
// ListRecentFilesService returns the authenticated user's recent files with
// cursor-based pagination. The cursor is an opaque base64(JSON) token.
type ListRecentFilesService interface {
	Execute(ctx context.Context, cursor *string, limit int64) (*ListRecentFilesResponseDTO, error)
}

type listRecentFilesServiceImpl struct {
	config                 *config.Configuration
	logger                 *zap.Logger
	listRecentFilesUseCase uc_filemetadata.ListRecentFilesUseCase
}

// NewListRecentFilesService wires the service implementation with its
// dependencies and a named sub-logger.
func NewListRecentFilesService(
	config *config.Configuration,
	logger *zap.Logger,
	listRecentFilesUseCase uc_filemetadata.ListRecentFilesUseCase,
) ListRecentFilesService {
	logger = logger.Named("ListRecentFilesService")
	return &listRecentFilesServiceImpl{
		config:                 config,
		logger:                 logger,
		listRecentFilesUseCase: listRecentFilesUseCase,
	}
}
// Execute returns a page of the authenticated user's recent files.
//
// The optional cursor is an opaque token produced by a previous call:
// base64-encoded JSON of dom_file.RecentFilesCursor. The limit is clamped
// to [1, 100] with a default of 30. Per-file encrypted-key deserialization
// problems are logged and tolerated (the key is returned empty) so one bad
// record cannot fail the whole page.
func (svc *listRecentFilesServiceImpl) Execute(ctx context.Context, cursor *string, limit int64) (*ListRecentFilesResponseDTO, error) {
	//
	// STEP 1: Get user ID from context
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 2: Parse cursor if provided
	//
	var parsedCursor *dom_file.RecentFilesCursor
	if cursor != nil && *cursor != "" {
		// Decode base64 cursor
		cursorBytes, err := base64.StdEncoding.DecodeString(*cursor)
		if err != nil {
			svc.logger.Error("Failed to decode cursor",
				zap.String("cursor", *cursor),
				zap.Error(err))
			return nil, httperror.NewForBadRequestWithSingleField("cursor", "Invalid cursor format")
		}
		// Parse JSON cursor
		var cursorData dom_file.RecentFilesCursor
		if err := json.Unmarshal(cursorBytes, &cursorData); err != nil {
			svc.logger.Error("Failed to parse cursor",
				zap.String("cursor", *cursor),
				zap.Error(err))
			return nil, httperror.NewForBadRequestWithSingleField("cursor", "Invalid cursor format")
		}
		parsedCursor = &cursorData
	}
	//
	// STEP 3: Set default limit if not provided
	//
	if limit <= 0 {
		limit = 30 // Default limit
	}
	if limit > 100 {
		limit = 100 // Max limit
	}
	svc.logger.Debug("Processing recent files request",
		zap.Any("user_id", userID),
		zap.Int64("limit", limit),
		zap.Any("cursor", parsedCursor))
	//
	// STEP 4: Call use case to get recent files
	//
	response, err := svc.listRecentFilesUseCase.Execute(ctx, userID, parsedCursor, limit)
	if err != nil {
		svc.logger.Error("Failed to get recent files",
			zap.Any("user_id", userID),
			zap.Error(err))
		return nil, err
	}
	//
	// STEP 5: Convert domain response to service DTO
	//
	files := make([]RecentFileResponseDTO, len(response.Files))
	for i, file := range response.Files {
		// Deserialize encrypted file key. All failure modes below are
		// non-fatal: the DTO ships with a zero-value key and the problem is
		// logged for investigation.
		var encryptedFileKey crypto.EncryptedFileKey
		if file.EncryptedFileKey == "" {
			svc.logger.Warn("Encrypted file key is empty in database for file",
				zap.String("file_id", file.ID.String()))
			// Continue with empty key rather than failing entirely
		} else if err := json.Unmarshal([]byte(file.EncryptedFileKey), &encryptedFileKey); err != nil {
			svc.logger.Warn("Failed to deserialize encrypted file key for file",
				zap.String("file_id", file.ID.String()),
				zap.Int("encrypted_key_length", len(file.EncryptedFileKey)),
				zap.String("encrypted_key_preview", truncateString(file.EncryptedFileKey, 100)),
				zap.Error(err))
			// Continue with empty key rather than failing entirely
		} else if len(encryptedFileKey.Ciphertext) == 0 || len(encryptedFileKey.Nonce) == 0 {
			// Deserialization succeeded but resulted in empty ciphertext/nonce
			// This can happen if the base64 decoding in custom UnmarshalJSON fails silently
			svc.logger.Warn("Encrypted file key deserialized but has empty ciphertext or nonce",
				zap.String("file_id", file.ID.String()),
				zap.Int("ciphertext_len", len(encryptedFileKey.Ciphertext)),
				zap.Int("nonce_len", len(encryptedFileKey.Nonce)),
				zap.String("encrypted_key_preview", truncateString(file.EncryptedFileKey, 200)))
		} else {
			// Successfully deserialized - log for debugging
			svc.logger.Debug("Successfully deserialized encrypted file key",
				zap.String("file_id", file.ID.String()),
				zap.Int("ciphertext_len", len(encryptedFileKey.Ciphertext)),
				zap.Int("nonce_len", len(encryptedFileKey.Nonce)),
				zap.Int("key_version", encryptedFileKey.KeyVersion))
		}
		files[i] = RecentFileResponseDTO{
			ID:                            file.ID,
			CollectionID:                  file.CollectionID,
			OwnerID:                       file.OwnerID,
			EncryptedMetadata:             file.EncryptedMetadata,
			EncryptedFileKey:              encryptedFileKey,
			EncryptionVersion:             file.EncryptionVersion,
			EncryptedHash:                 file.EncryptedHash,
			EncryptedFileSizeInBytes:      file.EncryptedFileSizeInBytes,
			EncryptedThumbnailSizeInBytes: file.EncryptedThumbnailSizeInBytes,
			Tags:                          file.Tags,
			// "2006-01-02T15:04:05Z07:00" is the RFC 3339 reference layout.
			CreatedAt:  file.CreatedAt.Format("2006-01-02T15:04:05Z07:00"),
			ModifiedAt: file.ModifiedAt.Format("2006-01-02T15:04:05Z07:00"),
			Version:    file.Version,
			State:      file.State,
		}
	}
	//
	// STEP 6: Encode next cursor if present
	//
	var encodedNextCursor *string
	if response.NextCursor != nil {
		cursorBytes, err := json.Marshal(response.NextCursor)
		if err != nil {
			// A marshal failure only costs pagination; the page itself is
			// still returned (NextCursor stays nil).
			svc.logger.Error("Failed to marshal next cursor",
				zap.Any("cursor", response.NextCursor),
				zap.Error(err))
		} else {
			cursorStr := base64.StdEncoding.EncodeToString(cursorBytes)
			encodedNextCursor = &cursorStr
		}
	}
	//
	// STEP 7: Prepare response
	//
	serviceResponse := &ListRecentFilesResponseDTO{
		Files:      files,
		NextCursor: encodedNextCursor,
		HasMore:    response.HasMore,
		TotalCount: len(files),
	}
	svc.logger.Info("Successfully served recent files",
		zap.Any("user_id", userID),
		zap.Int("files_count", len(files)),
		zap.Bool("has_more", response.HasMore),
		zap.Any("next_cursor", encodedNextCursor))
	return serviceResponse, nil
}
// truncateString returns s unchanged when it fits within maxLen bytes;
// otherwise it returns the first maxLen bytes followed by "...".
func truncateString(s string, maxLen int) string {
	if len(s) > maxLen {
		return s[:maxLen] + "..."
	}
	return s
}

View file

@ -0,0 +1,143 @@
// monorepo/cloud/backend/internal/maplefile/service/file/list_sync_data.go
package file
import (
"context"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
dom_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/file"
uc_filemetadata "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/filemetadata"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// ListFileSyncDataService returns paginated file-sync metadata covering every
// collection the authenticated user (taken from ctx) can access.
type ListFileSyncDataService interface {
	Execute(ctx context.Context, cursor *dom_file.FileSyncCursor, limit int64) (*dom_file.FileSyncResponse, error)
}
// listFileSyncDataServiceImpl is the default ListFileSyncDataService, backed
// by the file-metadata sync use case and the collection repository (used to
// resolve which collections the user may read).
type listFileSyncDataServiceImpl struct {
	config                  *config.Configuration
	logger                  *zap.Logger
	listFileSyncDataUseCase uc_filemetadata.ListFileMetadataSyncDataUseCase
	collectionRepository    dom_collection.CollectionRepository
}
// NewListFileSyncDataService constructs a ListFileSyncDataService with a
// service-scoped named logger.
func NewListFileSyncDataService(
	config *config.Configuration,
	logger *zap.Logger,
	listFileSyncDataUseCase uc_filemetadata.ListFileMetadataSyncDataUseCase,
	collectionRepository dom_collection.CollectionRepository,
) ListFileSyncDataService {
	logger = logger.Named("ListFileSyncDataService")
	return &listFileSyncDataServiceImpl{
		config:                  config,
		logger:                  logger,
		listFileSyncDataUseCase: listFileSyncDataUseCase,
		collectionRepository:    collectionRepository,
	}
}
// Execute returns file sync data for every active collection the
// authenticated user can access (owned plus shared with them), paginated by
// cursor/limit. The user ID is taken from ctx; errors are returned as
// httperror values suitable for direct HTTP serialization.
func (svc *listFileSyncDataServiceImpl) Execute(ctx context.Context, cursor *dom_file.FileSyncCursor, limit int64) (*dom_file.FileSyncResponse, error) {
	//
	// STEP 1: Get user ID from context
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 2: Get accessible collections for the user
	//
	svc.logger.Debug("Getting accessible collections for file sync",
		zap.String("user_id", userID.String()))
	// Get collections where user is owner
	ownedCollections, err := svc.collectionRepository.GetAllByUserID(ctx, userID)
	if err != nil {
		svc.logger.Error("Failed to get owned collections",
			zap.String("user_id", userID.String()),
			zap.Error(err))
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Failed to get accessible collections")
	}
	// Get collections shared with user
	sharedCollections, err := svc.collectionRepository.GetCollectionsSharedWithUser(ctx, userID)
	if err != nil {
		svc.logger.Error("Failed to get shared collections",
			zap.String("user_id", userID.String()),
			zap.Error(err))
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Failed to get accessible collections")
	}
	// Combine owned and shared collections, keeping only active ones.
	// Pre-size to the combined upper bound to avoid repeated growth copies.
	accessibleCollectionIDs := make([]gocql.UUID, 0, len(ownedCollections)+len(sharedCollections))
	for _, coll := range ownedCollections {
		if coll.State == "active" { // Only include active collections
			accessibleCollectionIDs = append(accessibleCollectionIDs, coll.ID)
		}
	}
	for _, coll := range sharedCollections {
		if coll.State == "active" { // Only include active collections
			accessibleCollectionIDs = append(accessibleCollectionIDs, coll.ID)
		}
	}
	svc.logger.Debug("Found accessible collections for file sync",
		zap.String("user_id", userID.String()),
		zap.Int("owned_count", len(ownedCollections)),
		zap.Int("shared_count", len(sharedCollections)),
		zap.Int("total_accessible", len(accessibleCollectionIDs)))
	// If no accessible collections, return an empty (not nil) response so
	// callers can serialize it directly.
	if len(accessibleCollectionIDs) == 0 {
		svc.logger.Info("User has no accessible collections for file sync",
			zap.String("user_id", userID.String()))
		return &dom_file.FileSyncResponse{
			Files:      []dom_file.FileSyncItem{},
			NextCursor: nil,
			HasMore:    false,
		}, nil
	}
	//
	// STEP 3: List file sync data for accessible collections
	//
	syncData, err := svc.listFileSyncDataUseCase.Execute(ctx, userID, cursor, limit, accessibleCollectionIDs)
	if err != nil {
		svc.logger.Error("Failed to list file sync data",
			zap.Error(err),
			zap.String("user_id", userID.String()))
		return nil, err
	}
	if syncData == nil {
		svc.logger.Debug("File sync data not found",
			zap.String("user_id", userID.String()))
		return nil, httperror.NewForNotFoundWithSingleField("message", "File sync results not found")
	}
	// Log sync data with all fields including EncryptedFileSizeInBytes
	svc.logger.Debug("File sync data successfully retrieved",
		zap.String("user_id", userID.String()),
		zap.Any("next_cursor", syncData.NextCursor),
		zap.Int("files_count", len(syncData.Files)))
	// Verify each item has all fields populated including EncryptedFileSizeInBytes
	for i, item := range syncData.Files {
		svc.logger.Debug("Returning file sync item",
			zap.Int("index", i),
			zap.String("file_id", item.ID.String()),
			zap.String("collection_id", item.CollectionID.String()),
			zap.Uint64("version", item.Version),
			zap.String("state", item.State),
			zap.Int64("encrypted_file_size_in_bytes", item.EncryptedFileSizeInBytes))
	}
	return syncData, nil
}

View file

@ -0,0 +1,178 @@
package file
import (
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
dom_tag "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/tag"
uc_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/collection"
uc_filemetadata "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/filemetadata"
uc_fileobjectstorage "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/fileobjectstorage"
uc_storagedailyusage "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/storagedailyusage"
uc_storageusageevent "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/storageusageevent"
uc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/user"
)
// Wire providers for file services
// ProvideGetFileService is the Wire provider for GetFileService.
func ProvideGetFileService(
	cfg *config.Configuration,
	logger *zap.Logger,
	collectionRepo dom_collection.CollectionRepository,
	getMetadataUseCase uc_filemetadata.GetFileMetadataUseCase,
) GetFileService {
	return NewGetFileService(cfg, logger, collectionRepo, getMetadataUseCase)
}
// ProvideUpdateFileService is the Wire provider for UpdateFileService.
func ProvideUpdateFileService(
	cfg *config.Configuration,
	logger *zap.Logger,
	collectionRepo dom_collection.CollectionRepository,
	getMetadataUseCase uc_filemetadata.GetFileMetadataUseCase,
	updateMetadataUseCase uc_filemetadata.UpdateFileMetadataUseCase,
) UpdateFileService {
	return NewUpdateFileService(cfg, logger, collectionRepo, getMetadataUseCase, updateMetadataUseCase)
}
// ProvideSoftDeleteFileService is the Wire provider for SoftDeleteFileService,
// wiring in the metadata, object-storage, quota, and usage-tracking use cases
// the SAGA-protected delete flow depends on.
func ProvideSoftDeleteFileService(
	cfg *config.Configuration,
	logger *zap.Logger,
	collectionRepo dom_collection.CollectionRepository,
	getMetadataUseCase uc_filemetadata.GetFileMetadataUseCase,
	updateFileMetadataUseCase uc_filemetadata.UpdateFileMetadataUseCase,
	softDeleteMetadataUseCase uc_filemetadata.SoftDeleteFileMetadataUseCase,
	hardDeleteMetadataUseCase uc_filemetadata.HardDeleteFileMetadataUseCase,
	deleteDataUseCase uc_fileobjectstorage.DeleteEncryptedDataUseCase,
	listFilesByOwnerIDService ListFilesByOwnerIDService,
	storageQuotaHelperUseCase uc_user.UserStorageQuotaHelperUseCase,
	createStorageUsageEventUseCase uc_storageusageevent.CreateStorageUsageEventUseCase,
	updateStorageUsageUseCase uc_storagedailyusage.UpdateStorageUsageUseCase,
) SoftDeleteFileService {
	return NewSoftDeleteFileService(cfg, logger, collectionRepo, getMetadataUseCase, updateFileMetadataUseCase, softDeleteMetadataUseCase, hardDeleteMetadataUseCase, deleteDataUseCase, listFilesByOwnerIDService, storageQuotaHelperUseCase, createStorageUsageEventUseCase, updateStorageUsageUseCase)
}
// ProvideDeleteMultipleFilesService is the Wire provider for
// DeleteMultipleFilesService.
func ProvideDeleteMultipleFilesService(
	cfg *config.Configuration,
	logger *zap.Logger,
	collectionRepo dom_collection.CollectionRepository,
	getMetadataByIDsUseCase uc_filemetadata.GetFileMetadataByIDsUseCase,
	deleteMetadataManyUseCase uc_filemetadata.DeleteManyFileMetadataUseCase,
	deleteMultipleDataUseCase uc_fileobjectstorage.DeleteMultipleEncryptedDataUseCase,
	createStorageUsageEventUseCase uc_storageusageevent.CreateStorageUsageEventUseCase,
	updateStorageUsageUseCase uc_storagedailyusage.UpdateStorageUsageUseCase,
) DeleteMultipleFilesService {
	return NewDeleteMultipleFilesService(cfg, logger, collectionRepo, getMetadataByIDsUseCase, deleteMetadataManyUseCase, deleteMultipleDataUseCase, createStorageUsageEventUseCase, updateStorageUsageUseCase)
}
// ProvideListFilesByCollectionService is the Wire provider for
// ListFilesByCollectionService.
func ProvideListFilesByCollectionService(
	cfg *config.Configuration,
	logger *zap.Logger,
	collectionRepo dom_collection.CollectionRepository,
	getFilesByCollectionUseCase uc_filemetadata.GetFileMetadataByCollectionUseCase,
) ListFilesByCollectionService {
	return NewListFilesByCollectionService(cfg, logger, collectionRepo, getFilesByCollectionUseCase)
}
// ProvideListFilesByCreatedByUserIDService is the Wire provider for
// ListFilesByCreatedByUserIDService.
func ProvideListFilesByCreatedByUserIDService(
	cfg *config.Configuration,
	logger *zap.Logger,
	getFilesByCreatedByUserIDUseCase uc_filemetadata.GetFileMetadataByCreatedByUserIDUseCase,
) ListFilesByCreatedByUserIDService {
	return NewListFilesByCreatedByUserIDService(cfg, logger, getFilesByCreatedByUserIDUseCase)
}
// ProvideListFilesByOwnerIDService is the Wire provider for
// ListFilesByOwnerIDService.
func ProvideListFilesByOwnerIDService(
	cfg *config.Configuration,
	logger *zap.Logger,
	getFilesByOwnerIDUseCase uc_filemetadata.GetFileMetadataByOwnerIDUseCase,
) ListFilesByOwnerIDService {
	return NewListFilesByOwnerIDService(cfg, logger, getFilesByOwnerIDUseCase)
}
// ProvideListRecentFilesService is the Wire provider for ListRecentFilesService.
func ProvideListRecentFilesService(
	cfg *config.Configuration,
	logger *zap.Logger,
	listRecentFilesUseCase uc_filemetadata.ListRecentFilesUseCase,
) ListRecentFilesService {
	return NewListRecentFilesService(cfg, logger, listRecentFilesUseCase)
}
// ProvideListFileSyncDataService is the Wire provider for
// ListFileSyncDataService.
func ProvideListFileSyncDataService(
	cfg *config.Configuration,
	logger *zap.Logger,
	listSyncDataUseCase uc_filemetadata.ListFileMetadataSyncDataUseCase,
	collectionRepo dom_collection.CollectionRepository,
) ListFileSyncDataService {
	return NewListFileSyncDataService(cfg, logger, listSyncDataUseCase, collectionRepo)
}
// ProvideCreatePendingFileService is the Wire provider for
// CreatePendingFileService.
func ProvideCreatePendingFileService(
	cfg *config.Configuration,
	logger *zap.Logger,
	getCollectionUseCase uc_collection.GetCollectionUseCase,
	checkCollectionAccessUseCase uc_collection.CheckCollectionAccessUseCase,
	checkFileExistsUseCase uc_filemetadata.CheckFileExistsUseCase,
	createMetadataUseCase uc_filemetadata.CreateFileMetadataUseCase,
	generatePresignedUploadURLUseCase uc_fileobjectstorage.GeneratePresignedUploadURLUseCase,
	storageQuotaHelperUseCase uc_user.UserStorageQuotaHelperUseCase,
	tagRepo dom_tag.Repository,
) CreatePendingFileService {
	return NewCreatePendingFileService(cfg, logger, getCollectionUseCase, checkCollectionAccessUseCase, checkFileExistsUseCase, createMetadataUseCase, generatePresignedUploadURLUseCase, storageQuotaHelperUseCase, tagRepo)
}
// ProvideCompleteFileUploadService is the Wire provider for
// CompleteFileUploadService.
func ProvideCompleteFileUploadService(
	cfg *config.Configuration,
	logger *zap.Logger,
	collectionRepo dom_collection.CollectionRepository,
	getMetadataUseCase uc_filemetadata.GetFileMetadataUseCase,
	updateMetadataUseCase uc_filemetadata.UpdateFileMetadataUseCase,
	verifyObjectExistsUseCase uc_fileobjectstorage.VerifyObjectExistsUseCase,
	getObjectSizeUseCase uc_fileobjectstorage.GetObjectSizeUseCase,
	deleteDataUseCase uc_fileobjectstorage.DeleteEncryptedDataUseCase,
	storageQuotaHelperUseCase uc_user.UserStorageQuotaHelperUseCase,
	createStorageUsageEventUseCase uc_storageusageevent.CreateStorageUsageEventUseCase,
	updateStorageUsageUseCase uc_storagedailyusage.UpdateStorageUsageUseCase,
) CompleteFileUploadService {
	return NewCompleteFileUploadService(cfg, logger, collectionRepo, getMetadataUseCase, updateMetadataUseCase, verifyObjectExistsUseCase, getObjectSizeUseCase, deleteDataUseCase, storageQuotaHelperUseCase, createStorageUsageEventUseCase, updateStorageUsageUseCase)
}
// ProvideGetPresignedUploadURLService is the Wire provider for
// GetPresignedUploadURLService.
func ProvideGetPresignedUploadURLService(
	cfg *config.Configuration,
	logger *zap.Logger,
	collectionRepo dom_collection.CollectionRepository,
	getMetadataUseCase uc_filemetadata.GetFileMetadataUseCase,
	generatePresignedUploadURLUseCase uc_fileobjectstorage.GeneratePresignedUploadURLUseCase,
) GetPresignedUploadURLService {
	return NewGetPresignedUploadURLService(cfg, logger, collectionRepo, getMetadataUseCase, generatePresignedUploadURLUseCase)
}
// ProvideGetPresignedDownloadURLService is the Wire provider for
// GetPresignedDownloadURLService.
func ProvideGetPresignedDownloadURLService(
	cfg *config.Configuration,
	logger *zap.Logger,
	collectionRepo dom_collection.CollectionRepository,
	getMetadataUseCase uc_filemetadata.GetFileMetadataUseCase,
	generatePresignedDownloadURLUseCase uc_fileobjectstorage.GeneratePresignedDownloadURLUseCase,
) GetPresignedDownloadURLService {
	return NewGetPresignedDownloadURLService(cfg, logger, collectionRepo, getMetadataUseCase, generatePresignedDownloadURLUseCase)
}
// ProvideArchiveFileService is the Wire provider for ArchiveFileService.
func ProvideArchiveFileService(
	cfg *config.Configuration,
	logger *zap.Logger,
	collectionRepo dom_collection.CollectionRepository,
	getMetadataUseCase uc_filemetadata.GetFileMetadataUseCase,
	updateMetadataUseCase uc_filemetadata.UpdateFileMetadataUseCase,
) ArchiveFileService {
	return NewArchiveFileService(cfg, logger, collectionRepo, getMetadataUseCase, updateMetadataUseCase)
}
// ProvideRestoreFileService is the Wire provider for RestoreFileService.
func ProvideRestoreFileService(
	cfg *config.Configuration,
	logger *zap.Logger,
	collectionRepo dom_collection.CollectionRepository,
	getMetadataUseCase uc_filemetadata.GetFileMetadataUseCase,
	updateMetadataUseCase uc_filemetadata.UpdateFileMetadataUseCase,
) RestoreFileService {
	return NewRestoreFileService(cfg, logger, collectionRepo, getMetadataUseCase, updateMetadataUseCase)
}

View file

@ -0,0 +1,148 @@
// monorepo/cloud/backend/internal/maplefile/service/file/restore.go
package file
import (
"context"
"time"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
dom_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/file"
uc_filemetadata "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/filemetadata"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// RestoreFileRequestDTO identifies the file to restore to the active state.
type RestoreFileRequestDTO struct {
	FileID gocql.UUID `json:"file_id"`
}
// RestoreFileResponseDTO reports the outcome of a restore operation.
type RestoreFileResponseDTO struct {
	Success bool   `json:"success"`
	Message string `json:"message"`
}
// RestoreFileService restores an archived/deleted file back to the active
// state on behalf of the authenticated user carried in ctx.
type RestoreFileService interface {
	Execute(ctx context.Context, req *RestoreFileRequestDTO) (*RestoreFileResponseDTO, error)
}
// restoreFileServiceImpl is the default RestoreFileService, using the
// collection repository for access checks and the file-metadata use cases for
// reads and the state update.
type restoreFileServiceImpl struct {
	config                *config.Configuration
	logger                *zap.Logger
	collectionRepo        dom_collection.CollectionRepository
	getMetadataUseCase    uc_filemetadata.GetFileMetadataUseCase
	updateMetadataUseCase uc_filemetadata.UpdateFileMetadataUseCase
}
// NewRestoreFileService constructs a RestoreFileService with a service-scoped
// named logger.
func NewRestoreFileService(
	config *config.Configuration,
	logger *zap.Logger,
	collectionRepo dom_collection.CollectionRepository,
	getMetadataUseCase uc_filemetadata.GetFileMetadataUseCase,
	updateMetadataUseCase uc_filemetadata.UpdateFileMetadataUseCase,
) RestoreFileService {
	logger = logger.Named("RestoreFileService")
	return &restoreFileServiceImpl{
		config:                config,
		logger:                logger,
		collectionRepo:        collectionRepo,
		getMetadataUseCase:    getMetadataUseCase,
		updateMetadataUseCase: updateMetadataUseCase,
	}
}
// Execute restores a previously archived or deleted file back to the active
// state. The caller (from ctx) must hold read-write access to the file's
// collection, and the current state must allow a transition to active.
func (svc *restoreFileServiceImpl) Execute(ctx context.Context, req *RestoreFileRequestDTO) (*RestoreFileResponseDTO, error) {
	//
	// STEP 1: Validation
	//
	if req == nil {
		svc.logger.Warn("Failed validation with nil request")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "File ID is required")
	}
	// NOTE: gocql.UUID.String() never returns "" (even the zero UUID renders
	// as "00000000-..."), so a missing ID must be detected by comparing
	// against the zero value rather than checking for an empty string.
	if req.FileID == (gocql.UUID{}) {
		svc.logger.Warn("Empty file ID provided")
		return nil, httperror.NewForBadRequestWithSingleField("file_id", "File ID is required")
	}
	//
	// STEP 2: Get user ID from context
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 3: Get file metadata (including any state for restoration)
	//
	file, err := svc.getMetadataUseCase.Execute(req.FileID)
	if err != nil {
		svc.logger.Error("Failed to get file metadata",
			zap.Any("error", err),
			zap.Any("file_id", req.FileID))
		return nil, err
	}
	//
	// STEP 4: Check if user has write access to the file's collection
	//
	hasAccess, err := svc.collectionRepo.CheckAccess(ctx, file.CollectionID, userID, dom_collection.CollectionPermissionReadWrite)
	if err != nil {
		svc.logger.Error("Failed to check collection access",
			zap.Any("error", err),
			zap.Any("collection_id", file.CollectionID),
			zap.Any("user_id", userID))
		return nil, err
	}
	if !hasAccess {
		svc.logger.Warn("Unauthorized file restore attempt",
			zap.Any("user_id", userID),
			zap.Any("file_id", req.FileID),
			zap.Any("collection_id", file.CollectionID))
		return nil, httperror.NewForForbiddenWithSingleField("message", "You don't have permission to restore this file")
	}
	//
	// STEP 5: Validate state transition
	//
	err = dom_file.IsValidStateTransition(file.State, dom_file.FileStateActive)
	if err != nil {
		svc.logger.Warn("Invalid state transition for file restore",
			zap.Any("file_id", req.FileID),
			zap.String("current_state", file.State),
			zap.String("target_state", dom_file.FileStateActive),
			zap.Error(err))
		return nil, httperror.NewForBadRequestWithSingleField("state", err.Error())
	}
	//
	// STEP 6: Restore the file
	//
	file.State = dom_file.FileStateActive
	file.Version++ // Mutation means we increment version.
	file.ModifiedAt = time.Now()
	file.ModifiedByUserID = userID
	err = svc.updateMetadataUseCase.Execute(ctx, file)
	if err != nil {
		svc.logger.Error("Failed to restore file",
			zap.Any("error", err),
			zap.Any("file_id", req.FileID))
		return nil, err
	}
	svc.logger.Info("File restored successfully",
		zap.Any("file_id", req.FileID),
		zap.Any("collection_id", file.CollectionID),
		zap.Any("user_id", userID))
	return &RestoreFileResponseDTO{
		Success: true,
		Message: "File restored successfully",
	}, nil
}

View file

@ -0,0 +1,429 @@
// monorepo/cloud/backend/internal/maplefile/service/file/softdelete.go
package file
import (
"context"
"time"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
uc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/user"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
dom_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/file"
uc_filemetadata "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/filemetadata"
uc_fileobjectstorage "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/fileobjectstorage"
uc_storagedailyusage "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/storagedailyusage"
uc_storageusageevent "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/storageusageevent"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/transaction"
)
// SoftDeleteFileRequestDTO identifies the file to delete. Setting
// ForceHardDelete bypasses the 30-day tombstone and removes the record
// permanently (GDPR right-to-be-forgotten).
type SoftDeleteFileRequestDTO struct {
	FileID          gocql.UUID `json:"file_id"`
	ForceHardDelete bool       `json:"force_hard_delete"` // Skip tombstone for GDPR right-to-be-forgotten
}
// SoftDeleteFileResponseDTO reports the outcome of a delete operation.
type SoftDeleteFileResponseDTO struct {
	Success       bool   `json:"success"`
	Message       string `json:"message"`
	ReleasedBytes int64  `json:"released_bytes"` // Amount of storage quota released
}
// SoftDeleteFileService deletes a file (soft tombstone by default, hard
// delete on request) on behalf of the authenticated user carried in ctx.
type SoftDeleteFileService interface {
	Execute(ctx context.Context, req *SoftDeleteFileRequestDTO) (*SoftDeleteFileResponseDTO, error)
}
// softDeleteFileServiceImpl is the default SoftDeleteFileService. It combines
// metadata use cases (read/update/soft-delete/hard-delete), object-storage
// deletion, and quota/usage tracking into one SAGA-protected delete flow.
type softDeleteFileServiceImpl struct {
	config                    *config.Configuration
	logger                    *zap.Logger
	collectionRepo            dom_collection.CollectionRepository
	getMetadataUseCase        uc_filemetadata.GetFileMetadataUseCase
	updateFileMetadataUseCase uc_filemetadata.UpdateFileMetadataUseCase
	softDeleteMetadataUseCase uc_filemetadata.SoftDeleteFileMetadataUseCase
	hardDeleteMetadataUseCase uc_filemetadata.HardDeleteFileMetadataUseCase
	deleteDataUseCase         uc_fileobjectstorage.DeleteEncryptedDataUseCase
	// listFilesByOwnerIDService is used only as a debugging aid when a
	// metadata lookup fails.
	listFilesByOwnerIDService ListFilesByOwnerIDService
	// Storage quota management
	storageQuotaHelperUseCase uc_user.UserStorageQuotaHelperUseCase
	// Add storage usage tracking use cases
	createStorageUsageEventUseCase uc_storageusageevent.CreateStorageUsageEventUseCase
	updateStorageUsageUseCase      uc_storagedailyusage.UpdateStorageUsageUseCase
}
// NewSoftDeleteFileService constructs a SoftDeleteFileService with a
// service-scoped named logger.
func NewSoftDeleteFileService(
	config *config.Configuration,
	logger *zap.Logger,
	collectionRepo dom_collection.CollectionRepository,
	getMetadataUseCase uc_filemetadata.GetFileMetadataUseCase,
	updateFileMetadataUseCase uc_filemetadata.UpdateFileMetadataUseCase,
	softDeleteMetadataUseCase uc_filemetadata.SoftDeleteFileMetadataUseCase,
	hardDeleteMetadataUseCase uc_filemetadata.HardDeleteFileMetadataUseCase,
	deleteDataUseCase uc_fileobjectstorage.DeleteEncryptedDataUseCase,
	listFilesByOwnerIDService ListFilesByOwnerIDService,
	storageQuotaHelperUseCase uc_user.UserStorageQuotaHelperUseCase,
	createStorageUsageEventUseCase uc_storageusageevent.CreateStorageUsageEventUseCase,
	updateStorageUsageUseCase uc_storagedailyusage.UpdateStorageUsageUseCase,
) SoftDeleteFileService {
	logger = logger.Named("SoftDeleteFileService")
	return &softDeleteFileServiceImpl{
		config:                         config,
		logger:                         logger,
		collectionRepo:                 collectionRepo,
		getMetadataUseCase:             getMetadataUseCase,
		updateFileMetadataUseCase:      updateFileMetadataUseCase,
		softDeleteMetadataUseCase:      softDeleteMetadataUseCase,
		hardDeleteMetadataUseCase:      hardDeleteMetadataUseCase,
		deleteDataUseCase:              deleteDataUseCase,
		listFilesByOwnerIDService:      listFilesByOwnerIDService,
		storageQuotaHelperUseCase:      storageQuotaHelperUseCase,
		createStorageUsageEventUseCase: createStorageUsageEventUseCase,
		updateStorageUsageUseCase:      updateStorageUsageUseCase,
	}
}
// Execute deletes a file — soft delete with a 30-day tombstone by default, or
// permanent hard delete when req.ForceHardDelete is set — releasing storage
// quota and recording usage events. Each mutating step registers a SAGA
// compensation so a mid-flight failure rolls earlier steps back. S3 object
// deletion (STEP 12) is best-effort and never fails the call.
//
// Fixes over the previous revision: the empty-file-ID check compared
// FileID.String() to "", which can never match (gocql.UUID.String() always
// renders the 36-char form); and the debug branch in STEP 3 shadowed `err`,
// causing (nil, nil) to be returned when the debug listing succeeded.
func (svc *softDeleteFileServiceImpl) Execute(ctx context.Context, req *SoftDeleteFileRequestDTO) (*SoftDeleteFileResponseDTO, error) {
	//
	// STEP 1: Validation
	//
	if req == nil {
		svc.logger.Warn("Failed validation with nil request")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "File ID is required")
	}
	// Compare against the zero-value UUID: String() never returns "".
	if req.FileID == (gocql.UUID{}) {
		svc.logger.Warn("Empty file ID provided")
		return nil, httperror.NewForBadRequestWithSingleField("file_id", "File ID is required")
	}
	//
	// STEP 2: Get user ID from context
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 3: Get file metadata
	//
	file, err := svc.getMetadataUseCase.Execute(req.FileID)
	if err != nil {
		svc.logger.Error("Failed to get file metadata",
			zap.Any("error", err),
			zap.Any("file_id", req.FileID))
		// Debug aid: enumerate the caller's files to help diagnose the lookup
		// failure. Use a distinct error variable so the original lookup error
		// is not shadowed and lost.
		svc.logger.Debug("Debugging started, will list all files that belong to the authenticated user")
		currentFiles, listErr := svc.listFilesByOwnerIDService.Execute(ctx, &ListFilesByOwnerIDRequestDTO{OwnerID: userID})
		if listErr != nil {
			svc.logger.Error("Failed to list files by owner ID",
				zap.Any("error", listErr),
				zap.Any("user_id", userID))
			return nil, listErr
		}
		for _, ownedFile := range currentFiles.Files {
			svc.logger.Debug("File",
				zap.Any("id", ownedFile.ID))
		}
		// Return the original metadata-lookup error, not the nil listing error.
		return nil, err
	}
	//
	// STEP 4: Check if user has write access to the file's collection
	//
	hasAccess, err := svc.collectionRepo.CheckAccess(ctx, file.CollectionID, userID, dom_collection.CollectionPermissionReadWrite)
	if err != nil {
		svc.logger.Error("Failed to check collection access",
			zap.Any("error", err),
			zap.Any("collection_id", file.CollectionID),
			zap.Any("user_id", userID))
		return nil, err
	}
	if !hasAccess {
		svc.logger.Warn("Unauthorized file deletion attempt",
			zap.Any("user_id", userID),
			zap.Any("file_id", req.FileID),
			zap.Any("collection_id", file.CollectionID))
		return nil, httperror.NewForForbiddenWithSingleField("message", "You don't have permission to delete this file")
	}
	// Check valid transitions.
	if err := dom_file.IsValidStateTransition(file.State, dom_file.FileStateDeleted); err != nil {
		svc.logger.Warn("Invalid file state transition",
			zap.Any("user_id", userID),
			zap.Error(err))
		return nil, err
	}
	//
	// SAGA: Initialize distributed transaction manager
	//
	saga := transaction.NewSaga("soft-delete-file", svc.logger)
	//
	// STEP 5: Calculate storage space to be released
	//
	totalFileSize := file.EncryptedFileSizeInBytes + file.EncryptedThumbnailSizeInBytes
	svc.logger.Info("Starting file soft-delete with SAGA protection",
		zap.String("file_id", req.FileID.String()),
		zap.Int64("file_size", file.EncryptedFileSizeInBytes),
		zap.Int64("thumbnail_size", file.EncryptedThumbnailSizeInBytes),
		zap.Int64("total_size_to_release", totalFileSize))
	//
	// STEP 6: Update file metadata with tombstone (SAGA protected)
	//
	originalState := file.State
	originalTombstoneVersion := file.TombstoneVersion
	originalTombstoneExpiry := file.TombstoneExpiry
	file.State = dom_file.FileStateDeleted
	file.Version++
	file.ModifiedAt = time.Now()
	file.ModifiedByUserID = userID
	file.TombstoneVersion = file.Version
	file.TombstoneExpiry = time.Now().Add(30 * 24 * time.Hour)
	if err := svc.updateFileMetadataUseCase.Execute(ctx, file); err != nil {
		svc.logger.Error("Failed to update file metadata with tombstone",
			zap.Error(err))
		saga.Rollback(ctx)
		return nil, err
	}
	// Register compensation: restore original metadata
	fileIDCaptured := file.ID
	originalStateCaptured := originalState
	originalTombstoneVersionCaptured := originalTombstoneVersion
	originalTombstoneExpiryCaptured := originalTombstoneExpiry
	saga.AddCompensation(func(ctx context.Context) error {
		svc.logger.Warn("SAGA compensation: restoring file metadata",
			zap.String("file_id", fileIDCaptured.String()))
		restoredFile, err := svc.getMetadataUseCase.Execute(fileIDCaptured)
		if err != nil {
			return err
		}
		restoredFile.State = originalStateCaptured
		restoredFile.TombstoneVersion = originalTombstoneVersionCaptured
		restoredFile.TombstoneExpiry = originalTombstoneExpiryCaptured
		restoredFile.ModifiedAt = time.Now()
		return svc.updateFileMetadataUseCase.Execute(ctx, restoredFile)
	})
	//
	// STEP 7: Delete file metadata record (SAGA protected)
	//
	if req.ForceHardDelete {
		// Hard delete - permanent removal for GDPR right-to-be-forgotten
		svc.logger.Info("Performing hard delete (GDPR mode) - no tombstone",
			zap.String("file_id", req.FileID.String()))
		err = svc.hardDeleteMetadataUseCase.Execute(req.FileID)
		if err != nil {
			svc.logger.Error("Failed to hard-delete file metadata",
				zap.Error(err))
			saga.Rollback(ctx) // Restores tombstone metadata
			return nil, err
		}
		// No compensation for hard delete - GDPR compliance requires permanent deletion
	} else {
		// Soft delete - 30-day tombstone (standard deletion)
		err = svc.softDeleteMetadataUseCase.Execute(req.FileID)
		if err != nil {
			svc.logger.Error("Failed to soft-delete file metadata",
				zap.Error(err))
			saga.Rollback(ctx) // Restores tombstone metadata
			return nil, err
		}
		// Register compensation: restore metadata record to active state
		saga.AddCompensation(func(ctx context.Context) error {
			svc.logger.Warn("SAGA compensation: restoring file metadata record to active state",
				zap.String("file_id", fileIDCaptured.String()))
			// Get the soft-deleted file
			deletedFile, err := svc.getMetadataUseCase.Execute(fileIDCaptured)
			if err != nil {
				return err
			}
			// Restore to active state
			deletedFile.State = dom_file.FileStateActive
			deletedFile.ModifiedAt = time.Now()
			deletedFile.Version++
			deletedFile.TombstoneVersion = 0
			deletedFile.TombstoneExpiry = time.Time{}
			return svc.updateFileMetadataUseCase.Execute(ctx, deletedFile)
		})
	}
	//
	// STEP 8: Update collection file count (SAGA protected)
	//
	if originalState == dom_file.FileStateActive {
		err = svc.collectionRepo.DecrementFileCount(ctx, file.CollectionID)
		if err != nil {
			svc.logger.Error("Failed to decrement file count for collection",
				zap.String("collection_id", file.CollectionID.String()),
				zap.Error(err))
			saga.Rollback(ctx)
			return nil, err
		}
		// Register compensation: increment the count back
		collectionIDCaptured := file.CollectionID
		saga.AddCompensation(func(ctx context.Context) error {
			svc.logger.Warn("SAGA compensation: restoring file count",
				zap.String("collection_id", collectionIDCaptured.String()))
			return svc.collectionRepo.IncrementFileCount(ctx, collectionIDCaptured)
		})
	}
	//
	// STEP 9: Release storage quota (SAGA protected)
	//
	var releasedBytes int64 = 0
	if originalState == dom_file.FileStateActive && totalFileSize > 0 {
		err = svc.storageQuotaHelperUseCase.OnFileDeleted(ctx, userID, totalFileSize)
		if err != nil {
			svc.logger.Error("Failed to release storage quota after file deletion",
				zap.Error(err))
			saga.Rollback(ctx) // Restores metadata + tombstone
			return nil, err
		}
		// Register compensation: re-reserve the released quota
		totalFileSizeCaptured := totalFileSize
		userIDCaptured := userID
		saga.AddCompensation(func(ctx context.Context) error {
			svc.logger.Warn("SAGA compensation: re-reserving released storage quota",
				zap.Int64("size", totalFileSizeCaptured))
			return svc.storageQuotaHelperUseCase.CheckAndReserveQuota(ctx, userIDCaptured, totalFileSizeCaptured)
		})
		releasedBytes = totalFileSize
		svc.logger.Info("Storage quota released successfully",
			zap.Int64("released_bytes", releasedBytes))
		//
		// STEP 10: Create storage usage event (SAGA protected)
		//
		err = svc.createStorageUsageEventUseCase.Execute(ctx, file.OwnerID, totalFileSize, "remove")
		if err != nil {
			svc.logger.Error("Failed to create storage usage event for deletion",
				zap.Error(err))
			saga.Rollback(ctx) // Restores quota + metadata
			return nil, err
		}
		// Register compensation: create compensating "add" event
		ownerIDCaptured := file.OwnerID
		saga.AddCompensation(func(ctx context.Context) error {
			svc.logger.Warn("SAGA compensation: creating compensating usage event")
			return svc.createStorageUsageEventUseCase.Execute(ctx, ownerIDCaptured, totalFileSizeCaptured, "add")
		})
		//
		// STEP 11: Update daily storage usage (SAGA protected)
		//
		today := time.Now().Truncate(24 * time.Hour)
		updateReq := &uc_storagedailyusage.UpdateStorageUsageRequest{
			UserID:      file.OwnerID,
			UsageDay:    &today,
			TotalBytes:  -totalFileSize,
			AddBytes:    0,
			RemoveBytes: totalFileSize,
			IsIncrement: true,
		}
		err = svc.updateStorageUsageUseCase.Execute(ctx, updateReq)
		if err != nil {
			svc.logger.Error("Failed to update daily storage usage for deletion",
				zap.Error(err))
			saga.Rollback(ctx) // Restores everything
			return nil, err
		}
		// Register compensation: reverse the usage update
		saga.AddCompensation(func(ctx context.Context) error {
			svc.logger.Warn("SAGA compensation: reversing daily usage update")
			compensateReq := &uc_storagedailyusage.UpdateStorageUsageRequest{
				UserID:      ownerIDCaptured,
				UsageDay:    &today,
				TotalBytes:  totalFileSizeCaptured, // Positive to reverse
				AddBytes:    totalFileSizeCaptured,
				RemoveBytes: 0,
				IsIncrement: true,
			}
			return svc.updateStorageUsageUseCase.Execute(ctx, compensateReq)
		})
	} else if originalState == dom_file.FileStatePending {
		// For pending files, release the reserved quota (SAGA protected)
		err = svc.storageQuotaHelperUseCase.ReleaseQuota(ctx, userID, totalFileSize)
		if err != nil {
			svc.logger.Error("Failed to release reserved storage quota for pending file",
				zap.Error(err))
			saga.Rollback(ctx) // Restores metadata + tombstone
			return nil, err
		}
		// Register compensation: re-reserve the released quota
		totalFileSizeCaptured := totalFileSize
		userIDCaptured := userID
		saga.AddCompensation(func(ctx context.Context) error {
			svc.logger.Warn("SAGA compensation: re-reserving pending file quota")
			return svc.storageQuotaHelperUseCase.CheckAndReserveQuota(ctx, userIDCaptured, totalFileSizeCaptured)
		})
		releasedBytes = totalFileSize
		svc.logger.Info("Reserved storage quota released for pending file",
			zap.Int64("released_bytes", releasedBytes))
	}
	//
	// STEP 12: Delete S3 objects
	//
	var storagePaths []string
	storagePaths = append(storagePaths, file.EncryptedFileObjectKey)
	if file.EncryptedThumbnailObjectKey != "" {
		storagePaths = append(storagePaths, file.EncryptedThumbnailObjectKey)
	}
	svc.logger.Info("Deleting S3 objects for file",
		zap.String("file_id", req.FileID.String()),
		zap.Int("s3_objects_count", len(storagePaths)))
	for _, storagePath := range storagePaths {
		if err := svc.deleteDataUseCase.Execute(storagePath); err != nil {
			// Log but don't fail - S3 deletion is best effort after metadata is deleted
			svc.logger.Error("Failed to delete S3 object (continuing anyway)",
				zap.String("storage_path", storagePath),
				zap.Error(err))
		}
	}
	//
	// SUCCESS: All operations completed with SAGA protection
	//
	svc.logger.Info("File deleted successfully with SAGA protection",
		zap.String("file_id", req.FileID.String()),
		zap.String("collection_id", file.CollectionID.String()),
		zap.Int64("released_bytes", releasedBytes),
		zap.Int("s3_objects_deleted", len(storagePaths)))
	return &SoftDeleteFileResponseDTO{
		Success: true,
		Message: "File deleted successfully",
		ReleasedBytes: releasedBytes,
	}, nil
}

View file

@ -0,0 +1,178 @@
// monorepo/cloud/backend/internal/maplefile/service/file/update.go
package file
import (
"context"
"time"
"go.uber.org/zap"
"github.com/gocql/gocql"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/crypto"
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
uc_filemetadata "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/filemetadata"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
)
// UpdateFileRequestDTO carries a client's partial update for an existing file.
// Zero-valued optional fields are treated as "not provided" and left untouched.
type UpdateFileRequestDTO struct {
	// ID of the file to update (required).
	ID gocql.UUID `json:"id"`
	// New encrypted metadata blob; empty string means "unchanged".
	EncryptedMetadata string `json:"encrypted_metadata,omitempty"`
	// Replacement encrypted file key; applied only when Ciphertext is non-empty.
	EncryptedFileKey crypto.EncryptedFileKey `json:"encrypted_file_key,omitempty"`
	// Encryption scheme version string; empty means "unchanged".
	EncryptionVersion string `json:"encryption_version,omitempty"`
	// Hash of the encrypted content; empty means "unchanged".
	EncryptedHash string `json:"encrypted_hash,omitempty"`
	// Version the client last fetched; must match the server's current
	// version (optimistic concurrency control).
	Version uint64 `json:"version,omitempty"`
}
// UpdateFileService applies partial updates to a file's encrypted metadata
// on behalf of the authenticated user in ctx.
type UpdateFileService interface {
	Execute(ctx context.Context, req *UpdateFileRequestDTO) (*FileResponseDTO, error)
}
// updateFileServiceImpl is the default UpdateFileService implementation,
// wired with the collection repository (for access checks) and the
// file-metadata use cases (for load and persist).
type updateFileServiceImpl struct {
	config                *config.Configuration
	logger                *zap.Logger
	collectionRepo        dom_collection.CollectionRepository
	getMetadataUseCase    uc_filemetadata.GetFileMetadataUseCase
	updateMetadataUseCase uc_filemetadata.UpdateFileMetadataUseCase
}
// NewUpdateFileService constructs an UpdateFileService with its configuration,
// a service-scoped logger, and the collection/file-metadata dependencies.
func NewUpdateFileService(
	config *config.Configuration,
	logger *zap.Logger,
	collectionRepo dom_collection.CollectionRepository,
	getMetadataUseCase uc_filemetadata.GetFileMetadataUseCase,
	updateMetadataUseCase uc_filemetadata.UpdateFileMetadataUseCase,
) UpdateFileService {
	svc := &updateFileServiceImpl{
		config:                config,
		logger:                logger.Named("UpdateFileService"),
		collectionRepo:        collectionRepo,
		getMetadataUseCase:    getMetadataUseCase,
		updateMetadataUseCase: updateMetadataUseCase,
	}
	return svc
}
// Execute applies a partial update to an existing file's encrypted metadata.
//
// It validates the request, reads the caller's user ID from the session
// context, verifies read-write access on the file's collection, enforces
// optimistic concurrency via the submitted version, applies only the fields
// the client provided, bumps the version, and persists the result.
//
// Returns the updated file as a FileResponseDTO, or an httperror describing
// the validation / authorization / version-conflict failure.
func (svc *updateFileServiceImpl) Execute(ctx context.Context, req *UpdateFileRequestDTO) (*FileResponseDTO, error) {
	//
	// STEP 1: Validation
	//
	if req == nil {
		svc.logger.Warn("Failed validation with nil request")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "File update details are required")
	}
	// BUGFIX: gocql.UUID.String() never returns the empty string (a zero
	// UUID renders as "00000000-0000-0000-0000-000000000000"), so the old
	// `req.ID.String() == ""` check could never fire. Compare against the
	// zero value instead.
	if req.ID == (gocql.UUID{}) {
		svc.logger.Warn("Empty file ID provided")
		return nil, httperror.NewForBadRequestWithSingleField("id", "File ID is required")
	}
	//
	// STEP 2: Get user ID from context
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 3: Get existing file metadata
	//
	file, err := svc.getMetadataUseCase.Execute(req.ID)
	if err != nil {
		svc.logger.Error("Failed to get file metadata",
			zap.Any("error", err),
			zap.Any("file_id", req.ID))
		return nil, err
	}
	//
	// STEP 4: Check if user has write access to the file's collection
	//
	hasAccess, err := svc.collectionRepo.CheckAccess(ctx, file.CollectionID, userID, dom_collection.CollectionPermissionReadWrite)
	if err != nil {
		svc.logger.Error("Failed to check collection access",
			zap.Any("error", err),
			zap.Any("collection_id", file.CollectionID),
			zap.Any("user_id", userID))
		return nil, err
	}
	if !hasAccess {
		svc.logger.Warn("Unauthorized file update attempt",
			zap.Any("user_id", userID),
			zap.Any("file_id", req.ID),
			zap.Any("collection_id", file.CollectionID))
		return nil, httperror.NewForForbiddenWithSingleField("message", "You don't have permission to update this file")
	}
	//
	// STEP 5: Optimistic concurrency check.
	//
	// Developers note:
	// Multiple clients share this data, so the backend must ensure the
	// submitted update is based on the most recent version of the file.
	if file.Version != req.Version {
		// BUGFIX: this branch guards a *file* version conflict; the old
		// "collection" wording and the `collection_id` key (which held a
		// file ID) were copy-paste leftovers from the collection service.
		svc.logger.Warn("Outdated file update attempt",
			zap.Any("user_id", userID),
			zap.Any("file_id", req.ID),
			zap.Any("submitted_version", req.Version),
			zap.Any("current_version", file.Version))
		return nil, httperror.NewForBadRequestWithSingleField("message", "File has been updated since you last fetched it")
	}
	//
	// STEP 6: Apply the provided fields
	//
	updated := false
	if req.EncryptedMetadata != "" {
		file.EncryptedMetadata = req.EncryptedMetadata
		updated = true
	}
	// len() of a nil slice is 0, so the previous `!= nil &&` guard was redundant.
	if len(req.EncryptedFileKey.Ciphertext) > 0 {
		file.EncryptedFileKey = req.EncryptedFileKey
		updated = true
	}
	if req.EncryptionVersion != "" {
		file.EncryptionVersion = req.EncryptionVersion
		updated = true
	}
	if req.EncryptedHash != "" {
		file.EncryptedHash = req.EncryptedHash
		updated = true
	}
	if !updated {
		svc.logger.Warn("No fields to update provided")
		return nil, httperror.NewForBadRequestWithSingleField("non_field_error", "At least one field must be provided for update")
	}
	file.Version++ // Mutation means we increment version.
	file.ModifiedAt = time.Now()
	file.ModifiedByUserID = userID
	//
	// STEP 7: Save updated file (renumbered; was a duplicate "STEP 6")
	//
	err = svc.updateMetadataUseCase.Execute(ctx, file)
	if err != nil {
		svc.logger.Error("Failed to update file metadata",
			zap.Any("error", err),
			zap.Any("file_id", file.ID))
		return nil, err
	}
	//
	// STEP 8: Map domain model to response DTO
	//
	response := mapFileToDTO(file)
	svc.logger.Debug("File updated successfully",
		zap.Any("file_id", file.ID))
	return response, nil
}

View file

@ -0,0 +1,28 @@
// monorepo/cloud/backend/internal/maplefile/service/file/utils.go
package file
import (
dom_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/file"
)
// mapFileToDTO converts a File domain model into the FileResponseDTO shape
// returned by the file services. It copies every exposed field verbatim.
func mapFileToDTO(file *dom_file.File) *FileResponseDTO {
	dto := new(FileResponseDTO)
	// Identity and ownership.
	dto.ID = file.ID
	dto.CollectionID = file.CollectionID
	dto.OwnerID = file.OwnerID
	// Encrypted payload descriptors.
	dto.EncryptedMetadata = file.EncryptedMetadata
	dto.EncryptedFileKey = file.EncryptedFileKey
	dto.EncryptionVersion = file.EncryptionVersion
	dto.EncryptedHash = file.EncryptedHash
	dto.EncryptedFileSizeInBytes = file.EncryptedFileSizeInBytes
	dto.EncryptedThumbnailSizeInBytes = file.EncryptedThumbnailSizeInBytes
	// Classification and audit fields.
	dto.Tags = file.Tags
	dto.CreatedAt = file.CreatedAt
	dto.ModifiedAt = file.ModifiedAt
	dto.Version = file.Version
	// Lifecycle state and tombstone bookkeeping.
	dto.State = file.State
	dto.TombstoneVersion = file.TombstoneVersion
	dto.TombstoneExpiry = file.TombstoneExpiry
	return dto
}