Initial commit: Open sourcing all of the Maple Open Technologies code.
This commit is contained in:
commit
755d54a99d
2010 changed files with 448675 additions and 0 deletions
|
|
@ -0,0 +1,61 @@
|
|||
// monorepo/cloud/maplefile-backend/internal/repo/filemetadata/anonymize_file_ips.go
|
||||
package filemetadata
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/gocql/gocql"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// AnonymizeFileIPsByOwner immediately anonymizes all IP addresses for files owned by a specific user
|
||||
// Used for GDPR right-to-be-forgotten implementation
|
||||
func (impl *fileMetadataRepositoryImpl) AnonymizeFileIPsByOwner(ctx context.Context, ownerID gocql.UUID) (int, error) {
|
||||
impl.Logger.Info("Anonymizing IPs for files owned by user (GDPR mode)",
|
||||
zap.String("owner_id", ownerID.String()))
|
||||
|
||||
count := 0
|
||||
|
||||
// Query all files owned by this user
|
||||
query := `SELECT id FROM maplefile.files_by_id WHERE owner_id = ? ALLOW FILTERING`
|
||||
iter := impl.Session.Query(query, ownerID).WithContext(ctx).Iter()
|
||||
|
||||
var fileID gocql.UUID
|
||||
var fileIDs []gocql.UUID
|
||||
|
||||
// Collect all file IDs first
|
||||
for iter.Scan(&fileID) {
|
||||
fileIDs = append(fileIDs, fileID)
|
||||
}
|
||||
|
||||
if err := iter.Close(); err != nil {
|
||||
impl.Logger.Error("Error querying files by owner", zap.Error(err))
|
||||
return count, err
|
||||
}
|
||||
|
||||
// Anonymize IPs for each file
|
||||
for _, fID := range fileIDs {
|
||||
updateQuery := `
|
||||
UPDATE maplefile.files_by_id
|
||||
SET created_from_ip_address = '0.0.0.0',
|
||||
modified_from_ip_address = '0.0.0.0',
|
||||
ip_anonymized_at = ?
|
||||
WHERE id = ?
|
||||
`
|
||||
|
||||
if err := impl.Session.Query(updateQuery, time.Now(), fID).WithContext(ctx).Exec(); err != nil {
|
||||
impl.Logger.Error("Failed to anonymize file IPs",
|
||||
zap.String("file_id", fID.String()),
|
||||
zap.Error(err))
|
||||
continue // Best-effort: continue with next file
|
||||
}
|
||||
count++
|
||||
}
|
||||
|
||||
impl.Logger.Info("✅ Successfully anonymized file IPs",
|
||||
zap.String("owner_id", ownerID.String()),
|
||||
zap.Int("files_anonymized", count))
|
||||
|
||||
return count, nil
|
||||
}
|
||||
|
|
@ -0,0 +1,76 @@
|
|||
// monorepo/cloud/maplefile-backend/internal/repo/filemetadata/anonymize_old_ips.go
|
||||
package filemetadata
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/gocql/gocql"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// AnonymizeOldIPs anonymizes IP addresses in file tables older than the cutoff date
|
||||
func (impl *fileMetadataRepositoryImpl) AnonymizeOldIPs(ctx context.Context, cutoffDate time.Time) (int, error) {
|
||||
totalAnonymized := 0
|
||||
|
||||
// Anonymize files_by_id table (primary table)
|
||||
count, err := impl.anonymizeFilesById(ctx, cutoffDate)
|
||||
if err != nil {
|
||||
impl.Logger.Error("Failed to anonymize files_by_id",
|
||||
zap.Error(err),
|
||||
zap.Time("cutoff_date", cutoffDate))
|
||||
return totalAnonymized, err
|
||||
}
|
||||
totalAnonymized += count
|
||||
|
||||
impl.Logger.Info("IP anonymization completed for file tables",
|
||||
zap.Int("total_anonymized", totalAnonymized),
|
||||
zap.Time("cutoff_date", cutoffDate))
|
||||
|
||||
return totalAnonymized, nil
|
||||
}
|
||||
|
||||
// anonymizeFilesById processes the files_by_id table
//
// Scans every row of maplefile.files_by_id and, for rows created before
// cutoffDate whose ip_anonymized_at is still null, blanks both IP columns
// and stamps ip_anonymized_at with the current time. Failed row updates are
// logged and skipped (best-effort); the returned count is the number of rows
// successfully anonymized. A non-nil error is returned only when the table
// scan itself fails.
//
// NOTE(review): this helper blanks IPs to '' while AnonymizeFileIPsByOwner
// writes '0.0.0.0' — confirm which sentinel the rest of the system expects.
func (impl *fileMetadataRepositoryImpl) anonymizeFilesById(ctx context.Context, cutoffDate time.Time) (int, error) {
	count := 0

	// Query all files (efficient primary key scan, no ALLOW FILTERING)
	query := `SELECT id, created_at, ip_anonymized_at FROM maplefile.files_by_id`
	iter := impl.Session.Query(query).WithContext(ctx).Iter()

	var id gocql.UUID
	var createdAt time.Time
	// Scanned via &ipAnonymizedAt (a **time.Time) so a null column can be
	// represented as nil. Presumably gocql rewrites this on every Scan;
	// verify — a stale non-nil value carried over from a previous row would
	// wrongly skip rows that still need anonymizing.
	var ipAnonymizedAt *time.Time

	for iter.Scan(&id, &createdAt, &ipAnonymizedAt) {
		// Filter in application code: older than cutoff AND not yet anonymized
		if createdAt.Before(cutoffDate) && ipAnonymizedAt == nil {
			// Update the record to anonymize IPs
			updateQuery := `
				UPDATE maplefile.files_by_id
				SET created_from_ip_address = '',
				    modified_from_ip_address = '',
				    ip_anonymized_at = ?
				WHERE id = ?
			`
			if err := impl.Session.Query(updateQuery, time.Now(), id).WithContext(ctx).Exec(); err != nil {
				// Best-effort: log and move on to the next row.
				impl.Logger.Error("Failed to anonymize file record",
					zap.String("file_id", id.String()),
					zap.Error(err))
				continue
			}
			count++
		}
	}

	if err := iter.Close(); err != nil {
		impl.Logger.Error("Error during files_by_id iteration", zap.Error(err))
		return count, err
	}

	impl.Logger.Debug("Anonymized files_by_id table",
		zap.Int("count", count),
		zap.Time("cutoff_date", cutoffDate))

	return count, nil
}
|
||||
|
|
@ -0,0 +1,33 @@
|
|||
// monorepo/cloud/maplefile-backend/internal/maplefile/repo/filemetadata/archive.go
|
||||
package filemetadata
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/gocql/gocql"
|
||||
dom_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/file"
|
||||
)
|
||||
|
||||
func (impl *fileMetadataRepositoryImpl) Archive(id gocql.UUID) error {
|
||||
file, err := impl.Get(id)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get file for archive: %w", err)
|
||||
}
|
||||
|
||||
if file == nil {
|
||||
return fmt.Errorf("file not found")
|
||||
}
|
||||
|
||||
// Validate state transition
|
||||
if err := dom_file.IsValidStateTransition(file.State, dom_file.FileStateArchived); err != nil {
|
||||
return fmt.Errorf("invalid state transition: %w", err)
|
||||
}
|
||||
|
||||
// Update file state
|
||||
file.State = dom_file.FileStateArchived
|
||||
file.ModifiedAt = time.Now()
|
||||
file.Version++
|
||||
|
||||
return impl.Update(file)
|
||||
}
|
||||
38
cloud/maplefile-backend/internal/repo/filemetadata/check.go
Normal file
38
cloud/maplefile-backend/internal/repo/filemetadata/check.go
Normal file
|
|
@ -0,0 +1,38 @@
|
|||
// monorepo/cloud/maplefile-backend/internal/maplefile/repo/filemetadata/check.go
|
||||
package filemetadata
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/gocql/gocql"
|
||||
)
|
||||
|
||||
func (impl *fileMetadataRepositoryImpl) CheckIfExistsByID(id gocql.UUID) (bool, error) {
|
||||
var count int
|
||||
|
||||
query := `SELECT COUNT(*) FROM maplefile.files_by_id WHERE id = ?`
|
||||
|
||||
if err := impl.Session.Query(query, id).Scan(&count); err != nil {
|
||||
return false, fmt.Errorf("failed to check file existence: %w", err)
|
||||
}
|
||||
|
||||
return count > 0, nil
|
||||
}
|
||||
|
||||
func (impl *fileMetadataRepositoryImpl) CheckIfUserHasAccess(fileID gocql.UUID, userID gocql.UUID) (bool, error) {
|
||||
// Check if user has access via the user sync table
|
||||
var count int
|
||||
|
||||
query := `SELECT COUNT(*) FROM maplefile.files_by_user
|
||||
WHERE user_id = ? AND id = ? LIMIT 1 ALLOW FILTERING`
|
||||
|
||||
err := impl.Session.Query(query, userID, fileID).Scan(&count)
|
||||
if err != nil {
|
||||
if err == gocql.ErrNotFound {
|
||||
return false, nil
|
||||
}
|
||||
return false, fmt.Errorf("failed to check file access: %w", err)
|
||||
}
|
||||
|
||||
return count > 0, nil
|
||||
}
|
||||
138
cloud/maplefile-backend/internal/repo/filemetadata/count.go
Normal file
138
cloud/maplefile-backend/internal/repo/filemetadata/count.go
Normal file
|
|
@ -0,0 +1,138 @@
|
|||
// monorepo/cloud/maplefile-backend/internal/maplefile/repo/filemetadata/count.go
|
||||
package filemetadata
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/gocql/gocql"
|
||||
"go.uber.org/zap"
|
||||
|
||||
dom_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/file"
|
||||
)
|
||||
|
||||
// CountFilesByUser counts all active files accessible to the user
|
||||
// accessibleCollectionIDs should include all collections the user owns or has access to
|
||||
func (impl *fileMetadataRepositoryImpl) CountFilesByUser(ctx context.Context, userID gocql.UUID, accessibleCollectionIDs []gocql.UUID) (int, error) {
|
||||
if len(accessibleCollectionIDs) == 0 {
|
||||
// No accessible collections, return 0
|
||||
impl.Logger.Debug("no accessible collections provided for file count",
|
||||
zap.String("user_id", userID.String()))
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// Create a map for efficient collection access checking
|
||||
accessibleCollections := make(map[gocql.UUID]bool)
|
||||
for _, cid := range accessibleCollectionIDs {
|
||||
accessibleCollections[cid] = true
|
||||
}
|
||||
|
||||
// Query files for the user using the user sync table
|
||||
query := `SELECT id, collection_id, state FROM maplefile.files_by_user
|
||||
WHERE user_id = ?`
|
||||
|
||||
iter := impl.Session.Query(query, userID).WithContext(ctx).Iter()
|
||||
|
||||
count := 0
|
||||
var fileID, collectionID gocql.UUID
|
||||
var state string
|
||||
|
||||
for iter.Scan(&fileID, &collectionID, &state) {
|
||||
// Only count files from accessible collections
|
||||
if !accessibleCollections[collectionID] {
|
||||
continue
|
||||
}
|
||||
|
||||
// Only count active files
|
||||
if state != dom_file.FileStateActive {
|
||||
continue
|
||||
}
|
||||
|
||||
count++
|
||||
}
|
||||
|
||||
if err := iter.Close(); err != nil {
|
||||
impl.Logger.Error("failed to count files by user",
|
||||
zap.String("user_id", userID.String()),
|
||||
zap.Int("accessible_collections_count", len(accessibleCollectionIDs)),
|
||||
zap.Error(err))
|
||||
return 0, fmt.Errorf("failed to count files by user: %w", err)
|
||||
}
|
||||
|
||||
impl.Logger.Debug("counted files by user successfully",
|
||||
zap.String("user_id", userID.String()),
|
||||
zap.Int("accessible_collections_count", len(accessibleCollectionIDs)),
|
||||
zap.Int("file_count", count))
|
||||
|
||||
return count, nil
|
||||
}
|
||||
|
||||
// CountFilesByOwner counts all active files owned by the user (alternative approach)
|
||||
func (impl *fileMetadataRepositoryImpl) CountFilesByOwner(ctx context.Context, ownerID gocql.UUID) (int, error) {
|
||||
// Query files owned by the user using the owner table
|
||||
query := `SELECT id, state FROM maplefile.files_by_owner
|
||||
WHERE owner_id = ?`
|
||||
|
||||
iter := impl.Session.Query(query, ownerID).WithContext(ctx).Iter()
|
||||
|
||||
count := 0
|
||||
var fileID gocql.UUID
|
||||
var state string
|
||||
|
||||
for iter.Scan(&fileID, &state) {
|
||||
// Only count active files
|
||||
if state != dom_file.FileStateActive {
|
||||
continue
|
||||
}
|
||||
|
||||
count++
|
||||
}
|
||||
|
||||
if err := iter.Close(); err != nil {
|
||||
impl.Logger.Error("failed to count files by owner",
|
||||
zap.String("owner_id", ownerID.String()),
|
||||
zap.Error(err))
|
||||
return 0, fmt.Errorf("failed to count files by owner: %w", err)
|
||||
}
|
||||
|
||||
impl.Logger.Debug("counted files by owner successfully",
|
||||
zap.String("owner_id", ownerID.String()),
|
||||
zap.Int("file_count", count))
|
||||
|
||||
return count, nil
|
||||
}
|
||||
|
||||
// CountFilesByCollection counts active files in a specific collection
|
||||
func (impl *fileMetadataRepositoryImpl) CountFilesByCollection(ctx context.Context, collectionID gocql.UUID) (int, error) {
|
||||
// Query files in the collection using the collection table
|
||||
query := `SELECT id, state FROM maplefile.files_by_collection
|
||||
WHERE collection_id = ?`
|
||||
|
||||
iter := impl.Session.Query(query, collectionID).WithContext(ctx).Iter()
|
||||
|
||||
count := 0
|
||||
var fileID gocql.UUID
|
||||
var state string
|
||||
|
||||
for iter.Scan(&fileID, &state) {
|
||||
// Only count active files
|
||||
if state != dom_file.FileStateActive {
|
||||
continue
|
||||
}
|
||||
|
||||
count++
|
||||
}
|
||||
|
||||
if err := iter.Close(); err != nil {
|
||||
impl.Logger.Error("failed to count files by collection",
|
||||
zap.String("collection_id", collectionID.String()),
|
||||
zap.Error(err))
|
||||
return 0, fmt.Errorf("failed to count files by collection: %w", err)
|
||||
}
|
||||
|
||||
impl.Logger.Debug("counted files by collection successfully",
|
||||
zap.String("collection_id", collectionID.String()),
|
||||
zap.Int("file_count", count))
|
||||
|
||||
return count, nil
|
||||
}
|
||||
327
cloud/maplefile-backend/internal/repo/filemetadata/create.go
Normal file
327
cloud/maplefile-backend/internal/repo/filemetadata/create.go
Normal file
|
|
@ -0,0 +1,327 @@
|
|||
// monorepo/cloud/maplefile-backend/internal/maplefile/repo/filemetadata/create.go
|
||||
package filemetadata
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/gocql/gocql"
|
||||
"go.uber.org/zap"
|
||||
|
||||
dom_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/file"
|
||||
)
|
||||
|
||||
// Create persists a new file's metadata across all denormalized Cassandra
// tables (files_by_id, files_by_collection, files_by_owner, files_by_creator,
// files_by_user, and one files_by_tag_id row per tag) in a single logged
// batch, then increments the owning collection's file counter when the file
// is active.
//
// Mutates file in place: CreatedAt/ModifiedAt are filled when zero and State
// defaults to active. Returns an error on validation failure, serialization
// failure, or batch execution failure; a failed counter update is only
// logged, not returned.
func (impl *fileMetadataRepositoryImpl) Create(file *dom_file.File) error {
	if file == nil {
		return fmt.Errorf("file cannot be nil")
	}

	if !impl.isValidUUID(file.ID) {
		return fmt.Errorf("file ID is required")
	}

	if !impl.isValidUUID(file.CollectionID) {
		return fmt.Errorf("collection ID is required")
	}

	if !impl.isValidUUID(file.OwnerID) {
		return fmt.Errorf("owner ID is required")
	}

	// Set creation timestamp if not set
	if file.CreatedAt.IsZero() {
		file.CreatedAt = time.Now()
	}

	if file.ModifiedAt.IsZero() {
		file.ModifiedAt = file.CreatedAt
	}

	// Ensure state is set
	if file.State == "" {
		file.State = dom_file.FileStateActive
	}

	// Serialize encrypted file key
	encryptedKeyJSON, err := impl.serializeEncryptedFileKey(file.EncryptedFileKey)
	if err != nil {
		return fmt.Errorf("failed to serialize encrypted file key: %w", err)
	}

	// Serialize tags
	tagsJSON, err := impl.serializeTags(file.Tags)
	if err != nil {
		return fmt.Errorf("failed to serialize tags: %w", err)
	}

	// A logged batch keeps the denormalized tables mutually consistent.
	batch := impl.Session.NewBatch(gocql.LoggedBatch)

	// 1. Insert into main table
	batch.Query(`INSERT INTO maplefile.files_by_id
		(id, collection_id, owner_id, encrypted_metadata, encrypted_file_key, encryption_version,
		encrypted_hash, encrypted_file_object_key, encrypted_file_size_in_bytes,
		encrypted_thumbnail_object_key, encrypted_thumbnail_size_in_bytes, tags,
		created_at, created_by_user_id, modified_at, modified_by_user_id, version,
		state, tombstone_version, tombstone_expiry)
		VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
		file.ID, file.CollectionID, file.OwnerID, file.EncryptedMetadata, encryptedKeyJSON,
		file.EncryptionVersion, file.EncryptedHash, file.EncryptedFileObjectKey,
		file.EncryptedFileSizeInBytes, file.EncryptedThumbnailObjectKey,
		file.EncryptedThumbnailSizeInBytes, tagsJSON, file.CreatedAt, file.CreatedByUserID,
		file.ModifiedAt, file.ModifiedByUserID, file.Version, file.State,
		file.TombstoneVersion, file.TombstoneExpiry)

	// 2. Insert into collection table
	// (partition key collection_id, clustered by modified_at; no tags column)
	batch.Query(`INSERT INTO maplefile.files_by_collection
		(collection_id, modified_at, id, owner_id, encrypted_metadata, encrypted_file_key,
		encryption_version, encrypted_hash, encrypted_file_object_key, encrypted_file_size_in_bytes,
		encrypted_thumbnail_object_key, encrypted_thumbnail_size_in_bytes,
		created_at, created_by_user_id, modified_by_user_id, version,
		state, tombstone_version, tombstone_expiry)
		VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
		file.CollectionID, file.ModifiedAt, file.ID, file.OwnerID, file.EncryptedMetadata,
		encryptedKeyJSON, file.EncryptionVersion, file.EncryptedHash, file.EncryptedFileObjectKey,
		file.EncryptedFileSizeInBytes, file.EncryptedThumbnailObjectKey,
		file.EncryptedThumbnailSizeInBytes, file.CreatedAt, file.CreatedByUserID,
		file.ModifiedByUserID, file.Version, file.State, file.TombstoneVersion, file.TombstoneExpiry)

	// 3. Insert into owner table
	batch.Query(`INSERT INTO maplefile.files_by_owner
		(owner_id, modified_at, id, collection_id, encrypted_metadata, encrypted_file_key,
		encryption_version, encrypted_hash, encrypted_file_object_key, encrypted_file_size_in_bytes,
		encrypted_thumbnail_object_key, encrypted_thumbnail_size_in_bytes,
		created_at, created_by_user_id, modified_by_user_id, version,
		state, tombstone_version, tombstone_expiry)
		VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
		file.OwnerID, file.ModifiedAt, file.ID, file.CollectionID, file.EncryptedMetadata,
		encryptedKeyJSON, file.EncryptionVersion, file.EncryptedHash, file.EncryptedFileObjectKey,
		file.EncryptedFileSizeInBytes, file.EncryptedThumbnailObjectKey,
		file.EncryptedThumbnailSizeInBytes, file.CreatedAt, file.CreatedByUserID,
		file.ModifiedByUserID, file.Version, file.State, file.TombstoneVersion, file.TombstoneExpiry)

	// 4. Insert into created_by table
	// (clustered by created_at rather than modified_at)
	batch.Query(`INSERT INTO maplefile.files_by_creator
		(created_by_user_id, created_at, id, collection_id, owner_id, encrypted_metadata,
		encrypted_file_key, encryption_version, encrypted_hash, encrypted_file_object_key,
		encrypted_file_size_in_bytes, encrypted_thumbnail_object_key, encrypted_thumbnail_size_in_bytes,
		modified_at, modified_by_user_id, version, state, tombstone_version, tombstone_expiry)
		VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
		file.CreatedByUserID, file.CreatedAt, file.ID, file.CollectionID, file.OwnerID,
		file.EncryptedMetadata, encryptedKeyJSON, file.EncryptionVersion, file.EncryptedHash,
		file.EncryptedFileObjectKey, file.EncryptedFileSizeInBytes, file.EncryptedThumbnailObjectKey,
		file.EncryptedThumbnailSizeInBytes, file.ModifiedAt, file.ModifiedByUserID, file.Version,
		file.State, file.TombstoneVersion, file.TombstoneExpiry)

	// 5. Insert into user sync table (for owner and any collection members)
	// NOTE(review): only the owner's row is written here; presumably rows for
	// other collection members are written elsewhere — verify against the
	// sharing code path.
	batch.Query(`INSERT INTO maplefile.files_by_user
		(user_id, modified_at, id, collection_id, owner_id, encrypted_metadata,
		encrypted_file_key, encryption_version, encrypted_hash, encrypted_file_object_key,
		encrypted_file_size_in_bytes, encrypted_thumbnail_object_key, encrypted_thumbnail_size_in_bytes,
		tags, created_at, created_by_user_id, modified_by_user_id, version,
		state, tombstone_version, tombstone_expiry)
		VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
		file.OwnerID, file.ModifiedAt, file.ID, file.CollectionID, file.OwnerID,
		file.EncryptedMetadata, encryptedKeyJSON, file.EncryptionVersion, file.EncryptedHash,
		file.EncryptedFileObjectKey, file.EncryptedFileSizeInBytes, file.EncryptedThumbnailObjectKey,
		file.EncryptedThumbnailSizeInBytes, tagsJSON, file.CreatedAt, file.CreatedByUserID,
		file.ModifiedByUserID, file.Version, file.State, file.TombstoneVersion, file.TombstoneExpiry)

	// 6. Insert into denormalized files_by_tag_id table for each tag
	for _, tag := range file.Tags {
		batch.Query(`INSERT INTO maplefile.files_by_tag_id
			(tag_id, file_id, collection_id, owner_id, encrypted_metadata, encrypted_file_key,
			encryption_version, encrypted_hash, encrypted_file_object_key,
			encrypted_file_size_in_bytes, encrypted_thumbnail_object_key,
			encrypted_thumbnail_size_in_bytes, tag_ids, created_at, created_by_user_id,
			modified_at, modified_by_user_id, version, state, tombstone_version, tombstone_expiry,
			created_from_ip_address, modified_from_ip_address, ip_anonymized_at)
			VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
			tag.ID, file.ID, file.CollectionID, file.OwnerID, file.EncryptedMetadata,
			encryptedKeyJSON, file.EncryptionVersion, file.EncryptedHash,
			file.EncryptedFileObjectKey, file.EncryptedFileSizeInBytes,
			file.EncryptedThumbnailObjectKey, file.EncryptedThumbnailSizeInBytes,
			tagsJSON, file.CreatedAt, file.CreatedByUserID, file.ModifiedAt,
			file.ModifiedByUserID, file.Version, file.State, file.TombstoneVersion,
			file.TombstoneExpiry,
			nil, nil, nil) // IP tracking fields not yet in domain model
	}

	// Execute batch
	if err := impl.Session.ExecuteBatch(batch); err != nil {
		impl.Logger.Error("failed to create file",
			zap.String("file_id", file.ID.String()),
			zap.Error(err))
		return fmt.Errorf("failed to create file: %w", err)
	}

	// Increment collection file count for active files
	if file.State == dom_file.FileStateActive {
		if err := impl.CollectionRepo.IncrementFileCount(context.Background(), file.CollectionID); err != nil {
			impl.Logger.Error("failed to increment collection file count",
				zap.String("file_id", file.ID.String()),
				zap.String("collection_id", file.CollectionID.String()),
				zap.Error(err))
			// Don't fail the entire operation if count update fails
		}
	}

	impl.Logger.Info("file created successfully",
		zap.String("file_id", file.ID.String()),
		zap.String("collection_id", file.CollectionID.String()))

	return nil
}
|
||||
|
||||
// CreateMany persists several files in one logged batch, mirroring Create's
// inserts (files_by_id, files_by_collection, files_by_owner, files_by_creator,
// files_by_user, and files_by_tag_id per tag) for every non-nil file, then
// increments collection file counters for active files.
//
// Mutates each file in place (timestamps and default state), skips nil
// entries, and is a no-op for an empty slice. Unlike Create, UUID validation
// is not performed here — confirm that callers guarantee valid IDs.
//
// NOTE(review): the single logged batch grows by six statements per file;
// large inputs may exceed Cassandra's configured batch size limits —
// consider chunking if CreateMany is ever called with big slices.
func (impl *fileMetadataRepositoryImpl) CreateMany(files []*dom_file.File) error {
	if len(files) == 0 {
		return nil
	}

	batch := impl.Session.NewBatch(gocql.LoggedBatch)

	for _, file := range files {
		if file == nil {
			continue
		}

		// Set timestamps if not set
		if file.CreatedAt.IsZero() {
			file.CreatedAt = time.Now()
		}
		if file.ModifiedAt.IsZero() {
			file.ModifiedAt = file.CreatedAt
		}
		if file.State == "" {
			file.State = dom_file.FileStateActive
		}

		encryptedKeyJSON, err := impl.serializeEncryptedFileKey(file.EncryptedFileKey)
		if err != nil {
			return fmt.Errorf("failed to serialize encrypted file key for file %s: %w", file.ID.String(), err)
		}

		tagsJSON, err := impl.serializeTags(file.Tags)
		if err != nil {
			return fmt.Errorf("failed to serialize tags for file %s: %w", file.ID.String(), err)
		}

		// Add to all denormalized tables (same statements as Create,
		// accumulated into the shared batch).
		batch.Query(`INSERT INTO maplefile.files_by_id
			(id, collection_id, owner_id, encrypted_metadata, encrypted_file_key, encryption_version,
			encrypted_hash, encrypted_file_object_key, encrypted_file_size_in_bytes,
			encrypted_thumbnail_object_key, encrypted_thumbnail_size_in_bytes, tags,
			created_at, created_by_user_id, modified_at, modified_by_user_id, version,
			state, tombstone_version, tombstone_expiry)
			VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
			file.ID, file.CollectionID, file.OwnerID, file.EncryptedMetadata, encryptedKeyJSON,
			file.EncryptionVersion, file.EncryptedHash, file.EncryptedFileObjectKey,
			file.EncryptedFileSizeInBytes, file.EncryptedThumbnailObjectKey,
			file.EncryptedThumbnailSizeInBytes, tagsJSON, file.CreatedAt, file.CreatedByUserID,
			file.ModifiedAt, file.ModifiedByUserID, file.Version, file.State,
			file.TombstoneVersion, file.TombstoneExpiry)

		// 2. Insert into collection table
		batch.Query(`INSERT INTO maplefile.files_by_collection
			(collection_id, modified_at, id, owner_id, encrypted_metadata, encrypted_file_key,
			encryption_version, encrypted_hash, encrypted_file_object_key, encrypted_file_size_in_bytes,
			encrypted_thumbnail_object_key, encrypted_thumbnail_size_in_bytes,
			created_at, created_by_user_id, modified_by_user_id, version,
			state, tombstone_version, tombstone_expiry)
			VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
			file.CollectionID, file.ModifiedAt, file.ID, file.OwnerID, file.EncryptedMetadata,
			encryptedKeyJSON, file.EncryptionVersion, file.EncryptedHash, file.EncryptedFileObjectKey,
			file.EncryptedFileSizeInBytes, file.EncryptedThumbnailObjectKey,
			file.EncryptedThumbnailSizeInBytes, file.CreatedAt, file.CreatedByUserID,
			file.ModifiedByUserID, file.Version, file.State, file.TombstoneVersion, file.TombstoneExpiry)

		// 3. Insert into owner table
		batch.Query(`INSERT INTO maplefile.files_by_owner
			(owner_id, modified_at, id, collection_id, encrypted_metadata, encrypted_file_key,
			encryption_version, encrypted_hash, encrypted_file_object_key, encrypted_file_size_in_bytes,
			encrypted_thumbnail_object_key, encrypted_thumbnail_size_in_bytes,
			created_at, created_by_user_id, modified_by_user_id, version,
			state, tombstone_version, tombstone_expiry)
			VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
			file.OwnerID, file.ModifiedAt, file.ID, file.CollectionID, file.EncryptedMetadata,
			encryptedKeyJSON, file.EncryptionVersion, file.EncryptedHash, file.EncryptedFileObjectKey,
			file.EncryptedFileSizeInBytes, file.EncryptedThumbnailObjectKey,
			file.EncryptedThumbnailSizeInBytes, file.CreatedAt, file.CreatedByUserID,
			file.ModifiedByUserID, file.Version, file.State, file.TombstoneVersion, file.TombstoneExpiry)

		// 4. Insert into created_by table
		batch.Query(`INSERT INTO maplefile.files_by_creator
			(created_by_user_id, created_at, id, collection_id, owner_id, encrypted_metadata,
			encrypted_file_key, encryption_version, encrypted_hash, encrypted_file_object_key,
			encrypted_file_size_in_bytes, encrypted_thumbnail_object_key, encrypted_thumbnail_size_in_bytes,
			modified_at, modified_by_user_id, version, state, tombstone_version, tombstone_expiry)
			VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
			file.CreatedByUserID, file.CreatedAt, file.ID, file.CollectionID, file.OwnerID,
			file.EncryptedMetadata, encryptedKeyJSON, file.EncryptionVersion, file.EncryptedHash,
			file.EncryptedFileObjectKey, file.EncryptedFileSizeInBytes, file.EncryptedThumbnailObjectKey,
			file.EncryptedThumbnailSizeInBytes, file.ModifiedAt, file.ModifiedByUserID, file.Version,
			file.State, file.TombstoneVersion, file.TombstoneExpiry)

		// 5. Insert into user sync table (for owner and any collection members)
		batch.Query(`INSERT INTO maplefile.files_by_user
			(user_id, modified_at, id, collection_id, owner_id, encrypted_metadata,
			encrypted_file_key, encryption_version, encrypted_hash, encrypted_file_object_key,
			encrypted_file_size_in_bytes, encrypted_thumbnail_object_key, encrypted_thumbnail_size_in_bytes,
			tags, created_at, created_by_user_id, modified_by_user_id, version,
			state, tombstone_version, tombstone_expiry)
			VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
			file.OwnerID, file.ModifiedAt, file.ID, file.CollectionID, file.OwnerID,
			file.EncryptedMetadata, encryptedKeyJSON, file.EncryptionVersion, file.EncryptedHash,
			file.EncryptedFileObjectKey, file.EncryptedFileSizeInBytes, file.EncryptedThumbnailObjectKey,
			file.EncryptedThumbnailSizeInBytes, tagsJSON, file.CreatedAt, file.CreatedByUserID,
			file.ModifiedByUserID, file.Version, file.State, file.TombstoneVersion, file.TombstoneExpiry)

		// 6. Insert into denormalized files_by_tag_id table for each tag
		for _, tag := range file.Tags {
			batch.Query(`INSERT INTO maplefile.files_by_tag_id
				(tag_id, file_id, collection_id, owner_id, encrypted_metadata, encrypted_file_key,
				encryption_version, encrypted_hash, encrypted_file_object_key,
				encrypted_file_size_in_bytes, encrypted_thumbnail_object_key,
				encrypted_thumbnail_size_in_bytes, tag_ids, created_at, created_by_user_id,
				modified_at, modified_by_user_id, version, state, tombstone_version, tombstone_expiry,
				created_from_ip_address, modified_from_ip_address, ip_anonymized_at)
				VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
				tag.ID, file.ID, file.CollectionID, file.OwnerID, file.EncryptedMetadata,
				encryptedKeyJSON, file.EncryptionVersion, file.EncryptedHash,
				file.EncryptedFileObjectKey, file.EncryptedFileSizeInBytes,
				file.EncryptedThumbnailObjectKey, file.EncryptedThumbnailSizeInBytes,
				tagsJSON, file.CreatedAt, file.CreatedByUserID, file.ModifiedAt,
				file.ModifiedByUserID, file.Version, file.State, file.TombstoneVersion,
				file.TombstoneExpiry,
				nil, nil, nil) // IP tracking fields not yet in domain model
		}
	}

	if err := impl.Session.ExecuteBatch(batch); err != nil {
		impl.Logger.Error("failed to create multiple files", zap.Error(err))
		return fmt.Errorf("failed to create multiple files: %w", err)
	}

	// Increment collection file counts for active files
	// Group by collection to minimize updates
	collectionCounts := make(map[gocql.UUID]int)
	for _, file := range files {
		if file != nil && file.State == dom_file.FileStateActive {
			collectionCounts[file.CollectionID]++
		}
	}

	// IncrementFileCount is invoked once per file (not once per collection);
	// presumably it only supports +1 steps — verify before collapsing this
	// inner loop into a single bulk increment.
	for collectionID, count := range collectionCounts {
		for i := 0; i < count; i++ {
			if err := impl.CollectionRepo.IncrementFileCount(context.Background(), collectionID); err != nil {
				impl.Logger.Error("failed to increment collection file count",
					zap.String("collection_id", collectionID.String()),
					zap.Error(err))
				// Don't fail the entire operation if count update fails
			}
		}
	}

	impl.Logger.Info("multiple files created successfully", zap.Int("count", len(files)))
	return nil
}
|
||||
127
cloud/maplefile-backend/internal/repo/filemetadata/delete.go
Normal file
127
cloud/maplefile-backend/internal/repo/filemetadata/delete.go
Normal file
|
|
@ -0,0 +1,127 @@
|
|||
// monorepo/cloud/maplefile-backend/internal/maplefile/repo/filemetadata/delete.go
|
||||
package filemetadata
|
||||
|
||||
import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/gocql/gocql"
	"go.uber.org/zap"

	dom_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/file"
)
|
||||
|
||||
func (impl *fileMetadataRepositoryImpl) SoftDelete(id gocql.UUID) error {
|
||||
file, err := impl.Get(id)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get file for soft delete: %w", err)
|
||||
}
|
||||
|
||||
if file == nil {
|
||||
return fmt.Errorf("file not found")
|
||||
}
|
||||
|
||||
// Validate state transition
|
||||
if err := dom_file.IsValidStateTransition(file.State, dom_file.FileStateDeleted); err != nil {
|
||||
return fmt.Errorf("invalid state transition: %w", err)
|
||||
}
|
||||
|
||||
// Update file state
|
||||
file.State = dom_file.FileStateDeleted
|
||||
file.ModifiedAt = time.Now()
|
||||
file.Version++
|
||||
file.TombstoneVersion = file.Version
|
||||
file.TombstoneExpiry = time.Now().Add(30 * 24 * time.Hour) // 30 days
|
||||
|
||||
return impl.Update(file)
|
||||
}
|
||||
|
||||
func (impl *fileMetadataRepositoryImpl) SoftDeleteMany(ids []gocql.UUID) error {
|
||||
for _, id := range ids {
|
||||
if err := impl.SoftDelete(id); err != nil {
|
||||
impl.Logger.Warn("failed to soft delete file",
|
||||
zap.String("file_id", id.String()),
|
||||
zap.Error(err))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// HardDelete permanently removes a file's metadata from every table it is
// denormalized into — the main files_by_id table plus the collection,
// owner, creator, user-sync, and per-tag lookup tables — in one logged
// batch, then decrements the owning collection's file count when the file
// was active.
//
// NOTE(review): the denormalized deletes key on the file's CURRENT
// ModifiedAt/CreatedAt values; a denormalized row written with a different
// timestamp would survive this batch — confirm the write path keeps those
// timestamps in sync across all tables.
func (impl *fileMetadataRepositoryImpl) HardDelete(id gocql.UUID) error {
	file, err := impl.Get(id)
	if err != nil {
		return fmt.Errorf("failed to get file for hard delete: %w", err)
	}

	// Get reports a missing row as (nil, nil), so nil-check explicitly.
	if file == nil {
		return fmt.Errorf("file not found")
	}

	// Logged batch: all deletes across the denormalized tables apply
	// atomically (eventually), avoiding half-deleted files.
	batch := impl.Session.NewBatch(gocql.LoggedBatch)

	// 1. Delete from main table
	batch.Query(`DELETE FROM maplefile.files_by_id WHERE id = ?`, id)

	// 2. Delete from collection table
	batch.Query(`DELETE FROM maplefile.files_by_collection
		WHERE collection_id = ? AND modified_at = ? AND id = ?`,
		file.CollectionID, file.ModifiedAt, id)

	// 3. Delete from owner table
	batch.Query(`DELETE FROM maplefile.files_by_owner
		WHERE owner_id = ? AND modified_at = ? AND id = ?`,
		file.OwnerID, file.ModifiedAt, id)

	// 4. Delete from created_by table
	batch.Query(`DELETE FROM maplefile.files_by_creator
		WHERE created_by_user_id = ? AND created_at = ? AND id = ?`,
		file.CreatedByUserID, file.CreatedAt, id)

	// 5. Delete from user sync table (keyed by owner here — presumably
	// shared users' sync rows are handled elsewhere; verify against the
	// sharing write path).
	batch.Query(`DELETE FROM maplefile.files_by_user
		WHERE user_id = ? AND modified_at = ? AND id = ?`,
		file.OwnerID, file.ModifiedAt, id)

	// 6. Delete from denormalized files_by_tag_id table for all tags
	for _, tag := range file.Tags {
		batch.Query(`DELETE FROM maplefile.files_by_tag_id
			WHERE tag_id = ? AND file_id = ?`,
			tag.ID, id)
	}

	// Execute batch
	if err := impl.Session.ExecuteBatch(batch); err != nil {
		impl.Logger.Error("failed to hard delete file",
			zap.String("file_id", id.String()),
			zap.Error(err))
		return fmt.Errorf("failed to hard delete file: %w", err)
	}

	// Decrement collection file count if the file was active; counts only
	// track active files, so deleted/archived files must not decrement.
	if file.State == dom_file.FileStateActive {
		if err := impl.CollectionRepo.DecrementFileCount(context.Background(), file.CollectionID); err != nil {
			impl.Logger.Error("failed to decrement collection file count",
				zap.String("file_id", id.String()),
				zap.String("collection_id", file.CollectionID.String()),
				zap.Error(err))
			// Don't fail the entire operation if count update fails
		}
	}

	impl.Logger.Info("file hard deleted successfully",
		zap.String("file_id", id.String()))

	return nil
}
|
||||
|
||||
func (impl *fileMetadataRepositoryImpl) HardDeleteMany(ids []gocql.UUID) error {
|
||||
for _, id := range ids {
|
||||
if err := impl.HardDelete(id); err != nil {
|
||||
impl.Logger.Warn("failed to hard delete file",
|
||||
zap.String("file_id", id.String()),
|
||||
zap.Error(err))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
217
cloud/maplefile-backend/internal/repo/filemetadata/get.go
Normal file
217
cloud/maplefile-backend/internal/repo/filemetadata/get.go
Normal file
|
|
@ -0,0 +1,217 @@
|
|||
// monorepo/cloud/maplefile-backend/internal/maplefile/repo/filemetadata/get.go
|
||||
package filemetadata
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/gocql/gocql"
|
||||
"go.uber.org/zap"
|
||||
|
||||
dom_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/file"
|
||||
)
|
||||
|
||||
// Get fetches a single file's metadata by primary key from
// maplefile.files_by_id and maps the row onto the domain model.
//
// Returns (nil, nil) when no row exists — callers must nil-check the file
// rather than rely on an error. The encrypted file key and tags are stored
// as JSON text columns and are decoded here.
func (impl *fileMetadataRepositoryImpl) Get(id gocql.UUID) (*dom_file.File, error) {
	var (
		collectionID, ownerID, createdByUserID, modifiedByUserID gocql.UUID
		encryptedMetadata, encryptedKeyJSON, encryptionVersion   string
		encryptedHash, encryptedFileObjectKey                    string
		encryptedThumbnailObjectKey                              string
		encryptedFileSizeInBytes, encryptedThumbnailSizeInBytes  int64
		tagsJSON                                                 string
		createdAt, modifiedAt, tombstoneExpiry                   time.Time
		version, tombstoneVersion                                uint64
		state                                                    string
	)

	query := `SELECT id, collection_id, owner_id, encrypted_metadata, encrypted_file_key,
		encryption_version, encrypted_hash, encrypted_file_object_key, encrypted_file_size_in_bytes,
		encrypted_thumbnail_object_key, encrypted_thumbnail_size_in_bytes, tags,
		created_at, created_by_user_id, modified_at, modified_by_user_id, version,
		state, tombstone_version, tombstone_expiry
		FROM maplefile.files_by_id WHERE id = ?`

	// The scan target order must match the SELECT column order exactly;
	// keep the two lists in sync when adding columns.
	err := impl.Session.Query(query, id).Scan(
		&id, &collectionID, &ownerID, &encryptedMetadata, &encryptedKeyJSON,
		&encryptionVersion, &encryptedHash, &encryptedFileObjectKey, &encryptedFileSizeInBytes,
		&encryptedThumbnailObjectKey, &encryptedThumbnailSizeInBytes, &tagsJSON,
		&createdAt, &createdByUserID, &modifiedAt, &modifiedByUserID, &version,
		&state, &tombstoneVersion, &tombstoneExpiry)

	if err != nil {
		if err == gocql.ErrNotFound {
			// Missing row is reported as (nil, nil), not as an error.
			return nil, nil
		}
		return nil, fmt.Errorf("failed to get file: %w", err)
	}

	// Deserialize encrypted file key
	encryptedFileKey, err := impl.deserializeEncryptedFileKey(encryptedKeyJSON)
	if err != nil {
		return nil, fmt.Errorf("failed to deserialize encrypted file key: %w", err)
	}

	// Deserialize tags
	tags, err := impl.deserializeTags(tagsJSON)
	if err != nil {
		return nil, fmt.Errorf("failed to deserialize tags: %w", err)
	}

	// Assemble the domain object from the scanned columns.
	file := &dom_file.File{
		ID:                            id,
		CollectionID:                  collectionID,
		OwnerID:                       ownerID,
		EncryptedMetadata:             encryptedMetadata,
		EncryptedFileKey:              encryptedFileKey,
		EncryptionVersion:             encryptionVersion,
		EncryptedHash:                 encryptedHash,
		EncryptedFileObjectKey:        encryptedFileObjectKey,
		EncryptedFileSizeInBytes:      encryptedFileSizeInBytes,
		EncryptedThumbnailObjectKey:   encryptedThumbnailObjectKey,
		EncryptedThumbnailSizeInBytes: encryptedThumbnailSizeInBytes,
		Tags:                          tags,
		CreatedAt:                     createdAt,
		CreatedByUserID:               createdByUserID,
		ModifiedAt:                    modifiedAt,
		ModifiedByUserID:              modifiedByUserID,
		Version:                       version,
		State:                         state,
		TombstoneVersion:              tombstoneVersion,
		TombstoneExpiry:               tombstoneExpiry,
	}

	return file, nil
}
|
||||
|
||||
func (impl *fileMetadataRepositoryImpl) GetByIDs(ids []gocql.UUID) ([]*dom_file.File, error) {
|
||||
if len(ids) == 0 {
|
||||
return []*dom_file.File{}, nil
|
||||
}
|
||||
|
||||
// Use a buffered channel to collect results from goroutines
|
||||
resultsChan := make(chan *dom_file.File, len(ids))
|
||||
var wg sync.WaitGroup
|
||||
|
||||
// Launch a goroutine for each ID lookup
|
||||
for _, id := range ids {
|
||||
wg.Add(1)
|
||||
go func(id gocql.UUID) {
|
||||
defer wg.Done()
|
||||
|
||||
// Call the existing state-aware Get method
|
||||
file, err := impl.Get(id)
|
||||
|
||||
if err != nil {
|
||||
impl.Logger.Warn("failed to get file by ID",
|
||||
zap.String("file_id", id.String()),
|
||||
zap.Error(err))
|
||||
// Send nil on error to indicate failure/absence for this ID
|
||||
resultsChan <- nil
|
||||
return
|
||||
}
|
||||
|
||||
// Get returns nil for ErrNotFound or inactive state when stateAware is true.
|
||||
// Send the potentially nil file result to the channel.
|
||||
resultsChan <- file
|
||||
|
||||
}(id) // Pass id into the closure
|
||||
}
|
||||
|
||||
// Goroutine to close the channel once all workers are done
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(resultsChan)
|
||||
}()
|
||||
|
||||
// Collect results from the channel
|
||||
var files []*dom_file.File
|
||||
for file := range resultsChan {
|
||||
// Only append non-nil files (found and active)
|
||||
if file != nil {
|
||||
files = append(files, file)
|
||||
}
|
||||
}
|
||||
|
||||
// The original function logs warnings for errors but doesn't return an error
|
||||
// from GetByIDs itself. We maintain this behavior.
|
||||
return files, nil
|
||||
}
|
||||
|
||||
func (impl *fileMetadataRepositoryImpl) GetByCollection(collectionID gocql.UUID) ([]*dom_file.File, error) {
|
||||
var fileIDs []gocql.UUID
|
||||
|
||||
query := `SELECT id FROM maplefile.files_by_collection
|
||||
WHERE collection_id = ?`
|
||||
|
||||
iter := impl.Session.Query(query, collectionID).Iter()
|
||||
|
||||
var fileID gocql.UUID
|
||||
for iter.Scan(&fileID) {
|
||||
fileIDs = append(fileIDs, fileID)
|
||||
}
|
||||
|
||||
if err := iter.Close(); err != nil {
|
||||
return nil, fmt.Errorf("failed to get files by collection: %w", err)
|
||||
}
|
||||
|
||||
return impl.loadMultipleFiles(fileIDs)
|
||||
}
|
||||
|
||||
func (impl *fileMetadataRepositoryImpl) loadMultipleFiles(fileIDs []gocql.UUID) ([]*dom_file.File, error) {
|
||||
if len(fileIDs) == 0 {
|
||||
return []*dom_file.File{}, nil
|
||||
}
|
||||
|
||||
// Use a buffered channel to collect results from goroutines
|
||||
// We expect up to len(fileIDs) results, some of which might be nil.
|
||||
resultsChan := make(chan *dom_file.File, len(fileIDs))
|
||||
var wg sync.WaitGroup
|
||||
|
||||
// Launch a goroutine for each ID lookup
|
||||
for _, id := range fileIDs {
|
||||
wg.Add(1)
|
||||
go func(id gocql.UUID) {
|
||||
defer wg.Done()
|
||||
|
||||
// Call the existing state-aware Get method
|
||||
// This method returns nil if the file is not found, or if it's
|
||||
// found but not in the 'active' state.
|
||||
file, err := impl.Get(id)
|
||||
|
||||
if err != nil {
|
||||
// Log the error but continue processing other IDs.
|
||||
impl.Logger.Warn("failed to load file",
|
||||
zap.String("file_id", id.String()),
|
||||
zap.Error(err))
|
||||
// Send nil on error, consistent with how Get returns nil for not found/inactive.
|
||||
resultsChan <- nil
|
||||
return
|
||||
}
|
||||
|
||||
// Get returns nil for ErrNotFound or inactive state when stateAware is true.
|
||||
// Send the potentially nil file result to the channel.
|
||||
resultsChan <- file
|
||||
|
||||
}(id) // Pass id into the closure
|
||||
}
|
||||
|
||||
// Goroutine to close the channel once all workers are done
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(resultsChan)
|
||||
}()
|
||||
|
||||
// Collect results from the channel
|
||||
var files []*dom_file.File
|
||||
for file := range resultsChan {
|
||||
// Only append non-nil files (found and active, or found but error logged)
|
||||
if file != nil {
|
||||
files = append(files, file)
|
||||
}
|
||||
}
|
||||
|
||||
// The original function logged warnings for errors but didn't return an error
|
||||
// from loadMultipleFiles itself. We maintain this behavior.
|
||||
return files, nil
|
||||
}
|
||||
|
|
@ -0,0 +1,29 @@
|
|||
// monorepo/cloud/maplefile-backend/internal/maplefile/repo/filemetadata/get_by_created_by_user_id.go
|
||||
package filemetadata
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/gocql/gocql"
|
||||
dom_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/file"
|
||||
)
|
||||
|
||||
func (impl *fileMetadataRepositoryImpl) GetByCreatedByUserID(createdByUserID gocql.UUID) ([]*dom_file.File, error) {
|
||||
var fileIDs []gocql.UUID
|
||||
|
||||
query := `SELECT id FROM maplefile.files_by_creator
|
||||
WHERE created_by_user_id = ?`
|
||||
|
||||
iter := impl.Session.Query(query, createdByUserID).Iter()
|
||||
|
||||
var fileID gocql.UUID
|
||||
for iter.Scan(&fileID) {
|
||||
fileIDs = append(fileIDs, fileID)
|
||||
}
|
||||
|
||||
if err := iter.Close(); err != nil {
|
||||
return nil, fmt.Errorf("failed to get files by creator: %w", err)
|
||||
}
|
||||
|
||||
return impl.loadMultipleFiles(fileIDs)
|
||||
}
|
||||
|
|
@ -0,0 +1,29 @@
|
|||
// monorepo/cloud/maplefile-backend/internal/maplefile/repo/filemetadata/get_by_owner_id.go
|
||||
package filemetadata
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/gocql/gocql"
|
||||
dom_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/file"
|
||||
)
|
||||
|
||||
func (impl *fileMetadataRepositoryImpl) GetByOwnerID(ownerID gocql.UUID) ([]*dom_file.File, error) {
|
||||
var fileIDs []gocql.UUID
|
||||
|
||||
query := `SELECT id FROM maplefile.files_by_owner
|
||||
WHERE owner_id = ?`
|
||||
|
||||
iter := impl.Session.Query(query, ownerID).Iter()
|
||||
|
||||
var fileID gocql.UUID
|
||||
for iter.Scan(&fileID) {
|
||||
fileIDs = append(fileIDs, fileID)
|
||||
}
|
||||
|
||||
if err := iter.Close(); err != nil {
|
||||
return nil, fmt.Errorf("failed to get files by owner: %w", err)
|
||||
}
|
||||
|
||||
return impl.loadMultipleFiles(fileIDs)
|
||||
}
|
||||
68
cloud/maplefile-backend/internal/repo/filemetadata/impl.go
Normal file
68
cloud/maplefile-backend/internal/repo/filemetadata/impl.go
Normal file
|
|
@ -0,0 +1,68 @@
|
|||
// monorepo/cloud/maplefile-backend/internal/maplefile/repo/filemetadata/impl.go
|
||||
package filemetadata
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"github.com/gocql/gocql"
|
||||
"go.uber.org/zap"
|
||||
|
||||
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
|
||||
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
|
||||
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/crypto"
|
||||
dom_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/file"
|
||||
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/tag"
|
||||
)
|
||||
|
||||
// fileMetadataRepositoryImpl is the Cassandra-backed implementation of
// dom_file.FileMetadataRepository. It holds the shared gocql session, a
// named logger, and the collection repository used to keep per-collection
// file counts in sync.
type fileMetadataRepositoryImpl struct {
	Logger         *zap.Logger
	Session        *gocql.Session
	CollectionRepo dom_collection.CollectionRepository
}

// NewRepository constructs the file metadata repository.
//
// NOTE(review): appCfg is currently unused — presumably kept for signature
// stability across repositories; confirm before removing.
func NewRepository(appCfg *config.Configuration, session *gocql.Session, loggerp *zap.Logger, collectionRepo dom_collection.CollectionRepository) dom_file.FileMetadataRepository {
	// Scope all log lines from this repository under one component name.
	loggerp = loggerp.Named("FileMetadataRepository")

	return &fileMetadataRepositoryImpl{
		Logger:         loggerp,
		Session:        session,
		CollectionRepo: collectionRepo,
	}
}
|
||||
|
||||
// Helper functions for JSON serialization
|
||||
func (impl *fileMetadataRepositoryImpl) serializeEncryptedFileKey(key crypto.EncryptedFileKey) (string, error) {
|
||||
data, err := json.Marshal(key)
|
||||
return string(data), err
|
||||
}
|
||||
|
||||
func (impl *fileMetadataRepositoryImpl) deserializeEncryptedFileKey(data string) (crypto.EncryptedFileKey, error) {
|
||||
if data == "" {
|
||||
return crypto.EncryptedFileKey{}, nil
|
||||
}
|
||||
var key crypto.EncryptedFileKey
|
||||
err := json.Unmarshal([]byte(data), &key)
|
||||
return key, err
|
||||
}
|
||||
|
||||
func (impl *fileMetadataRepositoryImpl) serializeTags(tags []tag.EmbeddedTag) (string, error) {
|
||||
if len(tags) == 0 {
|
||||
return "[]", nil
|
||||
}
|
||||
data, err := json.Marshal(tags)
|
||||
return string(data), err
|
||||
}
|
||||
|
||||
func (impl *fileMetadataRepositoryImpl) deserializeTags(data string) ([]tag.EmbeddedTag, error) {
|
||||
if data == "" || data == "[]" {
|
||||
return []tag.EmbeddedTag{}, nil
|
||||
}
|
||||
var tags []tag.EmbeddedTag
|
||||
err := json.Unmarshal([]byte(data), &tags)
|
||||
return tags, err
|
||||
}
|
||||
|
||||
// isValidUUID checks if UUID is not nil/empty
|
||||
func (impl *fileMetadataRepositoryImpl) isValidUUID(id gocql.UUID) bool {
|
||||
return id.String() != "00000000-0000-0000-0000-000000000000"
|
||||
}
|
||||
|
|
@ -0,0 +1,57 @@
|
|||
// monorepo/cloud/maplefile-backend/internal/maplefile/repo/filemetadata/list_by_tag_id.go
|
||||
package filemetadata
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/gocql/gocql"
|
||||
dom_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/file"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// ListByTagID retrieves all files that have the specified tag assigned
|
||||
// Uses the denormalized files_by_tag_id table for efficient lookups
|
||||
func (impl *fileMetadataRepositoryImpl) ListByTagID(ctx context.Context, tagID gocql.UUID) ([]*dom_file.File, error) {
|
||||
impl.Logger.Info("🏷️ REPO: Listing files by tag ID",
|
||||
zap.String("tag_id", tagID.String()))
|
||||
|
||||
var fileIDs []gocql.UUID
|
||||
|
||||
// Query the denormalized table
|
||||
query := `SELECT file_id FROM maplefile.files_by_tag_id WHERE tag_id = ?`
|
||||
|
||||
iter := impl.Session.Query(query, tagID).WithContext(ctx).Iter()
|
||||
|
||||
var fileID gocql.UUID
|
||||
for iter.Scan(&fileID) {
|
||||
fileIDs = append(fileIDs, fileID)
|
||||
}
|
||||
|
||||
if err := iter.Close(); err != nil {
|
||||
impl.Logger.Error("🏷️ REPO: Failed to query files by tag",
|
||||
zap.String("tag_id", tagID.String()),
|
||||
zap.Error(err))
|
||||
return nil, fmt.Errorf("failed to list files by tag: %w", err)
|
||||
}
|
||||
|
||||
impl.Logger.Info("🏷️ REPO: Found file IDs for tag",
|
||||
zap.String("tag_id", tagID.String()),
|
||||
zap.Int("count", len(fileIDs)))
|
||||
|
||||
// Load full file details using existing helper method
|
||||
// This will filter to only active files
|
||||
files, err := impl.loadMultipleFiles(fileIDs)
|
||||
if err != nil {
|
||||
impl.Logger.Error("🏷️ REPO: Failed to load files",
|
||||
zap.String("tag_id", tagID.String()),
|
||||
zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
impl.Logger.Info("🏷️ REPO: Successfully loaded files by tag",
|
||||
zap.String("tag_id", tagID.String()),
|
||||
zap.Int("active_count", len(files)))
|
||||
|
||||
return files, nil
|
||||
}
|
||||
|
|
@ -0,0 +1,135 @@
|
|||
// cloud/maplefile-backend/internal/maplefile/repo/filemetadata/list_recent_files.go
|
||||
package filemetadata
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/gocql/gocql"
|
||||
"go.uber.org/zap"
|
||||
|
||||
dom_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/file"
|
||||
)
|
||||
|
||||
// Using types from dom_file package (defined in model.go)
|
||||
|
||||
// ListRecentFiles retrieves recent files with pagination for the specified user and accessible collections
|
||||
func (impl *fileMetadataRepositoryImpl) ListRecentFiles(ctx context.Context, userID gocql.UUID, cursor *dom_file.RecentFilesCursor, limit int64, accessibleCollectionIDs []gocql.UUID) (*dom_file.RecentFilesResponse, error) {
|
||||
if len(accessibleCollectionIDs) == 0 {
|
||||
// No accessible collections, return empty response
|
||||
return &dom_file.RecentFilesResponse{
|
||||
Files: []dom_file.RecentFilesItem{},
|
||||
HasMore: false,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Build query based on cursor
|
||||
var query string
|
||||
var args []any
|
||||
|
||||
if cursor == nil {
|
||||
// Initial request - get most recent files for user
|
||||
query = `SELECT id, collection_id, owner_id, encrypted_metadata, encrypted_file_key,
|
||||
encryption_version, encrypted_hash, encrypted_file_size_in_bytes, encrypted_thumbnail_size_in_bytes,
|
||||
tags, created_at, modified_at, version, state
|
||||
FROM maplefile.files_by_user
|
||||
WHERE user_id = ? LIMIT ?`
|
||||
args = []any{userID, limit}
|
||||
} else {
|
||||
// Paginated request - get files modified before cursor
|
||||
query = `SELECT id, collection_id, owner_id, encrypted_metadata, encrypted_file_key,
|
||||
encryption_version, encrypted_hash, encrypted_file_size_in_bytes, encrypted_thumbnail_size_in_bytes,
|
||||
tags, created_at, modified_at, version, state
|
||||
FROM maplefile.files_by_user
|
||||
WHERE user_id = ? AND (modified_at, id) < (?, ?) LIMIT ?`
|
||||
args = []any{userID, cursor.LastModified, cursor.LastID, limit}
|
||||
}
|
||||
|
||||
iter := impl.Session.Query(query, args...).WithContext(ctx).Iter()
|
||||
|
||||
var recentItems []dom_file.RecentFilesItem
|
||||
var lastModified time.Time
|
||||
var lastID gocql.UUID
|
||||
|
||||
var (
|
||||
fileID gocql.UUID
|
||||
collectionID, ownerID gocql.UUID
|
||||
encryptedMetadata, encryptedFileKey, encryptionVersion, encryptedHash string
|
||||
encryptedFileSizeInBytes, encryptedThumbnailSizeInBytes int64
|
||||
tagsJSON string
|
||||
createdAt, modifiedAt time.Time
|
||||
version uint64
|
||||
state string
|
||||
)
|
||||
|
||||
// Filter files by accessible collections and only include active files
|
||||
accessibleCollections := make(map[gocql.UUID]bool)
|
||||
for _, cid := range accessibleCollectionIDs {
|
||||
accessibleCollections[cid] = true
|
||||
}
|
||||
|
||||
for iter.Scan(&fileID, &collectionID, &ownerID, &encryptedMetadata, &encryptedFileKey,
|
||||
&encryptionVersion, &encryptedHash, &encryptedFileSizeInBytes, &encryptedThumbnailSizeInBytes,
|
||||
&tagsJSON, &createdAt, &modifiedAt, &version, &state) {
|
||||
|
||||
// Only include files from accessible collections
|
||||
if !accessibleCollections[collectionID] {
|
||||
continue
|
||||
}
|
||||
|
||||
// Only include active files (exclude deleted, archived, pending)
|
||||
if state != dom_file.FileStateActive {
|
||||
continue
|
||||
}
|
||||
|
||||
// Deserialize tags
|
||||
tags, _ := impl.deserializeTags(tagsJSON)
|
||||
|
||||
recentItem := dom_file.RecentFilesItem{
|
||||
ID: fileID,
|
||||
CollectionID: collectionID,
|
||||
OwnerID: ownerID,
|
||||
EncryptedMetadata: encryptedMetadata,
|
||||
EncryptedFileKey: encryptedFileKey,
|
||||
EncryptionVersion: encryptionVersion,
|
||||
EncryptedHash: encryptedHash,
|
||||
EncryptedFileSizeInBytes: encryptedFileSizeInBytes,
|
||||
EncryptedThumbnailSizeInBytes: encryptedThumbnailSizeInBytes,
|
||||
Tags: tags,
|
||||
CreatedAt: createdAt,
|
||||
ModifiedAt: modifiedAt,
|
||||
Version: version,
|
||||
State: state,
|
||||
}
|
||||
|
||||
recentItems = append(recentItems, recentItem)
|
||||
lastModified = modifiedAt
|
||||
lastID = fileID
|
||||
}
|
||||
|
||||
if err := iter.Close(); err != nil {
|
||||
return nil, fmt.Errorf("failed to get recent files: %w", err)
|
||||
}
|
||||
|
||||
// Prepare response
|
||||
response := &dom_file.RecentFilesResponse{
|
||||
Files: recentItems,
|
||||
HasMore: len(recentItems) == int(limit),
|
||||
}
|
||||
|
||||
// Set next cursor if there are more results
|
||||
if response.HasMore {
|
||||
response.NextCursor = &dom_file.RecentFilesCursor{
|
||||
LastModified: lastModified,
|
||||
LastID: lastID,
|
||||
}
|
||||
}
|
||||
|
||||
impl.Logger.Debug("recent files retrieved",
|
||||
zap.String("user_id", userID.String()),
|
||||
zap.Int("file_count", len(recentItems)),
|
||||
zap.Bool("has_more", response.HasMore))
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
|
@ -0,0 +1,109 @@
|
|||
// monorepo/cloud/maplefile-backend/internal/maplefile/repo/filemetadata/list_sync_data.go
|
||||
package filemetadata
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/gocql/gocql"
|
||||
"go.uber.org/zap"
|
||||
|
||||
dom_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/file"
|
||||
)
|
||||
|
||||
func (impl *fileMetadataRepositoryImpl) ListSyncData(ctx context.Context, userID gocql.UUID, cursor *dom_file.FileSyncCursor, limit int64, accessibleCollectionIDs []gocql.UUID) (*dom_file.FileSyncResponse, error) {
|
||||
if len(accessibleCollectionIDs) == 0 {
|
||||
// No accessible collections, return empty response
|
||||
return &dom_file.FileSyncResponse{
|
||||
Files: []dom_file.FileSyncItem{},
|
||||
HasMore: false,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Build query based on cursor
|
||||
var query string
|
||||
var args []any
|
||||
|
||||
if cursor == nil {
|
||||
// Initial sync - get all files for user
|
||||
query = `SELECT id, collection_id, version, modified_at, state, tombstone_version, tombstone_expiry, encrypted_file_size_in_bytes
|
||||
FROM maplefile.files_by_user
|
||||
WHERE user_id = ? LIMIT ?`
|
||||
args = []any{userID, limit}
|
||||
} else {
|
||||
// Incremental sync - get files modified after cursor
|
||||
query = `SELECT id, collection_id, version, modified_at, state, tombstone_version, tombstone_expiry, encrypted_file_size_in_bytes
|
||||
FROM maplefile.files_by_user
|
||||
WHERE user_id = ? AND (modified_at, id) > (?, ?) LIMIT ?`
|
||||
args = []any{userID, cursor.LastModified, cursor.LastID, limit}
|
||||
}
|
||||
|
||||
iter := impl.Session.Query(query, args...).WithContext(ctx).Iter()
|
||||
|
||||
var syncItems []dom_file.FileSyncItem
|
||||
var lastModified time.Time
|
||||
var lastID gocql.UUID
|
||||
|
||||
var (
|
||||
fileID gocql.UUID
|
||||
collectionID gocql.UUID
|
||||
version, tombstoneVersion uint64
|
||||
modifiedAt, tombstoneExpiry time.Time
|
||||
state string
|
||||
encryptedFileSizeInBytes int64
|
||||
)
|
||||
|
||||
// Filter files by accessible collections
|
||||
accessibleCollections := make(map[gocql.UUID]bool)
|
||||
for _, cid := range accessibleCollectionIDs {
|
||||
accessibleCollections[cid] = true
|
||||
}
|
||||
|
||||
for iter.Scan(&fileID, &collectionID, &version, &modifiedAt, &state, &tombstoneVersion, &tombstoneExpiry, &encryptedFileSizeInBytes) {
|
||||
// Only include files from accessible collections
|
||||
if !accessibleCollections[collectionID] {
|
||||
continue
|
||||
}
|
||||
|
||||
syncItem := dom_file.FileSyncItem{
|
||||
ID: fileID,
|
||||
CollectionID: collectionID,
|
||||
Version: version,
|
||||
ModifiedAt: modifiedAt,
|
||||
State: state,
|
||||
TombstoneVersion: tombstoneVersion,
|
||||
TombstoneExpiry: tombstoneExpiry,
|
||||
EncryptedFileSizeInBytes: encryptedFileSizeInBytes,
|
||||
}
|
||||
|
||||
syncItems = append(syncItems, syncItem)
|
||||
lastModified = modifiedAt
|
||||
lastID = fileID
|
||||
}
|
||||
|
||||
if err := iter.Close(); err != nil {
|
||||
return nil, fmt.Errorf("failed to get file sync data: %w", err)
|
||||
}
|
||||
|
||||
// Prepare response
|
||||
response := &dom_file.FileSyncResponse{
|
||||
Files: syncItems,
|
||||
HasMore: len(syncItems) == int(limit),
|
||||
}
|
||||
|
||||
// Set next cursor if there are more results
|
||||
if response.HasMore {
|
||||
response.NextCursor = &dom_file.FileSyncCursor{
|
||||
LastModified: lastModified,
|
||||
LastID: lastID,
|
||||
}
|
||||
}
|
||||
|
||||
impl.Logger.Debug("file sync data retrieved",
|
||||
zap.String("user_id", userID.String()),
|
||||
zap.Int("file_count", len(syncItems)),
|
||||
zap.Bool("has_more", response.HasMore))
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
|
@ -0,0 +1,15 @@
|
|||
package filemetadata
|
||||
|
||||
import (
|
||||
"github.com/gocql/gocql"
|
||||
"go.uber.org/zap"
|
||||
|
||||
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
|
||||
dom_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/collection"
|
||||
dom_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/file"
|
||||
)
|
||||
|
||||
// ProvideRepository provides a file metadata repository for Wire DI
|
||||
func ProvideRepository(cfg *config.Config, session *gocql.Session, logger *zap.Logger, collectionRepo dom_collection.CollectionRepository) dom_file.FileMetadataRepository {
|
||||
return NewRepository(cfg, session, logger, collectionRepo)
|
||||
}
|
||||
|
|
@ -0,0 +1,48 @@
|
|||
// monorepo/cloud/maplefile-backend/internal/maplefile/repo/filemetadata/restore.go
|
||||
package filemetadata
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/gocql/gocql"
|
||||
"go.uber.org/zap"
|
||||
|
||||
dom_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/file"
|
||||
)
|
||||
|
||||
func (impl *fileMetadataRepositoryImpl) Restore(id gocql.UUID) error {
|
||||
file, err := impl.Get(id)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get file for restore: %w", err)
|
||||
}
|
||||
|
||||
if file == nil {
|
||||
return fmt.Errorf("file not found")
|
||||
}
|
||||
|
||||
// Validate state transition
|
||||
if err := dom_file.IsValidStateTransition(file.State, dom_file.FileStateActive); err != nil {
|
||||
return fmt.Errorf("invalid state transition: %w", err)
|
||||
}
|
||||
|
||||
// Update file state
|
||||
file.State = dom_file.FileStateActive
|
||||
file.ModifiedAt = time.Now()
|
||||
file.Version++
|
||||
file.TombstoneVersion = 0
|
||||
file.TombstoneExpiry = time.Time{}
|
||||
|
||||
return impl.Update(file)
|
||||
}
|
||||
|
||||
func (impl *fileMetadataRepositoryImpl) RestoreMany(ids []gocql.UUID) error {
|
||||
for _, id := range ids {
|
||||
if err := impl.Restore(id); err != nil {
|
||||
impl.Logger.Warn("failed to restore file",
|
||||
zap.String("file_id", id.String()),
|
||||
zap.Error(err))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -0,0 +1,204 @@
|
|||
// monorepo/cloud/maplefile-backend/internal/maplefile/repo/filemetadata/storage_size.go
|
||||
package filemetadata
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/gocql/gocql"
|
||||
"go.uber.org/zap"
|
||||
|
||||
dom_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/file"
|
||||
)
|
||||
|
||||
// GetTotalStorageSizeByOwner calculates total storage size for all active files owned by the user
|
||||
func (impl *fileMetadataRepositoryImpl) GetTotalStorageSizeByOwner(ctx context.Context, ownerID gocql.UUID) (int64, error) {
|
||||
// Query files owned by the user using the owner table
|
||||
query := `SELECT id, state, encrypted_file_size_in_bytes, encrypted_thumbnail_size_in_bytes
|
||||
FROM maplefile.files_by_owner
|
||||
WHERE owner_id = ?`
|
||||
|
||||
iter := impl.Session.Query(query, ownerID).WithContext(ctx).Iter()
|
||||
|
||||
var totalSize int64
|
||||
var fileID gocql.UUID
|
||||
var state string
|
||||
var fileSize, thumbnailSize int64
|
||||
|
||||
for iter.Scan(&fileID, &state, &fileSize, &thumbnailSize) {
|
||||
// Only include active files in size calculation
|
||||
if state != dom_file.FileStateActive {
|
||||
continue
|
||||
}
|
||||
|
||||
// Add both file and thumbnail sizes
|
||||
totalSize += fileSize + thumbnailSize
|
||||
}
|
||||
|
||||
if err := iter.Close(); err != nil {
|
||||
impl.Logger.Error("failed to calculate total storage size by owner",
|
||||
zap.String("owner_id", ownerID.String()),
|
||||
zap.Error(err))
|
||||
return 0, fmt.Errorf("failed to calculate total storage size by owner: %w", err)
|
||||
}
|
||||
|
||||
impl.Logger.Debug("calculated total storage size by owner successfully",
|
||||
zap.String("owner_id", ownerID.String()),
|
||||
zap.Int64("total_size_bytes", totalSize))
|
||||
|
||||
return totalSize, nil
|
||||
}
|
||||
|
||||
// GetTotalStorageSizeByUser calculates total storage size for all active files accessible to the user
|
||||
// accessibleCollectionIDs should include all collections the user owns or has access to
|
||||
func (impl *fileMetadataRepositoryImpl) GetTotalStorageSizeByUser(ctx context.Context, userID gocql.UUID, accessibleCollectionIDs []gocql.UUID) (int64, error) {
|
||||
if len(accessibleCollectionIDs) == 0 {
|
||||
// No accessible collections, return 0
|
||||
impl.Logger.Debug("no accessible collections provided for storage size calculation",
|
||||
zap.String("user_id", userID.String()))
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// Create a map for efficient collection access checking
|
||||
accessibleCollections := make(map[gocql.UUID]bool)
|
||||
for _, cid := range accessibleCollectionIDs {
|
||||
accessibleCollections[cid] = true
|
||||
}
|
||||
|
||||
// Query files for the user using the user sync table
|
||||
query := `SELECT id, collection_id, state, encrypted_file_size_in_bytes, encrypted_thumbnail_size_in_bytes
|
||||
FROM maplefile.files_by_user
|
||||
WHERE user_id = ?`
|
||||
|
||||
iter := impl.Session.Query(query, userID).WithContext(ctx).Iter()
|
||||
|
||||
var totalSize int64
|
||||
var fileID, collectionID gocql.UUID
|
||||
var state string
|
||||
var fileSize, thumbnailSize int64
|
||||
|
||||
for iter.Scan(&fileID, &collectionID, &state, &fileSize, &thumbnailSize) {
|
||||
// Only include files from accessible collections
|
||||
if !accessibleCollections[collectionID] {
|
||||
continue
|
||||
}
|
||||
|
||||
// Only include active files in size calculation
|
||||
if state != dom_file.FileStateActive {
|
||||
continue
|
||||
}
|
||||
|
||||
// Add both file and thumbnail sizes
|
||||
totalSize += fileSize + thumbnailSize
|
||||
}
|
||||
|
||||
if err := iter.Close(); err != nil {
|
||||
impl.Logger.Error("failed to calculate total storage size by user",
|
||||
zap.String("user_id", userID.String()),
|
||||
zap.Int("accessible_collections_count", len(accessibleCollectionIDs)),
|
||||
zap.Error(err))
|
||||
return 0, fmt.Errorf("failed to calculate total storage size by user: %w", err)
|
||||
}
|
||||
|
||||
impl.Logger.Debug("calculated total storage size by user successfully",
|
||||
zap.String("user_id", userID.String()),
|
||||
zap.Int("accessible_collections_count", len(accessibleCollectionIDs)),
|
||||
zap.Int64("total_size_bytes", totalSize))
|
||||
|
||||
return totalSize, nil
|
||||
}
|
||||
|
||||
// GetTotalStorageSizeByCollection calculates total storage size for all active files in a specific collection
|
||||
func (impl *fileMetadataRepositoryImpl) GetTotalStorageSizeByCollection(ctx context.Context, collectionID gocql.UUID) (int64, error) {
|
||||
// Query files in the collection using the collection table
|
||||
query := `SELECT id, state, encrypted_file_size_in_bytes, encrypted_thumbnail_size_in_bytes
|
||||
FROM maplefile.files_by_collection
|
||||
WHERE collection_id = ?`
|
||||
|
||||
iter := impl.Session.Query(query, collectionID).WithContext(ctx).Iter()
|
||||
|
||||
var totalSize int64
|
||||
var fileID gocql.UUID
|
||||
var state string
|
||||
var fileSize, thumbnailSize int64
|
||||
|
||||
for iter.Scan(&fileID, &state, &fileSize, &thumbnailSize) {
|
||||
// Only include active files in size calculation
|
||||
if state != dom_file.FileStateActive {
|
||||
continue
|
||||
}
|
||||
|
||||
// Add both file and thumbnail sizes
|
||||
totalSize += fileSize + thumbnailSize
|
||||
}
|
||||
|
||||
if err := iter.Close(); err != nil {
|
||||
impl.Logger.Error("failed to calculate total storage size by collection",
|
||||
zap.String("collection_id", collectionID.String()),
|
||||
zap.Error(err))
|
||||
return 0, fmt.Errorf("failed to calculate total storage size by collection: %w", err)
|
||||
}
|
||||
|
||||
impl.Logger.Debug("calculated total storage size by collection successfully",
|
||||
zap.String("collection_id", collectionID.String()),
|
||||
zap.Int64("total_size_bytes", totalSize))
|
||||
|
||||
return totalSize, nil
|
||||
}
|
||||
|
||||
// GetStorageSizeBreakdownByUser provides detailed breakdown of storage usage
|
||||
// Returns owned size, shared size, and detailed collection breakdown
|
||||
func (impl *fileMetadataRepositoryImpl) GetStorageSizeBreakdownByUser(ctx context.Context, userID gocql.UUID, ownedCollectionIDs, sharedCollectionIDs []gocql.UUID) (ownedSize, sharedSize int64, collectionBreakdown map[gocql.UUID]int64, err error) {
|
||||
collectionBreakdown = make(map[gocql.UUID]int64)
|
||||
|
||||
// Calculate owned files storage size
|
||||
if len(ownedCollectionIDs) > 0 {
|
||||
ownedSize, err = impl.GetTotalStorageSizeByUser(ctx, userID, ownedCollectionIDs)
|
||||
if err != nil {
|
||||
return 0, 0, nil, fmt.Errorf("failed to calculate owned storage size: %w", err)
|
||||
}
|
||||
|
||||
// Get breakdown by owned collections
|
||||
for _, collectionID := range ownedCollectionIDs {
|
||||
size, sizeErr := impl.GetTotalStorageSizeByCollection(ctx, collectionID)
|
||||
if sizeErr != nil {
|
||||
impl.Logger.Warn("failed to get storage size for owned collection",
|
||||
zap.String("collection_id", collectionID.String()),
|
||||
zap.Error(sizeErr))
|
||||
continue
|
||||
}
|
||||
collectionBreakdown[collectionID] = size
|
||||
}
|
||||
}
|
||||
|
||||
// Calculate shared files storage size
|
||||
if len(sharedCollectionIDs) > 0 {
|
||||
sharedSize, err = impl.GetTotalStorageSizeByUser(ctx, userID, sharedCollectionIDs)
|
||||
if err != nil {
|
||||
return 0, 0, nil, fmt.Errorf("failed to calculate shared storage size: %w", err)
|
||||
}
|
||||
|
||||
// Get breakdown by shared collections
|
||||
for _, collectionID := range sharedCollectionIDs {
|
||||
size, sizeErr := impl.GetTotalStorageSizeByCollection(ctx, collectionID)
|
||||
if sizeErr != nil {
|
||||
impl.Logger.Warn("failed to get storage size for shared collection",
|
||||
zap.String("collection_id", collectionID.String()),
|
||||
zap.Error(sizeErr))
|
||||
continue
|
||||
}
|
||||
// Note: For shared collections, this shows the total size of the collection,
|
||||
// not just the user's contribution to it
|
||||
collectionBreakdown[collectionID] = size
|
||||
}
|
||||
}
|
||||
|
||||
impl.Logger.Debug("calculated storage size breakdown successfully",
|
||||
zap.String("user_id", userID.String()),
|
||||
zap.Int64("owned_size_bytes", ownedSize),
|
||||
zap.Int64("shared_size_bytes", sharedSize),
|
||||
zap.Int("owned_collections_count", len(ownedCollectionIDs)),
|
||||
zap.Int("shared_collections_count", len(sharedCollectionIDs)))
|
||||
|
||||
return ownedSize, sharedSize, collectionBreakdown, nil
|
||||
}
|
||||
247
cloud/maplefile-backend/internal/repo/filemetadata/update.go
Normal file
247
cloud/maplefile-backend/internal/repo/filemetadata/update.go
Normal file
|
|
@ -0,0 +1,247 @@
|
|||
// monorepo/cloud/maplefile-backend/internal/maplefile/repo/filemetadata/update.go
|
||||
package filemetadata
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/gocql/gocql"
|
||||
"go.uber.org/zap"
|
||||
|
||||
dom_file "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/file"
|
||||
)
|
||||
|
||||
func (impl *fileMetadataRepositoryImpl) Update(file *dom_file.File) error {
|
||||
if file == nil {
|
||||
return fmt.Errorf("file cannot be nil")
|
||||
}
|
||||
|
||||
if !impl.isValidUUID(file.ID) {
|
||||
return fmt.Errorf("file ID is required")
|
||||
}
|
||||
|
||||
// Get existing file to compare changes
|
||||
existing, err := impl.Get(file.ID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get existing file: %w", err)
|
||||
}
|
||||
|
||||
if existing == nil {
|
||||
return fmt.Errorf("file not found")
|
||||
}
|
||||
|
||||
// Update modified timestamp
|
||||
file.ModifiedAt = time.Now()
|
||||
|
||||
// Serialize encrypted file key
|
||||
encryptedKeyJSON, err := impl.serializeEncryptedFileKey(file.EncryptedFileKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to serialize encrypted file key: %w", err)
|
||||
}
|
||||
|
||||
// Serialize tags
|
||||
tagsJSON, err := impl.serializeTags(file.Tags)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to serialize tags: %w", err)
|
||||
}
|
||||
|
||||
batch := impl.Session.NewBatch(gocql.LoggedBatch)
|
||||
|
||||
// 1. Update main table
|
||||
batch.Query(`UPDATE maplefile.files_by_id SET
|
||||
collection_id = ?, owner_id = ?, encrypted_metadata = ?, encrypted_file_key = ?,
|
||||
encryption_version = ?, encrypted_hash = ?, encrypted_file_object_key = ?,
|
||||
encrypted_file_size_in_bytes = ?, encrypted_thumbnail_object_key = ?,
|
||||
encrypted_thumbnail_size_in_bytes = ?, tags = ?, created_at = ?, created_by_user_id = ?,
|
||||
modified_at = ?, modified_by_user_id = ?, version = ?, state = ?,
|
||||
tombstone_version = ?, tombstone_expiry = ?
|
||||
WHERE id = ?`,
|
||||
file.CollectionID, file.OwnerID, file.EncryptedMetadata, encryptedKeyJSON,
|
||||
file.EncryptionVersion, file.EncryptedHash, file.EncryptedFileObjectKey,
|
||||
file.EncryptedFileSizeInBytes, file.EncryptedThumbnailObjectKey,
|
||||
file.EncryptedThumbnailSizeInBytes, tagsJSON, file.CreatedAt, file.CreatedByUserID,
|
||||
file.ModifiedAt, file.ModifiedByUserID, file.Version, file.State,
|
||||
file.TombstoneVersion, file.TombstoneExpiry, file.ID)
|
||||
|
||||
// 2. Update collection table - delete old entry and insert new one
|
||||
if existing.CollectionID != file.CollectionID || existing.ModifiedAt != file.ModifiedAt {
|
||||
batch.Query(`DELETE FROM maplefile.files_by_collection
|
||||
WHERE collection_id = ? AND modified_at = ? AND id = ?`,
|
||||
existing.CollectionID, existing.ModifiedAt, file.ID)
|
||||
|
||||
batch.Query(`INSERT INTO maplefile.files_by_collection
|
||||
(collection_id, modified_at, id, owner_id, encrypted_metadata, encrypted_file_key,
|
||||
encryption_version, encrypted_hash, encrypted_file_object_key, encrypted_file_size_in_bytes,
|
||||
encrypted_thumbnail_object_key, encrypted_thumbnail_size_in_bytes,
|
||||
created_at, created_by_user_id, modified_by_user_id, version,
|
||||
state, tombstone_version, tombstone_expiry)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
|
||||
file.CollectionID, file.ModifiedAt, file.ID, file.OwnerID, file.EncryptedMetadata,
|
||||
encryptedKeyJSON, file.EncryptionVersion, file.EncryptedHash, file.EncryptedFileObjectKey,
|
||||
file.EncryptedFileSizeInBytes, file.EncryptedThumbnailObjectKey,
|
||||
file.EncryptedThumbnailSizeInBytes, file.CreatedAt, file.CreatedByUserID,
|
||||
file.ModifiedByUserID, file.Version, file.State, file.TombstoneVersion, file.TombstoneExpiry)
|
||||
}
|
||||
|
||||
// 3. Update owner table - delete old entry and insert new one
|
||||
if existing.OwnerID != file.OwnerID || existing.ModifiedAt != file.ModifiedAt {
|
||||
batch.Query(`DELETE FROM maplefile.files_by_owner
|
||||
WHERE owner_id = ? AND modified_at = ? AND id = ?`,
|
||||
existing.OwnerID, existing.ModifiedAt, file.ID)
|
||||
|
||||
batch.Query(`INSERT INTO maplefile.files_by_owner
|
||||
(owner_id, modified_at, id, collection_id, encrypted_metadata, encrypted_file_key,
|
||||
encryption_version, encrypted_hash, encrypted_file_object_key, encrypted_file_size_in_bytes,
|
||||
encrypted_thumbnail_object_key, encrypted_thumbnail_size_in_bytes,
|
||||
created_at, created_by_user_id, modified_by_user_id, version,
|
||||
state, tombstone_version, tombstone_expiry)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
|
||||
file.OwnerID, file.ModifiedAt, file.ID, file.CollectionID, file.EncryptedMetadata,
|
||||
encryptedKeyJSON, file.EncryptionVersion, file.EncryptedHash, file.EncryptedFileObjectKey,
|
||||
file.EncryptedFileSizeInBytes, file.EncryptedThumbnailObjectKey,
|
||||
file.EncryptedThumbnailSizeInBytes, file.CreatedAt, file.CreatedByUserID,
|
||||
file.ModifiedByUserID, file.Version, file.State, file.TombstoneVersion, file.TombstoneExpiry)
|
||||
}
|
||||
|
||||
// 4. Update created_by table - only if creator changed (rare) or created date changed
|
||||
if existing.CreatedByUserID != file.CreatedByUserID || existing.CreatedAt != file.CreatedAt {
|
||||
batch.Query(`DELETE FROM maplefile.files_by_creator
|
||||
WHERE created_by_user_id = ? AND created_at = ? AND id = ?`,
|
||||
existing.CreatedByUserID, existing.CreatedAt, file.ID)
|
||||
|
||||
batch.Query(`INSERT INTO maplefile.files_by_creator
|
||||
(created_by_user_id, created_at, id, collection_id, owner_id, encrypted_metadata,
|
||||
encrypted_file_key, encryption_version, encrypted_hash, encrypted_file_object_key,
|
||||
encrypted_file_size_in_bytes, encrypted_thumbnail_object_key, encrypted_thumbnail_size_in_bytes,
|
||||
modified_at, modified_by_user_id, version, state, tombstone_version, tombstone_expiry)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
|
||||
file.CreatedByUserID, file.CreatedAt, file.ID, file.CollectionID, file.OwnerID,
|
||||
file.EncryptedMetadata, encryptedKeyJSON, file.EncryptionVersion, file.EncryptedHash,
|
||||
file.EncryptedFileObjectKey, file.EncryptedFileSizeInBytes, file.EncryptedThumbnailObjectKey,
|
||||
file.EncryptedThumbnailSizeInBytes, file.ModifiedAt, file.ModifiedByUserID, file.Version,
|
||||
file.State, file.TombstoneVersion, file.TombstoneExpiry)
|
||||
}
|
||||
|
||||
// 5. Update user sync table - delete old entry and insert new one for owner
|
||||
batch.Query(`DELETE FROM maplefile.files_by_user
|
||||
WHERE user_id = ? AND modified_at = ? AND id = ?`,
|
||||
existing.OwnerID, existing.ModifiedAt, file.ID)
|
||||
|
||||
batch.Query(`INSERT INTO maplefile.files_by_user
|
||||
(user_id, modified_at, id, collection_id, owner_id, encrypted_metadata,
|
||||
encrypted_file_key, encryption_version, encrypted_hash, encrypted_file_object_key,
|
||||
encrypted_file_size_in_bytes, encrypted_thumbnail_object_key, encrypted_thumbnail_size_in_bytes,
|
||||
tags, created_at, created_by_user_id, modified_by_user_id, version,
|
||||
state, tombstone_version, tombstone_expiry)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
|
||||
file.OwnerID, file.ModifiedAt, file.ID, file.CollectionID, file.OwnerID,
|
||||
file.EncryptedMetadata, encryptedKeyJSON, file.EncryptionVersion, file.EncryptedHash,
|
||||
file.EncryptedFileObjectKey, file.EncryptedFileSizeInBytes, file.EncryptedThumbnailObjectKey,
|
||||
file.EncryptedThumbnailSizeInBytes, tagsJSON, file.CreatedAt, file.CreatedByUserID,
|
||||
file.ModifiedByUserID, file.Version, file.State, file.TombstoneVersion, file.TombstoneExpiry)
|
||||
|
||||
// 6. Update denormalized files_by_tag_id table
|
||||
// Calculate tag changes
|
||||
oldTagsMap := make(map[gocql.UUID]bool)
|
||||
for _, tag := range existing.Tags {
|
||||
oldTagsMap[tag.ID] = true
|
||||
}
|
||||
|
||||
newTagsMap := make(map[gocql.UUID]bool)
|
||||
for _, tag := range file.Tags {
|
||||
newTagsMap[tag.ID] = true
|
||||
}
|
||||
|
||||
// Delete entries for removed tags
|
||||
for tagID := range oldTagsMap {
|
||||
if !newTagsMap[tagID] {
|
||||
impl.Logger.Debug("removing file from tag denormalized table",
|
||||
zap.String("file_id", file.ID.String()),
|
||||
zap.String("tag_id", tagID.String()))
|
||||
batch.Query(`DELETE FROM maplefile.files_by_tag_id
|
||||
WHERE tag_id = ? AND file_id = ?`,
|
||||
tagID, file.ID)
|
||||
}
|
||||
}
|
||||
|
||||
// Insert/Update entries for current tags
|
||||
for _, tag := range file.Tags {
|
||||
impl.Logger.Debug("updating file in tag denormalized table",
|
||||
zap.String("file_id", file.ID.String()),
|
||||
zap.String("tag_id", tag.ID.String()))
|
||||
|
||||
batch.Query(`INSERT INTO maplefile.files_by_tag_id
|
||||
(tag_id, file_id, collection_id, owner_id, encrypted_metadata, encrypted_file_key,
|
||||
encryption_version, encrypted_hash, encrypted_file_object_key,
|
||||
encrypted_file_size_in_bytes, encrypted_thumbnail_object_key,
|
||||
encrypted_thumbnail_size_in_bytes, tag_ids, created_at, created_by_user_id,
|
||||
modified_at, modified_by_user_id, version, state, tombstone_version, tombstone_expiry,
|
||||
created_from_ip_address, modified_from_ip_address, ip_anonymized_at)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
|
||||
tag.ID, file.ID, file.CollectionID, file.OwnerID, file.EncryptedMetadata,
|
||||
encryptedKeyJSON, file.EncryptionVersion, file.EncryptedHash,
|
||||
file.EncryptedFileObjectKey, file.EncryptedFileSizeInBytes,
|
||||
file.EncryptedThumbnailObjectKey, file.EncryptedThumbnailSizeInBytes,
|
||||
tagsJSON, file.CreatedAt, file.CreatedByUserID, file.ModifiedAt,
|
||||
file.ModifiedByUserID, file.Version, file.State, file.TombstoneVersion,
|
||||
file.TombstoneExpiry,
|
||||
nil, nil, nil) // IP tracking fields not yet in domain model
|
||||
}
|
||||
|
||||
// Execute batch
|
||||
if err := impl.Session.ExecuteBatch(batch); err != nil {
|
||||
impl.Logger.Error("failed to update file",
|
||||
zap.String("file_id", file.ID.String()),
|
||||
zap.Error(err))
|
||||
return fmt.Errorf("failed to update file: %w", err)
|
||||
}
|
||||
|
||||
// Handle file count updates based on state changes
|
||||
wasActive := existing.State == dom_file.FileStateActive
|
||||
isActive := file.State == dom_file.FileStateActive
|
||||
|
||||
// Handle collection change for active files
|
||||
if existing.CollectionID != file.CollectionID && wasActive && isActive {
|
||||
// File moved from one collection to another while remaining active
|
||||
// Decrement old collection count
|
||||
if err := impl.CollectionRepo.DecrementFileCount(context.Background(), existing.CollectionID); err != nil {
|
||||
impl.Logger.Error("failed to decrement old collection file count",
|
||||
zap.String("file_id", file.ID.String()),
|
||||
zap.String("collection_id", existing.CollectionID.String()),
|
||||
zap.Error(err))
|
||||
// Don't fail the entire operation if count update fails
|
||||
}
|
||||
// Increment new collection count
|
||||
if err := impl.CollectionRepo.IncrementFileCount(context.Background(), file.CollectionID); err != nil {
|
||||
impl.Logger.Error("failed to increment new collection file count",
|
||||
zap.String("file_id", file.ID.String()),
|
||||
zap.String("collection_id", file.CollectionID.String()),
|
||||
zap.Error(err))
|
||||
// Don't fail the entire operation if count update fails
|
||||
}
|
||||
} else if wasActive && !isActive {
|
||||
// File transitioned from active to non-active (e.g., deleted)
|
||||
if err := impl.CollectionRepo.DecrementFileCount(context.Background(), existing.CollectionID); err != nil {
|
||||
impl.Logger.Error("failed to decrement collection file count",
|
||||
zap.String("file_id", file.ID.String()),
|
||||
zap.String("collection_id", existing.CollectionID.String()),
|
||||
zap.Error(err))
|
||||
// Don't fail the entire operation if count update fails
|
||||
}
|
||||
} else if !wasActive && isActive {
|
||||
// File transitioned from non-active to active (e.g., restored)
|
||||
if err := impl.CollectionRepo.IncrementFileCount(context.Background(), file.CollectionID); err != nil {
|
||||
impl.Logger.Error("failed to increment collection file count",
|
||||
zap.String("file_id", file.ID.String()),
|
||||
zap.String("collection_id", file.CollectionID.String()),
|
||||
zap.Error(err))
|
||||
// Don't fail the entire operation if count update fails
|
||||
}
|
||||
}
|
||||
|
||||
impl.Logger.Info("file updated successfully",
|
||||
zap.String("file_id", file.ID.String()))
|
||||
|
||||
return nil
|
||||
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue