Initial commit: Open sourcing all of the Maple Open Technologies code.
This commit is contained in:
commit
755d54a99d
2010 changed files with 448675 additions and 0 deletions
254
native/desktop/maplefile/internal/service/sync/file.go
Normal file
254
native/desktop/maplefile/internal/service/sync/file.go
Normal file
|
|
@ -0,0 +1,254 @@
|
|||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"go.uber.org/zap"
|
||||
|
||||
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/maplefile/client"
|
||||
collectionDomain "codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/collection"
|
||||
fileDomain "codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/file"
|
||||
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/syncstate"
|
||||
)
|
||||
|
||||
// FileSyncService defines the interface for file synchronization.
// It is implemented by fileSyncService and constructed via
// ProvideFileSyncService for Wire dependency injection.
type FileSyncService interface {
	// Execute runs one metadata synchronization pass against the cloud.
	// input may be nil, in which case defaults are applied. The returned
	// SyncResult carries counts and per-item errors; a non-nil error means
	// the run could not start at all (e.g. storage not initialized).
	Execute(ctx context.Context, input *SyncInput) (*SyncResult, error)
}
|
||||
|
||||
// RepositoryProvider provides access to user-specific repositories.
// This interface allows sync services to work with dynamically initialized storage.
// The storagemanager.Manager implements this interface.
type RepositoryProvider interface {
	// GetFileRepository returns the file repository.
	// May return nil if storage has not been initialized; callers should
	// check IsInitialized first (see fileSyncService.getFileRepo).
	GetFileRepository() fileDomain.Repository
	// GetCollectionRepository returns the collection repository.
	GetCollectionRepository() collectionDomain.Repository
	// GetSyncStateRepository returns the repository holding sync cursors.
	GetSyncStateRepository() syncstate.Repository
	// IsInitialized reports whether user storage is ready for use
	// (i.e. a user is logged in and repositories are available).
	IsInitialized() bool
}
|
||||
|
||||
// fileSyncService is the concrete FileSyncService implementation.
// It pulls file metadata from the cloud API and reconciles it with
// the local repositories supplied by repoProvider.
type fileSyncService struct {
	// logger is a named child logger ("FileSyncService").
	logger *zap.Logger
	// apiClient talks to the maplefile cloud backend.
	apiClient *client.Client
	// repoProvider supplies user-scoped repositories once storage is
	// initialized (after login).
	repoProvider RepositoryProvider
}
|
||||
|
||||
// ProvideFileSyncService creates a new file sync service for Wire
|
||||
func ProvideFileSyncService(
|
||||
logger *zap.Logger,
|
||||
apiClient *client.Client,
|
||||
repoProvider RepositoryProvider,
|
||||
) FileSyncService {
|
||||
return &fileSyncService{
|
||||
logger: logger.Named("FileSyncService"),
|
||||
apiClient: apiClient,
|
||||
repoProvider: repoProvider,
|
||||
}
|
||||
}
|
||||
|
||||
// getFileRepo returns the file repository, or an error if not initialized
|
||||
func (s *fileSyncService) getFileRepo() (fileDomain.Repository, error) {
|
||||
if !s.repoProvider.IsInitialized() {
|
||||
return nil, fmt.Errorf("storage not initialized - user must be logged in")
|
||||
}
|
||||
repo := s.repoProvider.GetFileRepository()
|
||||
if repo == nil {
|
||||
return nil, fmt.Errorf("file repository not available")
|
||||
}
|
||||
return repo, nil
|
||||
}
|
||||
|
||||
// getSyncStateRepo returns the sync state repository, or an error if not initialized
|
||||
func (s *fileSyncService) getSyncStateRepo() (syncstate.Repository, error) {
|
||||
if !s.repoProvider.IsInitialized() {
|
||||
return nil, fmt.Errorf("storage not initialized - user must be logged in")
|
||||
}
|
||||
repo := s.repoProvider.GetSyncStateRepository()
|
||||
if repo == nil {
|
||||
return nil, fmt.Errorf("sync state repository not available")
|
||||
}
|
||||
return repo, nil
|
||||
}
|
||||
|
||||
// Execute synchronizes files from the cloud to local storage (metadata only)
|
||||
func (s *fileSyncService) Execute(ctx context.Context, input *SyncInput) (*SyncResult, error) {
|
||||
s.logger.Info("Starting file synchronization")
|
||||
|
||||
// Get repositories (will fail if user not logged in)
|
||||
syncStateRepo, err := s.getSyncStateRepo()
|
||||
if err != nil {
|
||||
s.logger.Error("Cannot sync - storage not initialized", zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Set defaults
|
||||
if input == nil {
|
||||
input = &SyncInput{}
|
||||
}
|
||||
if input.BatchSize <= 0 {
|
||||
input.BatchSize = DefaultBatchSize
|
||||
}
|
||||
if input.MaxBatches <= 0 {
|
||||
input.MaxBatches = DefaultMaxBatches
|
||||
}
|
||||
|
||||
// Get current sync state
|
||||
state, err := syncStateRepo.Get()
|
||||
if err != nil {
|
||||
s.logger.Error("Failed to get sync state", zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result := &SyncResult{}
|
||||
batchCount := 0
|
||||
|
||||
// Sync loop - fetch and process batches until done or max reached
|
||||
for batchCount < input.MaxBatches {
|
||||
// Prepare API request
|
||||
syncInput := &client.SyncInput{
|
||||
Cursor: state.FileCursor,
|
||||
Limit: input.BatchSize,
|
||||
}
|
||||
|
||||
// Fetch batch from cloud
|
||||
resp, err := s.apiClient.SyncFiles(ctx, syncInput)
|
||||
if err != nil {
|
||||
s.logger.Error("Failed to fetch files from cloud", zap.Error(err))
|
||||
result.Errors = append(result.Errors, "failed to fetch files: "+err.Error())
|
||||
break
|
||||
}
|
||||
|
||||
// Process each file in the batch
|
||||
for _, cloudFile := range resp.Files {
|
||||
if err := s.processFile(ctx, cloudFile, input.Password, result); err != nil {
|
||||
s.logger.Error("Failed to process file",
|
||||
zap.String("id", cloudFile.ID),
|
||||
zap.Error(err))
|
||||
result.Errors = append(result.Errors, "failed to process file "+cloudFile.ID+": "+err.Error())
|
||||
}
|
||||
result.FilesProcessed++
|
||||
}
|
||||
|
||||
// Update sync state with new cursor
|
||||
state.UpdateFileSync(resp.NextCursor, resp.HasMore)
|
||||
if err := syncStateRepo.Save(state); err != nil {
|
||||
s.logger.Error("Failed to save sync state", zap.Error(err))
|
||||
result.Errors = append(result.Errors, "failed to save sync state: "+err.Error())
|
||||
}
|
||||
|
||||
batchCount++
|
||||
|
||||
// Check if we're done
|
||||
if !resp.HasMore {
|
||||
s.logger.Info("File sync completed - no more items")
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
s.logger.Info("File sync finished",
|
||||
zap.Int("processed", result.FilesProcessed),
|
||||
zap.Int("added", result.FilesAdded),
|
||||
zap.Int("updated", result.FilesUpdated),
|
||||
zap.Int("deleted", result.FilesDeleted),
|
||||
zap.Int("errors", len(result.Errors)))
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// processFile handles a single file from the cloud
|
||||
// Note: ctx and password are reserved for future use (on-demand content decryption)
|
||||
func (s *fileSyncService) processFile(_ context.Context, cloudFile *client.File, _ string, result *SyncResult) error {
|
||||
// Get file repository
|
||||
fileRepo, err := s.getFileRepo()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Check if file exists locally
|
||||
localFile, err := fileRepo.Get(cloudFile.ID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Handle deleted files
|
||||
if cloudFile.State == fileDomain.StateDeleted {
|
||||
if localFile != nil {
|
||||
if err := fileRepo.Delete(cloudFile.ID); err != nil {
|
||||
return err
|
||||
}
|
||||
result.FilesDeleted++
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Create or update local file (metadata only - no content download)
|
||||
if localFile == nil {
|
||||
// Create new local file record
|
||||
newFile := s.mapCloudToLocal(cloudFile)
|
||||
if err := fileRepo.Create(newFile); err != nil {
|
||||
return err
|
||||
}
|
||||
result.FilesAdded++
|
||||
} else {
|
||||
// Update existing file metadata
|
||||
updatedFile := s.mapCloudToLocal(cloudFile)
|
||||
// Preserve local-only fields
|
||||
updatedFile.FilePath = localFile.FilePath
|
||||
updatedFile.EncryptedFilePath = localFile.EncryptedFilePath
|
||||
updatedFile.ThumbnailPath = localFile.ThumbnailPath
|
||||
updatedFile.Name = localFile.Name
|
||||
updatedFile.MimeType = localFile.MimeType
|
||||
updatedFile.Metadata = localFile.Metadata
|
||||
|
||||
// If file has local content, it's synced; otherwise it's cloud-only
|
||||
if localFile.HasLocalContent() {
|
||||
updatedFile.SyncStatus = fileDomain.SyncStatusSynced
|
||||
} else {
|
||||
updatedFile.SyncStatus = fileDomain.SyncStatusCloudOnly
|
||||
}
|
||||
updatedFile.LastSyncedAt = time.Now()
|
||||
|
||||
if err := fileRepo.Update(updatedFile); err != nil {
|
||||
return err
|
||||
}
|
||||
result.FilesUpdated++
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// mapCloudToLocal converts a cloud file to local domain model
|
||||
func (s *fileSyncService) mapCloudToLocal(cloudFile *client.File) *fileDomain.File {
|
||||
return &fileDomain.File{
|
||||
ID: cloudFile.ID,
|
||||
CollectionID: cloudFile.CollectionID,
|
||||
OwnerID: cloudFile.UserID,
|
||||
EncryptedFileKey: fileDomain.EncryptedFileKeyData{
|
||||
Ciphertext: cloudFile.EncryptedFileKey.Ciphertext,
|
||||
Nonce: cloudFile.EncryptedFileKey.Nonce,
|
||||
},
|
||||
FileKeyNonce: cloudFile.FileKeyNonce,
|
||||
EncryptedMetadata: cloudFile.EncryptedMetadata,
|
||||
MetadataNonce: cloudFile.MetadataNonce,
|
||||
FileNonce: cloudFile.FileNonce,
|
||||
EncryptedSizeInBytes: cloudFile.EncryptedSizeInBytes,
|
||||
DecryptedSizeInBytes: cloudFile.DecryptedSizeInBytes,
|
||||
// Local paths are empty until file is downloaded (onloaded)
|
||||
EncryptedFilePath: "",
|
||||
FilePath: "",
|
||||
ThumbnailPath: "",
|
||||
// Metadata will be decrypted when file is onloaded
|
||||
Name: "",
|
||||
MimeType: "",
|
||||
Metadata: nil,
|
||||
SyncStatus: fileDomain.SyncStatusCloudOnly, // Files start as cloud-only
|
||||
LastSyncedAt: time.Now(),
|
||||
State: cloudFile.State,
|
||||
StorageMode: cloudFile.StorageMode,
|
||||
Version: cloudFile.Version,
|
||||
CreatedAt: cloudFile.CreatedAt,
|
||||
ModifiedAt: cloudFile.ModifiedAt,
|
||||
ThumbnailURL: cloudFile.ThumbnailURL,
|
||||
}
|
||||
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue