Initial commit: Open sourcing all of the Maple Open Technologies code.

This commit is contained in:
Bartlomiej Mika 2025-12-02 14:33:08 -05:00
commit 755d54a99d
2010 changed files with 448675 additions and 0 deletions

View file

@ -0,0 +1,225 @@
package sync
import (
"context"
"fmt"
"time"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/maplefile/client"
collectionDomain "codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/collection"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/syncstate"
)
// CollectionSyncService defines the interface for collection synchronization
type CollectionSyncService interface {
	// Execute runs one collection sync pass, pulling batches from the cloud
	// and applying them to local storage. See collectionSyncService.Execute.
	Execute(ctx context.Context, input *SyncInput) (*SyncResult, error)
}

// collectionSyncService is the default CollectionSyncService implementation.
type collectionSyncService struct {
	logger       *zap.Logger        // named sub-logger for this service
	apiClient    *client.Client     // cloud API client used to fetch batches
	repoProvider RepositoryProvider // supplies user-scoped repositories after login
}
// ProvideCollectionSyncService creates a new collection sync service for Wire
func ProvideCollectionSyncService(
	logger *zap.Logger,
	apiClient *client.Client,
	repoProvider RepositoryProvider,
) CollectionSyncService {
	// Name the logger so entries from this service are easy to filter.
	svc := &collectionSyncService{
		logger:       logger.Named("CollectionSyncService"),
		apiClient:    apiClient,
		repoProvider: repoProvider,
	}
	return svc
}
// getCollectionRepo returns the collection repository, or an error when the
// user-scoped storage has not been initialized (i.e. no user is logged in)
// or the provider cannot supply the repository.
func (s *collectionSyncService) getCollectionRepo() (collectionDomain.Repository, error) {
	if !s.repoProvider.IsInitialized() {
		return nil, fmt.Errorf("storage not initialized - user must be logged in")
	}
	if repo := s.repoProvider.GetCollectionRepository(); repo != nil {
		return repo, nil
	}
	return nil, fmt.Errorf("collection repository not available")
}
// getSyncStateRepo returns the sync state repository, or an error when the
// user-scoped storage has not been initialized (i.e. no user is logged in)
// or the provider cannot supply the repository.
func (s *collectionSyncService) getSyncStateRepo() (syncstate.Repository, error) {
	if !s.repoProvider.IsInitialized() {
		return nil, fmt.Errorf("storage not initialized - user must be logged in")
	}
	if repo := s.repoProvider.GetSyncStateRepository(); repo != nil {
		return repo, nil
	}
	return nil, fmt.Errorf("sync state repository not available")
}
// Execute synchronizes collections from the cloud to local storage.
//
// It pulls batches of collection records from the API using a cursor stored
// in the sync state, applies each one locally via processCollection, and
// persists the advanced cursor after every batch so an interrupted sync can
// resume where it left off.
//
// Item-level failures are accumulated in the result's Errors slice rather
// than aborting the whole sync; a batch fetch failure or context cancellation
// stops the loop early with whatever progress was made.
func (s *collectionSyncService) Execute(ctx context.Context, input *SyncInput) (*SyncResult, error) {
	s.logger.Info("Starting collection synchronization")
	// Get repositories (will fail if user not logged in).
	syncStateRepo, err := s.getSyncStateRepo()
	if err != nil {
		s.logger.Error("Cannot sync - storage not initialized", zap.Error(err))
		return nil, err
	}
	// Apply defaults on a local copy so the caller's input struct is never mutated.
	var opts SyncInput
	if input != nil {
		opts = *input
	}
	if opts.BatchSize <= 0 {
		opts.BatchSize = DefaultBatchSize
	}
	if opts.MaxBatches <= 0 {
		opts.MaxBatches = DefaultMaxBatches
	}
	// Load the current sync state (holds the resume cursor).
	state, err := syncStateRepo.Get()
	if err != nil {
		s.logger.Error("Failed to get sync state", zap.Error(err))
		return nil, err
	}
	result := &SyncResult{}
	batchCount := 0
	// Sync loop - fetch and process batches until done or max reached.
	for batchCount < opts.MaxBatches {
		// Honor caller cancellation between batches.
		if cerr := ctx.Err(); cerr != nil {
			result.Errors = append(result.Errors, "sync cancelled: "+cerr.Error())
			break
		}
		// Fetch the next batch from the cloud.
		resp, err := s.apiClient.SyncCollections(ctx, &client.SyncInput{
			Cursor: state.CollectionCursor,
			Limit:  opts.BatchSize,
		})
		if err != nil {
			s.logger.Error("Failed to fetch collections from cloud", zap.Error(err))
			result.Errors = append(result.Errors, "failed to fetch collections: "+err.Error())
			break
		}
		// Process each collection in the batch; item errors are recorded, not fatal.
		for _, cloudCol := range resp.Collections {
			if err := s.processCollection(ctx, cloudCol, opts.Password, result); err != nil {
				s.logger.Error("Failed to process collection",
					zap.String("id", cloudCol.ID),
					zap.Error(err))
				result.Errors = append(result.Errors, "failed to process collection "+cloudCol.ID+": "+err.Error())
			}
			result.CollectionsProcessed++
		}
		// Persist the advanced cursor so an interrupted sync can resume here.
		state.UpdateCollectionSync(resp.NextCursor, resp.HasMore)
		if err := syncStateRepo.Save(state); err != nil {
			s.logger.Error("Failed to save sync state", zap.Error(err))
			result.Errors = append(result.Errors, "failed to save sync state: "+err.Error())
		}
		batchCount++
		// Stop once the server reports no more items.
		if !resp.HasMore {
			s.logger.Info("Collection sync completed - no more items")
			break
		}
	}
	s.logger.Info("Collection sync finished",
		zap.Int("processed", result.CollectionsProcessed),
		zap.Int("added", result.CollectionsAdded),
		zap.Int("updated", result.CollectionsUpdated),
		zap.Int("deleted", result.CollectionsDeleted),
		zap.Int("errors", len(result.Errors)))
	return result, nil
}
// processCollection applies one cloud collection record to local storage.
// The context and password parameters are reserved for future use
// (on-demand content decryption) and are currently ignored.
func (s *collectionSyncService) processCollection(_ context.Context, cloudCol *client.Collection, _ string, result *SyncResult) error {
	repo, err := s.getCollectionRepo()
	if err != nil {
		return err
	}
	// Look up any existing local copy of this collection.
	existing, err := repo.Get(cloudCol.ID)
	if err != nil {
		return err
	}
	// Tombstone from the cloud: drop the local copy if we have one.
	if cloudCol.State == collectionDomain.StateDeleted {
		if existing == nil {
			return nil
		}
		if err := repo.Delete(cloudCol.ID); err != nil {
			return err
		}
		result.CollectionsDeleted++
		return nil
	}
	// The collection name comes from the API already decrypted for owned
	// collections. Shared collections would need decryption via the key
	// chain; for now the name is used as-is from the API response.
	name := cloudCol.Name
	if existing == nil {
		// No local record yet - create one from the cloud copy.
		if err := repo.Create(s.mapCloudToLocal(cloudCol, name)); err != nil {
			return err
		}
		result.CollectionsAdded++
		return nil
	}
	// Refresh the existing record from the cloud copy.
	refreshed := s.mapCloudToLocal(cloudCol, name)
	refreshed.SyncStatus = existing.SyncStatus // keep the local sync status
	refreshed.LastSyncedAt = time.Now()
	if err := repo.Update(refreshed); err != nil {
		return err
	}
	result.CollectionsUpdated++
	return nil
}
// mapCloudToLocal converts a cloud collection to local domain model.
// The returned record is marked cloud-only with a fresh LastSyncedAt;
// callers updating an existing local record overwrite those fields afterwards.
func (s *collectionSyncService) mapCloudToLocal(cloudCol *client.Collection, decryptedName string) *collectionDomain.Collection {
	return &collectionDomain.Collection{
		ID: cloudCol.ID,
		ParentID: cloudCol.ParentID,
		OwnerID: cloudCol.UserID,
		// Key material is stored still-encrypted, together with its nonce.
		EncryptedCollectionKey: cloudCol.EncryptedCollectionKey.Ciphertext,
		Nonce: cloudCol.EncryptedCollectionKey.Nonce,
		Name: decryptedName,
		Description: cloudCol.Description,
		CustomIcon: cloudCol.CustomIcon, // Custom icon (emoji or "icon:<id>")
		TotalFiles: cloudCol.TotalFiles,
		TotalSizeInBytes: cloudCol.TotalSizeInBytes,
		// Sharing/ownership metadata as reported by the cloud.
		PermissionLevel: cloudCol.PermissionLevel,
		IsOwner: cloudCol.IsOwner,
		OwnerName: cloudCol.OwnerName,
		OwnerEmail: cloudCol.OwnerEmail,
		SyncStatus: collectionDomain.SyncStatusCloudOnly,
		LastSyncedAt: time.Now(),
		State: cloudCol.State,
		CreatedAt: cloudCol.CreatedAt,
		ModifiedAt: cloudCol.ModifiedAt,
	}
}

View file

@ -0,0 +1,254 @@
package sync
import (
"context"
"fmt"
"time"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/maplefile/client"
collectionDomain "codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/collection"
fileDomain "codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/file"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/syncstate"
)
// FileSyncService defines the interface for file synchronization
type FileSyncService interface {
	// Execute runs one file-metadata sync pass, pulling batches from the
	// cloud and applying them to local storage. See fileSyncService.Execute.
	Execute(ctx context.Context, input *SyncInput) (*SyncResult, error)
}

// RepositoryProvider provides access to user-specific repositories.
// This interface allows sync services to work with dynamically initialized storage.
// The storagemanager.Manager implements this interface.
type RepositoryProvider interface {
	GetFileRepository() fileDomain.Repository
	GetCollectionRepository() collectionDomain.Repository
	GetSyncStateRepository() syncstate.Repository
	// IsInitialized reports whether user-scoped storage is ready; the getters
	// above may return nil until this is true.
	IsInitialized() bool
}

// fileSyncService is the default FileSyncService implementation.
type fileSyncService struct {
	logger       *zap.Logger        // named sub-logger for this service
	apiClient    *client.Client     // cloud API client used to fetch batches
	repoProvider RepositoryProvider // supplies user-scoped repositories after login
}
// ProvideFileSyncService creates a new file sync service for Wire
func ProvideFileSyncService(
	logger *zap.Logger,
	apiClient *client.Client,
	repoProvider RepositoryProvider,
) FileSyncService {
	// Name the logger so entries from this service are easy to filter.
	svc := &fileSyncService{
		logger:       logger.Named("FileSyncService"),
		apiClient:    apiClient,
		repoProvider: repoProvider,
	}
	return svc
}
// getFileRepo returns the file repository, or an error when the user-scoped
// storage has not been initialized (i.e. no user is logged in) or the
// provider cannot supply the repository.
func (s *fileSyncService) getFileRepo() (fileDomain.Repository, error) {
	if !s.repoProvider.IsInitialized() {
		return nil, fmt.Errorf("storage not initialized - user must be logged in")
	}
	if repo := s.repoProvider.GetFileRepository(); repo != nil {
		return repo, nil
	}
	return nil, fmt.Errorf("file repository not available")
}
// getSyncStateRepo returns the sync state repository, or an error when the
// user-scoped storage has not been initialized (i.e. no user is logged in)
// or the provider cannot supply the repository.
func (s *fileSyncService) getSyncStateRepo() (syncstate.Repository, error) {
	if !s.repoProvider.IsInitialized() {
		return nil, fmt.Errorf("storage not initialized - user must be logged in")
	}
	if repo := s.repoProvider.GetSyncStateRepository(); repo != nil {
		return repo, nil
	}
	return nil, fmt.Errorf("sync state repository not available")
}
// Execute synchronizes files from the cloud to local storage (metadata only).
//
// It pulls batches of file records from the API using a cursor stored in the
// sync state, applies each one locally via processFile, and persists the
// advanced cursor after every batch so an interrupted sync can resume. No
// file content is downloaded here - only metadata.
//
// Item-level failures are accumulated in the result's Errors slice rather
// than aborting the whole sync; a batch fetch failure or context cancellation
// stops the loop early with whatever progress was made.
func (s *fileSyncService) Execute(ctx context.Context, input *SyncInput) (*SyncResult, error) {
	s.logger.Info("Starting file synchronization")
	// Get repositories (will fail if user not logged in).
	syncStateRepo, err := s.getSyncStateRepo()
	if err != nil {
		s.logger.Error("Cannot sync - storage not initialized", zap.Error(err))
		return nil, err
	}
	// Apply defaults on a local copy so the caller's input struct is never mutated.
	var opts SyncInput
	if input != nil {
		opts = *input
	}
	if opts.BatchSize <= 0 {
		opts.BatchSize = DefaultBatchSize
	}
	if opts.MaxBatches <= 0 {
		opts.MaxBatches = DefaultMaxBatches
	}
	// Load the current sync state (holds the resume cursor).
	state, err := syncStateRepo.Get()
	if err != nil {
		s.logger.Error("Failed to get sync state", zap.Error(err))
		return nil, err
	}
	result := &SyncResult{}
	batchCount := 0
	// Sync loop - fetch and process batches until done or max reached.
	for batchCount < opts.MaxBatches {
		// Honor caller cancellation between batches.
		if cerr := ctx.Err(); cerr != nil {
			result.Errors = append(result.Errors, "sync cancelled: "+cerr.Error())
			break
		}
		// Fetch the next batch from the cloud.
		resp, err := s.apiClient.SyncFiles(ctx, &client.SyncInput{
			Cursor: state.FileCursor,
			Limit:  opts.BatchSize,
		})
		if err != nil {
			s.logger.Error("Failed to fetch files from cloud", zap.Error(err))
			result.Errors = append(result.Errors, "failed to fetch files: "+err.Error())
			break
		}
		// Process each file in the batch; item errors are recorded, not fatal.
		for _, cloudFile := range resp.Files {
			if err := s.processFile(ctx, cloudFile, opts.Password, result); err != nil {
				s.logger.Error("Failed to process file",
					zap.String("id", cloudFile.ID),
					zap.Error(err))
				result.Errors = append(result.Errors, "failed to process file "+cloudFile.ID+": "+err.Error())
			}
			result.FilesProcessed++
		}
		// Persist the advanced cursor so an interrupted sync can resume here.
		state.UpdateFileSync(resp.NextCursor, resp.HasMore)
		if err := syncStateRepo.Save(state); err != nil {
			s.logger.Error("Failed to save sync state", zap.Error(err))
			result.Errors = append(result.Errors, "failed to save sync state: "+err.Error())
		}
		batchCount++
		// Stop once the server reports no more items.
		if !resp.HasMore {
			s.logger.Info("File sync completed - no more items")
			break
		}
	}
	s.logger.Info("File sync finished",
		zap.Int("processed", result.FilesProcessed),
		zap.Int("added", result.FilesAdded),
		zap.Int("updated", result.FilesUpdated),
		zap.Int("deleted", result.FilesDeleted),
		zap.Int("errors", len(result.Errors)))
	return result, nil
}
// processFile applies one cloud file record to local storage (metadata only).
// The context and password parameters are reserved for future use
// (on-demand content decryption) and are currently ignored.
func (s *fileSyncService) processFile(_ context.Context, cloudFile *client.File, _ string, result *SyncResult) error {
	repo, err := s.getFileRepo()
	if err != nil {
		return err
	}
	// Look up any existing local copy of this file.
	existing, err := repo.Get(cloudFile.ID)
	if err != nil {
		return err
	}
	// Tombstone from the cloud: drop the local record if we have one.
	if cloudFile.State == fileDomain.StateDeleted {
		if existing == nil {
			return nil
		}
		if err := repo.Delete(cloudFile.ID); err != nil {
			return err
		}
		result.FilesDeleted++
		return nil
	}
	if existing == nil {
		// No local record yet - create one (metadata only, no content download).
		if err := repo.Create(s.mapCloudToLocal(cloudFile)); err != nil {
			return err
		}
		result.FilesAdded++
		return nil
	}
	// Refresh the existing record from the cloud copy, keeping local-only fields.
	merged := s.mapCloudToLocal(cloudFile)
	merged.FilePath = existing.FilePath
	merged.EncryptedFilePath = existing.EncryptedFilePath
	merged.ThumbnailPath = existing.ThumbnailPath
	merged.Name = existing.Name
	merged.MimeType = existing.MimeType
	merged.Metadata = existing.Metadata
	// A file with local content is fully synced; otherwise it lives cloud-only.
	if existing.HasLocalContent() {
		merged.SyncStatus = fileDomain.SyncStatusSynced
	} else {
		merged.SyncStatus = fileDomain.SyncStatusCloudOnly
	}
	merged.LastSyncedAt = time.Now()
	if err := repo.Update(merged); err != nil {
		return err
	}
	result.FilesUpdated++
	return nil
}
// mapCloudToLocal converts a cloud file to local domain model.
// The returned record carries encrypted metadata only: local paths and
// decrypted name/metadata stay empty until the file content is onloaded.
func (s *fileSyncService) mapCloudToLocal(cloudFile *client.File) *fileDomain.File {
	return &fileDomain.File{
		ID: cloudFile.ID,
		CollectionID: cloudFile.CollectionID,
		OwnerID: cloudFile.UserID,
		// Key material is stored still-encrypted, together with its nonces.
		EncryptedFileKey: fileDomain.EncryptedFileKeyData{
			Ciphertext: cloudFile.EncryptedFileKey.Ciphertext,
			Nonce: cloudFile.EncryptedFileKey.Nonce,
		},
		FileKeyNonce: cloudFile.FileKeyNonce,
		EncryptedMetadata: cloudFile.EncryptedMetadata,
		MetadataNonce: cloudFile.MetadataNonce,
		FileNonce: cloudFile.FileNonce,
		EncryptedSizeInBytes: cloudFile.EncryptedSizeInBytes,
		DecryptedSizeInBytes: cloudFile.DecryptedSizeInBytes,
		// Local paths are empty until file is downloaded (onloaded)
		EncryptedFilePath: "",
		FilePath: "",
		ThumbnailPath: "",
		// Metadata will be decrypted when file is onloaded
		Name: "",
		MimeType: "",
		Metadata: nil,
		SyncStatus: fileDomain.SyncStatusCloudOnly, // Files start as cloud-only
		LastSyncedAt: time.Now(),
		State: cloudFile.State,
		StorageMode: cloudFile.StorageMode,
		Version: cloudFile.Version,
		CreatedAt: cloudFile.CreatedAt,
		ModifiedAt: cloudFile.ModifiedAt,
		ThumbnailURL: cloudFile.ThumbnailURL,
	}
}

View file

@ -0,0 +1,149 @@
package sync
import (
"context"
"fmt"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/syncstate"
)
// Service provides unified sync operations
type Service interface {
	// SyncAll synchronizes both collections and files
	SyncAll(ctx context.Context, input *SyncInput) (*SyncResult, error)
	// SyncCollections synchronizes collections only
	SyncCollections(ctx context.Context, input *SyncInput) (*SyncResult, error)
	// SyncFiles synchronizes files only
	SyncFiles(ctx context.Context, input *SyncInput) (*SyncResult, error)
	// GetSyncStatus returns the current sync status
	GetSyncStatus(ctx context.Context) (*SyncStatus, error)
	// ResetSync resets all sync state for a fresh sync
	ResetSync(ctx context.Context) error
}

// service is the default Service implementation, composing the two
// specialized sync services behind a single facade.
type service struct {
	logger         *zap.Logger           // named sub-logger for this service
	collectionSync CollectionSyncService // handles collection batches
	fileSync       FileSyncService       // handles file-metadata batches
	repoProvider   RepositoryProvider    // supplies user-scoped repositories after login
}
// ProvideService creates a new unified sync service for Wire
func ProvideService(
	logger *zap.Logger,
	collectionSync CollectionSyncService,
	fileSync FileSyncService,
	repoProvider RepositoryProvider,
) Service {
	// Name the logger so entries from this facade are easy to filter.
	svc := &service{
		logger:         logger.Named("SyncService"),
		collectionSync: collectionSync,
		fileSync:       fileSync,
		repoProvider:   repoProvider,
	}
	return svc
}
// getSyncStateRepo returns the sync state repository, or an error when the
// user-scoped storage has not been initialized (i.e. no user is logged in)
// or the provider cannot supply the repository.
func (s *service) getSyncStateRepo() (syncstate.Repository, error) {
	if !s.repoProvider.IsInitialized() {
		return nil, fmt.Errorf("storage not initialized - user must be logged in")
	}
	if repo := s.repoProvider.GetSyncStateRepository(); repo != nil {
		return repo, nil
	}
	return nil, fmt.Errorf("sync state repository not available")
}
// SyncAll synchronizes both collections and files.
//
// Collections are synced first; if that fails, no file sync is attempted and
// only the error is returned. If the file sync fails afterwards, the partial
// (collection-only) result is returned alongside the error so callers can see
// what was synced. On success the two partial results are merged into one.
func (s *service) SyncAll(ctx context.Context, input *SyncInput) (*SyncResult, error) {
	s.logger.Info("Starting full sync (collections + files)")
	// Sync collections first
	colResult, err := s.collectionSync.Execute(ctx, input)
	if err != nil {
		s.logger.Error("Collection sync failed during full sync", zap.Error(err))
		return nil, err
	}
	// Sync files
	fileResult, err := s.fileSync.Execute(ctx, input)
	if err != nil {
		s.logger.Error("File sync failed during full sync", zap.Error(err))
		// Build a fresh error slice instead of appending to colResult.Errors,
		// so we never mutate/alias the backing array of the partial result.
		errs := make([]string, 0, len(colResult.Errors)+1)
		errs = append(errs, colResult.Errors...)
		errs = append(errs, "file sync failed: "+err.Error())
		// Return partial result with collection data
		return &SyncResult{
			CollectionsProcessed: colResult.CollectionsProcessed,
			CollectionsAdded:     colResult.CollectionsAdded,
			CollectionsUpdated:   colResult.CollectionsUpdated,
			CollectionsDeleted:   colResult.CollectionsDeleted,
			Errors:               errs,
		}, err
	}
	// Merge results (again into a fresh slice to avoid aliasing either input).
	errs := make([]string, 0, len(colResult.Errors)+len(fileResult.Errors))
	errs = append(errs, colResult.Errors...)
	errs = append(errs, fileResult.Errors...)
	result := &SyncResult{
		CollectionsProcessed: colResult.CollectionsProcessed,
		CollectionsAdded:     colResult.CollectionsAdded,
		CollectionsUpdated:   colResult.CollectionsUpdated,
		CollectionsDeleted:   colResult.CollectionsDeleted,
		FilesProcessed:       fileResult.FilesProcessed,
		FilesAdded:           fileResult.FilesAdded,
		FilesUpdated:         fileResult.FilesUpdated,
		FilesDeleted:         fileResult.FilesDeleted,
		Errors:               errs,
	}
	s.logger.Info("Full sync completed",
		zap.Int("collections_processed", result.CollectionsProcessed),
		zap.Int("files_processed", result.FilesProcessed),
		zap.Int("errors", len(result.Errors)))
	return result, nil
}
// SyncCollections synchronizes collections only
func (s *service) SyncCollections(ctx context.Context, input *SyncInput) (*SyncResult, error) {
	// Thin delegate to the dedicated collection sync service.
	return s.collectionSync.Execute(ctx, input)
}
// SyncFiles synchronizes files only
func (s *service) SyncFiles(ctx context.Context, input *SyncInput) (*SyncResult, error) {
	// Thin delegate to the dedicated file sync service.
	return s.fileSync.Execute(ctx, input)
}
// GetSyncStatus returns the current sync status as recorded in the persisted
// sync state. The context parameter is currently unused.
func (s *service) GetSyncStatus(ctx context.Context) (*SyncStatus, error) {
	repo, err := s.getSyncStateRepo()
	if err != nil {
		return nil, err
	}
	st, err := repo.Get()
	if err != nil {
		return nil, err
	}
	status := &SyncStatus{
		CollectionsSynced: st.IsCollectionSyncComplete(),
		FilesSynced:       st.IsFileSyncComplete(),
		FullySynced:       st.IsFullySynced(),
	}
	return status, nil
}
// ResetSync clears all stored sync state so the next sync starts from scratch.
func (s *service) ResetSync(ctx context.Context) error {
	s.logger.Info("Resetting sync state")
	repo, err := s.getSyncStateRepo()
	if err != nil {
		return err
	}
	return repo.Reset()
}

View file

@ -0,0 +1,39 @@
package sync
// SyncResult represents the result of a sync operation
type SyncResult struct {
	// Collection sync statistics
	CollectionsProcessed int `json:"collections_processed"` // total collection records examined, including ones that errored
	CollectionsAdded int `json:"collections_added"` // new local collections created
	CollectionsUpdated int `json:"collections_updated"` // existing local collections refreshed from the cloud
	CollectionsDeleted int `json:"collections_deleted"` // local collections removed due to cloud tombstones
	// File sync statistics
	FilesProcessed int `json:"files_processed"` // total file records examined, including ones that errored
	FilesAdded int `json:"files_added"` // new local file records created
	FilesUpdated int `json:"files_updated"` // existing local file records refreshed from the cloud
	FilesDeleted int `json:"files_deleted"` // local file records removed due to cloud tombstones
	// Errors encountered during sync (one human-readable message per failure)
	Errors []string `json:"errors,omitempty"`
}
// SyncInput represents input parameters for sync operations.
// A nil SyncInput is accepted by the sync services; zero or negative
// BatchSize/MaxBatches fall back to the package defaults.
type SyncInput struct {
	BatchSize int64 `json:"batch_size,omitempty"` // Number of items per batch (default: 50)
	MaxBatches int `json:"max_batches,omitempty"` // Maximum batches to process (default: 100)
	Password string `json:"password"` // Required for E2EE decryption
}
// SyncStatus represents the current sync status as derived from the
// persisted sync state cursors.
type SyncStatus struct {
	CollectionsSynced bool `json:"collections_synced"` // collection cursor has reached the end
	FilesSynced bool `json:"files_synced"` // file cursor has reached the end
	FullySynced bool `json:"fully_synced"` // both collection and file sync are complete
}
// Sync tuning defaults, applied when the corresponding SyncInput fields are
// unset (zero or negative).
const (
	// DefaultBatchSize is the default number of items to fetch per API call.
	DefaultBatchSize = 50
	// DefaultMaxBatches is the default maximum number of batches to process.
	DefaultMaxBatches = 100
)