Initial commit: Open sourcing all of the Maple Open Technologies code.

This commit is contained in:
Bartlomiej Mika 2025-12-02 14:33:08 -05:00
commit 755d54a99d
2010 changed files with 448675 additions and 0 deletions

View file

@ -0,0 +1,977 @@
package app
import (
	"context"
	"crypto/rand"
	"encoding/base64"
	"fmt"
	"strings"
	"time"

	"github.com/tyler-smith/go-bip39"
	"go.uber.org/zap"

	"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/maplefile/client"
	"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/maplefile/e2ee"
	"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/inputvalidation"
	"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/ratelimiter"
	"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/utils"
)
// RequestOTT requests a one-time token for login.
//
// The address is validated locally and checked against the per-email rate
// limiter before any network call. The limiter is deliberately never reset
// on success so repeated presses of the "request OTT" button stay throttled.
func (a *Application) RequestOTT(email string) error {
	if vErr := inputvalidation.ValidateEmail(email); vErr != nil {
		return vErr
	}
	rlErr := a.rateLimiter.Check(ratelimiter.OpRequestOTT, email)
	if rlErr != nil {
		a.logger.Warn("OTT request rate limited",
			zap.String("email", utils.MaskEmail(email)),
			zap.Error(rlErr))
		return rlErr
	}
	return a.authService.RequestOTT(a.ctx, email)
}
// Logout logs out the current user and deletes all local data (default behavior for security).
// Use LogoutWithOptions for more control over local data deletion.
//
// Equivalent to LogoutWithOptions(true): locally cached files and metadata
// are permanently removed along with the session.
func (a *Application) Logout() error {
	return a.LogoutWithOptions(true) // Default to deleting local data for security
}
// LogoutWithOptions logs out the current user with control over local data deletion.
// If deleteLocalData is true, all locally cached files and metadata will be permanently deleted.
// If deleteLocalData is false, local data is preserved for faster login next time.
//
// Cleanup is best-effort and runs in a fixed order (token manager, in-memory
// secrets, search index, local storage, then the session): every step logs
// its own failure and logout continues, so one broken subsystem cannot leave
// the user stuck logged in. Only the final session-clearing error is returned.
func (a *Application) LogoutWithOptions(deleteLocalData bool) error {
	// Get session before clearing so we still know whose secrets and storage
	// to clean up. The error is deliberately ignored: with no readable
	// session there is nothing user-specific left to clear.
	session, _ := a.authService.GetCurrentSession(a.ctx)
	var userEmail string
	if session != nil {
		userEmail = session.Email
	}
	// Stop token manager first, bounded by 3s so logout can never hang on it.
	stopCtx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	if err := a.tokenManager.Stop(stopCtx); err != nil {
		a.logger.Error("Failed to stop token manager during logout", zap.Error(err))
		// Continue with logout even if token manager stop failed
	}
	// Clear stored password from RAM
	if session != nil {
		if err := a.passwordStore.ClearPassword(session.Email); err != nil {
			a.logger.Error("Failed to clear stored password", zap.Error(err))
		} else {
			a.logger.Info("Password cleared from secure RAM", zap.String("email", utils.MaskEmail(session.Email)))
		}
		// Clear cached master key from memory (if it exists)
		if a.keyCache.HasMasterKey(session.Email) {
			if err := a.keyCache.ClearMasterKey(session.Email); err != nil {
				a.logger.Warn("Failed to clear cached master key", zap.Error(err))
			} else {
				a.logger.Info("Cached master key cleared from secure memory", zap.String("email", utils.MaskEmail(session.Email)))
			}
		} else {
			a.logger.Debug("No cached master key to clear (expected after app restart)", zap.String("email", utils.MaskEmail(session.Email)))
		}
	}
	// Close search index
	if err := a.searchService.Close(); err != nil {
		a.logger.Error("Failed to close search index during logout", zap.Error(err))
		// Continue with logout even if search cleanup fails
	} else {
		a.logger.Info("Search index closed")
	}
	// Handle local data based on user preference
	if deleteLocalData && userEmail != "" {
		// Delete all local data permanently
		if err := a.storageManager.DeleteUserData(userEmail); err != nil {
			a.logger.Error("Failed to delete local user data", zap.Error(err))
			// Continue with logout even if deletion fails
		} else {
			a.logger.Info("All local user data deleted", zap.String("email", utils.MaskEmail(userEmail)))
		}
	} else {
		// Just cleanup storage connections (keep data on disk)
		a.storageManager.Cleanup()
		a.logger.Info("User storage connections closed, local data preserved")
	}
	// Clear session last; this is the only step whose error is returned.
	return a.authService.Logout(a.ctx)
}
// GetLocalDataSize returns the size of locally stored data for the current user in bytes.
// This can be used to show the user how much data will be deleted on logout.
//
// When no session is available it reports zero bytes with no error.
func (a *Application) GetLocalDataSize() (int64, error) {
	session, sessErr := a.authService.GetCurrentSession(a.ctx)
	if sessErr != nil || session == nil {
		// Nobody logged in (or session unreadable): nothing to report.
		return 0, nil
	}
	total, sizeErr := a.storageManager.GetUserDataSize(session.Email)
	if sizeErr != nil {
		a.logger.Warn("Failed to get local data size", zap.Error(sizeErr))
		return 0, sizeErr
	}
	return total, nil
}
// IsLoggedIn checks if a user is logged in.
// It simply delegates to the auth service using the application context.
func (a *Application) IsLoggedIn() (bool, error) {
	return a.authService.IsLoggedIn(a.ctx)
}
// Register creates a new user account.
//
// Email, first name and last name are validated locally before the request
// is sent. The registration rate limit is never reset on success, keeping
// re-registration spam attempts throttled. Note that the password is not
// part of RegisterInput — it is only used client-side to derive encryption
// keys; the server validates the encrypted master key and salt by their
// presence and format.
func (a *Application) Register(input *client.RegisterInput) error {
	// Run the validations lazily so the first failure short-circuits.
	checks := []func() error{
		func() error { return inputvalidation.ValidateEmail(input.Email) },
		func() error { return inputvalidation.ValidateDisplayName(input.FirstName, "first name") },
		func() error { return inputvalidation.ValidateDisplayName(input.LastName, "last name") },
	}
	for _, check := range checks {
		if err := check(); err != nil {
			return err
		}
	}
	if limitErr := a.rateLimiter.Check(ratelimiter.OpRegister, input.Email); limitErr != nil {
		a.logger.Warn("Registration rate limited",
			zap.String("email", utils.MaskEmail(input.Email)),
			zap.Error(limitErr))
		return limitErr
	}
	return a.authService.Register(a.ctx, input)
}
// VerifyEmail verifies the email with the verification code.
// On success the verification rate limit for this address is reset.
func (a *Application) VerifyEmail(email, code string) error {
	if err := inputvalidation.ValidateEmail(email); err != nil {
		return err
	}
	if err := inputvalidation.ValidateOTT(code); err != nil {
		return err
	}
	if limitErr := a.rateLimiter.Check(ratelimiter.OpVerifyEmail, email); limitErr != nil {
		a.logger.Warn("Email verification rate limited",
			zap.String("email", utils.MaskEmail(email)),
			zap.Error(limitErr))
		return limitErr
	}
	verifyErr := a.authService.VerifyEmail(a.ctx, &client.VerifyEmailInput{
		Email: email,
		Code:  code,
	})
	if verifyErr != nil {
		return verifyErr
	}
	// Successful verification lifts the throttle for this address.
	a.rateLimiter.Reset(ratelimiter.OpVerifyEmail, email)
	return nil
}
// VerifyOTTResponse contains the OTT verification response with encrypted challenge.
// All key material fields are transported base64-encoded.
type VerifyOTTResponse struct {
	// Message is the human-readable status message from the server.
	Message string `json:"message"`
	// ChallengeID identifies this login challenge in the follow-up CompleteLogin call.
	ChallengeID string `json:"challengeId"`
	// EncryptedChallenge is the challenge the client must decrypt with its private key.
	EncryptedChallenge string `json:"encryptedChallenge"`
	// Salt is the KDF salt for deriving the key-encryption key from the password.
	Salt string `json:"salt"`
	// EncryptedMasterKey is the master key encrypted with the password-derived KEK.
	EncryptedMasterKey string `json:"encryptedMasterKey"`
	// EncryptedPrivateKey is the private key encrypted with the master key.
	EncryptedPrivateKey string `json:"encryptedPrivateKey"`
	// PublicKey is the user's public key.
	PublicKey string `json:"publicKey"`
	// KDFAlgorithm specifies which key derivation algorithm to use.
	// Value: "PBKDF2-SHA256"
	KDFAlgorithm string `json:"kdfAlgorithm"`
}
// VerifyOTT verifies the one-time token and returns the encrypted challenge
// together with the user's encrypted key material.
// When the server omits the KDF algorithm, it defaults to PBKDF2-SHA256.
func (a *Application) VerifyOTT(email, ott string) (*VerifyOTTResponse, error) {
	if err := inputvalidation.ValidateEmail(email); err != nil {
		return nil, err
	}
	if err := inputvalidation.ValidateOTT(ott); err != nil {
		return nil, err
	}
	if limitErr := a.rateLimiter.Check(ratelimiter.OpVerifyOTT, email); limitErr != nil {
		a.logger.Warn("OTT verification rate limited",
			zap.String("email", utils.MaskEmail(email)),
			zap.Error(limitErr))
		return nil, limitErr
	}
	resp, err := a.authService.VerifyOTT(a.ctx, email, ott)
	if err != nil {
		a.logger.Error("OTT verification failed", zap.Error(err))
		return nil, err
	}
	// Successful verification lifts the throttle for this address.
	a.rateLimiter.Reset(ratelimiter.OpVerifyOTT, email)

	out := &VerifyOTTResponse{
		Message:             resp.Message,
		ChallengeID:         resp.ChallengeID,
		EncryptedChallenge:  resp.EncryptedChallenge,
		Salt:                resp.Salt,
		EncryptedMasterKey:  resp.EncryptedMasterKey,
		EncryptedPrivateKey: resp.EncryptedPrivateKey,
		PublicKey:           resp.PublicKey,
		KDFAlgorithm:        resp.KDFAlgorithm,
	}
	// Older servers may omit the KDF algorithm; fall back to the default.
	if out.KDFAlgorithm == "" {
		out.KDFAlgorithm = e2ee.PBKDF2Algorithm
	}
	return out, nil
}
// CompleteLoginInput contains the data needed to complete login.
// The key-material fields are carried along so they can be stored in the
// session for later password verification and master-key caching.
type CompleteLoginInput struct {
	// Email is the account being logged in.
	Email string `json:"email"`
	// ChallengeID references the challenge issued by VerifyOTT.
	ChallengeID string `json:"challengeId"`
	// DecryptedData is the client's answer to the login challenge.
	DecryptedData string `json:"decryptedData"`
	// Password is kept client-side only, for secure RAM storage and key derivation.
	Password string `json:"password"`
	// Encrypted user data for future password verification
	Salt                string `json:"salt"`
	EncryptedMasterKey  string `json:"encryptedMasterKey"`
	EncryptedPrivateKey string `json:"encryptedPrivateKey"`
	PublicKey           string `json:"publicKey"`
	// KDFAlgorithm specifies which key derivation algorithm to use.
	// Value: "PBKDF2-SHA256"
	KDFAlgorithm string `json:"kdfAlgorithm"`
}
// CompleteLogin completes the login process with the decrypted challenge.
//
// After the server accepts the challenge answer, this method also (all
// best-effort — failures are logged but do not fail the login):
//   - resets all rate limits for the user,
//   - stores the encrypted key material in the session for later password
//     verification,
//   - keeps the password in secure RAM and caches the decrypted master key
//     so per-file operations need not re-derive it,
//   - initializes per-user local storage and the search index (rebuilding
//     the index in a background goroutine),
//   - starts the token manager for automatic token refresh.
func (a *Application) CompleteLogin(input *CompleteLoginInput) error {
	// Validate input
	if err := inputvalidation.ValidateEmail(input.Email); err != nil {
		return err
	}
	if err := inputvalidation.ValidatePassword(input.Password); err != nil {
		return err
	}
	if input.ChallengeID == "" {
		return fmt.Errorf("challenge ID is required")
	}
	if input.DecryptedData == "" {
		return fmt.Errorf("decrypted data is required")
	}
	// Check rate limit before making request
	if err := a.rateLimiter.Check(ratelimiter.OpCompleteLogin, input.Email); err != nil {
		a.logger.Warn("Login completion rate limited",
			zap.String("email", utils.MaskEmail(input.Email)),
			zap.Error(err))
		return err
	}
	clientInput := &client.CompleteLoginInput{
		Email:         input.Email,
		ChallengeID:   input.ChallengeID,
		DecryptedData: input.DecryptedData,
	}
	_, err := a.authService.CompleteLogin(a.ctx, clientInput)
	if err != nil {
		a.logger.Error("Login completion failed", zap.Error(err))
		return err
	}
	// Reset all rate limits for this user on successful login
	a.rateLimiter.ResetAll(input.Email)
	// Store encrypted user data in session for future password verification.
	// A session-read failure is silently skipped: the login itself already
	// succeeded and the remaining steps don't need the session.
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err == nil && session != nil {
		session.Salt = input.Salt
		session.EncryptedMasterKey = input.EncryptedMasterKey
		session.EncryptedPrivateKey = input.EncryptedPrivateKey
		session.PublicKey = input.PublicKey
		// Store KDF algorithm so VerifyPassword knows which algorithm to use
		session.KDFAlgorithm = input.KDFAlgorithm
		if session.KDFAlgorithm == "" {
			session.KDFAlgorithm = e2ee.PBKDF2Algorithm
		}
		// Update session with encrypted data
		if err := a.authService.UpdateSession(a.ctx, session); err != nil {
			a.logger.Warn("Failed to update session with encrypted data", zap.Error(err))
			// Continue anyway - password storage will still work
		} else {
			a.logger.Info("Encrypted user data stored in session for password verification")
		}
	}
	// Store password in secure RAM
	if err := a.passwordStore.StorePassword(input.Email, input.Password); err != nil {
		a.logger.Error("Failed to store password in RAM", zap.Error(err))
		// Don't fail login if password storage fails
	} else {
		a.logger.Info("Password stored securely in RAM for E2EE operations", zap.String("email", utils.MaskEmail(input.Email)))
	}
	// Cache master key for session to avoid re-decrypting for every file operation
	if input.Salt != "" && input.EncryptedMasterKey != "" && input.Password != "" {
		kdfAlgorithm := input.KDFAlgorithm
		if kdfAlgorithm == "" {
			kdfAlgorithm = e2ee.PBKDF2Algorithm
		}
		if err := a.cacheMasterKeyFromPassword(input.Email, input.Password, input.Salt, input.EncryptedMasterKey, kdfAlgorithm); err != nil {
			a.logger.Warn("Failed to cache master key during login", zap.Error(err))
			// Continue anyway - user can still use the app, just slower
		}
	}
	a.logger.Info("User logged in successfully", zap.String("email", utils.MaskEmail(input.Email)))
	// Initialize user-specific storage for the logged-in user
	if err := a.storageManager.InitializeForUser(input.Email); err != nil {
		a.logger.Error("Failed to initialize user storage", zap.Error(err))
		// Don't fail login - user can still use cloud features, just not local storage
	} else {
		a.logger.Info("User storage initialized", zap.String("email", utils.MaskEmail(input.Email)))
	}
	// Initialize search index for the logged-in user
	if err := a.searchService.Initialize(a.ctx, input.Email); err != nil {
		a.logger.Error("Failed to initialize search index", zap.Error(err))
		// Don't fail login if search initialization fails - it's not critical
		// The app can still function without search
	} else {
		a.logger.Info("Search index initialized", zap.String("email", utils.MaskEmail(input.Email)))
		// Rebuild search index from local data in the background
		userEmail := input.Email // Capture email before goroutine
		go func() {
			if err := a.rebuildSearchIndexForUser(userEmail); err != nil {
				a.logger.Warn("Failed to rebuild search index after login", zap.Error(err))
			}
		}()
	}
	// Start token manager for automatic token refresh
	a.tokenManager.Start()
	a.logger.Info("Token manager started for new session")
	return nil
}
// DecryptLoginChallenge decrypts the login challenge using the user's password.
// The kdfAlgorithm parameter specifies which key derivation function to use.
// If kdfAlgorithm is empty, it defaults to "PBKDF2-SHA256".
//
// Decryption chain: password + salt → KEK; KEK decrypts the master key;
// the master key decrypts the private key; the private key opens the
// anonymous-box challenge. All intermediate keys live in protected memory
// (SecureKeyChain / SecureBuffer) and are wiped via the deferred
// Clear/Destroy calls before this function returns.
//
// Returns the decrypted challenge re-encoded as base64 for the server.
// All inputs except password are expected to be standard base64.
func (a *Application) DecryptLoginChallenge(password, saltBase64, encryptedMasterKeyBase64, encryptedChallengeBase64, encryptedPrivateKeyBase64, publicKeyBase64, kdfAlgorithm string) (string, error) {
	// Default to PBKDF2-SHA256
	if kdfAlgorithm == "" {
		kdfAlgorithm = e2ee.PBKDF2Algorithm
	}
	a.logger.Debug("Decrypting login challenge", zap.String("kdf_algorithm", kdfAlgorithm))
	// Decode base64 inputs
	salt, err := base64.StdEncoding.DecodeString(saltBase64)
	if err != nil {
		a.logger.Error("Failed to decode salt", zap.Error(err))
		return "", fmt.Errorf("invalid salt encoding: %w", err)
	}
	encryptedChallenge, err := base64.StdEncoding.DecodeString(encryptedChallengeBase64)
	if err != nil {
		a.logger.Error("Failed to decode encrypted challenge", zap.Error(err))
		return "", fmt.Errorf("invalid challenge encoding: %w", err)
	}
	publicKey, err := base64.StdEncoding.DecodeString(publicKeyBase64)
	if err != nil {
		a.logger.Error("Failed to decode public key", zap.Error(err))
		return "", fmt.Errorf("invalid public key encoding: %w", err)
	}
	// Decode encrypted private key
	encryptedPrivateKeyCombined, err := base64.StdEncoding.DecodeString(encryptedPrivateKeyBase64)
	if err != nil {
		a.logger.Error("Failed to decode encrypted private key", zap.Error(err))
		return "", fmt.Errorf("invalid encrypted private key encoding: %w", err)
	}
	// Decode encrypted master key
	encryptedMasterKeyCombined, err := base64.StdEncoding.DecodeString(encryptedMasterKeyBase64)
	if err != nil {
		a.logger.Error("Failed to decode encrypted master key", zap.Error(err))
		return "", fmt.Errorf("invalid encrypted master key encoding: %w", err)
	}
	// 1. Derive KEK from password and salt using the selected KDF
	keychain, err := e2ee.NewSecureKeyChainWithAlgorithm(password, salt, kdfAlgorithm)
	if err != nil {
		a.logger.Error("Failed to create secure keychain", zap.Error(err), zap.String("kdf_algorithm", kdfAlgorithm))
		return "", fmt.Errorf("failed to derive key from password: %w", err)
	}
	defer keychain.Clear()
	// 2. Decrypt master key with KEK into protected memory
	// Auto-detect nonce size: web frontend uses 24-byte nonces (XSalsa20), native uses 12-byte (ChaCha20)
	masterKeyNonce, masterKeyCiphertext, err := e2ee.SplitNonceAndCiphertextSecretBox(encryptedMasterKeyCombined)
	if err != nil {
		a.logger.Error("Failed to split encrypted master key", zap.Error(err))
		return "", fmt.Errorf("invalid encrypted master key format: %w", err)
	}
	encryptedMasterKeyStruct := &e2ee.EncryptedKey{
		Ciphertext: masterKeyCiphertext,
		Nonce:      masterKeyNonce,
	}
	masterKey, err := keychain.DecryptMasterKeySecure(encryptedMasterKeyStruct)
	if err != nil {
		// Most common cause is an incorrect password, hence the hint.
		a.logger.Error("Failed to decrypt master key", zap.Error(err), zap.String("kdf_algorithm", kdfAlgorithm))
		return "", fmt.Errorf("failed to decrypt master key (wrong password?): %w", err)
	}
	defer masterKey.Destroy()
	// 3. Decrypt private key with master key into protected memory
	// Auto-detect nonce size based on the encrypted data
	privateKeyNonce, privateKeyCiphertext, err := e2ee.SplitNonceAndCiphertextSecretBox(encryptedPrivateKeyCombined)
	if err != nil {
		a.logger.Error("Failed to split encrypted private key", zap.Error(err))
		return "", fmt.Errorf("invalid encrypted private key format: %w", err)
	}
	encryptedPrivateKeyStruct := &e2ee.EncryptedKey{
		Ciphertext: privateKeyCiphertext,
		Nonce:      privateKeyNonce,
	}
	privateKey, err := e2ee.DecryptPrivateKeySecure(encryptedPrivateKeyStruct, masterKey)
	if err != nil {
		a.logger.Error("Failed to decrypt private key", zap.Error(err))
		return "", fmt.Errorf("failed to decrypt private key: %w", err)
	}
	defer privateKey.Destroy()
	// 4. Decrypt the challenge using the private key (NaCl anonymous box)
	decryptedChallenge, err := e2ee.DecryptAnonymousBox(encryptedChallenge, publicKey, privateKey.Bytes())
	if err != nil {
		a.logger.Error("Failed to decrypt challenge", zap.Error(err))
		return "", fmt.Errorf("failed to decrypt login challenge: %w", err)
	}
	// Convert decrypted challenge to base64 for sending to server
	decryptedChallengeBase64 := base64.StdEncoding.EncodeToString(decryptedChallenge)
	a.logger.Info("Successfully decrypted login challenge")
	return decryptedChallengeBase64, nil
}
// RegistrationKeys contains all the E2EE keys needed for registration.
// Every key field is transported base64-encoded (nonce prepended to ciphertext).
type RegistrationKeys struct {
	// Salt is the KDF salt used to derive the key-encryption key from the password.
	Salt string `json:"salt"`
	// EncryptedMasterKey is the master key encrypted with the password-derived KEK.
	EncryptedMasterKey string `json:"encryptedMasterKey"`
	// PublicKey is the user's asymmetric public key.
	PublicKey string `json:"publicKey"`
	// EncryptedPrivateKey is the private key encrypted with the master key.
	EncryptedPrivateKey string `json:"encryptedPrivateKey"`
	// EncryptedRecoveryKey is the recovery key encrypted with the master key.
	EncryptedRecoveryKey string `json:"encryptedRecoveryKey"`
	// MasterKeyEncryptedWithRecoveryKey lets the master key be restored from the recovery key.
	MasterKeyEncryptedWithRecoveryKey string `json:"masterKeyEncryptedWithRecoveryKey"`
	// RecoveryMnemonic is the 12-word BIP39 mnemonic phrase that must be shown to the user
	// The user MUST save this phrase securely - it's their only way to recover their account
	RecoveryMnemonic string `json:"recoveryMnemonic"`
}
// RecoveryInitiateResponse contains the response from initiating account recovery.
type RecoveryInitiateResponse struct {
	// Message is the human-readable status message from the server.
	Message string `json:"message"`
	// SessionID identifies this recovery session in the follow-up VerifyRecovery call.
	SessionID string `json:"sessionId"`
	// EncryptedChallenge is the challenge the client must answer to prove key ownership.
	EncryptedChallenge string `json:"encryptedChallenge"`
}
// InitiateRecovery starts the account recovery process for the given email.
// It returns the recovery session ID and the challenge the client must answer.
func (a *Application) InitiateRecovery(email string) (*RecoveryInitiateResponse, error) {
	if vErr := inputvalidation.ValidateEmail(email); vErr != nil {
		return nil, vErr
	}
	// NOTE(review): recovery shares the OpRequestOTT limiter bucket with
	// login OTT requests — confirm this coupling is intentional.
	if limitErr := a.rateLimiter.Check(ratelimiter.OpRequestOTT, email); limitErr != nil {
		a.logger.Warn("Recovery initiation rate limited",
			zap.String("email", utils.MaskEmail(email)),
			zap.Error(limitErr))
		return nil, limitErr
	}
	resp, err := a.authService.InitiateRecovery(a.ctx, email, "recovery_key")
	if err != nil {
		a.logger.Error("Recovery initiation failed", zap.Error(err))
		return nil, err
	}
	a.logger.Info("Recovery initiated successfully", zap.String("email", utils.MaskEmail(email)))
	out := &RecoveryInitiateResponse{
		Message:            resp.Message,
		SessionID:          resp.SessionID,
		EncryptedChallenge: resp.EncryptedChallenge,
	}
	return out, nil
}
// DecryptRecoveryChallengeInput contains the data needed to process recovery challenge.
type DecryptRecoveryChallengeInput struct {
	// RecoveryMnemonic is the user's 12-word BIP39 recovery phrase.
	RecoveryMnemonic string `json:"recoveryMnemonic"`
	// EncryptedChallenge is the base64 challenge returned by InitiateRecovery.
	EncryptedChallenge string `json:"encryptedChallenge"`
}
// DecryptRecoveryChallengeResult contains the result of processing recovery challenge.
type DecryptRecoveryChallengeResult struct {
	// DecryptedChallenge is the base64 challenge answer to send to the server.
	DecryptedChallenge string `json:"decryptedChallenge"`
	// IsValid reports that the recovery phrase and challenge passed validation.
	IsValid bool `json:"isValid"`
}
// DecryptRecoveryChallenge validates the recovery mnemonic and processes the challenge.
// Note: The backend currently sends an unencrypted challenge (base64-encoded plaintext).
// This function validates the recovery phrase format and passes through the challenge.
// When the backend implements proper encryption, this function will decrypt the challenge.
func (a *Application) DecryptRecoveryChallenge(input *DecryptRecoveryChallengeInput) (*DecryptRecoveryChallengeResult, error) {
	// The phrase must be present, a valid BIP39 mnemonic, and exactly 12 words.
	if input.RecoveryMnemonic == "" {
		return nil, fmt.Errorf("recovery mnemonic is required")
	}
	if !bip39.IsMnemonicValid(input.RecoveryMnemonic) {
		a.logger.Warn("Invalid recovery mnemonic format")
		return nil, fmt.Errorf("invalid recovery phrase: must be 12 valid BIP39 words")
	}
	wordCount := len(splitMnemonic(input.RecoveryMnemonic))
	if wordCount != 12 {
		return nil, fmt.Errorf("invalid recovery phrase: must be exactly 12 words, got %d", wordCount)
	}
	if input.EncryptedChallenge == "" {
		return nil, fmt.Errorf("encrypted challenge is required")
	}
	// Deriving the key exercises the phrase end-to-end and prepares for
	// real decryption once the backend starts encrypting the challenge.
	seed := bip39.NewSeed(input.RecoveryMnemonic, "")
	recoveryKey := seed[:32]
	a.logger.Debug("Recovery key derived successfully",
		zap.Int("key_length", len(recoveryKey)),
		zap.Int("word_count", wordCount))
	// TEMPORARY WORKAROUND: the backend still sends a base64-encoded
	// plaintext challenge (see backend TODO in recovery_initiate.go:108-113).
	// Decode purely to validate the base64, then send the payload back.
	raw, decodeErr := base64.StdEncoding.DecodeString(input.EncryptedChallenge)
	if decodeErr != nil {
		a.logger.Error("Failed to decode challenge", zap.Error(decodeErr))
		return nil, fmt.Errorf("invalid challenge format: %w", decodeErr)
	}
	passthrough := base64.StdEncoding.EncodeToString(raw)
	a.logger.Info("Recovery challenge processed successfully (backend workaround active)")
	return &DecryptRecoveryChallengeResult{
		DecryptedChallenge: passthrough,
		IsValid:            true,
	}, nil
}
// splitMnemonic splits a mnemonic phrase into its words.
// strings.Fields splits on any run of Unicode whitespace and never yields
// empty entries, replacing the previous three-layer hand-rolled splitter.
// Empty or all-whitespace input yields a zero-length result.
func splitMnemonic(mnemonic string) []string {
	return strings.Fields(mnemonic)
}
// splitByWhitespace splits a string by whitespace characters.
// Uses strings.Fields directly instead of delegating through splitString,
// which also generalizes splitting to all Unicode whitespace.
func splitByWhitespace(s string) []string {
	return strings.Fields(s)
}
// splitString splits a string into whitespace-separated words.
// strings.Fields replaces the previous hand-rolled scanner, which built
// each word with quadratic `word += string(r)` concatenation; it also
// handles all Unicode whitespace rather than only space/tab/CR/LF.
func splitString(s string) []string {
	return strings.Fields(s)
}
// RecoveryVerifyResponse contains the response from verifying recovery.
type RecoveryVerifyResponse struct {
	// Message is the human-readable status message from the server.
	Message string `json:"message"`
	// RecoveryToken authorizes the follow-up CompleteRecovery call.
	RecoveryToken string `json:"recoveryToken"`
	// CanResetCredentials reports whether the server will accept new credentials.
	CanResetCredentials bool `json:"canResetCredentials"`
}
// VerifyRecovery verifies the recovery challenge with the server.
// On success it returns the recovery token used to reset credentials.
func (a *Application) VerifyRecovery(sessionID, decryptedChallenge string) (*RecoveryVerifyResponse, error) {
	switch {
	case sessionID == "":
		return nil, fmt.Errorf("session ID is required")
	case decryptedChallenge == "":
		return nil, fmt.Errorf("decrypted challenge is required")
	}
	resp, err := a.authService.VerifyRecovery(a.ctx, &client.RecoveryVerifyInput{
		SessionID:          sessionID,
		DecryptedChallenge: decryptedChallenge,
	})
	if err != nil {
		a.logger.Error("Recovery verification failed", zap.Error(err))
		return nil, err
	}
	a.logger.Info("Recovery verification successful")
	out := &RecoveryVerifyResponse{
		Message:             resp.Message,
		RecoveryToken:       resp.RecoveryToken,
		CanResetCredentials: resp.CanResetCredentials,
	}
	return out, nil
}
// CompleteRecoveryInput contains the data needed to complete account recovery.
type CompleteRecoveryInput struct {
	// RecoveryToken is the token issued by VerifyRecovery.
	RecoveryToken string `json:"recoveryToken"`
	// RecoveryMnemonic is the user's 12-word BIP39 recovery phrase.
	RecoveryMnemonic string `json:"recoveryMnemonic"`
	// NewPassword is the replacement password to re-encrypt the keys with.
	NewPassword string `json:"newPassword"`
}
// CompleteRecoveryResponse contains the response from completing recovery.
type CompleteRecoveryResponse struct {
	// Message is the human-readable status message from the server.
	Message string `json:"message"`
	// Success reports whether the server accepted the new encrypted keys.
	Success bool `json:"success"`
}
// CompleteRecovery completes the account recovery by re-encrypting keys with a new password.
// This function:
// 1. Validates the recovery mnemonic
// 2. Derives the recovery key from the mnemonic
// 3. Generates new encryption keys with the new password
// 4. Sends the new encrypted keys to the server
//
// All sensitive material (recovery key, master key, private key) is held in
// SecureBuffers and wiped via the deferred Destroy/Clear calls; the plain
// byte slices are cleared as soon as they have been copied into protected
// memory. The new keychain uses PBKDF2-SHA256 for web-frontend compatibility.
func (a *Application) CompleteRecovery(input *CompleteRecoveryInput) (*CompleteRecoveryResponse, error) {
	// Validate inputs
	if input.RecoveryToken == "" {
		return nil, fmt.Errorf("recovery token is required")
	}
	if input.RecoveryMnemonic == "" {
		return nil, fmt.Errorf("recovery mnemonic is required")
	}
	if err := inputvalidation.ValidatePassword(input.NewPassword); err != nil {
		return nil, err
	}
	// Validate the mnemonic is a valid BIP39 phrase
	if !bip39.IsMnemonicValid(input.RecoveryMnemonic) {
		return nil, fmt.Errorf("invalid recovery phrase: must be 12 valid BIP39 words")
	}
	// Count words to ensure we have exactly 12
	words := len(splitMnemonic(input.RecoveryMnemonic))
	if words != 12 {
		return nil, fmt.Errorf("invalid recovery phrase: must be exactly 12 words, got %d", words)
	}
	a.logger.Info("Starting recovery completion - generating new encryption keys")
	// 1. Derive recovery key from mnemonic (first 32 bytes of the BIP39 seed)
	seed := bip39.NewSeed(input.RecoveryMnemonic, "")
	recoveryKeyBytes := seed[:32]
	recoveryKey, err := e2ee.NewSecureBuffer(recoveryKeyBytes)
	if err != nil {
		e2ee.ClearBytes(recoveryKeyBytes)
		return nil, fmt.Errorf("failed to create secure buffer for recovery key: %w", err)
	}
	defer recoveryKey.Destroy()
	// Wipe the unprotected copy now that it lives in the secure buffer.
	e2ee.ClearBytes(recoveryKeyBytes)
	// 2. Generate new salt for the new password
	newSalt, err := e2ee.GenerateSalt()
	if err != nil {
		return nil, fmt.Errorf("failed to generate new salt: %w", err)
	}
	// 3. Create new keychain with PBKDF2-SHA256 (for web frontend compatibility)
	newKeychain, err := e2ee.NewSecureKeyChainWithAlgorithm(input.NewPassword, newSalt, e2ee.PBKDF2Algorithm)
	if err != nil {
		return nil, fmt.Errorf("failed to create new keychain: %w", err)
	}
	defer newKeychain.Clear()
	// 4. Generate new master key
	masterKeyBytes, err := e2ee.GenerateMasterKey()
	if err != nil {
		return nil, fmt.Errorf("failed to generate new master key: %w", err)
	}
	masterKey, err := e2ee.NewSecureBuffer(masterKeyBytes)
	if err != nil {
		e2ee.ClearBytes(masterKeyBytes)
		return nil, fmt.Errorf("failed to create secure buffer for master key: %w", err)
	}
	defer masterKey.Destroy()
	e2ee.ClearBytes(masterKeyBytes)
	// 5. Encrypt master key with new KEK
	encryptedMasterKey, err := newKeychain.EncryptMasterKeySecretBox(masterKey.Bytes())
	if err != nil {
		return nil, fmt.Errorf("failed to encrypt master key: %w", err)
	}
	// 6. Generate new keypair
	newPublicKey, privateKeyBytes, err := e2ee.GenerateKeyPair()
	if err != nil {
		return nil, fmt.Errorf("failed to generate new keypair: %w", err)
	}
	privateKey, err := e2ee.NewSecureBuffer(privateKeyBytes)
	if err != nil {
		e2ee.ClearBytes(privateKeyBytes)
		return nil, fmt.Errorf("failed to create secure buffer for private key: %w", err)
	}
	defer privateKey.Destroy()
	e2ee.ClearBytes(privateKeyBytes)
	// 7. Encrypt private key with master key
	encryptedPrivateKey, err := e2ee.EncryptPrivateKeySecretBox(privateKey.Bytes(), masterKey.Bytes())
	if err != nil {
		return nil, fmt.Errorf("failed to encrypt private key: %w", err)
	}
	// 8. Encrypt recovery key with master key
	encryptedRecoveryKey, err := e2ee.EncryptRecoveryKeySecretBox(recoveryKey.Bytes(), masterKey.Bytes())
	if err != nil {
		return nil, fmt.Errorf("failed to encrypt recovery key: %w", err)
	}
	// 9. Encrypt master key with recovery key (for future recovery)
	masterKeyEncryptedWithRecoveryKey, err := e2ee.EncryptMasterKeyWithRecoveryKeySecretBox(masterKey.Bytes(), recoveryKey.Bytes())
	if err != nil {
		return nil, fmt.Errorf("failed to encrypt master key with recovery key: %w", err)
	}
	// 10. Convert all keys to base64 for transport (nonce prepended to ciphertext)
	newSaltBase64 := base64.StdEncoding.EncodeToString(newSalt)
	newPublicKeyBase64 := base64.StdEncoding.EncodeToString(newPublicKey)
	newEncryptedMasterKeyBase64 := base64.StdEncoding.EncodeToString(
		e2ee.CombineNonceAndCiphertext(encryptedMasterKey.Nonce, encryptedMasterKey.Ciphertext),
	)
	newEncryptedPrivateKeyBase64 := base64.StdEncoding.EncodeToString(
		e2ee.CombineNonceAndCiphertext(encryptedPrivateKey.Nonce, encryptedPrivateKey.Ciphertext),
	)
	newEncryptedRecoveryKeyBase64 := base64.StdEncoding.EncodeToString(
		e2ee.CombineNonceAndCiphertext(encryptedRecoveryKey.Nonce, encryptedRecoveryKey.Ciphertext),
	)
	newMasterKeyEncryptedWithRecoveryKeyBase64 := base64.StdEncoding.EncodeToString(
		e2ee.CombineNonceAndCiphertext(masterKeyEncryptedWithRecoveryKey.Nonce, masterKeyEncryptedWithRecoveryKey.Ciphertext),
	)
	// 11. Call API to complete recovery
	apiInput := &client.RecoveryCompleteInput{
		RecoveryToken:                        input.RecoveryToken,
		NewSalt:                              newSaltBase64,
		NewPublicKey:                         newPublicKeyBase64,
		NewEncryptedMasterKey:                newEncryptedMasterKeyBase64,
		NewEncryptedPrivateKey:               newEncryptedPrivateKeyBase64,
		NewEncryptedRecoveryKey:              newEncryptedRecoveryKeyBase64,
		NewMasterKeyEncryptedWithRecoveryKey: newMasterKeyEncryptedWithRecoveryKeyBase64,
	}
	resp, err := a.authService.CompleteRecovery(a.ctx, apiInput)
	if err != nil {
		a.logger.Error("Recovery completion failed", zap.Error(err))
		return nil, err
	}
	a.logger.Info("Recovery completed successfully - new encryption keys set")
	return &CompleteRecoveryResponse{
		Message: resp.Message,
		Success: resp.Success,
	}, nil
}
// GenerateRegistrationKeys generates all E2EE keys needed for user registration.
// GenerateRegistrationKeys generates all client-side E2EE key material needed to
// register a new account: the password-wrapped master key, a NaCl keypair, and a
// BIP39 recovery mnemonic with its associated recovery key.
//
// This function uses PBKDF2-SHA256 for key derivation and XSalsa20-Poly1305 (SecretBox)
// for symmetric encryption to ensure compatibility with the web frontend.
func (a *Application) GenerateRegistrationKeys(password string) (*RegistrationKeys, error) {
	// 1. Generate salt (16 bytes for PBKDF2)
	salt, err := e2ee.GenerateSalt()
	if err != nil {
		a.logger.Error("Failed to generate salt", zap.Error(err))
		return nil, err
	}

	// 2. Create secure keychain using PBKDF2-SHA256 (compatible with web frontend)
	// This derives KEK from password + salt using PBKDF2-SHA256 with 100,000 iterations
	keychain, err := e2ee.NewSecureKeyChainWithAlgorithm(password, salt, e2ee.PBKDF2Algorithm)
	if err != nil {
		a.logger.Error("Failed to create secure keychain", zap.Error(err))
		return nil, err
	}
	defer keychain.Clear() // Clear sensitive data when done

	// 3. Generate master key in protected memory
	masterKeyBytes, err := e2ee.GenerateMasterKey()
	if err != nil {
		a.logger.Error("Failed to generate master key", zap.Error(err))
		return nil, err
	}
	masterKey, err := e2ee.NewSecureBuffer(masterKeyBytes)
	if err != nil {
		e2ee.ClearBytes(masterKeyBytes)
		a.logger.Error("Failed to create secure buffer for master key", zap.Error(err))
		return nil, err
	}
	defer masterKey.Destroy()
	e2ee.ClearBytes(masterKeyBytes)

	// 4. Encrypt master key with KEK using XSalsa20-Poly1305 (SecretBox)
	// This produces 24-byte nonces compatible with web frontend's libsodium
	encryptedMasterKey, err := keychain.EncryptMasterKeySecretBox(masterKey.Bytes())
	if err != nil {
		a.logger.Error("Failed to encrypt master key", zap.Error(err))
		return nil, err
	}

	// 5. Generate NaCl keypair for asymmetric encryption
	publicKey, privateKeyBytes, err := e2ee.GenerateKeyPair()
	if err != nil {
		a.logger.Error("Failed to generate keypair", zap.Error(err))
		return nil, err
	}
	privateKey, err := e2ee.NewSecureBuffer(privateKeyBytes)
	if err != nil {
		e2ee.ClearBytes(privateKeyBytes)
		a.logger.Error("Failed to create secure buffer for private key", zap.Error(err))
		return nil, err
	}
	defer privateKey.Destroy()
	e2ee.ClearBytes(privateKeyBytes)

	// 6. Encrypt private key with master key using XSalsa20-Poly1305 (SecretBox)
	encryptedPrivateKey, err := e2ee.EncryptPrivateKeySecretBox(privateKey.Bytes(), masterKey.Bytes())
	if err != nil {
		a.logger.Error("Failed to encrypt private key", zap.Error(err))
		return nil, err
	}

	// 7. Generate BIP39 mnemonic (12 words) for account recovery
	// This matches the web frontend's approach for cross-platform compatibility
	entropy := make([]byte, 16) // 128 bits = 12 words
	if _, err := rand.Read(entropy); err != nil {
		a.logger.Error("Failed to generate entropy for recovery mnemonic", zap.Error(err))
		return nil, err
	}
	recoveryMnemonic, err := bip39.NewMnemonic(entropy)
	if err != nil {
		a.logger.Error("Failed to generate recovery mnemonic", zap.Error(err))
		return nil, err
	}
	a.logger.Info("Generated 12-word recovery mnemonic")

	// Convert mnemonic to seed (64 bytes via HMAC-SHA512) then take first 32 bytes
	// This matches web frontend's mnemonicToRecoveryKey() function
	seed := bip39.NewSeed(recoveryMnemonic, "") // Empty passphrase like web frontend
	recoveryKeyBytes := seed[:32]               // Use first 32 bytes as recovery key (aliases seed)
	recoveryKey, err := e2ee.NewSecureBuffer(recoveryKeyBytes)
	if err != nil {
		// BUGFIX: clear the entire 64-byte seed, not just the first 32 bytes
		// aliased by recoveryKeyBytes — seed[32:] is derived key material too.
		e2ee.ClearBytes(seed)
		a.logger.Error("Failed to create secure buffer for recovery key", zap.Error(err))
		return nil, err
	}
	defer recoveryKey.Destroy()
	// Clear the whole seed; clearing only recoveryKeyBytes (an alias of
	// seed[:32]) previously left seed[32:] lingering in memory.
	e2ee.ClearBytes(seed)

	// 8. Encrypt recovery key with master key using XSalsa20-Poly1305 (SecretBox)
	encryptedRecoveryKey, err := e2ee.EncryptRecoveryKeySecretBox(recoveryKey.Bytes(), masterKey.Bytes())
	if err != nil {
		a.logger.Error("Failed to encrypt recovery key", zap.Error(err))
		return nil, err
	}

	// 9. Encrypt master key with recovery key using XSalsa20-Poly1305 (SecretBox)
	masterKeyEncryptedWithRecoveryKey, err := e2ee.EncryptMasterKeyWithRecoveryKeySecretBox(masterKey.Bytes(), recoveryKey.Bytes())
	if err != nil {
		a.logger.Error("Failed to encrypt master key with recovery key", zap.Error(err))
		return nil, err
	}

	// Convert all keys to base64 for transport
	// Combine nonce and ciphertext for each encrypted key
	encryptedMasterKeyBase64 := base64.StdEncoding.EncodeToString(
		e2ee.CombineNonceAndCiphertext(encryptedMasterKey.Nonce, encryptedMasterKey.Ciphertext),
	)
	encryptedPrivateKeyBase64 := base64.StdEncoding.EncodeToString(
		e2ee.CombineNonceAndCiphertext(encryptedPrivateKey.Nonce, encryptedPrivateKey.Ciphertext),
	)
	encryptedRecoveryKeyBase64 := base64.StdEncoding.EncodeToString(
		e2ee.CombineNonceAndCiphertext(encryptedRecoveryKey.Nonce, encryptedRecoveryKey.Ciphertext),
	)
	masterKeyEncryptedWithRecoveryKeyBase64 := base64.StdEncoding.EncodeToString(
		e2ee.CombineNonceAndCiphertext(masterKeyEncryptedWithRecoveryKey.Nonce, masterKeyEncryptedWithRecoveryKey.Ciphertext),
	)
	a.logger.Info("Successfully generated E2EE registration keys using PBKDF2-SHA256 + XSalsa20-Poly1305")
	return &RegistrationKeys{
		Salt:                              base64.StdEncoding.EncodeToString(salt),
		EncryptedMasterKey:                encryptedMasterKeyBase64,
		PublicKey:                         base64.StdEncoding.EncodeToString(publicKey),
		EncryptedPrivateKey:               encryptedPrivateKeyBase64,
		EncryptedRecoveryKey:              encryptedRecoveryKeyBase64,
		MasterKeyEncryptedWithRecoveryKey: masterKeyEncryptedWithRecoveryKeyBase64,
		RecoveryMnemonic:                  recoveryMnemonic,
	}, nil
}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,444 @@
package app
import (
"encoding/json"
"fmt"
"time"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/maplefile/client"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/maplefile/e2ee"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/file"
)
// DashboardData contains the formatted dashboard data for the frontend.
// All nested values are pre-formatted strings/primitives ready for display.
type DashboardData struct {
	Summary           DashboardSummary      `json:"summary"`
	StorageUsageTrend StorageUsageTrend     `json:"storage_usage_trend"`
	RecentFiles       []DashboardRecentFile `json:"recent_files"`
}

// DashboardSummary contains summary statistics
type DashboardSummary struct {
	TotalFiles   int `json:"total_files"`
	TotalFolders int `json:"total_folders"`
	// StorageUsed / StorageLimit are human-readable strings (e.g. "1.50 GB"),
	// produced by formatStorageAmount.
	StorageUsed            string `json:"storage_used"`
	StorageLimit           string `json:"storage_limit"`
	StorageUsagePercentage int    `json:"storage_usage_percentage"`
}

// StorageUsageTrend contains storage usage trend data
type StorageUsageTrend struct {
	Period     string                  `json:"period"`
	DataPoints []StorageTrendDataPoint `json:"data_points"`
}

// StorageTrendDataPoint represents a single data point in the storage trend
type StorageTrendDataPoint struct {
	Date  string `json:"date"`
	Usage string `json:"usage"` // human-readable, e.g. "512.00 MB"
}

// DashboardRecentFile represents a recent file for dashboard display
type DashboardRecentFile struct {
	ID           string `json:"id"`
	CollectionID string `json:"collection_id"`
	// Name is the decrypted filename when decryption succeeded, otherwise the
	// placeholder "Encrypted File" (see GetDashboardData).
	Name        string `json:"name"`
	Size        string `json:"size"` // human-readable, e.g. "1.5 MB"
	SizeInBytes int64  `json:"size_in_bytes"`
	MimeType    string `json:"mime_type"`
	CreatedAt   string `json:"created_at"` // RFC3339
	// IsDecrypted reports whether Name/MimeType hold real (decrypted) values.
	IsDecrypted     bool   `json:"is_decrypted"`
	SyncStatus      string `json:"sync_status"`
	HasLocalContent bool   `json:"has_local_content"`
}
// GetDashboardData fetches and formats dashboard data from the backend.
// It restores tokens from the stored session, pre-decrypts collection keys
// supplied in the dashboard response, and enriches recent files with local
// repository data (or decrypts their metadata from cloud data) when possible.
func (a *Application) GetDashboardData() (*DashboardData, error) {
	// Get API client from auth service
	apiClient := a.authService.GetAPIClient()
	if apiClient == nil {
		return nil, fmt.Errorf("API client not available")
	}
	// Ensure we have a valid session with tokens
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil || session == nil {
		return nil, fmt.Errorf("no active session - please log in")
	}
	if !session.IsValid() {
		return nil, fmt.Errorf("session expired - please log in again")
	}
	// Ensure tokens are set in the API client
	// This is important after app restarts or hot reloads
	apiClient.SetTokens(session.AccessToken, session.RefreshToken)
	a.logger.Debug("Restored tokens to API client for dashboard request",
		zap.String("user_id", session.UserID),
		zap.Time("token_expires_at", session.ExpiresAt))
	// Check if access token is about to expire or already expired
	// (diagnostic logging only — the client refreshes automatically on 401)
	timeUntilExpiry := time.Until(session.ExpiresAt)
	now := time.Now()
	sessionAge := now.Sub(session.CreatedAt)
	a.logger.Debug("Token status check",
		zap.Time("now", now),
		zap.Time("expires_at", session.ExpiresAt),
		zap.Duration("time_until_expiry", timeUntilExpiry),
		zap.Duration("session_age", sessionAge))
	if timeUntilExpiry < 0 {
		a.logger.Warn("Access token already expired, refresh should happen automatically",
			zap.Duration("expired_since", -timeUntilExpiry))
	} else if timeUntilExpiry < 2*time.Minute {
		a.logger.Info("Access token expiring soon, refresh may be needed",
			zap.Duration("time_until_expiry", timeUntilExpiry))
	}
	// If session is very old (more than 1 day), recommend fresh login
	if sessionAge > 24*time.Hour {
		a.logger.Warn("Session is very old, consider logging out and logging in again",
			zap.Duration("session_age", sessionAge))
	}
	// Fetch dashboard data from backend
	// The client will automatically refresh the token if it gets a 401
	a.logger.Debug("Calling backend API for dashboard data")
	resp, err := apiClient.GetDashboard(a.ctx)
	if err != nil {
		a.logger.Error("Failed to fetch dashboard data",
			zap.Error(err),
			zap.String("error_type", fmt.Sprintf("%T", err)))
		// Check if this is an unauthorized error that should trigger token refresh
		if apiErr, ok := err.(*client.APIError); ok {
			a.logger.Error("API Error details",
				zap.Int("status", apiErr.Status),
				zap.String("title", apiErr.Title),
				zap.String("detail", apiErr.Detail))
		}
		return nil, fmt.Errorf("failed to fetch dashboard: %w", err)
	}
	if resp.Dashboard == nil {
		return nil, fmt.Errorf("dashboard data is empty")
	}
	dashboard := resp.Dashboard
	// Format summary data
	summary := DashboardSummary{
		TotalFiles:             dashboard.Summary.TotalFiles,
		TotalFolders:           dashboard.Summary.TotalFolders,
		StorageUsed:            formatStorageAmount(dashboard.Summary.StorageUsed),
		StorageLimit:           formatStorageAmount(dashboard.Summary.StorageLimit),
		StorageUsagePercentage: dashboard.Summary.StorageUsagePercentage,
	}
	// Format storage usage trend
	dataPoints := make([]StorageTrendDataPoint, len(dashboard.StorageUsageTrend.DataPoints))
	for i, dp := range dashboard.StorageUsageTrend.DataPoints {
		dataPoints[i] = StorageTrendDataPoint{
			Date:  dp.Date,
			Usage: formatStorageAmount(dp.Usage),
		}
	}
	trend := StorageUsageTrend{
		Period:     dashboard.StorageUsageTrend.Period,
		DataPoints: dataPoints,
	}
	// Get master key for decryption (needed for cloud-only files).
	// A missing master key is non-fatal: names simply stay encrypted.
	masterKey, cleanup, masterKeyErr := a.keyCache.GetMasterKey(session.Email)
	if masterKeyErr != nil {
		a.logger.Warn("Master key not available for dashboard file decryption",
			zap.Error(masterKeyErr))
	} else {
		defer cleanup()
	}
	// Build a cache of collection keys for efficient decryption
	// First, pre-populate from the dashboard response's collection_keys (if available)
	// This avoids making additional API calls for each collection
	collectionKeyCache := make(map[string][]byte) // collectionID -> decrypted collection key
	if masterKeyErr == nil && len(dashboard.CollectionKeys) > 0 {
		a.logger.Debug("Pre-populating collection key cache from dashboard response",
			zap.Int("collection_keys_count", len(dashboard.CollectionKeys)))
		for _, ck := range dashboard.CollectionKeys {
			// Decode the encrypted collection key; failures skip just that
			// collection rather than aborting the whole dashboard.
			collKeyCiphertext, decodeErr := tryDecodeBase64(ck.EncryptedCollectionKey)
			if decodeErr != nil {
				a.logger.Warn("Failed to decode collection key ciphertext from dashboard",
					zap.String("collection_id", ck.CollectionID),
					zap.Error(decodeErr))
				continue
			}
			collKeyNonce, decodeErr := tryDecodeBase64(ck.EncryptedCollectionKeyNonce)
			if decodeErr != nil {
				a.logger.Warn("Failed to decode collection key nonce from dashboard",
					zap.String("collection_id", ck.CollectionID),
					zap.Error(decodeErr))
				continue
			}
			// Handle combined ciphertext format (nonce prepended to ciphertext)
			actualCollKeyCiphertext := extractActualCiphertext(collKeyCiphertext, collKeyNonce)
			// Decrypt the collection key with the master key
			collectionKey, decryptErr := e2ee.DecryptCollectionKey(&e2ee.EncryptedKey{
				Ciphertext: actualCollKeyCiphertext,
				Nonce:      collKeyNonce,
			}, masterKey)
			if decryptErr != nil {
				a.logger.Warn("Failed to decrypt collection key from dashboard",
					zap.String("collection_id", ck.CollectionID),
					zap.Error(decryptErr))
				continue
			}
			// Cache the decrypted collection key
			collectionKeyCache[ck.CollectionID] = collectionKey
			a.logger.Debug("Cached collection key from dashboard response",
				zap.String("collection_id", ck.CollectionID))
		}
		a.logger.Info("Pre-populated collection key cache from dashboard",
			zap.Int("cached_keys", len(collectionKeyCache)))
	}
	// Format recent files (use local data if available, otherwise decrypt from cloud)
	recentFiles := make([]DashboardRecentFile, 0, len(dashboard.RecentFiles))
	for _, cloudFile := range dashboard.RecentFiles {
		// Debug: Log what we received from the API
		a.logger.Debug("Processing dashboard recent file",
			zap.String("file_id", cloudFile.ID),
			zap.String("collection_id", cloudFile.CollectionID),
			zap.Int("encrypted_file_key_ciphertext_len", len(cloudFile.EncryptedFileKey.Ciphertext)),
			zap.Int("encrypted_file_key_nonce_len", len(cloudFile.EncryptedFileKey.Nonce)),
			zap.String("encrypted_file_key_ciphertext_preview", truncateForLog(cloudFile.EncryptedFileKey.Ciphertext, 50)),
			zap.Int("encrypted_metadata_len", len(cloudFile.EncryptedMetadata)))
		// Default values for files not in local repository
		filename := "Encrypted File"
		isDecrypted := false
		syncStatus := file.SyncStatusCloudOnly // Default: cloud only
		hasLocalContent := false
		sizeInBytes := cloudFile.EncryptedFileSizeInBytes
		mimeType := "application/octet-stream"
		// Check local repository for this file to get decrypted name and sync status
		localFile, err := a.mustGetFileRepo().Get(cloudFile.ID)
		if err == nil && localFile != nil && localFile.State != file.StateDeleted {
			// File exists locally - use local data
			syncStatus = localFile.SyncStatus
			hasLocalContent = localFile.HasLocalContent()
			// Use decrypted filename if available
			if localFile.Name != "" {
				filename = localFile.Name
				isDecrypted = true
			}
			// Use decrypted mime type if available
			if localFile.MimeType != "" {
				mimeType = localFile.MimeType
			}
			// Use local size (decrypted) if available
			if localFile.DecryptedSizeInBytes > 0 {
				sizeInBytes = localFile.DecryptedSizeInBytes
			}
		} else if masterKeyErr == nil && cloudFile.EncryptedMetadata != "" {
			// File not in local repo, but we have the master key - try to decrypt from cloud data
			decryptedFilename, decryptedMimeType, decryptErr := a.decryptDashboardFileMetadata(
				cloudFile, masterKey, collectionKeyCache, apiClient)
			if decryptErr != nil {
				// Log at Warn level for better visibility during troubleshooting
				a.logger.Warn("Failed to decrypt dashboard file metadata",
					zap.String("file_id", cloudFile.ID),
					zap.String("collection_id", cloudFile.CollectionID),
					zap.Int("encrypted_file_key_ciphertext_len", len(cloudFile.EncryptedFileKey.Ciphertext)),
					zap.Int("encrypted_file_key_nonce_len", len(cloudFile.EncryptedFileKey.Nonce)),
					zap.Error(decryptErr))
			} else {
				filename = decryptedFilename
				mimeType = decryptedMimeType
				isDecrypted = true
			}
		}
		recentFiles = append(recentFiles, DashboardRecentFile{
			ID:              cloudFile.ID,
			CollectionID:    cloudFile.CollectionID,
			Name:            filename,
			Size:            formatFileSize(sizeInBytes),
			SizeInBytes:     sizeInBytes,
			MimeType:        mimeType,
			CreatedAt:       cloudFile.CreatedAt.Format(time.RFC3339),
			IsDecrypted:     isDecrypted,
			SyncStatus:      syncStatus.String(),
			HasLocalContent: hasLocalContent,
		})
	}
	dashboardData := &DashboardData{
		Summary:           summary,
		StorageUsageTrend: trend,
		RecentFiles:       recentFiles,
	}
	a.logger.Info("Dashboard data fetched successfully",
		zap.Int("total_files", summary.TotalFiles),
		zap.Int("recent_files", len(recentFiles)))
	return dashboardData, nil
}
// formatStorageAmount renders a StorageAmount as a display string such as
// "1.50 GB"; a zero value is shown as "0 B".
func formatStorageAmount(amount client.StorageAmount) string {
	if amount.Value != 0 {
		return fmt.Sprintf("%.2f %s", amount.Value, amount.Unit)
	}
	return "0 B"
}
// formatFileSize converts a byte count into a human-readable string using
// binary (1024-based) units, e.g. 1536 -> "1.5 KB". Values below 1 KB are
// rendered as whole bytes.
func formatFileSize(bytes int64) string {
	if bytes == 0 {
		return "0 B"
	}
	const unit = 1024
	if bytes < unit {
		return fmt.Sprintf("%d B", bytes)
	}
	div, exp := int64(unit), 0
	for n := bytes / unit; n >= unit; n /= unit {
		div *= unit
		exp++
	}
	// BUGFIX: include PB and EB. int64 can hold up to ~8 EiB, which drives exp
	// to 5; the previous 5-element slice panicked with index out of range for
	// any size >= 1 PiB (units[exp+1] needed up to index 6).
	units := []string{"B", "KB", "MB", "GB", "TB", "PB", "EB"}
	return fmt.Sprintf("%.1f %s", float64(bytes)/float64(div), units[exp+1])
}
// decryptDashboardFileMetadata decrypts file metadata for a dashboard recent file.
// Collection keys should already be pre-populated in the cache from the dashboard
// API response.
//
// Decryption chain: collection key (from cache) -> file key (unwrapped with the
// collection key) -> metadata JSON (decrypted with the file key).
//
// NOTE(review): masterKey is currently unused here — collection keys arrive
// pre-decrypted in collectionKeyCache — but is kept for interface stability.
func (a *Application) decryptDashboardFileMetadata(
	cloudFile client.RecentFileDashboard,
	masterKey []byte,
	collectionKeyCache map[string][]byte,
	apiClient *client.Client,
) (filename string, mimeType string, err error) {
	// Step 1: Get the collection key from cache (should be pre-populated from dashboard API response)
	collectionKey, exists := collectionKeyCache[cloudFile.CollectionID]
	if !exists {
		// Collection key was not provided by the dashboard API - this shouldn't happen
		// but we log a warning for debugging
		a.logger.Warn("Collection key not found in cache - dashboard API should have provided it",
			zap.String("collection_id", cloudFile.CollectionID),
			zap.String("file_id", cloudFile.ID))
		return "", "", fmt.Errorf("collection key not available for collection %s", cloudFile.CollectionID)
	}
	// Step 2: Get the file's encrypted_file_key
	// First try using the dashboard data, but if empty, fetch from the file endpoint directly
	var fileKeyCiphertext, fileKeyNonce []byte
	if cloudFile.EncryptedFileKey.Ciphertext != "" && cloudFile.EncryptedFileKey.Nonce != "" {
		// Use data from dashboard response
		var decodeErr error
		fileKeyCiphertext, decodeErr = tryDecodeBase64(cloudFile.EncryptedFileKey.Ciphertext)
		if decodeErr != nil {
			return "", "", fmt.Errorf("failed to decode file key ciphertext: %w", decodeErr)
		}
		fileKeyNonce, decodeErr = tryDecodeBase64(cloudFile.EncryptedFileKey.Nonce)
		if decodeErr != nil {
			return "", "", fmt.Errorf("failed to decode file key nonce: %w", decodeErr)
		}
	} else {
		// Dashboard response has empty encrypted_file_key, fetch from file endpoint
		// This endpoint properly deserializes the encrypted_file_key through the repository
		a.logger.Debug("Dashboard encrypted_file_key is empty, fetching from file endpoint",
			zap.String("file_id", cloudFile.ID))
		// Named fetchedFile (previously "file") so the local variable does not
		// shadow the imported domain "file" package.
		fetchedFile, fetchErr := apiClient.GetFile(a.ctx, cloudFile.ID)
		if fetchErr != nil {
			return "", "", fmt.Errorf("failed to fetch file %s: %w", cloudFile.ID, fetchErr)
		}
		if fetchedFile.EncryptedFileKey.Ciphertext == "" || fetchedFile.EncryptedFileKey.Nonce == "" {
			return "", "", fmt.Errorf("file endpoint also returned empty encrypted_file_key for file %s", cloudFile.ID)
		}
		var decodeErr error
		fileKeyCiphertext, decodeErr = tryDecodeBase64(fetchedFile.EncryptedFileKey.Ciphertext)
		if decodeErr != nil {
			return "", "", fmt.Errorf("failed to decode file key ciphertext from file endpoint: %w", decodeErr)
		}
		fileKeyNonce, decodeErr = tryDecodeBase64(fetchedFile.EncryptedFileKey.Nonce)
		if decodeErr != nil {
			return "", "", fmt.Errorf("failed to decode file key nonce from file endpoint: %w", decodeErr)
		}
	}
	// Handle combined ciphertext format for file key
	actualFileKeyCiphertext := extractActualCiphertext(fileKeyCiphertext, fileKeyNonce)
	fileKey, err := e2ee.DecryptFileKey(&e2ee.EncryptedKey{
		Ciphertext: actualFileKeyCiphertext,
		Nonce:      fileKeyNonce,
	}, collectionKey)
	if err != nil {
		return "", "", fmt.Errorf("failed to decrypt file key: %w", err)
	}
	// Step 3: Decrypt the file metadata with the file key
	// Use tryDecodeBase64 to handle multiple base64 encoding formats
	encryptedMetadataBytes, err := tryDecodeBase64(cloudFile.EncryptedMetadata)
	if err != nil {
		return "", "", fmt.Errorf("failed to decode encrypted metadata: %w", err)
	}
	// Split nonce and ciphertext from the combined metadata (auto-detect nonce size)
	metadataNonce, metadataCiphertext, err := e2ee.SplitNonceAndCiphertextAuto(encryptedMetadataBytes)
	if err != nil {
		return "", "", fmt.Errorf("failed to split metadata nonce/ciphertext: %w", err)
	}
	decryptedMetadata, err := e2ee.DecryptWithAlgorithm(metadataCiphertext, metadataNonce, fileKey)
	if err != nil {
		return "", "", fmt.Errorf("failed to decrypt metadata: %w", err)
	}
	// Step 4: Parse the decrypted metadata JSON
	var metadata struct {
		Name     string `json:"name"`
		MimeType string `json:"mime_type"`
	}
	if err := json.Unmarshal(decryptedMetadata, &metadata); err != nil {
		return "", "", fmt.Errorf("failed to parse metadata JSON: %w", err)
	}
	return metadata.Name, metadata.MimeType, nil
}
// truncateForLog shortens s for log output: strings longer than maxLen bytes
// are cut to maxLen and suffixed with "...".
func truncateForLog(s string, maxLen int) string {
	if len(s) > maxLen {
		return s[:maxLen] + "..."
	}
	return s
}

View file

@ -0,0 +1,451 @@
package app
import (
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
sysRuntime "runtime"
"strings"
"time"
"unicode"
"github.com/wailsapp/wails/v2/pkg/runtime"
"go.uber.org/zap"
)
// =============================================================================
// EXPORT TYPES AND UTILITIES
// =============================================================================

// ExportError represents an error that occurred during export
type ExportError struct {
	FileID       string `json:"file_id"`
	Filename     string `json:"filename"`
	CollectionID string `json:"collection_id"`
	ErrorMessage string `json:"error_message"`
	Timestamp    string `json:"timestamp"`
}

// ExportEstimate provides an estimate of what will be exported
type ExportEstimate struct {
	TotalCollections  int   `json:"total_collections"`
	OwnedCollections  int   `json:"owned_collections"`
	SharedCollections int   `json:"shared_collections"`
	TotalFiles        int   `json:"total_files"`
	TotalSizeBytes    int64 `json:"total_size_bytes"`
	// LocalFilesCount counts files whose decrypted content already exists on
	// disk; CloudOnlyCount counts those that would need downloading.
	LocalFilesCount int    `json:"local_files_count"`
	CloudOnlyCount  int    `json:"cloud_only_count"`
	EstimatedTime   string `json:"estimated_time"` // human-readable, e.g. "About 5 minutes"
}

// UserProfileExport represents exported user profile data
type UserProfileExport struct {
	ID         string `json:"id"`
	Email      string `json:"email"`
	FirstName  string `json:"first_name"`
	LastName   string `json:"last_name"`
	Name       string `json:"name"`
	Phone      string `json:"phone,omitempty"`
	Country    string `json:"country,omitempty"`
	Timezone   string `json:"timezone,omitempty"`
	CreatedAt  string `json:"created_at"`  // RFC3339
	ExportedAt string `json:"exported_at"` // RFC3339
}

// CollectionExportData represents a single collection in the export
type CollectionExportData struct {
	ID             string `json:"id"`
	Name           string `json:"name"`
	CollectionType string `json:"collection_type"`
	ParentID       string `json:"parent_id,omitempty"`
	FileCount      int    `json:"file_count"`
	CreatedAt      string `json:"created_at"`
	ModifiedAt     string `json:"modified_at"`
	IsShared       bool   `json:"is_shared"`
}

// CollectionsExport represents all exported collections
type CollectionsExport struct {
	OwnedCollections  []*CollectionExportData `json:"owned_collections"`
	SharedCollections []*CollectionExportData `json:"shared_collections"`
	TotalCount        int                     `json:"total_count"`
	ExportedAt        string                  `json:"exported_at"`
}

// FileExportData represents a single file's metadata in the export
type FileExportData struct {
	ID             string `json:"id"`
	Filename       string `json:"filename"`
	MimeType       string `json:"mime_type"`
	SizeBytes      int64  `json:"size_bytes"`
	CreatedAt      string `json:"created_at"`
	ModifiedAt     string `json:"modified_at"`
	CollectionID   string `json:"collection_id"`
	CollectionName string `json:"collection_name"`
}

// FilesMetadataExport represents all exported file metadata
type FilesMetadataExport struct {
	Files      []*FileExportData `json:"files"`
	TotalCount int               `json:"total_count"`
	TotalSize  int64             `json:"total_size_bytes"`
	ExportedAt string            `json:"exported_at"`
}

// FileExportResult represents the result of exporting a single file
type FileExportResult struct {
	FileID   string `json:"file_id"`
	Filename string `json:"filename"`
	// SourceType records where the bytes came from (local copy vs download).
	SourceType   string `json:"source_type"`
	SizeBytes    int64  `json:"size_bytes"`
	DestPath     string `json:"dest_path"`
	Success      bool   `json:"success"`
	ErrorMessage string `json:"error_message,omitempty"`
}

// ExportSummary is the final summary of the export operation
type ExportSummary struct {
	ExportedAt        string        `json:"exported_at"`
	ExportPath        string        `json:"export_path"`
	TotalCollections  int           `json:"total_collections"`
	OwnedCollections  int           `json:"owned_collections"`
	SharedCollections int           `json:"shared_collections"`
	TotalFiles        int           `json:"total_files"`
	FilesExported     int           `json:"files_exported"`
	FilesCopiedLocal  int           `json:"files_copied_local"`
	FilesDownloaded   int           `json:"files_downloaded"`
	FilesFailed       int           `json:"files_failed"`
	TotalSizeBytes    int64         `json:"total_size_bytes"`
	Errors            []ExportError `json:"errors,omitempty"`
}
// =============================================================================
// EXPORT SETUP OPERATIONS
// =============================================================================
// SelectExportDirectory opens a native directory-picker dialog and returns the
// path the user chose (empty if the dialog was cancelled).
func (a *Application) SelectExportDirectory() (string, error) {
	// Default the dialog to the user's home directory when it can be resolved;
	// otherwise let the OS pick its own starting location.
	defaultDir, homeErr := os.UserHomeDir()
	if homeErr != nil {
		defaultDir = ""
	}

	opts := runtime.OpenDialogOptions{
		DefaultDirectory:           defaultDir,
		Title:                      "Select Export Directory",
		CanCreateDirectories:       true,
		ShowHiddenFiles:            false,
		TreatPackagesAsDirectories: false,
	}
	dir, err := runtime.OpenDirectoryDialog(a.ctx, opts)
	if err != nil {
		a.logger.Error("Failed to open directory dialog", zap.Error(err))
		return "", fmt.Errorf("failed to open directory dialog: %w", err)
	}
	return dir, nil
}
// GetExportEstimate returns an estimate of what will be exported: collection
// and file counts, the bytes already available locally, and a rough duration.
// Dashboard and shared-collection failures are tolerated (best-effort).
func (a *Application) GetExportEstimate() (*ExportEstimate, error) {
	a.logger.Info("Getting export estimate")
	// Get current session
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil {
		return nil, fmt.Errorf("not authenticated: %w", err)
	}
	if !session.IsValid() {
		return nil, fmt.Errorf("session expired - please log in again")
	}
	apiClient := a.authService.GetAPIClient()
	apiClient.SetTokens(session.AccessToken, session.RefreshToken)
	// Get dashboard for storage stats (non-fatal if it fails; see note below)
	dashResp, err := apiClient.GetDashboard(a.ctx)
	if err != nil {
		a.logger.Warn("Failed to get dashboard for estimate", zap.Error(err))
	}
	// Get owned collections
	ownedCollections, err := a.ListCollections()
	if err != nil {
		return nil, fmt.Errorf("failed to list owned collections: %w", err)
	}
	// Get shared collections (best-effort: fall back to an empty list)
	sharedCollections, err := a.listSharedCollections()
	if err != nil {
		a.logger.Warn("Failed to list shared collections", zap.Error(err))
		sharedCollections = []*CollectionData{}
	}
	// Count files and check local availability
	totalFiles := 0
	localFilesCount := 0
	cloudOnlyCount := 0
	var totalSizeBytes int64 = 0
	allCollections := append(ownedCollections, sharedCollections...)
	for _, coll := range allCollections {
		totalFiles += coll.TotalFiles
	}
	// Check local file repository for files with decrypted content available
	// We check for FilePath (decrypted file) since that's what we copy during export
	localFiles, err := a.mustGetFileRepo().List()
	if err == nil {
		for _, f := range localFiles {
			if f.FilePath != "" {
				localFilesCount++
				totalSizeBytes += f.DecryptedSizeInBytes
			}
		}
	}
	// Clamp at zero: local repo may hold more files than the server reports.
	cloudOnlyCount = totalFiles - localFilesCount
	if cloudOnlyCount < 0 {
		cloudOnlyCount = 0
	}
	// Note: Dashboard has storage in formatted units (e.g., "1.5 GB")
	// We use our calculated totalSizeBytes instead for accuracy
	_ = dashResp // Suppress unused variable warning if dashboard call failed
	// Estimate time based on file count and sizes
	estimatedTime := "Less than a minute"
	if cloudOnlyCount > 0 {
		// Rough estimate: 1 file per second for cloud downloads
		seconds := cloudOnlyCount
		if seconds > 60 {
			minutes := seconds / 60
			if minutes > 60 {
				estimatedTime = fmt.Sprintf("About %d hours", minutes/60)
			} else {
				estimatedTime = fmt.Sprintf("About %d minutes", minutes)
			}
		} else {
			estimatedTime = fmt.Sprintf("About %d seconds", seconds)
		}
	}
	estimate := &ExportEstimate{
		TotalCollections:  len(allCollections),
		OwnedCollections:  len(ownedCollections),
		SharedCollections: len(sharedCollections),
		TotalFiles:        totalFiles,
		TotalSizeBytes:    totalSizeBytes,
		LocalFilesCount:   localFilesCount,
		CloudOnlyCount:    cloudOnlyCount,
		EstimatedTime:     estimatedTime,
	}
	a.logger.Info("Export estimate calculated",
		zap.Int("total_collections", estimate.TotalCollections),
		zap.Int("total_files", estimate.TotalFiles),
		zap.Int("local_files", estimate.LocalFilesCount),
		zap.Int("cloud_only", estimate.CloudOnlyCount))
	return estimate, nil
}
// CreateExportDirectory creates a fresh, timestamped export directory under
// basePath (e.g. MapleFile_Export_2025-01-02_15-04-05) together with its
// "files" subdirectory, and returns the export directory path.
func (a *Application) CreateExportDirectory(basePath string) (string, error) {
	// A per-run timestamp keeps repeated exports from colliding.
	stamp := time.Now().Format("2006-01-02_15-04-05")
	exportDir := filepath.Join(basePath, fmt.Sprintf("MapleFile_Export_%s", stamp))

	if err := os.MkdirAll(exportDir, 0755); err != nil {
		return "", fmt.Errorf("failed to create export directory: %w", err)
	}
	if err := os.MkdirAll(filepath.Join(exportDir, "files"), 0755); err != nil {
		return "", fmt.Errorf("failed to create files directory: %w", err)
	}
	return exportDir, nil
}
// OpenExportFolder opens the given directory in the platform's file manager.
// The path is validated (non-empty, absolute, existing directory) before being
// handed to an external command.
func (a *Application) OpenExportFolder(path string) error {
	// Security: Validate the path before passing to exec.Command
	if path == "" {
		return fmt.Errorf("path cannot be empty")
	}

	// Resolve to a cleaned absolute path so no relative or dot-laden path
	// reaches the external opener.
	absPath, err := filepath.Abs(path)
	if err != nil {
		return fmt.Errorf("invalid path: %w", err)
	}
	absPath = filepath.Clean(absPath)

	// The target must exist and be a directory.
	info, err := os.Stat(absPath)
	if err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("path does not exist: %s", absPath)
		}
		return fmt.Errorf("failed to access path: %w", err)
	}
	if !info.IsDir() {
		return fmt.Errorf("path is not a directory: %s", absPath)
	}

	a.logger.Info("Opening export folder",
		zap.String("path", absPath))

	// Map each supported OS to its file-manager launcher.
	openers := map[string]string{
		"darwin":  "open",
		"windows": "explorer",
		"linux":   "xdg-open",
	}
	opener, ok := openers[sysRuntime.GOOS]
	if !ok {
		return fmt.Errorf("unsupported operating system: %s", sysRuntime.GOOS)
	}
	return exec.Command(opener, absPath).Start()
}
// =============================================================================
// HELPER FUNCTIONS
// =============================================================================
// sanitizeFilename removes or replaces characters that are invalid in filenames.
// This function provides defense-in-depth against path traversal attacks by:
// 1. Extracting only the base filename (removing any path components)
// 2. Handling special directory references (. and ..)
// 3. Removing control characters
// 4. Replacing invalid filesystem characters
// 5. Handling Windows reserved names
// 6. Limiting filename length (without splitting multi-byte UTF-8 runes)
func sanitizeFilename(name string) string {
	// Step 1: Extract only the base filename to prevent path traversal
	// This handles cases like "../../../etc/passwd" -> "passwd"
	name = filepath.Base(name)

	// Step 2: Handle special directory references
	if name == "." || name == ".." || name == "" {
		return "unnamed"
	}

	// Step 3: Trim leading/trailing whitespace and dots
	// Windows doesn't allow filenames ending with dots or spaces
	name = strings.TrimSpace(name)
	name = strings.Trim(name, ".")
	if name == "" {
		return "unnamed"
	}

	// Step 4: Remove control characters (ASCII 0-31) and other non-printables
	result := make([]rune, 0, len(name))
	for _, r := range name {
		if r < 32 || !unicode.IsPrint(r) {
			continue // Skip control characters
		}
		result = append(result, r)
	}
	name = string(result)

	// Step 5: Replace invalid filesystem characters
	// These are invalid on Windows: \ / : * ? " < > |
	// Forward/back slashes are also dangerous for path traversal
	replacer := map[rune]rune{
		'/':  '-',
		'\\': '-',
		':':  '-',
		'*':  '-',
		'?':  '-',
		'"':  '\'',
		'<':  '(',
		'>':  ')',
		'|':  '-',
	}
	result = make([]rune, 0, len(name))
	for _, r := range name {
		if replacement, ok := replacer[r]; ok {
			result = append(result, replacement)
		} else {
			result = append(result, r)
		}
	}
	name = string(result)

	// Step 6: Handle Windows reserved names
	// These names are reserved regardless of extension: CON, PRN, AUX, NUL,
	// COM1-COM9, LPT1-LPT9
	upperName := strings.ToUpper(name)
	// Extract name without extension for comparison
	nameWithoutExt := upperName
	if idx := strings.LastIndex(upperName, "."); idx > 0 {
		nameWithoutExt = upperName[:idx]
	}
	reservedNames := map[string]bool{
		"CON": true, "PRN": true, "AUX": true, "NUL": true,
		"COM1": true, "COM2": true, "COM3": true, "COM4": true,
		"COM5": true, "COM6": true, "COM7": true, "COM8": true, "COM9": true,
		"LPT1": true, "LPT2": true, "LPT3": true, "LPT4": true,
		"LPT5": true, "LPT6": true, "LPT7": true, "LPT8": true, "LPT9": true,
	}
	if reservedNames[nameWithoutExt] {
		name = "_" + name
	}

	// Step 7: Limit filename length
	// Most filesystems support 255 bytes; we use 200 to leave room for path.
	// BUGFIX: truncate on rune boundaries — the previous byte-offset slicing
	// could cut a multi-byte UTF-8 sequence in half, yielding an invalid name.
	const maxFilenameLength = 200
	if len(name) > maxFilenameLength {
		// Try to preserve the extension
		ext := filepath.Ext(name)
		if len(ext) < maxFilenameLength-10 {
			base := truncateToRuneBoundary(name[:len(name)-len(ext)], maxFilenameLength-len(ext))
			name = base + ext
		} else {
			name = truncateToRuneBoundary(name, maxFilenameLength)
		}
	}

	// Final check
	if name == "" {
		return "unnamed"
	}
	return name
}

// truncateToRuneBoundary returns the longest prefix of s that ends on a UTF-8
// rune boundary and whose byte length does not exceed limit.
func truncateToRuneBoundary(s string, limit int) string {
	if len(s) <= limit {
		return s
	}
	cut := 0
	// Ranging over a string yields the byte offset of each rune start, so
	// cut always lands on a valid rune boundary <= limit.
	for i := range s {
		if i > limit {
			break
		}
		cut = i
	}
	return s[:cut]
}
// copyFile copies the contents of src into a newly created file at dst and
// flushes the destination to stable storage before returning.
func copyFile(src, dst string) error {
	in, err := os.Open(src)
	if err != nil {
		return err
	}
	defer in.Close()

	out, err := os.Create(dst)
	if err != nil {
		return err
	}
	defer out.Close()

	if _, err := io.Copy(out, in); err != nil {
		return err
	}
	// Sync so the copy survives a crash immediately after the export.
	return out.Sync()
}

View file

@ -0,0 +1,204 @@
package app
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"time"
"go.uber.org/zap"
)
// =============================================================================
// EXPORT DATA OPERATIONS (Profile, Collections, Metadata)
// =============================================================================
// ExportUserProfile fetches the authenticated user's profile from the backend,
// writes it to <exportPath>/profile.json as indented JSON, and returns the
// exported record.
func (a *Application) ExportUserProfile(exportPath string) (*UserProfileExport, error) {
	a.logger.Info("Exporting user profile", zap.String("export_path", exportPath))

	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil {
		return nil, fmt.Errorf("not authenticated: %w", err)
	}

	api := a.authService.GetAPIClient()
	api.SetTokens(session.AccessToken, session.RefreshToken)

	// Fetch the profile for the currently authenticated user.
	me, err := api.GetMe(a.ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to get user profile: %w", err)
	}

	profile := &UserProfileExport{
		ID:         me.ID,
		Email:      me.Email,
		FirstName:  me.FirstName,
		LastName:   me.LastName,
		Name:       me.Name,
		Phone:      me.Phone,
		Country:    me.Country,
		Timezone:   me.Timezone,
		CreatedAt:  me.CreatedAt.Format(time.RFC3339),
		ExportedAt: time.Now().Format(time.RFC3339),
	}

	// Persist as pretty-printed JSON alongside the rest of the export.
	data, err := json.MarshalIndent(profile, "", " ")
	if err != nil {
		return nil, fmt.Errorf("failed to marshal profile: %w", err)
	}
	profilePath := filepath.Join(exportPath, "profile.json")
	if err := os.WriteFile(profilePath, data, 0644); err != nil {
		return nil, fmt.Errorf("failed to write profile file: %w", err)
	}

	a.logger.Info("User profile exported successfully", zap.String("path", profilePath))
	return profile, nil
}
// ExportCollections exports all collections (owned and shared)
func (a *Application) ExportCollections(exportPath string) (*CollectionsExport, error) {
a.logger.Info("Exporting collections", zap.String("export_path", exportPath))
// Get owned collections
ownedCollections, err := a.ListCollections()
if err != nil {
return nil, fmt.Errorf("failed to list owned collections: %w", err)
}
// Get shared collections
sharedCollections, err := a.listSharedCollections()
if err != nil {
a.logger.Warn("Failed to list shared collections", zap.Error(err))
sharedCollections = []*CollectionData{}
}
// Convert to export format
ownedExport := make([]*CollectionExportData, len(ownedCollections))
for i, c := range ownedCollections {
ownedExport[i] = &CollectionExportData{
ID: c.ID,
Name: c.Name,
CollectionType: c.CollectionType,
ParentID: c.ParentID,
FileCount: c.TotalFiles,
CreatedAt: c.CreatedAt,
ModifiedAt: c.ModifiedAt,
IsShared: false,
}
}
sharedExport := make([]*CollectionExportData, len(sharedCollections))
for i, c := range sharedCollections {
sharedExport[i] = &CollectionExportData{
ID: c.ID,
Name: c.Name,
CollectionType: c.CollectionType,
ParentID: c.ParentID,
FileCount: c.TotalFiles,
CreatedAt: c.CreatedAt,
ModifiedAt: c.ModifiedAt,
IsShared: true,
}
}
export := &CollectionsExport{
OwnedCollections: ownedExport,
SharedCollections: sharedExport,
TotalCount: len(ownedExport) + len(sharedExport),
ExportedAt: time.Now().Format(time.RFC3339),
}
// Save to file
collectionsPath := filepath.Join(exportPath, "collections.json")
data, err := json.MarshalIndent(export, "", " ")
if err != nil {
return nil, fmt.Errorf("failed to marshal collections: %w", err)
}
if err := os.WriteFile(collectionsPath, data, 0644); err != nil {
return nil, fmt.Errorf("failed to write collections file: %w", err)
}
a.logger.Info("Collections exported successfully",
zap.String("path", collectionsPath),
zap.Int("owned", len(ownedExport)),
zap.Int("shared", len(sharedExport)))
return export, nil
}
// ExportAllFilesMetadata exports metadata for every file in every collection
// (owned and shared) to <exportPath>/files_metadata.json. Collections whose
// file listing fails are logged and skipped; they do not abort the export.
func (a *Application) ExportAllFilesMetadata(exportPath string) (*FilesMetadataExport, error) {
	a.logger.Info("Exporting all files metadata", zap.String("export_path", exportPath))

	owned, err := a.ListCollections()
	if err != nil {
		return nil, fmt.Errorf("failed to list owned collections: %w", err)
	}
	shared, err := a.listSharedCollections()
	if err != nil {
		a.logger.Warn("Failed to list shared collections", zap.Error(err))
		shared = []*CollectionData{}
	}

	// Non-nil so an empty export serializes as [] rather than null.
	entries := make([]*FileExportData, 0)
	var totalSize int64

	// gather appends per-file metadata for one set of collections;
	// per-collection listing failures are logged and skipped.
	gather := func(colls []*CollectionData) {
		for _, coll := range colls {
			files, err := a.ListFilesByCollection(coll.ID)
			if err != nil {
				a.logger.Warn("Failed to list files for collection",
					zap.String("collection_id", coll.ID),
					zap.Error(err))
				continue
			}
			for _, f := range files {
				entries = append(entries, &FileExportData{
					ID:             f.ID,
					Filename:       f.Filename,
					MimeType:       f.ContentType,
					SizeBytes:      f.Size,
					CreatedAt:      f.CreatedAt,
					ModifiedAt:     f.ModifiedAt,
					CollectionID:   coll.ID,
					CollectionName: coll.Name,
				})
				totalSize += f.Size
			}
		}
	}
	gather(owned)
	gather(shared)

	export := &FilesMetadataExport{
		Files:      entries,
		TotalCount: len(entries),
		TotalSize:  totalSize,
		ExportedAt: time.Now().Format(time.RFC3339),
	}

	// Persist the metadata export.
	metadataPath := filepath.Join(exportPath, "files_metadata.json")
	data, err := json.MarshalIndent(export, "", " ")
	if err != nil {
		return nil, fmt.Errorf("failed to marshal files metadata: %w", err)
	}
	if err := os.WriteFile(metadataPath, data, 0644); err != nil {
		return nil, fmt.Errorf("failed to write files metadata file: %w", err)
	}

	a.logger.Info("Files metadata exported successfully",
		zap.String("path", metadataPath),
		zap.Int("total_files", len(entries)),
		zap.Int64("total_size", totalSize))
	return export, nil
}

View file

@ -0,0 +1,346 @@
package app
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"time"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/maplefile/e2ee"
)
// =============================================================================
// EXPORT FILE CONTENT OPERATIONS
// =============================================================================
// ExportFileContent exports one file into <exportPath>/files/<collection>/.
// A locally cached decrypted copy is preferred; otherwise the file is
// downloaded from the cloud and decrypted on the fly. Per-file failures are
// reported through the returned result's ErrorMessage with a nil error, so
// bulk exports can continue past individual failures.
func (a *Application) ExportFileContent(fileID string, collectionName string, exportPath string) (*FileExportResult, error) {
	a.logger.Debug("Exporting file content",
		zap.String("file_id", fileID),
		zap.String("collection_name", collectionName))

	res := &FileExportResult{
		FileID:  fileID,
		Success: false,
	}

	// Ensure the per-collection destination folder exists, using a
	// filesystem-safe version of the collection name.
	targetDir := filepath.Join(exportPath, "files", sanitizeFilename(collectionName))
	if err := os.MkdirAll(targetDir, 0755); err != nil {
		res.ErrorMessage = fmt.Sprintf("failed to create directory: %v", err)
		return res, nil
	}

	// Prefer the local decrypted copy when one exists. HasLocalContent()
	// can be true for encrypted-only files, so we require a non-empty
	// decrypted FilePath (not EncryptedFilePath) before copying.
	localFile, err := a.mustGetFileRepo().Get(fileID)
	if err == nil && localFile != nil && localFile.FilePath != "" {
		res.Filename = localFile.Name
		res.SourceType = "local"
		res.SizeBytes = localFile.DecryptedSizeInBytes
		res.DestPath = filepath.Join(targetDir, sanitizeFilename(localFile.Name))

		if copyErr := copyFile(localFile.FilePath, res.DestPath); copyErr != nil {
			res.ErrorMessage = fmt.Sprintf("failed to copy local file: %v", copyErr)
			return res, nil
		}
		res.Success = true
		a.logger.Debug("File copied from local storage",
			zap.String("file_id", fileID),
			zap.String("dest", res.DestPath))
		return res, nil
	}

	// No usable local copy: download, decrypt, and write straight into the
	// export directory.
	res.SourceType = "cloud"
	filename, fileSize, err := a.downloadFileToPath(fileID, targetDir)
	if err != nil {
		res.ErrorMessage = fmt.Sprintf("failed to download file: %v", err)
		return res, nil
	}

	res.Filename = filename
	res.SizeBytes = fileSize
	res.DestPath = filepath.Join(targetDir, sanitizeFilename(filename))
	res.Success = true
	a.logger.Debug("File downloaded from cloud",
		zap.String("file_id", fileID),
		zap.String("dest", res.DestPath))
	return res, nil
}
// downloadFileToPath downloads and decrypts a single file directly into
// destDir, without any user dialog (used by bulk exports).
//
// The E2EE key chain is unwrapped top-down: master key (from the in-memory
// key cache) -> collection key -> per-file key. The file key then decrypts
// both the file metadata (to recover the real filename) and the downloaded
// content. Returns the decrypted filename, the decrypted size in bytes, and
// an error if any step of the chain fails.
func (a *Application) downloadFileToPath(fileID string, destDir string) (string, int64, error) {
	// Get current session for authentication
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil {
		return "", 0, fmt.Errorf("not authenticated: %w", err)
	}
	// Get master key from cache; the returned cleanup releases the cached
	// key reference when we are done with it.
	email := session.Email
	masterKey, cleanupMasterKey, err := a.keyCache.GetMasterKey(email)
	if err != nil {
		return "", 0, fmt.Errorf("encryption key not available: %w", err)
	}
	defer cleanupMasterKey()
	// Use SDK client which has automatic token refresh on 401
	apiClient := a.authService.GetAPIClient()
	// Step 1: Get file metadata using SDK (has automatic 401 retry)
	fileData, err := apiClient.GetFile(a.ctx, fileID)
	if err != nil {
		return "", 0, fmt.Errorf("failed to get file: %w", err)
	}
	// Step 2: Get collection to decrypt collection key using SDK (has automatic 401 retry)
	collData, err := apiClient.GetCollection(a.ctx, fileData.CollectionID)
	if err != nil {
		return "", 0, fmt.Errorf("failed to get collection: %w", err)
	}
	// Step 3: Decrypt collection key with master key
	// SDK returns EncryptedCollectionKey as an object with Ciphertext and Nonce fields
	// Use tryDecodeBase64 to handle multiple base64 encoding formats
	collKeyNonce, err := tryDecodeBase64(collData.EncryptedCollectionKey.Nonce)
	if err != nil {
		return "", 0, fmt.Errorf("failed to decode collection key nonce: %w", err)
	}
	collKeyCiphertext, err := tryDecodeBase64(collData.EncryptedCollectionKey.Ciphertext)
	if err != nil {
		return "", 0, fmt.Errorf("failed to decode collection key ciphertext: %w", err)
	}
	// Handle web frontend combined ciphertext format (nonce + encrypted_data)
	actualCollKeyCiphertext := extractActualCiphertext(collKeyCiphertext, collKeyNonce)
	collectionKey, err := e2ee.DecryptCollectionKey(&e2ee.EncryptedKey{
		Ciphertext: actualCollKeyCiphertext,
		Nonce:      collKeyNonce,
	}, masterKey)
	if err != nil {
		return "", 0, fmt.Errorf("failed to decrypt collection key: %w", err)
	}
	// Step 4: Decrypt file key with collection key
	// NOTE: The web frontend may send combined ciphertext (nonce + encrypted_data)
	// or separate fields. We handle both formats.
	// Use tryDecodeBase64 to handle multiple base64 encoding formats
	fileKeyNonce, err := tryDecodeBase64(fileData.EncryptedFileKey.Nonce)
	if err != nil {
		return "", 0, fmt.Errorf("failed to decode file key nonce: %w", err)
	}
	fileKeyCiphertext, err := tryDecodeBase64(fileData.EncryptedFileKey.Ciphertext)
	if err != nil {
		return "", 0, fmt.Errorf("failed to decode file key ciphertext: %w", err)
	}
	// Handle web frontend combined ciphertext format (nonce + encrypted_data)
	actualFileKeyCiphertext := extractActualCiphertext(fileKeyCiphertext, fileKeyNonce)
	fileKey, err := e2ee.DecryptFileKey(&e2ee.EncryptedKey{
		Ciphertext: actualFileKeyCiphertext,
		Nonce:      fileKeyNonce,
	}, collectionKey)
	if err != nil {
		return "", 0, fmt.Errorf("failed to decrypt file key: %w", err)
	}
	// Step 5: Decrypt metadata to get filename
	// Use tryDecodeBase64 to handle multiple base64 encoding formats
	encryptedMetadataBytes, err := tryDecodeBase64(fileData.EncryptedMetadata)
	if err != nil {
		return "", 0, fmt.Errorf("failed to decode metadata: %w", err)
	}
	// Nonce size is auto-detected by the splitter (see e2ee.SplitNonceAndCiphertextAuto).
	metadataNonce, metadataCiphertext, err := e2ee.SplitNonceAndCiphertextAuto(encryptedMetadataBytes)
	if err != nil {
		return "", 0, fmt.Errorf("failed to parse metadata: %w", err)
	}
	decryptedMetadata, err := e2ee.DecryptWithAlgorithm(metadataCiphertext, metadataNonce, fileKey)
	if err != nil {
		return "", 0, fmt.Errorf("failed to decrypt metadata: %w", err)
	}
	// Only the fields needed here are parsed; the metadata JSON uses "name"
	// for the filename.
	var metadata struct {
		Filename string `json:"name"`
		MimeType string `json:"mime_type"`
	}
	if err := json.Unmarshal(decryptedMetadata, &metadata); err != nil {
		return "", 0, fmt.Errorf("failed to parse metadata: %w", err)
	}
	// Step 6: Get presigned download URL using SDK (has automatic 401 retry)
	downloadResp, err := apiClient.GetPresignedDownloadURL(a.ctx, fileID)
	if err != nil {
		return "", 0, fmt.Errorf("failed to get download URL: %w", err)
	}
	// Step 7: Download encrypted file from S3 using SDK helper
	encryptedContent, err := apiClient.DownloadFromPresignedURL(a.ctx, downloadResp.FileURL)
	if err != nil {
		return "", 0, fmt.Errorf("failed to download file: %w", err)
	}
	// Step 8: Decrypt file content
	decryptedContent, err := e2ee.DecryptFile(encryptedContent, fileKey)
	if err != nil {
		return "", 0, fmt.Errorf("failed to decrypt file: %w", err)
	}
	// Step 9: Write decrypted content to destination. 0600 keeps the
	// decrypted plaintext readable by the owner only.
	destPath := filepath.Join(destDir, sanitizeFilename(metadata.Filename))
	if err := os.WriteFile(destPath, decryptedContent, 0600); err != nil {
		return "", 0, fmt.Errorf("failed to save file: %w", err)
	}
	return metadata.Filename, int64(len(decryptedContent)), nil
}
// ExportAllFiles exports every file from every collection (owned and shared)
// into exportPath and writes an export_manifest.json summary alongside them.
// Shared collections are exported under a "Shared - <name>" folder prefix to
// keep them distinct from owned collections.
//
// progressCallback, if non-nil, is invoked before each file export with the
// 1-based index, the total file count, and the collection name being
// processed. Individual file failures are recorded in the summary's Errors
// slice and do not abort the export.
func (a *Application) ExportAllFiles(exportPath string, progressCallback func(current, total int, filename string)) (*ExportSummary, error) {
	a.logger.Info("Exporting all files", zap.String("export_path", exportPath))

	summary := &ExportSummary{
		ExportedAt: time.Now().Format(time.RFC3339),
		ExportPath: exportPath,
		Errors:     make([]ExportError, 0),
	}

	// Gather all collections. Shared-collection failures are non-fatal.
	ownedCollections, err := a.ListCollections()
	if err != nil {
		return nil, fmt.Errorf("failed to list owned collections: %w", err)
	}
	summary.OwnedCollections = len(ownedCollections)

	sharedCollections, err := a.listSharedCollections()
	if err != nil {
		a.logger.Warn("Failed to list shared collections", zap.Error(err))
		sharedCollections = []*CollectionData{}
	}
	summary.SharedCollections = len(sharedCollections)
	summary.TotalCollections = summary.OwnedCollections + summary.SharedCollections

	// Build the flat work list of files to export.
	type fileToExport struct {
		fileID         string
		collectionID   string
		collectionName string
	}
	allFilesToExport := make([]fileToExport, 0)

	// Collect files from owned collections.
	for _, coll := range ownedCollections {
		files, err := a.ListFilesByCollection(coll.ID)
		if err != nil {
			a.logger.Warn("Failed to list files for collection",
				zap.String("collection_id", coll.ID),
				zap.Error(err))
			continue
		}
		for _, f := range files {
			allFilesToExport = append(allFilesToExport, fileToExport{
				fileID:         f.ID,
				collectionID:   coll.ID,
				collectionName: coll.Name,
			})
		}
	}

	// Collect files from shared collections.
	for _, coll := range sharedCollections {
		files, err := a.ListFilesByCollection(coll.ID)
		if err != nil {
			a.logger.Warn("Failed to list files for shared collection",
				zap.String("collection_id", coll.ID),
				zap.Error(err))
			continue
		}
		for _, f := range files {
			// Prefix shared collection names to distinguish them on disk.
			collName := "Shared - " + coll.Name
			allFilesToExport = append(allFilesToExport, fileToExport{
				fileID:         f.ID,
				collectionID:   coll.ID,
				collectionName: collName,
			})
		}
	}
	summary.TotalFiles = len(allFilesToExport)

	// Export each file, recording failures instead of aborting.
	for i, f := range allFilesToExport {
		if progressCallback != nil {
			progressCallback(i+1, summary.TotalFiles, f.collectionName)
		}
		result, err := a.ExportFileContent(f.fileID, f.collectionName, exportPath)
		if err != nil || result == nil || !result.Success {
			// BUGFIX: guard against a nil result. Previously result.Filename
			// was read unconditionally here, which would panic if
			// ExportFileContent ever returned (nil, err).
			summary.FilesFailed++
			errMsg := "unknown error"
			filename := ""
			if err != nil {
				errMsg = err.Error()
			} else if result != nil && result.ErrorMessage != "" {
				errMsg = result.ErrorMessage
			}
			if result != nil {
				filename = result.Filename
			}
			summary.Errors = append(summary.Errors, ExportError{
				FileID:       f.fileID,
				Filename:     filename,
				CollectionID: f.collectionID,
				ErrorMessage: errMsg,
				Timestamp:    time.Now().Format(time.RFC3339),
			})
			continue
		}
		summary.FilesExported++
		summary.TotalSizeBytes += result.SizeBytes
		if result.SourceType == "local" {
			summary.FilesCopiedLocal++
		} else {
			summary.FilesDownloaded++
		}
	}

	// Best-effort: persist the export manifest; failures are only logged.
	manifestPath := filepath.Join(exportPath, "export_manifest.json")
	manifestData, err := json.MarshalIndent(summary, "", " ")
	if err != nil {
		a.logger.Warn("Failed to marshal export manifest", zap.Error(err))
	} else {
		if err := os.WriteFile(manifestPath, manifestData, 0644); err != nil {
			a.logger.Warn("Failed to write export manifest", zap.Error(err))
		}
	}

	a.logger.Info("Export completed",
		zap.Int("files_exported", summary.FilesExported),
		zap.Int("files_failed", summary.FilesFailed),
		zap.Int("copied_local", summary.FilesCopiedLocal),
		zap.Int("downloaded", summary.FilesDownloaded))
	return summary, nil
}

View file

@ -0,0 +1,610 @@
package app
import (
"encoding/json"
"fmt"
"io"
"net/http"
"time"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/maplefile/client"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/maplefile/e2ee"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/file"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/inputvalidation"
)
// =============================================================================
// FILE QUERY OPERATIONS
// =============================================================================
// EmbeddedTagData represents a tag attached to a file (for display purposes).
// Name and Color hold the decrypted values produced by decryptTag; encrypted
// tag material is never exposed through this struct.
type EmbeddedTagData struct {
	ID    string `json:"id"`    // tag identifier from the backend
	Name  string `json:"name"`  // decrypted tag name
	Color string `json:"color"` // decrypted tag color
}
// FileDetailData represents detailed file information for the frontend.
// Filename, MimeType, and Size come from the file's decrypted metadata;
// EncryptedFileSizeInBytes is the size of the ciphertext stored server-side.
type FileDetailData struct {
	ID                       string `json:"id"`
	CollectionID             string `json:"collection_id"`
	Filename                 string `json:"filename"`  // decrypted original filename
	MimeType                 string `json:"mime_type"` // decrypted MIME type
	Size                     int64  `json:"size"`      // decrypted (plaintext) size in bytes
	EncryptedFileSizeInBytes int64  `json:"encrypted_file_size_in_bytes"`
	CreatedAt                string `json:"created_at"`  // timestamp string as provided by the backend
	ModifiedAt               string `json:"modified_at"` // timestamp string as provided by the backend
	Version                  uint64 `json:"version"`
	State                    string `json:"state"`
	// Sync status fields
	SyncStatus      string `json:"sync_status"`
	HasLocalContent bool   `json:"has_local_content"`
	LocalFilePath   string `json:"local_file_path,omitempty"` // path to local copy, if any
	// Tags assigned to this file
	Tags []*EmbeddedTagData `json:"tags"`
}
// ListFilesByCollection lists all files in a collection.
//
// For every file the cloud API returns, the method prefers decrypted
// metadata from the local repository. For cloud-only files it lazily fetches
// and decrypts the collection key (at most once per call) and then unwraps
// the per-file key to decrypt the file's metadata. Any failure along that
// chain is logged as a warning and the file falls back to placeholder values
// ("Encrypted File" / "application/octet-stream" / encrypted size) instead
// of being dropped from the listing. Files marked deleted — either in the
// cloud response or in the local repository — are skipped entirely.
// Embedded tags are decrypted per file when the master key is available;
// tags that fail to decrypt are skipped individually.
func (a *Application) ListFilesByCollection(collectionID string) ([]*FileData, error) {
	// Validate input
	if err := inputvalidation.ValidateCollectionID(collectionID); err != nil {
		return nil, err
	}
	apiClient := a.authService.GetAPIClient()
	files, err := apiClient.ListFilesByCollection(a.ctx, collectionID)
	if err != nil {
		a.logger.Error("Failed to list files",
			zap.String("collection_id", collectionID),
			zap.Error(err))
		return nil, fmt.Errorf("failed to list files: %w", err)
	}
	// Get collection key for decrypting file metadata (needed for cloud-only files)
	var collectionKey []byte
	collectionKeyReady := false
	// Lazy-load collection key only when needed: obtaining it costs an extra
	// HTTP round trip plus a master-key unwrap, so it is deferred until the
	// first cloud-only file and then cached for the rest of the loop.
	getCollectionKey := func() ([]byte, error) {
		if collectionKeyReady {
			return collectionKey, nil
		}
		// Get session for master key
		session, err := a.authService.GetCurrentSession(a.ctx)
		if err != nil {
			return nil, fmt.Errorf("failed to get session: %w", err)
		}
		// Get master key from cache
		masterKey, cleanup, err := a.keyCache.GetMasterKey(session.Email)
		if err != nil {
			return nil, fmt.Errorf("failed to get master key: %w", err)
		}
		defer cleanup()
		// Define a custom response struct that matches the actual backend API
		// (the client SDK's Collection struct has EncryptedCollectionKey as string)
		type collectionAPIResponse struct {
			EncryptedCollectionKey struct {
				Ciphertext string `json:"ciphertext"`
				Nonce      string `json:"nonce"`
			} `json:"encrypted_collection_key"`
		}
		// Make direct HTTP request to get collection
		req, err := http.NewRequestWithContext(a.ctx, "GET", apiClient.GetBaseURL()+"/api/v1/collections/"+collectionID, nil)
		if err != nil {
			return nil, fmt.Errorf("failed to create request: %w", err)
		}
		accessToken, _ := apiClient.GetTokens()
		req.Header.Set("Authorization", "Bearer "+accessToken)
		resp, err := a.httpClient.Do(req)
		if err != nil {
			return nil, fmt.Errorf("failed to fetch collection: %w", err)
		}
		defer resp.Body.Close()
		if resp.StatusCode != http.StatusOK {
			return nil, fmt.Errorf("failed to fetch collection: status %d", resp.StatusCode)
		}
		var collection collectionAPIResponse
		if err := json.NewDecoder(resp.Body).Decode(&collection); err != nil {
			return nil, fmt.Errorf("failed to decode collection response: %w", err)
		}
		// Decode collection key components
		// Use tryDecodeBase64 to handle multiple base64 encoding formats
		keyCiphertext, err := tryDecodeBase64(collection.EncryptedCollectionKey.Ciphertext)
		if err != nil {
			return nil, fmt.Errorf("failed to decode collection key ciphertext: %w", err)
		}
		keyNonce, err := tryDecodeBase64(collection.EncryptedCollectionKey.Nonce)
		if err != nil {
			return nil, fmt.Errorf("failed to decode collection key nonce: %w", err)
		}
		// Handle web frontend combined ciphertext format (nonce + encrypted_data)
		actualKeyCiphertext := extractActualCiphertext(keyCiphertext, keyNonce)
		// Decrypt collection key with master key. Note: assigns the outer
		// collectionKey so later calls can return the cached value.
		collectionKey, err = e2ee.DecryptCollectionKey(&e2ee.EncryptedKey{
			Ciphertext: actualKeyCiphertext,
			Nonce:      keyNonce,
		}, masterKey)
		if err != nil {
			return nil, fmt.Errorf("failed to decrypt collection key: %w", err)
		}
		collectionKeyReady = true
		return collectionKey, nil
	}
	result := make([]*FileData, 0, len(files))
	for _, cloudFile := range files {
		// Skip deleted files - don't show them in the GUI
		if cloudFile.State == file.StateDeleted {
			continue
		}
		// Default (placeholder) values, used whenever decryption is not
		// possible; fileSize falls back to the encrypted size.
		filename := "Encrypted File"
		contentType := "application/octet-stream"
		fileSize := cloudFile.EncryptedSizeInBytes
		// Check local repository for sync status
		syncStatus := file.SyncStatusCloudOnly // Default: cloud only (from API)
		hasLocalContent := false
		localFilePath := ""
		localFile, err := a.mustGetFileRepo().Get(cloudFile.ID)
		if err == nil && localFile != nil {
			// Skip if local file is marked as deleted
			if localFile.State == file.StateDeleted {
				continue
			}
			// File exists in local repo - use local data
			syncStatus = localFile.SyncStatus
			hasLocalContent = localFile.HasLocalContent()
			localFilePath = localFile.FilePath
			// Use decrypted data from local storage
			if localFile.Name != "" {
				filename = localFile.Name
			}
			if localFile.MimeType != "" {
				contentType = localFile.MimeType
			}
			if localFile.DecryptedSizeInBytes > 0 {
				fileSize = localFile.DecryptedSizeInBytes
			}
		} else {
			// File not in local repo - decrypt metadata from cloud.
			// Each failure below logs a warning and falls through, leaving
			// the placeholder filename/contentType/fileSize defaults intact.
			colKey, err := getCollectionKey()
			if err != nil {
				a.logger.Warn("Failed to get collection key for metadata decryption",
					zap.String("file_id", cloudFile.ID),
					zap.Error(err))
				// Continue with placeholder values
			} else {
				// Decrypt file key
				// NOTE: The web frontend may send combined ciphertext (nonce + encrypted_data)
				// or separate fields. We handle both formats.
				// Use tryDecodeBase64 to handle multiple base64 encoding formats
				fileKeyCiphertext, err := tryDecodeBase64(cloudFile.EncryptedFileKey.Ciphertext)
				if err != nil {
					a.logger.Warn("Failed to decode file key ciphertext",
						zap.String("file_id", cloudFile.ID),
						zap.Error(err))
				} else {
					fileKeyNonce, err := tryDecodeBase64(cloudFile.EncryptedFileKey.Nonce)
					if err != nil {
						a.logger.Warn("Failed to decode file key nonce",
							zap.String("file_id", cloudFile.ID),
							zap.Error(err))
					} else {
						// Handle web frontend combined ciphertext format (nonce + encrypted_data)
						actualFileKeyCiphertext := extractActualCiphertext(fileKeyCiphertext, fileKeyNonce)
						fileKey, err := e2ee.DecryptFileKey(&e2ee.EncryptedKey{
							Ciphertext: actualFileKeyCiphertext,
							Nonce:      fileKeyNonce,
						}, colKey)
						if err != nil {
							a.logger.Warn("Failed to decrypt file key",
								zap.String("file_id", cloudFile.ID),
								zap.Error(err))
						} else {
							// Decrypt metadata
							// Use tryDecodeBase64 to handle multiple base64 encoding formats
							encryptedMetadataBytes, err := tryDecodeBase64(cloudFile.EncryptedMetadata)
							if err != nil {
								a.logger.Warn("Failed to decode encrypted metadata",
									zap.String("file_id", cloudFile.ID),
									zap.Error(err))
							} else {
								metadataNonce, metadataCiphertext, err := e2ee.SplitNonceAndCiphertextAuto(encryptedMetadataBytes)
								if err != nil {
									a.logger.Warn("Failed to split metadata nonce/ciphertext",
										zap.String("file_id", cloudFile.ID),
										zap.Error(err))
								} else {
									decryptedMetadata, err := e2ee.DecryptWithAlgorithm(metadataCiphertext, metadataNonce, fileKey)
									if err != nil {
										a.logger.Warn("Failed to decrypt metadata",
											zap.String("file_id", cloudFile.ID),
											zap.Error(err))
									} else {
										// Parse decrypted metadata JSON
										var metadata struct {
											Filename string `json:"name"`
											MimeType string `json:"mime_type"`
											Size     int64  `json:"size"`
										}
										if err := json.Unmarshal(decryptedMetadata, &metadata); err != nil {
											a.logger.Warn("Failed to parse metadata JSON",
												zap.String("file_id", cloudFile.ID),
												zap.Error(err))
										} else {
											// Successfully decrypted - use actual values
											if metadata.Filename != "" {
												filename = metadata.Filename
											}
											if metadata.MimeType != "" {
												contentType = metadata.MimeType
											}
											if metadata.Size > 0 {
												fileSize = metadata.Size
											}
										}
									}
								}
							}
						}
					}
				}
			}
		}
		// Process embedded tags from the API response
		// The backend includes tags in the list response, so we decrypt them here
		// instead of making separate API calls per file
		embeddedTags := make([]*EmbeddedTagData, 0, len(cloudFile.Tags))
		if len(cloudFile.Tags) > 0 {
			// Get master key for tag decryption (we need it for each file with tags)
			// Note: This is inside the file loop, so we get a fresh key reference for each file
			session, err := a.authService.GetCurrentSession(a.ctx)
			if err == nil {
				masterKey, cleanup, err := a.keyCache.GetMasterKey(session.Email)
				if err == nil {
					// Decrypt each embedded tag
					for _, tagData := range cloudFile.Tags {
						// Convert to client.Tag format for decryption
						clientTag := &client.Tag{
							ID:              tagData.ID,
							EncryptedName:   tagData.EncryptedName,
							EncryptedColor:  tagData.EncryptedColor,
							EncryptedTagKey: tagData.EncryptedTagKey,
						}
						// Decrypt the tag
						decryptedTag, err := a.decryptTag(clientTag, masterKey)
						if err != nil {
							a.logger.Warn("Failed to decrypt embedded tag for file, skipping",
								zap.String("file_id", cloudFile.ID),
								zap.String("tag_id", tagData.ID),
								zap.Error(err))
							continue
						}
						embeddedTags = append(embeddedTags, &EmbeddedTagData{
							ID:    decryptedTag.ID,
							Name:  decryptedTag.Name,
							Color: decryptedTag.Color,
						})
					}
					// Cleanup is called explicitly (not deferred) so the key
					// reference is released per file rather than piling up
					// deferred calls across loop iterations.
					cleanup()
				} else {
					a.logger.Debug("Failed to get master key for tag decryption, skipping tags",
						zap.String("file_id", cloudFile.ID),
						zap.Error(err))
				}
			} else {
				a.logger.Debug("Failed to get session for tag decryption, skipping tags",
					zap.String("file_id", cloudFile.ID),
					zap.Error(err))
			}
		}
		result = append(result, &FileData{
			ID:              cloudFile.ID,
			CollectionID:    cloudFile.CollectionID,
			Filename:        filename,
			Size:            fileSize,
			ContentType:     contentType,
			CreatedAt:       cloudFile.CreatedAt.Format(time.RFC3339),
			ModifiedAt:      cloudFile.ModifiedAt.Format(time.RFC3339),
			SyncStatus:      syncStatus.String(),
			HasLocalContent: hasLocalContent,
			LocalFilePath:   localFilePath,
			Tags:            embeddedTags,
		})
	}
	a.logger.Info("Listed files",
		zap.String("collection_id", collectionID),
		zap.Int("count", len(result)))
	return result, nil
}
// GetFile retrieves a single file's details by ID
func (a *Application) GetFile(fileID string) (*FileDetailData, error) {
// Validate input
if err := inputvalidation.ValidateFileID(fileID); err != nil {
return nil, err
}
// Get current session
session, err := a.authService.GetCurrentSession(a.ctx)
if err != nil {
a.logger.Error("Failed to get current session", zap.Error(err))
return nil, fmt.Errorf("not authenticated: %w", err)
}
// Get the cached master key for decryption
masterKey, cleanup, err := a.keyCache.GetMasterKey(session.Email)
if err != nil {
a.logger.Error("Failed to get master key", zap.Error(err))
return nil, fmt.Errorf("failed to get master key: %w", err)
}
defer cleanup()
apiClient := a.authService.GetAPIClient()
apiClient.SetTokens(session.AccessToken, session.RefreshToken)
// Make HTTP request to get file details
// Note: Backend uses /api/v1/file/{id} (singular) not /api/v1/files/{id}
req, err := http.NewRequestWithContext(a.ctx, "GET", apiClient.GetBaseURL()+"/api/v1/file/"+fileID, nil)
if err != nil {
a.logger.Error("Failed to create get file request", zap.Error(err))
return nil, fmt.Errorf("failed to create request: %w", err)
}
req.Header.Set("Authorization", "Bearer "+session.AccessToken)
resp, err := a.httpClient.Do(req)
if err != nil {
a.logger.Error("Failed to send get file request", zap.Error(err))
return nil, fmt.Errorf("failed to send request: %w", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(resp.Body)
a.logger.Error("Failed to get file",
zap.Int("status", resp.StatusCode),
zap.String("body", string(body)))
return nil, fmt.Errorf("failed to get file: %s", string(body))
}
// Parse response
var fileResp struct {
ID string `json:"id"`
CollectionID string `json:"collection_id"`
EncryptedMetadata string `json:"encrypted_metadata"`
EncryptedFileKey struct {
Ciphertext string `json:"ciphertext"`
Nonce string `json:"nonce"`
} `json:"encrypted_file_key"`
EncryptedFileSizeInBytes int64 `json:"encrypted_file_size_in_bytes"`
CreatedAt string `json:"created_at"`
ModifiedAt string `json:"modified_at"`
Version uint64 `json:"version"`
State string `json:"state"`
Tags []struct {
ID string `json:"id"`
EncryptedName string `json:"encrypted_name"`
EncryptedColor string `json:"encrypted_color"`
EncryptedTagKey struct {
Ciphertext string `json:"ciphertext"`
Nonce string `json:"nonce"`
} `json:"encrypted_tag_key"`
} `json:"tags,omitempty"`
}
if err := json.NewDecoder(resp.Body).Decode(&fileResp); err != nil {
a.logger.Error("Failed to decode file response", zap.Error(err))
return nil, fmt.Errorf("failed to decode response: %w", err)
}
// Now we need to get the collection to decrypt the file key
// First get the collection's encrypted collection key
collReq, err := http.NewRequestWithContext(a.ctx, "GET", apiClient.GetBaseURL()+"/api/v1/collections/"+fileResp.CollectionID, nil)
if err != nil {
a.logger.Error("Failed to create get collection request", zap.Error(err))
return nil, fmt.Errorf("failed to create request: %w", err)
}
collReq.Header.Set("Authorization", "Bearer "+session.AccessToken)
collResp, err := a.httpClient.Do(collReq)
if err != nil {
a.logger.Error("Failed to get collection for file", zap.Error(err))
return nil, fmt.Errorf("failed to get collection: %w", err)
}
defer collResp.Body.Close()
if collResp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(collResp.Body)
a.logger.Error("Failed to get collection",
zap.Int("status", collResp.StatusCode),
zap.String("body", string(body)))
return nil, fmt.Errorf("failed to get collection: %s", string(body))
}
var collData struct {
EncryptedCollectionKey struct {
Ciphertext string `json:"ciphertext"`
Nonce string `json:"nonce"`
} `json:"encrypted_collection_key"`
}
if err := json.NewDecoder(collResp.Body).Decode(&collData); err != nil {
a.logger.Error("Failed to decode collection response", zap.Error(err))
return nil, fmt.Errorf("failed to decode collection: %w", err)
}
// Decrypt collection key with master key
// Use tryDecodeBase64 to handle multiple base64 encoding formats
collKeyNonce, err := tryDecodeBase64(collData.EncryptedCollectionKey.Nonce)
if err != nil {
a.logger.Error("Failed to decode collection key nonce", zap.Error(err))
return nil, fmt.Errorf("failed to decode key nonce: %w", err)
}
collKeyCiphertext, err := tryDecodeBase64(collData.EncryptedCollectionKey.Ciphertext)
if err != nil {
a.logger.Error("Failed to decode collection key ciphertext", zap.Error(err))
return nil, fmt.Errorf("failed to decode key ciphertext: %w", err)
}
// Handle web frontend combined ciphertext format (nonce + encrypted_data)
actualCollKeyCiphertext := extractActualCiphertext(collKeyCiphertext, collKeyNonce)
collectionKey, err := e2ee.DecryptCollectionKey(&e2ee.EncryptedKey{
Ciphertext: actualCollKeyCiphertext,
Nonce: collKeyNonce,
}, masterKey)
if err != nil {
a.logger.Error("Failed to decrypt collection key", zap.Error(err))
return nil, fmt.Errorf("failed to decrypt collection key: %w", err)
}
// Decrypt file key with collection key
// NOTE: The web frontend may send combined ciphertext (nonce + encrypted_data)
// or separate fields. We handle both formats.
// Use tryDecodeBase64 to handle multiple base64 encoding formats
fileKeyNonce, err := tryDecodeBase64(fileResp.EncryptedFileKey.Nonce)
if err != nil {
a.logger.Error("Failed to decode file key nonce", zap.Error(err))
return nil, fmt.Errorf("failed to decode file key nonce: %w", err)
}
fileKeyCiphertext, err := tryDecodeBase64(fileResp.EncryptedFileKey.Ciphertext)
if err != nil {
a.logger.Error("Failed to decode file key ciphertext", zap.Error(err))
return nil, fmt.Errorf("failed to decode file key ciphertext: %w", err)
}
// Handle web frontend combined ciphertext format (nonce + encrypted_data)
actualFileKeyCiphertext := extractActualCiphertext(fileKeyCiphertext, fileKeyNonce)
fileKey, err := e2ee.DecryptFileKey(&e2ee.EncryptedKey{
Ciphertext: actualFileKeyCiphertext,
Nonce: fileKeyNonce,
}, collectionKey)
if err != nil {
a.logger.Error("Failed to decrypt file key", zap.Error(err))
return nil, fmt.Errorf("failed to decrypt file key: %w", err)
}
// Decrypt file metadata with file key
// Use tryDecodeBase64 to handle multiple base64 encoding formats
encryptedMetadataBytes, err := tryDecodeBase64(fileResp.EncryptedMetadata)
if err != nil {
a.logger.Error("Failed to decode encrypted metadata", zap.Error(err))
return nil, fmt.Errorf("failed to decode metadata: %w", err)
}
// Split nonce and ciphertext (auto-detect nonce size: 12 for ChaCha20, 24 for XSalsa20)
metadataNonce, metadataCiphertext, err := e2ee.SplitNonceAndCiphertextAuto(encryptedMetadataBytes)
if err != nil {
a.logger.Error("Failed to split metadata nonce/ciphertext", zap.Error(err))
return nil, fmt.Errorf("failed to parse metadata: %w", err)
}
decryptedMetadata, err := e2ee.DecryptWithAlgorithm(metadataCiphertext, metadataNonce, fileKey)
if err != nil {
a.logger.Error("Failed to decrypt file metadata", zap.Error(err))
return nil, fmt.Errorf("failed to decrypt metadata: %w", err)
}
// Parse decrypted metadata JSON
var metadata struct {
Filename string `json:"name"`
MimeType string `json:"mime_type"`
Size int64 `json:"size"`
}
if err := json.Unmarshal(decryptedMetadata, &metadata); err != nil {
a.logger.Error("Failed to parse file metadata", zap.Error(err))
return nil, fmt.Errorf("failed to parse metadata: %w", err)
}
// Check local repository for sync status
syncStatus := file.SyncStatusCloudOnly // Default: cloud only
hasLocalContent := false
localFilePath := ""
localFile, err := a.mustGetFileRepo().Get(fileResp.ID)
if err == nil && localFile != nil {
syncStatus = localFile.SyncStatus
hasLocalContent = localFile.HasLocalContent()
localFilePath = localFile.FilePath
}
// Process embedded tags from the API response
embeddedTags := make([]*EmbeddedTagData, 0, len(fileResp.Tags))
for _, tagData := range fileResp.Tags {
// Convert the embedded tag structure to client.Tag format for decryption
clientTag := &client.Tag{
ID: tagData.ID,
EncryptedName: tagData.EncryptedName,
EncryptedColor: tagData.EncryptedColor,
EncryptedTagKey: &client.EncryptedTagKey{
Ciphertext: tagData.EncryptedTagKey.Ciphertext,
Nonce: tagData.EncryptedTagKey.Nonce,
},
}
// Decrypt the tag using the existing decryptTag helper
decryptedTag, err := a.decryptTag(clientTag, masterKey)
if err != nil {
a.logger.Warn("Failed to decrypt embedded tag, skipping",
zap.String("file_id", fileResp.ID),
zap.String("tag_id", tagData.ID),
zap.Error(err))
continue
}
embeddedTags = append(embeddedTags, &EmbeddedTagData{
ID: decryptedTag.ID,
Name: decryptedTag.Name,
Color: decryptedTag.Color,
})
a.logger.Debug("Decrypted embedded tag for file",
zap.String("file_id", fileResp.ID),
zap.String("tag_id", decryptedTag.ID),
zap.String("name", decryptedTag.Name),
zap.String("color", decryptedTag.Color))
}
return &FileDetailData{
ID: fileResp.ID,
CollectionID: fileResp.CollectionID,
Filename: metadata.Filename,
MimeType: metadata.MimeType,
Size: metadata.Size,
EncryptedFileSizeInBytes: fileResp.EncryptedFileSizeInBytes,
CreatedAt: fileResp.CreatedAt,
ModifiedAt: fileResp.ModifiedAt,
Version: fileResp.Version,
State: fileResp.State,
SyncStatus: syncStatus.String(),
HasLocalContent: hasLocalContent,
LocalFilePath: localFilePath,
Tags: embeddedTags,
}, nil
}

View file

@ -0,0 +1,191 @@
package app
import (
"fmt"
"os"
"time"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/file"
)
// =============================================================================
// FILE CLEANUP OPERATIONS
// =============================================================================
// DeleteFile soft-deletes a file from both the cloud and local storage.
// The cloud delete is attempted first; local cleanup only runs once the
// server has accepted the deletion, so the cloud remains the source of truth.
func (a *Application) DeleteFile(fileID string) error {
	a.logger.Info("DeleteFile called", zap.String("file_id", fileID))

	// The SDK client transparently refreshes the access token and retries
	// on 401 responses, so a single call is sufficient here.
	api := a.authService.GetAPIClient()
	err := api.DeleteFile(a.ctx, fileID)
	if err != nil {
		a.logger.Error("Failed to delete file from cloud",
			zap.String("file_id", fileID),
			zap.Error(err))
		return fmt.Errorf("failed to delete file: %w", err)
	}

	// Cloud delete succeeded - now clean up local data.
	a.cleanupLocalFile(fileID)

	a.logger.Info("File deleted successfully", zap.String("file_id", fileID))
	return nil
}
// cleanupLocalFile removes physical binary files immediately and marks the metadata as deleted.
// The metadata record is kept for background cleanup later (a retention window allows
// recovery or sync conflict resolution before the record is purged).
func (a *Application) cleanupLocalFile(fileID string) {
	// Get the local file record; if the file was never cached there is nothing to do.
	localFile, err := a.mustGetFileRepo().Get(fileID)
	if err != nil || localFile == nil {
		a.logger.Debug("No local file record to clean up", zap.String("file_id", fileID))
		return
	}

	// IMMEDIATELY delete all physical artifacts belonging to this file:
	// the decrypted copy, the encrypted copy, and the thumbnail.
	a.removeLocalArtifact(fileID, localFile.FilePath, "decrypted file")
	a.removeLocalArtifact(fileID, localFile.EncryptedFilePath, "encrypted file")
	a.removeLocalArtifact(fileID, localFile.ThumbnailPath, "thumbnail")

	// Mark the metadata record as deleted (will be cleaned up later by background process).
	// Clear the file paths since the physical files are now deleted.
	localFile.State = file.StateDeleted
	localFile.FilePath = ""
	localFile.EncryptedFilePath = ""
	localFile.ThumbnailPath = ""
	localFile.ModifiedAt = time.Now()
	if err := a.mustGetFileRepo().Update(localFile); err != nil {
		a.logger.Warn("Failed to mark local file metadata as deleted",
			zap.String("file_id", fileID),
			zap.Error(err))
		return
	}
	a.logger.Info("Marked local file metadata as deleted (will be cleaned up later)",
		zap.String("file_id", fileID))
	// Remove from search index so the deleted file stops appearing in results.
	if err := a.searchService.DeleteFile(fileID); err != nil {
		a.logger.Warn("Failed to remove file from search index",
			zap.String("file_id", fileID),
			zap.Error(err))
	}
}

// removeLocalArtifact deletes one physical file at path (skipped when empty)
// belonging to fileID, logging the outcome. label names the artifact kind and
// is concatenated into the log message ("decrypted file", "encrypted file",
// "thumbnail"), preserving the historical message text. A missing file
// (os.IsNotExist) is treated as already cleaned up and logged at no level.
func (a *Application) removeLocalArtifact(fileID, path, label string) {
	if path == "" {
		return
	}
	if err := os.Remove(path); err != nil {
		if !os.IsNotExist(err) {
			a.logger.Warn("Failed to delete local "+label,
				zap.String("file_id", fileID),
				zap.String("path", path),
				zap.Error(err))
		}
		return
	}
	a.logger.Info("Deleted local "+label,
		zap.String("file_id", fileID),
		zap.String("path", path))
}
// purgeDeletedFileMetadata permanently removes a deleted file's metadata record.
// This is called by the background cleanup process after a retention period.
// Failures are logged but not propagated; a later cleanup pass can retry.
func (a *Application) purgeDeletedFileMetadata(fileID string) {
	err := a.mustGetFileRepo().Delete(fileID)
	if err != nil {
		a.logger.Warn("Failed to purge deleted file metadata",
			zap.String("file_id", fileID),
			zap.Error(err))
		return
	}
	a.logger.Info("Purged deleted file metadata",
		zap.String("file_id", fileID))
}
// deletedFileRetentionPeriod is how long to keep deleted file metadata before purging.
// This allows for potential recovery or sync conflict resolution.
// cleanupDeletedFiles compares this against the record's ModifiedAt timestamp
// (set when the record was marked deleted) to decide when to purge.
const deletedFileRetentionPeriod = 7 * 24 * time.Hour // 7 days
// cleanupDeletedFiles runs in the background to clean up deleted files.
// It handles two cases:
//  1. Files marked as deleted that still have physical files on disk —
//     their binaries are cleaned up immediately.
//  2. Files marked as deleted past the retention period — their metadata
//     records are purged permanently.
func (a *Application) cleanupDeletedFiles() {
	a.logger.Info("Starting background cleanup of deleted files")

	localFiles, err := a.mustGetFileRepo().List()
	if err != nil {
		a.logger.Error("Failed to list local files for cleanup", zap.Error(err))
		return
	}

	var binariesCleaned, metadataPurged int
	now := time.Now()

	for _, lf := range localFiles {
		// Only deleted records are eligible for any cleanup.
		if lf.State != file.StateDeleted {
			continue
		}

		// Case 1: the record still references physical files — remove the
		// binaries now. cleanupLocalFile bumps ModifiedAt, so purging this
		// record is deferred to a later pass.
		stillHasBinaries := lf.FilePath != "" || lf.EncryptedFilePath != "" || lf.ThumbnailPath != ""
		if stillHasBinaries {
			a.logger.Info("Cleaning up orphaned binary files for deleted record",
				zap.String("file_id", lf.ID))
			a.cleanupLocalFile(lf.ID)
			binariesCleaned++
			continue
		}

		// Case 2: metadata older than the retention window is purged.
		if now.Sub(lf.ModifiedAt) > deletedFileRetentionPeriod {
			a.logger.Info("Purging deleted file metadata (past retention period)",
				zap.String("file_id", lf.ID),
				zap.Time("deleted_at", lf.ModifiedAt))
			a.purgeDeletedFileMetadata(lf.ID)
			metadataPurged++
		}
	}

	if binariesCleaned == 0 && metadataPurged == 0 {
		a.logger.Debug("Background cleanup completed, no cleanup needed")
		return
	}
	a.logger.Info("Background cleanup completed",
		zap.Int("binaries_cleaned", binariesCleaned),
		zap.Int("metadata_purged", metadataPurged))
}

View file

@ -0,0 +1,880 @@
package app
import (
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"os/exec"
"path/filepath"
sysRuntime "runtime"
"strings"
"github.com/wailsapp/wails/v2/pkg/runtime"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/maplefile/e2ee"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/file"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/inputvalidation"
)
// =============================================================================
// FILE DOWNLOAD OPERATIONS
// =============================================================================
// tryDecodeBase64 attempts to decode a base64 string using multiple encoding variants.
// The web frontend uses URL-safe base64 without padding (libsodium's
// URLSAFE_NO_PADDING default), while Go typically uses standard base64 with
// padding, so each variant is tried in order of likelihood until one succeeds.
func tryDecodeBase64(s string) ([]byte, error) {
	// Ordered by likelihood: the web frontend's URL-safe/no-padding format
	// first, Go's standard padded form second, then the remaining variants.
	variants := []*base64.Encoding{
		base64.RawURLEncoding,
		base64.StdEncoding,
		base64.RawStdEncoding,
		base64.URLEncoding,
	}
	var lastErr error
	for _, enc := range variants {
		data, err := enc.DecodeString(s)
		if err == nil {
			return data, nil
		}
		lastErr = err
	}
	return nil, fmt.Errorf("failed to decode base64 with any encoding variant (input length: %d, first 50 chars: %s, last error: %w)", len(s), truncateString(s, 50), lastErr)
}

// truncateString truncates a string to the specified length, appending "..."
// when truncation actually occurs.
func truncateString(s string, maxLen int) string {
	if len(s) > maxLen {
		return s[:maxLen] + "..."
	}
	return s
}
// GetFileDownloadURL gets a presigned download URL for a file.
// The caller must be authenticated; the URL comes from the backend's
// GetPresignedDownloadURLResponseDTO.
func (a *Application) GetFileDownloadURL(fileID string) (string, error) {
	// Reject malformed IDs before any network work.
	if err := inputvalidation.ValidateFileID(fileID); err != nil {
		return "", err
	}

	// A valid session supplies the bearer token for the request.
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil {
		a.logger.Error("Failed to get current session", zap.Error(err))
		return "", fmt.Errorf("not authenticated: %w", err)
	}

	// Note: Backend uses singular "file" not plural "files" in the path.
	endpoint := a.authService.GetAPIClient().GetBaseURL() + "/api/v1/file/" + fileID + "/download-url"
	req, err := http.NewRequestWithContext(a.ctx, "GET", endpoint, nil)
	if err != nil {
		a.logger.Error("Failed to create download URL request", zap.Error(err))
		return "", fmt.Errorf("failed to create request: %w", err)
	}
	req.Header.Set("Authorization", "Bearer "+session.AccessToken)

	resp, err := a.httpClient.Do(req)
	if err != nil {
		a.logger.Error("Failed to send download URL request", zap.Error(err))
		return "", fmt.Errorf("failed to send request: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp.Body)
		a.logger.Error("Failed to get download URL",
			zap.Int("status", resp.StatusCode),
			zap.String("body", string(body)))
		return "", fmt.Errorf("failed to get download URL: %s", string(body))
	}

	// Mirrors the backend's GetPresignedDownloadURLResponseDTO.
	var payload struct {
		PresignedDownloadURL      string `json:"presigned_download_url"`
		DownloadURLExpirationTime string `json:"download_url_expiration_time"`
		Success                   bool   `json:"success"`
		Message                   string `json:"message"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&payload); err != nil {
		a.logger.Error("Failed to decode download URL response", zap.Error(err))
		return "", fmt.Errorf("failed to decode response: %w", err)
	}
	return payload.PresignedDownloadURL, nil
}
// DownloadFile downloads, decrypts, and saves a file to the user's chosen location.
// If the file already exists locally, it copies from local storage instead of re-downloading.
//
// The E2EE key chain unwrapped here is: master key -> collection key -> file key -> content.
// Returns the chosen save path, or "" with a nil error if the user cancels the save dialog.
func (a *Application) DownloadFile(fileID string) (string, error) {
	// Validate input
	if err := inputvalidation.ValidateFileID(fileID); err != nil {
		return "", err
	}
	a.logger.Info("Starting file download", zap.String("file_id", fileID))
	// Fast path: if a decrypted copy already exists on disk, copy it to the
	// user's chosen destination instead of re-downloading from the cloud.
	localFile, err := a.mustGetFileRepo().Get(fileID)
	if err == nil && localFile != nil && localFile.FilePath != "" {
		// The repository record may be stale; verify the file is actually on disk.
		if _, statErr := os.Stat(localFile.FilePath); statErr == nil {
			a.logger.Info("File exists locally, using local copy",
				zap.String("file_id", fileID),
				zap.String("local_path", localFile.FilePath))
			// Open save dialog for user to choose location
			savePath, dialogErr := runtime.SaveFileDialog(a.ctx, runtime.SaveDialogOptions{
				Title:           "Save File As",
				DefaultFilename: localFile.Name,
			})
			if dialogErr != nil {
				return "", fmt.Errorf("failed to open save dialog: %w", dialogErr)
			}
			// An empty path means the user cancelled; this is not an error.
			if savePath == "" {
				a.logger.Info("User cancelled save dialog")
				return "", nil
			}
			// Copy local file to chosen location
			srcFile, copyErr := os.Open(localFile.FilePath)
			if copyErr != nil {
				return "", fmt.Errorf("failed to open local file: %w", copyErr)
			}
			defer srcFile.Close()
			dstFile, copyErr := os.Create(savePath)
			if copyErr != nil {
				return "", fmt.Errorf("failed to create destination file: %w", copyErr)
			}
			defer dstFile.Close()
			if _, copyErr := io.Copy(dstFile, srcFile); copyErr != nil {
				return "", fmt.Errorf("failed to copy file: %w", copyErr)
			}
			a.logger.Info("File saved from local copy",
				zap.String("file_id", fileID),
				zap.String("save_path", savePath))
			return savePath, nil
		}
	}
	// File not available locally, download from cloud
	a.logger.Info("File not available locally, downloading from cloud", zap.String("file_id", fileID))
	// Get current session for authentication
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil {
		a.logger.Error("Failed to get current session", zap.Error(err))
		return "", fmt.Errorf("not authenticated: %w", err)
	}
	// Get master key from cache; the returned cleanup func is deferred so the
	// key handle is released when this function returns.
	email := session.Email
	masterKey, cleanupMasterKey, err := a.keyCache.GetMasterKey(email)
	if err != nil {
		a.logger.Error("Failed to get master key from cache", zap.Error(err))
		return "", fmt.Errorf("encryption key not available: %w", err)
	}
	defer cleanupMasterKey()
	apiClient := a.authService.GetAPIClient()
	// Step 1: Get file metadata (encrypted metadata blob + wrapped file key).
	// Note: backend uses singular "file" in the path.
	fileReq, err := http.NewRequestWithContext(a.ctx, "GET", apiClient.GetBaseURL()+"/api/v1/file/"+fileID, nil)
	if err != nil {
		a.logger.Error("Failed to create get file request", zap.Error(err))
		return "", fmt.Errorf("failed to create request: %w", err)
	}
	fileReq.Header.Set("Authorization", "Bearer "+session.AccessToken)
	fileResp, err := a.httpClient.Do(fileReq)
	if err != nil {
		a.logger.Error("Failed to get file metadata", zap.Error(err))
		return "", fmt.Errorf("failed to get file: %w", err)
	}
	defer fileResp.Body.Close()
	if fileResp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(fileResp.Body)
		a.logger.Error("Failed to get file", zap.Int("status", fileResp.StatusCode), zap.String("body", string(body)))
		return "", fmt.Errorf("failed to get file: %s", string(body))
	}
	var fileData struct {
		ID               string `json:"id"`
		CollectionID     string `json:"collection_id"`
		EncryptedMetadata string `json:"encrypted_metadata"`
		EncryptedFileKey struct {
			Ciphertext string `json:"ciphertext"`
			Nonce      string `json:"nonce"`
		} `json:"encrypted_file_key"`
	}
	if err := json.NewDecoder(fileResp.Body).Decode(&fileData); err != nil {
		a.logger.Error("Failed to decode file response", zap.Error(err))
		return "", fmt.Errorf("failed to decode response: %w", err)
	}
	// Step 2: Get collection to decrypt collection key
	collReq, err := http.NewRequestWithContext(a.ctx, "GET", apiClient.GetBaseURL()+"/api/v1/collections/"+fileData.CollectionID, nil)
	if err != nil {
		a.logger.Error("Failed to create get collection request", zap.Error(err))
		return "", fmt.Errorf("failed to create request: %w", err)
	}
	collReq.Header.Set("Authorization", "Bearer "+session.AccessToken)
	collResp, err := a.httpClient.Do(collReq)
	if err != nil {
		a.logger.Error("Failed to get collection", zap.Error(err))
		return "", fmt.Errorf("failed to get collection: %w", err)
	}
	defer collResp.Body.Close()
	if collResp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(collResp.Body)
		a.logger.Error("Failed to get collection", zap.Int("status", collResp.StatusCode), zap.String("body", string(body)))
		return "", fmt.Errorf("failed to get collection: %s", string(body))
	}
	var collData struct {
		EncryptedCollectionKey struct {
			Ciphertext string `json:"ciphertext"`
			Nonce      string `json:"nonce"`
		} `json:"encrypted_collection_key"`
	}
	if err := json.NewDecoder(collResp.Body).Decode(&collData); err != nil {
		a.logger.Error("Failed to decode collection response", zap.Error(err))
		return "", fmt.Errorf("failed to decode collection: %w", err)
	}
	// Step 3: Decrypt collection key with master key
	// Use tryDecodeBase64 to handle multiple base64 encoding formats
	collKeyNonce, err := tryDecodeBase64(collData.EncryptedCollectionKey.Nonce)
	if err != nil {
		return "", fmt.Errorf("failed to decode collection key nonce: %w", err)
	}
	collKeyCiphertext, err := tryDecodeBase64(collData.EncryptedCollectionKey.Ciphertext)
	if err != nil {
		return "", fmt.Errorf("failed to decode collection key ciphertext: %w", err)
	}
	// Handle web frontend combined ciphertext format (nonce + encrypted_data)
	actualCollKeyCiphertext := extractActualCiphertext(collKeyCiphertext, collKeyNonce)
	collectionKey, err := e2ee.DecryptCollectionKey(&e2ee.EncryptedKey{
		Ciphertext: actualCollKeyCiphertext,
		Nonce:      collKeyNonce,
	}, masterKey)
	if err != nil {
		a.logger.Error("Failed to decrypt collection key", zap.Error(err))
		return "", fmt.Errorf("failed to decrypt collection key: %w", err)
	}
	// Step 4: Decrypt file key with collection key
	// NOTE: The web frontend may send combined ciphertext (nonce + encrypted_data)
	// or separate fields. We handle both formats.
	// Use tryDecodeBase64 to handle multiple base64 encoding formats
	fileKeyNonce, err := tryDecodeBase64(fileData.EncryptedFileKey.Nonce)
	if err != nil {
		return "", fmt.Errorf("failed to decode file key nonce: %w", err)
	}
	fileKeyCiphertext, err := tryDecodeBase64(fileData.EncryptedFileKey.Ciphertext)
	if err != nil {
		return "", fmt.Errorf("failed to decode file key ciphertext: %w", err)
	}
	// Handle web frontend combined ciphertext format (nonce + encrypted_data)
	actualFileKeyCiphertext := extractActualCiphertext(fileKeyCiphertext, fileKeyNonce)
	a.logger.Info("Decrypting file key",
		zap.Int("nonce_size", len(fileKeyNonce)),
		zap.Int("ciphertext_size", len(actualFileKeyCiphertext)),
		zap.Int("collection_key_size", len(collectionKey)))
	fileKey, err := e2ee.DecryptFileKey(&e2ee.EncryptedKey{
		Ciphertext: actualFileKeyCiphertext,
		Nonce:      fileKeyNonce,
	}, collectionKey)
	if err != nil {
		a.logger.Error("Failed to decrypt file key", zap.Error(err))
		return "", fmt.Errorf("failed to decrypt file key: %w", err)
	}
	a.logger.Info("File key decrypted successfully", zap.Int("file_key_size", len(fileKey)))
	// Step 5: Decrypt metadata to get filename
	// Use tryDecodeBase64 to handle URL-safe base64 without padding (libsodium format)
	encryptedMetadataBytes, err := tryDecodeBase64(fileData.EncryptedMetadata)
	if err != nil {
		return "", fmt.Errorf("failed to decode metadata: %w", err)
	}
	// Auto-detect nonce size (12 for ChaCha20, 24 for XSalsa20).
	metadataNonce, metadataCiphertext, err := e2ee.SplitNonceAndCiphertextAuto(encryptedMetadataBytes)
	if err != nil {
		return "", fmt.Errorf("failed to parse metadata: %w", err)
	}
	decryptedMetadata, err := e2ee.DecryptWithAlgorithm(metadataCiphertext, metadataNonce, fileKey)
	if err != nil {
		return "", fmt.Errorf("failed to decrypt metadata: %w", err)
	}
	var metadata struct {
		Filename string `json:"name"`
		MimeType string `json:"mime_type"`
	}
	if err := json.Unmarshal(decryptedMetadata, &metadata); err != nil {
		return "", fmt.Errorf("failed to parse metadata: %w", err)
	}
	// Step 6: Get presigned download URL
	downloadURL, err := a.GetFileDownloadURL(fileID)
	if err != nil {
		return "", fmt.Errorf("failed to get download URL: %w", err)
	}
	// Step 6.5: Validate download URL before use (SSRF protection)
	if err := inputvalidation.ValidateDownloadURL(downloadURL); err != nil {
		a.logger.Error("Download URL validation failed",
			zap.String("file_id", fileID),
			zap.Error(err))
		return "", fmt.Errorf("download URL validation failed: %w", err)
	}
	// Step 7: Download encrypted file from S3 (use large download client - no timeout for big files)
	a.logger.Info("Downloading encrypted file from S3", zap.String("filename", metadata.Filename))
	downloadResp, err := a.httpClient.GetLargeDownload(downloadURL)
	if err != nil {
		a.logger.Error("Failed to download file from S3", zap.Error(err))
		return "", fmt.Errorf("failed to download file: %w", err)
	}
	defer downloadResp.Body.Close()
	if downloadResp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(downloadResp.Body)
		a.logger.Error("S3 download failed", zap.Int("status", downloadResp.StatusCode), zap.String("body", string(body)))
		return "", fmt.Errorf("failed to download file from storage: status %d", downloadResp.StatusCode)
	}
	encryptedContent, err := io.ReadAll(downloadResp.Body)
	if err != nil {
		a.logger.Error("Failed to read encrypted content", zap.Error(err))
		return "", fmt.Errorf("failed to read file content: %w", err)
	}
	a.logger.Info("Downloaded encrypted file", zap.Int("encrypted_size", len(encryptedContent)))
	// Guard against an empty body: indexing encryptedContent[0] in the debug
	// log below would panic, and decryption cannot succeed on zero bytes.
	if len(encryptedContent) == 0 {
		a.logger.Error("Downloaded encrypted file is empty", zap.String("file_id", fileID))
		return "", fmt.Errorf("downloaded file is empty")
	}
	// Step 8: Decrypt file content
	a.logger.Info("Decrypting file content",
		zap.Int("encrypted_size", len(encryptedContent)),
		zap.Int("file_key_size", len(fileKey)),
		zap.Int("first_bytes_of_content", int(encryptedContent[0])))
	decryptedContent, err := e2ee.DecryptFile(encryptedContent, fileKey)
	if err != nil {
		a.logger.Error("Failed to decrypt file content",
			zap.Error(err),
			zap.Int("encrypted_size", len(encryptedContent)),
			zap.Int("file_key_size", len(fileKey)))
		return "", fmt.Errorf("failed to decrypt file: %w", err)
	}
	a.logger.Info("File content decrypted successfully",
		zap.Int("decrypted_size", len(decryptedContent)))
	// Step 9: Open save dialog for user to choose location
	savePath, err := runtime.SaveFileDialog(a.ctx, runtime.SaveDialogOptions{
		Title:           "Save File As",
		DefaultFilename: metadata.Filename,
	})
	if err != nil {
		a.logger.Error("Failed to open save dialog", zap.Error(err))
		return "", fmt.Errorf("failed to open save dialog: %w", err)
	}
	// User cancelled the dialog
	if savePath == "" {
		a.logger.Info("User cancelled save dialog")
		return "", nil
	}
	// Step 10: Write decrypted content to file (0600 = owner read/write only for security)
	if err := os.WriteFile(savePath, decryptedContent, 0600); err != nil {
		a.logger.Error("Failed to write file", zap.Error(err), zap.String("path", savePath))
		return "", fmt.Errorf("failed to save file: %w", err)
	}
	a.logger.Info("File downloaded and decrypted successfully",
		zap.String("file_id", fileID),
		zap.String("filename", metadata.Filename),
		zap.String("save_path", savePath),
		zap.Int("size", len(decryptedContent)))
	return savePath, nil
}
// OnloadFileResult represents the result of onloading a file for offline access
// (downloading and caching it locally without prompting the user with a save dialog).
type OnloadFileResult struct {
	// FileID is the identifier of the file that was onloaded.
	FileID        string `json:"file_id"`
	// Filename is the decrypted, human-readable file name.
	Filename      string `json:"filename"`
	// LocalFilePath is the path of the decrypted local copy on disk.
	LocalFilePath string `json:"local_file_path"`
	// Size is the file size in bytes (presumably the decrypted size — confirm against caller).
	Size          int64  `json:"size"`
	// Success reports whether the onload operation completed.
	Success       bool   `json:"success"`
	// Message is a human-readable status description for the UI.
	Message       string `json:"message"`
}
// OnloadFile downloads and stores a file locally for offline access (no save dialog)
func (a *Application) OnloadFile(fileID string) (*OnloadFileResult, error) {
// Validate input
if err := inputvalidation.ValidateFileID(fileID); err != nil {
return nil, err
}
a.logger.Info("Onloading file for offline access", zap.String("file_id", fileID))
// Get current session for authentication
session, err := a.authService.GetCurrentSession(a.ctx)
if err != nil {
a.logger.Error("Failed to get current session", zap.Error(err))
return nil, fmt.Errorf("not authenticated: %w", err)
}
// Get master key from cache
email := session.Email
masterKey, cleanupMasterKey, err := a.keyCache.GetMasterKey(email)
if err != nil {
a.logger.Error("Failed to get master key from cache", zap.Error(err))
return nil, fmt.Errorf("encryption key not available: %w", err)
}
defer cleanupMasterKey()
apiClient := a.authService.GetAPIClient()
// Step 1: Get file metadata
fileReq, err := http.NewRequestWithContext(a.ctx, "GET", apiClient.GetBaseURL()+"/api/v1/file/"+fileID, nil)
if err != nil {
return nil, fmt.Errorf("failed to create request: %w", err)
}
fileReq.Header.Set("Authorization", "Bearer "+session.AccessToken)
fileResp, err := a.httpClient.Do(fileReq)
if err != nil {
return nil, fmt.Errorf("failed to get file: %w", err)
}
defer fileResp.Body.Close()
if fileResp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(fileResp.Body)
return nil, fmt.Errorf("failed to get file: %s", string(body))
}
var fileData struct {
ID string `json:"id"`
CollectionID string `json:"collection_id"`
EncryptedMetadata string `json:"encrypted_metadata"`
FileNonce string `json:"file_nonce"`
EncryptedSizeInBytes int64 `json:"encrypted_file_size_in_bytes"`
EncryptedFileKey struct {
Ciphertext string `json:"ciphertext"`
Nonce string `json:"nonce"`
} `json:"encrypted_file_key"`
}
if err := json.NewDecoder(fileResp.Body).Decode(&fileData); err != nil {
return nil, fmt.Errorf("failed to decode response: %w", err)
}
// Step 2: Get collection to decrypt collection key
collReq, err := http.NewRequestWithContext(a.ctx, "GET", apiClient.GetBaseURL()+"/api/v1/collections/"+fileData.CollectionID, nil)
if err != nil {
return nil, fmt.Errorf("failed to create request: %w", err)
}
collReq.Header.Set("Authorization", "Bearer "+session.AccessToken)
collResp, err := a.httpClient.Do(collReq)
if err != nil {
return nil, fmt.Errorf("failed to get collection: %w", err)
}
defer collResp.Body.Close()
if collResp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(collResp.Body)
return nil, fmt.Errorf("failed to get collection: %s", string(body))
}
var collData struct {
EncryptedCollectionKey struct {
Ciphertext string `json:"ciphertext"`
Nonce string `json:"nonce"`
} `json:"encrypted_collection_key"`
}
if err := json.NewDecoder(collResp.Body).Decode(&collData); err != nil {
return nil, fmt.Errorf("failed to decode collection: %w", err)
}
// Step 3: Decrypt collection key with master key
// Use tryDecodeBase64 to handle multiple base64 encoding formats
collKeyNonce, err := tryDecodeBase64(collData.EncryptedCollectionKey.Nonce)
if err != nil {
return nil, fmt.Errorf("failed to decode collection key nonce: %w", err)
}
collKeyCiphertext, err := tryDecodeBase64(collData.EncryptedCollectionKey.Ciphertext)
if err != nil {
return nil, fmt.Errorf("failed to decode collection key ciphertext: %w", err)
}
// Handle web frontend combined ciphertext format (nonce + encrypted_data)
actualCollKeyCiphertext := extractActualCiphertext(collKeyCiphertext, collKeyNonce)
collectionKey, err := e2ee.DecryptCollectionKey(&e2ee.EncryptedKey{
Ciphertext: actualCollKeyCiphertext,
Nonce: collKeyNonce,
}, masterKey)
if err != nil {
return nil, fmt.Errorf("failed to decrypt collection key: %w", err)
}
// Step 4: Decrypt file key with collection key
// NOTE: The web frontend may send combined ciphertext (nonce + encrypted_data)
// or separate fields. We handle both formats.
// Use tryDecodeBase64 to handle multiple base64 encoding formats
fileKeyNonce, err := tryDecodeBase64(fileData.EncryptedFileKey.Nonce)
if err != nil {
return nil, fmt.Errorf("failed to decode file key nonce: %w", err)
}
fileKeyCiphertext, err := tryDecodeBase64(fileData.EncryptedFileKey.Ciphertext)
if err != nil {
return nil, fmt.Errorf("failed to decode file key ciphertext: %w", err)
}
// Handle web frontend combined ciphertext format (nonce + encrypted_data)
actualFileKeyCiphertext := extractActualCiphertext(fileKeyCiphertext, fileKeyNonce)
fileKey, err := e2ee.DecryptFileKey(&e2ee.EncryptedKey{
Ciphertext: actualFileKeyCiphertext,
Nonce: fileKeyNonce,
}, collectionKey)
if err != nil {
return nil, fmt.Errorf("failed to decrypt file key: %w", err)
}
// Step 5: Decrypt metadata to get filename
// Use tryDecodeBase64 to handle URL-safe base64 without padding (libsodium format)
encryptedMetadataBytes, err := tryDecodeBase64(fileData.EncryptedMetadata)
if err != nil {
return nil, fmt.Errorf("failed to decode metadata: %w", err)
}
metadataNonce, metadataCiphertext, err := e2ee.SplitNonceAndCiphertextAuto(encryptedMetadataBytes)
if err != nil {
return nil, fmt.Errorf("failed to parse metadata: %w", err)
}
decryptedMetadata, err := e2ee.DecryptWithAlgorithm(metadataCiphertext, metadataNonce, fileKey)
if err != nil {
return nil, fmt.Errorf("failed to decrypt metadata: %w", err)
}
var metadata struct {
Filename string `json:"name"`
MimeType string `json:"mime_type"`
}
if err := json.Unmarshal(decryptedMetadata, &metadata); err != nil {
return nil, fmt.Errorf("failed to parse metadata: %w", err)
}
// Step 6: Get presigned download URL
downloadURL, err := a.GetFileDownloadURL(fileID)
if err != nil {
return nil, fmt.Errorf("failed to get download URL: %w", err)
}
// Step 6.5: Validate download URL before use (SSRF protection)
if err := inputvalidation.ValidateDownloadURL(downloadURL); err != nil {
a.logger.Error("Download URL validation failed",
zap.String("file_id", fileID),
zap.Error(err))
return nil, fmt.Errorf("download URL validation failed: %w", err)
}
// Step 7: Download encrypted file from S3 (use large download client - no timeout for big files)
a.logger.Info("Downloading encrypted file from S3", zap.String("filename", metadata.Filename))
downloadResp, err := a.httpClient.GetLargeDownload(downloadURL)
if err != nil {
return nil, fmt.Errorf("failed to download file: %w", err)
}
defer downloadResp.Body.Close()
if downloadResp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("failed to download file from storage: status %d", downloadResp.StatusCode)
}
encryptedContent, err := io.ReadAll(downloadResp.Body)
if err != nil {
return nil, fmt.Errorf("failed to read file content: %w", err)
}
// Step 8: Decrypt file content
decryptedContent, err := e2ee.DecryptFile(encryptedContent, fileKey)
if err != nil {
return nil, fmt.Errorf("failed to decrypt file: %w", err)
}
// Step 9: Create local storage directory structure
dataDir, err := a.config.GetAppDataDirPath(a.ctx)
if err != nil {
return nil, fmt.Errorf("failed to get data directory: %w", err)
}
// Create files directory: <data_dir>/files/<collection_id>/
filesDir := filepath.Join(dataDir, "files", fileData.CollectionID)
if err := os.MkdirAll(filesDir, 0700); err != nil {
return nil, fmt.Errorf("failed to create files directory: %w", err)
}
// Save decrypted file
localFilePath := filepath.Join(filesDir, metadata.Filename)
if err := os.WriteFile(localFilePath, decryptedContent, 0600); err != nil {
return nil, fmt.Errorf("failed to save file: %w", err)
}
// Step 10: Update local file repository with sync status
localFile := &file.File{
ID: fileID,
CollectionID: fileData.CollectionID,
Name: metadata.Filename,
MimeType: metadata.MimeType,
FilePath: localFilePath,
DecryptedSizeInBytes: int64(len(decryptedContent)),
EncryptedSizeInBytes: fileData.EncryptedSizeInBytes,
SyncStatus: file.SyncStatusSynced,
EncryptedFileKey: file.EncryptedFileKeyData{
Ciphertext: fileData.EncryptedFileKey.Ciphertext,
Nonce: fileData.EncryptedFileKey.Nonce,
},
EncryptedMetadata: fileData.EncryptedMetadata,
FileNonce: fileData.FileNonce,
}
// Check if file already exists in local repo
existingFile, _ := a.mustGetFileRepo().Get(fileID)
if existingFile != nil {
if err := a.mustGetFileRepo().Update(localFile); err != nil {
a.logger.Warn("Failed to update local file record", zap.Error(err))
}
} else {
if err := a.mustGetFileRepo().Create(localFile); err != nil {
a.logger.Warn("Failed to create local file record", zap.Error(err))
}
}
a.logger.Info("File onloaded successfully",
zap.String("file_id", fileID),
zap.String("filename", metadata.Filename),
zap.String("local_path", localFilePath),
zap.Int("size", len(decryptedContent)))
return &OnloadFileResult{
FileID: fileID,
Filename: metadata.Filename,
LocalFilePath: localFilePath,
Size: int64(len(decryptedContent)),
Success: true,
Message: "File downloaded for offline access",
}, nil
}
// OffloadFile removes the local copy of a file while keeping it in the cloud.
// Local plaintext, encrypted cache, and thumbnail files are deleted from disk
// and the local record is flipped to cloud-only status. Missing files on disk
// are tolerated; only the final metadata update can fail the call.
func (a *Application) OffloadFile(fileID string) error {
	// Reject malformed IDs before touching any storage.
	if err := inputvalidation.ValidateFileID(fileID); err != nil {
		return err
	}
	a.logger.Info("Offloading file to cloud-only", zap.String("file_id", fileID))

	// The local record is required — without it there is nothing to offload.
	record, err := a.mustGetFileRepo().Get(fileID)
	if err != nil {
		a.logger.Error("Failed to get file from local repo", zap.Error(err))
		return fmt.Errorf("file not found locally: %w", err)
	}
	if record == nil {
		return fmt.Errorf("file not found in local storage")
	}
	if !record.HasLocalContent() {
		a.logger.Info("File already cloud-only, nothing to offload")
		return nil
	}

	// Remove the decrypted copy; "already gone" is treated as success.
	if path := record.FilePath; path != "" {
		switch err := os.Remove(path); {
		case err != nil && !os.IsNotExist(err):
			a.logger.Warn("Failed to delete local file", zap.Error(err), zap.String("path", path))
			// Continue anyway - we'll update the metadata
		default:
			a.logger.Info("Deleted local file", zap.String("path", path))
		}
	}
	// Remove the cached encrypted copy, if any.
	if path := record.EncryptedFilePath; path != "" {
		if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
			a.logger.Warn("Failed to delete encrypted file", zap.Error(err), zap.String("path", path))
		}
	}
	// Remove the thumbnail, if any.
	if path := record.ThumbnailPath; path != "" {
		if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
			a.logger.Warn("Failed to delete thumbnail", zap.Error(err), zap.String("path", path))
		}
	}

	// Clear all local paths and persist the cloud-only status.
	record.FilePath = ""
	record.EncryptedFilePath = ""
	record.ThumbnailPath = ""
	record.SyncStatus = file.SyncStatusCloudOnly
	if err := a.mustGetFileRepo().Update(record); err != nil {
		a.logger.Error("Failed to update file record", zap.Error(err))
		return fmt.Errorf("failed to update file record: %w", err)
	}

	a.logger.Info("File offloaded successfully",
		zap.String("file_id", fileID),
		zap.String("filename", record.Name))
	return nil
}
// OpenFile opens a locally stored file with the system's default application.
// The file must already be downloaded for offline access, and its path must
// resolve inside the application data directory (path-traversal defense).
func (a *Application) OpenFile(fileID string) error {
	// Reject malformed IDs up front.
	if err := inputvalidation.ValidateFileID(fileID); err != nil {
		return err
	}
	a.logger.Info("Opening file", zap.String("file_id", fileID))

	// The file must be known locally and have an on-disk path.
	rec, err := a.mustGetFileRepo().Get(fileID)
	if err != nil {
		a.logger.Error("Failed to get file from local repo", zap.Error(err))
		return fmt.Errorf("file not found locally: %w", err)
	}
	if rec == nil {
		return fmt.Errorf("file not found in local storage")
	}
	if rec.FilePath == "" {
		return fmt.Errorf("file has not been downloaded for offline access")
	}

	// Defense in depth: never hand the OS a path outside our data directory.
	appDataDir, err := a.config.GetAppDataDirPath(a.ctx)
	if err != nil {
		a.logger.Error("Failed to get app data directory", zap.Error(err))
		return fmt.Errorf("failed to validate file path: %w", err)
	}
	if err := validatePathWithinDirectory(rec.FilePath, appDataDir); err != nil {
		a.logger.Error("File path validation failed",
			zap.String("file_path", rec.FilePath),
			zap.String("expected_dir", appDataDir),
			zap.Error(err))
		return fmt.Errorf("invalid file path: %w", err)
	}

	// The record may be stale; confirm the file still exists on disk.
	if _, err := os.Stat(rec.FilePath); os.IsNotExist(err) {
		return fmt.Errorf("file no longer exists at %s", rec.FilePath)
	}

	// Build the platform-specific launcher command for the default app.
	// On Windows the empty "" argument is the window title for `start`,
	// which keeps paths containing spaces from being misparsed as a title.
	var argv []string
	switch sysRuntime.GOOS {
	case "darwin":
		argv = []string{"open", rec.FilePath}
	case "windows":
		argv = []string{"cmd", "/c", "start", "", rec.FilePath}
	case "linux":
		argv = []string{"xdg-open", rec.FilePath}
	default:
		return fmt.Errorf("unsupported operating system: %s", sysRuntime.GOOS)
	}

	// Start (not Run): we hand off to the viewer and do not wait for it.
	if err := exec.Command(argv[0], argv[1:]...).Start(); err != nil {
		a.logger.Error("Failed to open file", zap.Error(err), zap.String("path", rec.FilePath))
		return fmt.Errorf("failed to open file: %w", err)
	}
	a.logger.Info("File opened successfully",
		zap.String("file_id", fileID),
		zap.String("path", rec.FilePath))
	return nil
}
// validatePathWithinDirectory checks that a file path is within the expected
// directory. This is a defense-in-depth measure to prevent path traversal
// attacks.
//
// Both paths are resolved to absolute, lexically-cleaned forms, then the file
// is expressed relative to the directory with filepath.Rel. A relative result
// beginning with ".." (or a Rel failure, e.g. different Windows volumes)
// means the file lies outside the directory. This avoids the classic
// prefix-match pitfall where /app/data would match /app/data-other/file.
// The directory itself is considered "within" (Rel yields ".").
//
// NOTE(review): this is a lexical check only — symlinks are not resolved.
// Confirm the data directory cannot contain attacker-controlled symlinks if
// stronger guarantees are needed.
func validatePathWithinDirectory(filePath, expectedDir string) error {
	// Resolve any relative components against the working directory.
	absFilePath, err := filepath.Abs(filePath)
	if err != nil {
		return fmt.Errorf("failed to resolve file path: %w", err)
	}
	absExpectedDir, err := filepath.Abs(expectedDir)
	if err != nil {
		return fmt.Errorf("failed to resolve expected directory: %w", err)
	}

	// filepath.Rel cleans both inputs; an escaping path surfaces as a ".."
	// prefix in the result (or as an outright error).
	rel, err := filepath.Rel(absExpectedDir, absFilePath)
	if err != nil {
		return fmt.Errorf("path is outside application data directory")
	}
	if rel == ".." || strings.HasPrefix(rel, ".."+string(filepath.Separator)) {
		return fmt.Errorf("path is outside application data directory")
	}
	return nil
}

View file

@ -0,0 +1,401 @@
package app
import (
"bytes"
"crypto/sha256"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"path/filepath"
"time"
"github.com/google/uuid"
"github.com/wailsapp/wails/v2/pkg/runtime"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/maplefile/e2ee"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/file"
)
// =============================================================================
// FILE UPLOAD OPERATIONS
// =============================================================================
// SelectFile opens a native file dialog and returns the selected file path.
// An empty string with a nil error indicates the user cancelled the dialog.
func (a *Application) SelectFile() (string, error) {
	// Dialog configuration: a catch-all filter first, then convenience
	// filters for the most common media and document types.
	opts := runtime.OpenDialogOptions{
		Title: "Select File to Upload",
		Filters: []runtime.FileFilter{
			{DisplayName: "All Files", Pattern: "*.*"},
			{DisplayName: "Images", Pattern: "*.jpg;*.jpeg;*.png;*.gif;*.webp;*.bmp"},
			{DisplayName: "Documents", Pattern: "*.pdf;*.doc;*.docx;*.txt;*.md"},
			{DisplayName: "Videos", Pattern: "*.mp4;*.mov;*.avi;*.mkv;*.webm"},
		},
	}
	path, err := runtime.OpenFileDialog(a.ctx, opts)
	if err != nil {
		a.logger.Error("Failed to open file dialog", zap.Error(err))
		return "", fmt.Errorf("failed to open file dialog: %w", err)
	}
	return path, nil
}
// FileUploadInput represents the input for uploading a file
type FileUploadInput struct {
	// FilePath is the path of the local file to read, encrypt, and upload.
	FilePath string `json:"file_path"`
	// CollectionID identifies the collection that will own the uploaded file.
	CollectionID string `json:"collection_id"`
	TagIDs []string `json:"tag_ids,omitempty"` // Tag IDs to assign to this file
}
// FileUploadResult represents the result of a file upload
type FileUploadResult struct {
	// FileID is the server-assigned identifier of the uploaded file.
	FileID string `json:"file_id"`
	// Filename is the base name of the uploaded file.
	Filename string `json:"filename"`
	// Size is the plaintext (pre-encryption) size in bytes.
	Size int64 `json:"size"`
	// Success reports whether the upload completed.
	Success bool `json:"success"`
	// Message is a human-readable status message.
	Message string `json:"message"`
}
// UploadFile encrypts and uploads a file to a collection.
//
// End-to-end flow (wire format is compatible with the web frontend's E2EE
// implementation — SecretBox / XSalsa20-Poly1305 with separate nonce fields):
//  1. Read the plaintext file from disk and sniff its MIME type.
//  2. Fetch the target collection and decrypt its collection key with the
//     cached master key.
//  3. Generate a fresh per-file key; encrypt the content, the metadata
//     (name/MIME/size), and the file key itself.
//  4. Create a pending file record on the backend, PUT the ciphertext to the
//     returned presigned S3 URL, then confirm completion.
//  5. Best effort: record the file in the local repository and search index.
//
// Requires an authenticated session with the master key cached (logged in).
// Fixes: the presigned-URL log preview previously sliced [:50] without a
// length guard and could panic on short URLs; the completion response's
// success flag was decoded but never checked.
func (a *Application) UploadFile(input FileUploadInput) (*FileUploadResult, error) {
	a.logger.Info("Starting file upload",
		zap.String("file_path", input.FilePath),
		zap.String("collection_id", input.CollectionID))
	// Get current session for authentication
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil {
		a.logger.Error("Failed to get current session", zap.Error(err))
		return nil, fmt.Errorf("not authenticated: %w", err)
	}
	// Get master key from key cache; the cleanup callback must run when done.
	masterKey, cleanup, err := a.keyCache.GetMasterKey(session.Email)
	if err != nil {
		a.logger.Error("Failed to get master key from cache", zap.Error(err))
		return nil, fmt.Errorf("master key not available - please log in again: %w", err)
	}
	defer cleanup()
	apiClient := a.authService.GetAPIClient()
	// Step 1: Read the file from disk
	fileContent, err := os.ReadFile(input.FilePath)
	if err != nil {
		a.logger.Error("Failed to read file", zap.Error(err))
		return nil, fmt.Errorf("failed to read file: %w", err)
	}
	filename := filepath.Base(input.FilePath)
	fileSize := int64(len(fileContent))
	// Sniff the MIME type from content rather than trusting the extension.
	mimeType := http.DetectContentType(fileContent)
	a.logger.Info("File read successfully",
		zap.String("filename", filename),
		zap.Int64("size", fileSize),
		zap.String("mime_type", mimeType))
	// Step 2: Get collection key (need to fetch collection first)
	a.logger.Info("Step 2: Fetching collection for upload",
		zap.String("collection_id", input.CollectionID),
		zap.String("api_url", apiClient.GetBaseURL()+"/api/v1/collections/"+input.CollectionID))
	collectionReq, err := http.NewRequestWithContext(a.ctx, "GET",
		apiClient.GetBaseURL()+"/api/v1/collections/"+input.CollectionID, nil)
	if err != nil {
		a.logger.Error("Failed to create collection request", zap.Error(err))
		return nil, fmt.Errorf("failed to create collection request: %w", err)
	}
	collectionReq.Header.Set("Authorization", "Bearer "+session.AccessToken)
	collectionResp, err := a.httpClient.Do(collectionReq)
	if err != nil {
		a.logger.Error("Failed to fetch collection", zap.Error(err))
		return nil, fmt.Errorf("failed to fetch collection: %w", err)
	}
	defer collectionResp.Body.Close()
	a.logger.Info("Step 2a: Collection fetch response", zap.Int("status", collectionResp.StatusCode))
	if collectionResp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(collectionResp.Body)
		a.logger.Error("Failed to fetch collection - bad status",
			zap.Int("status", collectionResp.StatusCode),
			zap.String("body", string(body)))
		return nil, fmt.Errorf("failed to fetch collection: %s", string(body))
	}
	// Only the encrypted collection key is needed from the response.
	var collectionData struct {
		EncryptedCollectionKey struct {
			Ciphertext string `json:"ciphertext"`
			Nonce      string `json:"nonce"`
		} `json:"encrypted_collection_key"`
	}
	if err := json.NewDecoder(collectionResp.Body).Decode(&collectionData); err != nil {
		a.logger.Error("Failed to decode collection response", zap.Error(err))
		return nil, fmt.Errorf("failed to decode collection: %w", err)
	}
	a.logger.Info("Step 2b: Collection data decoded",
		zap.Int("ciphertext_len", len(collectionData.EncryptedCollectionKey.Ciphertext)),
		zap.Int("nonce_len", len(collectionData.EncryptedCollectionKey.Nonce)))
	// Decrypt collection key
	collectionKeyCiphertext, err := base64.StdEncoding.DecodeString(collectionData.EncryptedCollectionKey.Ciphertext)
	if err != nil {
		a.logger.Error("Failed to decode collection key ciphertext", zap.Error(err))
		return nil, fmt.Errorf("failed to decode collection key ciphertext: %w", err)
	}
	collectionKeyNonce, err := base64.StdEncoding.DecodeString(collectionData.EncryptedCollectionKey.Nonce)
	if err != nil {
		a.logger.Error("Failed to decode collection key nonce", zap.Error(err))
		return nil, fmt.Errorf("failed to decode collection key nonce: %w", err)
	}
	// Handle web frontend combined ciphertext format (nonce + encrypted_data)
	actualCollectionKeyCiphertext := extractActualCiphertext(collectionKeyCiphertext, collectionKeyNonce)
	a.logger.Info("Step 2c: Decrypting collection key",
		zap.Int("ciphertext_bytes", len(actualCollectionKeyCiphertext)),
		zap.Int("nonce_bytes", len(collectionKeyNonce)),
		zap.Int("master_key_bytes", len(masterKey)))
	collectionKey, err := e2ee.DecryptCollectionKey(&e2ee.EncryptedKey{
		Ciphertext: actualCollectionKeyCiphertext,
		Nonce:      collectionKeyNonce,
	}, masterKey)
	if err != nil {
		a.logger.Error("Failed to decrypt collection key", zap.Error(err))
		return nil, fmt.Errorf("failed to decrypt collection key: %w", err)
	}
	a.logger.Info("Collection key decrypted successfully", zap.Int("key_length", len(collectionKey)))
	// Step 3: Generate a new file key
	fileKey, err := e2ee.GenerateFileKey()
	if err != nil {
		return nil, fmt.Errorf("failed to generate file key: %w", err)
	}
	// Step 4: Encrypt file content using SecretBox (XSalsa20-Poly1305) for web frontend compatibility
	encryptedContent, err := e2ee.EncryptFileSecretBox(fileContent, fileKey)
	if err != nil {
		return nil, fmt.Errorf("failed to encrypt file: %w", err)
	}
	// Step 5: Encrypt metadata using SecretBox (XSalsa20-Poly1305) for web frontend compatibility
	metadata := &e2ee.FileMetadata{
		Name:     filename,
		MimeType: mimeType,
		Size:     fileSize,
	}
	encryptedMetadata, err := e2ee.EncryptMetadataSecretBox(metadata, fileKey)
	if err != nil {
		return nil, fmt.Errorf("failed to encrypt metadata: %w", err)
	}
	// Step 6: Encrypt file key with collection key using SecretBox for web frontend compatibility
	encryptedFileKey, err := e2ee.EncryptFileKeySecretBox(fileKey, collectionKey)
	if err != nil {
		return nil, fmt.Errorf("failed to encrypt file key: %w", err)
	}
	// Step 7: Compute encrypted hash (integrity check over the ciphertext)
	hash := sha256.Sum256(encryptedContent)
	encryptedHash := base64.StdEncoding.EncodeToString(hash[:])
	// Step 8: Generate client-side file ID
	fileID := uuid.New().String()
	// Step 9: Create pending file request
	// NOTE: The web frontend sends ciphertext and nonce as SEPARATE fields (not combined).
	// The ciphertext field contains only the encrypted data (from crypto_secretbox_easy),
	// and the nonce field contains the nonce separately.
	pendingFileReq := map[string]interface{}{
		"id":                 fileID,
		"collection_id":      input.CollectionID,
		"encrypted_metadata": encryptedMetadata,
		"encrypted_file_key": map[string]string{
			"ciphertext": base64.StdEncoding.EncodeToString(encryptedFileKey.Ciphertext),
			"nonce":      base64.StdEncoding.EncodeToString(encryptedFileKey.Nonce),
		},
		"encryption_version":          "xsalsa20-poly1305-v1",
		"encrypted_hash":              encryptedHash,
		"expected_file_size_in_bytes": int64(len(encryptedContent)),
		"content_type":                mimeType,
	}
	// Add tag IDs if provided
	if len(input.TagIDs) > 0 {
		pendingFileReq["tag_ids"] = input.TagIDs
		a.logger.Info("Adding tags to file upload",
			zap.Int("tag_count", len(input.TagIDs)))
	}
	pendingBody, err := json.Marshal(pendingFileReq)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal pending file request: %w", err)
	}
	req, err := http.NewRequestWithContext(a.ctx, "POST",
		apiClient.GetBaseURL()+"/api/v1/files/pending",
		bytes.NewReader(pendingBody))
	if err != nil {
		return nil, fmt.Errorf("failed to create pending file request: %w", err)
	}
	req.Header.Set("Authorization", "Bearer "+session.AccessToken)
	req.Header.Set("Content-Type", "application/json")
	resp, err := a.httpClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("failed to create pending file: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
		body, _ := io.ReadAll(resp.Body)
		a.logger.Error("Failed to create pending file",
			zap.Int("status", resp.StatusCode),
			zap.String("body", string(body)))
		return nil, fmt.Errorf("failed to create pending file: %s", string(body))
	}
	var pendingResp struct {
		File struct {
			ID string `json:"id"`
		} `json:"file"`
		PresignedUploadURL      string `json:"presigned_upload_url"`
		UploadURLExpirationTime string `json:"upload_url_expiration_time"`
		Success                 bool   `json:"success"`
		Message                 string `json:"message"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&pendingResp); err != nil {
		return nil, fmt.Errorf("failed to decode pending file response: %w", err)
	}
	if !pendingResp.Success {
		return nil, fmt.Errorf("failed to create pending file: %s", pendingResp.Message)
	}
	// Log only a truncated preview of the presigned URL (its query string
	// carries credentials). Guard the slice: a URL shorter than 50 bytes
	// would otherwise panic.
	urlPreview := pendingResp.PresignedUploadURL
	if len(urlPreview) > 50 {
		urlPreview = urlPreview[:50] + "..."
	}
	a.logger.Info("Pending file created, uploading to S3",
		zap.String("file_id", pendingResp.File.ID),
		zap.String("presigned_url", urlPreview))
	// Step 10: Upload encrypted content to S3
	uploadReq, err := http.NewRequestWithContext(a.ctx, "PUT",
		pendingResp.PresignedUploadURL,
		bytes.NewReader(encryptedContent))
	if err != nil {
		return nil, fmt.Errorf("failed to create upload request: %w", err)
	}
	uploadReq.Header.Set("Content-Type", "application/octet-stream")
	uploadReq.ContentLength = int64(len(encryptedContent))
	// Use the large-transfer client so big uploads are not cut off by the
	// default client timeout.
	uploadResp, err := a.httpClient.DoLargeDownload(uploadReq)
	if err != nil {
		return nil, fmt.Errorf("failed to upload to S3: %w", err)
	}
	defer uploadResp.Body.Close()
	if uploadResp.StatusCode != http.StatusOK && uploadResp.StatusCode != http.StatusCreated {
		body, _ := io.ReadAll(uploadResp.Body)
		a.logger.Error("Failed to upload to S3",
			zap.Int("status", uploadResp.StatusCode),
			zap.String("body", string(body)))
		return nil, fmt.Errorf("failed to upload to S3: status %d", uploadResp.StatusCode)
	}
	a.logger.Info("File uploaded to S3, completing upload")
	// Step 11: Complete the upload
	completeReq := map[string]interface{}{
		"actual_file_size_in_bytes": int64(len(encryptedContent)),
		"upload_confirmed":          true,
	}
	completeBody, err := json.Marshal(completeReq)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal complete request: %w", err)
	}
	completeHTTPReq, err := http.NewRequestWithContext(a.ctx, "POST",
		apiClient.GetBaseURL()+"/api/v1/file/"+pendingResp.File.ID+"/complete",
		bytes.NewReader(completeBody))
	if err != nil {
		return nil, fmt.Errorf("failed to create complete request: %w", err)
	}
	completeHTTPReq.Header.Set("Authorization", "Bearer "+session.AccessToken)
	completeHTTPReq.Header.Set("Content-Type", "application/json")
	completeResp, err := a.httpClient.Do(completeHTTPReq)
	if err != nil {
		return nil, fmt.Errorf("failed to complete upload: %w", err)
	}
	defer completeResp.Body.Close()
	if completeResp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(completeResp.Body)
		a.logger.Error("Failed to complete upload",
			zap.Int("status", completeResp.StatusCode),
			zap.String("body", string(body)))
		return nil, fmt.Errorf("failed to complete upload: %s", string(body))
	}
	var completeRespData struct {
		Success bool   `json:"success"`
		Message string `json:"message"`
	}
	if err := json.NewDecoder(completeResp.Body).Decode(&completeRespData); err != nil {
		return nil, fmt.Errorf("failed to decode complete response: %w", err)
	}
	// Mirror the pending-file check above: a 200 with success=false still
	// means the file was not finalized.
	if !completeRespData.Success {
		return nil, fmt.Errorf("failed to complete upload: %s", completeRespData.Message)
	}
	// Save file metadata to local repository so it appears in dashboard and file list
	localFile := &file.File{
		ID:                   pendingResp.File.ID,
		CollectionID:         input.CollectionID,
		OwnerID:              session.UserID,
		Name:                 filename,
		MimeType:             mimeType,
		DecryptedSizeInBytes: fileSize,
		EncryptedSizeInBytes: int64(len(encryptedContent)),
		FilePath:             input.FilePath, // Original file path
		SyncStatus:           file.SyncStatusSynced,
		State:                file.StateActive,
		CreatedAt:            time.Now(),
		ModifiedAt:           time.Now(),
		LastSyncedAt:         time.Now(),
	}
	if err := a.mustGetFileRepo().Create(localFile); err != nil {
		// Log but don't fail - the upload succeeded, just local tracking failed
		a.logger.Warn("Failed to save file to local repository",
			zap.String("file_id", pendingResp.File.ID),
			zap.Error(err))
	} else {
		a.logger.Info("File saved to local repository",
			zap.String("file_id", pendingResp.File.ID),
			zap.String("filename", filename))
		// Index the file in the search index (best effort)
		if err := a.indexFileForSearch(pendingResp.File.ID, input.CollectionID, filename, input.TagIDs, fileSize); err != nil {
			a.logger.Warn("Failed to index file in search",
				zap.String("file_id", pendingResp.File.ID),
				zap.Error(err))
		}
	}
	a.logger.Info("File upload completed successfully",
		zap.String("file_id", pendingResp.File.ID),
		zap.String("filename", filename),
		zap.Int64("size", fileSize))
	return &FileUploadResult{
		FileID:   pendingResp.File.ID,
		Filename: filename,
		Size:     fileSize,
		Success:  true,
		Message:  "File uploaded successfully",
	}, nil
}

View file

@ -0,0 +1,225 @@
package app
import (
"encoding/base64"
"fmt"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/maplefile/e2ee"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/utils"
)
// VerifyPassword verifies a password against stored encrypted data.
//
// Verification is done cryptographically rather than via a stored hash: a KEK
// is derived from the candidate password (using the session's salt and KDF
// algorithm) and used to decrypt the session's encrypted master key. If the
// authenticated decryption succeeds, the password is correct. On success the
// decrypted master key is also cached for the session, so the expensive KDF
// does not need to run a second time.
//
// Returns (true, nil) for a correct password, (false, nil) for an incorrect
// one, and (false, err) only for setup problems (no session or malformed
// stored data).
func (a *Application) VerifyPassword(password string) (bool, error) {
	// Get current session with encrypted data
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil || session == nil {
		return false, fmt.Errorf("no active session")
	}
	// Check if we have the encrypted data needed for verification
	if session.Salt == "" || session.EncryptedMasterKey == "" {
		return false, fmt.Errorf("session missing encrypted data for password verification")
	}
	// Decode base64 inputs
	salt, err := base64.StdEncoding.DecodeString(session.Salt)
	if err != nil {
		a.logger.Error("Failed to decode salt", zap.Error(err))
		return false, fmt.Errorf("invalid salt encoding")
	}
	encryptedMasterKeyBytes, err := base64.StdEncoding.DecodeString(session.EncryptedMasterKey)
	if err != nil {
		a.logger.Error("Failed to decode encrypted master key", zap.Error(err))
		return false, fmt.Errorf("invalid master key encoding")
	}
	// Determine which KDF algorithm to use; fall back to PBKDF2 when the
	// session does not record an algorithm.
	kdfAlgorithm := session.KDFAlgorithm
	if kdfAlgorithm == "" {
		kdfAlgorithm = e2ee.PBKDF2Algorithm
	}
	// Try to derive KEK and decrypt master key using SecureKeyChain
	// If decryption succeeds, password is correct
	keychain, err := e2ee.NewSecureKeyChainWithAlgorithm(password, salt, kdfAlgorithm)
	if err != nil {
		a.logger.Debug("Password verification failed - could not derive key", zap.String("email", utils.MaskEmail(session.Email)))
		return false, nil // Password is incorrect, but not an error condition
	}
	defer keychain.Clear()
	// Split nonce and ciphertext from encrypted master key
	// Use auto-detection to handle both ChaCha20 (12-byte nonce) and XSalsa20 (24-byte nonce)
	masterKeyNonce, masterKeyCiphertext, err := e2ee.SplitNonceAndCiphertextAuto(encryptedMasterKeyBytes)
	if err != nil {
		a.logger.Error("Failed to split encrypted master key", zap.Error(err))
		return false, fmt.Errorf("invalid master key format")
	}
	encryptedMasterKeyStruct := &e2ee.EncryptedKey{
		Ciphertext: masterKeyCiphertext,
		Nonce:      masterKeyNonce,
	}
	// Try to decrypt the master key into protected memory. A decryption
	// failure here is the "wrong password" signal, not an error.
	masterKey, err := keychain.DecryptMasterKeySecure(encryptedMasterKeyStruct)
	if err != nil {
		a.logger.Debug("Password verification failed - incorrect password", zap.String("email", utils.MaskEmail(session.Email)))
		return false, nil // Password is incorrect, but not an error condition
	}
	// Copy master key bytes before destroying the buffer — Bytes() points
	// into the locked buffer, which becomes invalid after Destroy().
	// We'll cache it after verification succeeds.
	masterKeyBytes := make([]byte, masterKey.Size())
	copy(masterKeyBytes, masterKey.Bytes())
	masterKey.Destroy()
	// Cache the master key for the session (already decrypted, no need to re-derive)
	// NOTE(review): masterKeyBytes is not zeroized after caching — confirm
	// keyCache takes ownership of the copy.
	if err := a.keyCache.StoreMasterKey(session.Email, masterKeyBytes); err != nil {
		a.logger.Warn("Failed to cache master key during password verification", zap.Error(err))
		// Don't fail verification if caching fails
	} else {
		a.logger.Info("Master key cached successfully during password verification", zap.String("email", utils.MaskEmail(session.Email)))
	}
	a.logger.Info("Password verified successfully", zap.String("email", utils.MaskEmail(session.Email)))
	return true, nil
}
// StorePasswordForSession stores password for current session (used by PasswordPrompt).
// The password is kept in the secure in-RAM password store keyed by the
// session's email address.
func (a *Application) StorePasswordForSession(password string) error {
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil || session == nil {
		return fmt.Errorf("no active session")
	}

	masked := utils.MaskEmail(session.Email)
	if err := a.passwordStore.StorePassword(session.Email, password); err != nil {
		a.logger.Error("Failed to store password for session", zap.String("email", masked), zap.Error(err))
		return err
	}
	a.logger.Info("Password re-stored in secure RAM after app restart", zap.String("email", masked))

	// Master key caching is handled in VerifyPassword(): that step already
	// derives the KEK and decrypts the master key, so caching there avoids
	// running the slow KDF (PBKDF2) a second time.
	return nil
}
// GetStoredPassword retrieves the stored password for current session.
func (a *Application) GetStoredPassword() (string, error) {
	session, err := a.authService.GetCurrentSession(a.ctx)
	if session == nil || err != nil {
		return "", fmt.Errorf("no active session")
	}
	return a.passwordStore.GetPassword(session.Email)
}
// HasStoredPassword checks if password is stored for current session.
func (a *Application) HasStoredPassword() bool {
	if session, err := a.authService.GetCurrentSession(a.ctx); err == nil && session != nil {
		return a.passwordStore.HasPassword(session.Email)
	}
	return false
}
// ClearStoredPassword clears the stored password (optional, for security-sensitive operations).
func (a *Application) ClearStoredPassword() error {
	session, err := a.authService.GetCurrentSession(a.ctx)
	if session == nil || err != nil {
		return fmt.Errorf("no active session")
	}
	return a.passwordStore.ClearPassword(session.Email)
}
// cacheMasterKeyFromPassword decrypts and caches the master key for the session.
// This is an internal helper method used by CompleteLogin and StorePasswordForSession.
//
// Parameters:
//   - email: session identity used as the cache key.
//   - password: user password from which the KEK is derived.
//   - saltBase64: base64 (StdEncoding) KDF salt.
//   - encryptedMasterKeyBase64: base64 (StdEncoding) combined nonce+ciphertext
//     of the master key, in XSalsa20 secretbox layout (24-byte nonce prefix).
//   - kdfAlgorithm: KDF identifier; empty string defaults to PBKDF2-SHA256.
//
// Returns an error if decoding, key derivation, decryption, or caching fails.
func (a *Application) cacheMasterKeyFromPassword(email, password, saltBase64, encryptedMasterKeyBase64, kdfAlgorithm string) error {
	// Default to PBKDF2-SHA256
	if kdfAlgorithm == "" {
		kdfAlgorithm = e2ee.PBKDF2Algorithm
	}
	// Decode base64 inputs
	salt, err := base64.StdEncoding.DecodeString(saltBase64)
	if err != nil {
		return fmt.Errorf("invalid salt encoding: %w", err)
	}
	encryptedMasterKeyBytes, err := base64.StdEncoding.DecodeString(encryptedMasterKeyBase64)
	if err != nil {
		return fmt.Errorf("invalid master key encoding: %w", err)
	}
	// Create secure keychain to derive KEK using the correct KDF algorithm
	keychain, err := e2ee.NewSecureKeyChainWithAlgorithm(password, salt, kdfAlgorithm)
	if err != nil {
		return fmt.Errorf("failed to derive KEK: %w", err)
	}
	// Clear wipes the derived KEK from memory when we are done.
	defer keychain.Clear()
	// Split nonce and ciphertext using 24-byte nonce (XSalsa20 secretbox format from web frontend)
	masterKeyNonce, masterKeyCiphertext, err := e2ee.SplitNonceAndCiphertextSecretBox(encryptedMasterKeyBytes)
	if err != nil {
		return fmt.Errorf("invalid master key format: %w", err)
	}
	encryptedMasterKeyStruct := &e2ee.EncryptedKey{
		Ciphertext: masterKeyCiphertext,
		Nonce:      masterKeyNonce,
	}
	// Decrypt master key into secure buffer (auto-detects cipher based on nonce size)
	masterKey, err := keychain.DecryptMasterKeySecure(encryptedMasterKeyStruct)
	if err != nil {
		return fmt.Errorf("failed to decrypt master key: %w", err)
	}
	// CRITICAL: Copy bytes BEFORE destroying the buffer to avoid SIGBUS fault
	// masterKey.Bytes() returns a pointer to LockedBuffer memory which becomes
	// invalid after Destroy() is called
	masterKeyBytes := make([]byte, masterKey.Size())
	copy(masterKeyBytes, masterKey.Bytes())
	// Now safely destroy the secure buffer
	masterKey.Destroy()
	// Store the copied bytes in cache
	if err := a.keyCache.StoreMasterKey(email, masterKeyBytes); err != nil {
		return fmt.Errorf("failed to cache master key: %w", err)
	}
	a.logger.Info("Master key cached successfully for session", zap.String("email", utils.MaskEmail(email)))
	return nil
}
// GetCachedMasterKey retrieves the cached master key for the current session.
// It is exported so the frontend can drive file operations. Callers receive
// the key bytes plus a cleanup function that MUST be called when done.
func (a *Application) GetCachedMasterKey() ([]byte, func(), error) {
	session, err := a.authService.GetCurrentSession(a.ctx)
	if session == nil || err != nil {
		return nil, nil, fmt.Errorf("no active session")
	}
	return a.keyCache.GetMasterKey(session.Email)
}
// HasCachedMasterKey checks if a master key is cached for the current session.
func (a *Application) HasCachedMasterKey() bool {
	if session, err := a.authService.GetCurrentSession(a.ctx); err == nil && session != nil {
		return a.keyCache.HasMasterKey(session.Email)
	}
	return false
}

View file

@ -0,0 +1,324 @@
// app_search.go contains the search-related application layer code.
//
// This file provides:
// - Search index initialization and rebuild logic
// - Wails bindings for frontend search functionality
// - File and collection indexing helpers
//
// The search feature uses Bleve for local full-text search. Each user has their
// own isolated search index stored in their local application data directory.
// Search results are deduplicated by filename to avoid showing the same file
// multiple times when it exists in multiple collections.
package app
import (
"fmt"
"time"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/file"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/search"
)
// =============================================================================
// SEARCH INDEXING OPERATIONS
// These functions are called internally when files/collections are created,
// updated, or when the index needs to be rebuilt.
// =============================================================================
// indexFileForSearch indexes a single file in the search index.
// This is called when a new file is uploaded so it becomes searchable
// immediately, without waiting for a full index rebuild.
func (a *Application) indexFileForSearch(fileID, collectionID, filename string, tags []string, size int64) error {
	// Denormalize the owning collection's name into the document so search
	// results can display it without a second lookup. Best effort: an
	// unavailable repo or missing collection just leaves the name empty.
	var collectionName string
	if repo := a.getCollectionRepo(); repo != nil {
		if col, err := repo.Get(collectionID); err == nil && col != nil {
			collectionName = col.Name
		}
	}

	doc := &search.FileDocument{
		ID:             fileID,
		Filename:       filename,
		Description:    "", // No description field in current implementation
		CollectionID:   collectionID,
		CollectionName: collectionName,
		Tags:           tags,
		Size:           size,
		CreatedAt:      time.Now(),
		Type:           "file",
	}
	return a.searchService.IndexFile(doc)
}
// indexCollectionForSearch adds a collection to the local search index so it
// shows up in full-text search results alongside files.
func (a *Application) indexCollectionForSearch(collectionID, name string, tags []string, fileCount int) error {
	doc := &search.CollectionDocument{
		ID:          collectionID,
		Name:        name,
		Description: "", // No description field in current implementation
		Tags:        tags,
		FileCount:   fileCount,
		CreatedAt:   time.Now(),
		Type:        "collection",
	}
	return a.searchService.IndexCollection(doc)
}
// InitializeSearchIndex initializes the search index for the current user
// and then rebuilds it from locally stored files. It can be called manually
// (e.g. from the frontend) when the index is missing or stale.
// Requires an active session; fails otherwise.
func (a *Application) InitializeSearchIndex() error {
	a.logger.Info("Manually initializing search index")
	sess, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil || sess == nil {
		return fmt.Errorf("no active session found")
	}
	// Open/create the per-user index.
	if err = a.searchService.Initialize(a.ctx, sess.Email); err != nil {
		a.logger.Error("Failed to initialize search index", zap.Error(err))
		return fmt.Errorf("failed to initialize search index: %w", err)
	}
	a.logger.Info("Search index initialized successfully")
	// Populate it from the local repository.
	if err = a.rebuildSearchIndexForUser(sess.Email); err != nil {
		a.logger.Warn("Failed to rebuild search index", zap.Error(err))
		return fmt.Errorf("failed to rebuild search index: %w", err)
	}
	return nil
}
// rebuildSearchIndexForUser rebuilds the entire search index from the local file repository.
// This is called on app startup and after login to ensure the search index is up-to-date.
// The rebuild process:
//  1. Lists all files from the local repository
//  2. Deduplicates files by ID (in case of repository corruption)
//  3. Skips deleted files
//  4. Passes all files to the search service for batch indexing
//
// Collections are not indexed here; they are indexed when explicitly
// created/updated. Returns an error if the file repository is unavailable,
// listing fails, or the search service rejects the rebuild.
func (a *Application) rebuildSearchIndexForUser(userEmail string) error {
	a.logger.Info("Rebuilding search index from local data", zap.String("email", userEmail))
	fileRepo := a.getFileRepo()
	if fileRepo == nil {
		return fmt.Errorf("file repository not available")
	}
	// Get all local files
	localFiles, err := fileRepo.List()
	if err != nil {
		a.logger.Error("Failed to list files for search index rebuild", zap.Error(err))
		return fmt.Errorf("failed to list files: %w", err)
	}
	// Hoisted out of the per-file loop: the repository handle does not change,
	// and collection names are cached so each collection is fetched at most
	// once instead of once per file it contains.
	collectionRepo := a.getCollectionRepo()
	collectionNames := make(map[string]string)
	lookupCollectionName := func(collectionID string) string {
		if name, ok := collectionNames[collectionID]; ok {
			return name
		}
		name := ""
		if collectionRepo != nil {
			if collection, getErr := collectionRepo.Get(collectionID); getErr == nil && collection != nil {
				name = collection.Name
			}
		}
		collectionNames[collectionID] = name
		return name
	}
	// Convert to search documents - use map to deduplicate by ID
	fileDocumentsMap := make(map[string]*search.FileDocument, len(localFiles))
	for _, f := range localFiles {
		// Skip deleted files
		if f.State == file.StateDeleted {
			continue
		}
		// Check for duplicates in local file repo
		if _, exists := fileDocumentsMap[f.ID]; exists {
			a.logger.Warn("Duplicate file found in local repository",
				zap.String("id", f.ID),
				zap.String("name", f.Name))
			continue
		}
		fileDocumentsMap[f.ID] = &search.FileDocument{
			ID:             f.ID,
			Filename:       f.Name,
			Description:    "",
			CollectionID:   f.CollectionID,
			CollectionName: lookupCollectionName(f.CollectionID),
			Tags:           []string{}, // Tags not stored in file entity currently
			Size:           f.DecryptedSizeInBytes,
			CreatedAt:      f.CreatedAt,
			Type:           "file",
		}
	}
	// Convert map to slice
	fileDocuments := make([]*search.FileDocument, 0, len(fileDocumentsMap))
	for _, doc := range fileDocumentsMap {
		fileDocuments = append(fileDocuments, doc)
	}
	a.logger.Info("Prepared files for indexing",
		zap.Int("total_from_repo", len(localFiles)),
		zap.Int("unique_files", len(fileDocuments)))
	// For now, we don't index collections separately since they're fetched from cloud
	// Collections will be indexed when they're explicitly created/updated
	collectionDocuments := []*search.CollectionDocument{}
	// Rebuild the index
	if err := a.searchService.RebuildIndex(userEmail, fileDocuments, collectionDocuments); err != nil {
		a.logger.Error("Failed to rebuild search index", zap.Error(err))
		return fmt.Errorf("failed to rebuild search index: %w", err)
	}
	a.logger.Info("Search index rebuilt successfully",
		zap.Int("files_indexed", len(fileDocuments)),
		zap.Int("collections_indexed", len(collectionDocuments)))
	return nil
}
// =============================================================================
// WAILS BINDINGS - Exposed to Frontend
// =============================================================================
// SearchInput represents the input for search requests coming from the frontend.
type SearchInput struct {
	// Query is the Bleve query string; Search rejects the empty string.
	Query string `json:"query"`
	// Limit caps the number of hits; 0 falls back to the default of 50.
	Limit int `json:"limit,omitempty"`
}
// SearchResultData represents search results for the frontend.
// Files and Collections are already deduplicated/converted by Search;
// TotalHits and MaxScore come straight from the underlying search engine.
type SearchResultData struct {
	Files            []FileSearchResult       `json:"files"`
	Collections      []CollectionSearchResult `json:"collections"`
	TotalFiles       int                      `json:"total_files"`       // len(Files) after deduplication
	TotalCollections int                      `json:"total_collections"` // len(Collections)
	TotalHits        uint64                   `json:"total_hits"`        // raw engine hit count (pre-dedup)
	MaxScore         float64                  `json:"max_score"`
	Query            string                   `json:"query"` // echo of the submitted query
}
// FileSearchResult represents a file in search results.
// CreatedAt is formatted as RFC3339 for the frontend.
type FileSearchResult struct {
	ID             string   `json:"id"`
	Filename       string   `json:"filename"`
	CollectionID   string   `json:"collection_id"`
	CollectionName string   `json:"collection_name"` // denormalized; may be empty if unresolved
	Tags           []string `json:"tags"`
	Size           int64    `json:"size"` // decrypted size in bytes
	CreatedAt      string   `json:"created_at"`
}
// CollectionSearchResult represents a collection in search results.
// CreatedAt is formatted as RFC3339 for the frontend.
type CollectionSearchResult struct {
	ID        string   `json:"id"`
	Name      string   `json:"name"`
	Tags      []string `json:"tags"`
	FileCount int      `json:"file_count"`
	CreatedAt string   `json:"created_at"`
}
// Search performs a full-text search across files and collections.
// This is the main Wails binding exposed to the frontend for search functionality.
//
// Features:
//   - Case-insensitive substring matching (e.g., "mesh" finds "meshtastic")
//   - Deduplication by filename (same filename in multiple collections shows once)
//   - Auto-initialization if search index is not ready
//   - Support for Bleve query syntax (+, -, "", *, ?)
//
// Returns an error for an empty query, when the index cannot be
// (re)initialized, or when the underlying search service fails.
func (a *Application) Search(input SearchInput) (*SearchResultData, error) {
	a.logger.Info("Performing search", zap.String("query", input.Query))
	// Validate input
	if input.Query == "" {
		return nil, fmt.Errorf("search query cannot be empty")
	}
	// Clamp non-positive limits to the default. Previously only 0 (unset)
	// was handled, so a negative limit was passed through to the engine.
	limit := input.Limit
	if limit <= 0 {
		limit = 50
	}
	// Perform search
	result, err := a.searchService.Search(input.Query, limit)
	if err != nil {
		// If the index is not initialized, try to initialize it automatically.
		// NOTE(review): matching the error by its exact string is fragile — a
		// sentinel error plus errors.Is in the search package would be safer.
		if err.Error() == "search index not initialized" {
			a.logger.Warn("Search index not initialized, attempting to initialize now")
			if initErr := a.InitializeSearchIndex(); initErr != nil {
				a.logger.Error("Failed to auto-initialize search index", zap.Error(initErr))
				return nil, fmt.Errorf("search index not initialized. Please log out and log back in, or contact support")
			}
			// Retry search after initialization
			result, err = a.searchService.Search(input.Query, limit)
			if err != nil {
				a.logger.Error("Search failed after auto-initialization", zap.String("query", input.Query), zap.Error(err))
				return nil, fmt.Errorf("search failed: %w", err)
			}
		} else {
			a.logger.Error("Search failed", zap.String("query", input.Query), zap.Error(err))
			return nil, fmt.Errorf("search failed: %w", err)
		}
	}
	// Convert to frontend format, deduplicating by filename: only the first
	// occurrence of each filename is kept (result order comes from the engine).
	files := make([]FileSearchResult, 0, len(result.Files))
	seenFilenames := make(map[string]bool, len(result.Files))
	for _, f := range result.Files {
		if seenFilenames[f.Filename] {
			continue
		}
		seenFilenames[f.Filename] = true
		files = append(files, FileSearchResult{
			ID:             f.ID,
			Filename:       f.Filename,
			CollectionID:   f.CollectionID,
			CollectionName: f.CollectionName,
			Tags:           f.Tags,
			Size:           f.Size,
			CreatedAt:      f.CreatedAt.Format(time.RFC3339),
		})
	}
	collections := make([]CollectionSearchResult, 0, len(result.Collections))
	for _, c := range result.Collections {
		collections = append(collections, CollectionSearchResult{
			ID:        c.ID,
			Name:      c.Name,
			Tags:      c.Tags,
			FileCount: c.FileCount,
			CreatedAt: c.CreatedAt.Format(time.RFC3339),
		})
	}
	a.logger.Info("Search completed",
		zap.String("query", input.Query),
		zap.Int("files_found", len(files)),
		zap.Int("collections_found", len(collections)))
	return &SearchResultData{
		Files:            files,
		Collections:      collections,
		TotalFiles:       len(files),
		TotalCollections: len(collections),
		TotalHits:        result.TotalHits,
		MaxScore:         result.MaxScore,
		Query:            input.Query,
	}, nil
}

View file

@ -0,0 +1,38 @@
package app
// GetTheme returns the current theme setting.
// Thin delegation to the config service; valid values are defined there.
func (a *Application) GetTheme() (string, error) {
	return a.config.GetTheme(a.ctx)
}
// SetTheme updates the theme setting.
// Thin delegation to the config service; no validation is performed here —
// the config layer is responsible for rejecting unknown values (TODO confirm).
func (a *Application) SetTheme(theme string) error {
	return a.config.SetTheme(a.ctx, theme)
}
// GetWindowSize returns the configured window size as a map with "width"
// and "height" keys, which is convenient for the Wails frontend binding.
func (a *Application) GetWindowSize() (map[string]int, error) {
	w, h, err := a.config.GetWindowSize(a.ctx)
	if err != nil {
		return nil, err
	}
	size := map[string]int{
		"width":  w,
		"height": h,
	}
	return size, nil
}
// SetWindowSize updates the window size configuration.
// Thin delegation to the config service, which persists the values.
func (a *Application) SetWindowSize(width, height int) error {
	return a.config.SetWindowSize(a.ctx, width, height)
}
// GetCloudProviderAddress returns the backend API URL.
// Thin delegation to the config service.
func (a *Application) GetCloudProviderAddress() (string, error) {
	return a.config.GetCloudProviderAddress(a.ctx)
}
// SetCloudProviderAddress updates the backend API URL.
// Thin delegation to the config service; no URL validation is performed
// here — presumably the config layer or the API client handles bad URLs
// (TODO confirm).
func (a *Application) SetCloudProviderAddress(address string) error {
	return a.config.SetCloudProviderAddress(a.ctx, address)
}

View file

@ -0,0 +1,148 @@
package app
import (
"fmt"
"time"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/sync"
)
// SyncStatusData represents the current sync status for the frontend.
// NOTE(review): as built by GetSyncStatus, IsSyncing/LastSyncTime/
// LastSyncSuccess/LastSyncError are placeholders rather than real state —
// see the note on GetSyncStatus.
type SyncStatusData struct {
	IsSyncing         bool   `json:"is_syncing"`
	LastSyncTime      string `json:"last_sync_time,omitempty"` // RFC3339
	LastSyncSuccess   bool   `json:"last_sync_success"`
	LastSyncError     string `json:"last_sync_error,omitempty"`
	CollectionsSynced bool   `json:"collections_synced"`
	FilesSynced       bool   `json:"files_synced"`
	FullySynced       bool   `json:"fully_synced"`
}
// SyncResultData represents the result of a sync operation, mirroring the
// counters returned by the sync service for display in the frontend.
type SyncResultData struct {
	CollectionsProcessed int      `json:"collections_processed"`
	CollectionsAdded     int      `json:"collections_added"`
	CollectionsUpdated   int      `json:"collections_updated"`
	CollectionsDeleted   int      `json:"collections_deleted"`
	FilesProcessed       int      `json:"files_processed"`
	FilesAdded           int      `json:"files_added"`
	FilesUpdated         int      `json:"files_updated"`
	FilesDeleted         int      `json:"files_deleted"`
	Errors               []string `json:"errors,omitempty"` // non-fatal per-item errors
}
// GetSyncStatus returns the current sync status.
//
// NOTE(review): IsSyncing, LastSyncTime, LastSyncSuccess and LastSyncError
// are hard-coded placeholders here ("not syncing, succeeded just now")
// rather than taken from the sync service — only the three *Synced flags
// come from `status`. Confirm whether the frontend relies on the
// placeholders or whether they should be sourced from real sync state.
func (a *Application) GetSyncStatus() (*SyncStatusData, error) {
	status, err := a.syncService.GetSyncStatus(a.ctx)
	if err != nil {
		a.logger.Error("Failed to get sync status", zap.Error(err))
		return nil, fmt.Errorf("failed to get sync status: %w", err)
	}
	return &SyncStatusData{
		IsSyncing:         false,                            // placeholder — see NOTE above
		LastSyncTime:      time.Now().Format(time.RFC3339), // placeholder — see NOTE above
		LastSyncSuccess:   true,                             // placeholder — see NOTE above
		LastSyncError:     "",
		CollectionsSynced: status.CollectionsSynced,
		FilesSynced:       status.FilesSynced,
		FullySynced:       status.FullySynced,
	}, nil
}
// TriggerSync triggers a full sync of collections and files.
//
// The body previously duplicated TriggerSyncWithResult line-for-line; it now
// delegates to it so there is a single sync code path, discarding the
// detailed counters and keeping this method's own summary log. (The
// delegate emits its own "Manual sync with result triggered" log as well.)
func (a *Application) TriggerSync() error {
	a.logger.Info("Manual sync triggered")
	result, err := a.TriggerSyncWithResult()
	if err != nil {
		return err
	}
	a.logger.Info("Sync completed",
		zap.Int("collections_added", result.CollectionsAdded),
		zap.Int("files_added", result.FilesAdded),
		zap.Int("errors", len(result.Errors)))
	return nil
}
// TriggerSyncWithResult triggers a full sync and returns the detailed
// per-item counters. Requires an active session; if no password is stored
// (or it cannot be retrieved) the sync still runs but skips decryption.
func (a *Application) TriggerSyncWithResult() (*SyncResultData, error) {
	a.logger.Info("Manual sync with result triggered")
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil {
		a.logger.Error("Failed to get session for sync", zap.Error(err))
		return nil, fmt.Errorf("not authenticated: %w", err)
	}
	// Best-effort password retrieval; an empty password means decryption
	// is skipped downstream.
	var password string
	if !a.passwordStore.HasPassword(session.Email) {
		a.logger.Warn("No stored password, sync will skip decryption")
	} else if password, err = a.passwordStore.GetPassword(session.Email); err != nil {
		a.logger.Warn("Failed to get stored password, sync will skip decryption", zap.Error(err))
	}
	result, err := a.syncService.SyncAll(a.ctx, &sync.SyncInput{
		BatchSize:  50,
		MaxBatches: 100,
		Password:   password,
	})
	if err != nil {
		a.logger.Error("Sync failed", zap.Error(err))
		return nil, fmt.Errorf("sync failed: %w", err)
	}
	return &SyncResultData{
		CollectionsProcessed: result.CollectionsProcessed,
		CollectionsAdded:     result.CollectionsAdded,
		CollectionsUpdated:   result.CollectionsUpdated,
		CollectionsDeleted:   result.CollectionsDeleted,
		FilesProcessed:       result.FilesProcessed,
		FilesAdded:           result.FilesAdded,
		FilesUpdated:         result.FilesUpdated,
		FilesDeleted:         result.FilesDeleted,
		Errors:               result.Errors,
	}, nil
}
// ResetSync resets all sync state for a fresh sync.
// Delegates to the sync service; the next TriggerSync will start from scratch.
func (a *Application) ResetSync() error {
	a.logger.Info("Resetting sync state")
	return a.syncService.ResetSync(a.ctx)
}

View file

@ -0,0 +1,861 @@
package app
import (
"bytes"
"encoding/base64"
"fmt"
"time"
"github.com/google/uuid"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/maplefile/client"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/maplefile/e2ee"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/inputvalidation"
)
// ============================================================================
// Tag Management
// ============================================================================
// TagData represents a decrypted tag for the frontend.
// Name and Color hold plaintext (already decrypted with the per-tag key);
// timestamps are formatted as RFC3339 strings.
type TagData struct {
	ID         string `json:"id"`
	Name       string `json:"name"`
	Color      string `json:"color"`
	CreatedAt  string `json:"created_at"`
	ModifiedAt string `json:"modified_at"`
	Version    uint64 `json:"version"`
	State      string `json:"state"` // e.g. "active"; passed through from the backend
}
// ListTags fetches every tag for the current user from the backend and
// returns them decrypted. Tags that fail to decrypt are skipped (logged),
// so the returned slice may be shorter than the server's tag list.
// Requires a valid, unexpired session and a cached master key.
func (a *Application) ListTags() ([]*TagData, error) {
	apiClient := a.authService.GetAPIClient()
	if apiClient == nil {
		return nil, fmt.Errorf("API client not available")
	}
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil || session == nil {
		return nil, fmt.Errorf("no active session - please log in")
	}
	if !session.IsValid() {
		return nil, fmt.Errorf("session expired - please log in again")
	}
	// Push current tokens into the client before calling out.
	apiClient.SetTokens(session.AccessToken, session.RefreshToken)
	tags, err := apiClient.ListTags(a.ctx)
	if err != nil {
		a.logger.Error("Failed to fetch tags from API", zap.Error(err))
		return nil, fmt.Errorf("failed to fetch tags: %w", err)
	}
	if len(tags) == 0 {
		return []*TagData{}, nil
	}
	// The master key is needed to unwrap each per-tag key.
	masterKey, cleanup, err := a.keyCache.GetMasterKey(session.Email)
	if err != nil {
		a.logger.Error("Failed to get master key", zap.Error(err))
		return nil, fmt.Errorf("failed to get master key: %w", err)
	}
	defer cleanup()
	decryptedTags := make([]*TagData, 0, len(tags))
	for _, t := range tags {
		td, decErr := a.decryptTag(t, masterKey)
		if decErr != nil {
			a.logger.Error("Failed to decrypt tag",
				zap.String("tag_id", t.ID),
				zap.Error(decErr))
			continue // Skip tags that fail to decrypt
		}
		decryptedTags = append(decryptedTags, td)
	}
	a.logger.Info("Tags fetched and decrypted successfully",
		zap.Int("total", len(tags)),
		zap.Int("decrypted", len(decryptedTags)))
	return decryptedTags, nil
}
// decryptTag decrypts a single tag using the master key.
//
// Two layers of decryption are involved:
//  1. The per-tag key is unwrapped from EncryptedTagKey with the master key
//     (XSalsa20-Poly1305 / SecretBox).
//  2. The tag's name and color fields are then decrypted with that tag key.
//
// Base64 fields are decoded with standard encoding first, falling back to
// URL-safe-without-padding, because clients have encoded both variants.
// Returns an error if the key is missing or any decode/decrypt step fails.
func (a *Application) decryptTag(tag *client.Tag, masterKey []byte) (*TagData, error) {
	// Decode encrypted tag key
	if tag.EncryptedTagKey == nil {
		return nil, fmt.Errorf("tag has no encrypted tag key")
	}
	// Decode base64 nonce and ciphertext
	keyNonce, err := base64.StdEncoding.DecodeString(tag.EncryptedTagKey.Nonce)
	if err != nil {
		// Try URL-safe encoding without padding
		keyNonce, err = base64.RawURLEncoding.DecodeString(tag.EncryptedTagKey.Nonce)
		if err != nil {
			return nil, fmt.Errorf("failed to decode tag key nonce: %w", err)
		}
	}
	keyCiphertext, err := base64.StdEncoding.DecodeString(tag.EncryptedTagKey.Ciphertext)
	if err != nil {
		// Try URL-safe encoding without padding
		keyCiphertext, err = base64.RawURLEncoding.DecodeString(tag.EncryptedTagKey.Ciphertext)
		if err != nil {
			return nil, fmt.Errorf("failed to decode tag key ciphertext: %w", err)
		}
	}
	// Extract actual ciphertext (skip nonce if it's prepended).
	// CreateTag stores Ciphertext as nonce||ciphertext, so when the decoded
	// bytes start with the nonce we strip that prefix before decrypting;
	// otherwise the decoded bytes are used as-is.
	var actualCiphertext []byte
	if len(keyCiphertext) > len(keyNonce) && bytes.Equal(keyCiphertext[:len(keyNonce)], keyNonce) {
		// Nonce is prepended to ciphertext
		actualCiphertext = keyCiphertext[len(keyNonce):]
	} else {
		actualCiphertext = keyCiphertext
	}
	// Decrypt tag key using XSalsa20-Poly1305 (SecretBox)
	tagKey, err := e2ee.DecryptTagKey(&e2ee.EncryptedKey{
		Ciphertext: actualCiphertext,
		Nonce:      keyNonce,
	}, masterKey)
	if err != nil {
		return nil, fmt.Errorf("failed to decrypt tag key: %w", err)
	}
	// Decrypt tag name
	name, err := decryptTagField(tag.EncryptedName, tagKey)
	if err != nil {
		return nil, fmt.Errorf("failed to decrypt tag name: %w", err)
	}
	// Decrypt tag color
	color, err := decryptTagField(tag.EncryptedColor, tagKey)
	if err != nil {
		return nil, fmt.Errorf("failed to decrypt tag color: %w", err)
	}
	return &TagData{
		ID:         tag.ID,
		Name:       name,
		Color:      color,
		CreatedAt:  tag.CreatedAt.Format(time.RFC3339),
		ModifiedAt: tag.ModifiedAt.Format(time.RFC3339),
		Version:    tag.Version,
		State:      tag.State,
	}, nil
}
// decryptTagField decrypts an encrypted tag field (name or color).
// Wire format: "ciphertext:nonce", each part base64 — URL-safe without
// padding is tried first (matching the web app), then standard encoding.
func decryptTagField(encryptedField string, tagKey []byte) (string, error) {
	// Split by colon to get ciphertext and nonce
	parts := bytes.Split([]byte(encryptedField), []byte(":"))
	if len(parts) != 2 {
		return "", fmt.Errorf("invalid encrypted field format (expected 'ciphertext:nonce')")
	}
	// Shared two-encoding decoder: RawURL first, StdEncoding as fallback.
	// On failure it surfaces the StdEncoding error, same as before.
	decode := func(s string) ([]byte, error) {
		if raw, decErr := base64.RawURLEncoding.DecodeString(s); decErr == nil {
			return raw, nil
		}
		return base64.StdEncoding.DecodeString(s)
	}
	ciphertext, err := decode(string(parts[0]))
	if err != nil {
		return "", fmt.Errorf("failed to decode ciphertext: %w", err)
	}
	nonce, err := decode(string(parts[1]))
	if err != nil {
		return "", fmt.Errorf("failed to decode nonce: %w", err)
	}
	// Decrypt using XSalsa20-Poly1305
	decrypted, err := e2ee.DecryptWithSecretBox(ciphertext, nonce, tagKey)
	if err != nil {
		return "", fmt.Errorf("failed to decrypt field: %w", err)
	}
	return string(decrypted), nil
}
// CreateTag creates a new tag with encrypted name and color.
//
// E2EE scheme: a fresh 32-byte tag key encrypts the name and color
// (as "ciphertext:nonce" fields); the tag key itself is wrapped with the
// user's master key and sent as EncryptedTagKey, whose Ciphertext is the
// base64 of nonce||ciphertext (decryptTag strips that nonce prefix).
// Requires a valid, unexpired session and a cached master key.
// Returns the created tag with plaintext name/color for the frontend.
func (a *Application) CreateTag(name, color string) (*TagData, error) {
	// Get API client from auth service
	apiClient := a.authService.GetAPIClient()
	if apiClient == nil {
		return nil, fmt.Errorf("API client not available")
	}
	// Ensure we have a valid session with tokens
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil || session == nil {
		return nil, fmt.Errorf("no active session - please log in")
	}
	if !session.IsValid() {
		return nil, fmt.Errorf("session expired - please log in again")
	}
	// Ensure tokens are set in the API client
	apiClient.SetTokens(session.AccessToken, session.RefreshToken)
	// Get master key for encryption
	masterKey, cleanup, err := a.keyCache.GetMasterKey(session.Email)
	if err != nil {
		a.logger.Error("Failed to get master key", zap.Error(err))
		return nil, fmt.Errorf("failed to get master key: %w", err)
	}
	defer cleanup()
	// Generate new tag key (32 bytes for XSalsa20-Poly1305)
	tagKey := e2ee.GenerateKey()
	// Encrypt tag name
	encryptedName, err := encryptTagField(name, tagKey)
	if err != nil {
		return nil, fmt.Errorf("failed to encrypt tag name: %w", err)
	}
	// Encrypt tag color
	encryptedColor, err := encryptTagField(color, tagKey)
	if err != nil {
		return nil, fmt.Errorf("failed to encrypt tag color: %w", err)
	}
	// Encrypt tag key with master key
	encryptedTagKey, err := e2ee.EncryptTagKeySecretBox(tagKey, masterKey)
	if err != nil {
		return nil, fmt.Errorf("failed to encrypt tag key: %w", err)
	}
	// Prepare API request. The tag ID is generated client-side so the
	// server never needs plaintext to identify the tag.
	tagID := uuid.New().String()
	now := time.Now()
	input := &client.CreateTagInput{
		ID:             tagID,
		EncryptedName:  encryptedName,
		EncryptedColor: encryptedColor,
		EncryptedTagKey: &client.EncryptedTagKey{
			// Ciphertext is nonce||ciphertext; the nonce is also sent
			// separately so readers can detect and strip the prefix.
			Ciphertext: base64.StdEncoding.EncodeToString(append(encryptedTagKey.Nonce, encryptedTagKey.Ciphertext...)),
			Nonce:      base64.StdEncoding.EncodeToString(encryptedTagKey.Nonce),
			KeyVersion: 1,
		},
		CreatedAt:  now.Format(time.RFC3339),
		ModifiedAt: now.Format(time.RFC3339),
		Version:    1,
		State:      "active",
	}
	// Create tag via API
	tag, err := apiClient.CreateTag(a.ctx, input)
	if err != nil {
		a.logger.Error("Failed to create tag", zap.Error(err))
		return nil, fmt.Errorf("failed to create tag: %w", err)
	}
	a.logger.Info("Tag created successfully", zap.String("tag_id", tag.ID))
	return &TagData{
		ID:         tag.ID,
		Name:       name,
		Color:      color,
		CreatedAt:  tag.CreatedAt.Format(time.RFC3339),
		ModifiedAt: tag.ModifiedAt.Format(time.RFC3339),
		Version:    tag.Version,
		State:      tag.State,
	}, nil
}
// encryptTagField encrypts a tag field (name or color) with the tag key and
// returns it in the wire format "ciphertext:nonce", both parts base64
// URL-safe without padding (to match the web app).
func encryptTagField(plaintext string, tagKey []byte) (string, error) {
	box, err := e2ee.EncryptWithSecretBox([]byte(plaintext), tagKey)
	if err != nil {
		return "", err
	}
	enc := base64.RawURLEncoding
	return fmt.Sprintf("%s:%s", enc.EncodeToString(box.Ciphertext), enc.EncodeToString(box.Nonce)), nil
}
// UpdateTag updates an existing tag's name and/or color.
//
// The existing per-tag key is reused: it is fetched (inside EncryptedTagKey),
// unwrapped with the master key, and used to re-encrypt the new name/color.
// The same base64 fallback and nonce-prefix-stripping logic as decryptTag
// applies, because EncryptedTagKey.Ciphertext may be stored as
// nonce||ciphertext (see CreateTag).
//
// NOTE(review): Version is sent unchanged (currentTag.Version) — presumably
// the server bumps it on update; confirm against the backend contract.
// Requires a valid, unexpired session and a cached master key.
func (a *Application) UpdateTag(tagID, name, color string) (*TagData, error) {
	// Get API client from auth service
	apiClient := a.authService.GetAPIClient()
	if apiClient == nil {
		return nil, fmt.Errorf("API client not available")
	}
	// Ensure we have a valid session with tokens
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil || session == nil {
		return nil, fmt.Errorf("no active session - please log in")
	}
	if !session.IsValid() {
		return nil, fmt.Errorf("session expired - please log in again")
	}
	// Ensure tokens are set in the API client
	apiClient.SetTokens(session.AccessToken, session.RefreshToken)
	// Get current tag to retrieve encrypted tag key
	currentTag, err := apiClient.GetTag(a.ctx, tagID)
	if err != nil {
		a.logger.Error("Failed to get current tag", zap.String("tag_id", tagID), zap.Error(err))
		return nil, fmt.Errorf("failed to get current tag: %w", err)
	}
	// Get master key for encryption
	masterKey, cleanup, err := a.keyCache.GetMasterKey(session.Email)
	if err != nil {
		a.logger.Error("Failed to get master key", zap.Error(err))
		return nil, fmt.Errorf("failed to get master key: %w", err)
	}
	defer cleanup()
	// Decrypt tag key (standard base64 first, URL-safe-no-padding fallback)
	keyNonce, err := base64.StdEncoding.DecodeString(currentTag.EncryptedTagKey.Nonce)
	if err != nil {
		keyNonce, err = base64.RawURLEncoding.DecodeString(currentTag.EncryptedTagKey.Nonce)
		if err != nil {
			return nil, fmt.Errorf("failed to decode tag key nonce: %w", err)
		}
	}
	keyCiphertext, err := base64.StdEncoding.DecodeString(currentTag.EncryptedTagKey.Ciphertext)
	if err != nil {
		keyCiphertext, err = base64.RawURLEncoding.DecodeString(currentTag.EncryptedTagKey.Ciphertext)
		if err != nil {
			return nil, fmt.Errorf("failed to decode tag key ciphertext: %w", err)
		}
	}
	// Extract actual ciphertext (strip the prepended nonce when present)
	var actualCiphertext []byte
	if len(keyCiphertext) > len(keyNonce) && bytes.Equal(keyCiphertext[:len(keyNonce)], keyNonce) {
		actualCiphertext = keyCiphertext[len(keyNonce):]
	} else {
		actualCiphertext = keyCiphertext
	}
	tagKey, err := e2ee.DecryptTagKey(&e2ee.EncryptedKey{
		Ciphertext: actualCiphertext,
		Nonce:      keyNonce,
	}, masterKey)
	if err != nil {
		return nil, fmt.Errorf("failed to decrypt tag key: %w", err)
	}
	// Encrypt new name and color with the existing tag key
	encryptedName, err := encryptTagField(name, tagKey)
	if err != nil {
		return nil, fmt.Errorf("failed to encrypt tag name: %w", err)
	}
	encryptedColor, err := encryptTagField(color, tagKey)
	if err != nil {
		return nil, fmt.Errorf("failed to encrypt tag color: %w", err)
	}
	// Prepare update request (must include encrypted_tag_key and other required fields)
	input := &client.UpdateTagInput{
		EncryptedName:   encryptedName,
		EncryptedColor:  encryptedColor,
		EncryptedTagKey: currentTag.EncryptedTagKey, // Keep existing key
		CreatedAt:       currentTag.CreatedAt.Format(time.RFC3339),
		ModifiedAt:      time.Now().Format(time.RFC3339),
		Version:         currentTag.Version,
		State:           currentTag.State,
	}
	// Update tag via API
	tag, err := apiClient.UpdateTag(a.ctx, tagID, input)
	if err != nil {
		a.logger.Error("Failed to update tag", zap.String("tag_id", tagID), zap.Error(err))
		return nil, fmt.Errorf("failed to update tag: %w", err)
	}
	a.logger.Info("Tag updated successfully", zap.String("tag_id", tag.ID))
	return &TagData{
		ID:         tag.ID,
		Name:       name,
		Color:      color,
		CreatedAt:  tag.CreatedAt.Format(time.RFC3339),
		ModifiedAt: tag.ModifiedAt.Format(time.RFC3339),
		Version:    tag.Version,
		State:      tag.State,
	}, nil
}
// DeleteTag deletes a tag via the backend API.
// Requires a valid, unexpired session.
//
// NOTE(review): the step-by-step Info logs ("Session valid, setting
// tokens", "Calling API DeleteTag") read like leftover debugging; consider
// demoting them to Debug level.
func (a *Application) DeleteTag(tagID string) error {
	a.logger.Info("DeleteTag called", zap.String("tag_id", tagID))
	// Get API client from auth service
	apiClient := a.authService.GetAPIClient()
	if apiClient == nil {
		a.logger.Error("API client not available")
		return fmt.Errorf("API client not available")
	}
	// Ensure we have a valid session with tokens
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil || session == nil {
		a.logger.Error("No active session", zap.Error(err))
		return fmt.Errorf("no active session - please log in")
	}
	if !session.IsValid() {
		a.logger.Error("Session expired")
		return fmt.Errorf("session expired - please log in again")
	}
	a.logger.Info("Session valid, setting tokens", zap.String("tag_id", tagID))
	// Ensure tokens are set in the API client
	apiClient.SetTokens(session.AccessToken, session.RefreshToken)
	a.logger.Info("Calling API DeleteTag", zap.String("tag_id", tagID))
	// Delete tag via API
	err = apiClient.DeleteTag(a.ctx, tagID)
	if err != nil {
		a.logger.Error("Failed to delete tag", zap.String("tag_id", tagID), zap.Error(err))
		return fmt.Errorf("failed to delete tag: %w", err)
	}
	a.logger.Info("Tag deleted successfully", zap.String("tag_id", tagID))
	return nil
}
// ============================================================================
// Tag Assignment Operations
// ============================================================================
// AssignTagToFile attaches an existing tag to a file via the backend API.
// Both IDs are validated locally first; the call requires a valid,
// unexpired session whose tokens are pushed into the API client.
func (a *Application) AssignTagToFile(tagID, fileID string) error {
	if err := inputvalidation.ValidateTagID(tagID); err != nil {
		return err
	}
	if err := inputvalidation.ValidateFileID(fileID); err != nil {
		return err
	}
	apiClient := a.authService.GetAPIClient()
	if apiClient == nil {
		return fmt.Errorf("API client not available")
	}
	session, err := a.authService.GetCurrentSession(a.ctx)
	switch {
	case err != nil || session == nil:
		return fmt.Errorf("no active session - please log in")
	case !session.IsValid():
		return fmt.Errorf("session expired - please log in again")
	}
	apiClient.SetTokens(session.AccessToken, session.RefreshToken)
	if _, err = apiClient.AssignTag(a.ctx, &client.CreateTagAssignmentInput{
		TagID:      tagID,
		EntityID:   fileID,
		EntityType: "file",
	}); err != nil {
		a.logger.Error("Failed to assign tag to file",
			zap.String("tag_id", tagID),
			zap.String("file_id", fileID),
			zap.Error(err))
		return fmt.Errorf("failed to assign tag: %w", err)
	}
	a.logger.Info("Tag assigned to file",
		zap.String("tag_id", tagID),
		zap.String("file_id", fileID))
	return nil
}
// UnassignTagFromFile detaches a tag from a file via the backend API.
// Both IDs are validated locally first; the call requires a valid,
// unexpired session whose tokens are pushed into the API client.
func (a *Application) UnassignTagFromFile(tagID, fileID string) error {
	if err := inputvalidation.ValidateTagID(tagID); err != nil {
		return err
	}
	if err := inputvalidation.ValidateFileID(fileID); err != nil {
		return err
	}
	apiClient := a.authService.GetAPIClient()
	if apiClient == nil {
		return fmt.Errorf("API client not available")
	}
	session, err := a.authService.GetCurrentSession(a.ctx)
	switch {
	case err != nil || session == nil:
		return fmt.Errorf("no active session - please log in")
	case !session.IsValid():
		return fmt.Errorf("session expired - please log in again")
	}
	apiClient.SetTokens(session.AccessToken, session.RefreshToken)
	if err = apiClient.UnassignTag(a.ctx, tagID, fileID, "file"); err != nil {
		a.logger.Error("Failed to unassign tag from file",
			zap.String("tag_id", tagID),
			zap.String("file_id", fileID),
			zap.Error(err))
		return fmt.Errorf("failed to unassign tag: %w", err)
	}
	a.logger.Info("Tag unassigned from file",
		zap.String("tag_id", tagID),
		zap.String("file_id", fileID))
	return nil
}
// GetTagsForFile returns all tags assigned to a file, decrypted with the
// cached master key. Tags that fail to decrypt are skipped (logged), so the
// result may contain fewer entries than the server reports.
func (a *Application) GetTagsForFile(fileID string) ([]*TagData, error) {
	if err := inputvalidation.ValidateFileID(fileID); err != nil {
		return nil, err
	}
	apiClient := a.authService.GetAPIClient()
	if apiClient == nil {
		return nil, fmt.Errorf("API client not available")
	}
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil || session == nil {
		return nil, fmt.Errorf("no active session - please log in")
	}
	if !session.IsValid() {
		return nil, fmt.Errorf("session expired - please log in again")
	}
	apiClient.SetTokens(session.AccessToken, session.RefreshToken)
	// Master key is needed to unwrap each per-tag key before field decryption.
	masterKey, cleanup, err := a.keyCache.GetMasterKey(session.Email)
	if err != nil {
		a.logger.Error("Failed to get master key", zap.Error(err))
		return nil, fmt.Errorf("failed to get master key: %w", err)
	}
	defer cleanup()
	tags, err := apiClient.GetTagsForEntity(a.ctx, fileID, "file")
	if err != nil {
		a.logger.Error("Failed to get tags for file",
			zap.String("file_id", fileID),
			zap.Error(err))
		return nil, fmt.Errorf("failed to get tags: %w", err)
	}
	if len(tags) == 0 {
		return []*TagData{}, nil
	}
	decrypted := make([]*TagData, 0, len(tags))
	for _, t := range tags {
		td, decErr := a.decryptTag(t, masterKey)
		if decErr != nil {
			a.logger.Warn("Failed to decrypt tag, skipping",
				zap.String("tag_id", t.ID),
				zap.Error(decErr))
			continue
		}
		decrypted = append(decrypted, td)
	}
	a.logger.Info("Tags fetched for file",
		zap.String("file_id", fileID),
		zap.Int("count", len(decrypted)))
	return decrypted, nil
}
// ============================================================================
// Collection Tag Assignment Operations
// ============================================================================
// AssignTagToCollection assigns a tag to a collection
func (a *Application) AssignTagToCollection(tagID, collectionID string) error {
	// Reject malformed identifiers up front.
	if err := inputvalidation.ValidateTagID(tagID); err != nil {
		return err
	}
	if err := inputvalidation.ValidateCollectionID(collectionID); err != nil {
		return err
	}
	// Need an API client plus a live session whose tokens are installed.
	apiClient := a.authService.GetAPIClient()
	if apiClient == nil {
		return fmt.Errorf("API client not available")
	}
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil || session == nil {
		return fmt.Errorf("no active session - please log in")
	}
	if !session.IsValid() {
		return fmt.Errorf("session expired - please log in again")
	}
	apiClient.SetTokens(session.AccessToken, session.RefreshToken)
	// Create the tag-to-collection assignment on the backend.
	assignment := &client.CreateTagAssignmentInput{
		TagID:      tagID,
		EntityID:   collectionID,
		EntityType: "collection",
	}
	if _, err = apiClient.AssignTag(a.ctx, assignment); err != nil {
		a.logger.Error("Failed to assign tag to collection",
			zap.String("tag_id", tagID),
			zap.String("collection_id", collectionID),
			zap.Error(err))
		return fmt.Errorf("failed to assign tag: %w", err)
	}
	a.logger.Info("Tag assigned to collection",
		zap.String("tag_id", tagID),
		zap.String("collection_id", collectionID))
	return nil
}
// UnassignTagFromCollection removes a tag from a collection
func (a *Application) UnassignTagFromCollection(tagID, collectionID string) error {
	// Reject malformed identifiers up front.
	if err := inputvalidation.ValidateTagID(tagID); err != nil {
		return err
	}
	if err := inputvalidation.ValidateCollectionID(collectionID); err != nil {
		return err
	}
	// Need an API client plus a live session whose tokens are installed.
	apiClient := a.authService.GetAPIClient()
	if apiClient == nil {
		return fmt.Errorf("API client not available")
	}
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil || session == nil {
		return fmt.Errorf("no active session - please log in")
	}
	if !session.IsValid() {
		return fmt.Errorf("session expired - please log in again")
	}
	apiClient.SetTokens(session.AccessToken, session.RefreshToken)
	// Ask the backend to drop the tag-to-collection assignment.
	if err = apiClient.UnassignTag(a.ctx, tagID, collectionID, "collection"); err != nil {
		a.logger.Error("Failed to unassign tag from collection",
			zap.String("tag_id", tagID),
			zap.String("collection_id", collectionID),
			zap.Error(err))
		return fmt.Errorf("failed to unassign tag: %w", err)
	}
	a.logger.Info("Tag unassigned from collection",
		zap.String("tag_id", tagID),
		zap.String("collection_id", collectionID))
	return nil
}
// GetTagsForCollection returns all tags assigned to a collection (decrypted)
func (a *Application) GetTagsForCollection(collectionID string) ([]*TagData, error) {
	// Reject malformed IDs before doing any network work.
	if err := inputvalidation.ValidateCollectionID(collectionID); err != nil {
		return nil, err
	}
	// An API client is only available once auth has been wired up.
	apiClient := a.authService.GetAPIClient()
	if apiClient == nil {
		return nil, fmt.Errorf("API client not available")
	}
	// Require a live, unexpired session, then install its tokens.
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil || session == nil {
		return nil, fmt.Errorf("no active session - please log in")
	}
	if !session.IsValid() {
		return nil, fmt.Errorf("session expired - please log in again")
	}
	apiClient.SetTokens(session.AccessToken, session.RefreshToken)
	// The master key is needed to decrypt tag payloads; cleanup wipes it
	// from memory when we are done.
	masterKey, cleanup, err := a.keyCache.GetMasterKey(session.Email)
	if err != nil {
		a.logger.Error("Failed to get master key", zap.Error(err))
		return nil, fmt.Errorf("failed to get master key: %w", err)
	}
	defer cleanup()
	// Ask the backend for every tag attached to this collection.
	encryptedTags, fetchErr := apiClient.GetTagsForEntity(a.ctx, collectionID, "collection")
	if fetchErr != nil {
		a.logger.Error("Failed to get tags for collection",
			zap.String("collection_id", collectionID),
			zap.Error(fetchErr))
		return nil, fmt.Errorf("failed to get tags: %w", fetchErr)
	}
	if len(encryptedTags) == 0 {
		return []*TagData{}, nil
	}
	// Decrypt tags one by one; undecryptable entries are logged and dropped.
	decrypted := make([]*TagData, 0, len(encryptedTags))
	for _, encTag := range encryptedTags {
		tagData, decErr := a.decryptTag(encTag, masterKey)
		if decErr != nil {
			a.logger.Warn("Failed to decrypt tag, skipping",
				zap.String("tag_id", encTag.ID),
				zap.Error(decErr))
			continue
		}
		decrypted = append(decrypted, tagData)
	}
	a.logger.Info("Tags fetched for collection",
		zap.String("collection_id", collectionID),
		zap.Int("count", len(decrypted)))
	return decrypted, nil
}
// ============================================================================
// Tag Search Operations
// ============================================================================
// SearchByTagsResult represents the result of a multi-tag search
type SearchByTagsResult struct {
	// CollectionIDs are the IDs of collections that matched the search.
	CollectionIDs []string `json:"collection_ids"`
	// FileIDs are the IDs of files that matched the search.
	FileIDs []string `json:"file_ids"`
	// TagCount is the tag count reported by the backend response.
	TagCount int `json:"tag_count"`
	// CollectionCount is len(CollectionIDs), precomputed for the frontend.
	CollectionCount int `json:"collection_count"`
	// FileCount is len(FileIDs), precomputed for the frontend.
	FileCount int `json:"file_count"`
}
// SearchByTags searches for collections and files that have ALL the specified tags.
//
// tagIDs must contain at least one tag ID; each ID is validated before any
// network call, matching the validation performed by the other tag operations
// in this file. limit is forwarded to the backend unchanged. Only entity IDs
// are returned - the frontend fetches full details as needed.
func (a *Application) SearchByTags(tagIDs []string, limit int) (*SearchByTagsResult, error) {
	a.logger.Info("SearchByTags called",
		zap.Int("tag_count", len(tagIDs)),
		zap.Int("limit", limit))
	// Validate inputs
	if len(tagIDs) == 0 {
		return nil, fmt.Errorf("at least one tag ID is required")
	}
	// Consistency fix: every other tag operation validates tag IDs before
	// contacting the backend; do the same here so malformed IDs fail fast.
	for _, tagID := range tagIDs {
		if err := inputvalidation.ValidateTagID(tagID); err != nil {
			return nil, err
		}
	}
	// Get API client from auth service
	apiClient := a.authService.GetAPIClient()
	if apiClient == nil {
		return nil, fmt.Errorf("API client not available")
	}
	// Ensure we have a valid session with tokens
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil || session == nil {
		return nil, fmt.Errorf("no active session - please log in")
	}
	if !session.IsValid() {
		return nil, fmt.Errorf("session expired - please log in again")
	}
	// Ensure tokens are set in the API client
	apiClient.SetTokens(session.AccessToken, session.RefreshToken)
	// Call backend API
	resp, err := apiClient.SearchByTags(a.ctx, tagIDs, limit)
	if err != nil {
		a.logger.Error("Failed to search by tags", zap.Error(err))
		return nil, fmt.Errorf("failed to search by tags: %w", err)
	}
	// Extract IDs only - frontend will fetch full details as needed
	collectionIDs := make([]string, 0, len(resp.Collections))
	for _, coll := range resp.Collections {
		collectionIDs = append(collectionIDs, coll.ID)
	}
	fileIDs := make([]string, 0, len(resp.Files))
	for _, file := range resp.Files {
		fileIDs = append(fileIDs, file.ID)
	}
	result := &SearchByTagsResult{
		CollectionIDs:   collectionIDs,
		FileIDs:         fileIDs,
		TagCount:        resp.TagCount,
		CollectionCount: len(collectionIDs),
		FileCount:       len(fileIDs),
	}
	a.logger.Info("SearchByTags completed",
		zap.Int("collections", len(collectionIDs)),
		zap.Int("files", len(fileIDs)))
	return result, nil
}

View file

@ -0,0 +1,253 @@
package app
import (
"fmt"
"time"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/maplefile/client"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/utils"
)
// GetUserProfile fetches the current user's profile
func (a *Application) GetUserProfile() (*client.User, error) {
	// An API client is only available once auth has been wired up.
	apiClient := a.authService.GetAPIClient()
	if apiClient == nil {
		return nil, fmt.Errorf("API client not available")
	}
	// Require a live, unexpired session, then install its tokens.
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil || session == nil {
		return nil, fmt.Errorf("no active session - please log in")
	}
	if !session.IsValid() {
		return nil, fmt.Errorf("session expired - please log in again")
	}
	apiClient.SetTokens(session.AccessToken, session.RefreshToken)
	// Ask the backend for the authenticated user's record.
	profile, err := apiClient.GetMe(a.ctx)
	if err != nil {
		a.logger.Error("Failed to fetch user profile", zap.Error(err))
		return nil, fmt.Errorf("failed to fetch profile: %w", err)
	}
	a.logger.Info("User profile fetched successfully",
		zap.String("user_id", profile.ID),
		zap.String("email", utils.MaskEmail(profile.Email)))
	return profile, nil
}
// UpdateUserProfile updates the current user's profile.
//
// input describes the fields to change and must be non-nil. The updated user
// record is returned on success. Requires a valid logged-in session.
func (a *Application) UpdateUserProfile(input *client.UpdateUserInput) (*client.User, error) {
	// Robustness fix: fail fast on a nil input instead of handing it to the
	// API client, which would send an empty/invalid request (or panic).
	if input == nil {
		return nil, fmt.Errorf("update input is required")
	}
	// Get API client from auth service
	apiClient := a.authService.GetAPIClient()
	if apiClient == nil {
		return nil, fmt.Errorf("API client not available")
	}
	// Ensure we have a valid session with tokens
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil || session == nil {
		return nil, fmt.Errorf("no active session - please log in")
	}
	if !session.IsValid() {
		return nil, fmt.Errorf("session expired - please log in again")
	}
	// Ensure tokens are set in the API client
	apiClient.SetTokens(session.AccessToken, session.RefreshToken)
	// Update user profile
	user, err := apiClient.UpdateMe(a.ctx, input)
	if err != nil {
		a.logger.Error("Failed to update user profile", zap.Error(err))
		return nil, fmt.Errorf("failed to update profile: %w", err)
	}
	a.logger.Info("User profile updated successfully",
		zap.String("user_id", user.ID),
		zap.String("email", utils.MaskEmail(user.Email)))
	return user, nil
}
// ============================================================================
// Blocked Emails Management
// ============================================================================
// BlockedEmailData represents a blocked email entry for the frontend
type BlockedEmailData struct {
	// BlockedEmail is the address being blocked.
	BlockedEmail string `json:"blocked_email"`
	// Reason is the human-readable note stored with the entry.
	Reason string `json:"reason"`
	// CreatedAt is the creation timestamp formatted as RFC 3339.
	CreatedAt string `json:"created_at"`
}
// GetBlockedEmails fetches the list of blocked emails from the backend
func (a *Application) GetBlockedEmails() ([]*BlockedEmailData, error) {
	// An API client is only available once auth has been wired up.
	apiClient := a.authService.GetAPIClient()
	if apiClient == nil {
		return nil, fmt.Errorf("API client not available")
	}
	// Require a live, unexpired session, then install its tokens.
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil || session == nil {
		return nil, fmt.Errorf("no active session - please log in")
	}
	if !session.IsValid() {
		return nil, fmt.Errorf("session expired - please log in again")
	}
	apiClient.SetTokens(session.AccessToken, session.RefreshToken)
	// Fetch the raw list from the backend.
	resp, err := apiClient.ListBlockedEmails(a.ctx)
	if err != nil {
		a.logger.Error("Failed to fetch blocked emails", zap.Error(err))
		return nil, fmt.Errorf("failed to fetch blocked emails: %w", err)
	}
	// Convert each entry into the frontend representation (RFC 3339 timestamps).
	entries := make([]*BlockedEmailData, 0, len(resp.BlockedEmails))
	for _, entry := range resp.BlockedEmails {
		entries = append(entries, &BlockedEmailData{
			BlockedEmail: entry.BlockedEmail,
			Reason:       entry.Reason,
			CreatedAt:    entry.CreatedAt.Format(time.RFC3339),
		})
	}
	a.logger.Info("Blocked emails fetched successfully",
		zap.Int("count", len(entries)))
	return entries, nil
}
// AddBlockedEmail adds an email to the blocked list.
//
// email must be non-empty; reason is an optional human-readable note stored
// alongside the entry. Returns the created entry in frontend format.
func (a *Application) AddBlockedEmail(email, reason string) (*BlockedEmailData, error) {
	// Robustness fix: reject an empty address before making a network call,
	// consistent with the input validation done by other app methods.
	if email == "" {
		return nil, fmt.Errorf("email is required")
	}
	// Get API client from auth service
	apiClient := a.authService.GetAPIClient()
	if apiClient == nil {
		return nil, fmt.Errorf("API client not available")
	}
	// Ensure we have a valid session with tokens
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil || session == nil {
		return nil, fmt.Errorf("no active session - please log in")
	}
	if !session.IsValid() {
		return nil, fmt.Errorf("session expired - please log in again")
	}
	// Ensure tokens are set in the API client
	apiClient.SetTokens(session.AccessToken, session.RefreshToken)
	// Call backend API
	blocked, err := apiClient.CreateBlockedEmail(a.ctx, email, reason)
	if err != nil {
		a.logger.Error("Failed to add blocked email",
			zap.String("email", utils.MaskEmail(email)),
			zap.Error(err))
		return nil, fmt.Errorf("failed to block email: %w", err)
	}
	a.logger.Info("Email blocked successfully",
		zap.String("blocked_email", utils.MaskEmail(email)))
	return &BlockedEmailData{
		BlockedEmail: blocked.BlockedEmail,
		Reason:       blocked.Reason,
		CreatedAt:    blocked.CreatedAt.Format(time.RFC3339),
	}, nil
}
// RemoveBlockedEmail removes an email from the blocked list.
//
// email must be non-empty. Requires a valid logged-in session.
func (a *Application) RemoveBlockedEmail(email string) error {
	// Robustness fix: reject an empty address before making a network call,
	// consistent with the input validation done by other app methods.
	if email == "" {
		return fmt.Errorf("email is required")
	}
	// Get API client from auth service
	apiClient := a.authService.GetAPIClient()
	if apiClient == nil {
		return fmt.Errorf("API client not available")
	}
	// Ensure we have a valid session with tokens
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil || session == nil {
		return fmt.Errorf("no active session - please log in")
	}
	if !session.IsValid() {
		return fmt.Errorf("session expired - please log in again")
	}
	// Ensure tokens are set in the API client
	apiClient.SetTokens(session.AccessToken, session.RefreshToken)
	// Call backend API
	_, err = apiClient.DeleteBlockedEmail(a.ctx, email)
	if err != nil {
		a.logger.Error("Failed to remove blocked email",
			zap.String("email", utils.MaskEmail(email)),
			zap.Error(err))
		return fmt.Errorf("failed to unblock email: %w", err)
	}
	a.logger.Info("Email unblocked successfully",
		zap.String("blocked_email", utils.MaskEmail(email)))
	return nil
}
// ============================================================================
// Account Deletion
// ============================================================================
// DeleteAccount deletes the current user's account.
//
// password is the user's password, required by the backend to confirm the
// deletion; it must be non-empty. On success the local session is logged out
// (which, per Logout's default, also deletes local data).
func (a *Application) DeleteAccount(password string) error {
	// Robustness fix: the backend needs the password to confirm deletion;
	// reject an empty one before making a network call.
	if password == "" {
		return fmt.Errorf("password is required")
	}
	// Get API client from auth service
	apiClient := a.authService.GetAPIClient()
	if apiClient == nil {
		return fmt.Errorf("API client not available")
	}
	// Ensure we have a valid session with tokens
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil || session == nil {
		return fmt.Errorf("no active session - please log in")
	}
	if !session.IsValid() {
		return fmt.Errorf("session expired - please log in again")
	}
	// Ensure tokens are set in the API client
	apiClient.SetTokens(session.AccessToken, session.RefreshToken)
	// Call backend API to delete account
	err = apiClient.DeleteMe(a.ctx, password)
	if err != nil {
		a.logger.Error("Failed to delete account", zap.Error(err))
		return fmt.Errorf("failed to delete account: %w", err)
	}
	// Fix: the value logged here is the masked email, not a user ID, so log
	// it under "email" like the sibling functions do.
	a.logger.Info("Account deleted successfully",
		zap.String("email", utils.MaskEmail(session.Email)))
	// Logout after successful deletion
	_ = a.Logout()
	return nil
}

View file

@ -0,0 +1,294 @@
package app
import (
"context"
"fmt"
"time"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/config"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/collection"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/file"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/session"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/auth"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/httpclient"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/keycache"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/passwordstore"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/ratelimiter"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/search"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/securitylog"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/storagemanager"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/sync"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/tokenmanager"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/utils"
)
// Application is the main Wails application struct
type Application struct {
	// ctx is the Wails context, set in Startup and used for all API calls.
	ctx context.Context
	logger *zap.Logger
	// config provides access to persisted application configuration.
	config config.ConfigService
	// authService owns login state, the current session, and the API client.
	authService *auth.Service
	// tokenManager refreshes access tokens in the background.
	tokenManager *tokenmanager.Manager
	// passwordStore holds passwords in secure RAM enclaves.
	passwordStore *passwordstore.Service
	// keyCache holds decrypted master keys in secure memory.
	keyCache *keycache.Service
	// rateLimiter throttles sensitive operations (e.g. OTT requests).
	rateLimiter *ratelimiter.Service
	httpClient *httpclient.Service
	// syncService synchronizes collections and files with the backend.
	syncService sync.Service
	// storageManager owns the per-user local repositories.
	storageManager *storagemanager.Manager
	// securityLog records security-relevant lifecycle events.
	securityLog *securitylog.Service
	// searchService maintains the local full-text search index.
	searchService search.SearchService
}
// ProvideApplication creates the Application for Wire.
// It is a plain constructor: every dependency is injected and stored as-is;
// ctx is populated later by the Startup lifecycle hook.
func ProvideApplication(
	logger *zap.Logger,
	configService config.ConfigService,
	authService *auth.Service,
	tokenManager *tokenmanager.Manager,
	passwordStore *passwordstore.Service,
	keyCache *keycache.Service,
	rateLimiter *ratelimiter.Service,
	httpClient *httpclient.Service,
	syncService sync.Service,
	storageManager *storagemanager.Manager,
	securityLog *securitylog.Service,
	searchService search.SearchService,
) *Application {
	app := &Application{
		logger:         logger,
		config:         configService,
		authService:    authService,
		tokenManager:   tokenManager,
		passwordStore:  passwordStore,
		keyCache:       keyCache,
		rateLimiter:    rateLimiter,
		httpClient:     httpClient,
		syncService:    syncService,
		storageManager: storageManager,
		securityLog:    securityLog,
		searchService:  searchService,
	}
	return app
}
// getFileRepo returns the file repository for the current user.
// Returns nil if no user is logged in (storage not initialized).
func (a *Application) getFileRepo() file.Repository {
	repo := a.storageManager.GetFileRepository()
	return repo
}
// mustGetFileRepo returns the file repository for the current user.
// Unlike getFileRepo, it never returns nil: when storage is not initialized
// it logs an error and hands back a safe no-op repository instead, so callers
// that assume a logged-in user cannot hit a nil pointer dereference.
func (a *Application) mustGetFileRepo() file.Repository {
	if repo := a.storageManager.GetFileRepository(); repo != nil {
		return repo
	}
	a.logger.Error("File repository not available - user storage not initialized")
	return &noOpFileRepository{}
}
// getCollectionRepo returns the collection repository for the current user.
// Returns nil if no user is logged in (storage not initialized).
func (a *Application) getCollectionRepo() collection.Repository {
	repo := a.storageManager.GetCollectionRepository()
	return repo
}
// noOpFileRepository is a safe no-op implementation of file.Repository
// that returns empty results instead of causing nil pointer dereferences.
// This is used when the actual repository is not available (user not logged in).
//
// Every method returns a "storage not initialized" error. List-style methods
// deliberately return an empty (non-nil) slice alongside the error so callers
// that range over the result without checking the error stay safe.
type noOpFileRepository struct{}

// Get always fails: there is no storage to read from.
func (r *noOpFileRepository) Get(id string) (*file.File, error) {
	return nil, fmt.Errorf("storage not initialized - user must be logged in")
}

// List returns an empty slice plus the not-initialized error.
func (r *noOpFileRepository) List() ([]*file.File, error) {
	return []*file.File{}, fmt.Errorf("storage not initialized - user must be logged in")
}

// ListByCollection returns an empty slice plus the not-initialized error.
func (r *noOpFileRepository) ListByCollection(collectionID string) ([]*file.File, error) {
	return []*file.File{}, fmt.Errorf("storage not initialized - user must be logged in")
}

// Create always fails: writes are impossible without storage.
func (r *noOpFileRepository) Create(f *file.File) error {
	return fmt.Errorf("storage not initialized - user must be logged in")
}

// Update always fails: writes are impossible without storage.
func (r *noOpFileRepository) Update(f *file.File) error {
	return fmt.Errorf("storage not initialized - user must be logged in")
}

// Delete always fails: writes are impossible without storage.
func (r *noOpFileRepository) Delete(id string) error {
	return fmt.Errorf("storage not initialized - user must be logged in")
}

// ListByStatus returns an empty slice plus the not-initialized error.
func (r *noOpFileRepository) ListByStatus(status file.SyncStatus) ([]*file.File, error) {
	return []*file.File{}, fmt.Errorf("storage not initialized - user must be logged in")
}

// Exists reports false plus the not-initialized error.
func (r *noOpFileRepository) Exists(id string) (bool, error) {
	return false, fmt.Errorf("storage not initialized - user must be logged in")
}
// Startup is called when the app starts (Wails lifecycle hook)
//
// It records the app-start security event, then tries to resume a session
// persisted by a previous run. The session must pass a local expiry check and
// a server-side validation before user storage, the search index, and the
// token refresh manager are brought up. Any failure along the way clears the
// session and leaves the app logged out; the statement order below is
// deliberate (tokens must be restored before server validation, storage
// before search).
func (a *Application) Startup(ctx context.Context) {
	a.ctx = ctx
	a.logger.Info("MapleFile desktop application started")
	a.securityLog.LogAppLifecycle(securitylog.EventAppStart)
	// Check if there's a valid session from a previous run
	session, err := a.authService.GetCurrentSession(ctx)
	if err != nil {
		a.logger.Debug("No existing session on startup", zap.Error(err))
		return
	}
	if session == nil {
		a.logger.Info("No session found on startup")
		return
	}
	if !session.IsValid() {
		// Locally expired: clear it rather than resuming.
		a.logger.Info("Session expired on startup, clearing",
			zap.Time("expired_at", session.ExpiresAt))
		_ = a.authService.Logout(ctx)
		return
	}
	// Valid session found - restore it
	a.logger.Info("Resuming valid session from previous run",
		zap.String("user_id", session.UserID),
		zap.String("email", utils.MaskEmail(session.Email)),
		zap.Time("expires_at", session.ExpiresAt))
	// Restore tokens to API client
	if err := a.authService.RestoreSession(ctx, session); err != nil {
		a.logger.Error("Failed to restore session", zap.Error(err))
		return
	}
	// SECURITY: Validate session with server before fully restoring
	// This prevents using stale/revoked sessions from previous runs
	if err := a.validateSessionWithServer(ctx, session); err != nil {
		a.logger.Warn("Session validation with server failed, clearing session",
			zap.String("email", utils.MaskEmail(session.Email)),
			zap.Error(err))
		_ = a.authService.Logout(ctx)
		return
	}
	a.logger.Info("Session validated with server successfully")
	// Initialize user-specific storage for the logged-in user
	if err := a.storageManager.InitializeForUser(session.Email); err != nil {
		a.logger.Error("Failed to initialize user storage", zap.Error(err))
		_ = a.authService.Logout(ctx)
		return
	}
	a.logger.Info("User storage initialized",
		zap.String("email", utils.MaskEmail(session.Email)))
	// Initialize search index for the logged-in user
	if err := a.searchService.Initialize(ctx, session.Email); err != nil {
		a.logger.Error("Failed to initialize search index", zap.Error(err))
		// Don't fail startup if search initialization fails - it's not critical
		// The app can still function without search
	} else {
		a.logger.Info("Search index initialized",
			zap.String("email", utils.MaskEmail(session.Email)))
		// Rebuild search index from local data in the background
		userEmail := session.Email // Capture email before goroutine
		go func() {
			if err := a.rebuildSearchIndexForUser(userEmail); err != nil {
				a.logger.Warn("Failed to rebuild search index on startup", zap.Error(err))
			}
		}()
	}
	// Start token manager for automatic refresh
	a.tokenManager.Start()
	a.logger.Info("Token manager started for resumed session")
	// Run background cleanup of deleted files
	go a.cleanupDeletedFiles()
}
// validateSessionWithServer validates the stored session by making a request to the server.
// This is a security measure to ensure the session hasn't been revoked server-side.
func (a *Application) validateSessionWithServer(ctx context.Context, session *session.Session) error {
	apiClient := a.authService.GetAPIClient()
	if apiClient == nil {
		return fmt.Errorf("API client not available")
	}
	// Install the stored tokens, then hit a cheap authenticated endpoint
	// (GetMe); any failure means the session cannot be trusted.
	apiClient.SetTokens(session.AccessToken, session.RefreshToken)
	if _, err := apiClient.GetMe(ctx); err != nil {
		return fmt.Errorf("server validation failed: %w", err)
	}
	return nil
}
// Shutdown is called when the app shuts down (Wails lifecycle hook).
//
// Cleanup order matters: the token manager is stopped first (within a budget
// derived from the Wails shutdown deadline), then secrets are wiped from RAM
// (passwords, cached master keys), then the search index and user storage are
// released, and finally buffered log entries are flushed.
func (a *Application) Shutdown(ctx context.Context) {
	a.logger.Info("MapleFile desktop application shutting down")
	a.securityLog.LogAppLifecycle(securitylog.EventAppShutdown)
	// Calculate timeout from Wails context: default 3s, otherwise whatever
	// remains of the deadline minus a 500ms buffer, never below 100ms.
	timeout := 3 * time.Second
	if deadline, ok := ctx.Deadline(); ok {
		remaining := time.Until(deadline)
		if remaining > 500*time.Millisecond {
			// Leave 500ms buffer for other cleanup
			timeout = remaining - 500*time.Millisecond
		} else if remaining > 0 {
			timeout = remaining
		} else {
			timeout = 100 * time.Millisecond
		}
	}
	// Stop token manager gracefully
	stopCtx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	if err := a.tokenManager.Stop(stopCtx); err != nil {
		a.logger.Error("Token manager shutdown error", zap.Error(err))
	}
	// Cleanup password store (destroy RAM enclaves)
	a.logger.Info("Clearing all passwords from secure RAM")
	a.passwordStore.Cleanup()
	a.logger.Info("Password cleanup completed")
	// Cleanup key cache (destroy cached master keys)
	a.logger.Info("Clearing all cached master keys from secure memory")
	a.keyCache.Cleanup()
	a.logger.Info("Key cache cleanup completed")
	// Cleanup search index
	a.logger.Info("Closing search index")
	if err := a.searchService.Close(); err != nil {
		a.logger.Error("Search index close error", zap.Error(err))
	} else {
		a.logger.Info("Search index closed successfully")
	}
	// Cleanup user-specific storage
	a.logger.Info("Cleaning up user storage")
	a.storageManager.Cleanup()
	a.logger.Info("User storage cleanup completed")
	// Fix: Sync returns an error that was silently dropped (errcheck).
	// Flushing stderr commonly fails on some platforms, and there is nowhere
	// left to report it during shutdown, so ignore it explicitly.
	_ = a.logger.Sync()
}

View file

@ -0,0 +1,227 @@
//go:build wireinject
// +build wireinject
package app
import (
"context"
"os"
"strings"
"github.com/google/wire"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/maplefile/client"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/config"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/pkg/storage/leveldb"
// Domain imports
sessionDomain "codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/session"
// Repository imports
sessionRepo "codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/repo/session"
// Service imports
authService "codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/auth"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/httpclient"
keyCache "codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/keycache"
passwordStore "codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/passwordstore"
rateLimiter "codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/ratelimiter"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/search"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/securitylog"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/storagemanager"
syncService "codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/sync"
tokenManager "codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/tokenmanager"
// Use case imports
sessionUC "codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/usecase/session"
)
// InitializeApplication creates a fully configured Application using Wire DI
//
// NOTE: this body is a Wire template, not runtime code. The wire code
// generator reads the wire.Build call and emits the real constructor into
// wire_gen.go (this file is excluded from normal builds by the wireinject
// build tag); the trailing `return nil, nil` is never executed.
func InitializeApplication() (*Application, error) {
	wire.Build(
		// Infrastructure
		ProvideLogger,
		config.New,
		ProvideMapleFileClient,
		// Session Repository (global - not user-specific)
		ProvideSessionRepository,
		// Storage Manager (handles user-specific storage lifecycle)
		storagemanager.ProvideManager,
		// Bind *storagemanager.Manager to sync.RepositoryProvider interface
		wire.Bind(new(syncService.RepositoryProvider), new(*storagemanager.Manager)),
		// Use Case Layer
		sessionUC.ProvideCreateUseCase,
		sessionUC.ProvideGetByIdUseCase,
		sessionUC.ProvideDeleteUseCase,
		sessionUC.ProvideSaveUseCase,
		// Service Layer
		authService.ProvideService,
		tokenManager.ProvideManager,
		passwordStore.ProvideService,
		keyCache.ProvideService,
		rateLimiter.ProvideService,
		httpclient.ProvideService,
		securitylog.ProvideService,
		search.New,
		// Sync Services
		syncService.ProvideCollectionSyncService,
		syncService.ProvideFileSyncService,
		syncService.ProvideService,
		// Application
		ProvideApplication,
	)
	return nil, nil
}
// ProvideLogger creates the application logger with environment-aware configuration.
// Defaults to production mode for security. Development mode must be explicitly enabled.
func ProvideLogger() (*zap.Logger, error) {
	switch os.Getenv("MAPLEFILE_MODE") {
	case "dev", "development":
		// Explicit opt-in to development: console format, debug level,
		// caller annotations and stack traces enabled.
		return zap.NewDevelopment()
	default:
		// Secure default: production JSON logging at info level with
		// caller info and stack traces suppressed.
		cfg := zap.NewProductionConfig()
		cfg.Level = zap.NewAtomicLevelAt(zap.InfoLevel)
		cfg.DisableCaller = true
		cfg.DisableStacktrace = true
		return cfg.Build()
	}
}
// ProvideSessionRepository creates the session repository with its storage.
// Session storage is GLOBAL (not user-specific) because it stores the current login session.
func ProvideSessionRepository(logger *zap.Logger) (sessionDomain.Repository, error) {
	// The LevelDB configuration provider picks the on-disk location for the
	// global session database.
	provider, err := config.NewLevelDBConfigurationProviderForSession()
	if err != nil {
		return nil, err
	}
	store := leveldb.NewDiskStorage(provider, logger.Named("session-storage"))
	return sessionRepo.ProvideRepository(store), nil
}
// zapLoggerAdapter adapts *zap.Logger to client.Logger interface
// by converting variadic key/value pairs into zap fields.
type zapLoggerAdapter struct {
	// logger is the wrapped zap logger all calls are forwarded to.
	logger *zap.Logger
}
// Debug forwards a debug-level message, converting key/value pairs to zap fields.
func (a *zapLoggerAdapter) Debug(msg string, keysAndValues ...interface{}) {
	a.logger.Debug(msg, keysAndValuesToZapFields(keysAndValues...)...)
}

// Info forwards an info-level message, converting key/value pairs to zap fields.
func (a *zapLoggerAdapter) Info(msg string, keysAndValues ...interface{}) {
	a.logger.Info(msg, keysAndValuesToZapFields(keysAndValues...)...)
}

// Warn forwards a warn-level message, converting key/value pairs to zap fields.
func (a *zapLoggerAdapter) Warn(msg string, keysAndValues ...interface{}) {
	a.logger.Warn(msg, keysAndValuesToZapFields(keysAndValues...)...)
}

// Error forwards an error-level message, converting key/value pairs to zap fields.
func (a *zapLoggerAdapter) Error(msg string, keysAndValues ...interface{}) {
	a.logger.Error(msg, keysAndValuesToZapFields(keysAndValues...)...)
}
// keysAndValuesToZapFields converts key-value pairs to zap fields.
// Pairs whose key is not a string are skipped; a dangling trailing value
// (odd-length input) is dropped.
func keysAndValuesToZapFields(keysAndValues ...interface{}) []zap.Field {
	fields := make([]zap.Field, 0, len(keysAndValues)/2)
	for len(keysAndValues) >= 2 {
		rawKey, value := keysAndValues[0], keysAndValues[1]
		keysAndValues = keysAndValues[2:]
		if key, ok := rawKey.(string); ok {
			fields = append(fields, zap.Any(key, value))
		}
	}
	return fields
}
// BuildMode is set at compile time via -ldflags
// Example: go build -ldflags "-X codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/app.BuildMode=dev"
//
// ProvideMapleFileClient consults this value when the MAPLEFILE_MODE
// environment variable is unset, before defaulting to "production".
var BuildMode string
// ProvideMapleFileClient creates the backend API client
//
// The backend base URL is resolved in priority order: the MAPLEFILE_MODE
// environment variable, then the compile-time BuildMode, then "production".
// Known modes map to fixed URLs; any other mode falls back to the address
// stored in the config file. A warning is logged when the resulting URL is
// plain HTTP, and the resolved URL is written back to the config except in
// production (where it is treated as immutable).
func ProvideMapleFileClient(configService config.ConfigService, logger *zap.Logger) (*client.Client, error) {
	ctx := context.Background()
	// Determine the API URL based on the mode
	// Priority: 1) Environment variable, 2) Build-time variable, 3) Default to production
	mode := os.Getenv("MAPLEFILE_MODE")
	// Log the detected mode
	logger.Info("Startup: checking mode configuration",
		zap.String("MAPLEFILE_MODE_env", mode),
		zap.String("BuildMode_compile_time", BuildMode),
	)
	if mode == "" {
		if BuildMode != "" {
			mode = BuildMode
			logger.Info("Startup: using compile-time BuildMode", zap.String("mode", mode))
		} else {
			mode = "production" // Default to production (secure default)
			logger.Info("Startup: no mode set, defaulting to production", zap.String("mode", mode))
		}
	}
	var baseURL string
	switch mode {
	case "production":
		baseURL = client.ProductionURL // https://maplefile.ca
	case "dev", "development":
		baseURL = client.LocalURL // http://localhost:8000
	default:
		// Fallback: check config file for custom URL
		cfg, err := configService.GetConfig(ctx)
		if err != nil {
			return nil, err
		}
		baseURL = cfg.CloudProviderAddress
	}
	// Create logger adapter for the API client
	clientLogger := &zapLoggerAdapter{logger: logger.Named("api-client")}
	// Create client with the determined URL and logger
	apiClient := client.New(client.Config{
		BaseURL: baseURL,
		Logger:  clientLogger,
	})
	logger.Info("MapleFile API client initialized",
		zap.String("mode", mode),
		zap.String("base_url", baseURL),
	)
	// Security: Warn if using unencrypted HTTP (should only happen in dev mode)
	if strings.HasPrefix(baseURL, "http://") {
		logger.Warn("SECURITY WARNING: Using unencrypted HTTP connection",
			zap.String("mode", mode),
			zap.String("base_url", baseURL),
			zap.String("recommendation", "This should only be used for local development"),
		)
	}
	// Update the config to reflect the current backend URL (skip in production as it's immutable)
	if mode != "production" {
		if err := configService.SetCloudProviderAddress(ctx, baseURL); err != nil {
			logger.Warn("Failed to update cloud provider address in config", zap.Error(err))
		}
	}
	return apiClient, nil
}

View file

@ -0,0 +1,197 @@
// Code generated by Wire. DO NOT EDIT.
//go:generate go run -mod=mod github.com/google/wire/cmd/wire
//go:build !wireinject
// +build !wireinject
package app
import (
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/maplefile/client"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/pkg/storage/leveldb"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/config"
session2 "codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/session"
session3 "codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/repo/session"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/auth"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/httpclient"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/keycache"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/passwordstore"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/ratelimiter"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/search"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/securitylog"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/storagemanager"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/sync"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/tokenmanager"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/usecase/session"
"context"
"go.uber.org/zap"
"os"
"strings"
)
// Injectors from wire.go:
// InitializeApplication creates a fully configured Application using Wire DI
//
// NOTE(review): this function lives in a file marked "Code generated by Wire.
// DO NOT EDIT." — regenerate with `go generate` rather than hand-editing.
func InitializeApplication() (*Application, error) {
	// Core infrastructure: logger, configuration, and backend API client.
	logger, err := ProvideLogger()
	if err != nil {
		return nil, err
	}
	configService, err := config.New()
	if err != nil {
		return nil, err
	}
	client, err := ProvideMapleFileClient(configService, logger)
	if err != nil {
		return nil, err
	}
	// Session persistence and its CRUD use cases.
	repository, err := ProvideSessionRepository(logger)
	if err != nil {
		return nil, err
	}
	createUseCase := session.ProvideCreateUseCase(repository)
	getByIdUseCase := session.ProvideGetByIdUseCase(repository)
	deleteUseCase := session.ProvideDeleteUseCase(repository)
	saveUseCase := session.ProvideSaveUseCase(repository)
	// Auth and token management build on the session use cases.
	service := auth.ProvideService(client, createUseCase, getByIdUseCase, deleteUseCase, saveUseCase, logger)
	manager := tokenmanager.ProvideManager(client, service, getByIdUseCase, logger)
	passwordstoreService := passwordstore.ProvideService(logger)
	keycacheService := keycache.ProvideService(logger)
	ratelimiterService := ratelimiter.ProvideService()
	httpclientService := httpclient.ProvideService()
	// Sync pipeline: per-domain sync services composed into one service.
	storagemanagerManager := storagemanager.ProvideManager(logger)
	collectionSyncService := sync.ProvideCollectionSyncService(logger, client, storagemanagerManager)
	fileSyncService := sync.ProvideFileSyncService(logger, client, storagemanagerManager)
	syncService := sync.ProvideService(logger, collectionSyncService, fileSyncService, storagemanagerManager)
	securitylogService := securitylog.ProvideService(logger)
	searchService := search.New(configService, logger)
	application := ProvideApplication(logger, configService, service, manager, passwordstoreService, keycacheService, ratelimiterService, httpclientService, syncService, storagemanagerManager, securitylogService, searchService)
	return application, nil
}
// wire.go:
// ProvideLogger creates the application logger with environment-aware
// configuration. Production settings are the secure default; the verbose
// development logger must be requested explicitly via MAPLEFILE_MODE=dev
// (or "development").
func ProvideLogger() (*zap.Logger, error) {
	switch os.Getenv("MAPLEFILE_MODE") {
	case "dev", "development":
		return zap.NewDevelopment()
	}
	// Production: info level, with caller and stacktrace noise suppressed.
	cfg := zap.NewProductionConfig()
	cfg.Level = zap.NewAtomicLevelAt(zap.InfoLevel)
	cfg.DisableCaller = true
	cfg.DisableStacktrace = true
	return cfg.Build()
}
// ProvideSessionRepository creates the session repository with its storage.
// Session storage is GLOBAL (not user-specific) because it stores the
// current login session.
func ProvideSessionRepository(logger *zap.Logger) (session2.Repository, error) {
	provider, err := config.NewLevelDBConfigurationProviderForSession()
	if err != nil {
		return nil, err
	}
	store := leveldb.NewDiskStorage(provider, logger.Named("session-storage"))
	return session3.ProvideRepository(store), nil
}
// zapLoggerAdapter adapts *zap.Logger to client.Logger interface
// (variadic key/value logging methods: Debug/Info/Warn/Error).
type zapLoggerAdapter struct {
	logger *zap.Logger
}
// Debug forwards a debug-level message with structured fields to zap.
func (a *zapLoggerAdapter) Debug(msg string, keysAndValues ...interface{}) {
	a.logger.Debug(msg, keysAndValuesToZapFields(keysAndValues...)...)
}
// Info forwards an info-level message with structured fields to zap.
func (a *zapLoggerAdapter) Info(msg string, keysAndValues ...interface{}) {
	a.logger.Info(msg, keysAndValuesToZapFields(keysAndValues...)...)
}
// Warn forwards a warn-level message with structured fields to zap.
func (a *zapLoggerAdapter) Warn(msg string, keysAndValues ...interface{}) {
	a.logger.Warn(msg, keysAndValuesToZapFields(keysAndValues...)...)
}
// Error forwards an error-level message with structured fields to zap.
func (a *zapLoggerAdapter) Error(msg string, keysAndValues ...interface{}) {
	a.logger.Error(msg, keysAndValuesToZapFields(keysAndValues...)...)
}
// keysAndValuesToZapFields converts alternating key/value pairs into zap
// fields. Pairs whose key is not a string are skipped, as is any trailing
// dangling key with no value.
func keysAndValuesToZapFields(keysAndValues ...interface{}) []zap.Field {
	out := make([]zap.Field, 0, len(keysAndValues)/2)
	for i := 1; i < len(keysAndValues); i += 2 {
		if name, isString := keysAndValues[i-1].(string); isString {
			out = append(out, zap.Any(name, keysAndValues[i]))
		}
	}
	return out
}
// BuildMode is set at compile time via -ldflags
// Example: go build -ldflags "-X codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/app.BuildMode=dev"
// At runtime the MAPLEFILE_MODE environment variable, when set, takes
// priority over this value (see ProvideMapleFileClient).
var BuildMode string
// ProvideMapleFileClient creates the backend API client.
//
// The target URL is resolved from the runtime mode with this priority:
// the MAPLEFILE_MODE environment variable, then the compile-time BuildMode,
// then a secure default of "production". Unrecognized modes fall back to the
// custom URL stored in the config file.
func ProvideMapleFileClient(configService config.ConfigService, logger *zap.Logger) (*client.Client, error) {
	ctx := context.Background()

	mode := os.Getenv("MAPLEFILE_MODE")
	logger.Info("Startup: checking mode configuration", zap.String("MAPLEFILE_MODE_env", mode), zap.String("BuildMode_compile_time", BuildMode))
	if mode == "" {
		if BuildMode != "" {
			mode = BuildMode
			logger.Info("Startup: using compile-time BuildMode", zap.String("mode", mode))
		} else {
			// Secure default when nothing is configured.
			mode = "production"
			logger.Info("Startup: no mode set, defaulting to production", zap.String("mode", mode))
		}
	}

	var baseURL string
	switch mode {
	case "production":
		baseURL = client.ProductionURL
	case "dev", "development":
		baseURL = client.LocalURL
	default:
		// Unknown mode: honor a custom URL from the config file.
		cfg, err := configService.GetConfig(ctx)
		if err != nil {
			return nil, err
		}
		baseURL = cfg.CloudProviderAddress
	}

	apiClient := client.New(client.Config{
		BaseURL: baseURL,
		Logger:  &zapLoggerAdapter{logger: logger.Named("api-client")},
	})
	logger.Info("MapleFile API client initialized", zap.String("mode", mode), zap.String("base_url", baseURL))

	// Plain HTTP is acceptable only for local development; surface it loudly.
	if strings.HasPrefix(baseURL, "http://") {
		logger.Warn("SECURITY WARNING: Using unencrypted HTTP connection", zap.String("mode", mode), zap.String("base_url", baseURL), zap.String("recommendation", "This should only be used for local development"))
	}

	// Persist the resolved URL so the config reflects the active backend
	// (production is treated as immutable and is never written back).
	if mode != "production" {
		if err := configService.SetCloudProviderAddress(ctx, baseURL); err != nil {
			logger.Warn("Failed to update cloud provider address in config", zap.Error(err))
		}
	}
	return apiClient, nil
}

View file

@ -0,0 +1,270 @@
// Package config provides a unified API for managing application configuration
// Location: monorepo/native/desktop/maplefile/internal/config/config.go
package config
import (
"context"
"encoding/json"
"os"
"path/filepath"
"sync"
"time"
)
const (
// AppNameBase is the base name of the desktop application
AppNameBase = "maplefile"
// AppNameDev is the app name used in development mode
AppNameDev = "maplefile-dev"
)
// BuildMode is set at compile time via -ldflags
// Example: go build -ldflags "-X codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/config.BuildMode=dev"
// This must be set alongside the app.BuildMode for consistent behavior.
var BuildMode string
// GetAppName returns the appropriate app name based on the current mode.
// In dev mode, returns "maplefile-dev" to keep dev and production data separate.
// In production mode (or when mode is not set), returns "maplefile".
func GetAppName() string {
mode := GetBuildMode()
if mode == "dev" || mode == "development" {
return AppNameDev
}
return AppNameBase
}
// GetBuildMode returns the current build mode from environment or compile-time variable.
// Priority: 1) Environment variable, 2) Compile-time variable, 3) Default to production
// This is used early in initialization before the full app is set up.
func GetBuildMode() string {
// Check environment variable first
if mode := os.Getenv("MAPLEFILE_MODE"); mode != "" {
return mode
}
// Check compile-time variable
if BuildMode != "" {
return BuildMode
}
// Default to production (secure default)
return "production"
}
// Config holds all application configuration in a flat structure.
// It is serialized as indented JSON to config.json in the app's config
// directory (see fileRepository).
type Config struct {
	// CloudProviderAddress is the URI backend to make all calls to from this application for E2EE cloud operations.
	CloudProviderAddress string `json:"cloud_provider_address"`
	// Credentials holds the logged-in user's tokens; fields are empty/nil when no user is authenticated.
	Credentials *Credentials `json:"credentials"`
	// Desktop-specific settings
	WindowWidth         int    `json:"window_width"`
	WindowHeight        int    `json:"window_height"`
	Theme               string `json:"theme"`                 // light, dark, auto
	Language            string `json:"language"`              // en, es, fr, etc.
	SyncMode            string `json:"sync_mode"`             // encrypted_only, hybrid, decrypted_only
	AutoSync            bool   `json:"auto_sync"`             // Enable automatic synchronization
	SyncIntervalMinutes int    `json:"sync_interval_minutes"` // Sync interval in minutes
	ShowHiddenFiles     bool   `json:"show_hidden_files"`     // Show hidden files in file manager
	DefaultView         string `json:"default_view"`          // list, grid
	SortBy              string `json:"sort_by"`               // name, date, size, type
	SortOrder           string `json:"sort_order"`            // asc, desc
}
// Credentials holds all user credentials for authentication and authorization.
// Values are decrypted for convenience purposes as we assume threat actor cannot access the decrypted values on the user's device.
type Credentials struct {
	// Email is the unique registered email of the user whom successfully logged into the system.
	Email string `json:"email"`
	// AccessToken / RefreshToken form the backend API session token pair.
	AccessToken string `json:"access_token"`
	// AccessTokenExpiryTime is nil when no token is held.
	AccessTokenExpiryTime *time.Time `json:"access_token_expiry_time"`
	RefreshToken          string     `json:"refresh_token"`
	// RefreshTokenExpiryTime is nil when no token is held.
	RefreshTokenExpiryTime *time.Time `json:"refresh_token_expiry_time"`
}
// ConfigService defines the unified interface for all configuration operations.
// The configService implementation guards all calls with an RWMutex, so a
// single instance may be shared across goroutines.
type ConfigService interface {
	// GetConfig returns the complete configuration snapshot.
	GetConfig(ctx context.Context) (*Config, error)
	// GetAppDataDirPath returns the mode-aware application data directory.
	GetAppDataDirPath(ctx context.Context) (string, error)
	GetCloudProviderAddress(ctx context.Context) (string, error)
	SetCloudProviderAddress(ctx context.Context, address string) error
	GetLoggedInUserCredentials(ctx context.Context) (*Credentials, error)
	SetLoggedInUserCredentials(
		ctx context.Context,
		email string,
		accessToken string,
		accessTokenExpiryTime *time.Time,
		refreshToken string,
		refreshTokenExpiryTime *time.Time,
	) error
	ClearLoggedInUserCredentials(ctx context.Context) error
	// User-specific storage methods
	// These return paths that are isolated per user and per environment (dev/production)
	GetUserDataDirPath(ctx context.Context, userEmail string) (string, error)
	GetUserFilesDirPath(ctx context.Context, userEmail string) (string, error)
	GetUserSearchIndexDir(ctx context.Context, userEmail string) (string, error)
	GetLoggedInUserEmail(ctx context.Context) (string, error)
	// Desktop-specific methods (window, theme, language, sync and view preferences)
	GetWindowSize(ctx context.Context) (width int, height int, err error)
	SetWindowSize(ctx context.Context, width int, height int) error
	GetTheme(ctx context.Context) (string, error)
	SetTheme(ctx context.Context, theme string) error
	GetLanguage(ctx context.Context) (string, error)
	SetLanguage(ctx context.Context, language string) error
	GetSyncMode(ctx context.Context) (string, error)
	SetSyncMode(ctx context.Context, mode string) error
	GetAutoSync(ctx context.Context) (bool, error)
	SetAutoSync(ctx context.Context, enabled bool) error
	GetSyncInterval(ctx context.Context) (int, error)
	SetSyncInterval(ctx context.Context, minutes int) error
	GetShowHiddenFiles(ctx context.Context) (bool, error)
	SetShowHiddenFiles(ctx context.Context, show bool) error
	GetDefaultView(ctx context.Context) (string, error)
	SetDefaultView(ctx context.Context, view string) error
	GetSortPreferences(ctx context.Context) (sortBy string, sortOrder string, err error)
	SetSortPreferences(ctx context.Context, sortBy string, sortOrder string) error
}
// repository defines the interface for loading and saving configuration.
// Implemented by fileRepository (plain JSON file) and wrapped by
// integrityAwareRepository (HMAC-signed JSON file).
type repository interface {
	// LoadConfig loads the configuration, returning defaults if file doesn't exist
	LoadConfig(ctx context.Context) (*Config, error)
	// SaveConfig saves the configuration to persistent storage
	SaveConfig(ctx context.Context, config *Config) error
}
// configService implements the ConfigService interface.
type configService struct {
	repo repository
	// mu guards repo access: read lock for loads, write lock for saves.
	mu sync.RWMutex // Thread safety
}
// fileRepository implements the repository interface with file-based storage.
type fileRepository struct {
	configPath string // absolute path to config.json
	appName    string // mode-aware app name ("maplefile" or "maplefile-dev")
}
// New creates the configuration service with file-backed storage wrapped in
// HMAC integrity checking. This is the Wire provider function.
func New() (ConfigService, error) {
	appName := GetAppName()
	repo, err := newFileRepository(appName)
	if err != nil {
		return nil, err
	}
	// Wrap the file repository with tamper detection. If the integrity
	// service cannot be initialized, degrade gracefully to the plain
	// repository so the app can still function.
	basePath := repo.(*fileRepository).configPath
	integrityRepo, err := NewIntegrityAwareRepository(repo, appName, basePath)
	if err != nil {
		return &configService{repo: repo}, nil
	}
	return &configService{repo: integrityRepo}, nil
}
// NewForTesting builds a configuration service around the supplied
// repository (test seam).
func NewForTesting(repo repository) ConfigService {
	svc := &configService{repo: repo}
	return svc
}
// newFileRepository creates the file-backed repository rooted at
// {userConfigDir}/{appName}/config.json, creating the directory with 0700
// (owner-only) permissions if needed.
func newFileRepository(appName string) (repository, error) {
	base, err := os.UserConfigDir()
	if err != nil {
		return nil, err
	}
	// Owner-only permissions: the config may hold credentials.
	appConfigDir := filepath.Join(base, appName)
	if err := os.MkdirAll(appConfigDir, 0700); err != nil {
		return nil, err
	}
	return &fileRepository{
		configPath: filepath.Join(appConfigDir, "config.json"),
		appName:    appName,
	}, nil
}
// LoadConfig returns the stored configuration, or writes and returns the
// defaults when no config file exists yet.
func (r *fileRepository) LoadConfig(ctx context.Context) (*Config, error) {
	if _, err := os.Stat(r.configPath); os.IsNotExist(err) {
		// First run: persist the defaults so later loads see the same values.
		defaults := getDefaultConfig()
		if err := r.SaveConfig(ctx, defaults); err != nil {
			return nil, err
		}
		return defaults, nil
	}
	raw, err := os.ReadFile(r.configPath)
	if err != nil {
		return nil, err
	}
	cfg := &Config{}
	if err := json.Unmarshal(raw, cfg); err != nil {
		return nil, err
	}
	return cfg, nil
}
// SaveConfig writes the configuration as indented JSON with owner-only
// (0600) permissions.
func (r *fileRepository) SaveConfig(ctx context.Context, config *Config) error {
	payload, err := json.MarshalIndent(config, "", " ")
	if err != nil {
		return err
	}
	return os.WriteFile(r.configPath, payload, 0600)
}
// getDefaultConfig returns the out-of-the-box configuration. Credentials
// are left at their zero values because no user has authenticated yet.
// The config directory is already created by newFileRepository before this
// is called, so no directory creation happens here.
func getDefaultConfig() *Config {
	cfg := &Config{
		CloudProviderAddress: "http://localhost:8000",
		Credentials:          &Credentials{}, // empty: no user authenticated
		// Desktop-specific defaults
		WindowWidth:         1440,
		WindowHeight:        900,
		Theme:               "auto",
		Language:            "en",
		SyncMode:            "hybrid",
		AutoSync:            true,
		SyncIntervalMinutes: 30,
		ShowHiddenFiles:     false,
		DefaultView:         "list",
		SortBy:              "name",
		SortOrder:           "asc",
	}
	return cfg
}

View file

@ -0,0 +1,253 @@
package config
import (
"context"
"crypto/hmac"
"crypto/rand"
"crypto/sha256"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"os"
"path/filepath"
)
const (
	// integrityKeyFile is the filename for the integrity key
	integrityKeyFile = ".config_key"
	// integrityKeyLength is the length of the HMAC key in bytes
	integrityKeyLength = 32
	// hmacField is the JSON field name for the HMAC signature.
	// NOTE(review): the struct tag on ConfigWithIntegrity spells the literal
	// "_integrity" rather than referencing this constant — keep them in sync.
	hmacField = "_integrity"
)
// ConfigWithIntegrity wraps a Config with an integrity signature.
// The embedded Config's fields are flattened into the JSON object; the
// signature lives in the "_integrity" field and is omitted when empty
// (legacy unsigned configs).
type ConfigWithIntegrity struct {
	Config
	Integrity string `json:"_integrity,omitempty"`
}
// IntegrityService provides HMAC-based integrity verification for config files.
type IntegrityService struct {
	keyPath string // path of the on-disk key file (.config_key)
	key     []byte // HMAC-SHA256 key, integrityKeyLength bytes
}
// NewIntegrityService creates an integrity service whose HMAC key is stored
// at {userConfigDir}/{appName}/.config_key. The key is loaded from disk, or
// generated on first use.
func NewIntegrityService(appName string) (*IntegrityService, error) {
	base, err := os.UserConfigDir()
	if err != nil {
		return nil, fmt.Errorf("failed to get config directory: %w", err)
	}
	svc := &IntegrityService{
		keyPath: filepath.Join(base, appName, integrityKeyFile),
	}
	if err := svc.loadOrGenerateKey(); err != nil {
		return nil, fmt.Errorf("failed to initialize integrity key: %w", err)
	}
	return svc, nil
}
// loadOrGenerateKey populates s.key from the key file, regenerating the key
// when the file is missing or holds an invalid/mis-sized value.
func (s *IntegrityService) loadOrGenerateKey() error {
	raw, err := os.ReadFile(s.keyPath)
	switch {
	case err == nil:
		decoded, decErr := base64.StdEncoding.DecodeString(string(raw))
		if decErr != nil || len(decoded) != integrityKeyLength {
			// Corrupt or wrong-length key: replace it.
			return s.generateNewKey()
		}
		s.key = decoded
		return nil
	case os.IsNotExist(err):
		// No key yet: create one.
		return s.generateNewKey()
	default:
		return fmt.Errorf("failed to read integrity key: %w", err)
	}
}
// generateNewKey creates a fresh random HMAC key and persists it to
// s.keyPath with 0400 (owner read-only) permissions.
//
// Because the key file is written read-only, a previously written file
// cannot be truncated by os.WriteFile (it opens O_WRONLY|O_CREATE|O_TRUNC,
// which fails with a permission error on a 0400 file; on Windows the
// read-only attribute blocks both overwrite and plain deletion). Any stale
// file is therefore chmod-ed writable and removed first, so regenerating
// over an existing or corrupt key succeeds.
func (s *IntegrityService) generateNewKey() error {
	s.key = make([]byte, integrityKeyLength)
	if _, err := rand.Read(s.key); err != nil {
		return fmt.Errorf("failed to generate key: %w", err)
	}
	// Ensure directory exists with restrictive permissions
	if err := os.MkdirAll(filepath.Dir(s.keyPath), 0700); err != nil {
		return fmt.Errorf("failed to create key directory: %w", err)
	}
	// Clear any previous read-only key file; best-effort — the file may
	// simply not exist, and the WriteFile below reports any real failure.
	_ = os.Chmod(s.keyPath, 0600)
	_ = os.Remove(s.keyPath)
	// Save key with restrictive permissions (owner read only)
	encoded := base64.StdEncoding.EncodeToString(s.key)
	if err := os.WriteFile(s.keyPath, []byte(encoded), 0400); err != nil {
		return fmt.Errorf("failed to save integrity key: %w", err)
	}
	return nil
}
// ComputeHMAC returns the base64-encoded HMAC-SHA256 signature of the
// JSON-serialized config (the integrity field itself is not part of the
// signed payload, since it is not a Config field).
func (s *IntegrityService) ComputeHMAC(config *Config) (string, error) {
	payload, err := json.Marshal(config)
	if err != nil {
		return "", fmt.Errorf("failed to serialize config: %w", err)
	}
	mac := hmac.New(sha256.New, s.key)
	mac.Write(payload)
	return base64.StdEncoding.EncodeToString(mac.Sum(nil)), nil
}
// VerifyHMAC checks providedHMAC against the signature freshly computed
// over config. The byte comparison is constant-time (hmac.Equal) to avoid
// timing side channels.
func (s *IntegrityService) VerifyHMAC(config *Config, providedHMAC string) error {
	expected, err := s.ComputeHMAC(config)
	if err != nil {
		return fmt.Errorf("failed to compute HMAC: %w", err)
	}
	got, err := base64.StdEncoding.DecodeString(providedHMAC)
	if err != nil {
		return errors.New("invalid HMAC format")
	}
	want, err := base64.StdEncoding.DecodeString(expected)
	if err != nil {
		return errors.New("internal error computing HMAC")
	}
	if !hmac.Equal(got, want) {
		return errors.New("config integrity check failed: file may have been tampered with")
	}
	return nil
}
// SignConfig returns the config wrapped with its HMAC signature, ready to
// be serialized.
func (s *IntegrityService) SignConfig(config *Config) (*ConfigWithIntegrity, error) {
	sig, err := s.ComputeHMAC(config)
	if err != nil {
		return nil, err
	}
	signed := &ConfigWithIntegrity{Config: *config, Integrity: sig}
	return signed, nil
}
// VerifyAndExtractConfig verifies the signature (when present) and returns
// the embedded config. Configs written before integrity checking existed
// carry no signature and are accepted as-is; the caller handles migration.
func (s *IntegrityService) VerifyAndExtractConfig(configWithInt *ConfigWithIntegrity) (*Config, error) {
	if configWithInt.Integrity != "" {
		if err := s.VerifyHMAC(&configWithInt.Config, configWithInt.Integrity); err != nil {
			return nil, err
		}
	}
	return &configWithInt.Config, nil
}
// integrityAwareRepository wraps a repository with integrity checking.
type integrityAwareRepository struct {
	inner        repository        // the wrapped (file-based) repository
	integritySvc *IntegrityService // signs and verifies config payloads
	configPath   string            // path of the config file being protected
	warnOnMissingMAC bool // If true, allows configs without MAC (for migration)
}
// NewIntegrityAwareRepository creates a repository wrapper that signs
// configs on save and verifies them on load.
func NewIntegrityAwareRepository(inner repository, appName string, configPath string) (repository, error) {
	svc, err := NewIntegrityService(appName)
	if err != nil {
		return nil, err
	}
	wrapped := &integrityAwareRepository{
		inner:            inner,
		integritySvc:     svc,
		configPath:       configPath,
		warnOnMissingMAC: true, // accept legacy unsigned configs during migration
	}
	return wrapped, nil
}
// LoadConfig reads the config file, verifies its HMAC signature, and
// returns the verified config. A missing file is created with signed
// defaults; a legacy unsigned file is accepted and re-saved with a
// signature.
func (r *integrityAwareRepository) LoadConfig(ctx context.Context) (*Config, error) {
	if _, statErr := os.Stat(r.configPath); os.IsNotExist(statErr) {
		// No file yet: let the inner repository create defaults, then
		// persist them with a signature.
		cfg, err := r.inner.LoadConfig(ctx)
		if err != nil {
			return nil, err
		}
		return cfg, r.SaveConfig(ctx, cfg)
	}
	raw, err := os.ReadFile(r.configPath)
	if err != nil {
		return nil, fmt.Errorf("failed to read config: %w", err)
	}
	var signed ConfigWithIntegrity
	if err := json.Unmarshal(raw, &signed); err != nil {
		return nil, fmt.Errorf("failed to parse config: %w", err)
	}
	cfg, err := r.integritySvc.VerifyAndExtractConfig(&signed)
	if err != nil {
		return nil, err
	}
	if signed.Integrity == "" && r.warnOnMissingMAC {
		// Migration: re-save the legacy unsigned config with a signature.
		_ = r.SaveConfig(ctx, cfg) // best-effort; not critical
	}
	return cfg, nil
}
// SaveConfig signs the config and writes it as indented JSON with
// owner-only (0600) permissions.
func (r *integrityAwareRepository) SaveConfig(ctx context.Context, config *Config) error {
	signed, err := r.integritySvc.SignConfig(config)
	if err != nil {
		return fmt.Errorf("failed to sign config: %w", err)
	}
	payload, err := json.MarshalIndent(signed, "", " ")
	if err != nil {
		return fmt.Errorf("failed to serialize config: %w", err)
	}
	return os.WriteFile(r.configPath, payload, 0600)
}

View file

@ -0,0 +1,162 @@
// internal/config/leveldb.go
package config
import (
"fmt"
"os"
"path/filepath"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/pkg/storage/leveldb"
)
// LevelDB support functions - desktop-specific databases
// These functions return errors instead of using log.Fatalf to allow proper error handling.
//
// Storage is organized as follows:
// - Global storage (session): {appDir}/session/
// - User-specific storage: {appDir}/users/{emailHash}/{dbName}/
//
// This ensures:
// 1. Different users have isolated data
// 2. Dev and production modes have separate directories ({appName} vs {appName}-dev)
// 3. Email addresses are not exposed in directory names (hashed)
// getAppDir returns the mode-aware application data directory
// ({userConfigDir}/maplefile or .../maplefile-dev), creating it with 0700
// (owner-only) permissions if needed.
func getAppDir() (string, error) {
	base, err := os.UserConfigDir()
	if err != nil {
		return "", fmt.Errorf("failed to get user config directory: %w", err)
	}
	dir := filepath.Join(base, GetAppName())
	if err := os.MkdirAll(dir, 0700); err != nil {
		return "", fmt.Errorf("failed to create app directory: %w", err)
	}
	return dir, nil
}
// getUserDir returns the user-specific data directory, creating it if needed.
// Returns an error if userEmail is empty (no user logged in).
func getUserDir(userEmail string) (string, error) {
if userEmail == "" {
return "", fmt.Errorf("no user email provided - user must be logged in")
}
appName := GetAppName()
userDir, err := GetUserSpecificDataDir(appName, userEmail)
if err != nil {
return "", fmt.Errorf("failed to get user data directory: %w", err)
}
return userDir, nil
}
// =============================================================================
// GLOBAL STORAGE PROVIDERS (not user-specific)
// =============================================================================
// NewLevelDBConfigurationProviderForSession returns the LevelDB provider
// for the GLOBAL session store — it holds the current login session and is
// deliberately not per-user.
func NewLevelDBConfigurationProviderForSession() (leveldb.LevelDBConfigurationProvider, error) {
	dir, err := getAppDir()
	if err != nil {
		return nil, fmt.Errorf("session storage: %w", err)
	}
	return leveldb.NewLevelDBConfigurationProvider(dir, "session"), nil
}
// =============================================================================
// USER-SPECIFIC STORAGE PROVIDERS
// These require a logged-in user's email to determine the storage path.
// =============================================================================
// NewLevelDBConfigurationProviderForLocalFilesWithUser returns the LevelDB
// provider for local file metadata, scoped to the given user's directory.
func NewLevelDBConfigurationProviderForLocalFilesWithUser(userEmail string) (leveldb.LevelDBConfigurationProvider, error) {
	dir, err := getUserDir(userEmail)
	if err != nil {
		return nil, fmt.Errorf("local files storage: %w", err)
	}
	return leveldb.NewLevelDBConfigurationProvider(dir, "local_files"), nil
}
// NewLevelDBConfigurationProviderForSyncStateWithUser returns the LevelDB
// provider for sync state, scoped to the given user's directory.
func NewLevelDBConfigurationProviderForSyncStateWithUser(userEmail string) (leveldb.LevelDBConfigurationProvider, error) {
	dir, err := getUserDir(userEmail)
	if err != nil {
		return nil, fmt.Errorf("sync state storage: %w", err)
	}
	return leveldb.NewLevelDBConfigurationProvider(dir, "sync_state"), nil
}
// NewLevelDBConfigurationProviderForCacheWithUser returns the LevelDB
// provider for the local cache, scoped to the given user's directory.
func NewLevelDBConfigurationProviderForCacheWithUser(userEmail string) (leveldb.LevelDBConfigurationProvider, error) {
	dir, err := getUserDir(userEmail)
	if err != nil {
		return nil, fmt.Errorf("cache storage: %w", err)
	}
	return leveldb.NewLevelDBConfigurationProvider(dir, "cache"), nil
}
// NewLevelDBConfigurationProviderForUserDataWithUser returns the LevelDB
// provider for user-specific data, scoped to the given user's directory.
func NewLevelDBConfigurationProviderForUserDataWithUser(userEmail string) (leveldb.LevelDBConfigurationProvider, error) {
	dir, err := getUserDir(userEmail)
	if err != nil {
		return nil, fmt.Errorf("user data storage: %w", err)
	}
	return leveldb.NewLevelDBConfigurationProvider(dir, "user_data"), nil
}
// =============================================================================
// LEGACY FUNCTIONS (deprecated - use user-specific versions instead)
// These exist for backward compatibility during migration.
// =============================================================================
// NewLevelDBConfigurationProviderForCache returns a LevelDB configuration provider for local cache.
// Note: this variant stores the cache in the GLOBAL app directory, not a
// per-user one.
//
// Deprecated: Use NewLevelDBConfigurationProviderForCacheWithUser instead.
func NewLevelDBConfigurationProviderForCache() (leveldb.LevelDBConfigurationProvider, error) {
	dir, err := getAppDir()
	if err != nil {
		return nil, fmt.Errorf("cache storage: %w", err)
	}
	return leveldb.NewLevelDBConfigurationProvider(dir, "cache"), nil
}
// NewLevelDBConfigurationProviderForLocalFiles returns a LevelDB configuration provider for local file metadata.
// Note: this variant stores the data in the GLOBAL app directory, not a
// per-user one.
//
// Deprecated: Use NewLevelDBConfigurationProviderForLocalFilesWithUser instead.
func NewLevelDBConfigurationProviderForLocalFiles() (leveldb.LevelDBConfigurationProvider, error) {
	dir, err := getAppDir()
	if err != nil {
		return nil, fmt.Errorf("local files storage: %w", err)
	}
	return leveldb.NewLevelDBConfigurationProvider(dir, "local_files"), nil
}
// NewLevelDBConfigurationProviderForSyncState returns a LevelDB configuration provider for sync state.
// Note: this variant stores the data in the GLOBAL app directory, not a
// per-user one.
//
// Deprecated: Use NewLevelDBConfigurationProviderForSyncStateWithUser instead.
func NewLevelDBConfigurationProviderForSyncState() (leveldb.LevelDBConfigurationProvider, error) {
	dir, err := getAppDir()
	if err != nil {
		return nil, fmt.Errorf("sync state storage: %w", err)
	}
	return leveldb.NewLevelDBConfigurationProvider(dir, "sync_state"), nil
}
// NewLevelDBConfigurationProviderForUser returns a LevelDB configuration provider for user data.
// Note: this variant stores the data in the GLOBAL app directory, not a
// per-user one.
//
// Deprecated: Use NewLevelDBConfigurationProviderForUserDataWithUser instead.
func NewLevelDBConfigurationProviderForUser() (leveldb.LevelDBConfigurationProvider, error) {
	dir, err := getAppDir()
	if err != nil {
		return nil, fmt.Errorf("user storage: %w", err)
	}
	return leveldb.NewLevelDBConfigurationProvider(dir, "user"), nil
}

View file

@ -0,0 +1,398 @@
// Package config provides a unified API for managing application configuration
// Location: monorepo/native/desktop/maplefile/internal/config/methods.go
package config
import (
"context"
"fmt"
"net/url"
"os"
"strings"
"time"
)
// Implementation of ConfigService methods
// getConfig loads the current configuration under a shared read lock.
func (s *configService) getConfig(ctx context.Context) (*Config, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	cfg, err := s.repo.LoadConfig(ctx)
	return cfg, err
}
// saveConfig persists the configuration under an exclusive write lock.
func (s *configService) saveConfig(ctx context.Context, config *Config) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	err := s.repo.SaveConfig(ctx, config)
	return err
}
// GetConfig returns the complete configuration
// (thin public wrapper over the lock-guarded getConfig).
func (s *configService) GetConfig(ctx context.Context) (*Config, error) {
	return s.getConfig(ctx)
}
// GetAppDataDirPath returns the proper application data directory path.
// The directory is mode-aware: "maplefile-dev" for dev mode, "maplefile"
// for production (the name comes from GetAppName). The context parameter
// is currently unused; it is kept for interface symmetry.
func (s *configService) GetAppDataDirPath(ctx context.Context) (string, error) {
	return GetUserDataDir(GetAppName())
}
// GetUserDataDirPath returns the data directory path for a specific user.
//
// The resulting directory is:
//  1. Isolated per user (different users get different directories)
//  2. Isolated per environment (dev vs production)
//  3. Privacy-preserving (the email is hashed to form the directory name)
//
// Structure: {appDataDir}/users/{emailHash}/
func (s *configService) GetUserDataDirPath(ctx context.Context, userEmail string) (string, error) {
	if userEmail != "" {
		return GetUserSpecificDataDir(GetAppName(), userEmail)
	}
	return "", fmt.Errorf("user email is required")
}
// GetUserFilesDirPath returns the directory where decrypted files are
// stored for a user. Files are organized by collection under
// {userDir}/files/{collectionId}/.
func (s *configService) GetUserFilesDirPath(ctx context.Context, userEmail string) (string, error) {
	if userEmail != "" {
		return GetUserFilesDir(GetAppName(), userEmail)
	}
	return "", fmt.Errorf("user email is required")
}
// GetUserSearchIndexDir returns the search index directory path for a
// specific user. It delegates to the package-level function of the same
// name with the mode-aware app name.
func (s *configService) GetUserSearchIndexDir(ctx context.Context, userEmail string) (string, error) {
	if userEmail != "" {
		return GetUserSearchIndexDir(GetAppName(), userEmail)
	}
	return "", fmt.Errorf("user email is required")
}
// GetLoggedInUserEmail returns the email of the currently logged-in user.
// It returns an empty string (with nil error) when no credentials are stored.
func (s *configService) GetLoggedInUserEmail(ctx context.Context) (string, error) {
	cfg, loadErr := s.getConfig(ctx)
	if loadErr != nil {
		return "", loadErr
	}
	if creds := cfg.Credentials; creds != nil {
		return creds.Email, nil
	}
	return "", nil
}
// GetCloudProviderAddress returns the configured cloud provider address.
func (s *configService) GetCloudProviderAddress(ctx context.Context) (string, error) {
	cfg, loadErr := s.getConfig(ctx)
	if loadErr != nil {
		return "", loadErr
	}
	return cfg.CloudProviderAddress, nil
}
// SetCloudProviderAddress updates the cloud provider address after security
// validation. The change is rejected outright in production mode; in dev
// mode HTTP is permitted for local targets only (see validateCloudProviderURL).
func (s *configService) SetCloudProviderAddress(ctx context.Context, address string) error {
	mode := os.Getenv("MAPLEFILE_MODE")
	if mode == "" {
		mode = "dev" // unset environment defaults to dev behaviour
	}

	// Security: the production endpoint is pinned and cannot be changed.
	if mode == "production" {
		return fmt.Errorf("cloud provider address cannot be changed in production mode")
	}

	// Validate URL format and mode-specific scheme rules.
	if validationErr := validateCloudProviderURL(address, mode); validationErr != nil {
		return fmt.Errorf("invalid cloud provider address: %w", validationErr)
	}

	cfg, loadErr := s.getConfig(ctx)
	if loadErr != nil {
		return loadErr
	}
	cfg.CloudProviderAddress = address
	return s.saveConfig(ctx, cfg)
}
// validateCloudProviderURL validates the cloud provider URL based on the
// current mode. It returns an error if the URL is malformed, uses a scheme
// other than http/https, lacks a host, embeds credentials, or (in dev mode)
// uses plain HTTP toward anything that is not a localhost/private address.
func validateCloudProviderURL(rawURL string, mode string) error {
	if rawURL == "" {
		return fmt.Errorf("URL cannot be empty")
	}
	parsedURL, err := url.Parse(rawURL)
	if err != nil {
		return fmt.Errorf("malformed URL: %w", err)
	}
	// Validate scheme.
	scheme := strings.ToLower(parsedURL.Scheme)
	if scheme != "http" && scheme != "https" {
		return fmt.Errorf("URL scheme must be http or https, got: %s", scheme)
	}
	// Validate host is present.
	if parsedURL.Host == "" {
		return fmt.Errorf("URL must have a host")
	}
	// Security: in dev mode, allow HTTP only for genuinely local targets.
	// The previous prefix check (strings.HasPrefix(host, "10.") etc.)
	// wrongly accepted DNS names like "10.attacker.com"; we now require the
	// host to be "localhost" or parse as a loopback/private IP address.
	if mode == "dev" && scheme == "http" {
		if !isLocalDevHost(parsedURL.Hostname()) {
			return fmt.Errorf("HTTP is only allowed for localhost/local network in dev mode; use HTTPS for remote servers")
		}
	}
	// Reject URLs with credentials embedded.
	if parsedURL.User != nil {
		return fmt.Errorf("URL must not contain embedded credentials")
	}
	return nil
}

// isLocalDevHost reports whether host is "localhost" or a loopback/private
// IP address (RFC 1918 ranges: 10/8, 172.16/12, 192.168/16). Hostnames
// other than "localhost" are never considered local, even if they start
// with a private-looking prefix.
func isLocalDevHost(host string) bool {
	h := strings.ToLower(host)
	if h == "localhost" {
		return true
	}
	ip := net.ParseIP(h)
	if ip == nil {
		return false // a DNS name such as "10.attacker.com", not an IP
	}
	return ip.IsLoopback() || ip.IsPrivate()
}
// SetLoggedInUserCredentials stores the authenticated user's email, tokens
// and token-expiry times in the persisted configuration.
func (s *configService) SetLoggedInUserCredentials(
	ctx context.Context,
	email string,
	accessToken string,
	accessTokenExpiryTime *time.Time,
	refreshToken string,
	refreshTokenExpiryTime *time.Time,
) error {
	cfg, loadErr := s.getConfig(ctx)
	if loadErr != nil {
		return loadErr
	}
	creds := &Credentials{
		Email:                  email,
		AccessToken:            accessToken,
		AccessTokenExpiryTime:  accessTokenExpiryTime,
		RefreshToken:           refreshToken,
		RefreshTokenExpiryTime: refreshTokenExpiryTime,
	}
	cfg.Credentials = creds
	return s.saveConfig(ctx, cfg)
}
// GetLoggedInUserCredentials returns the authenticated user's stored
// credentials; the result may be nil if none were ever saved.
func (s *configService) GetLoggedInUserCredentials(ctx context.Context) (*Credentials, error) {
	cfg, loadErr := s.getConfig(ctx)
	if loadErr != nil {
		return nil, loadErr
	}
	return cfg.Credentials, nil
}
// ClearLoggedInUserCredentials wipes the authenticated user's credentials
// by replacing them with an all-zero Credentials value (equivalent to the
// previous explicit empty-field literal).
func (s *configService) ClearLoggedInUserCredentials(ctx context.Context) error {
	cfg, loadErr := s.getConfig(ctx)
	if loadErr != nil {
		return loadErr
	}
	cfg.Credentials = &Credentials{}
	return s.saveConfig(ctx, cfg)
}
// Desktop-specific methods
// GetWindowSize returns the configured window width and height.
func (s *configService) GetWindowSize(ctx context.Context) (width int, height int, err error) {
	cfg, loadErr := s.getConfig(ctx)
	if loadErr != nil {
		return 0, 0, loadErr
	}
	return cfg.WindowWidth, cfg.WindowHeight, nil
}

// SetWindowSize persists a new window width and height.
func (s *configService) SetWindowSize(ctx context.Context, width int, height int) error {
	cfg, loadErr := s.getConfig(ctx)
	if loadErr != nil {
		return loadErr
	}
	cfg.WindowWidth = width
	cfg.WindowHeight = height
	return s.saveConfig(ctx, cfg)
}
// GetTheme returns the configured UI theme.
func (s *configService) GetTheme(ctx context.Context) (string, error) {
	cfg, loadErr := s.getConfig(ctx)
	if loadErr != nil {
		return "", loadErr
	}
	return cfg.Theme, nil
}

// SetTheme persists a new UI theme.
func (s *configService) SetTheme(ctx context.Context, theme string) error {
	cfg, loadErr := s.getConfig(ctx)
	if loadErr != nil {
		return loadErr
	}
	cfg.Theme = theme
	return s.saveConfig(ctx, cfg)
}
// GetLanguage returns the configured language.
func (s *configService) GetLanguage(ctx context.Context) (string, error) {
	cfg, loadErr := s.getConfig(ctx)
	if loadErr != nil {
		return "", loadErr
	}
	return cfg.Language, nil
}

// SetLanguage persists a new language setting.
func (s *configService) SetLanguage(ctx context.Context, language string) error {
	cfg, loadErr := s.getConfig(ctx)
	if loadErr != nil {
		return loadErr
	}
	cfg.Language = language
	return s.saveConfig(ctx, cfg)
}
// GetSyncMode returns the configured sync mode.
func (s *configService) GetSyncMode(ctx context.Context) (string, error) {
	cfg, loadErr := s.getConfig(ctx)
	if loadErr != nil {
		return "", loadErr
	}
	return cfg.SyncMode, nil
}

// SetSyncMode persists a new sync mode.
// NOTE(review): the mode string is not validated here — confirm callers
// only pass supported values.
func (s *configService) SetSyncMode(ctx context.Context, mode string) error {
	cfg, loadErr := s.getConfig(ctx)
	if loadErr != nil {
		return loadErr
	}
	cfg.SyncMode = mode
	return s.saveConfig(ctx, cfg)
}
// GetAutoSync reports whether automatic sync is enabled.
func (s *configService) GetAutoSync(ctx context.Context) (bool, error) {
	cfg, loadErr := s.getConfig(ctx)
	if loadErr != nil {
		return false, loadErr
	}
	return cfg.AutoSync, nil
}

// SetAutoSync persists the automatic-sync toggle.
func (s *configService) SetAutoSync(ctx context.Context, enabled bool) error {
	cfg, loadErr := s.getConfig(ctx)
	if loadErr != nil {
		return loadErr
	}
	cfg.AutoSync = enabled
	return s.saveConfig(ctx, cfg)
}
// GetSyncInterval returns the sync interval in minutes.
func (s *configService) GetSyncInterval(ctx context.Context) (int, error) {
	cfg, loadErr := s.getConfig(ctx)
	if loadErr != nil {
		return 0, loadErr
	}
	return cfg.SyncIntervalMinutes, nil
}

// SetSyncInterval persists a new sync interval (in minutes).
// NOTE(review): the value is not range-checked here — confirm callers
// reject zero/negative intervals.
func (s *configService) SetSyncInterval(ctx context.Context, minutes int) error {
	cfg, loadErr := s.getConfig(ctx)
	if loadErr != nil {
		return loadErr
	}
	cfg.SyncIntervalMinutes = minutes
	return s.saveConfig(ctx, cfg)
}
// GetShowHiddenFiles reports whether hidden files should be shown.
func (s *configService) GetShowHiddenFiles(ctx context.Context) (bool, error) {
	cfg, loadErr := s.getConfig(ctx)
	if loadErr != nil {
		return false, loadErr
	}
	return cfg.ShowHiddenFiles, nil
}

// SetShowHiddenFiles persists the show-hidden-files toggle.
func (s *configService) SetShowHiddenFiles(ctx context.Context, show bool) error {
	cfg, loadErr := s.getConfig(ctx)
	if loadErr != nil {
		return loadErr
	}
	cfg.ShowHiddenFiles = show
	return s.saveConfig(ctx, cfg)
}
// GetDefaultView returns the configured default view.
func (s *configService) GetDefaultView(ctx context.Context) (string, error) {
	cfg, loadErr := s.getConfig(ctx)
	if loadErr != nil {
		return "", loadErr
	}
	return cfg.DefaultView, nil
}

// SetDefaultView persists a new default view.
func (s *configService) SetDefaultView(ctx context.Context, view string) error {
	cfg, loadErr := s.getConfig(ctx)
	if loadErr != nil {
		return loadErr
	}
	cfg.DefaultView = view
	return s.saveConfig(ctx, cfg)
}
// GetSortPreferences returns the configured sort field and direction.
func (s *configService) GetSortPreferences(ctx context.Context) (sortBy string, sortOrder string, err error) {
	cfg, loadErr := s.getConfig(ctx)
	if loadErr != nil {
		return "", "", loadErr
	}
	return cfg.SortBy, cfg.SortOrder, nil
}

// SetSortPreferences persists a new sort field and direction.
func (s *configService) SetSortPreferences(ctx context.Context, sortBy string, sortOrder string) error {
	cfg, loadErr := s.getConfig(ctx)
	if loadErr != nil {
		return loadErr
	}
	cfg.SortBy = sortBy
	cfg.SortOrder = sortOrder
	return s.saveConfig(ctx, cfg)
}
// Compile-time assertion that *configService implements ConfigService.
var _ ConfigService = (*configService)(nil)

View file

@ -0,0 +1,175 @@
// internal/config/userdata.go
package config
import (
"crypto/sha256"
"encoding/hex"
"os"
"path/filepath"
"runtime"
"strings"
)
// GetUserDataDir returns the appropriate directory for storing application
// data, following platform-specific conventions:
//   - Windows: %LOCALAPPDATA%\{appName} (falling back to %APPDATA%, then os.UserConfigDir)
//   - macOS:   ~/Library/Application Support/{appName}
//   - Linux:   $XDG_DATA_HOME/{appName} or ~/.local/share/{appName}
//
// The directory is created with owner-only (0700) permissions if missing.
func GetUserDataDir(appName string) (string, error) {
	baseDir, err := platformBaseDataDir()
	if err != nil {
		return "", err
	}
	appDataDir := filepath.Join(baseDir, appName)
	if mkErr := os.MkdirAll(appDataDir, 0700); mkErr != nil {
		return "", mkErr
	}
	return appDataDir, nil
}

// platformBaseDataDir resolves the OS-specific base directory that holds
// per-application data directories.
func platformBaseDataDir() (string, error) {
	switch runtime.GOOS {
	case "windows":
		// Prefer LOCALAPPDATA, then APPDATA, then the stdlib fallback.
		if dir := os.Getenv("LOCALAPPDATA"); dir != "" {
			return dir, nil
		}
		if dir := os.Getenv("APPDATA"); dir != "" {
			return dir, nil
		}
		return os.UserConfigDir()
	case "darwin":
		home, err := os.UserHomeDir()
		if err != nil {
			return "", err
		}
		return filepath.Join(home, "Library", "Application Support"), nil
	default:
		// Linux and other Unix-like systems: XDG Base Directory spec.
		if xdgData := os.Getenv("XDG_DATA_HOME"); xdgData != "" {
			return xdgData, nil
		}
		home, err := os.UserHomeDir()
		if err != nil {
			return "", err
		}
		return filepath.Join(home, ".local", "share"), nil
	}
}
// GetUserSpecificDataDir returns the data directory for a specific user.
// User data is isolated by hashing the email to create a unique directory
// name. This ensures:
//  1. Different users have completely separate storage
//  2. Email addresses are not exposed in directory names
//  3. The same user always gets the same directory
//
// Directory structure:
//
//	{appDataDir}/users/{emailHash}/
//	├── local_files/ # File and collection metadata (LevelDB)
//	├── sync_state/  # Sync state (LevelDB)
//	├── cache/       # Application cache (LevelDB)
//	└── files/       # Downloaded decrypted files, one subdir per collection
//
// An empty userEmail yields ("", nil) — no user logged in.
func GetUserSpecificDataDir(appName, userEmail string) (string, error) {
	if userEmail == "" {
		// No user logged in; signal with an empty path rather than an error.
		return "", nil
	}
	appDataDir, err := GetUserDataDir(appName)
	if err != nil {
		return "", err
	}
	// hashEmail keeps the directory name privacy-preserving and stable.
	userDir := filepath.Join(appDataDir, "users", hashEmail(userEmail))
	// Owner-only permissions, consistent with the rest of the data tree.
	if mkErr := os.MkdirAll(userDir, 0700); mkErr != nil {
		return "", mkErr
	}
	return userDir, nil
}
// GetUserFilesDir returns the directory where decrypted files are stored
// for a user, organized per collection: {userDir}/files/{collectionId}/.
// An empty userEmail yields ("", nil) — no user logged in.
func GetUserFilesDir(appName, userEmail string) (string, error) {
	userDir, err := GetUserSpecificDataDir(appName, userEmail)
	if err != nil || userDir == "" {
		return "", err
	}
	filesDir := filepath.Join(userDir, "files")
	if mkErr := os.MkdirAll(filesDir, 0700); mkErr != nil {
		return "", mkErr
	}
	return filesDir, nil
}
// hashEmail derives a stable, privacy-preserving directory name from an
// email address. The email is normalized (whitespace-trimmed, lowercased),
// hashed with SHA-256, and the first 16 hex characters (64 bits of entropy)
// are returned — short enough to read, unique enough in practice.
func hashEmail(email string) string {
	normalized := strings.ToLower(strings.TrimSpace(email))
	digest := sha256.Sum256([]byte(normalized))
	hexDigest := hex.EncodeToString(digest[:])
	return hexDigest[:16]
}
// GetEmailHashForPath returns the hash used for a user's directory name,
// allowing callers to check whether a user's data exists without exposing
// the email itself. An empty email yields an empty string.
func GetEmailHashForPath(userEmail string) string {
	if userEmail != "" {
		return hashEmail(userEmail)
	}
	return ""
}
// GetUserSearchIndexDir returns the path where the Bleve search index is
// stored for a user: {userDir}/search/index.bleve. The parent "search"
// directory is created (0700) if missing; the index path itself is not.
// An empty userEmail yields ("", nil) — no user logged in.
func GetUserSearchIndexDir(appName, userEmail string) (string, error) {
	userDir, err := GetUserSpecificDataDir(appName, userEmail)
	if err != nil || userDir == "" {
		return "", err
	}
	searchDir := filepath.Join(userDir, "search")
	if mkErr := os.MkdirAll(searchDir, 0700); mkErr != nil {
		return "", mkErr
	}
	return filepath.Join(searchDir, "index.bleve"), nil
}

View file

@ -0,0 +1,28 @@
package collection
// Repository defines the data access operations for locally stored
// collections (folders/albums), including the parent/root lookup indexes.
type Repository interface {
	// Create stores a new collection record.
	Create(collection *Collection) error
	// Get retrieves a collection by its ID.
	// NOTE(review): the LevelDB implementation in this module returns
	// (nil, nil) when the ID is unknown — callers must nil-check the
	// result rather than rely on a "not found" error.
	Get(id string) (*Collection, error)
	// Update modifies an existing collection record and keeps the
	// parent/root indexes consistent when the parent changes.
	Update(collection *Collection) error
	// Delete removes a collection record (and its index entry) by ID.
	Delete(id string) error
	// List returns all collection records.
	List() ([]*Collection, error)
	// ListByParent returns all collections with a specific parent ID.
	ListByParent(parentID string) ([]*Collection, error)
	// ListRoot returns all root-level collections (no parent).
	ListRoot() ([]*Collection, error)
	// Exists checks if a collection with the given ID exists.
	Exists(id string) (bool, error)
}

View file

@ -0,0 +1,98 @@
package collection
import "time"
// Collection represents a collection (folder/album) stored locally with
// sync capabilities. Most fields mirror the cloud record; the sync-tracking
// fields exist only locally. Field order is part of the JSON wire shape —
// do not reorder.
type Collection struct {
	// Identifiers (from cloud)
	ID       string `json:"id"`
	ParentID string `json:"parent_id,omitempty"` // empty for root-level collections
	OwnerID  string `json:"owner_id"`            // UserID from cloud

	// Encryption data (from cloud)
	EncryptedCollectionKey string `json:"encrypted_collection_key"`
	Nonce                  string `json:"nonce"`

	// Collection metadata (from cloud - name is decrypted client-side)
	Name        string `json:"name"`                  // Decrypted name
	Description string `json:"description,omitempty"` // Optional description

	// CustomIcon is the decrypted custom icon for this collection.
	// Empty string means use default folder/album icon.
	// Contains either an emoji character (e.g., "📷") or "icon:<identifier>" for predefined icons.
	CustomIcon string `json:"custom_icon,omitempty"`

	// Statistics (from cloud)
	TotalFiles       int   `json:"total_files"`
	TotalSizeInBytes int64 `json:"total_size_in_bytes"`

	// Sharing info (from cloud)
	PermissionLevel string `json:"permission_level,omitempty"` // read_only, read_write, admin (see constants below)
	IsOwner         bool   `json:"is_owner"`
	OwnerName       string `json:"owner_name,omitempty"`
	OwnerEmail      string `json:"owner_email,omitempty"`

	// Sync tracking (local only)
	SyncStatus   SyncStatus `json:"sync_status"`
	LastSyncedAt time.Time  `json:"last_synced_at,omitempty"`

	// State from cloud: StateActive or StateDeleted.
	State string `json:"state"` // active, deleted

	// Timestamps (from cloud)
	CreatedAt  time.Time `json:"created_at"`
	ModifiedAt time.Time `json:"modified_at"`
}
// SyncStatus enumerates how far a collection has been synchronized with
// the cloud backend.
type SyncStatus int

const (
	// SyncStatusCloudOnly indicates only the cloud metadata has been synced.
	SyncStatusCloudOnly SyncStatus = iota
	// SyncStatusSynced indicates the collection is fully synchronized.
	SyncStatusSynced
)

// syncStatusNames maps each known status to its display string.
var syncStatusNames = map[SyncStatus]string{
	SyncStatusCloudOnly: "cloud_only",
	SyncStatusSynced:    "synced",
}

// String returns a human-readable representation of the sync status;
// unrecognized values render as "unknown".
func (s SyncStatus) String() string {
	if name, ok := syncStatusNames[s]; ok {
		return name
	}
	return "unknown"
}
// Collection state constants (values of Collection.State, mirrored from the cloud).
const (
	// StateActive indicates the collection is active
	StateActive = "active"
	// StateDeleted indicates the collection is deleted
	StateDeleted = "deleted"
)

// Permission level constants (values of Collection.PermissionLevel).
// Ordered from least to most privileged; see CanWrite/CanAdmin.
const (
	PermissionReadOnly  = "read_only"
	PermissionReadWrite = "read_write"
	PermissionAdmin     = "admin"
)
// IsDeleted reports whether the collection is marked as deleted upstream.
func (col *Collection) IsDeleted() bool {
	return col.State == StateDeleted
}

// CanWrite reports whether the current user may modify the collection:
// owners always can; otherwise read_write or admin permission is required.
func (col *Collection) CanWrite() bool {
	if col.IsOwner {
		return true
	}
	switch col.PermissionLevel {
	case PermissionReadWrite, PermissionAdmin:
		return true
	default:
		return false
	}
}

// CanAdmin reports whether the current user may administer the collection:
// owners always can; otherwise admin permission is required.
func (col *Collection) CanAdmin() bool {
	if col.IsOwner {
		return true
	}
	return col.PermissionLevel == PermissionAdmin
}

View file

@ -0,0 +1,58 @@
package file
// SyncStatus enumerates where a file's content lives and whether local
// changes are pending upload.
type SyncStatus int

const (
	// SyncStatusLocalOnly indicates the file exists only locally (not uploaded to cloud).
	SyncStatusLocalOnly SyncStatus = iota
	// SyncStatusCloudOnly indicates the file exists only in the cloud (metadata synced, content not downloaded).
	SyncStatusCloudOnly
	// SyncStatusSynced indicates the file exists both locally and in the cloud and is synchronized.
	SyncStatusSynced
	// SyncStatusModifiedLocally indicates the file exists in both places but has local changes pending upload.
	SyncStatusModifiedLocally
)

// syncStatusNames maps each known status to its display string.
var syncStatusNames = map[SyncStatus]string{
	SyncStatusLocalOnly:       "local_only",
	SyncStatusCloudOnly:       "cloud_only",
	SyncStatusSynced:          "synced",
	SyncStatusModifiedLocally: "modified_locally",
}

// String returns a human-readable representation of the sync status;
// unrecognized values render as "unknown".
func (s SyncStatus) String() string {
	if name, ok := syncStatusNames[s]; ok {
		return name
	}
	return "unknown"
}
// Storage mode constants define which file versions to keep locally
// (values of File.StorageMode).
const (
	// StorageModeEncryptedOnly - Only keep encrypted version locally (most secure)
	StorageModeEncryptedOnly = "encrypted_only"
	// StorageModeDecryptedOnly - Only keep decrypted version locally (not recommended)
	StorageModeDecryptedOnly = "decrypted_only"
	// StorageModeHybrid - Keep both encrypted and decrypted versions (default, convenient)
	StorageModeHybrid = "hybrid"
)

// File state constants (values of File.State, mirrored from the cloud).
const (
	// StatePending is the initial state of a file before it is uploaded
	StatePending = "pending"
	// StateActive indicates that the file is fully uploaded and ready for use
	StateActive = "active"
	// StateDeleted marks the file as deleted
	StateDeleted = "deleted"
)

View file

@ -0,0 +1,28 @@
package file
// Repository defines the data access operations for locally stored file
// records (metadata plus local sync-tracking state).
type Repository interface {
	// Create stores a new file record.
	Create(file *File) error
	// Get retrieves a file by its ID.
	Get(id string) (*File, error)
	// Update modifies an existing file record.
	Update(file *File) error
	// Delete removes a file record by its ID.
	Delete(id string) error
	// List returns all file records.
	List() ([]*File, error)
	// ListByCollection returns all files belonging to a specific collection.
	ListByCollection(collectionID string) ([]*File, error)
	// ListByStatus returns all files with a specific sync status.
	ListByStatus(status SyncStatus) ([]*File, error)
	// Exists checks if a file with the given ID exists.
	Exists(id string) (bool, error)
}

View file

@ -0,0 +1,88 @@
package file
import "time"
// File represents a file stored locally with sync capabilities.
// This model combines cloud metadata with local storage tracking.
// Field order is part of the JSON wire shape — do not reorder.
type File struct {
	// Identifiers (from cloud)
	ID           string `json:"id"`
	CollectionID string `json:"collection_id"`
	OwnerID      string `json:"owner_id"` // UserID from cloud

	// Encryption data (from cloud API response)
	EncryptedFileKey  EncryptedFileKeyData `json:"encrypted_file_key"`
	FileKeyNonce      string               `json:"file_key_nonce"`
	EncryptedMetadata string               `json:"encrypted_metadata"`
	MetadataNonce     string               `json:"metadata_nonce"`
	FileNonce         string               `json:"file_nonce"`

	// File sizes (from cloud)
	EncryptedSizeInBytes int64 `json:"encrypted_file_size_in_bytes"`
	DecryptedSizeInBytes int64 `json:"decrypted_size_in_bytes,omitempty"`

	// Local storage paths (local only; empty when no local copy exists)
	EncryptedFilePath string `json:"encrypted_file_path,omitempty"`
	FilePath          string `json:"file_path,omitempty"`
	ThumbnailPath     string `json:"thumbnail_path,omitempty"`

	// Decrypted metadata (local only - populated after decryption)
	Name     string        `json:"name,omitempty"`
	MimeType string        `json:"mime_type,omitempty"`
	Metadata *FileMetadata `json:"metadata,omitempty"`

	// Sync tracking (local only)
	SyncStatus   SyncStatus `json:"sync_status"`
	LastSyncedAt time.Time  `json:"last_synced_at,omitempty"`

	// State from cloud: StatePending, StateActive or StateDeleted.
	State       string `json:"state"`        // pending, active, deleted
	StorageMode string `json:"storage_mode"` // encrypted_only, hybrid, decrypted_only (see StorageMode* constants)
	Version     int    `json:"version"`      // Cloud version for conflict resolution

	// Timestamps (from cloud)
	CreatedAt  time.Time `json:"created_at"`
	ModifiedAt time.Time `json:"modified_at"`

	// Thumbnail URL (from cloud, for remote access)
	ThumbnailURL string `json:"thumbnail_url,omitempty"`
}
// EncryptedFileKeyData matches the cloud API structure exactly:
// the encrypted file key ciphertext plus the nonce used to encrypt it.
type EncryptedFileKeyData struct {
	Ciphertext string `json:"ciphertext"`
	Nonce      string `json:"nonce"`
}
// FileMetadata represents decrypted file metadata (populated after
// decryption of File.EncryptedMetadata).
type FileMetadata struct {
	Name          string `json:"name"`
	MimeType      string `json:"mime_type"`
	Size          int64  `json:"size"` // decrypted size in bytes
	FileExtension string `json:"file_extension"`
}
// IsCloudOnly reports whether the file exists only in the cloud.
func (fl *File) IsCloudOnly() bool {
	return fl.SyncStatus == SyncStatusCloudOnly
}

// IsSynced reports whether the file is synchronized between local and cloud.
func (fl *File) IsSynced() bool {
	return fl.SyncStatus == SyncStatusSynced
}

// IsLocalOnly reports whether the file exists only locally.
func (fl *File) IsLocalOnly() bool {
	return fl.SyncStatus == SyncStatusLocalOnly
}

// HasLocalContent reports whether any local copy exists — either the
// decrypted file or the encrypted blob — as opposed to metadata alone.
func (fl *File) HasLocalContent() bool {
	if fl.FilePath != "" {
		return true
	}
	return fl.EncryptedFilePath != ""
}

// IsDeleted reports whether the file is marked as deleted upstream.
func (fl *File) IsDeleted() bool {
	return fl.State == StateDeleted
}

View file

@ -0,0 +1,16 @@
package session
// Repository defines data access operations for the (single) current
// authentication session.
type Repository interface {
	// Save stores a session, replacing any previous one.
	Save(session *Session) error
	// Get retrieves the current session.
	Get() (*Session, error)
	// Delete removes the current session.
	Delete() error
	// Exists checks if a session exists.
	Exists() (bool, error)
}

View file

@ -0,0 +1,30 @@
package session
import "time"
// Session represents a user authentication session (domain entity).
type Session struct {
	UserID       string
	Email        string
	AccessToken  string
	RefreshToken string
	ExpiresAt    time.Time
	CreatedAt    time.Time

	// Encrypted user data for password verification (stored during login)
	Salt                string // Base64 encoded salt for password derivation
	EncryptedMasterKey  string // Base64 encoded encrypted master key
	EncryptedPrivateKey string // Base64 encoded encrypted private key
	PublicKey           string // Base64 encoded public key
	KDFAlgorithm        string // Key derivation algorithm: "PBKDF2-SHA256"
}

// IsExpired reports whether the session's expiry time lies in the past.
func (s *Session) IsExpired() bool {
	now := time.Now()
	return now.After(s.ExpiresAt)
}

// IsValid reports whether the session is usable: not expired and carrying
// both an access token and a refresh token.
func (s *Session) IsValid() bool {
	if s.IsExpired() {
		return false
	}
	return s.AccessToken != "" && s.RefreshToken != ""
}

View file

@ -0,0 +1,13 @@
package syncstate
// Repository defines the data access operations for the (single) sync
// state record.
type Repository interface {
	// Get retrieves the current sync state.
	Get() (*SyncState, error)
	// Save persists the sync state.
	Save(state *SyncState) error
	// Reset clears the sync state (for fresh sync).
	Reset() error
}

View file

@ -0,0 +1,77 @@
package syncstate
import "time"
// SyncState tracks the synchronization progress for collections and files.
// It stores cursors from the API for incremental sync and timestamps for
// tracking when each kind of sync last completed. Field order is part of
// the JSON wire shape — do not reorder.
type SyncState struct {
	// Timestamps for tracking when sync occurred
	LastCollectionSync time.Time `json:"last_collection_sync"`
	LastFileSync       time.Time `json:"last_file_sync"`

	// Cursors from API responses (used for pagination)
	CollectionCursor string `json:"collection_cursor,omitempty"`
	FileCursor       string `json:"file_cursor,omitempty"`

	// Sync completion flags
	CollectionSyncComplete bool `json:"collection_sync_complete"`
	FileSyncComplete       bool `json:"file_sync_complete"`
}

// NewSyncState returns a SyncState with everything unsynced.
func NewSyncState() *SyncState {
	return new(SyncState)
}

// IsCollectionSyncComplete reports whether all collections have been synced.
func (s *SyncState) IsCollectionSyncComplete() bool { return s.CollectionSyncComplete }

// IsFileSyncComplete reports whether all files have been synced.
func (s *SyncState) IsFileSyncComplete() bool { return s.FileSyncComplete }

// IsFullySynced reports whether both collections and files are fully synced.
func (s *SyncState) IsFullySynced() bool {
	return s.IsCollectionSyncComplete() && s.IsFileSyncComplete()
}

// ResetCollectionSync clears the collection cursor, completion flag and
// timestamp so the next collection sync starts from scratch.
func (s *SyncState) ResetCollectionSync() {
	s.CollectionCursor = ""
	s.CollectionSyncComplete = false
	s.LastCollectionSync = time.Time{}
}

// ResetFileSync clears the file cursor, completion flag and timestamp so
// the next file sync starts from scratch.
func (s *SyncState) ResetFileSync() {
	s.FileCursor = ""
	s.FileSyncComplete = false
	s.LastFileSync = time.Time{}
}

// Reset resets both collection and file sync states.
func (s *SyncState) Reset() {
	s.ResetCollectionSync()
	s.ResetFileSync()
}

// UpdateCollectionSync records the cursor from a collection-sync page and,
// when no more pages remain, marks the sync complete and stamps the time.
func (s *SyncState) UpdateCollectionSync(cursor string, hasMore bool) {
	s.CollectionCursor = cursor
	s.CollectionSyncComplete = !hasMore
	if hasMore {
		return
	}
	s.LastCollectionSync = time.Now()
}

// UpdateFileSync records the cursor from a file-sync page and, when no
// more pages remain, marks the sync complete and stamps the time.
func (s *SyncState) UpdateFileSync(cursor string, hasMore bool) {
	s.FileCursor = cursor
	s.FileSyncComplete = !hasMore
	if hasMore {
		return
	}
	s.LastFileSync = time.Now()
}

View file

@ -0,0 +1,10 @@
package user
// Repository defines the interface for local user-profile persistence.
type Repository interface {
	// Save stores or replaces a user record.
	Save(user *User) error
	// GetByID retrieves a user by ID.
	GetByID(id string) (*User, error)
	// GetByEmail retrieves a user by email address.
	GetByEmail(email string) (*User, error)
	// Delete removes a user record by ID.
	Delete(id string) error
	// Exists checks whether a user with the given ID exists.
	Exists(id string) (bool, error)
}

View file

@ -0,0 +1,19 @@
package user
import "time"
// User represents a MapleFile user profile stored locally.
type User struct {
	ID                string
	Email             string
	FirstName         string
	LastName          string
	StorageQuotaBytes int64
	CreatedAt         time.Time
	UpdatedAt         time.Time
}

// IsValid reports whether the user carries the minimum required fields
// (a non-empty ID and email).
func (u *User) IsValid() bool {
	switch {
	case u.ID == "":
		return false
	case u.Email == "":
		return false
	default:
		return true
	}
}

View file

@ -0,0 +1,212 @@
package collection
import (
"encoding/json"
"fmt"
"strings"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/pkg/storage"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/collection"
)
// LevelDB key layout:
//   - collection:<id>                        -> JSON-encoded collection record
//   - parent_collection_index:<parent>:<id>  -> <id> (children of a parent)
//   - root_collection_index:<id>             -> <id> (root-level collections)
const (
	collectionKeyPrefix = "collection:"
	parentCollIndex     = "parent_collection_index:"
	rootCollIndex       = "root_collection_index:"
)
// repository is a LevelDB-backed implementation of collection.Repository.
type repository struct {
	storage storage.Storage
}

// ProvideRepository creates a new collection repository for Wire.
func ProvideRepository(storage storage.Storage) collection.Repository {
	return &repository{storage: storage}
}
// Create stores a new collection record and its parent/root index entry.
// It is equivalent to save; no duplicate-ID check is performed here.
func (r *repository) Create(c *collection.Collection) error {
	return r.save(c)
}
// Get retrieves a collection by its ID. A missing record yields (nil, nil)
// rather than an error, so callers must nil-check the result.
func (r *repository) Get(id string) (*collection.Collection, error) {
	raw, err := r.storage.Get(collectionKeyPrefix + id)
	if err != nil {
		return nil, fmt.Errorf("failed to get collection: %w", err)
	}
	if raw == nil {
		return nil, nil
	}
	result := new(collection.Collection)
	if unmarshalErr := json.Unmarshal(raw, result); unmarshalErr != nil {
		return nil, fmt.Errorf("failed to unmarshal collection: %w", unmarshalErr)
	}
	return result, nil
}
// Update modifies an existing collection record. When the parent changed,
// the stale parent/root index entry is removed (best effort) before the
// record and its new index are written by save.
func (r *repository) Update(c *collection.Collection) error {
	existing, err := r.Get(c.ID)
	if err != nil {
		return fmt.Errorf("failed to get existing collection: %w", err)
	}
	if existing != nil && existing.ParentID != c.ParentID {
		var staleKey string
		if existing.ParentID == "" {
			staleKey = rootCollIndex + existing.ID
		} else {
			staleKey = parentCollIndex + existing.ParentID + ":" + existing.ID
		}
		// Best-effort cleanup: a leftover index entry only yields an extra
		// lookup later, so the error is deliberately ignored.
		_ = r.storage.Delete(staleKey)
	}
	return r.save(c)
}
// Delete removes a collection record and its parent/root index entry.
// Deleting an unknown ID is a no-op.
func (r *repository) Delete(id string) error {
	c, err := r.Get(id)
	if err != nil {
		return err
	}
	if c == nil {
		return nil // nothing stored under this ID
	}
	// Remove the secondary index first, then the record itself.
	if c.ParentID == "" {
		if delErr := r.storage.Delete(rootCollIndex + id); delErr != nil {
			return fmt.Errorf("failed to delete root index: %w", delErr)
		}
	} else {
		if delErr := r.storage.Delete(parentCollIndex + c.ParentID + ":" + id); delErr != nil {
			return fmt.Errorf("failed to delete parent index: %w", delErr)
		}
	}
	if delErr := r.storage.Delete(collectionKeyPrefix + id); delErr != nil {
		return fmt.Errorf("failed to delete collection: %w", delErr)
	}
	return nil
}
// List returns every stored collection record by scanning keys under the
// "collection:" prefix.
func (r *repository) List() ([]*collection.Collection, error) {
	var out []*collection.Collection
	iterErr := r.storage.Iterate(func(key, value []byte) error {
		if !strings.HasPrefix(string(key), collectionKeyPrefix) {
			return nil // index entry or unrelated key
		}
		c := new(collection.Collection)
		if err := json.Unmarshal(value, c); err != nil {
			return fmt.Errorf("failed to unmarshal collection: %w", err)
		}
		out = append(out, c)
		return nil
	})
	if iterErr != nil {
		return nil, fmt.Errorf("failed to list collections: %w", iterErr)
	}
	return out, nil
}
// ListByParent returns all collections indexed under the given parent.
// Index entries whose target record is missing are silently skipped.
func (r *repository) ListByParent(parentID string) ([]*collection.Collection, error) {
	prefix := parentCollIndex + parentID + ":"
	var out []*collection.Collection
	iterErr := r.storage.Iterate(func(key, value []byte) error {
		keyStr := string(key)
		if !strings.HasPrefix(keyStr, prefix) {
			return nil
		}
		// The index key embeds the collection ID after the prefix.
		c, err := r.Get(strings.TrimPrefix(keyStr, prefix))
		if err != nil {
			return err
		}
		if c != nil {
			out = append(out, c)
		}
		return nil
	})
	if iterErr != nil {
		return nil, fmt.Errorf("failed to list collections by parent: %w", iterErr)
	}
	return out, nil
}
// ListRoot returns all top-level collections (those with no parent),
// resolved via the root index. Dangling index entries are skipped.
func (r *repository) ListRoot() ([]*collection.Collection, error) {
	var out []*collection.Collection
	iterErr := r.storage.Iterate(func(key, value []byte) error {
		keyStr := string(key)
		if !strings.HasPrefix(keyStr, rootCollIndex) {
			return nil
		}
		// The index key embeds the collection ID after the prefix.
		c, err := r.Get(strings.TrimPrefix(keyStr, rootCollIndex))
		if err != nil {
			return err
		}
		if c != nil {
			out = append(out, c)
		}
		return nil
	})
	if iterErr != nil {
		return nil, fmt.Errorf("failed to list root collections: %w", iterErr)
	}
	return out, nil
}
// Exists reports whether a collection record is stored under the given ID.
// Existence is determined by a non-nil payload, mirroring Get's contract.
func (r *repository) Exists(id string) (bool, error) {
	data, err := r.storage.Get(collectionKeyPrefix + id)
	if err != nil {
		return false, fmt.Errorf("failed to check collection existence: %w", err)
	}
	return data != nil, nil
}
// save persists the collection record and writes the secondary index entry
// that ListRoot (parentless) or ListByParent (nested) relies on.
func (r *repository) save(c *collection.Collection) error {
	data, err := json.Marshal(c)
	if err != nil {
		return fmt.Errorf("failed to marshal collection: %w", err)
	}
	if err := r.storage.Set(collectionKeyPrefix+c.ID, data); err != nil {
		return fmt.Errorf("failed to save collection: %w", err)
	}
	// Parentless collections are indexed under the root prefix; nested ones
	// under their parent's ID.
	if c.ParentID == "" {
		if err := r.storage.Set(rootCollIndex+c.ID, []byte(c.ID)); err != nil {
			return fmt.Errorf("failed to create root index: %w", err)
		}
		return nil
	}
	if err := r.storage.Set(parentCollIndex+c.ParentID+":"+c.ID, []byte(c.ID)); err != nil {
		return fmt.Errorf("failed to create parent index: %w", err)
	}
	return nil
}

View file

@ -0,0 +1,213 @@
package file
import (
"encoding/json"
"fmt"
"strings"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/file"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/pkg/storage"
)
// Key prefixes used to namespace records and secondary indexes in the
// shared key-value store.
const (
	fileKeyPrefix = "file:" // "file:<fileID>" -> JSON-encoded file record
	collectionFileIndex = "collection_file_index:" // "...:<collectionID>:<fileID>" -> fileID
	statusFileIndex = "status_file_index:" // "...:<status>:<fileID>" -> fileID
)
// repository is a key-value-store-backed implementation of file.Repository.
type repository struct {
	storage storage.Storage
}
// ProvideRepository creates a new file repository for Wire
// dependency injection, backed by the given key-value storage.
func ProvideRepository(storage storage.Storage) file.Repository {
	return &repository{storage: storage}
}
// Create persists a new file record along with its collection and status
// index entries (via save).
func (r *repository) Create(f *file.File) error {
	return r.save(f)
}
// Get loads a file record by ID, returning (nil, nil) when no record
// exists under that ID.
func (r *repository) Get(id string) (*file.File, error) {
	data, err := r.storage.Get(fileKeyPrefix + id)
	if err != nil {
		return nil, fmt.Errorf("failed to get file: %w", err)
	}
	if data == nil {
		return nil, nil
	}
	f := new(file.File)
	if err := json.Unmarshal(data, f); err != nil {
		return nil, fmt.Errorf("failed to unmarshal file: %w", err)
	}
	return f, nil
}
// Update rewrites a file record, pruning index entries that point at the
// record's previous collection or sync status when either has changed.
func (r *repository) Update(f *file.File) error {
	prev, err := r.Get(f.ID)
	if err != nil {
		return fmt.Errorf("failed to get existing file: %w", err)
	}
	if prev != nil {
		if prev.CollectionID != f.CollectionID {
			// Best-effort removal of the stale collection index; save
			// rewrites the authoritative entry below.
			_ = r.storage.Delete(collectionFileIndex + prev.CollectionID + ":" + prev.ID)
		}
		if prev.SyncStatus != f.SyncStatus {
			// Best-effort removal of the stale status index.
			_ = r.storage.Delete(statusFileIndex + prev.SyncStatus.String() + ":" + prev.ID)
		}
	}
	return r.save(f)
}
// Delete removes a file record together with its collection and status
// index entries. Deleting a file that does not exist is a no-op.
func (r *repository) Delete(id string) error {
	f, err := r.Get(id)
	if err != nil {
		return err
	}
	if f == nil {
		return nil
	}
	// Remove both secondary indexes before the record itself.
	if err := r.storage.Delete(collectionFileIndex + f.CollectionID + ":" + id); err != nil {
		return fmt.Errorf("failed to delete collection index: %w", err)
	}
	if err := r.storage.Delete(statusFileIndex + f.SyncStatus.String() + ":" + id); err != nil {
		return fmt.Errorf("failed to delete status index: %w", err)
	}
	if err := r.storage.Delete(fileKeyPrefix + id); err != nil {
		return fmt.Errorf("failed to delete file: %w", err)
	}
	return nil
}
// List returns every file record stored locally by scanning the record
// prefix (index keys are skipped).
func (r *repository) List() ([]*file.File, error) {
	var out []*file.File
	iterErr := r.storage.Iterate(func(key, value []byte) error {
		if !strings.HasPrefix(string(key), fileKeyPrefix) {
			return nil
		}
		f := new(file.File)
		if err := json.Unmarshal(value, f); err != nil {
			return fmt.Errorf("failed to unmarshal file: %w", err)
		}
		out = append(out, f)
		return nil
	})
	if iterErr != nil {
		return nil, fmt.Errorf("failed to list files: %w", iterErr)
	}
	return out, nil
}
// ListByCollection returns all files indexed under the given collection.
// Index entries whose target record is missing are silently skipped.
func (r *repository) ListByCollection(collectionID string) ([]*file.File, error) {
	prefix := collectionFileIndex + collectionID + ":"
	var out []*file.File
	iterErr := r.storage.Iterate(func(key, value []byte) error {
		keyStr := string(key)
		if !strings.HasPrefix(keyStr, prefix) {
			return nil
		}
		// The index key embeds the file ID after the prefix.
		f, err := r.Get(strings.TrimPrefix(keyStr, prefix))
		if err != nil {
			return err
		}
		if f != nil {
			out = append(out, f)
		}
		return nil
	})
	if iterErr != nil {
		return nil, fmt.Errorf("failed to list files by collection: %w", iterErr)
	}
	return out, nil
}
// ListByStatus returns all files indexed under the given sync status.
// Index entries whose target record is missing are silently skipped.
func (r *repository) ListByStatus(status file.SyncStatus) ([]*file.File, error) {
	prefix := statusFileIndex + status.String() + ":"
	var out []*file.File
	iterErr := r.storage.Iterate(func(key, value []byte) error {
		keyStr := string(key)
		if !strings.HasPrefix(keyStr, prefix) {
			return nil
		}
		// The index key embeds the file ID after the prefix.
		f, err := r.Get(strings.TrimPrefix(keyStr, prefix))
		if err != nil {
			return err
		}
		if f != nil {
			out = append(out, f)
		}
		return nil
	})
	if iterErr != nil {
		return nil, fmt.Errorf("failed to list files by status: %w", iterErr)
	}
	return out, nil
}
// Exists reports whether a file record is stored under the given ID.
// Existence is determined by a non-nil payload, mirroring Get's contract.
func (r *repository) Exists(id string) (bool, error) {
	data, err := r.storage.Get(fileKeyPrefix + id)
	if err != nil {
		return false, fmt.Errorf("failed to check file existence: %w", err)
	}
	return data != nil, nil
}
// save persists the file record and (re)writes the secondary index entries
// that ListByCollection and ListByStatus rely on.
func (r *repository) save(f *file.File) error {
	data, err := json.Marshal(f)
	if err != nil {
		return fmt.Errorf("failed to marshal file: %w", err)
	}
	if err := r.storage.Set(fileKeyPrefix+f.ID, data); err != nil {
		return fmt.Errorf("failed to save file: %w", err)
	}
	if err := r.storage.Set(collectionFileIndex+f.CollectionID+":"+f.ID, []byte(f.ID)); err != nil {
		return fmt.Errorf("failed to create collection index: %w", err)
	}
	if err := r.storage.Set(statusFileIndex+f.SyncStatus.String()+":"+f.ID, []byte(f.ID)); err != nil {
		return fmt.Errorf("failed to create status index: %w", err)
	}
	return nil
}

View file

@ -0,0 +1,55 @@
package session
import (
"encoding/json"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/session"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/pkg/storage"
)
// sessionKey is the fixed storage key: only one session is persisted at a time.
const sessionKey = "current_session"
// repository is a key-value-store-backed implementation of session.Repository.
type repository struct {
	storage storage.Storage
}
// ProvideRepository creates a session repository for Wire
// dependency injection, backed by the given key-value storage.
func ProvideRepository(storage storage.Storage) session.Repository {
	return &repository{storage: storage}
}
// Save serializes the session as JSON and stores it under the fixed
// session key, overwriting any previous session.
func (r *repository) Save(sess *session.Session) error {
	payload, marshalErr := json.Marshal(sess)
	if marshalErr != nil {
		return marshalErr
	}
	return r.storage.Set(sessionKey, payload)
}
// Get loads the persisted session, returning (nil, nil) when none is stored.
func (r *repository) Get() (*session.Session, error) {
	raw, err := r.storage.Get(sessionKey)
	if err != nil {
		return nil, err
	}
	if raw == nil {
		return nil, nil
	}
	sess := new(session.Session)
	if err := json.Unmarshal(raw, sess); err != nil {
		return nil, err
	}
	return sess, nil
}
// Delete removes the persisted session record, if any.
func (r *repository) Delete() error {
	return r.storage.Delete(sessionKey)
}
// Exists reports whether a session record is currently persisted.
func (r *repository) Exists() (bool, error) {
	raw, err := r.storage.Get(sessionKey)
	if err != nil {
		return false, err
	}
	return raw != nil, nil
}

View file

@ -0,0 +1,58 @@
package syncstate
import (
"encoding/json"
"fmt"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/syncstate"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/pkg/storage"
)
// syncStateKey is the fixed storage key: a single sync-state record is kept.
const syncStateKey = "sync_state"
// repository is a key-value-store-backed implementation of syncstate.Repository.
type repository struct {
	storage storage.Storage
}
// ProvideRepository creates a new syncstate repository for Wire
// dependency injection, backed by the given key-value storage.
func ProvideRepository(storage storage.Storage) syncstate.Repository {
	return &repository{storage: storage}
}
// Get loads the persisted sync state. When nothing has been saved yet
// (nil payload), a fresh empty state is returned instead of an error.
func (r *repository) Get() (*syncstate.SyncState, error) {
	raw, err := r.storage.Get(syncStateKey)
	if err != nil {
		return nil, fmt.Errorf("failed to get sync state: %w", err)
	}
	if raw == nil {
		return syncstate.NewSyncState(), nil
	}
	state := new(syncstate.SyncState)
	if err := json.Unmarshal(raw, state); err != nil {
		return nil, fmt.Errorf("failed to unmarshal sync state: %w", err)
	}
	return state, nil
}
// Save serializes and persists the given sync state, overwriting any
// previously stored state.
func (r *repository) Save(state *syncstate.SyncState) error {
	payload, err := json.Marshal(state)
	if err != nil {
		return fmt.Errorf("failed to marshal sync state: %w", err)
	}
	if setErr := r.storage.Set(syncStateKey, payload); setErr != nil {
		return fmt.Errorf("failed to save sync state: %w", setErr)
	}
	return nil
}
// Reset removes the persisted sync state so the next Get returns a fresh
// empty state.
func (r *repository) Reset() error {
	err := r.storage.Delete(syncStateKey)
	if err == nil {
		return nil
	}
	return fmt.Errorf("failed to reset sync state: %w", err)
}

View file

@ -0,0 +1,105 @@
package user
import (
"encoding/json"
"fmt"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/pkg/storage"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/user"
)
// Key prefixes for user records and the email lookup index.
const (
	userKeyPrefix = "user:" // "user:<userID>" -> JSON-encoded user record
	emailKeyIndex = "email_index:" // "email_index:<email>" -> userID
)
// repository is a key-value-store-backed implementation of user.Repository.
type repository struct {
	storage storage.Storage
}
// ProvideRepository creates a new user repository backed by the given
// key-value storage.
func ProvideRepository(storage storage.Storage) user.Repository {
	return &repository{
		storage: storage,
	}
}
// Save persists the user by ID and maintains the email -> ID index.
//
// If the user's email changed since the last save, the stale email index
// entry is removed first; otherwise GetByEmail would keep resolving the old
// address to this user forever. (The collection and file repositories do
// the equivalent cleanup in their Update methods.)
func (r *repository) Save(u *user.User) error {
	data, err := json.Marshal(u)
	if err != nil {
		return fmt.Errorf("failed to marshal user: %w", err)
	}
	userKey := userKeyPrefix + u.ID
	// Look up any previously saved record to detect an email change.
	// Failures here are non-fatal: the new index write below is authoritative.
	if prev, getErr := r.storage.Get(userKey); getErr == nil && prev != nil {
		var old user.User
		if json.Unmarshal(prev, &old) == nil && old.Email != u.Email {
			// Best-effort removal of the stale email index entry.
			_ = r.storage.Delete(emailKeyIndex + old.Email)
		}
	}
	// Save user by ID
	if err := r.storage.Set(userKey, data); err != nil {
		return fmt.Errorf("failed to save user: %w", err)
	}
	// Create email index
	if err := r.storage.Set(emailKeyIndex+u.Email, []byte(u.ID)); err != nil {
		return fmt.Errorf("failed to create email index: %w", err)
	}
	return nil
}
// GetByID loads a user record by ID. A missing user is reported as an
// explicit "user not found" error.
//
// The storage layer returns nil data with a nil error for a missing key
// (as the sibling repositories' nil checks demonstrate); without the nil
// check below, json.Unmarshal(nil, ...) would fail with an opaque
// "unexpected end of JSON input" error instead.
func (r *repository) GetByID(id string) (*user.User, error) {
	data, err := r.storage.Get(userKeyPrefix + id)
	if err != nil {
		return nil, fmt.Errorf("failed to get user: %w", err)
	}
	if data == nil {
		// Preserve the error-on-missing contract (Delete relies on it),
		// but with a meaningful message.
		return nil, fmt.Errorf("user not found: %s", id)
	}
	var u user.User
	if err := json.Unmarshal(data, &u); err != nil {
		return nil, fmt.Errorf("failed to unmarshal user: %w", err)
	}
	return &u, nil
}
// GetByEmail resolves a user ID via the email index, then loads the record.
//
// The storage layer returns nil data with a nil error for a missing key,
// so the nil check below is required: without it a missing index entry
// produced an empty userID and a confusing downstream decode error.
// The email itself is deliberately kept out of the error message (this
// codebase masks emails elsewhere via utils.MaskEmail).
func (r *repository) GetByEmail(email string) (*user.User, error) {
	idData, err := r.storage.Get(emailKeyIndex + email)
	if err != nil {
		return nil, fmt.Errorf("user not found by email: %w", err)
	}
	if idData == nil {
		return nil, fmt.Errorf("user not found by email")
	}
	return r.GetByID(string(idData))
}
// Delete removes a user record together with its email index entry.
//
// NOTE(review): GetByID returns an error for a missing user, so deleting a
// nonexistent user surfaces that error rather than being a silent no-op
// (unlike the collection/file repositories' Delete). Confirm this
// asymmetry is intended.
func (r *repository) Delete(id string) error {
	// Get user first to remove email index
	u, err := r.GetByID(id)
	if err != nil {
		return err
	}
	// Delete email index
	emailKey := emailKeyIndex + u.Email
	if err := r.storage.Delete(emailKey); err != nil {
		return fmt.Errorf("failed to delete email index: %w", err)
	}
	// Delete user
	userKey := userKeyPrefix + id
	if err := r.storage.Delete(userKey); err != nil {
		return fmt.Errorf("failed to delete user: %w", err)
	}
	return nil
}
// Exists reports whether a user record is stored under the given ID.
//
// Bug fix: the storage layer returns (nil, nil) for a missing key — the
// sibling repositories' Exists implementations all test `data != nil` —
// so the previous version, which treated any nil error as "exists",
// reported true for users that were never saved, and silently swallowed
// real storage errors. Existence must be determined from the payload.
func (r *repository) Exists(id string) (bool, error) {
	data, err := r.storage.Get(userKeyPrefix + id)
	if err != nil {
		return false, fmt.Errorf("failed to check user existence: %w", err)
	}
	return data != nil, nil
}

View file

@ -0,0 +1,281 @@
package auth
import (
"context"
"time"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/maplefile/client"
domainSession "codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/session"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/usecase/session"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/utils"
)
// Service implements authentication flows (OTT login, registration, email
// verification, account recovery) against the MapleFile backend, and keeps
// the locally persisted session in sync with the API client's tokens.
type Service struct {
	apiClient *client.Client // backend API client; holds access/refresh tokens
	createSessionUC *session.CreateUseCase // persists a new session after login
	getSessionUC *session.GetByIdUseCase // loads the currently stored session
	deleteSessionUC *session.DeleteUseCase // removes the session on logout
	saveSessionUC *session.SaveUseCase // saves session updates (e.g. refreshed tokens)
	logger *zap.Logger
}
// ProvideService creates the auth service for Wire.
//
// It also registers a token-refresh callback on the API client so that
// whenever the client silently refreshes the access token, the new tokens
// are persisted to the local session (otherwise a refreshed token would be
// lost on app restart).
func ProvideService(
	apiClient *client.Client,
	createSessionUC *session.CreateUseCase,
	getSessionUC *session.GetByIdUseCase,
	deleteSessionUC *session.DeleteUseCase,
	saveSessionUC *session.SaveUseCase,
	logger *zap.Logger,
) *Service {
	svc := &Service{
		apiClient: apiClient,
		createSessionUC: createSessionUC,
		getSessionUC: getSessionUC,
		deleteSessionUC: deleteSessionUC,
		saveSessionUC: saveSessionUC,
		logger: logger.Named("auth-service"),
	}
	// Set up token refresh callback to persist new tokens to session
	apiClient.OnTokenRefresh(func(accessToken, refreshToken, accessTokenExpiryDate string) {
		svc.handleTokenRefresh(accessToken, refreshToken, accessTokenExpiryDate)
	})
	return svc
}
// handleTokenRefresh is called when the API client automatically refreshes
// the access token. It copies the new tokens into the persisted session so
// the refreshed credentials survive an app restart. Failures are logged
// but not propagated — there is no caller to return an error to.
func (s *Service) handleTokenRefresh(accessToken, refreshToken, accessTokenExpiryDate string) {
	// Get the current session
	existingSession, err := s.getSessionUC.Execute()
	if err != nil {
		s.logger.Error("Failed to get session during token refresh callback", zap.Error(err))
		return
	}
	if existingSession == nil {
		// Can happen if the user logged out while a refresh was in flight.
		s.logger.Warn("No session found during token refresh callback")
		return
	}
	// Update the session with new tokens
	existingSession.AccessToken = accessToken
	existingSession.RefreshToken = refreshToken
	// Parse the actual expiry date from the response instead of using hardcoded value.
	// 15 minutes matches the backend's default token lifetime and is only a fallback.
	if accessTokenExpiryDate != "" {
		expiryTime, parseErr := time.Parse(time.RFC3339, accessTokenExpiryDate)
		if parseErr != nil {
			s.logger.Warn("Failed to parse access token expiry date, using default 15m",
				zap.String("expiry_date", accessTokenExpiryDate),
				zap.Error(parseErr))
			existingSession.ExpiresAt = time.Now().Add(15 * time.Minute)
		} else {
			existingSession.ExpiresAt = expiryTime
			s.logger.Debug("Using actual token expiry from response",
				zap.Time("expiry_time", expiryTime))
		}
	} else {
		s.logger.Warn("No access token expiry date in refresh response, using default 15m")
		existingSession.ExpiresAt = time.Now().Add(15 * time.Minute)
	}
	// Save updated session
	if err := s.saveSessionUC.Execute(existingSession); err != nil {
		s.logger.Error("Failed to save session after token refresh", zap.Error(err))
		return
	}
	s.logger.Info("Session updated with refreshed tokens", zap.String("email", utils.MaskEmail(existingSession.Email)))
}
// RequestOTT asks the backend to email a one-time token for login.
// The email is masked before logging.
func (s *Service) RequestOTT(ctx context.Context, email string) error {
	if _, err := s.apiClient.RequestOTT(ctx, email); err != nil {
		s.logger.Error("Failed to request OTT", zap.Error(err))
		return err
	}
	s.logger.Info("OTT requested successfully", zap.String("email", utils.MaskEmail(email)))
	return nil
}
// VerifyOTT verifies the one-time token and returns the encrypted
// challenge from the backend.
func (s *Service) VerifyOTT(ctx context.Context, email, ott string) (*client.VerifyOTTResponse, error) {
	resp, verifyErr := s.apiClient.VerifyOTT(ctx, email, ott)
	if verifyErr == nil {
		s.logger.Info("OTT verified successfully", zap.String("email", utils.MaskEmail(email)))
		return resp, nil
	}
	s.logger.Error("OTT verification failed", zap.Error(verifyErr))
	return nil, verifyErr
}
// CompleteLogin completes the login process with OTT and challenge,
// then persists a local session with the returned tokens.
//
// The session lifetime is taken from the response's expiry timestamp when
// present; otherwise it falls back to 15 minutes (the backend default).
func (s *Service) CompleteLogin(ctx context.Context, input *client.CompleteLoginInput) (*client.LoginResponse, error) {
	// Complete login via API
	resp, err := s.apiClient.CompleteLogin(ctx, input)
	if err != nil {
		s.logger.Error("Login failed", zap.Error(err))
		return nil, err
	}
	// Parse expiration time from response
	var expiresIn time.Duration
	if resp.AccessTokenExpiryDate != "" {
		expiryTime, parseErr := time.Parse(time.RFC3339, resp.AccessTokenExpiryDate)
		if parseErr != nil {
			s.logger.Warn("Failed to parse access token expiry date, using default 15m",
				zap.String("expiry_date", resp.AccessTokenExpiryDate),
				zap.Error(parseErr))
			expiresIn = 15 * time.Minute // Default to 15 minutes (backend default)
		} else {
			expiresIn = time.Until(expiryTime)
			s.logger.Info("Parsed access token expiry",
				zap.Time("expiry_time", expiryTime),
				zap.Duration("expires_in", expiresIn))
		}
	} else {
		s.logger.Warn("No access token expiry date in response, using default 15m")
		expiresIn = 15 * time.Minute // Default to 15 minutes (backend default)
	}
	// Use email as userID for now (can be improved later)
	// NOTE(review): sessions created here have UserID == Email; anything
	// keying off UserID sees the email until a real ID is plumbed through.
	userID := input.Email
	// Save session locally via use case
	err = s.createSessionUC.Execute(
		userID,
		input.Email,
		resp.AccessToken,
		resp.RefreshToken,
		expiresIn,
	)
	if err != nil {
		s.logger.Error("Failed to save session", zap.Error(err))
		return nil, err
	}
	s.logger.Info("User logged in successfully", zap.String("email", utils.MaskEmail(input.Email)))
	return resp, nil
}
// Logout removes the locally persisted session. It does not contact the
// backend; only the delete-session use case is invoked.
func (s *Service) Logout(ctx context.Context) error {
	if err := s.deleteSessionUC.Execute(); err != nil {
		s.logger.Error("Failed to delete session", zap.Error(err))
		return err
	}
	s.logger.Info("User logged out successfully")
	return nil
}
// GetCurrentSession retrieves the current user session from local storage.
func (s *Service) GetCurrentSession(ctx context.Context) (*domainSession.Session, error) {
	sess, err := s.getSessionUC.Execute()
	if err == nil {
		return sess, nil
	}
	s.logger.Error("Failed to get session", zap.Error(err))
	return nil, err
}
// UpdateSession persists the given session as the current one.
// NOTE(review): ctx is accepted for interface symmetry but is unused here.
func (s *Service) UpdateSession(ctx context.Context, sess *domainSession.Session) error {
	return s.saveSessionUC.Execute(sess)
}
// IsLoggedIn reports whether a valid (stored and unexpired, per
// Session.IsValid) session currently exists.
func (s *Service) IsLoggedIn(ctx context.Context) (bool, error) {
	sess, err := s.getSessionUC.Execute()
	switch {
	case err != nil:
		return false, err
	case sess == nil:
		return false, nil
	default:
		return sess.IsValid(), nil
	}
}
// RestoreSession pushes a persisted session's tokens back into the API
// client. Called on app startup to resume a session from a previous run;
// a nil session is a no-op.
func (s *Service) RestoreSession(ctx context.Context, sess *domainSession.Session) error {
	if sess == nil {
		return nil
	}
	s.apiClient.SetTokens(sess.AccessToken, sess.RefreshToken)
	s.logger.Info("Session restored to API client",
		zap.String("user_id", sess.UserID),
		zap.String("email", utils.MaskEmail(sess.Email)))
	return nil
}
// Register creates a new user account on the backend.
func (s *Service) Register(ctx context.Context, input *client.RegisterInput) error {
	if _, err := s.apiClient.Register(ctx, input); err != nil {
		s.logger.Error("Registration failed", zap.Error(err))
		return err
	}
	s.logger.Info("User registered successfully", zap.String("email", utils.MaskEmail(input.Email)))
	return nil
}
// VerifyEmail confirms a registration by submitting the emailed
// verification code to the backend.
func (s *Service) VerifyEmail(ctx context.Context, input *client.VerifyEmailInput) error {
	if _, err := s.apiClient.VerifyEmailCode(ctx, input); err != nil {
		s.logger.Error("Email verification failed", zap.Error(err))
		return err
	}
	s.logger.Info("Email verified successfully", zap.String("email", utils.MaskEmail(input.Email)))
	return nil
}
// GetAPIClient returns the API client instance so other parts of the
// application can make authenticated API calls with the same tokens.
func (s *Service) GetAPIClient() *client.Client {
	return s.apiClient
}
// InitiateRecovery starts the account recovery process for the given
// email using the requested recovery method.
func (s *Service) InitiateRecovery(ctx context.Context, email, method string) (*client.RecoveryInitiateResponse, error) {
	resp, initErr := s.apiClient.RecoveryInitiate(ctx, email, method)
	if initErr == nil {
		s.logger.Info("Recovery initiated successfully", zap.String("email", utils.MaskEmail(email)))
		return resp, nil
	}
	s.logger.Error("Recovery initiation failed", zap.Error(initErr))
	return nil, initErr
}
// VerifyRecovery submits the recovery challenge answer to the backend.
func (s *Service) VerifyRecovery(ctx context.Context, input *client.RecoveryVerifyInput) (*client.RecoveryVerifyResponse, error) {
	resp, verifyErr := s.apiClient.RecoveryVerify(ctx, input)
	if verifyErr == nil {
		s.logger.Info("Recovery verification successful")
		return resp, nil
	}
	s.logger.Error("Recovery verification failed", zap.Error(verifyErr))
	return nil, verifyErr
}
// CompleteRecovery finalizes account recovery and resets credentials.
func (s *Service) CompleteRecovery(ctx context.Context, input *client.RecoveryCompleteInput) (*client.RecoveryCompleteResponse, error) {
	resp, completeErr := s.apiClient.RecoveryComplete(ctx, input)
	if completeErr == nil {
		s.logger.Info("Recovery completed successfully")
		return resp, nil
	}
	s.logger.Error("Recovery completion failed", zap.Error(completeErr))
	return nil, completeErr
}

View file

@ -0,0 +1,199 @@
package httpclient
import (
"net"
"net/http"
"time"
)
// Service provides an HTTP client with proper timeouts.
// This addresses OWASP security concern B1: using http.DefaultClient which has
// no timeouts and can be vulnerable to slowloris attacks and resource exhaustion.
//
// Note: TLS/SSL is handled by Caddy reverse proxy in production (see OWASP report
// A04-4.1 "Certificate Pinning Not Required" - BY DESIGN). This service focuses
// on adding timeouts, not TLS configuration.
//
// For large file downloads, use DoDownloadNoTimeout() which relies on the request's
// context for cancellation instead of a fixed timeout. This allows multi-gigabyte
// files to download without timeout issues while still being cancellable.
//
// NOTE(review): no method named DoDownloadNoTimeout exists below — the
// actual entry points are DoLargeDownload/GetLargeDownload; confirm and
// align the naming.
type Service struct {
	// client is the configured HTTP client for API requests (RequestTimeout)
	client *http.Client
	// downloadClient is a separate client for file downloads with longer timeouts
	downloadClient *http.Client
	// noTimeoutClient is for large file downloads where context controls cancellation
	noTimeoutClient *http.Client
}
// Config holds configuration options for the HTTP client service.
// Zero values are NOT replaced with defaults by NewService; use
// DefaultConfig (or ProvideService) to get the documented defaults.
type Config struct {
	// RequestTimeout is the overall timeout for API requests (default: 30s)
	RequestTimeout time.Duration
	// DownloadTimeout is the overall timeout for file downloads (default: 10m)
	DownloadTimeout time.Duration
	// ConnectTimeout is the timeout for establishing connections (default: 10s)
	ConnectTimeout time.Duration
	// TLSHandshakeTimeout is the timeout for TLS handshake (default: 10s)
	TLSHandshakeTimeout time.Duration
	// IdleConnTimeout is how long idle connections stay in the pool (default: 90s)
	IdleConnTimeout time.Duration
	// MaxIdleConns is the max number of idle connections (default: 100)
	MaxIdleConns int
	// MaxIdleConnsPerHost is the max idle connections per host (default: 10)
	MaxIdleConnsPerHost int
}
// DefaultConfig returns sensible default configuration values for the
// HTTP client service.
func DefaultConfig() Config {
	var cfg Config
	cfg.RequestTimeout = 30 * time.Second
	cfg.DownloadTimeout = 10 * time.Minute
	cfg.ConnectTimeout = 10 * time.Second
	cfg.TLSHandshakeTimeout = 10 * time.Second
	cfg.IdleConnTimeout = 90 * time.Second
	cfg.MaxIdleConns = 100
	cfg.MaxIdleConnsPerHost = 10
	return cfg
}
// ProvideService creates a new HTTP client service with secure defaults
// (DefaultConfig); intended as the Wire provider.
func ProvideService() *Service {
	return NewService(DefaultConfig())
}
// NewService creates a new HTTP client service with the given configuration.
//
// Three clients are built, all sharing the same dial/TLS/idle-pool
// settings but differing in overall timeout and compression handling:
//   - client:          API requests, cfg.RequestTimeout
//   - downloadClient:  file downloads, cfg.DownloadTimeout, no compression
//   - noTimeoutClient: large downloads, no overall timeout (context-cancelled)
//
// The previous version spelled out the identical transport literal three
// times; the construction is factored into newTransport so the shared
// settings cannot drift apart. Each client still gets its own transport so
// the connection pools stay independent.
//
// Note: We don't set TLSClientConfig - Go's defaults are secure and
// production uses Caddy for TLS termination anyway.
func NewService(cfg Config) *Service {
	return &Service{
		// Main client for API requests.
		client: &http.Client{
			Transport: newTransport(cfg, false),
			Timeout:   cfg.RequestTimeout,
		},
		// Download client with a longer overall timeout. Compression is
		// disabled to avoid decompression overhead on file payloads.
		downloadClient: &http.Client{
			Transport: newTransport(cfg, true),
			Timeout:   cfg.DownloadTimeout,
		},
		// No overall timeout - relies on context cancellation for large
		// file downloads. Connection establishment and the TLS handshake
		// still have timeouts to prevent hanging on initial connect.
		noTimeoutClient: &http.Client{
			Transport: newTransport(cfg, true),
			Timeout:   0, // No timeout
		},
	}
}

// newTransport builds an *http.Transport with the shared timeout and
// connection-pool settings from cfg. disableCompression should be true
// for download transports (avoids decompression overhead on file data).
func newTransport(cfg Config, disableCompression bool) *http.Transport {
	return &http.Transport{
		DialContext: (&net.Dialer{
			Timeout:   cfg.ConnectTimeout,
			KeepAlive: 30 * time.Second,
		}).DialContext,
		TLSHandshakeTimeout:   cfg.TLSHandshakeTimeout,
		IdleConnTimeout:       cfg.IdleConnTimeout,
		MaxIdleConns:          cfg.MaxIdleConns,
		MaxIdleConnsPerHost:   cfg.MaxIdleConnsPerHost,
		ExpectContinueTimeout: 1 * time.Second,
		ForceAttemptHTTP2:     true,
		DisableCompression:    disableCompression,
	}
}
// Client returns the HTTP client for API requests (RequestTimeout, 30s by default).
func (s *Service) Client() *http.Client {
	return s.client
}
// DownloadClient returns the HTTP client for file downloads (DownloadTimeout, 10m by default).
func (s *Service) DownloadClient() *http.Client {
	return s.downloadClient
}
// Do executes an HTTP request using the API client.
// Callers must close resp.Body on success.
func (s *Service) Do(req *http.Request) (*http.Response, error) {
	return s.client.Do(req)
}
// DoDownload executes an HTTP request using the download client (longer timeout).
// Callers must close resp.Body on success.
func (s *Service) DoDownload(req *http.Request) (*http.Response, error) {
	return s.downloadClient.Do(req)
}
// Get performs an HTTP GET request using the API client.
// Callers must close resp.Body on success.
func (s *Service) Get(url string) (*http.Response, error) {
	return s.client.Get(url)
}
// GetDownload performs an HTTP GET request using the download client (longer timeout).
// Callers must close resp.Body on success.
func (s *Service) GetDownload(url string) (*http.Response, error) {
	return s.downloadClient.Get(url)
}
// DoLargeDownload executes an HTTP request for large file downloads.
// This client has NO overall timeout - cancellation must be handled via the request's context.
// Use this for multi-gigabyte files that may take hours to download.
// The connection establishment and TLS handshake still have timeouts.
// Callers must close resp.Body on success.
//
// Example usage:
//
//	ctx, cancel := context.WithCancel(context.Background())
//	defer cancel() // Call cancel() to abort the download
//	req, _ := http.NewRequestWithContext(ctx, "GET", url, nil)
//	resp, err := httpClient.DoLargeDownload(req)
func (s *Service) DoLargeDownload(req *http.Request) (*http.Response, error) {
	return s.noTimeoutClient.Do(req)
}
// GetLargeDownload performs an HTTP GET request for large file downloads.
// This client has NO overall timeout - the download can run indefinitely.
// Use this for multi-gigabyte files. To cancel, use DoLargeDownload with a context.
// Callers must close resp.Body on success.
func (s *Service) GetLargeDownload(url string) (*http.Response, error) {
	return s.noTimeoutClient.Get(url)
}

View file

@ -0,0 +1,263 @@
package inputvalidation
import (
"fmt"
"net/mail"
"regexp"
"strings"
"unicode"
"unicode/utf8"
)
// Validation limits for input fields. Length limits are in bytes unless
// noted otherwise.
const (
	// Email limits
	MaxEmailLength = 254 // RFC 5321 maximum address length
	// Name limits (collection names, file names, user names)
	MinNameLength = 1
	MaxNameLength = 255
	// Display name limits
	MaxDisplayNameLength = 100
	// Description limits
	MaxDescriptionLength = 1000
	// UUID format (standard UUID v4, canonical 8-4-4-4-12 text form)
	UUIDLength = 36
	// OTT (One-Time Token) limits
	OTTLength = 8 // 8-digit code
	// Password limits
	MinPasswordLength = 8
	MaxPasswordLength = 128
)
// uuidRegex matches standard UUID format (8-4-4-4-12); compiled once at
// package scope so validation does not recompile per call.
var uuidRegex = regexp.MustCompile(`^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$`)
// ottRegex matches exactly 8 ASCII digits (no sign, whitespace, or extra chars).
var ottRegex = regexp.MustCompile(`^[0-9]{8}$`)
// ValidateEmail validates an email address for use as a login identifier.
// It enforces the RFC 5321 length limit, requires RFC 5322 syntax, rejects
// control characters, and requires a *bare* address.
//
// Fix: mail.ParseAddress also accepts display-name forms such as
// "Alice <alice@example.com>", which previously passed validation even
// though only a plain address is meaningful here. The parsed address must
// now round-trip to exactly the input.
func ValidateEmail(email string) error {
	if email == "" {
		return fmt.Errorf("email is required")
	}
	// Check length (bytes; RFC 5321 limit)
	if len(email) > MaxEmailLength {
		return fmt.Errorf("email exceeds maximum length of %d characters", MaxEmailLength)
	}
	// Use Go's mail package for RFC 5322 validation
	addr, err := mail.ParseAddress(email)
	if err != nil {
		return fmt.Errorf("invalid email format")
	}
	// Reject "Name <user@host>" forms: only a bare address is valid input.
	if addr.Address != email {
		return fmt.Errorf("invalid email format")
	}
	// Additional checks for security
	if strings.ContainsAny(email, "\x00\n\r") {
		return fmt.Errorf("email contains invalid characters")
	}
	return nil
}
// ValidateUUID validates that id is a UUID in the canonical 8-4-4-4-12
// hex form; fieldName is used in the error messages.
func ValidateUUID(id, fieldName string) error {
	switch {
	case id == "":
		return fmt.Errorf("%s is required", fieldName)
	case len(id) != UUIDLength:
		// Fast length check before the regex.
		return fmt.Errorf("%s must be a valid UUID", fieldName)
	case !uuidRegex.MatchString(id):
		return fmt.Errorf("%s must be a valid UUID format", fieldName)
	}
	return nil
}
// ValidateName validates a name field (collection name, filename, etc.):
// non-empty, within MaxNameLength bytes, valid UTF-8, no control
// characters (other than tab/newline/CR), and not whitespace-only.
//
// Cleanup: the previous separate `r == 0` null-byte check was unreachable
// — NUL is a control character (0 < 32, and not tab/newline/CR), so the
// control-character branch always returned first. The check is removed;
// behavior is unchanged.
func ValidateName(name, fieldName string) error {
	if name == "" {
		return fmt.Errorf("%s is required", fieldName)
	}
	// Check length
	if len(name) > MaxNameLength {
		return fmt.Errorf("%s exceeds maximum length of %d characters", fieldName, MaxNameLength)
	}
	// Check for valid UTF-8
	if !utf8.ValidString(name) {
		return fmt.Errorf("%s contains invalid characters", fieldName)
	}
	// Check for control characters (except tab and newline which might be
	// valid in descriptions). This also rejects NUL.
	for _, r := range name {
		if r < 32 && r != '\t' && r != '\n' && r != '\r' {
			return fmt.Errorf("%s contains invalid control characters", fieldName)
		}
	}
	// Check that it's not all whitespace
	if strings.TrimSpace(name) == "" {
		return fmt.Errorf("%s cannot be empty or whitespace only", fieldName)
	}
	return nil
}
// ValidateDisplayName validates an optional display name (first name,
// last name, etc.). Empty values are allowed; otherwise the value must be
// within MaxDisplayNameLength bytes, valid UTF-8, and contain only
// printable runes (plain spaces are permitted).
func ValidateDisplayName(name, fieldName string) error {
	if name == "" {
		return nil
	}
	if len(name) > MaxDisplayNameLength {
		return fmt.Errorf("%s exceeds maximum length of %d characters", fieldName, MaxDisplayNameLength)
	}
	if !utf8.ValidString(name) {
		return fmt.Errorf("%s contains invalid characters", fieldName)
	}
	// Reject control and non-printable runes; space is the only exception.
	for _, r := range name {
		if r == ' ' {
			continue
		}
		if r < 32 || !unicode.IsPrint(r) {
			return fmt.Errorf("%s contains invalid characters", fieldName)
		}
	}
	return nil
}
// ValidateDescription validates an optional description field: within
// MaxDescriptionLength bytes, valid UTF-8, and free of null bytes.
func ValidateDescription(desc string) error {
	if desc == "" {
		return nil
	}
	switch {
	case len(desc) > MaxDescriptionLength:
		return fmt.Errorf("description exceeds maximum length of %d characters", MaxDescriptionLength)
	case !utf8.ValidString(desc):
		return fmt.Errorf("description contains invalid characters")
	case strings.ContainsRune(desc, 0):
		return fmt.Errorf("description contains null characters")
	}
	return nil
}
// ValidateOTT validates a one-time token (8-digit code). Surrounding
// whitespace is tolerated for the check since users often copy-paste
// codes with spaces; note the input string itself is not modified.
func ValidateOTT(ott string) error {
	if ott == "" {
		return fmt.Errorf("verification code is required")
	}
	if !ottRegex.MatchString(strings.TrimSpace(ott)) {
		return fmt.Errorf("verification code must be an 8-digit number")
	}
	return nil
}
// ValidatePassword validates a password.
// It enforces presence, a byte length between MinPasswordLength and
// MaxPasswordLength, and rejects embedded null bytes (which could indicate an
// injection attempt). It deliberately imposes no character-class rules.
func ValidatePassword(password string) error {
	switch n := len(password); {
	case n == 0:
		return fmt.Errorf("password is required")
	case n < MinPasswordLength:
		return fmt.Errorf("password must be at least %d characters", MinPasswordLength)
	case n > MaxPasswordLength:
		return fmt.Errorf("password exceeds maximum length of %d characters", MaxPasswordLength)
	}
	if strings.ContainsRune(password, 0) {
		return fmt.Errorf("password contains invalid characters")
	}
	return nil
}
// ValidateCollectionID is a convenience function for collection ID validation.
// It delegates to ValidateUUID with a field name used in error messages.
func ValidateCollectionID(id string) error {
	return ValidateUUID(id, "collection ID")
}

// ValidateFileID is a convenience function for file ID validation.
// It delegates to ValidateUUID with a field name used in error messages.
func ValidateFileID(id string) error {
	return ValidateUUID(id, "file ID")
}

// ValidateTagID is a convenience function for tag ID validation.
// It delegates to ValidateUUID with a field name used in error messages.
func ValidateTagID(id string) error {
	return ValidateUUID(id, "tag ID")
}

// ValidateCollectionName validates a collection name by applying the generic
// ValidateName rules with a collection-specific field name.
func ValidateCollectionName(name string) error {
	return ValidateName(name, "collection name")
}
// ValidateFileName validates a file name.
// It first applies the generic ValidateName rules, then adds file-specific
// checks: no ".." sequences (path traversal) and no '/' or '\' path
// separators. Note the ".." rule also rejects otherwise-benign names such as
// "archive..txt" — this is intentional strictness.
func ValidateFileName(name string) error {
	if err := ValidateName(name, "filename"); err != nil {
		return err
	}
	switch {
	case strings.Contains(name, ".."):
		return fmt.Errorf("filename cannot contain path traversal sequences")
	case strings.ContainsAny(name, `/\`):
		return fmt.Errorf("filename cannot contain path separators")
	}
	return nil
}
// SanitizeString removes or replaces potentially dangerous characters.
// This is a defense-in-depth measure — validation should be done first.
// It strips every null byte and trims leading/trailing whitespace.
func SanitizeString(s string) string {
	cleaned := strings.ReplaceAll(s, "\x00", "")
	return strings.TrimSpace(cleaned)
}

View file

@ -0,0 +1,167 @@
package inputvalidation
import (
"fmt"
"net"
"net/url"
"strings"
)
// AllowedDownloadHosts lists the allowed hosts for presigned download URLs.
// These are the only hosts from which the application will download files.
//
// Matching semantics (see isAllowedHost): entries beginning with "." match any
// host ending with that suffix (e.g. ".maplefile.ca" matches
// "cdn.maplefile.ca"); entries without a leading dot match exactly or as a
// parent domain of the host.
var AllowedDownloadHosts = []string{
	// Production S3-compatible storage (Digital Ocean Spaces)
	".digitaloceanspaces.com",
	// AWS S3 (if used in future)
	".s3.amazonaws.com",
	".s3.us-east-1.amazonaws.com",
	".s3.us-west-2.amazonaws.com",
	".s3.eu-west-1.amazonaws.com",
	// MapleFile domains (if serving files directly)
	".maplefile.ca",
	// Local development
	"localhost",
	"127.0.0.1",
}
// ValidateDownloadURL validates a presigned download URL before use.
// This prevents SSRF attacks by ensuring downloads only happen from trusted hosts.
//
// Checks are applied in order: non-empty, parseable, http/https scheme,
// non-empty hostname, HTTPS required for non-local hosts, host present in
// AllowedDownloadHosts, no embedded credentials, and no ".." in the path.
// Returns nil only when every check passes.
func ValidateDownloadURL(rawURL string) error {
	if rawURL == "" {
		return fmt.Errorf("download URL is required")
	}
	// Parse the URL
	parsedURL, err := url.Parse(rawURL)
	if err != nil {
		return fmt.Errorf("invalid URL format: %w", err)
	}
	// Validate scheme - must be HTTPS (except localhost for development)
	if parsedURL.Scheme != "https" && parsedURL.Scheme != "http" {
		return fmt.Errorf("URL must use HTTP or HTTPS scheme")
	}
	// Get host without port
	host := parsedURL.Hostname()
	if host == "" {
		return fmt.Errorf("URL must have a valid host")
	}
	// For HTTPS requirement - only allow HTTP for localhost/local IPs
	if parsedURL.Scheme == "http" {
		if !isLocalHost(host) {
			return fmt.Errorf("non-local URLs must use HTTPS")
		}
	}
	// Check if host is in allowed list
	if !isAllowedHost(host) {
		return fmt.Errorf("download from host %q is not allowed", host)
	}
	// Check for credentials in URL (security risk)
	if parsedURL.User != nil {
		return fmt.Errorf("URL must not contain credentials")
	}
	// Check for suspicious path traversal in URL path.
	// parsedURL.Path holds the decoded form, so percent-encoded "%2e%2e"
	// sequences are covered by this check too.
	if strings.Contains(parsedURL.Path, "..") {
		return fmt.Errorf("URL path contains invalid sequences")
	}
	return nil
}
// isAllowedHost checks if a host is in the allowed download hosts list.
// Matching is case-insensitive. Entries with a leading "." act as wildcard
// suffixes (".example.com" matches "bucket.example.com"); entries without a
// leading dot match exactly or as a parent domain ("localhost" also matches
// "api.localhost").
//
// NOTE(review): the parent-domain rule also applies to the IP entry
// "127.0.0.1", so a hostname like "x.127.0.0.1" would match — confirm this is
// the intended behavior for the local-development entries.
func isAllowedHost(host string) bool {
	host = strings.ToLower(host)
	for _, allowed := range AllowedDownloadHosts {
		allowed = strings.ToLower(allowed)
		// Exact match
		if host == allowed {
			return true
		}
		// Suffix match for wildcard domains (e.g., ".digitaloceanspaces.com").
		// The leading dot in the suffix prevents "evildigitaloceanspaces.com"
		// from matching.
		if strings.HasPrefix(allowed, ".") && strings.HasSuffix(host, allowed) {
			return true
		}
		// Handle subdomains for non-wildcard entries
		if !strings.HasPrefix(allowed, ".") {
			if host == allowed || strings.HasSuffix(host, "."+allowed) {
				return true
			}
		}
	}
	return false
}
// isLocalHost reports whether a host is localhost or a local/private IP
// address. It recognizes the common localhost names, loopback addresses, and
// private network ranges (via net.IP.IsPrivate). Non-IP hostnames other than
// "localhost" are never considered local.
func isLocalHost(host string) bool {
	lower := strings.ToLower(host)
	switch lower {
	case "localhost", "127.0.0.1", "::1":
		return true
	}
	parsed := net.ParseIP(lower)
	if parsed == nil {
		// Not an IP literal, and not a recognized localhost name.
		return false
	}
	return parsed.IsLoopback() || parsed.IsPrivate()
}
// ValidateAPIBaseURL validates a base URL for API requests.
// The URL must parse, use http or https, and carry a non-empty host. Plain
// http is only permitted for localhost/private addresses, and embedded
// credentials (user:pass@) are rejected.
func ValidateAPIBaseURL(rawURL string) error {
	if rawURL == "" {
		return fmt.Errorf("API URL is required")
	}
	parsed, err := url.Parse(rawURL)
	if err != nil {
		return fmt.Errorf("invalid URL format: %w", err)
	}
	if parsed.Scheme != "https" && parsed.Scheme != "http" {
		return fmt.Errorf("URL must use HTTP or HTTPS scheme")
	}
	hostname := parsed.Hostname()
	if hostname == "" {
		return fmt.Errorf("URL must have a valid host")
	}
	// Plain HTTP (no TLS) is acceptable only for local development hosts.
	if parsed.Scheme == "http" && !isLocalHost(hostname) {
		return fmt.Errorf("non-local URLs must use HTTPS")
	}
	if parsed.User != nil {
		return fmt.Errorf("URL must not contain credentials")
	}
	return nil
}

View file

@ -0,0 +1,181 @@
// Package keycache provides secure in-memory caching of cryptographic keys during a session.
// Keys are stored in memguard Enclaves (encrypted at rest in memory) and automatically
// cleared when the application shuts down or the user logs out.
package keycache
import (
"fmt"
"sync"
"github.com/awnumar/memguard"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/utils"
)
// Service manages cached cryptographic keys in secure memory.
// All map access is guarded by mu, so the service is safe for concurrent use.
type Service struct {
	logger *zap.Logger
	mu     sync.RWMutex
	// Map of email -> Enclave containing master key.
	// Enclave stores data encrypted in memory; it must be opened to access.
	masterKeys map[string]*memguard.Enclave
}

// ProvideService creates a new key cache service (for Wire dependency
// injection). The logger is namespaced as "keycache" for log attribution.
func ProvideService(logger *zap.Logger) *Service {
	return &Service{
		logger:     logger.Named("keycache"),
		masterKeys: make(map[string]*memguard.Enclave),
	}
}
// StoreMasterKey stores a user's master key in an encrypted memory Enclave.
// The key will remain cached until cleared or the app exits.
//
// NOTE: memguard.NewBufferFromBytes takes ownership of the source slice and
// wipes it, so after this call the caller's masterKey contents are zeroed.
// Its length is unchanged, which is why logging len(masterKey) below is still
// safe.
func (s *Service) StoreMasterKey(email string, masterKey []byte) error {
	if email == "" {
		return fmt.Errorf("email is required")
	}
	if len(masterKey) == 0 {
		return fmt.Errorf("master key is empty")
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	// If there's already a cached key, remove it first. The old Enclave is
	// simply dropped from the map and garbage collected — memguard exposes no
	// explicit destroy API for enclaves.
	// (Fix: the original bound the old enclave to a variable used only via
	// `_ = existing`; a blank-identifier lookup expresses the same thing.)
	if _, exists := s.masterKeys[email]; exists {
		delete(s.masterKeys, email)
		s.logger.Debug("Replaced existing cached master key", zap.String("email", utils.MaskEmail(email)))
	}
	// Create a LockedBuffer from the master key bytes first.
	// This locks the memory pages and prevents swapping.
	lockedBuf := memguard.NewBufferFromBytes(masterKey)
	// Seal converts the LockedBuffer into an Enclave, which stores the data
	// encrypted at rest in memory. Seal consumes the LockedBuffer, so no
	// explicit Destroy() is needed.
	s.masterKeys[email] = lockedBuf.Seal()
	s.logger.Info("Master key cached securely in memory",
		zap.String("email", utils.MaskEmail(email)),
		zap.Int("size", len(masterKey)))
	return nil
}
// GetMasterKey retrieves a cached master key for the given email.
// Returns the key bytes and a cleanup function that MUST be called when done.
// The cleanup function destroys the LockedBuffer to prevent memory leaks.
//
// The returned slice references the opened LockedBuffer's protected memory:
// once cleanup runs, memguard wipes that buffer, so the bytes must not be
// retained or read after calling cleanup.
func (s *Service) GetMasterKey(email string) ([]byte, func(), error) {
	if email == "" {
		return nil, nil, fmt.Errorf("email is required")
	}
	// Read-lock only for the map lookup; Open happens outside the lock.
	s.mu.RLock()
	enclave, exists := s.masterKeys[email]
	s.mu.RUnlock()
	if !exists {
		return nil, nil, fmt.Errorf("no cached master key found for email: %s", email)
	}
	// Open the enclave to decrypt the master key into a fresh LockedBuffer.
	lockedBuf, err := enclave.Open()
	if err != nil {
		return nil, nil, fmt.Errorf("failed to open enclave for reading: %w", err)
	}
	// Get the bytes (caller will use these)
	masterKey := lockedBuf.Bytes()
	// Return cleanup function that destroys the LockedBuffer
	cleanup := func() {
		lockedBuf.Destroy()
	}
	s.logger.Debug("Retrieved cached master key from secure memory",
		zap.String("email", utils.MaskEmail(email)))
	return masterKey, cleanup, nil
}
// WithMasterKey provides a callback pattern for using a cached master key.
// The key bytes are valid only for the duration of fn; cleanup runs
// automatically once fn returns (even if fn returns an error).
func (s *Service) WithMasterKey(email string, fn func([]byte) error) error {
	key, done, err := s.GetMasterKey(email)
	if err != nil {
		return err
	}
	defer done()
	return fn(key)
}
// HasMasterKey checks if a master key is cached for the given email.
// An empty email always reports false.
func (s *Service) HasMasterKey(email string) bool {
	if email == "" {
		return false
	}
	s.mu.RLock()
	defer s.mu.RUnlock()
	_, exists := s.masterKeys[email]
	return exists
}

// ClearMasterKey removes a cached master key for a specific user.
// Returns an error when the email is empty or no key is cached for it.
func (s *Service) ClearMasterKey(email string) error {
	if email == "" {
		return fmt.Errorf("email is required")
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	if enclave, exists := s.masterKeys[email]; exists {
		delete(s.masterKeys, email)
		s.logger.Info("Cleared cached master key from secure memory",
			zap.String("email", utils.MaskEmail(email)))
		_ = enclave // Enclave will be garbage collected
		return nil
	}
	return fmt.Errorf("no cached master key found for email: %s", email)
}

// ClearAll removes all cached master keys.
// This should be called on logout or application shutdown.
// Clearing an already-empty cache is a silent no-op (no log entry).
func (s *Service) ClearAll() {
	s.mu.Lock()
	defer s.mu.Unlock()
	count := len(s.masterKeys)
	if count == 0 {
		return
	}
	// Drop every enclave; they are garbage collected once unreferenced.
	for email := range s.masterKeys {
		delete(s.masterKeys, email)
	}
	s.logger.Info("Cleared all cached master keys from secure memory",
		zap.Int("count", count))
}

// Cleanup performs cleanup operations when the service is shutting down.
// This is called by the application shutdown handler; it simply delegates
// to ClearAll.
func (s *Service) Cleanup() {
	s.logger.Info("Cleaning up key cache service")
	s.ClearAll()
}

View file

@ -0,0 +1,180 @@
package passwordstore
import (
"fmt"
"sync"
"github.com/awnumar/memguard"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/utils"
)
// Service manages password storage in secure RAM.
// All map access is guarded by mu, so the service is safe for concurrent use.
type Service struct {
	logger *zap.Logger
	mu     sync.RWMutex
	// RAM storage (memguard enclaves) - email -> encrypted password
	memoryStore map[string]*memguard.Enclave
}

// New creates a new password storage service.
//
// NOTE(review): memguard.CatchInterrupt installs a process-wide interrupt
// handler that purges secure memory before exit — a global side effect.
// Confirm this interacts safely with other memguard users in the process
// (e.g. the key cache service).
func New(logger *zap.Logger) *Service {
	// Initialize memguard
	memguard.CatchInterrupt()
	return &Service{
		logger:      logger,
		memoryStore: make(map[string]*memguard.Enclave),
	}
}
// StorePassword stores password in secure RAM (memguard).
// SECURITY NOTE: This method accepts a string for API compatibility with JSON
// inputs. The string is immediately converted to []byte and the byte slice is
// zeroed after creating the secure enclave. For maximum security, use
// StorePasswordBytes when you have direct access to []byte data.
func (s *Service) StorePassword(email, password string) error {
	// Convert string to byte slice for secure handling.
	// []byte(password) allocates a fresh copy, so zeroing it below does not
	// touch the caller's string.
	passwordBytes := []byte(password)
	// Store using the secure byte-based method
	err := s.StorePasswordBytes(email, passwordBytes)
	// Zero the byte slice after use (defense in depth).
	// Note: The original string cannot be zeroed in Go, but we minimize
	// exposure by zeroing the byte slice copy as soon as possible.
	zeroBytes(passwordBytes)
	return err
}
// StorePasswordBytes stores password from []byte in secure RAM (memguard).
// This is the preferred method when you have direct access to password bytes.
// memguard.NewEnclave copies the data into protected memory, so the caller
// may safely zero the provided slice once this method returns.
func (s *Service) StorePasswordBytes(email string, password []byte) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if _, ok := s.memoryStore[email]; ok {
		// Drop any previous enclave; it will be garbage collected.
		delete(s.memoryStore, email)
		s.logger.Debug("Replaced existing password enclave",
			zap.String("email", utils.MaskEmail(email)))
	}
	// memguard copies the bytes into a new protected enclave.
	s.memoryStore[email] = memguard.NewEnclave(password)
	s.logger.Debug("Password stored in secure RAM",
		zap.String("email", utils.MaskEmail(email)))
	return nil
}
// GetPassword retrieves password from secure RAM as a string.
// SECURITY NOTE: the returned string cannot be zeroed in Go; for operations
// that can work with []byte, use GetPasswordBytes instead.
func (s *Service) GetPassword(email string) (string, error) {
	raw, err := s.GetPasswordBytes(email)
	if err != nil {
		return "", err
	}
	// string() makes an immutable copy; zero the intermediate bytes afterwards.
	result := string(raw)
	zeroBytes(raw)
	return result, nil
}
// GetPasswordBytes retrieves password from secure RAM as []byte.
// The caller SHOULD zero the returned byte slice after use by calling
// ZeroBytes. This is the preferred method for security-sensitive operations.
//
// The returned slice is an independent copy: the memguard buffer opened here
// is destroyed before returning, so only the caller's copy remains.
func (s *Service) GetPasswordBytes(email string) ([]byte, error) {
	// Read-lock only for the map lookup; Open happens outside the lock.
	s.mu.RLock()
	enclave, exists := s.memoryStore[email]
	s.mu.RUnlock()
	if !exists {
		return nil, fmt.Errorf("no password stored for %s", email)
	}
	// Open enclave to read password
	lockedBuffer, err := enclave.Open()
	if err != nil {
		return nil, fmt.Errorf("failed to open password enclave: %w", err)
	}
	defer lockedBuffer.Destroy()
	// Copy the password bytes (memguard buffer will be destroyed after defer)
	passwordBytes := make([]byte, len(lockedBuffer.Bytes()))
	copy(passwordBytes, lockedBuffer.Bytes())
	return passwordBytes, nil
}
// HasPassword checks if password is stored for given email.
func (s *Service) HasPassword(email string) bool {
	s.mu.RLock()
	defer s.mu.RUnlock()
	_, exists := s.memoryStore[email]
	return exists
}

// ClearPassword removes password from RAM (logout).
// Clearing an email with no stored password is a no-op and returns nil.
func (s *Service) ClearPassword(email string) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if _, exists := s.memoryStore[email]; exists {
		delete(s.memoryStore, email)
		s.logger.Debug("Password cleared from RAM",
			zap.String("email", utils.MaskEmail(email)))
	}
	return nil
}
// Cleanup destroys all secure memory (called on shutdown).
//
// NOTE(review): memguard.Purge is process-global — it destroys memguard
// secure memory for the whole process, not only the enclaves owned by this
// service. Confirm no other memguard users (e.g. the key cache) are still
// active when this runs; it is intended for shutdown only.
func (s *Service) Cleanup() {
	s.mu.Lock()
	defer s.mu.Unlock()
	// Log cleanup for each stored password
	for email := range s.memoryStore {
		s.logger.Debug("Clearing password enclave on shutdown",
			zap.String("email", utils.MaskEmail(email)))
	}
	// Clear the map (enclaves will be garbage collected)
	s.memoryStore = make(map[string]*memguard.Enclave)
	// Purge all memguard secure memory
	memguard.Purge()
	s.logger.Debug("Password store cleanup complete - all secure memory purged")
}
// zeroBytes overwrites a byte slice with zeros to clear sensitive data from
// memory. This is a defense-in-depth measure — Go's GC may still hold copies
// elsewhere, but it shrinks the window of exposure.
func zeroBytes(b []byte) {
	for i := 0; i < len(b); i++ {
		b[i] = 0
	}
}

// ZeroBytes is exported for callers who receive password bytes and need to
// clear them. Use it after you're done with bytes from GetPasswordBytes.
func ZeroBytes(b []byte) {
	zeroBytes(b)
}

View file

@ -0,0 +1,10 @@
package passwordstore
import (
"go.uber.org/zap"
)
// ProvideService creates the password storage service for Wire dependency
// injection, attaching a logger namespaced as "password-store".
func ProvideService(logger *zap.Logger) *Service {
	return New(logger.Named("password-store"))
}

View file

@ -0,0 +1,260 @@
// Package ratelimiter provides client-side rate limiting for sensitive operations.
//
// Security Note: This is a defense-in-depth measure. The backend MUST also implement
// rate limiting as the authoritative control. Client-side rate limiting provides:
// - Protection against accidental rapid requests (e.g., user double-clicking)
// - Reduced load on backend during legitimate high-frequency usage
// - Better UX by failing fast with clear error messages
// - Deterrent against simple automated attacks (though not a security boundary)
//
// This does NOT replace server-side rate limiting, which remains the security control.
package ratelimiter
import (
"fmt"
"sync"
"time"
)
// Operation represents a rate-limited operation type.
// The string value is used as part of the per-identifier state key, so these
// values must remain stable.
type Operation string

const (
	// OpRequestOTT is the operation for requesting a one-time token
	OpRequestOTT Operation = "request_ott"
	// OpVerifyOTT is the operation for verifying a one-time token
	OpVerifyOTT Operation = "verify_ott"
	// OpCompleteLogin is the operation for completing login
	OpCompleteLogin Operation = "complete_login"
	// OpRegister is the operation for user registration
	OpRegister Operation = "register"
	// OpVerifyEmail is the operation for email verification
	OpVerifyEmail Operation = "verify_email"
)
// RateLimitError is returned when an operation is rate limited.
type RateLimitError struct {
	Operation    Operation
	RetryAfter   time.Duration
	AttemptsMade int
	MaxAttempts  int
}

// Error implements the error interface, rounding RetryAfter to whole seconds
// for readability.
func (e *RateLimitError) Error() string {
	retry := e.RetryAfter.Round(time.Second)
	return fmt.Sprintf(
		"rate limited: %s operation exceeded %d attempts, retry after %v",
		e.Operation, e.MaxAttempts, retry,
	)
}
// operationLimit defines the rate limit configuration for an operation.
type operationLimit struct {
	maxAttempts int           // Maximum attempts allowed in the window
	window      time.Duration // Time window for the limit
	cooldown    time.Duration // Cooldown period after hitting the limit
}

// operationState tracks the current state of rate limiting for an operation.
// The zero value of lockedUntil means "not currently in cooldown".
type operationState struct {
	attempts    int       // Current attempt count
	windowStart time.Time // When the current window started
	lockedUntil time.Time // If rate limited, when the cooldown ends
}

// Service provides rate limiting functionality.
// mu guards state; limits is populated once in New and then read-only.
type Service struct {
	mu     sync.Mutex
	limits map[Operation]operationLimit
	state  map[string]*operationState // key: operation + identifier (e.g., email)
}
// New creates a new rate limiter service with default limits.
//
// Default limits are designed to:
//   - Allow normal user behavior (typos, retries)
//   - Prevent rapid automated attempts
//   - Provide reasonable cooldown periods
//
// Every entry below keeps cooldown >= window; Check relies on this so that
// once a cooldown ends, the window has also expired and the attempt counter
// resets cleanly.
func New() *Service {
	return &Service{
		limits: map[Operation]operationLimit{
			// OTT request: 3 attempts per 60 seconds, 2 minute cooldown
			// Rationale: Users might request OTT multiple times if email is slow
			OpRequestOTT: {
				maxAttempts: 3,
				window:      60 * time.Second,
				cooldown:    2 * time.Minute,
			},
			// OTT verification: 5 attempts per 60 seconds, 1 minute cooldown
			// Rationale: Users might mistype the 8-digit code
			OpVerifyOTT: {
				maxAttempts: 5,
				window:      60 * time.Second,
				cooldown:    1 * time.Minute,
			},
			// Complete login: 5 attempts per 60 seconds, 1 minute cooldown
			// Rationale: Password decryption might fail due to typos
			OpCompleteLogin: {
				maxAttempts: 5,
				window:      60 * time.Second,
				cooldown:    1 * time.Minute,
			},
			// Registration: 3 attempts per 5 minutes, 5 minute cooldown
			// Rationale: Registration is a one-time operation, limit abuse
			OpRegister: {
				maxAttempts: 3,
				window:      5 * time.Minute,
				cooldown:    5 * time.Minute,
			},
			// Email verification: 5 attempts per 60 seconds, 1 minute cooldown
			// Rationale: Users might mistype the verification code
			OpVerifyEmail: {
				maxAttempts: 5,
				window:      60 * time.Second,
				cooldown:    1 * time.Minute,
			},
		},
		state: make(map[string]*operationState),
	}
}

// ProvideService creates the rate limiter service for Wire dependency injection.
func ProvideService() *Service {
	return New()
}
// Check verifies if an operation is allowed and records the attempt.
// The identifier is typically the user's email address.
// Returns nil if the operation is allowed, or a *RateLimitError if rate limited.
//
// Every call that is not blocked by an active cooldown counts as an attempt.
// Note that cooldown expiry alone does not reset the counter — only window
// expiry does. This is safe as long as every configured cooldown is >= its
// window (true for all defaults in New), because the window is then guaranteed
// to have expired by the time a cooldown ends.
func (s *Service) Check(op Operation, identifier string) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	limit, ok := s.limits[op]
	if !ok {
		// Unknown operation, allow by default (fail open for usability)
		return nil
	}
	key := string(op) + ":" + identifier
	now := time.Now()
	state, exists := s.state[key]
	if !exists {
		// First attempt for this operation+identifier
		s.state[key] = &operationState{
			attempts:    1,
			windowStart: now,
		}
		return nil
	}
	// Check if currently in cooldown
	if now.Before(state.lockedUntil) {
		return &RateLimitError{
			Operation:    op,
			RetryAfter:   state.lockedUntil.Sub(now),
			AttemptsMade: state.attempts,
			MaxAttempts:  limit.maxAttempts,
		}
	}
	// Check if window has expired (reset if so)
	if now.Sub(state.windowStart) > limit.window {
		state.attempts = 1
		state.windowStart = now
		state.lockedUntil = time.Time{}
		return nil
	}
	// Increment attempt count
	state.attempts++
	// Check if limit exceeded; if so, start the cooldown now
	if state.attempts > limit.maxAttempts {
		state.lockedUntil = now.Add(limit.cooldown)
		return &RateLimitError{
			Operation:    op,
			RetryAfter:   limit.cooldown,
			AttemptsMade: state.attempts,
			MaxAttempts:  limit.maxAttempts,
		}
	}
	return nil
}
// Reset clears the rate limit state for a specific operation and identifier.
// Call this after a successful operation where you want to allow fresh attempts
// (e.g., after successful OTT verification). Do NOT call this for operations
// where success shouldn't reset the limit (e.g., OTT request, registration).
func (s *Service) Reset(op Operation, identifier string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	key := string(op) + ":" + identifier
	delete(s.state, key)
}

// ResetAll clears all rate limit state for an identifier (e.g., after
// successful login). It iterates every known operation, so only operations
// configured in limits are affected.
func (s *Service) ResetAll(identifier string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	for op := range s.limits {
		key := string(op) + ":" + identifier
		delete(s.state, key)
	}
}
// GetRemainingAttempts returns the number of remaining attempts for an operation.
// Returns -1 if the operation is currently rate limited.
//
// Note: -1 is also returned for an unknown operation, so callers cannot
// distinguish "unknown op" from "rate limited" by the return value alone.
// Unlike Check, this method does not record an attempt.
func (s *Service) GetRemainingAttempts(op Operation, identifier string) int {
	s.mu.Lock()
	defer s.mu.Unlock()
	limit, ok := s.limits[op]
	if !ok {
		return -1
	}
	key := string(op) + ":" + identifier
	state, exists := s.state[key]
	if !exists {
		return limit.maxAttempts
	}
	now := time.Now()
	// In cooldown
	if now.Before(state.lockedUntil) {
		return -1
	}
	// Window expired — a fresh window would grant the full budget
	if now.Sub(state.windowStart) > limit.window {
		return limit.maxAttempts
	}
	// Clamp to zero in case attempts overshot the maximum
	remaining := limit.maxAttempts - state.attempts
	if remaining < 0 {
		return 0
	}
	return remaining
}
// Cleanup removes expired state entries to prevent unbounded memory growth.
// Intended to be invoked periodically (e.g. hourly). An entry is dropped when
// its window began more than 24 hours ago and any cooldown has elapsed.
func (s *Service) Cleanup() {
	s.mu.Lock()
	defer s.mu.Unlock()
	const maxAge = 24 * time.Hour
	now := time.Now()
	for key, st := range s.state {
		stale := now.Sub(st.windowStart) > maxAge
		if stale && now.After(st.lockedUntil) {
			delete(s.state, key)
		}
	}
}

View file

@ -0,0 +1,512 @@
// Package search provides full-text search functionality using Bleve.
//
// This package implements a local full-text search index for files and collections
// using the Bleve search library (https://blevesearch.com/). The search index is
// stored per-user in their local application data directory.
//
// Key features:
// - Case-insensitive substring matching (e.g., "mesh" matches "meshtastic")
// - Support for Bleve query syntax (+, -, "", *, ?)
// - Deduplication of search results by document ID
// - Batch indexing for efficient rebuilds
// - User-isolated indexes (each user has their own search index)
//
// Location: monorepo/native/desktop/maplefile/internal/service/search/search.go
package search
import (
"context"
"fmt"
"os"
"strings"
"time"
"github.com/blevesearch/bleve/v2"
"github.com/blevesearch/bleve/v2/mapping"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/config"
)
// SearchService provides full-text search capabilities over files and
// collections, backed by a per-user Bleve index on local disk.
type SearchService interface {
	// Initialize opens or creates the search index for the specified user email
	Initialize(ctx context.Context, userEmail string) error
	// Close closes the search index
	Close() error
	// IndexFile adds or updates a file in the search index
	IndexFile(file *FileDocument) error
	// IndexCollection adds or updates a collection in the search index
	IndexCollection(collection *CollectionDocument) error
	// DeleteFile removes a file from the search index
	DeleteFile(fileID string) error
	// DeleteCollection removes a collection from the search index
	DeleteCollection(collectionID string) error
	// Search performs a full-text search
	Search(query string, limit int) (*SearchResult, error)
	// RebuildIndex rebuilds the entire search index from scratch
	RebuildIndex(userEmail string, files []*FileDocument, collections []*CollectionDocument) error
	// GetIndexSize returns the size of the search index in bytes
	GetIndexSize() (int64, error)
	// GetDocumentCount returns the number of documents in the index
	GetDocumentCount() (uint64, error)
}

// FileDocument represents a file document in the search index.
// CollectionName is denormalized so a file can be found by its collection's
// name without a join at query time.
type FileDocument struct {
	ID             string    `json:"id"`
	Filename       string    `json:"filename"`
	Description    string    `json:"description"`
	CollectionID   string    `json:"collection_id"`
	CollectionName string    `json:"collection_name"` // Denormalized for search
	Tags           []string  `json:"tags"`
	Size           int64     `json:"size"`
	CreatedAt      time.Time `json:"created_at"`
	Type           string    `json:"type"` // always "file"; set by IndexFile
}

// CollectionDocument represents a collection document in the search index.
type CollectionDocument struct {
	ID          string    `json:"id"`
	Name        string    `json:"name"`
	Description string    `json:"description"`
	Tags        []string  `json:"tags"`
	FileCount   int       `json:"file_count"`
	CreatedAt   time.Time `json:"created_at"`
	Type        string    `json:"type"` // always "collection"; set by IndexCollection
}

// SearchResult contains the search results, already split by document type
// and deduplicated by ID (see Search).
type SearchResult struct {
	Files            []*FileDocument       `json:"files"`
	Collections      []*CollectionDocument `json:"collections"`
	TotalFiles       int                   `json:"total_files"`
	TotalCollections int                   `json:"total_collections"`
	TotalHits        uint64                `json:"total_hits"`
	MaxScore         float64               `json:"max_score"`
	Took             time.Duration         `json:"took"`
	Query            string                `json:"query"`
}

// searchService implements SearchService.
// index is nil until Initialize succeeds; every method guards against that.
type searchService struct {
	index         bleve.Index
	configService config.ConfigService
	logger        *zap.Logger
}

// New creates a new search service. The returned service must be
// Initialize()d before use.
func New(configService config.ConfigService, logger *zap.Logger) SearchService {
	return &searchService{
		configService: configService,
		logger:        logger,
	}
}
// Initialize opens or creates the search index for the specified user.
// It resolves the per-user index directory via the config service, opens an
// existing Bleve index there, or creates a fresh one (using buildIndexMapping)
// if none exists yet.
//
// NOTE(review): calling Initialize while an index is already open overwrites
// s.index without closing the previous handle — confirm callers always Close
// before re-initializing.
func (s *searchService) Initialize(ctx context.Context, userEmail string) error {
	if userEmail == "" {
		return fmt.Errorf("user email is required")
	}
	// Get search index path
	indexPath, err := s.configService.GetUserSearchIndexDir(ctx, userEmail)
	if err != nil {
		return fmt.Errorf("failed to get search index path: %w", err)
	}
	if indexPath == "" {
		return fmt.Errorf("search index path is empty")
	}
	s.logger.Info("Initializing search index", zap.String("path", indexPath))
	// Try to open existing index. bleve.Open returns the sentinel
	// ErrorIndexPathDoesNotExist when no index exists at the path yet.
	index, err := bleve.Open(indexPath)
	if err == bleve.ErrorIndexPathDoesNotExist {
		// Create new index
		s.logger.Info("Creating new search index")
		indexMapping := buildIndexMapping()
		index, err = bleve.New(indexPath, indexMapping)
		if err != nil {
			return fmt.Errorf("failed to create search index: %w", err)
		}
	} else if err != nil {
		return fmt.Errorf("failed to open search index: %w", err)
	}
	s.index = index
	s.logger.Info("Search index initialized successfully")
	return nil
}
// Close closes the search index and marks the service uninitialized.
// It is safe to call when no index is open (returns nil).
func (s *searchService) Close() error {
	if s.index == nil {
		return nil
	}
	idx := s.index
	s.index = nil
	return idx.Close()
}
// IndexFile adds or updates a file in the search index.
// The document's Type is forced to "file" so Search can classify hits.
func (s *searchService) IndexFile(file *FileDocument) error {
	if s.index == nil {
		return fmt.Errorf("search index not initialized")
	}
	file.Type = "file"
	return s.index.Index(file.ID, file)
}

// IndexCollection adds or updates a collection in the search index.
// The document's Type is forced to "collection" so Search can classify hits.
func (s *searchService) IndexCollection(collection *CollectionDocument) error {
	if s.index == nil {
		return fmt.Errorf("search index not initialized")
	}
	collection.Type = "collection"
	return s.index.Index(collection.ID, collection)
}

// DeleteFile removes a file from the search index.
func (s *searchService) DeleteFile(fileID string) error {
	if s.index == nil {
		return fmt.Errorf("search index not initialized")
	}
	return s.index.Delete(fileID)
}

// DeleteCollection removes a collection from the search index.
func (s *searchService) DeleteCollection(collectionID string) error {
	if s.index == nil {
		return fmt.Errorf("search index not initialized")
	}
	return s.index.Delete(collectionID)
}
// Search performs a full-text search across files and collections.
//
// The search supports:
//   - Simple queries: automatically wrapped with wildcards for substring matching
//   - Advanced queries: use Bleve query syntax directly (+, -, "", *, ?)
//
// Examples:
//   - "mesh" → matches "meshtastic", "mesh_config", etc.
//   - "\"exact phrase\"" → matches exact phrase only
//   - "+required -excluded" → must contain "required", must not contain "excluded"
//
// A non-positive or >100 limit is clamped to 50. Results are split by
// document type and deduplicated by document ID.
func (s *searchService) Search(query string, limit int) (*SearchResult, error) {
	if s.index == nil {
		return nil, fmt.Errorf("search index not initialized")
	}
	if limit <= 0 || limit > 100 {
		limit = 50
	}
	// Convert to lowercase for case-insensitive search
	searchQueryStr := strings.ToLower(query)
	// For simple queries (no operators), wrap with wildcards to enable substring
	// matching. This allows "mesh" to match "meshtastic_antenna.png".
	// If the user provides operators or wildcards, use their query as-is.
	//
	// NOTE(review): any '-' disables wrapping, so a plain query containing a
	// hyphen (e.g. "my-file") is passed to Bleve as an exclusion operator
	// rather than wildcard-wrapped — confirm this is acceptable.
	// NOTE(review): leading-wildcard queries can be slow on large Bleve
	// indexes; presumably fine for a local desktop index.
	if !strings.Contains(searchQueryStr, "*") && !strings.Contains(searchQueryStr, "?") &&
		!strings.Contains(searchQueryStr, "+") && !strings.Contains(searchQueryStr, "-") &&
		!strings.Contains(searchQueryStr, "\"") {
		searchQueryStr = "*" + searchQueryStr + "*"
	}
	searchQuery := bleve.NewQueryStringQuery(searchQueryStr)
	searchRequest := bleve.NewSearchRequest(searchQuery)
	searchRequest.Size = limit
	searchRequest.Fields = []string{"*"}
	searchRequest.Highlight = bleve.NewHighlight()
	// Execute search
	searchResults, err := s.index.Search(searchRequest)
	if err != nil {
		return nil, fmt.Errorf("search failed: %w", err)
	}
	// Parse results with deduplication
	result := &SearchResult{
		Files:       make([]*FileDocument, 0),
		Collections: make([]*CollectionDocument, 0),
		TotalHits:   searchResults.Total,
		MaxScore:    searchResults.MaxScore,
		Took:        searchResults.Took,
		Query:       query,
	}
	// Use maps to deduplicate by ID
	seenFileIDs := make(map[string]bool)
	seenCollectionIDs := make(map[string]bool)
	for _, hit := range searchResults.Hits {
		// Hits without a recognizable "type" field are skipped entirely.
		docType, ok := hit.Fields["type"].(string)
		if !ok {
			continue
		}
		if docType == "file" {
			// Skip if we've already seen this file ID
			if seenFileIDs[hit.ID] {
				s.logger.Warn("Duplicate file in search results", zap.String("id", hit.ID))
				continue
			}
			seenFileIDs[hit.ID] = true
			file := &FileDocument{
				ID:             hit.ID,
				Filename:       getStringField(hit.Fields, "filename"),
				Description:    getStringField(hit.Fields, "description"),
				CollectionID:   getStringField(hit.Fields, "collection_id"),
				CollectionName: getStringField(hit.Fields, "collection_name"),
				Tags:           getStringArrayField(hit.Fields, "tags"),
				Size:           getInt64Field(hit.Fields, "size"),
			}
			// Parse failures leave CreatedAt as the zero time.
			if createdAt, ok := hit.Fields["created_at"].(string); ok {
				file.CreatedAt, _ = time.Parse(time.RFC3339, createdAt)
			}
			result.Files = append(result.Files, file)
		} else if docType == "collection" {
			// Skip if we've already seen this collection ID
			if seenCollectionIDs[hit.ID] {
				s.logger.Warn("Duplicate collection in search results", zap.String("id", hit.ID))
				continue
			}
			seenCollectionIDs[hit.ID] = true
			collection := &CollectionDocument{
				ID:          hit.ID,
				Name:        getStringField(hit.Fields, "name"),
				Description: getStringField(hit.Fields, "description"),
				Tags:        getStringArrayField(hit.Fields, "tags"),
				FileCount:   getIntField(hit.Fields, "file_count"),
			}
			// Parse failures leave CreatedAt as the zero time.
			if createdAt, ok := hit.Fields["created_at"].(string); ok {
				collection.CreatedAt, _ = time.Parse(time.RFC3339, createdAt)
			}
			result.Collections = append(result.Collections, collection)
		}
	}
	result.TotalFiles = len(result.Files)
	result.TotalCollections = len(result.Collections)
	return result, nil
}
// RebuildIndex rebuilds the entire search index from scratch.
//
// This method:
//  1. Closes the existing index (if any)
//  2. Deletes the index directory completely
//  3. Creates a fresh new index
//  4. Batch-indexes all provided files and collections
//
// This approach ensures no stale or duplicate documents remain in the index.
// The userEmail is required to locate the user-specific index directory.
// Per-document indexing failures are logged and skipped; only infrastructure
// failures (path resolution, directory removal, index creation, batch commit)
// abort the rebuild.
func (s *searchService) RebuildIndex(userEmail string, files []*FileDocument, collections []*CollectionDocument) error {
	s.logger.Info("Rebuilding search index from scratch",
		zap.Int("files", len(files)),
		zap.Int("collections", len(collections)))
	if userEmail == "" {
		return fmt.Errorf("user email is required for rebuild")
	}
	// Close the current index so its on-disk files can be removed safely.
	if s.index != nil {
		s.logger.Info("Closing current index before rebuild")
		if err := s.index.Close(); err != nil {
			s.logger.Warn("Error closing index before rebuild", zap.Error(err))
		}
		s.index = nil
	}
	// Resolve the user-specific index directory from config.
	ctx := context.Background()
	indexPath, err := s.configService.GetUserSearchIndexDir(ctx, userEmail)
	if err != nil {
		return fmt.Errorf("failed to get search index path: %w", err)
	}
	// Delete the existing index directory. Note: os.RemoveAll returns nil when
	// the path does not exist, so any error here is a real failure (e.g.
	// permissions) that would otherwise surface confusingly from bleve.New below.
	s.logger.Info("Deleting existing index", zap.String("path", indexPath))
	if err := os.RemoveAll(indexPath); err != nil {
		return fmt.Errorf("failed to delete existing index directory: %w", err)
	}
	// Create a fresh index with the standard mapping.
	s.logger.Info("Creating fresh index", zap.String("path", indexPath))
	indexMapping := buildIndexMapping()
	index, err := bleve.New(indexPath, indexMapping)
	if err != nil {
		return fmt.Errorf("failed to create fresh index: %w", err)
	}
	s.index = index
	// Now index all files and collections in a single batch for speed.
	batch := s.index.NewBatch()
	// Index all files
	for _, file := range files {
		file.Type = "file"
		if err := batch.Index(file.ID, file); err != nil {
			s.logger.Error("Failed to batch index file", zap.String("id", file.ID), zap.Error(err))
		}
	}
	// Index all collections
	for _, collection := range collections {
		collection.Type = "collection"
		if err := batch.Index(collection.ID, collection); err != nil {
			s.logger.Error("Failed to batch index collection", zap.String("id", collection.ID), zap.Error(err))
		}
	}
	// Execute batch
	if err := s.index.Batch(batch); err != nil {
		return fmt.Errorf("failed to execute batch index: %w", err)
	}
	finalCount, _ := s.index.DocCount()
	s.logger.Info("Search index rebuilt successfully",
		zap.Uint64("documents", finalCount),
		zap.Int("files_indexed", len(files)),
		zap.Int("collections_indexed", len(collections)))
	return nil
}
// GetIndexSize returns the size of the search index in bytes.
//
// Bleve does not expose on-disk size directly, so the document count is
// returned as a rough proxy; walking the index directory would be needed
// for actual disk usage.
func (s *searchService) GetIndexSize() (int64, error) {
	if s.index == nil {
		return 0, fmt.Errorf("search index not initialized")
	}
	docCount, err := s.index.DocCount()
	if err != nil {
		return 0, err
	}
	return int64(docCount), nil
}
// GetDocumentCount returns the number of documents currently in the index.
// Fails if the index has not been initialized.
func (s *searchService) GetDocumentCount() (uint64, error) {
	if s.index == nil {
		return 0, fmt.Errorf("search index not initialized")
	}
	docCount, err := s.index.DocCount()
	if err != nil {
		return 0, err
	}
	return docCount, nil
}
// buildIndexMapping creates the Bleve index mapping for files and collections.
//
// Field types:
//   - Text fields (filename, description, name, tags): analyzed with the
//     "standard" analyzer for good tokenization without stemming (better for
//     substring matching)
//   - Keyword fields (collection_id, type): exact match only, no analysis
//   - Numeric fields (size, file_count): stored as numbers for range queries
//   - Date fields (created_at): stored as datetime for date-based queries
func buildIndexMapping() mapping.IndexMapping {
	// Shared field mappings. The standard analyzer (not English) is used for
	// text because English stemming can interfere with partial matches.
	analyzedText := bleve.NewTextFieldMapping()
	analyzedText.Analyzer = "standard"
	exactKeyword := bleve.NewKeywordFieldMapping()
	numeric := bleve.NewNumericFieldMapping()
	datetime := bleve.NewDateTimeFieldMapping()

	// Mapping for "file" documents.
	fileDoc := bleve.NewDocumentMapping()
	fileDoc.AddFieldMappingsAt("filename", analyzedText)
	fileDoc.AddFieldMappingsAt("description", analyzedText)
	fileDoc.AddFieldMappingsAt("collection_name", analyzedText)
	fileDoc.AddFieldMappingsAt("tags", analyzedText)
	fileDoc.AddFieldMappingsAt("collection_id", exactKeyword)
	fileDoc.AddFieldMappingsAt("size", numeric)
	fileDoc.AddFieldMappingsAt("created_at", datetime)
	fileDoc.AddFieldMappingsAt("type", exactKeyword)

	// Mapping for "collection" documents.
	collectionDoc := bleve.NewDocumentMapping()
	collectionDoc.AddFieldMappingsAt("name", analyzedText)
	collectionDoc.AddFieldMappingsAt("description", analyzedText)
	collectionDoc.AddFieldMappingsAt("tags", analyzedText)
	collectionDoc.AddFieldMappingsAt("file_count", numeric)
	collectionDoc.AddFieldMappingsAt("created_at", datetime)
	collectionDoc.AddFieldMappingsAt("type", exactKeyword)

	idxMapping := bleve.NewIndexMapping()
	idxMapping.AddDocumentMapping("file", fileDoc)
	idxMapping.AddDocumentMapping("collection", collectionDoc)
	return idxMapping
}
// Helper functions to extract typed fields from Bleve search result maps.

// getStringField returns fields[key] as a string, or "" when the key is
// absent or holds a non-string value.
func getStringField(fields map[string]interface{}, key string) string {
	s, ok := fields[key].(string)
	if !ok {
		return ""
	}
	return s
}
// getStringArrayField returns fields[key] as a []string. Non-string elements
// are skipped; an absent or mistyped field yields an empty (non-nil) slice.
func getStringArrayField(fields map[string]interface{}, key string) []string {
	raw, ok := fields[key].([]interface{})
	if !ok {
		return []string{}
	}
	out := make([]string, 0, len(raw))
	for _, item := range raw {
		if s, ok := item.(string); ok {
			out = append(out, s)
		}
	}
	return out
}
// getIntField returns fields[key] as an int. Bleve stores numbers as float64,
// so the value is truncated; absent or mistyped fields yield 0.
func getIntField(fields map[string]interface{}, key string) int {
	f, ok := fields[key].(float64)
	if !ok {
		return 0
	}
	return int(f)
}
// getInt64Field returns fields[key] as an int64. Bleve stores numbers as
// float64, so the value is truncated; absent or mistyped fields yield 0.
func getInt64Field(fields map[string]interface{}, key string) int64 {
	f, ok := fields[key].(float64)
	if !ok {
		return 0
	}
	return int64(f)
}

View file

@ -0,0 +1,276 @@
// Package securitylog provides security event logging for audit purposes.
// This captures security-relevant events for monitoring and forensics.
package securitylog
import (
"time"
"go.uber.org/zap"
)
// EventType defines the type of security event.
type EventType string

const (
	// Authentication events
	EventLoginAttempt     EventType = "LOGIN_ATTEMPT"
	EventLoginSuccess     EventType = "LOGIN_SUCCESS"
	EventLoginFailure     EventType = "LOGIN_FAILURE"
	EventLogout           EventType = "LOGOUT"
	EventSessionRestored  EventType = "SESSION_RESTORED"
	EventSessionExpired   EventType = "SESSION_EXPIRED"
	EventSessionRevoked   EventType = "SESSION_REVOKED"
	EventTokenRefresh     EventType = "TOKEN_REFRESH"
	EventTokenRefreshFail EventType = "TOKEN_REFRESH_FAILURE"
	// Registration events
	EventRegistration      EventType = "REGISTRATION"
	EventEmailVerification EventType = "EMAIL_VERIFICATION"
	EventOTTRequest        EventType = "OTT_REQUEST"
	EventOTTVerify         EventType = "OTT_VERIFY"
	EventPasswordChallenge EventType = "PASSWORD_CHALLENGE"
	// Rate limiting events
	EventRateLimitExceeded EventType = "RATE_LIMIT_EXCEEDED"
	// Data access events
	EventCollectionCreate EventType = "COLLECTION_CREATE"
	EventCollectionUpdate EventType = "COLLECTION_UPDATE"
	EventCollectionDelete EventType = "COLLECTION_DELETE"
	EventCollectionAccess EventType = "COLLECTION_ACCESS"
	EventFileUpload       EventType = "FILE_UPLOAD"
	EventFileDownload     EventType = "FILE_DOWNLOAD"
	EventFileDelete       EventType = "FILE_DELETE"
	EventFileAccess       EventType = "FILE_ACCESS"
	EventFileOpen         EventType = "FILE_OPEN"
	// Export events
	EventExportStart    EventType = "EXPORT_START"
	EventExportComplete EventType = "EXPORT_COMPLETE"
	EventExportFailure  EventType = "EXPORT_FAILURE"
	// Configuration events
	EventConfigChange        EventType = "CONFIG_CHANGE"
	EventConfigIntegrityFail EventType = "CONFIG_INTEGRITY_FAILURE"
	EventCloudProviderChange EventType = "CLOUD_PROVIDER_CHANGE"
	// Security validation events (blocked or rejected input)
	EventSecurityValidationFail EventType = "SECURITY_VALIDATION_FAILURE"
	EventURLValidationFail      EventType = "URL_VALIDATION_FAILURE"
	EventInputValidationFail    EventType = "INPUT_VALIDATION_FAILURE"
	EventPathTraversalAttempt   EventType = "PATH_TRAVERSAL_ATTEMPT"
	// Key management events
	EventMasterKeyDerived EventType = "MASTER_KEY_DERIVED"
	EventMasterKeyCleared EventType = "MASTER_KEY_CLEARED"
	EventPasswordCleared  EventType = "PASSWORD_CLEARED"
	// Application lifecycle events
	EventAppStart    EventType = "APP_START"
	EventAppShutdown EventType = "APP_SHUTDOWN"
)

// EventOutcome indicates the result of an event. LogEvent maps Failure and
// Blocked to Warn-level log entries; everything else logs at Info.
type EventOutcome string

const (
	OutcomeSuccess EventOutcome = "SUCCESS"
	OutcomeFailure EventOutcome = "FAILURE"
	OutcomeBlocked EventOutcome = "BLOCKED"
)

// SecurityEvent represents a security-relevant event.
// Timestamp is always overwritten by Service.LogEvent with the current UTC
// time, so callers need not set it.
type SecurityEvent struct {
	Timestamp    time.Time         `json:"timestamp"`
	EventType    EventType         `json:"event_type"`
	Outcome      EventOutcome      `json:"outcome"`
	UserEmail    string            `json:"user_email,omitempty"` // Masked email
	ResourceID   string            `json:"resource_id,omitempty"`
	ResourceType string            `json:"resource_type,omitempty"`
	Details      map[string]string `json:"details,omitempty"` // Logged as detail_<key> fields
	ErrorMsg     string            `json:"error,omitempty"`
}

// Service provides security event logging.
type Service struct {
	logger *zap.Logger // "security"-named sub-logger (see New)
}
// New creates a new security logging service. All events are emitted through
// a "security"-named sub-logger of the given logger.
func New(logger *zap.Logger) *Service {
	svc := &Service{logger: logger.Named("security")}
	return svc
}
// ProvideService is the Wire provider for the security log service.
// It simply delegates to New.
func ProvideService(logger *zap.Logger) *Service {
	svc := New(logger)
	return svc
}
// LogEvent logs a security event. The event's Timestamp is stamped with the
// current UTC time, overwriting any caller-supplied value. Failure and
// Blocked outcomes log at Warn level; all other outcomes log at Info.
func (s *Service) LogEvent(event *SecurityEvent) {
	event.Timestamp = time.Now().UTC()
	fields := []zap.Field{
		zap.String("event_type", string(event.EventType)),
		zap.String("outcome", string(event.Outcome)),
		zap.Time("timestamp", event.Timestamp),
	}
	// Optional string attributes, included only when non-empty.
	optional := []struct{ key, val string }{
		{"user_email", event.UserEmail},
		{"resource_id", event.ResourceID},
		{"resource_type", event.ResourceType},
		{"error", event.ErrorMsg},
	}
	for _, o := range optional {
		if o.val != "" {
			fields = append(fields, zap.String(o.key, o.val))
		}
	}
	// Free-form details, prefixed to avoid clashing with the fixed fields.
	for k, v := range event.Details {
		fields = append(fields, zap.String("detail_"+k, v))
	}
	switch event.Outcome {
	case OutcomeFailure:
		s.logger.Warn("Security event", fields...)
	case OutcomeBlocked:
		s.logger.Warn("Security event (blocked)", fields...)
	default:
		// Success and any unknown outcome log at Info.
		s.logger.Info("Security event", fields...)
	}
}
// Helper methods for common events

// LogLoginAttempt records a login attempt for the given (masked) email.
func (s *Service) LogLoginAttempt(maskedEmail string) {
	ev := SecurityEvent{
		EventType: EventLoginAttempt,
		Outcome:   OutcomeSuccess,
		UserEmail: maskedEmail,
	}
	s.LogEvent(&ev)
}
// LogLoginSuccess records a successful login for the given (masked) email.
func (s *Service) LogLoginSuccess(maskedEmail string) {
	ev := SecurityEvent{
		EventType: EventLoginSuccess,
		Outcome:   OutcomeSuccess,
		UserEmail: maskedEmail,
	}
	s.LogEvent(&ev)
}
// LogLoginFailure records a failed login with the failure reason.
func (s *Service) LogLoginFailure(maskedEmail string, reason string) {
	ev := SecurityEvent{
		EventType: EventLoginFailure,
		Outcome:   OutcomeFailure,
		UserEmail: maskedEmail,
		ErrorMsg:  reason,
	}
	s.LogEvent(&ev)
}
// LogLogout records a logout for the given (masked) email.
func (s *Service) LogLogout(maskedEmail string) {
	ev := SecurityEvent{
		EventType: EventLogout,
		Outcome:   OutcomeSuccess,
		UserEmail: maskedEmail,
	}
	s.LogEvent(&ev)
}
// LogRateLimitExceeded records that an operation was blocked by rate limiting.
func (s *Service) LogRateLimitExceeded(maskedEmail string, operation string) {
	ev := SecurityEvent{
		EventType: EventRateLimitExceeded,
		Outcome:   OutcomeBlocked,
		UserEmail: maskedEmail,
		Details:   map[string]string{"operation": operation},
	}
	s.LogEvent(&ev)
}
// LogFileAccess records an access to the given file, tagging the operation
// performed and its outcome.
func (s *Service) LogFileAccess(maskedEmail string, fileID string, operation string, outcome EventOutcome) {
	ev := SecurityEvent{
		EventType:    EventFileAccess,
		Outcome:      outcome,
		UserEmail:    maskedEmail,
		ResourceID:   fileID,
		ResourceType: "file",
		Details:      map[string]string{"operation": operation},
	}
	s.LogEvent(&ev)
}
// LogCollectionAccess records an access to the given collection, tagging the
// operation performed and its outcome.
func (s *Service) LogCollectionAccess(maskedEmail string, collectionID string, operation string, outcome EventOutcome) {
	ev := SecurityEvent{
		EventType:    EventCollectionAccess,
		Outcome:      outcome,
		UserEmail:    maskedEmail,
		ResourceID:   collectionID,
		ResourceType: "collection",
		Details:      map[string]string{"operation": operation},
	}
	s.LogEvent(&ev)
}
// LogSecurityValidationFailure records a blocked security validation
// (URL/input/path-traversal etc.) with arbitrary detail fields.
func (s *Service) LogSecurityValidationFailure(eventType EventType, details map[string]string, errorMsg string) {
	ev := SecurityEvent{
		EventType: eventType,
		Outcome:   OutcomeBlocked,
		Details:   details,
		ErrorMsg:  errorMsg,
	}
	s.LogEvent(&ev)
}
// LogExport records an export operation (start/complete/failure) with the
// given outcome and detail fields.
func (s *Service) LogExport(maskedEmail string, eventType EventType, outcome EventOutcome, details map[string]string) {
	ev := SecurityEvent{
		EventType: eventType,
		Outcome:   outcome,
		UserEmail: maskedEmail,
		Details:   details,
	}
	s.LogEvent(&ev)
}
// LogConfigChange records a configuration change, naming the changed setting.
func (s *Service) LogConfigChange(setting string, maskedEmail string) {
	ev := SecurityEvent{
		EventType: EventConfigChange,
		Outcome:   OutcomeSuccess,
		UserEmail: maskedEmail,
		Details:   map[string]string{"setting": setting},
	}
	s.LogEvent(&ev)
}
// LogAppLifecycle records application start/shutdown events.
func (s *Service) LogAppLifecycle(eventType EventType) {
	ev := SecurityEvent{
		EventType: eventType,
		Outcome:   OutcomeSuccess,
	}
	s.LogEvent(&ev)
}
// LogKeyManagement records key management operations (derive/clear) with the
// given outcome.
func (s *Service) LogKeyManagement(eventType EventType, maskedEmail string, outcome EventOutcome) {
	ev := SecurityEvent{
		EventType: eventType,
		Outcome:   outcome,
		UserEmail: maskedEmail,
	}
	s.LogEvent(&ev)
}

View file

@ -0,0 +1,284 @@
// Package storagemanager provides a service for managing user-specific storage.
// It handles the lifecycle of storage instances, creating new storage when a user
// logs in and cleaning up when they log out.
//
// Storage is organized as follows:
// - Global storage (session): {appDir}/session/ - stores current login session
// - User-specific storage: {appDir}/users/{emailHash}/ - stores user data
//
// This ensures:
// 1. Different users have completely isolated data
// 2. Dev and production modes have separate directories
// 3. Email addresses are not exposed in directory names (hashed)
package storagemanager
import (
"fmt"
"os"
"path/filepath"
"sync"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/config"
collectionDomain "codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/collection"
fileDomain "codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/file"
syncstateDomain "codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/syncstate"
collectionRepo "codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/repo/collection"
fileRepo "codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/repo/file"
syncstateRepo "codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/repo/syncstate"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/pkg/storage"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/pkg/storage/leveldb"
)
// Manager manages user-specific storage instances.
// It creates storage when a user logs in and cleans up when they log out.
// All fields below are guarded by mu; accessors take RLock, mutators Lock.
type Manager struct {
	logger *zap.Logger
	mu     sync.RWMutex
	// Current user's email (empty if no user is logged in)
	currentUserEmail string
	// User-specific storage instances (nil when no user is initialized)
	localFilesStorage storage.Storage
	syncStateStorage  storage.Storage
	// User-specific repositories (built on top of storage; nil when not initialized)
	fileRepo       fileDomain.Repository
	collectionRepo collectionDomain.Repository
	syncStateRepo  syncstateDomain.Repository
}
// ProvideManager creates a new storage manager (Wire provider). The manager
// starts with no user storage; call InitializeForUser after login.
func ProvideManager(logger *zap.Logger) *Manager {
	m := &Manager{logger: logger.Named("storage-manager")}
	return m
}
// InitializeForUser initializes user-specific storage for the given user.
// Call this after a user successfully logs in. If storage is already
// initialized for a different user, that user's storage is torn down first;
// if it is already initialized for the same user, this is a no-op.
func (m *Manager) InitializeForUser(userEmail string) error {
	m.mu.Lock()
	defer m.mu.Unlock()

	if userEmail == "" {
		return fmt.Errorf("user email is required")
	}

	emailHash := config.GetEmailHashForPath(userEmail)

	// Already set up for this exact user: nothing to do.
	if m.currentUserEmail == userEmail && m.localFilesStorage != nil {
		m.logger.Debug("Storage already initialized for user",
			zap.String("email_hash", emailHash))
		return nil
	}

	// A different user was active: tear their storage down first.
	if m.currentUserEmail != "" && m.currentUserEmail != userEmail {
		m.logger.Info("Switching user storage",
			zap.String("old_user_hash", config.GetEmailHashForPath(m.currentUserEmail)),
			zap.String("new_user_hash", emailHash))
		m.cleanupStorageUnsafe()
	}

	m.logger.Info("Initializing storage for user",
		zap.String("email_hash", emailHash))

	// Local files storage (file and collection data).
	filesProvider, err := config.NewLevelDBConfigurationProviderForLocalFilesWithUser(userEmail)
	if err != nil {
		return fmt.Errorf("failed to create local files storage provider: %w", err)
	}
	m.localFilesStorage = leveldb.NewDiskStorage(filesProvider, m.logger.Named("local-files"))

	// Sync state storage. On failure, roll back the half-initialized state.
	stateProvider, err := config.NewLevelDBConfigurationProviderForSyncStateWithUser(userEmail)
	if err != nil {
		m.cleanupStorageUnsafe()
		return fmt.Errorf("failed to create sync state storage provider: %w", err)
	}
	m.syncStateStorage = leveldb.NewDiskStorage(stateProvider, m.logger.Named("sync-state"))

	// Repositories layered on top of the raw storage.
	m.fileRepo = fileRepo.ProvideRepository(m.localFilesStorage)
	m.collectionRepo = collectionRepo.ProvideRepository(m.localFilesStorage)
	m.syncStateRepo = syncstateRepo.ProvideRepository(m.syncStateStorage)
	m.currentUserEmail = userEmail

	m.logger.Info("Storage initialized successfully",
		zap.String("email_hash", emailHash))
	return nil
}
// Cleanup cleans up all user-specific storage.
// This should be called when a user logs out. Safe to call when nothing
// is initialized.
func (m *Manager) Cleanup() {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.cleanupStorageUnsafe()
}
// cleanupStorageUnsafe cleans up storage without acquiring the lock.
// Caller must hold the lock.
func (m *Manager) cleanupStorageUnsafe() {
if m.localFilesStorage != nil {
if closer, ok := m.localFilesStorage.(interface{ Close() error }); ok {
if err := closer.Close(); err != nil {
m.logger.Warn("Failed to close local files storage", zap.Error(err))
}
}
m.localFilesStorage = nil
}
if m.syncStateStorage != nil {
if closer, ok := m.syncStateStorage.(interface{ Close() error }); ok {
if err := closer.Close(); err != nil {
m.logger.Warn("Failed to close sync state storage", zap.Error(err))
}
}
m.syncStateStorage = nil
}
m.fileRepo = nil
m.collectionRepo = nil
m.syncStateRepo = nil
m.currentUserEmail = ""
m.logger.Debug("Storage cleaned up")
}
// IsInitialized reports whether storage is currently set up for a user.
func (m *Manager) IsInitialized() bool {
	m.mu.RLock()
	ready := m.localFilesStorage != nil
	m.mu.RUnlock()
	return ready
}
// GetCurrentUserEmail returns the email of the user for whom storage is
// initialized, or "" when no user storage is active.
func (m *Manager) GetCurrentUserEmail() string {
	m.mu.RLock()
	email := m.currentUserEmail
	m.mu.RUnlock()
	return email
}
// GetFileRepository returns the file repository for the current user,
// or nil when storage is not initialized.
func (m *Manager) GetFileRepository() fileDomain.Repository {
	m.mu.RLock()
	repo := m.fileRepo
	m.mu.RUnlock()
	return repo
}
// GetCollectionRepository returns the collection repository for the current
// user, or nil when storage is not initialized.
func (m *Manager) GetCollectionRepository() collectionDomain.Repository {
	m.mu.RLock()
	repo := m.collectionRepo
	m.mu.RUnlock()
	return repo
}
// GetSyncStateRepository returns the sync state repository for the current
// user, or nil when storage is not initialized.
func (m *Manager) GetSyncStateRepository() syncstateDomain.Repository {
	m.mu.RLock()
	repo := m.syncStateRepo
	m.mu.RUnlock()
	return repo
}
// GetLocalFilesStorage returns the raw local files storage for the current
// user, or nil when storage is not initialized.
func (m *Manager) GetLocalFilesStorage() storage.Storage {
	m.mu.RLock()
	st := m.localFilesStorage
	m.mu.RUnlock()
	return st
}
// DeleteUserData permanently deletes all local data for the specified user.
// This includes all files, metadata, and sync state stored on this device.
//
// IMPORTANT: this is a destructive operation and cannot be undone. The user
// will need to re-download all files from the cloud afterwards.
func (m *Manager) DeleteUserData(userEmail string) error {
	m.mu.Lock()
	defer m.mu.Unlock()

	if userEmail == "" {
		return fmt.Errorf("user email is required")
	}

	emailHash := config.GetEmailHashForPath(userEmail)
	m.logger.Info("Deleting all local data for user",
		zap.String("email_hash", emailHash))

	// Release open storage handles first when the target is the active user,
	// so the underlying files can actually be removed.
	if m.currentUserEmail == userEmail {
		m.cleanupStorageUnsafe()
	}

	userDir, err := config.GetUserSpecificDataDir("maplefile", userEmail)
	if err != nil {
		m.logger.Error("Failed to get user data directory", zap.Error(err))
		return fmt.Errorf("failed to get user data directory: %w", err)
	}

	// Nothing on disk for this user: treat as success.
	if _, statErr := os.Stat(userDir); os.IsNotExist(statErr) {
		m.logger.Debug("User data directory does not exist, nothing to delete",
			zap.String("email_hash", emailHash))
		return nil
	}

	// Remove the entire user directory and all of its contents.
	if removeErr := os.RemoveAll(userDir); removeErr != nil {
		m.logger.Error("Failed to delete user data directory",
			zap.Error(removeErr),
			zap.String("path", userDir))
		return fmt.Errorf("failed to delete user data: %w", removeErr)
	}

	m.logger.Info("Successfully deleted all local data for user",
		zap.String("email_hash", emailHash))
	return nil
}
// GetUserDataSize returns the total size of local data stored for the
// specified user in bytes. Returns 0 (with a nil error) when no data exists;
// unreadable entries encountered during the walk are skipped best-effort.
func (m *Manager) GetUserDataSize(userEmail string) (int64, error) {
	if userEmail == "" {
		return 0, fmt.Errorf("user email is required")
	}
	userDir, err := config.GetUserSpecificDataDir("maplefile", userEmail)
	if err != nil {
		return 0, fmt.Errorf("failed to get user data directory: %w", err)
	}
	// No directory means no data for this user.
	if _, err := os.Stat(userDir); os.IsNotExist(err) {
		return 0, nil
	}
	var totalSize int64
	// WalkDir avoids a stat per entry (unlike the older filepath.Walk);
	// file sizes are fetched lazily via DirEntry.Info only for regular files.
	walkErr := filepath.WalkDir(userDir, func(path string, d os.DirEntry, err error) error {
		if err != nil {
			return nil // best-effort: skip unreadable entries and continue
		}
		if d.IsDir() {
			return nil
		}
		info, err := d.Info()
		if err != nil {
			return nil // entry vanished or unreadable; skip it
		}
		totalSize += info.Size()
		return nil
	})
	if walkErr != nil {
		m.logger.Warn("Error calculating user data size", zap.Error(walkErr))
		return totalSize, nil // Return what we have
	}
	return totalSize, nil
}

View file

@ -0,0 +1,225 @@
package sync
import (
"context"
"fmt"
"time"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/maplefile/client"
collectionDomain "codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/collection"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/syncstate"
)
// CollectionSyncService defines the interface for collection synchronization.
type CollectionSyncService interface {
	// Execute pulls collections from the cloud into local storage in
	// cursor-paginated batches, returning per-run counters and errors.
	Execute(ctx context.Context, input *SyncInput) (*SyncResult, error)
}

// collectionSyncService is the default CollectionSyncService implementation,
// backed by the cloud API client and user-scoped repositories obtained
// through the RepositoryProvider.
type collectionSyncService struct {
	logger       *zap.Logger
	apiClient    *client.Client
	repoProvider RepositoryProvider
}
// ProvideCollectionSyncService creates a new collection sync service for Wire.
func ProvideCollectionSyncService(
	logger *zap.Logger,
	apiClient *client.Client,
	repoProvider RepositoryProvider,
) CollectionSyncService {
	svc := &collectionSyncService{
		logger:       logger.Named("CollectionSyncService"),
		apiClient:    apiClient,
		repoProvider: repoProvider,
	}
	return svc
}
// getCollectionRepo returns the collection repository, or an error when the
// user-scoped storage has not been initialized (i.e. no user is logged in).
func (s *collectionSyncService) getCollectionRepo() (collectionDomain.Repository, error) {
	if !s.repoProvider.IsInitialized() {
		return nil, fmt.Errorf("storage not initialized - user must be logged in")
	}
	if repo := s.repoProvider.GetCollectionRepository(); repo != nil {
		return repo, nil
	}
	return nil, fmt.Errorf("collection repository not available")
}
// getSyncStateRepo returns the sync state repository, or an error when the
// user-scoped storage has not been initialized (i.e. no user is logged in).
func (s *collectionSyncService) getSyncStateRepo() (syncstate.Repository, error) {
	if !s.repoProvider.IsInitialized() {
		return nil, fmt.Errorf("storage not initialized - user must be logged in")
	}
	if repo := s.repoProvider.GetSyncStateRepository(); repo != nil {
		return repo, nil
	}
	return nil, fmt.Errorf("sync state repository not available")
}
// Execute synchronizes collections from the cloud to local storage.
//
// Collections are pulled in cursor-paginated batches from the cloud API and
// applied to the local repository (create/update/delete). The advancing
// cursor is persisted in sync state after every batch so an interrupted sync
// can resume. Per-item and per-batch failures are accumulated in
// SyncResult.Errors rather than aborting the whole run; a non-nil error is
// returned only when the repositories or sync state cannot be accessed.
func (s *collectionSyncService) Execute(ctx context.Context, input *SyncInput) (*SyncResult, error) {
	s.logger.Info("Starting collection synchronization")
	// Get repositories (will fail if user not logged in)
	syncStateRepo, err := s.getSyncStateRepo()
	if err != nil {
		s.logger.Error("Cannot sync - storage not initialized", zap.Error(err))
		return nil, err
	}
	// Set defaults for a nil or partially-filled input
	if input == nil {
		input = &SyncInput{}
	}
	if input.BatchSize <= 0 {
		input.BatchSize = DefaultBatchSize
	}
	if input.MaxBatches <= 0 {
		input.MaxBatches = DefaultMaxBatches
	}
	// Get current sync state (holds the persisted pagination cursor)
	state, err := syncStateRepo.Get()
	if err != nil {
		s.logger.Error("Failed to get sync state", zap.Error(err))
		return nil, err
	}
	result := &SyncResult{}
	batchCount := 0
	// Sync loop - fetch and process batches until done or max reached
	for batchCount < input.MaxBatches {
		// Prepare API request, resuming from the stored cursor
		syncInput := &client.SyncInput{
			Cursor: state.CollectionCursor,
			Limit:  input.BatchSize,
		}
		// Fetch batch from cloud; a fetch failure ends the run but is
		// reported via result.Errors, not as a hard error return
		resp, err := s.apiClient.SyncCollections(ctx, syncInput)
		if err != nil {
			s.logger.Error("Failed to fetch collections from cloud", zap.Error(err))
			result.Errors = append(result.Errors, "failed to fetch collections: "+err.Error())
			break
		}
		// Process each collection in the batch; per-item failures are
		// accumulated so one bad record does not stop the sync
		for _, cloudCol := range resp.Collections {
			if err := s.processCollection(ctx, cloudCol, input.Password, result); err != nil {
				s.logger.Error("Failed to process collection",
					zap.String("id", cloudCol.ID),
					zap.Error(err))
				result.Errors = append(result.Errors, "failed to process collection "+cloudCol.ID+": "+err.Error())
			}
			// Counted whether or not processing succeeded
			result.CollectionsProcessed++
		}
		// Update sync state with the new cursor so an interrupted sync resumes
		state.UpdateCollectionSync(resp.NextCursor, resp.HasMore)
		if err := syncStateRepo.Save(state); err != nil {
			s.logger.Error("Failed to save sync state", zap.Error(err))
			result.Errors = append(result.Errors, "failed to save sync state: "+err.Error())
		}
		batchCount++
		// Check if we're done
		if !resp.HasMore {
			s.logger.Info("Collection sync completed - no more items")
			break
		}
	}
	s.logger.Info("Collection sync finished",
		zap.Int("processed", result.CollectionsProcessed),
		zap.Int("added", result.CollectionsAdded),
		zap.Int("updated", result.CollectionsUpdated),
		zap.Int("deleted", result.CollectionsDeleted),
		zap.Int("errors", len(result.Errors)))
	return result, nil
}
// processCollection applies a single cloud collection to local storage.
//
// Behavior:
//   - Collections marked deleted in the cloud remove the local copy (if any).
//   - Collections unknown locally are created.
//   - Known collections are updated, preserving the local SyncStatus.
//
// The ctx and password parameters are reserved for future use (on-demand
// content decryption) and are currently ignored. The result counters
// (Added/Updated/Deleted) are incremented accordingly.
func (s *collectionSyncService) processCollection(_ context.Context, cloudCol *client.Collection, _ string, result *SyncResult) error {
	collectionRepo, err := s.getCollectionRepo()
	if err != nil {
		return err
	}
	// Look up any existing local copy (nil when not present locally).
	localCol, err := collectionRepo.Get(cloudCol.ID)
	if err != nil {
		return err
	}
	// Tombstoned in the cloud: drop the local copy and stop.
	if cloudCol.State == collectionDomain.StateDeleted {
		if localCol != nil {
			if err := collectionRepo.Delete(cloudCol.ID); err != nil {
				return err
			}
			result.CollectionsDeleted++
		}
		return nil
	}
	// The collection name comes from the API already decrypted for owned
	// collections. For shared collections it would need decryption using the
	// key chain; for now the name is used as-is from the API response.
	collectionName := cloudCol.Name
	if localCol == nil {
		// Create a new local collection.
		newCol := s.mapCloudToLocal(cloudCol, collectionName)
		if err := collectionRepo.Create(newCol); err != nil {
			return err
		}
		result.CollectionsAdded++
		return nil
	}
	// Update the existing collection. mapCloudToLocal already stamps
	// LastSyncedAt with time.Now(), so the redundant re-assignment has been
	// removed; the local sync status is still preserved.
	updatedCol := s.mapCloudToLocal(cloudCol, collectionName)
	updatedCol.SyncStatus = localCol.SyncStatus
	if err := collectionRepo.Update(updatedCol); err != nil {
		return err
	}
	result.CollectionsUpdated++
	return nil
}
// mapCloudToLocal converts a cloud collection into the local domain model,
// marking it cloud-only and stamping the sync time.
func (s *collectionSyncService) mapCloudToLocal(cloudCol *client.Collection, decryptedName string) *collectionDomain.Collection {
	now := time.Now()
	local := &collectionDomain.Collection{
		ID:                     cloudCol.ID,
		ParentID:               cloudCol.ParentID,
		OwnerID:                cloudCol.UserID,
		EncryptedCollectionKey: cloudCol.EncryptedCollectionKey.Ciphertext,
		Nonce:                  cloudCol.EncryptedCollectionKey.Nonce,
		Name:                   decryptedName,
		Description:            cloudCol.Description,
		CustomIcon:             cloudCol.CustomIcon, // Custom icon (emoji or "icon:<id>")
		TotalFiles:             cloudCol.TotalFiles,
		TotalSizeInBytes:       cloudCol.TotalSizeInBytes,
		PermissionLevel:        cloudCol.PermissionLevel,
		IsOwner:                cloudCol.IsOwner,
		OwnerName:              cloudCol.OwnerName,
		OwnerEmail:             cloudCol.OwnerEmail,
		SyncStatus:             collectionDomain.SyncStatusCloudOnly,
		LastSyncedAt:           now,
		State:                  cloudCol.State,
		CreatedAt:              cloudCol.CreatedAt,
		ModifiedAt:             cloudCol.ModifiedAt,
	}
	return local
}

View file

@ -0,0 +1,254 @@
package sync
import (
"context"
"fmt"
"time"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/maplefile/client"
collectionDomain "codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/collection"
fileDomain "codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/file"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/syncstate"
)
// FileSyncService defines the interface for file synchronization.
type FileSyncService interface {
	// Execute pulls file metadata from the cloud into local storage in
	// cursor-paginated batches, returning per-run counters and errors.
	Execute(ctx context.Context, input *SyncInput) (*SyncResult, error)
}

// RepositoryProvider provides access to user-specific repositories.
// This interface allows sync services to work with dynamically initialized
// storage (created at login, torn down at logout).
// The storagemanager.Manager implements this interface.
type RepositoryProvider interface {
	GetFileRepository() fileDomain.Repository
	GetCollectionRepository() collectionDomain.Repository
	GetSyncStateRepository() syncstate.Repository
	// IsInitialized reports whether user storage is currently available;
	// the Get* methods return nil when it is not.
	IsInitialized() bool
}

// fileSyncService is the default FileSyncService implementation, backed by
// the cloud API client and user-scoped repositories.
type fileSyncService struct {
	logger       *zap.Logger
	apiClient    *client.Client
	repoProvider RepositoryProvider
}
// ProvideFileSyncService creates a new file sync service for Wire
func ProvideFileSyncService(
	logger *zap.Logger,
	apiClient *client.Client,
	repoProvider RepositoryProvider,
) FileSyncService {
	// Wire constructor: nothing to validate here, just assemble the service
	// with a named sub-logger for easier log filtering.
	svc := &fileSyncService{
		repoProvider: repoProvider,
		apiClient:    apiClient,
		logger:       logger.Named("FileSyncService"),
	}
	return svc
}
// getFileRepo returns the file repository, or an error if not initialized
func (s *fileSyncService) getFileRepo() (fileDomain.Repository, error) {
	if !s.repoProvider.IsInitialized() {
		return nil, fmt.Errorf("storage not initialized - user must be logged in")
	}
	// Happy path first: hand back the repository when the provider has one.
	if repo := s.repoProvider.GetFileRepository(); repo != nil {
		return repo, nil
	}
	return nil, fmt.Errorf("file repository not available")
}
// getSyncStateRepo returns the sync state repository, or an error if not initialized
func (s *fileSyncService) getSyncStateRepo() (syncstate.Repository, error) {
	if !s.repoProvider.IsInitialized() {
		return nil, fmt.Errorf("storage not initialized - user must be logged in")
	}
	// Happy path first: hand back the repository when the provider has one.
	if repo := s.repoProvider.GetSyncStateRepository(); repo != nil {
		return repo, nil
	}
	return nil, fmt.Errorf("sync state repository not available")
}
// Execute synchronizes files from the cloud to local storage (metadata only)
//
// It pages through the cloud file-sync endpoint starting from the cursor
// stored in the persisted sync state, processes each returned file, and
// saves the advanced cursor after every batch so an interrupted sync can
// resume where it left off. Per-file and per-batch failures are collected
// in result.Errors rather than aborting the whole run; a non-nil error is
// returned only when storage or the sync state itself is unavailable.
func (s *fileSyncService) Execute(ctx context.Context, input *SyncInput) (*SyncResult, error) {
	s.logger.Info("Starting file synchronization")
	// Get repositories (will fail if user not logged in)
	syncStateRepo, err := s.getSyncStateRepo()
	if err != nil {
		s.logger.Error("Cannot sync - storage not initialized", zap.Error(err))
		return nil, err
	}
	// Set defaults
	// NOTE(review): defaults are written back into the caller's input struct
	// (mutating it); harmless for current callers but worth confirming.
	if input == nil {
		input = &SyncInput{}
	}
	if input.BatchSize <= 0 {
		input.BatchSize = DefaultBatchSize
	}
	if input.MaxBatches <= 0 {
		input.MaxBatches = DefaultMaxBatches
	}
	// Get current sync state
	state, err := syncStateRepo.Get()
	if err != nil {
		s.logger.Error("Failed to get sync state", zap.Error(err))
		return nil, err
	}
	result := &SyncResult{}
	batchCount := 0
	// Sync loop - fetch and process batches until done or max reached
	// (MaxBatches caps work per Execute call; remaining pages are picked up
	// on the next run via the saved cursor).
	for batchCount < input.MaxBatches {
		// Prepare API request
		syncInput := &client.SyncInput{
			Cursor: state.FileCursor,
			Limit:  input.BatchSize,
		}
		// Fetch batch from cloud
		resp, err := s.apiClient.SyncFiles(ctx, syncInput)
		if err != nil {
			// Fetch failures end the loop but are reported via result.Errors,
			// not the returned error (partial progress is kept).
			s.logger.Error("Failed to fetch files from cloud", zap.Error(err))
			result.Errors = append(result.Errors, "failed to fetch files: "+err.Error())
			break
		}
		// Process each file in the batch
		for _, cloudFile := range resp.Files {
			if err := s.processFile(ctx, cloudFile, input.Password, result); err != nil {
				// A bad file does not stop the batch; record and continue.
				s.logger.Error("Failed to process file",
					zap.String("id", cloudFile.ID),
					zap.Error(err))
				result.Errors = append(result.Errors, "failed to process file "+cloudFile.ID+": "+err.Error())
			}
			// Counted regardless of success so "processed" reflects attempts.
			result.FilesProcessed++
		}
		// Update sync state with new cursor
		state.UpdateFileSync(resp.NextCursor, resp.HasMore)
		if err := syncStateRepo.Save(state); err != nil {
			s.logger.Error("Failed to save sync state", zap.Error(err))
			result.Errors = append(result.Errors, "failed to save sync state: "+err.Error())
		}
		batchCount++
		// Check if we're done
		if !resp.HasMore {
			s.logger.Info("File sync completed - no more items")
			break
		}
	}
	s.logger.Info("File sync finished",
		zap.Int("processed", result.FilesProcessed),
		zap.Int("added", result.FilesAdded),
		zap.Int("updated", result.FilesUpdated),
		zap.Int("deleted", result.FilesDeleted),
		zap.Int("errors", len(result.Errors)))
	return result, nil
}
// processFile handles a single file from the cloud
// Note: ctx and password are reserved for future use (on-demand content decryption)
func (s *fileSyncService) processFile(_ context.Context, cloudFile *client.File, _ string, result *SyncResult) error {
	repo, err := s.getFileRepo()
	if err != nil {
		return err
	}

	// Look up any existing local record for this file.
	existing, err := repo.Get(cloudFile.ID)
	if err != nil {
		return err
	}

	// Tombstone from the cloud: drop any local copy and stop.
	if cloudFile.State == fileDomain.StateDeleted {
		if existing == nil {
			return nil
		}
		if err := repo.Delete(cloudFile.ID); err != nil {
			return err
		}
		result.FilesDeleted++
		return nil
	}

	// First sighting: store a cloud-only metadata record (no content download).
	if existing == nil {
		if err := repo.Create(s.mapCloudToLocal(cloudFile)); err != nil {
			return err
		}
		result.FilesAdded++
		return nil
	}

	// Known file: refresh cloud metadata while keeping everything that only
	// exists locally (paths plus already-decrypted name/metadata).
	refreshed := s.mapCloudToLocal(cloudFile)
	refreshed.FilePath = existing.FilePath
	refreshed.EncryptedFilePath = existing.EncryptedFilePath
	refreshed.ThumbnailPath = existing.ThumbnailPath
	refreshed.Name = existing.Name
	refreshed.MimeType = existing.MimeType
	refreshed.Metadata = existing.Metadata

	// If file has local content, it's synced; otherwise it's cloud-only.
	refreshed.SyncStatus = fileDomain.SyncStatusCloudOnly
	if existing.HasLocalContent() {
		refreshed.SyncStatus = fileDomain.SyncStatusSynced
	}
	refreshed.LastSyncedAt = time.Now()

	if err := repo.Update(refreshed); err != nil {
		return err
	}
	result.FilesUpdated++
	return nil
}
// mapCloudToLocal converts a cloud file to local domain model.
// Local paths and decrypted metadata are left at their zero values until the
// file content is downloaded ("onloaded"); every record starts cloud-only.
func (s *fileSyncService) mapCloudToLocal(cloudFile *client.File) *fileDomain.File {
	var f fileDomain.File

	// Identity and ownership.
	f.ID = cloudFile.ID
	f.CollectionID = cloudFile.CollectionID
	f.OwnerID = cloudFile.UserID

	// E2EE material stays encrypted until the file is onloaded.
	f.EncryptedFileKey = fileDomain.EncryptedFileKeyData{
		Ciphertext: cloudFile.EncryptedFileKey.Ciphertext,
		Nonce:      cloudFile.EncryptedFileKey.Nonce,
	}
	f.FileKeyNonce = cloudFile.FileKeyNonce
	f.EncryptedMetadata = cloudFile.EncryptedMetadata
	f.MetadataNonce = cloudFile.MetadataNonce
	f.FileNonce = cloudFile.FileNonce
	f.EncryptedSizeInBytes = cloudFile.EncryptedSizeInBytes
	f.DecryptedSizeInBytes = cloudFile.DecryptedSizeInBytes

	// FilePath, EncryptedFilePath, ThumbnailPath, Name, MimeType and Metadata
	// intentionally remain zero values ("" / nil) until download+decrypt.

	// Sync bookkeeping.
	f.SyncStatus = fileDomain.SyncStatusCloudOnly
	f.LastSyncedAt = time.Now()

	// Cloud lifecycle fields.
	f.State = cloudFile.State
	f.StorageMode = cloudFile.StorageMode
	f.Version = cloudFile.Version
	f.CreatedAt = cloudFile.CreatedAt
	f.ModifiedAt = cloudFile.ModifiedAt
	f.ThumbnailURL = cloudFile.ThumbnailURL

	return &f
}

View file

@ -0,0 +1,149 @@
package sync
import (
"context"
"fmt"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/syncstate"
)
// Service provides unified sync operations
type Service interface {
	// SyncAll synchronizes both collections and files
	// (collections first, then files).
	SyncAll(ctx context.Context, input *SyncInput) (*SyncResult, error)
	// SyncCollections synchronizes collections only
	SyncCollections(ctx context.Context, input *SyncInput) (*SyncResult, error)
	// SyncFiles synchronizes files only
	SyncFiles(ctx context.Context, input *SyncInput) (*SyncResult, error)
	// GetSyncStatus returns the current sync status
	// derived from the persisted sync state.
	GetSyncStatus(ctx context.Context) (*SyncStatus, error)
	// ResetSync resets all sync state for a fresh sync
	// (cursors are cleared; the next sync starts from the beginning).
	ResetSync(ctx context.Context) error
}
// service is the default Service implementation; it composes the dedicated
// collection and file sync services behind one facade.
type service struct {
	logger         *zap.Logger           // named sub-logger ("SyncService")
	collectionSync CollectionSyncService // collection-only sync implementation
	fileSync       FileSyncService       // file-only sync implementation
	repoProvider   RepositoryProvider    // lazily-initialized per-user storage access
}
// ProvideService creates a new unified sync service for Wire
func ProvideService(
	logger *zap.Logger,
	collectionSync CollectionSyncService,
	fileSync FileSyncService,
	repoProvider RepositoryProvider,
) Service {
	// Wire constructor: just assemble the facade with a named sub-logger.
	svc := &service{
		repoProvider:   repoProvider,
		fileSync:       fileSync,
		collectionSync: collectionSync,
		logger:         logger.Named("SyncService"),
	}
	return svc
}
// getSyncStateRepo returns the sync state repository, or an error if not initialized
func (s *service) getSyncStateRepo() (syncstate.Repository, error) {
	if !s.repoProvider.IsInitialized() {
		return nil, fmt.Errorf("storage not initialized - user must be logged in")
	}
	// Happy path first: hand back the repository when the provider has one.
	if repo := s.repoProvider.GetSyncStateRepository(); repo != nil {
		return repo, nil
	}
	return nil, fmt.Errorf("sync state repository not available")
}
// SyncAll synchronizes both collections and files.
// Collections are synced first; if the subsequent file sync fails, the
// partial (collections-only) result is returned alongside the error.
func (s *service) SyncAll(ctx context.Context, input *SyncInput) (*SyncResult, error) {
	s.logger.Info("Starting full sync (collections + files)")

	colResult, err := s.collectionSync.Execute(ctx, input)
	if err != nil {
		s.logger.Error("Collection sync failed during full sync", zap.Error(err))
		return nil, err
	}

	// Seed the combined result with the collection statistics.
	merged := &SyncResult{
		CollectionsProcessed: colResult.CollectionsProcessed,
		CollectionsAdded:     colResult.CollectionsAdded,
		CollectionsUpdated:   colResult.CollectionsUpdated,
		CollectionsDeleted:   colResult.CollectionsDeleted,
	}

	fileResult, err := s.fileSync.Execute(ctx, input)
	if err != nil {
		s.logger.Error("File sync failed during full sync", zap.Error(err))
		// Return partial result with collection data.
		merged.Errors = append(colResult.Errors, "file sync failed: "+err.Error())
		return merged, err
	}

	// Fold in the file statistics and any accumulated errors.
	merged.FilesProcessed = fileResult.FilesProcessed
	merged.FilesAdded = fileResult.FilesAdded
	merged.FilesUpdated = fileResult.FilesUpdated
	merged.FilesDeleted = fileResult.FilesDeleted
	merged.Errors = append(colResult.Errors, fileResult.Errors...)

	s.logger.Info("Full sync completed",
		zap.Int("collections_processed", merged.CollectionsProcessed),
		zap.Int("files_processed", merged.FilesProcessed),
		zap.Int("errors", len(merged.Errors)))
	return merged, nil
}
// SyncCollections synchronizes collections only
func (s *service) SyncCollections(ctx context.Context, input *SyncInput) (*SyncResult, error) {
	// Thin pass-through to the dedicated collection sync service.
	return s.collectionSync.Execute(ctx, input)
}
// SyncFiles synchronizes files only
func (s *service) SyncFiles(ctx context.Context, input *SyncInput) (*SyncResult, error) {
	// Thin pass-through to the dedicated file sync service.
	return s.fileSync.Execute(ctx, input)
}
// GetSyncStatus returns the current sync status, computed from the
// persisted sync state's completion flags.
func (s *service) GetSyncStatus(ctx context.Context) (*SyncStatus, error) {
	repo, err := s.getSyncStateRepo()
	if err != nil {
		return nil, err
	}

	state, err := repo.Get()
	if err != nil {
		return nil, err
	}

	status := &SyncStatus{
		FullySynced:       state.IsFullySynced(),
		FilesSynced:       state.IsFileSyncComplete(),
		CollectionsSynced: state.IsCollectionSyncComplete(),
	}
	return status, nil
}
// ResetSync resets all sync state for a fresh sync; the next sync will
// start from the very beginning (no cursor).
func (s *service) ResetSync(ctx context.Context) error {
	s.logger.Info("Resetting sync state")

	repo, err := s.getSyncStateRepo()
	if err != nil {
		return err
	}
	return repo.Reset()
}

View file

@ -0,0 +1,39 @@
package sync
// SyncResult represents the result of a sync operation.
// "Processed" counts attempts (including failed items); Added/Updated/Deleted
// count successful repository writes. Non-fatal problems are accumulated in
// Errors rather than aborting the sync.
type SyncResult struct {
	// Collection sync statistics
	CollectionsProcessed int `json:"collections_processed"`
	CollectionsAdded     int `json:"collections_added"`
	CollectionsUpdated   int `json:"collections_updated"`
	CollectionsDeleted   int `json:"collections_deleted"`
	// File sync statistics
	FilesProcessed int `json:"files_processed"`
	FilesAdded     int `json:"files_added"`
	FilesUpdated   int `json:"files_updated"`
	FilesDeleted   int `json:"files_deleted"`
	// Errors encountered during sync (human-readable, one entry per failure)
	Errors []string `json:"errors,omitempty"`
}
// SyncInput represents input parameters for sync operations.
// Zero values for BatchSize/MaxBatches are replaced with the package
// defaults by the sync services.
type SyncInput struct {
	BatchSize  int64  `json:"batch_size,omitempty"`  // Number of items per batch (default: 50)
	MaxBatches int    `json:"max_batches,omitempty"` // Maximum batches to process (default: 100)
	Password   string `json:"password"`              // Required for E2EE decryption
}
// SyncStatus represents the current sync status as derived from the
// persisted sync state's completion flags.
type SyncStatus struct {
	CollectionsSynced bool `json:"collections_synced"` // collection cursor exhausted
	FilesSynced       bool `json:"files_synced"`       // file cursor exhausted
	FullySynced       bool `json:"fully_synced"`       // both of the above
}
// DefaultBatchSize is the default number of items to fetch per API call
// (applied when SyncInput.BatchSize is zero or negative).
const DefaultBatchSize = 50

// DefaultMaxBatches is the default maximum number of batches to process
// in a single sync run (applied when SyncInput.MaxBatches is zero or negative).
const DefaultMaxBatches = 100

View file

@ -0,0 +1,929 @@
# Token Manager Service
## Table of Contents
1. [Overview](#overview)
2. [Why Do We Need This?](#why-do-we-need-this)
3. [How It Works](#how-it-works)
4. [Architecture](#architecture)
5. [Configuration](#configuration)
6. [Lifecycle Management](#lifecycle-management)
7. [Error Handling](#error-handling)
8. [Testing](#testing)
9. [Troubleshooting](#troubleshooting)
10. [Examples](#examples)
---
## Overview
The Token Manager is a background service that automatically refreshes authentication tokens before they expire. This ensures users stay logged in without interruption and don't experience failed API requests due to expired tokens.
**Key Benefits:**
- ✅ Seamless user experience (no sudden logouts)
- ✅ No failed API requests due to expired tokens
- ✅ Automatic cleanup on app shutdown
- ✅ Graceful handling of refresh failures
---
## Why Do We Need This?
### The Problem
When you log into MapleFile, the backend gives you two tokens:
1. **Access Token** - Used for API requests (expires quickly, e.g., 1 hour)
2. **Refresh Token** - Used to get new access tokens (lasts longer, e.g., 30 days)
**Without Token Manager:**
```
User logs in → Gets tokens (expires in 1 hour)
User works for 61 minutes
User tries to upload file → ❌ 401 Unauthorized!
User gets logged out → 😞 Lost work, has to log in again
```
**With Token Manager:**
```
User logs in → Gets tokens (expires in 1 hour)
Token Manager checks every 30 seconds
At 59 minutes → Token Manager refreshes tokens automatically
User works for hours → ✅ Everything just works!
```
### The Solution
The Token Manager runs in the background and:
1. **Checks** token expiration every 30 seconds
2. **Refreshes** tokens when < 1 minute remains
3. **Handles failures** gracefully (3 strikes = logout)
4. **Shuts down cleanly** when app closes
---
## How It Works
### High-Level Flow
```
┌─────────────────────────────────────────────────────────────┐
│ Application Lifecycle │
└─────────────────────────────────────────────────────────────┘
┌──────────────────────────────────────┐
│ App Starts / User Logs In │
└──────────────────────────────────────┘
┌──────────────────────────────────────┐
│ Token Manager Starts │
│ (background goroutine) │
└──────────────────────────────────────┘
┌──────────────────────────────────────┐
│ Every 30 seconds: │
│ 1. Check session │
│ 2. Calculate time until expiry │
│  3. Refresh if < 1 minute           │
└──────────────────────────────────────┘
┌─────────┴─────────┐
▼ ▼
┌───────────────────┐ ┌──────────────────┐
│ Refresh Success │ │ Refresh Failed │
│ (reset counter) │ │ (increment) │
└───────────────────┘ └──────────────────┘
┌──────────────────┐
│ 3 failures? │
└──────────────────┘
Yes │ No
┌──────────┴──────┐
▼ ▼
┌─────────────────┐ ┌──────────┐
│ Force Logout │ │ Continue │
└─────────────────┘ └──────────┘
┌──────────────────────────────────────┐
│ App Shuts Down / User Logs Out │
└──────────────────────────────────────┘
┌──────────────────────────────────────┐
│ Token Manager Stops Gracefully │
│ (goroutine cleanup) │
└──────────────────────────────────────┘
```
### Detailed Process
#### 1. **Starting the Token Manager**
When a user logs in OR when the app restarts with a valid session:
```go
// In CompleteLogin or Startup
tokenManager.Start()
```
This creates a background goroutine that runs continuously.
#### 2. **Background Refresh Loop**
The goroutine runs this logic every 30 seconds:
```go
1. Get current session from LevelDB
2. Check if session exists and is valid
3. Calculate: timeUntilExpiry = session.ExpiresAt - time.Now()
4. If timeUntilExpiry < 1 minute:
a. Call API to refresh tokens
b. API returns new access + refresh tokens
c. Tokens automatically saved to session
5. If refresh fails:
a. Increment failure counter
b. If counter >= 3: Force logout
6. If refresh succeeds:
a. Reset failure counter to 0
```
#### 3. **Stopping the Token Manager**
When user logs out OR app shuts down:
```go
// Create a timeout context (max 3 seconds)
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
// Stop gracefully
tokenManager.Stop(ctx)
```
This signals the goroutine to stop and waits for confirmation.
---
## Architecture
### Component Structure
```
internal/service/tokenmanager/
├── config.go # Configuration settings
├── manager.go # Main token manager logic
├── provider.go # Wire dependency injection
└── README.md # This file
```
### Key Components
#### 1. **Manager Struct**
```go
type Manager struct {
// Dependencies
config Config // Settings (intervals, thresholds)
client *client.Client // API client for token refresh
authService *auth.Service // Auth service for logout
getSession *session.GetByIdUseCase // Get current session
logger *zap.Logger // Structured logging
// Lifecycle management
ctx context.Context // Manager's context
cancel context.CancelFunc // Cancel function
stopCh chan struct{} // Signal to stop
stoppedCh chan struct{} // Confirmation of stopped
running atomic.Bool // Is manager running?
// Refresh state
mu sync.Mutex // Protects failure counter
consecutiveFailures int // Track failures
}
```
#### 2. **Config Struct**
```go
type Config struct {
RefreshBeforeExpiry time.Duration // How early to refresh (default: 1 min)
CheckInterval time.Duration // How often to check (default: 30 sec)
MaxConsecutiveFailures int // Failures before logout (default: 3)
}
```
### Goroutine Management
#### Why Use Goroutines?
A **goroutine** is Go's way of running code in the background (like a separate thread). We need this because:
- Main app needs to respond to UI events
- Token checking can happen in the background
- No blocking of user actions
#### The Double-Channel Pattern
We use **two channels** for clean shutdown:
```go
stopCh chan struct{} // We close this to signal "please stop"
stoppedCh chan struct{} // Goroutine closes this to say "I stopped"
```
**Why two channels?**
```go
// Without confirmation:
close(stopCh) // Signal stop
// Goroutine might still be running! ⚠️
// App shuts down → goroutine orphaned → potential crash
// With confirmation:
close(stopCh) // Signal stop
<-stoppedCh // Wait for confirmation
// Now we KNOW goroutine is done ✅
```
#### Thread Safety
**Problem:** Multiple parts of the app might access the token manager at once.
**Solution:** Use synchronization primitives:
1. **`atomic.Bool` for running flag**
```go
// Atomic operations are thread-safe (no mutex needed)
if !tm.running.CompareAndSwap(false, true) {
return // Already running, don't start again
}
```
2. **`sync.Mutex` for failure counter**
```go
// Lock before accessing shared data
tm.mu.Lock()
defer tm.mu.Unlock()
tm.consecutiveFailures++
```
---
## Configuration
### Default Settings
```go
Config{
RefreshBeforeExpiry: 1 * time.Minute, // Refresh with 1 min remaining
CheckInterval: 30 * time.Second, // Check every 30 seconds
MaxConsecutiveFailures: 3, // 3 failures = logout
}
```
### Why These Values?
| Setting | Value | Reasoning |
|---------|-------|-----------|
| **RefreshBeforeExpiry** | 1 minute | Conservative buffer. Even if one check fails, we have time for next attempt |
| **CheckInterval** | 30 seconds | Frequent enough to catch the 1-minute window, not too aggressive on resources |
| **MaxConsecutiveFailures** | 3 failures | Balances between transient network issues and genuine auth problems |
### Customizing Configuration
To change settings, modify `provider.go`:
```go
func ProvideManager(...) *Manager {
config := Config{
RefreshBeforeExpiry: 2 * time.Minute, // More conservative
CheckInterval: 1 * time.Minute, // Less frequent checks
MaxConsecutiveFailures: 5, // More tolerant
}
return New(config, client, authService, getSession, logger)
}
```
---
## Lifecycle Management
### 1. **Starting the Token Manager**
**Called from:**
- `Application.Startup()` - If valid session exists from previous run
- `Application.CompleteLogin()` - After successful login
**What happens:**
```go
func (m *Manager) Start() {
// 1. Check if already running (thread-safe)
if !m.running.CompareAndSwap(false, true) {
return // Already running, do nothing
}
// 2. Create context for goroutine
m.ctx, m.cancel = context.WithCancel(context.Background())
// 3. Create channels for communication
m.stopCh = make(chan struct{})
m.stoppedCh = make(chan struct{})
// 4. Reset failure counter
m.consecutiveFailures = 0
// 5. Launch background goroutine
go m.refreshLoop()
}
```
**Why it's safe to call multiple times:**
The `CompareAndSwap` operation ensures only ONE goroutine starts, even if `Start()` is called many times.
### 2. **Running the Refresh Loop**
**The goroutine does this forever (until stopped):**
```go
func (m *Manager) refreshLoop() {
// Ensure we always mark as stopped when exiting
defer close(m.stoppedCh)
defer m.running.Store(false)
// Create ticker (fires every 30 seconds)
ticker := time.NewTicker(m.config.CheckInterval)
defer ticker.Stop()
// Do initial check immediately
m.checkAndRefresh()
// Loop forever
for {
select {
case <-m.stopCh:
// Stop signal received
return
case <-m.ctx.Done():
// Context cancelled
return
case <-ticker.C:
// 30 seconds elapsed, check again
m.checkAndRefresh()
}
}
}
```
**The `select` statement explained:**
Think of `select` like a switch statement for channels. It waits for one of these events:
- `stopCh` closed → Time to stop
- `ctx.Done()` → Forced cancellation
- `ticker.C` → 30 seconds passed, do work
### 3. **Stopping the Token Manager**
**Called from:**
- `Application.Shutdown()` - App closing
- `Application.Logout()` - User logging out
**What happens:**
```go
func (m *Manager) Stop(ctx context.Context) error {
// 1. Check if running
if !m.running.Load() {
return nil // Not running, nothing to do
}
// 2. Signal stop (close the channel)
close(m.stopCh)
// 3. Wait for confirmation OR timeout
select {
case <-m.stoppedCh:
// Goroutine confirmed it stopped
return nil
case <-ctx.Done():
// Timeout! Force cancel
m.cancel()
// Give it 100ms more
select {
case <-m.stoppedCh:
return nil
case <-time.After(100 * time.Millisecond):
return ctx.Err() // Failed to stop cleanly
}
}
}
```
**Why the timeout?**
If the goroutine is stuck (e.g., in a long API call), we can't wait forever. The app needs to shut down!
---
## Error Handling
### 1. **Refresh Failures**
**Types of failures:**
| Failure Type | Cause | Handling |
|--------------|-------|----------|
| **Network Error** | No internet connection | Increment counter, retry next check |
| **401 Unauthorized** | Refresh token expired | Increment counter, likely force logout |
| **500 Server Error** | Backend issue | Increment counter, retry next check |
| **Timeout** | Slow network | Increment counter, retry next check |
**Failure tracking:**
```go
func (m *Manager) checkAndRefresh() error {
m.mu.Lock()
defer m.mu.Unlock()
// ... check if refresh needed ...
// Attempt refresh
if err := m.client.RefreshToken(ctx); err != nil {
m.consecutiveFailures++
if m.consecutiveFailures >= m.config.MaxConsecutiveFailures {
// Too many failures! Force logout
return m.forceLogout()
}
return err
}
// Success! Reset counter
m.consecutiveFailures = 0
return nil
}
```
### 2. **Force Logout**
**When it happens:**
- 3 consecutive refresh failures
- Session expired on startup
**What it does:**
```go
func (m *Manager) forceLogout() error {
m.logger.Warn("Forcing logout due to token refresh issues")
// Use background context (not manager's context which might be cancelled)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
// Clear session from LevelDB
if err := m.authService.Logout(ctx); err != nil {
m.logger.Error("Failed to force logout", zap.Error(err))
return err
}
// User will see login screen on next UI interaction
return nil
}
```
**User experience:**
When force logout happens, the user will see the login screen the next time they interact with the app. Their work is NOT lost (local files remain); they just need to log in again.
### 3. **Session Not Found**
**Scenario:** User manually deleted session file, or session expired.
**Handling:**
```go
// Get current session
sess, err := m.getSession.Execute()
if err != nil || sess == nil {
// No session = user not logged in
// This is normal, not an error
return nil // Do nothing
}
```
---
## Testing
### Manual Testing
#### Test 1: Normal Refresh
1. Log in to the app
2. Watch logs for token manager start
3. Wait ~30 seconds
4. Check logs for "Token refresh not needed yet"
5. Verify `time_until_expiry` is decreasing
**Expected logs:**
```
INFO Token manager starting
INFO Token refresh loop started
DEBUG Token refresh not needed yet {"time_until_expiry": "59m30s"}
... wait 30 seconds ...
DEBUG Token refresh not needed yet {"time_until_expiry": "59m0s"}
```
#### Test 2: Automatic Refresh
1. Log in and get tokens with short expiry (if possible)
2. Wait until < 1 minute remaining
3. Watch logs for automatic refresh
**Expected logs:**
```
INFO Token refresh needed {"time_until_expiry": "45s"}
INFO Token refreshed successfully
DEBUG Token refresh not needed yet {"time_until_expiry": "59m30s"}
```
#### Test 3: Graceful Shutdown
1. Log in (token manager running)
2. Close the app (Cmd+Q on Mac, Alt+F4 on Windows)
3. Check logs for clean shutdown
**Expected logs:**
```
INFO MapleFile desktop application shutting down
INFO Token manager stopping...
INFO Token refresh loop received stop signal
INFO Token refresh loop exited
INFO Token manager stopped gracefully
```
#### Test 4: Logout
1. Log in (token manager running)
2. Click logout button
3. Verify token manager stops
**Expected logs:**
```
INFO Token manager stopping...
INFO Token manager stopped gracefully
INFO User logged out successfully
```
#### Test 5: Session Resume on Restart
1. Log in
2. Close app
3. Restart app
4. Check logs for session resume
**Expected logs:**
```
INFO MapleFile desktop application started
INFO Resuming valid session from previous run
INFO Session restored to API client
INFO Token manager starting
INFO Token manager started for resumed session
```
### Unit Testing (TODO)
```go
// Example test structure (to be implemented)
func TestTokenManager_Start(t *testing.T) {
// Test that Start() can be called multiple times safely
// Test that goroutine actually starts
}
func TestTokenManager_Stop(t *testing.T) {
// Test graceful shutdown
// Test timeout handling
}
func TestTokenManager_RefreshLogic(t *testing.T) {
// Test refresh when < 1 minute
// Test no refresh when > 1 minute
}
func TestTokenManager_FailureHandling(t *testing.T) {
// Test failure counter increment
// Test force logout after 3 failures
// Test counter reset on success
}
```
---
## Troubleshooting
### Problem: Token manager not starting
**Symptoms:**
- No "Token manager starting" log
- App works but might get logged out after token expires
**Possible causes:**
1. **No session on startup**
```
Check logs for: "No session found on startup"
Solution: This is normal if user hasn't logged in yet
```
2. **Session expired**
```
Check logs for: "Session expired on startup"
Solution: User needs to log in again
```
3. **Token manager already running**
```
Check logs for: "Token manager already running"
Solution: This is expected behavior (prevents duplicate goroutines)
```
### Problem: "Token manager stop timeout"
**Symptoms:**
- App takes long time to close
- Warning in logs: "Token manager stop timeout, forcing cancellation"
**Possible causes:**
1. **Refresh in progress during shutdown**
```
Goroutine might be in the middle of API call
Solution: Wait for current API call to timeout (max 30s)
```
2. **Network issue**
```
API call hanging due to network problems
Solution: Force cancellation (already handled automatically)
```
### Problem: Getting logged out unexpectedly
**Symptoms:**
- User sees login screen randomly
- Logs show "Forcing logout due to token refresh issues"
**Possible causes:**
1. **Network connectivity issues**
```
Check logs for repeated: "Token refresh failed"
Solution: Check internet connection, backend availability
```
2. **Backend API down**
```
All refresh attempts failing
Solution: Check backend service status
```
3. **Refresh token expired**
```
Backend returns 401 on refresh
Solution: User needs to log in again (this is expected)
```
### Problem: High CPU/memory usage
**Symptoms:**
- App using lots of resources
- Multiple token managers running
**Diagnosis:**
```bash
# Check goroutines
curl http://localhost:34115/debug/pprof/goroutine?debug=1
# Look for multiple "refreshLoop" goroutines
```
**Possible causes:**
1. **Token manager not stopping on logout**
```
Check logs for missing: "Token manager stopped gracefully"
Solution: Bug in stop logic (report issue)
```
2. **Multiple Start() calls**
```
Should not happen (atomic bool prevents this)
Solution: Report issue with reproduction steps
```
---
## Examples
### Example 1: Adding Custom Logging
Want to know exactly when refresh happens?
```go
// In tokenmanager/manager.go, modify checkAndRefresh():
func (m *Manager) checkAndRefresh() error {
// ... existing code ...
// Before refresh
m.logger.Info("REFRESH STARTING",
zap.Time("now", time.Now()),
zap.Time("token_expires_at", sess.ExpiresAt))
if err := m.client.RefreshToken(ctx); err != nil {
// Log failure details
m.logger.Error("REFRESH FAILED",
zap.Error(err),
zap.String("error_type", fmt.Sprintf("%T", err)))
return err
}
// After refresh
m.logger.Info("REFRESH COMPLETED",
zap.Time("completion_time", time.Now()))
return nil
}
```
### Example 2: Custom Failure Callback
Want to notify UI when logout happens?
```go
// Add callback to Manager struct:
type Manager struct {
// ... existing fields ...
onForceLogout func(reason string) // NEW
}
// In checkAndRefresh():
if m.consecutiveFailures >= m.config.MaxConsecutiveFailures {
reason := fmt.Sprintf("%d consecutive refresh failures", m.consecutiveFailures)
if m.onForceLogout != nil {
m.onForceLogout(reason) // Notify callback
}
return m.forceLogout()
}
// In Application, set callback:
func (a *Application) Startup(ctx context.Context) {
// ... existing code ...
// Set callback to emit Wails event
a.tokenManager.onForceLogout = func(reason string) {
runtime.EventsEmit(a.ctx, "auth:logged-out", reason)
}
}
```
### Example 3: Metrics Collection
Want to track refresh statistics?
```go
type RefreshMetrics struct {
TotalRefreshes int64
SuccessfulRefreshes int64
FailedRefreshes int64
LastRefreshTime time.Time
}
// Add to Manager:
type Manager struct {
// ... existing fields ...
metrics RefreshMetrics
metricsMu sync.Mutex
}
// In checkAndRefresh():
if err := m.client.RefreshToken(ctx); err != nil {
m.metricsMu.Lock()
m.metrics.TotalRefreshes++
m.metrics.FailedRefreshes++
m.metricsMu.Unlock()
return err
}
m.metricsMu.Lock()
m.metrics.TotalRefreshes++
m.metrics.SuccessfulRefreshes++
m.metrics.LastRefreshTime = time.Now()
m.metricsMu.Unlock()
// Export metrics via Wails:
func (a *Application) GetRefreshMetrics() map[string]interface{} {
return map[string]interface{}{
"total": a.tokenManager.metrics.TotalRefreshes,
"successful": a.tokenManager.metrics.SuccessfulRefreshes,
"failed": a.tokenManager.metrics.FailedRefreshes,
}
}
```
---
## Summary for Junior Developers
### Key Concepts to Remember
1. **Goroutines are background threads**
- They run concurrently with your main app
- Need careful management (start/stop)
2. **Channels are for communication**
- `close(stopCh)` = "Please stop"
- `<-stoppedCh` = "I confirm I stopped"
3. **Mutexes prevent race conditions**
- Lock before accessing shared data
- Always defer unlock
4. **Atomic operations are thread-safe**
- Use for simple flags
- No mutex needed
5. **Context carries deadlines**
- Respect timeouts
- Use for cancellation
### What NOT to Do
❌ **Don't call Start() in a loop**
```go
// Bad!
for {
tokenManager.Start() // Creates goroutine leak!
}
```
❌ **Don't forget to Stop()**
```go
// Bad!
func Logout() {
authService.Logout() // Token manager still running!
}
```
❌ **Don't block on Stop() without timeout**
```go
// Bad!
tokenManager.Stop(context.Background()) // Could hang forever!
// Good!
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
tokenManager.Stop(ctx)
```
### Learning Resources
- **Go Concurrency Patterns**: https://go.dev/blog/pipelines
- **Context Package**: https://go.dev/blog/context
- **Sync Package**: https://pkg.go.dev/sync
### Getting Help
If you're stuck:
1. Check the logs (they're very detailed)
2. Look at the troubleshooting section above
3. Ask senior developers for code review
4. File an issue with reproduction steps
---
## Changelog
### v1.0.0 (2025-11-21)
- Initial implementation
- Background refresh every 30 seconds
- Refresh when < 1 minute before expiry
- Graceful shutdown with timeout handling
- Automatic logout after 3 consecutive failures
- Session resume on app restart

View file

@ -0,0 +1,27 @@
package tokenmanager
import "time"
// Config holds configuration for the token manager.
type Config struct {
	// RefreshBeforeExpiry is how long before expiry to refresh the token.
	// Default: 1 minute
	RefreshBeforeExpiry time.Duration

	// CheckInterval is how often to check if refresh is needed.
	// Default: 30 seconds
	CheckInterval time.Duration

	// MaxConsecutiveFailures is how many consecutive refresh failures
	// before forcing logout.
	// Default: 3
	MaxConsecutiveFailures int
}

// Default values handed out by DefaultConfig.
const (
	defaultRefreshBeforeExpiry = 1 * time.Minute
	defaultCheckInterval       = 30 * time.Second
	defaultMaxFailures         = 3
)

// DefaultConfig returns the default configuration.
func DefaultConfig() Config {
	return Config{
		RefreshBeforeExpiry:    defaultRefreshBeforeExpiry,
		CheckInterval:          defaultCheckInterval,
		MaxConsecutiveFailures: defaultMaxFailures,
	}
}

View file

@ -0,0 +1,228 @@
package tokenmanager
import (
"context"
"sync"
"sync/atomic"
"time"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/maplefile/client"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/auth"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/usecase/session"
)
// Manager handles automatic token refresh with graceful shutdown.
type Manager struct {
	config      Config                  // refresh timing and failure thresholds
	client      *client.Client          // performs the actual RefreshToken API call
	authService *auth.Service           // used by forceLogout after unrecoverable failures
	getSession  *session.GetByIdUseCase // supplies the current session (tokens + expiry)
	logger      *zap.Logger

	// Lifecycle management — all of these are (re)initialized by Start
	// for each run of the background loop.
	ctx       context.Context    // cancelled to force the refresh loop to exit
	cancel    context.CancelFunc // pairs with ctx
	stopCh    chan struct{}      // closed to request the loop to stop
	stoppedCh chan struct{}      // closed by refreshLoop when it has fully exited
	running   atomic.Bool        // thread-safe "loop is active" flag

	// Refresh state management.
	mu                  sync.Mutex // serializes checkAndRefresh and guards consecutiveFailures
	consecutiveFailures int        // refresh failures in a row; reset on success
}
// New creates a new token manager.
//
// apiClient performs the token refresh calls, authService is used to
// force a logout after repeated refresh failures, and getSession
// supplies the currently stored session. The logger is scoped with the
// "token-manager" name for easier log filtering.
//
// The first client parameter is named apiClient so it does not shadow
// the imported client package.
func New(
	config Config,
	apiClient *client.Client,
	authService *auth.Service,
	getSession *session.GetByIdUseCase,
	logger *zap.Logger,
) *Manager {
	return &Manager{
		config:      config,
		client:      apiClient,
		authService: authService,
		getSession:  getSession,
		logger:      logger.Named("token-manager"),
	}
}
// Start begins the token refresh background process.
// Safe to call multiple times - will only start once, guarded by an
// atomic compare-and-swap on the running flag.
//
// NOTE(review): the lifecycle fields (ctx/cancel/channels/counter) are
// reinitialized here without holding mu, after the running flag is
// already set. Start and Stop appear intended to be driven from a single
// owning goroutine — confirm callers do not race them.
func (m *Manager) Start() {
	// Only start if not already running; the CAS both checks and claims
	// the running state in one atomic step.
	if !m.running.CompareAndSwap(false, true) {
		m.logger.Debug("Token manager already running, skipping start")
		return
	}
	// Fresh lifecycle state for this run: a cancellable context, a
	// stop-request channel, and a stopped-confirmation channel.
	m.ctx, m.cancel = context.WithCancel(context.Background())
	m.stopCh = make(chan struct{})
	m.stoppedCh = make(chan struct{})
	m.consecutiveFailures = 0
	m.logger.Info("Token manager starting")
	go m.refreshLoop()
}
// Stop gracefully stops the token refresh background process.
// It blocks until the background goroutine confirms shutdown or the
// provided context's deadline is exceeded (in which case ctx.Err() is
// returned after a short final grace period).
//
// Shutdown is signalled via m.cancel() rather than close(m.stopCh):
// cancelling a context is idempotent, so concurrent or repeated Stop
// calls are safe, whereas a second close() of the channel would panic.
func (m *Manager) Stop(ctx context.Context) error {
	if !m.running.Load() {
		m.logger.Debug("Token manager not running, nothing to stop")
		return nil
	}
	m.logger.Info("Token manager stopping...")
	// Signal the refresh loop to exit. Safe to call any number of times.
	m.cancel()
	// Wait for the goroutine to confirm it finished, or give up at the
	// caller's deadline.
	select {
	case <-m.stoppedCh:
		m.logger.Info("Token manager stopped gracefully")
		return nil
	case <-ctx.Done():
		m.logger.Warn("Token manager stop timeout, forcing cancellation")
		// Give the loop one last brief window to observe the cancellation
		// before reporting failure.
		select {
		case <-m.stoppedCh:
			m.logger.Info("Token manager stopped after forced cancellation")
			return nil
		case <-time.After(100 * time.Millisecond):
			m.logger.Error("Token manager failed to stop cleanly")
			return ctx.Err()
		}
	}
}
// IsRunning reports whether the token manager's background loop is
// currently active. The flag is set by Start and cleared by a deferred
// statement when refreshLoop exits.
func (m *Manager) IsRunning() bool {
	return m.running.Load()
}
// refreshLoop is the background goroutine that checks and refreshes tokens.
// It runs one check immediately, then re-checks every CheckInterval until
// stopped.
//
// The deferred statements run LIFO on exit: stop the ticker, log, clear
// the running flag, and finally close stoppedCh — so by the time a waiter
// observes stoppedCh, IsRunning() already reports false.
func (m *Manager) refreshLoop() {
	defer close(m.stoppedCh)
	defer m.running.Store(false)
	defer m.logger.Info("Token refresh loop exited")
	ticker := time.NewTicker(m.config.CheckInterval)
	defer ticker.Stop()
	m.logger.Info("Token refresh loop started",
		zap.Duration("check_interval", m.config.CheckInterval),
		zap.Duration("refresh_before_expiry", m.config.RefreshBeforeExpiry))
	// Do initial check immediately so a nearly-expired token is refreshed
	// without waiting a full CheckInterval.
	if err := m.checkAndRefresh(); err != nil {
		m.logger.Error("Initial token refresh check failed", zap.Error(err))
	}
	for {
		select {
		case <-m.stopCh:
			// Stop requested: stopCh was closed.
			m.logger.Info("Token refresh loop received stop signal")
			return
		case <-m.ctx.Done():
			// The manager's context was cancelled.
			m.logger.Info("Token refresh loop context cancelled")
			return
		case <-ticker.C:
			// Errors are logged and the loop keeps running; escalation
			// (forced logout) happens inside checkAndRefresh.
			if err := m.checkAndRefresh(); err != nil {
				m.logger.Error("Token refresh check failed", zap.Error(err))
			}
		}
	}
}
// checkAndRefresh checks if token refresh is needed and performs it.
//
// Decision ladder:
//  1. no session (error or nil): user not logged in — nothing to do, nil
//  2. session already expired: force logout
//  3. expiry further away than RefreshBeforeExpiry: no-op (resets the
//     failure counter if a previous attempt had failed)
//  4. otherwise: call RefreshToken; after MaxConsecutiveFailures failed
//     attempts in a row, force logout
//
// m.mu is held for the whole check, so the initial check and ticker
// ticks can never overlap, and consecutiveFailures is always accessed
// under the lock.
func (m *Manager) checkAndRefresh() error {
	m.mu.Lock()
	defer m.mu.Unlock()
	// Get current session.
	sess, err := m.getSession.Execute()
	if err != nil {
		m.logger.Debug("No session found, skipping refresh check", zap.Error(err))
		return nil // Not an error - user might not be logged in
	}
	if sess == nil {
		m.logger.Debug("Session is nil, skipping refresh check")
		return nil
	}
	// Check if session is still valid.
	if sess.IsExpired() {
		m.logger.Warn("Session has expired, forcing logout")
		return m.forceLogout()
	}
	// Check if refresh is needed yet.
	timeUntilExpiry := time.Until(sess.ExpiresAt)
	if timeUntilExpiry > m.config.RefreshBeforeExpiry {
		// No refresh needed yet.
		if m.consecutiveFailures > 0 {
			// A transient failure resolved itself before the token got
			// close to expiring — reset the counter.
			m.logger.Info("Session valid, resetting failure counter")
			m.consecutiveFailures = 0
		}
		m.logger.Debug("Token refresh not needed yet",
			zap.Duration("time_until_expiry", timeUntilExpiry))
		return nil
	}
	// Refresh needed.
	m.logger.Info("Token refresh needed",
		zap.Duration("time_until_expiry", timeUntilExpiry))
	// Attempt refresh with a background context (not the manager's
	// context) so an in-flight refresh is not aborted mid-request while
	// the manager shuts down.
	refreshCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	if err := m.client.RefreshToken(refreshCtx); err != nil {
		m.consecutiveFailures++
		m.logger.Error("Token refresh failed",
			zap.Error(err),
			zap.Int("consecutive_failures", m.consecutiveFailures),
			zap.Int("max_failures", m.config.MaxConsecutiveFailures))
		if m.consecutiveFailures >= m.config.MaxConsecutiveFailures {
			m.logger.Error("Max consecutive refresh failures reached, forcing logout")
			return m.forceLogout()
		}
		return err
	}
	// Success - reset failure counter.
	m.consecutiveFailures = 0
	m.logger.Info("Token refreshed successfully",
		zap.Duration("time_until_old_expiry", timeUntilExpiry))
	return nil
}
// forceLogout terminates the user's session after unrecoverable token
// refresh problems (an already-expired session, or too many failed
// refresh attempts in a row).
func (m *Manager) forceLogout() error {
	m.logger.Warn("Forcing logout due to token refresh issues")

	// Detach from the manager's own context: the logout call should
	// still be able to complete while the manager itself is shutting
	// down, bounded by its own 5-second timeout.
	logoutCtx, cancelLogout := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancelLogout()

	err := m.authService.Logout(logoutCtx)
	if err != nil {
		m.logger.Error("Failed to force logout", zap.Error(err))
		return err
	}

	m.logger.Info("Force logout completed successfully")
	return nil
}

View file

@ -0,0 +1,20 @@
package tokenmanager
import (
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/maplefile/client"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/auth"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/usecase/session"
)
// ProvideManager creates the token manager for Wire, using the default
// configuration.
//
// The first parameter is named apiClient so it does not shadow the
// imported client package.
func ProvideManager(
	apiClient *client.Client,
	authService *auth.Service,
	getSession *session.GetByIdUseCase,
	logger *zap.Logger,
) *Manager {
	config := DefaultConfig()
	return New(config, apiClient, authService, getSession, logger)
}

View file

@ -0,0 +1,19 @@
package collection
import (
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/collection"
)
// CreateUseCase persists brand-new collection records.
type CreateUseCase struct {
	collectionRepo collection.Repository
}

// ProvideCreateUseCase creates the use case for Wire.
func ProvideCreateUseCase(repo collection.Repository) *CreateUseCase {
	return &CreateUseCase{collectionRepo: repo}
}

// Execute creates a new collection record, delegating storage to the
// repository.
func (uc *CreateUseCase) Execute(c *collection.Collection) error {
	return uc.collectionRepo.Create(c)
}

View file

@ -0,0 +1,19 @@
package collection
import (
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/collection"
)
// DeleteUseCase removes collection records by their identifier.
type DeleteUseCase struct {
	collectionRepo collection.Repository
}

// ProvideDeleteUseCase creates the use case for Wire.
func ProvideDeleteUseCase(repo collection.Repository) *DeleteUseCase {
	return &DeleteUseCase{collectionRepo: repo}
}

// Execute deletes the collection with the given ID via the repository.
func (uc *DeleteUseCase) Execute(id string) error {
	return uc.collectionRepo.Delete(id)
}

View file

@ -0,0 +1,19 @@
package collection
import (
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/collection"
)
// GetUseCase looks up a single collection record.
type GetUseCase struct {
	collectionRepo collection.Repository
}

// ProvideGetUseCase creates the use case for Wire.
func ProvideGetUseCase(repo collection.Repository) *GetUseCase {
	return &GetUseCase{collectionRepo: repo}
}

// Execute retrieves the collection with the given ID from the repository.
func (uc *GetUseCase) Execute(id string) (*collection.Collection, error) {
	return uc.collectionRepo.Get(id)
}

View file

@ -0,0 +1,19 @@
package collection
import (
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/collection"
)
// ListUseCase enumerates every stored collection.
type ListUseCase struct {
	collectionRepo collection.Repository
}

// ProvideListUseCase creates the use case for Wire.
func ProvideListUseCase(repo collection.Repository) *ListUseCase {
	return &ListUseCase{collectionRepo: repo}
}

// Execute returns all collections known to the repository.
func (uc *ListUseCase) Execute() ([]*collection.Collection, error) {
	return uc.collectionRepo.List()
}

View file

@ -0,0 +1,19 @@
package collection
import (
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/collection"
)
// ListByParentUseCase enumerates the child collections of a parent.
type ListByParentUseCase struct {
	collectionRepo collection.Repository
}

// ProvideListByParentUseCase creates the use case for Wire.
func ProvideListByParentUseCase(repo collection.Repository) *ListByParentUseCase {
	return &ListByParentUseCase{collectionRepo: repo}
}

// Execute returns every collection whose parent matches parentID.
func (uc *ListByParentUseCase) Execute(parentID string) ([]*collection.Collection, error) {
	return uc.collectionRepo.ListByParent(parentID)
}

View file

@ -0,0 +1,19 @@
package collection
import (
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/collection"
)
// ListRootUseCase enumerates top-level collections.
type ListRootUseCase struct {
	collectionRepo collection.Repository
}

// ProvideListRootUseCase creates the use case for Wire.
func ProvideListRootUseCase(repo collection.Repository) *ListRootUseCase {
	return &ListRootUseCase{collectionRepo: repo}
}

// Execute returns all root-level collections (those without a parent).
func (uc *ListRootUseCase) Execute() ([]*collection.Collection, error) {
	return uc.collectionRepo.ListRoot()
}

View file

@ -0,0 +1,19 @@
package collection
import (
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/collection"
)
// UpdateUseCase persists changes to an existing collection record.
type UpdateUseCase struct {
	collectionRepo collection.Repository
}

// ProvideUpdateUseCase creates the use case for Wire.
func ProvideUpdateUseCase(repo collection.Repository) *UpdateUseCase {
	return &UpdateUseCase{collectionRepo: repo}
}

// Execute updates the given collection record via the repository.
func (uc *UpdateUseCase) Execute(c *collection.Collection) error {
	return uc.collectionRepo.Update(c)
}

View file

@ -0,0 +1,19 @@
package file
import (
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/file"
)
// CreateUseCase persists brand-new file records.
type CreateUseCase struct {
	fileRepo file.Repository
}

// ProvideCreateUseCase creates the use case for Wire.
func ProvideCreateUseCase(repo file.Repository) *CreateUseCase {
	return &CreateUseCase{fileRepo: repo}
}

// Execute creates a new file record, delegating storage to the repository.
func (uc *CreateUseCase) Execute(f *file.File) error {
	return uc.fileRepo.Create(f)
}

View file

@ -0,0 +1,19 @@
package file
import (
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/file"
)
// DeleteUseCase removes file records by their identifier.
type DeleteUseCase struct {
	fileRepo file.Repository
}

// ProvideDeleteUseCase creates the use case for Wire.
func ProvideDeleteUseCase(repo file.Repository) *DeleteUseCase {
	return &DeleteUseCase{fileRepo: repo}
}

// Execute deletes the file with the given ID via the repository.
func (uc *DeleteUseCase) Execute(id string) error {
	return uc.fileRepo.Delete(id)
}

View file

@ -0,0 +1,19 @@
package file
import (
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/file"
)
// GetUseCase looks up a single file record.
type GetUseCase struct {
	fileRepo file.Repository
}

// ProvideGetUseCase creates the use case for Wire.
func ProvideGetUseCase(repo file.Repository) *GetUseCase {
	return &GetUseCase{fileRepo: repo}
}

// Execute retrieves the file with the given ID from the repository.
func (uc *GetUseCase) Execute(id string) (*file.File, error) {
	return uc.fileRepo.Get(id)
}

View file

@ -0,0 +1,19 @@
package file
import (
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/file"
)
// ListUseCase enumerates every stored file record.
type ListUseCase struct {
	fileRepo file.Repository
}

// ProvideListUseCase creates the use case for Wire.
func ProvideListUseCase(repo file.Repository) *ListUseCase {
	return &ListUseCase{fileRepo: repo}
}

// Execute returns all file records known to the repository.
func (uc *ListUseCase) Execute() ([]*file.File, error) {
	return uc.fileRepo.List()
}

View file

@ -0,0 +1,19 @@
package file
import (
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/file"
)
// ListByCollectionUseCase enumerates the files inside one collection.
type ListByCollectionUseCase struct {
	fileRepo file.Repository
}

// ProvideListByCollectionUseCase creates the use case for Wire.
func ProvideListByCollectionUseCase(repo file.Repository) *ListByCollectionUseCase {
	return &ListByCollectionUseCase{fileRepo: repo}
}

// Execute returns every file that belongs to the given collection.
func (uc *ListByCollectionUseCase) Execute(collectionID string) ([]*file.File, error) {
	return uc.fileRepo.ListByCollection(collectionID)
}

View file

@ -0,0 +1,19 @@
package file
import (
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/file"
)
// ListByStatusUseCase enumerates files filtered by sync status.
type ListByStatusUseCase struct {
	fileRepo file.Repository
}

// ProvideListByStatusUseCase creates the use case for Wire.
func ProvideListByStatusUseCase(repo file.Repository) *ListByStatusUseCase {
	return &ListByStatusUseCase{fileRepo: repo}
}

// Execute returns every file whose sync status matches the given value.
func (uc *ListByStatusUseCase) Execute(status file.SyncStatus) ([]*file.File, error) {
	return uc.fileRepo.ListByStatus(status)
}

View file

@ -0,0 +1,19 @@
package file
import (
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/file"
)
// UpdateUseCase persists changes to an existing file record.
type UpdateUseCase struct {
	fileRepo file.Repository
}

// ProvideUpdateUseCase creates the use case for Wire.
func ProvideUpdateUseCase(repo file.Repository) *UpdateUseCase {
	return &UpdateUseCase{fileRepo: repo}
}

// Execute updates the given file record via the repository.
func (uc *UpdateUseCase) Execute(f *file.File) error {
	return uc.fileRepo.Update(f)
}

View file

@ -0,0 +1,33 @@
package session
import (
"time"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/session"
)
// CreateUseCase builds and persists a brand-new session record.
type CreateUseCase struct {
	sessionRepo session.Repository
}

// ProvideCreateUseCase creates the use case for Wire.
func ProvideCreateUseCase(repo session.Repository) *CreateUseCase {
	return &CreateUseCase{sessionRepo: repo}
}

// Execute creates and stores a new session.
//
// ExpiresAt is computed as now+expiresIn and CreatedAt is stamped from
// the same single clock read, so the two timestamps share an exact
// common base (the previous implementation called time.Now() twice,
// which could yield slightly different instants).
func (uc *CreateUseCase) Execute(
	userID, email, accessToken, refreshToken string,
	expiresIn time.Duration,
) error {
	now := time.Now()
	sess := &session.Session{
		UserID:       userID,
		Email:        email,
		AccessToken:  accessToken,
		RefreshToken: refreshToken,
		ExpiresAt:    now.Add(expiresIn),
		CreatedAt:    now,
	}
	return uc.sessionRepo.Save(sess)
}

View file

@ -0,0 +1,19 @@
package session
import (
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/session"
)
// DeleteUseCase removes the stored session.
type DeleteUseCase struct {
	sessionRepo session.Repository
}

// ProvideDeleteUseCase creates the use case for Wire.
func ProvideDeleteUseCase(repo session.Repository) *DeleteUseCase {
	return &DeleteUseCase{sessionRepo: repo}
}

// Execute deletes the current session via the repository.
func (uc *DeleteUseCase) Execute() error {
	return uc.sessionRepo.Delete()
}

View file

@ -0,0 +1,19 @@
package session
import (
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/session"
)
// GetByIdUseCase loads the stored session.
//
// NOTE(review): despite the "ById" in the name, Execute takes no ID and
// the repository returns the single current session — confirm whether
// this type should simply be named GetUseCase.
type GetByIdUseCase struct {
	sessionRepo session.Repository
}

// ProvideGetByIdUseCase creates the use case for Wire.
func ProvideGetByIdUseCase(repo session.Repository) *GetByIdUseCase {
	return &GetByIdUseCase{sessionRepo: repo}
}

// Execute retrieves the current session from the repository.
func (uc *GetByIdUseCase) Execute() (*session.Session, error) {
	return uc.sessionRepo.Get()
}

View file

@ -0,0 +1,22 @@
package session
import (
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/session"
)
// SaveUseCase persists updates to an existing session.
type SaveUseCase struct {
	sessionRepo session.Repository
}

// ProvideSaveUseCase creates the use case for Wire.
func ProvideSaveUseCase(repo session.Repository) *SaveUseCase {
	return &SaveUseCase{sessionRepo: repo}
}

// Execute saves an existing session (for updates). A nil session is
// deliberately treated as a no-op rather than an error — presumably so
// callers can invoke this unconditionally; confirm before changing.
func (uc *SaveUseCase) Execute(sess *session.Session) error {
	if sess == nil {
		return nil // nothing to persist
	}
	return uc.sessionRepo.Save(sess)
}

View file

@ -0,0 +1,19 @@
package syncstate
import (
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/syncstate"
)
// GetUseCase loads the stored sync state.
type GetUseCase struct {
	syncStateRepo syncstate.Repository
}

// ProvideGetUseCase creates the use case for Wire.
func ProvideGetUseCase(repo syncstate.Repository) *GetUseCase {
	return &GetUseCase{syncStateRepo: repo}
}

// Execute retrieves the current sync state from the repository.
func (uc *GetUseCase) Execute() (*syncstate.SyncState, error) {
	return uc.syncStateRepo.Get()
}

View file

@ -0,0 +1,19 @@
package syncstate
import (
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/syncstate"
)
// ResetUseCase clears the sync state so the next sync starts fresh.
type ResetUseCase struct {
	syncStateRepo syncstate.Repository
}

// ProvideResetUseCase creates the use case for Wire.
func ProvideResetUseCase(repo syncstate.Repository) *ResetUseCase {
	return &ResetUseCase{syncStateRepo: repo}
}

// Execute resets the stored sync state via the repository.
func (uc *ResetUseCase) Execute() error {
	return uc.syncStateRepo.Reset()
}

View file

@ -0,0 +1,19 @@
package syncstate
import (
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/syncstate"
)
// SaveUseCase persists the sync state.
type SaveUseCase struct {
	syncStateRepo syncstate.Repository
}

// ProvideSaveUseCase creates the use case for Wire.
func ProvideSaveUseCase(repo syncstate.Repository) *SaveUseCase {
	return &SaveUseCase{syncStateRepo: repo}
}

// Execute saves the given sync state via the repository.
func (uc *SaveUseCase) Execute(state *syncstate.SyncState) error {
	return uc.syncStateRepo.Save(state)
}

View file

@ -0,0 +1,34 @@
package user
import (
"time"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/user"
)
// CreateUseCase builds and persists a new user record.
type CreateUseCase struct {
	userRepo user.Repository
}

// ProvideCreateUseCase creates the use case for Wire.
func ProvideCreateUseCase(repo user.Repository) *CreateUseCase {
	return &CreateUseCase{userRepo: repo}
}

// Execute creates and saves a user with the given identity and storage
// quota.
//
// CreatedAt and UpdatedAt are stamped from a single clock read so the
// two timestamps are exactly equal on a fresh record (the previous
// implementation called time.Now() twice, which could yield slightly
// different values).
func (uc *CreateUseCase) Execute(
	id, email, firstName, lastName string,
	storageQuotaBytes int64,
) error {
	now := time.Now()
	u := &user.User{
		ID:                id,
		Email:             email,
		FirstName:         firstName,
		LastName:          lastName,
		StorageQuotaBytes: storageQuotaBytes,
		CreatedAt:         now,
		UpdatedAt:         now,
	}
	return uc.userRepo.Save(u)
}

View file

@ -0,0 +1,19 @@
package user
import (
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/user"
)
// GetByEmailUseCase looks up user records by email address.
type GetByEmailUseCase struct {
	userRepo user.Repository
}

// ProvideGetByEmailUseCase creates the use case for Wire.
func ProvideGetByEmailUseCase(repo user.Repository) *GetByEmailUseCase {
	return &GetByEmailUseCase{userRepo: repo}
}

// Execute returns the user record matching the given email.
func (uc *GetByEmailUseCase) Execute(email string) (*user.User, error) {
	return uc.userRepo.GetByEmail(email)
}

View file

@ -0,0 +1,19 @@
package user
import (
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/user"
)
// GetByIdUseCase looks up user records by their identifier.
type GetByIdUseCase struct {
	userRepo user.Repository
}

// ProvideGetByIdUseCase creates the use case for Wire.
func ProvideGetByIdUseCase(repo user.Repository) *GetByIdUseCase {
	return &GetByIdUseCase{userRepo: repo}
}

// Execute returns the user record with the given ID.
func (uc *GetByIdUseCase) Execute(id string) (*user.User, error) {
	return uc.userRepo.GetByID(id)
}

View file

@ -0,0 +1,57 @@
package utils
import "strings"
// MaskEmail masks an email address for logging purposes.
// Example: "user@example.com" becomes "u***@e***.com"
// This preserves enough information for debugging while protecting privacy.
//
// The leading character is extracted as a rune rather than a byte, so
// multi-byte (non-ASCII) addresses are masked without emitting invalid
// UTF-8 fragments.
func MaskEmail(email string) string {
	if email == "" {
		return ""
	}
	parts := strings.Split(email, "@")
	if len(parts) != 2 {
		// Not a valid email format, mask most of it.
		runes := []rune(email)
		if len(runes) <= 2 {
			return "***"
		}
		return string(runes[0]) + "***"
	}
	localPart := parts[0]
	domainPart := parts[1]
	// Mask local part: show first character, mask the rest.
	maskedLocal := maskPart(localPart)
	// Mask domain: show first character of domain name, mask rest, keep TLD.
	maskedDomain := maskDomain(domainPart)
	return maskedLocal + "@" + maskedDomain
}

// maskPart masks a string, showing only its first character (first rune,
// so multi-byte characters survive intact).
func maskPart(s string) string {
	runes := []rune(s)
	switch len(runes) {
	case 0:
		return "***"
	case 1:
		// Single-character parts keep that character visible.
		return s + "***"
	default:
		return string(runes[0]) + "***"
	}
}

// maskDomain masks the domain part, preserving the TLD (with its dot).
func maskDomain(domain string) string {
	lastDot := strings.LastIndex(domain, ".")
	if lastDot == -1 {
		// No TLD found, just mask the whole domain name.
		return maskPart(domain)
	}
	domainName := domain[:lastDot]
	tld := domain[lastDot:] // includes the dot
	return maskPart(domainName) + tld
}