Initial commit: Open sourcing all of the Maple Open Technologies code.

This commit is contained in:
Bartlomiej Mika 2025-12-02 14:33:08 -05:00
commit 755d54a99d
2010 changed files with 448675 additions and 0 deletions

View file

@ -0,0 +1,977 @@
package app
import (
	"context"
	"crypto/rand"
	"encoding/base64"
	"fmt"
	"strings"
	"time"

	"github.com/tyler-smith/go-bip39"
	"go.uber.org/zap"

	"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/maplefile/client"
	"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/maplefile/e2ee"
	"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/inputvalidation"
	"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/ratelimiter"
	"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/utils"
)
// RequestOTT requests a one-time token for login by asking the auth service
// to email a code to the given address.
func (a *Application) RequestOTT(email string) error {
	if vErr := inputvalidation.ValidateEmail(email); vErr != nil {
		return vErr
	}
	// Rate-limit per email before hitting the network. The counter is
	// deliberately never reset on success: the limit exists to stop users
	// from spamming the "request OTT" button, so they must wait between
	// consecutive requests.
	if rlErr := a.rateLimiter.Check(ratelimiter.OpRequestOTT, email); rlErr != nil {
		a.logger.Warn("OTT request rate limited",
			zap.String("email", utils.MaskEmail(email)),
			zap.Error(rlErr))
		return rlErr
	}
	return a.authService.RequestOTT(a.ctx, email)
}
// Logout logs out the current user and deletes all local data (default behavior for security).
// Use LogoutWithOptions for more control over local data deletion.
func (a *Application) Logout() error {
	// Default to wiping local data; callers wanting to keep cached files
	// must opt out explicitly via LogoutWithOptions(false).
	const wipeLocalData = true
	return a.LogoutWithOptions(wipeLocalData)
}
// LogoutWithOptions logs out the current user with control over local data deletion.
// If deleteLocalData is true, all locally cached files and metadata will be permanently deleted.
// If deleteLocalData is false, local data is preserved for faster login next time.
//
// Teardown is best-effort and runs in a fixed order (token manager, password,
// master key, search index, storage, session): individual failures are logged
// but never abort the logout, so the session is always cleared at the end.
func (a *Application) LogoutWithOptions(deleteLocalData bool) error {
	// Get session before clearing — its email is needed for the per-user
	// cleanup steps below. The error is deliberately ignored: with no
	// session, those steps are simply skipped.
	session, _ := a.authService.GetCurrentSession(a.ctx)
	var userEmail string
	if session != nil {
		userEmail = session.Email
	}
	// Stop token manager first, bounded by a 3-second timeout so a stuck
	// refresher cannot hang the whole logout.
	stopCtx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	if err := a.tokenManager.Stop(stopCtx); err != nil {
		a.logger.Error("Failed to stop token manager during logout", zap.Error(err))
		// Continue with logout even if token manager stop failed
	}
	// Clear stored password from RAM
	if session != nil {
		if err := a.passwordStore.ClearPassword(session.Email); err != nil {
			a.logger.Error("Failed to clear stored password", zap.Error(err))
		} else {
			a.logger.Info("Password cleared from secure RAM", zap.String("email", utils.MaskEmail(session.Email)))
		}
		// Clear cached master key from memory (if it exists)
		if a.keyCache.HasMasterKey(session.Email) {
			if err := a.keyCache.ClearMasterKey(session.Email); err != nil {
				a.logger.Warn("Failed to clear cached master key", zap.Error(err))
			} else {
				a.logger.Info("Cached master key cleared from secure memory", zap.String("email", utils.MaskEmail(session.Email)))
			}
		} else {
			a.logger.Debug("No cached master key to clear (expected after app restart)", zap.String("email", utils.MaskEmail(session.Email)))
		}
	}
	// Close search index before any storage deletion touches its files.
	if err := a.searchService.Close(); err != nil {
		a.logger.Error("Failed to close search index during logout", zap.Error(err))
		// Continue with logout even if search cleanup fails
	} else {
		a.logger.Info("Search index closed")
	}
	// Handle local data based on user preference
	if deleteLocalData && userEmail != "" {
		// Delete all local data permanently
		if err := a.storageManager.DeleteUserData(userEmail); err != nil {
			a.logger.Error("Failed to delete local user data", zap.Error(err))
			// Continue with logout even if deletion fails
		} else {
			a.logger.Info("All local user data deleted", zap.String("email", utils.MaskEmail(userEmail)))
		}
	} else {
		// Just cleanup storage connections (keep data on disk)
		a.storageManager.Cleanup()
		a.logger.Info("User storage connections closed, local data preserved")
	}
	// Clear session last; this is the only step whose error is returned.
	return a.authService.Logout(a.ctx)
}
// GetLocalDataSize returns the size of locally stored data for the current user in bytes.
// This can be used to show the user how much data will be deleted on logout.
// When no user is logged in, it reports zero bytes and no error.
func (a *Application) GetLocalDataSize() (int64, error) {
	sess, sessErr := a.authService.GetCurrentSession(a.ctx)
	if sessErr != nil || sess == nil {
		// No active session: there is no "current user" data to measure.
		return 0, nil
	}
	total, sizeErr := a.storageManager.GetUserDataSize(sess.Email)
	if sizeErr != nil {
		a.logger.Warn("Failed to get local data size", zap.Error(sizeErr))
		return 0, sizeErr
	}
	return total, nil
}
// IsLoggedIn checks if a user is logged in.
// It delegates directly to the auth service, which owns session state.
func (a *Application) IsLoggedIn() (bool, error) {
	return a.authService.IsLoggedIn(a.ctx)
}
// Register creates a new user account.
//
// The password never appears in RegisterInput — it is only used client-side
// to derive encryption keys; the server validates the resulting encrypted
// master key and salt by their presence and format.
func (a *Application) Register(input *client.RegisterInput) error {
	// Validate the user-supplied identity fields up front.
	if vErr := inputvalidation.ValidateEmail(input.Email); vErr != nil {
		return vErr
	}
	if vErr := inputvalidation.ValidateDisplayName(input.FirstName, "first name"); vErr != nil {
		return vErr
	}
	if vErr := inputvalidation.ValidateDisplayName(input.LastName, "last name"); vErr != nil {
		return vErr
	}
	// This rate limit is never reset on success: registration is a one-time
	// operation, and keeping the limit deters re-registration spam attempts.
	if rlErr := a.rateLimiter.Check(ratelimiter.OpRegister, input.Email); rlErr != nil {
		a.logger.Warn("Registration rate limited",
			zap.String("email", utils.MaskEmail(input.Email)),
			zap.Error(rlErr))
		return rlErr
	}
	return a.authService.Register(a.ctx, input)
}
// VerifyEmail verifies the email with the verification code.
// On success the verify-email rate-limit counter for this address is reset.
func (a *Application) VerifyEmail(email, code string) error {
	if vErr := inputvalidation.ValidateEmail(email); vErr != nil {
		return vErr
	}
	if vErr := inputvalidation.ValidateOTT(code); vErr != nil {
		return vErr
	}
	// Check rate limit before making the network request.
	if rlErr := a.rateLimiter.Check(ratelimiter.OpVerifyEmail, email); rlErr != nil {
		a.logger.Warn("Email verification rate limited",
			zap.String("email", utils.MaskEmail(email)),
			zap.Error(rlErr))
		return rlErr
	}
	verifyErr := a.authService.VerifyEmail(a.ctx, &client.VerifyEmailInput{
		Email: email,
		Code:  code,
	})
	if verifyErr != nil {
		return verifyErr
	}
	// Successful verification clears this operation's rate-limit counter.
	a.rateLimiter.Reset(ratelimiter.OpVerifyEmail, email)
	return nil
}
// VerifyOTTResponse contains the OTT verification response with encrypted challenge.
// All key and challenge fields are base64-encoded strings from the server;
// they are decoded and decrypted client-side (see DecryptLoginChallenge).
type VerifyOTTResponse struct {
	Message string `json:"message"`
	// ChallengeID identifies this login challenge; it is echoed back in CompleteLogin.
	ChallengeID string `json:"challengeId"`
	// EncryptedChallenge must be decrypted with the user's private key to prove key ownership.
	EncryptedChallenge string `json:"encryptedChallenge"`
	// Salt is the KDF salt used to derive the key-encryption key from the password.
	Salt string `json:"salt"`
	// EncryptedMasterKey is the master key encrypted with the password-derived KEK.
	EncryptedMasterKey string `json:"encryptedMasterKey"`
	// EncryptedPrivateKey is the NaCl private key encrypted with the master key.
	EncryptedPrivateKey string `json:"encryptedPrivateKey"`
	PublicKey string `json:"publicKey"`
	// KDFAlgorithm specifies which key derivation algorithm to use.
	// Value: "PBKDF2-SHA256"
	KDFAlgorithm string `json:"kdfAlgorithm"`
}
// VerifyOTT verifies the one-time token and returns the encrypted challenge
// plus the encrypted key material needed to decrypt it locally.
func (a *Application) VerifyOTT(email, ott string) (*VerifyOTTResponse, error) {
	if vErr := inputvalidation.ValidateEmail(email); vErr != nil {
		return nil, vErr
	}
	if vErr := inputvalidation.ValidateOTT(ott); vErr != nil {
		return nil, vErr
	}
	// Check rate limit before making the network request.
	if rlErr := a.rateLimiter.Check(ratelimiter.OpVerifyOTT, email); rlErr != nil {
		a.logger.Warn("OTT verification rate limited",
			zap.String("email", utils.MaskEmail(email)),
			zap.Error(rlErr))
		return nil, rlErr
	}
	resp, err := a.authService.VerifyOTT(a.ctx, email, ott)
	if err != nil {
		a.logger.Error("OTT verification failed", zap.Error(err))
		return nil, err
	}
	// Successful verification clears this operation's rate-limit counter.
	a.rateLimiter.Reset(ratelimiter.OpVerifyOTT, email)
	out := &VerifyOTTResponse{
		Message:             resp.Message,
		ChallengeID:         resp.ChallengeID,
		EncryptedChallenge:  resp.EncryptedChallenge,
		Salt:                resp.Salt,
		EncryptedMasterKey:  resp.EncryptedMasterKey,
		EncryptedPrivateKey: resp.EncryptedPrivateKey,
		PublicKey:           resp.PublicKey,
		KDFAlgorithm:        resp.KDFAlgorithm,
	}
	// Servers that omit the KDF algorithm default to PBKDF2-SHA256.
	if out.KDFAlgorithm == "" {
		out.KDFAlgorithm = e2ee.PBKDF2Algorithm
	}
	return out, nil
}
// CompleteLoginInput contains the data needed to complete login.
// Only Email, ChallengeID and DecryptedData are sent to the server; the
// password and encrypted key material are used client-side only (see
// CompleteLogin).
type CompleteLoginInput struct {
	Email string `json:"email"`
	// ChallengeID is the identifier returned by VerifyOTT.
	ChallengeID string `json:"challengeId"`
	// DecryptedData is the base64 decrypted challenge proving key ownership.
	DecryptedData string `json:"decryptedData"`
	// Password is kept client-side to cache keys and enable E2EE operations.
	Password string `json:"password"`
	// Encrypted user data for future password verification
	Salt string `json:"salt"`
	EncryptedMasterKey string `json:"encryptedMasterKey"`
	EncryptedPrivateKey string `json:"encryptedPrivateKey"`
	PublicKey string `json:"publicKey"`
	// KDFAlgorithm specifies which key derivation algorithm to use.
	// Value: "PBKDF2-SHA256"
	KDFAlgorithm string `json:"kdfAlgorithm"`
}
// CompleteLogin completes the login process with the decrypted challenge.
//
// On success it resets the user's rate limits, stores encrypted key material
// in the session, caches the password and master key in secure RAM,
// initializes per-user storage and search, and starts the token manager.
// All post-authentication setup steps are best-effort: their failures are
// logged but never fail the login itself.
func (a *Application) CompleteLogin(input *CompleteLoginInput) error {
	// Validate input before touching the network.
	if err := inputvalidation.ValidateEmail(input.Email); err != nil {
		return err
	}
	if err := inputvalidation.ValidatePassword(input.Password); err != nil {
		return err
	}
	if input.ChallengeID == "" {
		return fmt.Errorf("challenge ID is required")
	}
	if input.DecryptedData == "" {
		return fmt.Errorf("decrypted data is required")
	}
	// Check rate limit before making request
	if err := a.rateLimiter.Check(ratelimiter.OpCompleteLogin, input.Email); err != nil {
		a.logger.Warn("Login completion rate limited",
			zap.String("email", utils.MaskEmail(input.Email)),
			zap.Error(err))
		return err
	}
	// Only the challenge proof goes to the server; the password never leaves
	// the client.
	clientInput := &client.CompleteLoginInput{
		Email:         input.Email,
		ChallengeID:   input.ChallengeID,
		DecryptedData: input.DecryptedData,
	}
	_, err := a.authService.CompleteLogin(a.ctx, clientInput)
	if err != nil {
		a.logger.Error("Login completion failed", zap.Error(err))
		return err
	}
	// Reset all rate limits for this user on successful login
	a.rateLimiter.ResetAll(input.Email)
	// Store encrypted user data in session for future password verification
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err == nil && session != nil {
		session.Salt = input.Salt
		session.EncryptedMasterKey = input.EncryptedMasterKey
		session.EncryptedPrivateKey = input.EncryptedPrivateKey
		session.PublicKey = input.PublicKey
		// Store KDF algorithm so VerifyPassword knows which algorithm to use
		session.KDFAlgorithm = input.KDFAlgorithm
		if session.KDFAlgorithm == "" {
			session.KDFAlgorithm = e2ee.PBKDF2Algorithm
		}
		// Update session with encrypted data
		if err := a.authService.UpdateSession(a.ctx, session); err != nil {
			a.logger.Warn("Failed to update session with encrypted data", zap.Error(err))
			// Continue anyway - password storage will still work
		} else {
			a.logger.Info("Encrypted user data stored in session for password verification")
		}
	}
	// Store password in secure RAM for later E2EE operations.
	if err := a.passwordStore.StorePassword(input.Email, input.Password); err != nil {
		a.logger.Error("Failed to store password in RAM", zap.Error(err))
		// Don't fail login if password storage fails
	} else {
		a.logger.Info("Password stored securely in RAM for E2EE operations", zap.String("email", utils.MaskEmail(input.Email)))
	}
	// Cache master key for session to avoid re-decrypting for every file operation
	if input.Salt != "" && input.EncryptedMasterKey != "" && input.Password != "" {
		kdfAlgorithm := input.KDFAlgorithm
		if kdfAlgorithm == "" {
			kdfAlgorithm = e2ee.PBKDF2Algorithm
		}
		if err := a.cacheMasterKeyFromPassword(input.Email, input.Password, input.Salt, input.EncryptedMasterKey, kdfAlgorithm); err != nil {
			a.logger.Warn("Failed to cache master key during login", zap.Error(err))
			// Continue anyway - user can still use the app, just slower
		}
	}
	a.logger.Info("User logged in successfully", zap.String("email", utils.MaskEmail(input.Email)))
	// Initialize user-specific storage for the logged-in user
	if err := a.storageManager.InitializeForUser(input.Email); err != nil {
		a.logger.Error("Failed to initialize user storage", zap.Error(err))
		// Don't fail login - user can still use cloud features, just not local storage
	} else {
		a.logger.Info("User storage initialized", zap.String("email", utils.MaskEmail(input.Email)))
	}
	// Initialize search index for the logged-in user
	if err := a.searchService.Initialize(a.ctx, input.Email); err != nil {
		a.logger.Error("Failed to initialize search index", zap.Error(err))
		// Don't fail login if search initialization fails - it's not critical
		// The app can still function without search
	} else {
		a.logger.Info("Search index initialized", zap.String("email", utils.MaskEmail(input.Email)))
		// Rebuild search index from local data in the background
		userEmail := input.Email // Capture email before goroutine
		go func() {
			if err := a.rebuildSearchIndexForUser(userEmail); err != nil {
				a.logger.Warn("Failed to rebuild search index after login", zap.Error(err))
			}
		}()
	}
	// Start token manager for automatic token refresh
	a.tokenManager.Start()
	a.logger.Info("Token manager started for new session")
	return nil
}
// DecryptLoginChallenge decrypts the login challenge using the user's password.
// The kdfAlgorithm parameter specifies which key derivation function to use.
// If kdfAlgorithm is empty, it defaults to "PBKDF2-SHA256".
//
// Pipeline: password+salt -> KEK -> decrypt master key -> decrypt private key
// -> open the anonymous-box challenge. All intermediate key material lives in
// secure buffers that are cleared/destroyed via defer on every exit path.
// Returns the decrypted challenge re-encoded as base64 for the server.
func (a *Application) DecryptLoginChallenge(password, saltBase64, encryptedMasterKeyBase64, encryptedChallengeBase64, encryptedPrivateKeyBase64, publicKeyBase64, kdfAlgorithm string) (string, error) {
	// Default to PBKDF2-SHA256
	if kdfAlgorithm == "" {
		kdfAlgorithm = e2ee.PBKDF2Algorithm
	}
	a.logger.Debug("Decrypting login challenge", zap.String("kdf_algorithm", kdfAlgorithm))
	// Decode base64 inputs
	salt, err := base64.StdEncoding.DecodeString(saltBase64)
	if err != nil {
		a.logger.Error("Failed to decode salt", zap.Error(err))
		return "", fmt.Errorf("invalid salt encoding: %w", err)
	}
	encryptedChallenge, err := base64.StdEncoding.DecodeString(encryptedChallengeBase64)
	if err != nil {
		a.logger.Error("Failed to decode encrypted challenge", zap.Error(err))
		return "", fmt.Errorf("invalid challenge encoding: %w", err)
	}
	publicKey, err := base64.StdEncoding.DecodeString(publicKeyBase64)
	if err != nil {
		a.logger.Error("Failed to decode public key", zap.Error(err))
		return "", fmt.Errorf("invalid public key encoding: %w", err)
	}
	// Decode encrypted private key (nonce || ciphertext, combined)
	encryptedPrivateKeyCombined, err := base64.StdEncoding.DecodeString(encryptedPrivateKeyBase64)
	if err != nil {
		a.logger.Error("Failed to decode encrypted private key", zap.Error(err))
		return "", fmt.Errorf("invalid encrypted private key encoding: %w", err)
	}
	// Decode encrypted master key (nonce || ciphertext, combined)
	encryptedMasterKeyCombined, err := base64.StdEncoding.DecodeString(encryptedMasterKeyBase64)
	if err != nil {
		a.logger.Error("Failed to decode encrypted master key", zap.Error(err))
		return "", fmt.Errorf("invalid encrypted master key encoding: %w", err)
	}
	// 1. Derive KEK from password and salt using the selected KDF
	keychain, err := e2ee.NewSecureKeyChainWithAlgorithm(password, salt, kdfAlgorithm)
	if err != nil {
		a.logger.Error("Failed to create secure keychain", zap.Error(err), zap.String("kdf_algorithm", kdfAlgorithm))
		return "", fmt.Errorf("failed to derive key from password: %w", err)
	}
	defer keychain.Clear()
	// 2. Decrypt master key with KEK into protected memory
	// Auto-detect nonce size: web frontend uses 24-byte nonces (XSalsa20), native uses 12-byte (ChaCha20)
	masterKeyNonce, masterKeyCiphertext, err := e2ee.SplitNonceAndCiphertextSecretBox(encryptedMasterKeyCombined)
	if err != nil {
		a.logger.Error("Failed to split encrypted master key", zap.Error(err))
		return "", fmt.Errorf("invalid encrypted master key format: %w", err)
	}
	encryptedMasterKeyStruct := &e2ee.EncryptedKey{
		Ciphertext: masterKeyCiphertext,
		Nonce:      masterKeyNonce,
	}
	masterKey, err := keychain.DecryptMasterKeySecure(encryptedMasterKeyStruct)
	if err != nil {
		// A decryption failure here most commonly means a wrong password.
		a.logger.Error("Failed to decrypt master key", zap.Error(err), zap.String("kdf_algorithm", kdfAlgorithm))
		return "", fmt.Errorf("failed to decrypt master key (wrong password?): %w", err)
	}
	defer masterKey.Destroy()
	// 3. Decrypt private key with master key into protected memory
	// Auto-detect nonce size based on the encrypted data
	privateKeyNonce, privateKeyCiphertext, err := e2ee.SplitNonceAndCiphertextSecretBox(encryptedPrivateKeyCombined)
	if err != nil {
		a.logger.Error("Failed to split encrypted private key", zap.Error(err))
		return "", fmt.Errorf("invalid encrypted private key format: %w", err)
	}
	encryptedPrivateKeyStruct := &e2ee.EncryptedKey{
		Ciphertext: privateKeyCiphertext,
		Nonce:      privateKeyNonce,
	}
	privateKey, err := e2ee.DecryptPrivateKeySecure(encryptedPrivateKeyStruct, masterKey)
	if err != nil {
		a.logger.Error("Failed to decrypt private key", zap.Error(err))
		return "", fmt.Errorf("failed to decrypt private key: %w", err)
	}
	defer privateKey.Destroy()
	// 4. Decrypt the challenge using the private key (NaCl anonymous box)
	decryptedChallenge, err := e2ee.DecryptAnonymousBox(encryptedChallenge, publicKey, privateKey.Bytes())
	if err != nil {
		a.logger.Error("Failed to decrypt challenge", zap.Error(err))
		return "", fmt.Errorf("failed to decrypt login challenge: %w", err)
	}
	// Convert decrypted challenge to base64 for sending to server
	decryptedChallengeBase64 := base64.StdEncoding.EncodeToString(decryptedChallenge)
	a.logger.Info("Successfully decrypted login challenge")
	return decryptedChallengeBase64, nil
}
// RegistrationKeys contains all the E2EE keys needed for registration.
// All key fields are base64-encoded; the encrypted ones combine nonce and
// ciphertext into a single value.
type RegistrationKeys struct {
	// Salt is the KDF salt for deriving the KEK from the password.
	Salt string `json:"salt"`
	// EncryptedMasterKey is the master key encrypted with the password-derived KEK.
	EncryptedMasterKey string `json:"encryptedMasterKey"`
	PublicKey string `json:"publicKey"`
	// EncryptedPrivateKey is the NaCl private key encrypted with the master key.
	EncryptedPrivateKey string `json:"encryptedPrivateKey"`
	// EncryptedRecoveryKey is the recovery key encrypted with the master key.
	EncryptedRecoveryKey string `json:"encryptedRecoveryKey"`
	// MasterKeyEncryptedWithRecoveryKey allows account recovery via the mnemonic.
	MasterKeyEncryptedWithRecoveryKey string `json:"masterKeyEncryptedWithRecoveryKey"`
	// RecoveryMnemonic is the 12-word BIP39 mnemonic phrase that must be shown to the user
	// The user MUST save this phrase securely - it's their only way to recover their account
	RecoveryMnemonic string `json:"recoveryMnemonic"`
}
// RecoveryInitiateResponse contains the response from initiating account recovery.
type RecoveryInitiateResponse struct {
	Message string `json:"message"`
	// SessionID identifies this recovery session in subsequent calls.
	SessionID string `json:"sessionId"`
	// EncryptedChallenge is the base64 challenge passed to DecryptRecoveryChallenge.
	EncryptedChallenge string `json:"encryptedChallenge"`
}
// InitiateRecovery starts the account recovery process for the given email,
// returning a session ID and challenge for the subsequent recovery steps.
func (a *Application) InitiateRecovery(email string) (*RecoveryInitiateResponse, error) {
	if vErr := inputvalidation.ValidateEmail(email); vErr != nil {
		return nil, vErr
	}
	// NOTE(review): recovery initiation shares the OpRequestOTT rate-limit
	// bucket with OTT requests — confirm this sharing is intentional.
	if rlErr := a.rateLimiter.Check(ratelimiter.OpRequestOTT, email); rlErr != nil {
		a.logger.Warn("Recovery initiation rate limited",
			zap.String("email", utils.MaskEmail(email)),
			zap.Error(rlErr))
		return nil, rlErr
	}
	resp, err := a.authService.InitiateRecovery(a.ctx, email, "recovery_key")
	if err != nil {
		a.logger.Error("Recovery initiation failed", zap.Error(err))
		return nil, err
	}
	a.logger.Info("Recovery initiated successfully", zap.String("email", utils.MaskEmail(email)))
	out := &RecoveryInitiateResponse{
		Message:            resp.Message,
		SessionID:          resp.SessionID,
		EncryptedChallenge: resp.EncryptedChallenge,
	}
	return out, nil
}
// DecryptRecoveryChallengeInput contains the data needed to process recovery challenge
type DecryptRecoveryChallengeInput struct {
	// RecoveryMnemonic is the user's 12-word BIP39 recovery phrase.
	RecoveryMnemonic string `json:"recoveryMnemonic"`
	// EncryptedChallenge is the base64 challenge from InitiateRecovery.
	EncryptedChallenge string `json:"encryptedChallenge"`
}
// DecryptRecoveryChallengeResult contains the result of processing recovery challenge
type DecryptRecoveryChallengeResult struct {
	// DecryptedChallenge is the base64 proof to send back via VerifyRecovery.
	DecryptedChallenge string `json:"decryptedChallenge"`
	// IsValid reports whether the recovery phrase passed validation.
	IsValid bool `json:"isValid"`
}
// DecryptRecoveryChallenge validates the recovery mnemonic and processes the challenge.
// Note: The backend currently sends an unencrypted challenge (base64-encoded plaintext).
// This function validates the recovery phrase format and passes through the challenge.
// When the backend implements proper encryption, this function will decrypt the challenge.
func (a *Application) DecryptRecoveryChallenge(input *DecryptRecoveryChallengeInput) (*DecryptRecoveryChallengeResult, error) {
	if input.RecoveryMnemonic == "" {
		return nil, fmt.Errorf("recovery mnemonic is required")
	}
	// The phrase must be a checksummed BIP39 mnemonic.
	if !bip39.IsMnemonicValid(input.RecoveryMnemonic) {
		a.logger.Warn("Invalid recovery mnemonic format")
		return nil, fmt.Errorf("invalid recovery phrase: must be 12 valid BIP39 words")
	}
	// BIP39 also permits 15/18/21/24-word phrases; this app only issues
	// 12-word mnemonics, so enforce the exact count.
	words := len(splitMnemonic(input.RecoveryMnemonic))
	if words != 12 {
		return nil, fmt.Errorf("invalid recovery phrase: must be exactly 12 words, got %d", words)
	}
	// Validate the encrypted challenge is present
	if input.EncryptedChallenge == "" {
		return nil, fmt.Errorf("encrypted challenge is required")
	}
	// Derive recovery key from mnemonic to validate it's a valid recovery phrase.
	// This also prepares for future decryption when backend implements encryption.
	seed := bip39.NewSeed(input.RecoveryMnemonic, "")
	// FIX: wipe the derived seed on exit (recoveryKey aliases its first 32
	// bytes) so key material does not linger in memory — consistent with the
	// e2ee.ClearBytes handling in CompleteRecovery and GenerateRegistrationKeys.
	defer e2ee.ClearBytes(seed)
	recoveryKey := seed[:32]
	a.logger.Debug("Recovery key derived successfully",
		zap.Int("key_length", len(recoveryKey)),
		zap.Int("word_count", words))
	// TEMPORARY WORKAROUND: Backend currently sends base64-encoded plaintext challenge
	// instead of encrypted challenge. See backend TODO in recovery_initiate.go:108-113
	// Until backend implements proper encryption, we just validate and pass through.
	// Decode the challenge to validate it's valid base64
	challengeBytes, err := base64.StdEncoding.DecodeString(input.EncryptedChallenge)
	if err != nil {
		a.logger.Error("Failed to decode challenge", zap.Error(err))
		return nil, fmt.Errorf("invalid challenge format: %w", err)
	}
	// Re-encode to canonical base64 for sending to backend
	decryptedChallengeBase64 := base64.StdEncoding.EncodeToString(challengeBytes)
	a.logger.Info("Recovery challenge processed successfully (backend workaround active)")
	return &DecryptRecoveryChallengeResult{
		DecryptedChallenge: decryptedChallengeBase64,
		IsValid:            true,
	}, nil
}
// splitMnemonic splits a mnemonic phrase into its whitespace-separated words.
// Empty fields are never returned, so len() of the result is the word count.
func splitMnemonic(mnemonic string) []string {
	// strings.Fields splits around runs of whitespace and drops empty
	// fields — the same contract the previous hand-rolled splitter
	// implemented, generalized to all Unicode whitespace.
	return strings.Fields(mnemonic)
}
// splitByWhitespace splits a string by whitespace characters
// (space, tab, newline, carriage return — see splitString), dropping
// empty fields.
func splitByWhitespace(s string) []string {
	return splitString(s)
}
// splitString splits a string into words separated by spaces, tabs,
// newlines, or carriage returns. Consecutive separators produce no empty
// words; an input with no words yields a nil slice.
func splitString(s string) []string {
	var fields []string
	start := -1 // byte index where the current word began, or -1 if between words
	for i, r := range s {
		switch r {
		case ' ', '\t', '\n', '\r':
			if start >= 0 {
				fields = append(fields, s[start:i])
				start = -1
			}
		default:
			if start < 0 {
				start = i
			}
		}
	}
	if start >= 0 {
		fields = append(fields, s[start:])
	}
	return fields
}
// RecoveryVerifyResponse contains the response from verifying recovery.
type RecoveryVerifyResponse struct {
	Message string `json:"message"`
	// RecoveryToken authorizes the subsequent CompleteRecovery call.
	RecoveryToken string `json:"recoveryToken"`
	// CanResetCredentials reports whether the server will accept new credentials.
	CanResetCredentials bool `json:"canResetCredentials"`
}
// VerifyRecovery verifies the recovery challenge with the server and, on
// success, returns a recovery token for CompleteRecovery.
func (a *Application) VerifyRecovery(sessionID, decryptedChallenge string) (*RecoveryVerifyResponse, error) {
	switch {
	case sessionID == "":
		return nil, fmt.Errorf("session ID is required")
	case decryptedChallenge == "":
		return nil, fmt.Errorf("decrypted challenge is required")
	}
	resp, err := a.authService.VerifyRecovery(a.ctx, &client.RecoveryVerifyInput{
		SessionID:          sessionID,
		DecryptedChallenge: decryptedChallenge,
	})
	if err != nil {
		a.logger.Error("Recovery verification failed", zap.Error(err))
		return nil, err
	}
	a.logger.Info("Recovery verification successful")
	out := &RecoveryVerifyResponse{
		Message:             resp.Message,
		RecoveryToken:       resp.RecoveryToken,
		CanResetCredentials: resp.CanResetCredentials,
	}
	return out, nil
}
// CompleteRecoveryInput contains the data needed to complete account recovery.
type CompleteRecoveryInput struct {
	// RecoveryToken is issued by VerifyRecovery and authorizes the credential reset.
	RecoveryToken string `json:"recoveryToken"`
	// RecoveryMnemonic is the user's 12-word BIP39 recovery phrase.
	RecoveryMnemonic string `json:"recoveryMnemonic"`
	// NewPassword is the replacement password used to derive the new KEK.
	NewPassword string `json:"newPassword"`
}
// CompleteRecoveryResponse contains the response from completing recovery.
type CompleteRecoveryResponse struct {
	Message string `json:"message"`
	Success bool `json:"success"`
}
// CompleteRecovery completes the account recovery by re-encrypting keys with a new password.
// This function:
// 1. Validates the recovery mnemonic
// 2. Derives the recovery key from the mnemonic
// 3. Generates new encryption keys with the new password
// 4. Sends the new encrypted keys to the server
//
// A brand-new master key and keypair are generated (old ones are discarded);
// all sensitive buffers are held in secure memory and destroyed via defer.
func (a *Application) CompleteRecovery(input *CompleteRecoveryInput) (*CompleteRecoveryResponse, error) {
	// Validate inputs
	if input.RecoveryToken == "" {
		return nil, fmt.Errorf("recovery token is required")
	}
	if input.RecoveryMnemonic == "" {
		return nil, fmt.Errorf("recovery mnemonic is required")
	}
	if err := inputvalidation.ValidatePassword(input.NewPassword); err != nil {
		return nil, err
	}
	// Validate the mnemonic is a valid BIP39 phrase
	if !bip39.IsMnemonicValid(input.RecoveryMnemonic) {
		return nil, fmt.Errorf("invalid recovery phrase: must be 12 valid BIP39 words")
	}
	// Count words to ensure we have exactly 12 (BIP39 also allows longer phrases)
	words := len(splitMnemonic(input.RecoveryMnemonic))
	if words != 12 {
		return nil, fmt.Errorf("invalid recovery phrase: must be exactly 12 words, got %d", words)
	}
	a.logger.Info("Starting recovery completion - generating new encryption keys")
	// 1. Derive recovery key from mnemonic (first 32 bytes of the BIP39 seed);
	// the plain byte copy is wiped once it lives in the secure buffer.
	seed := bip39.NewSeed(input.RecoveryMnemonic, "")
	recoveryKeyBytes := seed[:32]
	recoveryKey, err := e2ee.NewSecureBuffer(recoveryKeyBytes)
	if err != nil {
		e2ee.ClearBytes(recoveryKeyBytes)
		return nil, fmt.Errorf("failed to create secure buffer for recovery key: %w", err)
	}
	defer recoveryKey.Destroy()
	e2ee.ClearBytes(recoveryKeyBytes)
	// 2. Generate new salt for the new password
	newSalt, err := e2ee.GenerateSalt()
	if err != nil {
		return nil, fmt.Errorf("failed to generate new salt: %w", err)
	}
	// 3. Create new keychain with PBKDF2-SHA256 (for web frontend compatibility)
	newKeychain, err := e2ee.NewSecureKeyChainWithAlgorithm(input.NewPassword, newSalt, e2ee.PBKDF2Algorithm)
	if err != nil {
		return nil, fmt.Errorf("failed to create new keychain: %w", err)
	}
	defer newKeychain.Clear()
	// 4. Generate new master key
	masterKeyBytes, err := e2ee.GenerateMasterKey()
	if err != nil {
		return nil, fmt.Errorf("failed to generate new master key: %w", err)
	}
	masterKey, err := e2ee.NewSecureBuffer(masterKeyBytes)
	if err != nil {
		e2ee.ClearBytes(masterKeyBytes)
		return nil, fmt.Errorf("failed to create secure buffer for master key: %w", err)
	}
	defer masterKey.Destroy()
	e2ee.ClearBytes(masterKeyBytes)
	// 5. Encrypt master key with new KEK
	encryptedMasterKey, err := newKeychain.EncryptMasterKeySecretBox(masterKey.Bytes())
	if err != nil {
		return nil, fmt.Errorf("failed to encrypt master key: %w", err)
	}
	// 6. Generate new keypair
	newPublicKey, privateKeyBytes, err := e2ee.GenerateKeyPair()
	if err != nil {
		return nil, fmt.Errorf("failed to generate new keypair: %w", err)
	}
	privateKey, err := e2ee.NewSecureBuffer(privateKeyBytes)
	if err != nil {
		e2ee.ClearBytes(privateKeyBytes)
		return nil, fmt.Errorf("failed to create secure buffer for private key: %w", err)
	}
	defer privateKey.Destroy()
	e2ee.ClearBytes(privateKeyBytes)
	// 7. Encrypt private key with master key
	encryptedPrivateKey, err := e2ee.EncryptPrivateKeySecretBox(privateKey.Bytes(), masterKey.Bytes())
	if err != nil {
		return nil, fmt.Errorf("failed to encrypt private key: %w", err)
	}
	// 8. Encrypt recovery key with master key
	encryptedRecoveryKey, err := e2ee.EncryptRecoveryKeySecretBox(recoveryKey.Bytes(), masterKey.Bytes())
	if err != nil {
		return nil, fmt.Errorf("failed to encrypt recovery key: %w", err)
	}
	// 9. Encrypt master key with recovery key (for future recovery)
	masterKeyEncryptedWithRecoveryKey, err := e2ee.EncryptMasterKeyWithRecoveryKeySecretBox(masterKey.Bytes(), recoveryKey.Bytes())
	if err != nil {
		return nil, fmt.Errorf("failed to encrypt master key with recovery key: %w", err)
	}
	// 10. Convert all keys to base64 for transport; encrypted values are sent
	// as a single combined nonce||ciphertext blob.
	newSaltBase64 := base64.StdEncoding.EncodeToString(newSalt)
	newPublicKeyBase64 := base64.StdEncoding.EncodeToString(newPublicKey)
	newEncryptedMasterKeyBase64 := base64.StdEncoding.EncodeToString(
		e2ee.CombineNonceAndCiphertext(encryptedMasterKey.Nonce, encryptedMasterKey.Ciphertext),
	)
	newEncryptedPrivateKeyBase64 := base64.StdEncoding.EncodeToString(
		e2ee.CombineNonceAndCiphertext(encryptedPrivateKey.Nonce, encryptedPrivateKey.Ciphertext),
	)
	newEncryptedRecoveryKeyBase64 := base64.StdEncoding.EncodeToString(
		e2ee.CombineNonceAndCiphertext(encryptedRecoveryKey.Nonce, encryptedRecoveryKey.Ciphertext),
	)
	newMasterKeyEncryptedWithRecoveryKeyBase64 := base64.StdEncoding.EncodeToString(
		e2ee.CombineNonceAndCiphertext(masterKeyEncryptedWithRecoveryKey.Nonce, masterKeyEncryptedWithRecoveryKey.Ciphertext),
	)
	// 11. Call API to complete recovery
	apiInput := &client.RecoveryCompleteInput{
		RecoveryToken:                        input.RecoveryToken,
		NewSalt:                              newSaltBase64,
		NewPublicKey:                         newPublicKeyBase64,
		NewEncryptedMasterKey:                newEncryptedMasterKeyBase64,
		NewEncryptedPrivateKey:               newEncryptedPrivateKeyBase64,
		NewEncryptedRecoveryKey:              newEncryptedRecoveryKeyBase64,
		NewMasterKeyEncryptedWithRecoveryKey: newMasterKeyEncryptedWithRecoveryKeyBase64,
	}
	resp, err := a.authService.CompleteRecovery(a.ctx, apiInput)
	if err != nil {
		a.logger.Error("Recovery completion failed", zap.Error(err))
		return nil, err
	}
	a.logger.Info("Recovery completed successfully - new encryption keys set")
	return &CompleteRecoveryResponse{
		Message: resp.Message,
		Success: resp.Success,
	}, nil
}
// GenerateRegistrationKeys generates all E2EE keys needed for user registration.
// This function uses PBKDF2-SHA256 for key derivation and XSalsa20-Poly1305 (SecretBox)
// for symmetric encryption to ensure compatibility with the web frontend.
func (a *Application) GenerateRegistrationKeys(password string) (*RegistrationKeys, error) {
	// 1. Generate salt (16 bytes for PBKDF2)
	salt, err := e2ee.GenerateSalt()
	if err != nil {
		a.logger.Error("Failed to generate salt", zap.Error(err))
		return nil, err
	}

	// 2. Create secure keychain using PBKDF2-SHA256 (compatible with web frontend)
	// This derives KEK from password + salt using PBKDF2-SHA256 with 100,000 iterations
	keychain, err := e2ee.NewSecureKeyChainWithAlgorithm(password, salt, e2ee.PBKDF2Algorithm)
	if err != nil {
		a.logger.Error("Failed to create secure keychain", zap.Error(err))
		return nil, err
	}
	defer keychain.Clear() // Clear sensitive data when done

	// 3. Generate master key in protected memory
	masterKeyBytes, err := e2ee.GenerateMasterKey()
	if err != nil {
		a.logger.Error("Failed to generate master key", zap.Error(err))
		return nil, err
	}
	masterKey, err := e2ee.NewSecureBuffer(masterKeyBytes)
	if err != nil {
		e2ee.ClearBytes(masterKeyBytes)
		a.logger.Error("Failed to create secure buffer for master key", zap.Error(err))
		return nil, err
	}
	defer masterKey.Destroy()
	// The secure buffer holds its own copy (this pattern of clearing right
	// after NewSecureBuffer is used throughout), so zero the plain slice now.
	e2ee.ClearBytes(masterKeyBytes)

	// 4. Encrypt master key with KEK using XSalsa20-Poly1305 (SecretBox)
	// This produces 24-byte nonces compatible with web frontend's libsodium
	encryptedMasterKey, err := keychain.EncryptMasterKeySecretBox(masterKey.Bytes())
	if err != nil {
		a.logger.Error("Failed to encrypt master key", zap.Error(err))
		return nil, err
	}

	// 5. Generate NaCl keypair for asymmetric encryption
	publicKey, privateKeyBytes, err := e2ee.GenerateKeyPair()
	if err != nil {
		a.logger.Error("Failed to generate keypair", zap.Error(err))
		return nil, err
	}
	privateKey, err := e2ee.NewSecureBuffer(privateKeyBytes)
	if err != nil {
		e2ee.ClearBytes(privateKeyBytes)
		a.logger.Error("Failed to create secure buffer for private key", zap.Error(err))
		return nil, err
	}
	defer privateKey.Destroy()
	e2ee.ClearBytes(privateKeyBytes)

	// 6. Encrypt private key with master key using XSalsa20-Poly1305 (SecretBox)
	encryptedPrivateKey, err := e2ee.EncryptPrivateKeySecretBox(privateKey.Bytes(), masterKey.Bytes())
	if err != nil {
		a.logger.Error("Failed to encrypt private key", zap.Error(err))
		return nil, err
	}

	// 7. Generate BIP39 mnemonic (12 words) for account recovery
	// This matches the web frontend's approach for cross-platform compatibility
	entropy := make([]byte, 16) // 128 bits = 12 words
	if _, err := rand.Read(entropy); err != nil {
		a.logger.Error("Failed to generate entropy for recovery mnemonic", zap.Error(err))
		return nil, err
	}
	recoveryMnemonic, err := bip39.NewMnemonic(entropy)
	if err != nil {
		a.logger.Error("Failed to generate recovery mnemonic", zap.Error(err))
		return nil, err
	}
	a.logger.Info("Generated 12-word recovery mnemonic")

	// Convert mnemonic to seed (64 bytes via HMAC-SHA512) then take first 32 bytes
	// This matches web frontend's mnemonicToRecoveryKey() function
	seed := bip39.NewSeed(recoveryMnemonic, "") // Empty passphrase like web frontend
	recoveryKeyBytes := seed[:32]               // Use first 32 bytes as recovery key
	recoveryKey, err := e2ee.NewSecureBuffer(recoveryKeyBytes)
	if err != nil {
		// Zero the full seed: recoveryKeyBytes aliases seed[:32], and the
		// remaining 32 bytes are equally sensitive key material.
		e2ee.ClearBytes(seed)
		a.logger.Error("Failed to create secure buffer for recovery key", zap.Error(err))
		return nil, err
	}
	defer recoveryKey.Destroy()
	// BUGFIX: previously only recoveryKeyBytes (seed[:32]) was cleared,
	// leaving seed[32:] un-zeroed in memory. Clearing the whole seed also
	// zeroes the aliased recoveryKeyBytes sub-slice.
	e2ee.ClearBytes(seed)

	// 8. Encrypt recovery key with master key using XSalsa20-Poly1305 (SecretBox)
	encryptedRecoveryKey, err := e2ee.EncryptRecoveryKeySecretBox(recoveryKey.Bytes(), masterKey.Bytes())
	if err != nil {
		a.logger.Error("Failed to encrypt recovery key", zap.Error(err))
		return nil, err
	}

	// 9. Encrypt master key with recovery key using XSalsa20-Poly1305 (SecretBox)
	// (the inverse wrapping, so either key can unlock the other during recovery)
	masterKeyEncryptedWithRecoveryKey, err := e2ee.EncryptMasterKeyWithRecoveryKeySecretBox(masterKey.Bytes(), recoveryKey.Bytes())
	if err != nil {
		a.logger.Error("Failed to encrypt master key with recovery key", zap.Error(err))
		return nil, err
	}

	// Convert all keys to base64 for transport.
	// Combine nonce and ciphertext for each encrypted key (nonce-prefixed format).
	encryptedMasterKeyBase64 := base64.StdEncoding.EncodeToString(
		e2ee.CombineNonceAndCiphertext(encryptedMasterKey.Nonce, encryptedMasterKey.Ciphertext),
	)
	encryptedPrivateKeyBase64 := base64.StdEncoding.EncodeToString(
		e2ee.CombineNonceAndCiphertext(encryptedPrivateKey.Nonce, encryptedPrivateKey.Ciphertext),
	)
	encryptedRecoveryKeyBase64 := base64.StdEncoding.EncodeToString(
		e2ee.CombineNonceAndCiphertext(encryptedRecoveryKey.Nonce, encryptedRecoveryKey.Ciphertext),
	)
	masterKeyEncryptedWithRecoveryKeyBase64 := base64.StdEncoding.EncodeToString(
		e2ee.CombineNonceAndCiphertext(masterKeyEncryptedWithRecoveryKey.Nonce, masterKeyEncryptedWithRecoveryKey.Ciphertext),
	)
	a.logger.Info("Successfully generated E2EE registration keys using PBKDF2-SHA256 + XSalsa20-Poly1305")
	// NOTE: RecoveryMnemonic is intentionally returned in plaintext so the
	// UI can display it to the user exactly once for safekeeping.
	return &RegistrationKeys{
		Salt:                              base64.StdEncoding.EncodeToString(salt),
		EncryptedMasterKey:                encryptedMasterKeyBase64,
		PublicKey:                         base64.StdEncoding.EncodeToString(publicKey),
		EncryptedPrivateKey:               encryptedPrivateKeyBase64,
		EncryptedRecoveryKey:              encryptedRecoveryKeyBase64,
		MasterKeyEncryptedWithRecoveryKey: masterKeyEncryptedWithRecoveryKeyBase64,
		RecoveryMnemonic:                  recoveryMnemonic,
	}, nil
}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,444 @@
package app
import (
"encoding/json"
"fmt"
"time"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/maplefile/client"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/maplefile/e2ee"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/file"
)
// DashboardData contains the formatted dashboard data for the frontend.
// All nested values are pre-formatted (human-readable sizes, RFC3339
// timestamps) so the UI can render them without further conversion.
type DashboardData struct {
	Summary           DashboardSummary      `json:"summary"`
	StorageUsageTrend StorageUsageTrend     `json:"storage_usage_trend"`
	RecentFiles       []DashboardRecentFile `json:"recent_files"`
}
// DashboardSummary contains account-level summary statistics.
type DashboardSummary struct {
	TotalFiles   int `json:"total_files"`
	TotalFolders int `json:"total_folders"`
	// StorageUsed and StorageLimit are human-readable strings
	// (e.g. "1.50 GB"), produced by formatStorageAmount.
	StorageUsed            string `json:"storage_used"`
	StorageLimit           string `json:"storage_limit"`
	StorageUsagePercentage int    `json:"storage_usage_percentage"`
}
// StorageUsageTrend contains storage usage trend data over a period.
type StorageUsageTrend struct {
	// Period identifies the time window covered by DataPoints; the value is
	// passed through from the backend unchanged.
	Period     string                  `json:"period"`
	DataPoints []StorageTrendDataPoint `json:"data_points"`
}
// StorageTrendDataPoint represents a single data point in the storage trend.
type StorageTrendDataPoint struct {
	Date string `json:"date"`
	// Usage is a human-readable amount (formatted by formatStorageAmount).
	Usage string `json:"usage"`
}
// DashboardRecentFile represents a recent file for dashboard display.
type DashboardRecentFile struct {
	ID           string `json:"id"`
	CollectionID string `json:"collection_id"`
	// Name is the decrypted filename, or the placeholder "Encrypted File"
	// when decryption was not possible (see IsDecrypted).
	Name string `json:"name"`
	// Size is the human-readable rendering of SizeInBytes.
	Size        string `json:"size"`
	SizeInBytes int64  `json:"size_in_bytes"`
	MimeType    string `json:"mime_type"`
	CreatedAt   string `json:"created_at"` // RFC3339
	// IsDecrypted reports whether Name/MimeType were successfully decrypted.
	IsDecrypted bool `json:"is_decrypted"`
	// SyncStatus is the string form of the file's sync status
	// (e.g. cloud-only when the file has never been downloaded).
	SyncStatus string `json:"sync_status"`
	// HasLocalContent reports whether the file's content exists locally.
	HasLocalContent bool `json:"has_local_content"`
}
// GetDashboardData fetches and formats dashboard data from the backend.
//
// Requires an authenticated, unexpired session. Summary figures and the
// storage trend are converted to human-readable strings. For each recent
// file, the name/MIME type/size are resolved from the local repository when
// the file exists there; otherwise the function attempts to decrypt the
// cloud metadata using the cached master key and the collection keys that
// the dashboard response carries. Files that cannot be decrypted fall back
// to the placeholder name "Encrypted File" with IsDecrypted=false.
func (a *Application) GetDashboardData() (*DashboardData, error) {
	// Get API client from auth service
	apiClient := a.authService.GetAPIClient()
	if apiClient == nil {
		return nil, fmt.Errorf("API client not available")
	}
	// Ensure we have a valid session with tokens
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil || session == nil {
		return nil, fmt.Errorf("no active session - please log in")
	}
	if !session.IsValid() {
		return nil, fmt.Errorf("session expired - please log in again")
	}
	// Re-apply tokens to the API client. This is important after app
	// restarts or hot reloads, when the in-memory client loses its tokens.
	apiClient.SetTokens(session.AccessToken, session.RefreshToken)
	a.logger.Debug("Restored tokens to API client for dashboard request",
		zap.String("user_id", session.UserID),
		zap.Time("token_expires_at", session.ExpiresAt))
	// The following expiry checks are diagnostic logging only; the API
	// client is expected to refresh the token itself on a 401 (see the
	// comment above the GetDashboard call below).
	timeUntilExpiry := time.Until(session.ExpiresAt)
	now := time.Now()
	sessionAge := now.Sub(session.CreatedAt)
	a.logger.Debug("Token status check",
		zap.Time("now", now),
		zap.Time("expires_at", session.ExpiresAt),
		zap.Duration("time_until_expiry", timeUntilExpiry),
		zap.Duration("session_age", sessionAge))
	if timeUntilExpiry < 0 {
		a.logger.Warn("Access token already expired, refresh should happen automatically",
			zap.Duration("expired_since", -timeUntilExpiry))
	} else if timeUntilExpiry < 2*time.Minute {
		a.logger.Info("Access token expiring soon, refresh may be needed",
			zap.Duration("time_until_expiry", timeUntilExpiry))
	}
	// If session is very old (more than 1 day), recommend a fresh login.
	if sessionAge > 24*time.Hour {
		a.logger.Warn("Session is very old, consider logging out and logging in again",
			zap.Duration("session_age", sessionAge))
	}
	// Fetch dashboard data from backend.
	// The client will automatically refresh the token if it gets a 401.
	a.logger.Debug("Calling backend API for dashboard data")
	resp, err := apiClient.GetDashboard(a.ctx)
	if err != nil {
		a.logger.Error("Failed to fetch dashboard data",
			zap.Error(err),
			zap.String("error_type", fmt.Sprintf("%T", err)))
		// Surface structured API error details when available to aid
		// troubleshooting of auth/refresh issues.
		if apiErr, ok := err.(*client.APIError); ok {
			a.logger.Error("API Error details",
				zap.Int("status", apiErr.Status),
				zap.String("title", apiErr.Title),
				zap.String("detail", apiErr.Detail))
		}
		return nil, fmt.Errorf("failed to fetch dashboard: %w", err)
	}
	if resp.Dashboard == nil {
		return nil, fmt.Errorf("dashboard data is empty")
	}
	dashboard := resp.Dashboard
	// Format summary data into display-ready strings.
	summary := DashboardSummary{
		TotalFiles:             dashboard.Summary.TotalFiles,
		TotalFolders:           dashboard.Summary.TotalFolders,
		StorageUsed:            formatStorageAmount(dashboard.Summary.StorageUsed),
		StorageLimit:           formatStorageAmount(dashboard.Summary.StorageLimit),
		StorageUsagePercentage: dashboard.Summary.StorageUsagePercentage,
	}
	// Format storage usage trend.
	dataPoints := make([]StorageTrendDataPoint, len(dashboard.StorageUsageTrend.DataPoints))
	for i, dp := range dashboard.StorageUsageTrend.DataPoints {
		dataPoints[i] = StorageTrendDataPoint{
			Date:  dp.Date,
			Usage: formatStorageAmount(dp.Usage),
		}
	}
	trend := StorageUsageTrend{
		Period:     dashboard.StorageUsageTrend.Period,
		DataPoints: dataPoints,
	}
	// Get master key for decryption (needed for cloud-only files).
	// Failure is non-fatal: files then simply stay "Encrypted File".
	masterKey, cleanup, masterKeyErr := a.keyCache.GetMasterKey(session.Email)
	if masterKeyErr != nil {
		a.logger.Warn("Master key not available for dashboard file decryption",
			zap.Error(masterKeyErr))
	} else {
		defer cleanup()
	}
	// Build a cache of decrypted collection keys for efficient decryption.
	// Pre-populate from the dashboard response's collection_keys (if
	// available) to avoid extra API calls per collection. Individual key
	// failures are logged and skipped rather than failing the dashboard.
	collectionKeyCache := make(map[string][]byte) // collectionID -> decrypted collection key
	if masterKeyErr == nil && len(dashboard.CollectionKeys) > 0 {
		a.logger.Debug("Pre-populating collection key cache from dashboard response",
			zap.Int("collection_keys_count", len(dashboard.CollectionKeys)))
		for _, ck := range dashboard.CollectionKeys {
			// Decode the encrypted collection key.
			collKeyCiphertext, decodeErr := tryDecodeBase64(ck.EncryptedCollectionKey)
			if decodeErr != nil {
				a.logger.Warn("Failed to decode collection key ciphertext from dashboard",
					zap.String("collection_id", ck.CollectionID),
					zap.Error(decodeErr))
				continue
			}
			collKeyNonce, decodeErr := tryDecodeBase64(ck.EncryptedCollectionKeyNonce)
			if decodeErr != nil {
				a.logger.Warn("Failed to decode collection key nonce from dashboard",
					zap.String("collection_id", ck.CollectionID),
					zap.Error(decodeErr))
				continue
			}
			// Handle combined ciphertext format (nonce prepended to ciphertext).
			actualCollKeyCiphertext := extractActualCiphertext(collKeyCiphertext, collKeyNonce)
			// Decrypt the collection key with the master key.
			collectionKey, decryptErr := e2ee.DecryptCollectionKey(&e2ee.EncryptedKey{
				Ciphertext: actualCollKeyCiphertext,
				Nonce:      collKeyNonce,
			}, masterKey)
			if decryptErr != nil {
				a.logger.Warn("Failed to decrypt collection key from dashboard",
					zap.String("collection_id", ck.CollectionID),
					zap.Error(decryptErr))
				continue
			}
			// Cache the decrypted collection key.
			collectionKeyCache[ck.CollectionID] = collectionKey
			a.logger.Debug("Cached collection key from dashboard response",
				zap.String("collection_id", ck.CollectionID))
		}
		a.logger.Info("Pre-populated collection key cache from dashboard",
			zap.Int("cached_keys", len(collectionKeyCache)))
	}
	// Format recent files (use local data if available, otherwise decrypt
	// the metadata from the cloud response).
	recentFiles := make([]DashboardRecentFile, 0, len(dashboard.RecentFiles))
	for _, cloudFile := range dashboard.RecentFiles {
		// Debug: log what we received from the API.
		a.logger.Debug("Processing dashboard recent file",
			zap.String("file_id", cloudFile.ID),
			zap.String("collection_id", cloudFile.CollectionID),
			zap.Int("encrypted_file_key_ciphertext_len", len(cloudFile.EncryptedFileKey.Ciphertext)),
			zap.Int("encrypted_file_key_nonce_len", len(cloudFile.EncryptedFileKey.Nonce)),
			zap.String("encrypted_file_key_ciphertext_preview", truncateForLog(cloudFile.EncryptedFileKey.Ciphertext, 50)),
			zap.Int("encrypted_metadata_len", len(cloudFile.EncryptedMetadata)))
		// Default values for files not in the local repository.
		filename := "Encrypted File"
		isDecrypted := false
		syncStatus := file.SyncStatusCloudOnly // Default: cloud only
		hasLocalContent := false
		sizeInBytes := cloudFile.EncryptedFileSizeInBytes
		mimeType := "application/octet-stream"
		// Check local repository for this file to get decrypted name and
		// sync status; local data is preferred over decrypting cloud data.
		localFile, err := a.mustGetFileRepo().Get(cloudFile.ID)
		if err == nil && localFile != nil && localFile.State != file.StateDeleted {
			// File exists locally - use local data.
			syncStatus = localFile.SyncStatus
			hasLocalContent = localFile.HasLocalContent()
			// Use decrypted filename if available.
			if localFile.Name != "" {
				filename = localFile.Name
				isDecrypted = true
			}
			// Use decrypted mime type if available.
			if localFile.MimeType != "" {
				mimeType = localFile.MimeType
			}
			// Use local (decrypted) size if available; otherwise keep the
			// encrypted size reported by the cloud.
			if localFile.DecryptedSizeInBytes > 0 {
				sizeInBytes = localFile.DecryptedSizeInBytes
			}
		} else if masterKeyErr == nil && cloudFile.EncryptedMetadata != "" {
			// File not in local repo, but we have the master key - try to
			// decrypt the name/MIME type from the cloud data.
			decryptedFilename, decryptedMimeType, decryptErr := a.decryptDashboardFileMetadata(
				cloudFile, masterKey, collectionKeyCache, apiClient)
			if decryptErr != nil {
				// Log at Warn level for better visibility during troubleshooting;
				// the file is still listed, just with placeholder values.
				a.logger.Warn("Failed to decrypt dashboard file metadata",
					zap.String("file_id", cloudFile.ID),
					zap.String("collection_id", cloudFile.CollectionID),
					zap.Int("encrypted_file_key_ciphertext_len", len(cloudFile.EncryptedFileKey.Ciphertext)),
					zap.Int("encrypted_file_key_nonce_len", len(cloudFile.EncryptedFileKey.Nonce)),
					zap.Error(decryptErr))
			} else {
				filename = decryptedFilename
				mimeType = decryptedMimeType
				isDecrypted = true
			}
		}
		recentFiles = append(recentFiles, DashboardRecentFile{
			ID:              cloudFile.ID,
			CollectionID:    cloudFile.CollectionID,
			Name:            filename,
			Size:            formatFileSize(sizeInBytes),
			SizeInBytes:     sizeInBytes,
			MimeType:        mimeType,
			CreatedAt:       cloudFile.CreatedAt.Format(time.RFC3339),
			IsDecrypted:     isDecrypted,
			SyncStatus:      syncStatus.String(),
			HasLocalContent: hasLocalContent,
		})
	}
	dashboardData := &DashboardData{
		Summary:           summary,
		StorageUsageTrend: trend,
		RecentFiles:       recentFiles,
	}
	a.logger.Info("Dashboard data fetched successfully",
		zap.Int("total_files", summary.TotalFiles),
		zap.Int("recent_files", len(recentFiles)))
	return dashboardData, nil
}
// formatStorageAmount converts StorageAmount to human-readable string.
// Zero values render as "0 B"; everything else uses two decimal places
// with the unit reported by the backend.
func formatStorageAmount(amount client.StorageAmount) string {
	if amount.Value != 0 {
		return fmt.Sprintf("%.2f %s", amount.Value, amount.Unit)
	}
	return "0 B"
}
// formatFileSize converts bytes to human-readable format (e.g. "1.5 MB").
//
// Returns "0 B" for zero, whole bytes below 1024, and otherwise one decimal
// place with a 1024-based unit. The loop clamps at the end of the unit
// table, so petabyte/exabyte-scale values render as "… PB"/"… EB" instead
// of indexing past the table (the previous units[exp+1] lookup panicked
// with an index-out-of-range for any value >= 1024^5 bytes).
func formatFileSize(bytes int64) string {
	if bytes == 0 {
		return "0 B"
	}
	const unit = 1024
	if bytes < unit {
		return fmt.Sprintf("%d B", bytes)
	}
	units := []string{"KB", "MB", "GB", "TB", "PB", "EB"}
	div, exp := int64(unit), 0
	// Find the largest unit for which bytes/div is in [1, 1024); the
	// exp < len(units)-1 guard keeps the index in range for huge inputs.
	for n := bytes / unit; n >= unit && exp < len(units)-1; n /= unit {
		div *= unit
		exp++
	}
	return fmt.Sprintf("%.1f %s", float64(bytes)/float64(div), units[exp])
}
// decryptDashboardFileMetadata decrypts file metadata for a dashboard recent file.
//
// Decryption chain: collection key (from collectionKeyCache, which must be
// pre-populated from the dashboard API response) -> file key -> metadata
// JSON ({name, mime_type}). If the dashboard response carried an empty
// encrypted_file_key, the file is re-fetched from the file endpoint to
// obtain it. Returns the decrypted filename and MIME type.
//
// NOTE(review): masterKey is currently unused here - collection keys are
// decrypted up front by the caller - but the parameter is kept so the
// caller's signature stays stable.
func (a *Application) decryptDashboardFileMetadata(
	cloudFile client.RecentFileDashboard,
	masterKey []byte,
	collectionKeyCache map[string][]byte,
	apiClient *client.Client,
) (filename string, mimeType string, err error) {
	// Step 1: Get the collection key from cache (should be pre-populated
	// from the dashboard API response).
	collectionKey, exists := collectionKeyCache[cloudFile.CollectionID]
	if !exists {
		// Collection key was not provided by the dashboard API - this shouldn't happen
		// but we log a warning for debugging
		a.logger.Warn("Collection key not found in cache - dashboard API should have provided it",
			zap.String("collection_id", cloudFile.CollectionID),
			zap.String("file_id", cloudFile.ID))
		return "", "", fmt.Errorf("collection key not available for collection %s", cloudFile.CollectionID)
	}
	// Step 2: Get the file's encrypted_file_key.
	// First try using the dashboard data, but if empty, fetch from the file endpoint directly.
	var fileKeyCiphertext, fileKeyNonce []byte
	if cloudFile.EncryptedFileKey.Ciphertext != "" && cloudFile.EncryptedFileKey.Nonce != "" {
		// Use data from dashboard response.
		var decodeErr error
		fileKeyCiphertext, decodeErr = tryDecodeBase64(cloudFile.EncryptedFileKey.Ciphertext)
		if decodeErr != nil {
			return "", "", fmt.Errorf("failed to decode file key ciphertext: %w", decodeErr)
		}
		fileKeyNonce, decodeErr = tryDecodeBase64(cloudFile.EncryptedFileKey.Nonce)
		if decodeErr != nil {
			return "", "", fmt.Errorf("failed to decode file key nonce: %w", decodeErr)
		}
	} else {
		// Dashboard response has empty encrypted_file_key, fetch from file endpoint.
		// This endpoint properly deserializes the encrypted_file_key through the repository.
		a.logger.Debug("Dashboard encrypted_file_key is empty, fetching from file endpoint",
			zap.String("file_id", cloudFile.ID))
		// Named fetchedFile (not "file") to avoid shadowing the imported
		// domain/file package inside this function.
		fetchedFile, fetchErr := apiClient.GetFile(a.ctx, cloudFile.ID)
		if fetchErr != nil {
			return "", "", fmt.Errorf("failed to fetch file %s: %w", cloudFile.ID, fetchErr)
		}
		if fetchedFile.EncryptedFileKey.Ciphertext == "" || fetchedFile.EncryptedFileKey.Nonce == "" {
			return "", "", fmt.Errorf("file endpoint also returned empty encrypted_file_key for file %s", cloudFile.ID)
		}
		var decodeErr error
		fileKeyCiphertext, decodeErr = tryDecodeBase64(fetchedFile.EncryptedFileKey.Ciphertext)
		if decodeErr != nil {
			return "", "", fmt.Errorf("failed to decode file key ciphertext from file endpoint: %w", decodeErr)
		}
		fileKeyNonce, decodeErr = tryDecodeBase64(fetchedFile.EncryptedFileKey.Nonce)
		if decodeErr != nil {
			return "", "", fmt.Errorf("failed to decode file key nonce from file endpoint: %w", decodeErr)
		}
	}
	// Handle combined ciphertext format for file key (nonce may be
	// prepended to the ciphertext).
	actualFileKeyCiphertext := extractActualCiphertext(fileKeyCiphertext, fileKeyNonce)
	fileKey, err := e2ee.DecryptFileKey(&e2ee.EncryptedKey{
		Ciphertext: actualFileKeyCiphertext,
		Nonce:      fileKeyNonce,
	}, collectionKey)
	if err != nil {
		return "", "", fmt.Errorf("failed to decrypt file key: %w", err)
	}
	// Step 3: Decrypt the file metadata with the file key.
	// Use tryDecodeBase64 to handle multiple base64 encoding formats.
	encryptedMetadataBytes, err := tryDecodeBase64(cloudFile.EncryptedMetadata)
	if err != nil {
		return "", "", fmt.Errorf("failed to decode encrypted metadata: %w", err)
	}
	// Split nonce and ciphertext from the combined metadata (auto-detect nonce size).
	metadataNonce, metadataCiphertext, err := e2ee.SplitNonceAndCiphertextAuto(encryptedMetadataBytes)
	if err != nil {
		return "", "", fmt.Errorf("failed to split metadata nonce/ciphertext: %w", err)
	}
	decryptedMetadata, err := e2ee.DecryptWithAlgorithm(metadataCiphertext, metadataNonce, fileKey)
	if err != nil {
		return "", "", fmt.Errorf("failed to decrypt metadata: %w", err)
	}
	// Step 4: Parse the decrypted metadata JSON; only name and mime_type
	// are needed for dashboard display.
	var metadata struct {
		Name     string `json:"name"`
		MimeType string `json:"mime_type"`
	}
	if err := json.Unmarshal(decryptedMetadata, &metadata); err != nil {
		return "", "", fmt.Errorf("failed to parse metadata JSON: %w", err)
	}
	return metadata.Name, metadata.MimeType, nil
}
// truncateForLog truncates a string for logging purposes, appending "..."
// when the input exceeds maxLen bytes.
func truncateForLog(s string, maxLen int) string {
	if len(s) > maxLen {
		return s[:maxLen] + "..."
	}
	return s
}

View file

@ -0,0 +1,451 @@
package app
import (
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
sysRuntime "runtime"
"strings"
"time"
"unicode"
"github.com/wailsapp/wails/v2/pkg/runtime"
"go.uber.org/zap"
)
// =============================================================================
// EXPORT TYPES AND UTILITIES
// =============================================================================
// ExportError represents an error that occurred while exporting one file.
type ExportError struct {
	FileID       string `json:"file_id"`
	Filename     string `json:"filename"`
	CollectionID string `json:"collection_id"`
	ErrorMessage string `json:"error_message"`
	// Timestamp records when the failure occurred.
	Timestamp string `json:"timestamp"`
}
// ExportEstimate provides an up-front estimate of what will be exported,
// so the UI can show scope and expected duration before starting.
type ExportEstimate struct {
	TotalCollections  int   `json:"total_collections"`
	OwnedCollections  int   `json:"owned_collections"`
	SharedCollections int   `json:"shared_collections"`
	TotalFiles        int   `json:"total_files"`
	TotalSizeBytes    int64 `json:"total_size_bytes"`
	// LocalFilesCount counts files whose decrypted content already exists
	// on disk; CloudOnlyCount counts files that must be downloaded first.
	LocalFilesCount int `json:"local_files_count"`
	CloudOnlyCount  int `json:"cloud_only_count"`
	// EstimatedTime is a human-readable duration estimate.
	EstimatedTime string `json:"estimated_time"`
}
// UserProfileExport represents exported user profile data, as written to
// profile.json in the export directory. Timestamps are RFC3339 strings.
type UserProfileExport struct {
	ID        string `json:"id"`
	Email     string `json:"email"`
	FirstName string `json:"first_name"`
	LastName  string `json:"last_name"`
	Name      string `json:"name"`
	Phone     string `json:"phone,omitempty"`
	Country   string `json:"country,omitempty"`
	Timezone  string `json:"timezone,omitempty"`
	CreatedAt string `json:"created_at"`
	// ExportedAt records when this export snapshot was taken.
	ExportedAt string `json:"exported_at"`
}
// CollectionExportData represents a single collection in the export.
type CollectionExportData struct {
	ID             string `json:"id"`
	Name           string `json:"name"`
	CollectionType string `json:"collection_type"`
	// ParentID is empty for top-level collections.
	ParentID   string `json:"parent_id,omitempty"`
	FileCount  int    `json:"file_count"`
	CreatedAt  string `json:"created_at"`
	ModifiedAt string `json:"modified_at"`
	// IsShared distinguishes entries that came from the shared-collections
	// list rather than the owned list — presumably "shared with this user";
	// confirm at the fill site.
	IsShared bool `json:"is_shared"`
}
// CollectionsExport represents all exported collections, split by ownership.
type CollectionsExport struct {
	OwnedCollections  []*CollectionExportData `json:"owned_collections"`
	SharedCollections []*CollectionExportData `json:"shared_collections"`
	// TotalCount is owned + shared.
	TotalCount int    `json:"total_count"`
	ExportedAt string `json:"exported_at"`
}
// FileExportData represents a single file's metadata in the export
// (the decrypted, user-visible attributes, not the ciphertext).
type FileExportData struct {
	ID         string `json:"id"`
	Filename   string `json:"filename"`
	MimeType   string `json:"mime_type"`
	SizeBytes  int64  `json:"size_bytes"`
	CreatedAt  string `json:"created_at"`
	ModifiedAt string `json:"modified_at"`
	// CollectionID/CollectionName identify the containing collection so the
	// metadata file is useful without cross-referencing collections.json.
	CollectionID   string `json:"collection_id"`
	CollectionName string `json:"collection_name"`
}
// FilesMetadataExport represents all exported file metadata.
type FilesMetadataExport struct {
	Files      []*FileExportData `json:"files"`
	TotalCount int               `json:"total_count"`
	// TotalSize is the sum of all file sizes, in bytes.
	TotalSize  int64  `json:"total_size_bytes"`
	ExportedAt string `json:"exported_at"`
}
// FileExportResult represents the result of exporting a single file.
type FileExportResult struct {
	FileID   string `json:"file_id"`
	Filename string `json:"filename"`
	// SourceType records where the content came from — presumably local
	// copy vs cloud download; confirm the exact values at the fill site.
	SourceType string `json:"source_type"`
	SizeBytes  int64  `json:"size_bytes"`
	DestPath   string `json:"dest_path"`
	Success    bool   `json:"success"`
	// ErrorMessage is set only when Success is false.
	ErrorMessage string `json:"error_message,omitempty"`
}
// ExportSummary is the final summary of the export operation.
type ExportSummary struct {
	ExportedAt        string `json:"exported_at"`
	ExportPath        string `json:"export_path"`
	TotalCollections  int    `json:"total_collections"`
	OwnedCollections  int    `json:"owned_collections"`
	SharedCollections int    `json:"shared_collections"`
	TotalFiles        int    `json:"total_files"`
	// FilesExported = FilesCopiedLocal + FilesDownloaded — presumably;
	// confirm where the summary is assembled.
	FilesExported    int   `json:"files_exported"`
	FilesCopiedLocal int   `json:"files_copied_local"`
	FilesDownloaded  int   `json:"files_downloaded"`
	FilesFailed      int   `json:"files_failed"`
	TotalSizeBytes   int64 `json:"total_size_bytes"`
	// Errors holds per-file failure details; empty on a clean export.
	Errors []ExportError `json:"errors,omitempty"`
}
// =============================================================================
// EXPORT SETUP OPERATIONS
// =============================================================================
// SelectExportDirectory opens a native dialog for the user to pick an
// export directory. Returns the chosen path (empty if the user cancelled,
// per the Wails dialog contract) or an error if the dialog cannot open.
func (a *Application) SelectExportDirectory() (string, error) {
	// Start the dialog in the user's home directory when we can determine
	// it; otherwise fall back to the platform default location.
	defaultDir := ""
	if home, homeErr := os.UserHomeDir(); homeErr == nil {
		defaultDir = home
	}
	dir, err := runtime.OpenDirectoryDialog(a.ctx, runtime.OpenDialogOptions{
		DefaultDirectory:           defaultDir,
		Title:                      "Select Export Directory",
		CanCreateDirectories:       true,
		ShowHiddenFiles:            false,
		TreatPackagesAsDirectories: false,
	})
	if err != nil {
		a.logger.Error("Failed to open directory dialog", zap.Error(err))
		return "", fmt.Errorf("failed to open directory dialog: %w", err)
	}
	return dir, nil
}
// GetExportEstimate returns an estimate of what will be exported: collection
// and file counts, how many files are already local vs cloud-only, the total
// local size, and a rough human-readable duration estimate.
//
// Requires an authenticated, unexpired session. Failures fetching shared
// collections or listing local files are tolerated (the estimate is then
// partial); failures fetching owned collections are fatal.
func (a *Application) GetExportEstimate() (*ExportEstimate, error) {
	a.logger.Info("Getting export estimate")
	// Get current session
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil {
		return nil, fmt.Errorf("not authenticated: %w", err)
	}
	if !session.IsValid() {
		return nil, fmt.Errorf("session expired - please log in again")
	}
	apiClient := a.authService.GetAPIClient()
	apiClient.SetTokens(session.AccessToken, session.RefreshToken)
	// Get dashboard for storage stats.
	// NOTE(review): dashResp is never used below (see the `_ = dashResp`
	// suppression) — this network round-trip looks removable; confirm no
	// side effect (e.g. token refresh) is relied upon before deleting it.
	dashResp, err := apiClient.GetDashboard(a.ctx)
	if err != nil {
		a.logger.Warn("Failed to get dashboard for estimate", zap.Error(err))
	}
	// Get owned collections
	ownedCollections, err := a.ListCollections()
	if err != nil {
		return nil, fmt.Errorf("failed to list owned collections: %w", err)
	}
	// Get shared collections (best effort - fall back to an empty list)
	sharedCollections, err := a.listSharedCollections()
	if err != nil {
		a.logger.Warn("Failed to list shared collections", zap.Error(err))
		sharedCollections = []*CollectionData{}
	}
	// Count files and check local availability
	totalFiles := 0
	localFilesCount := 0
	cloudOnlyCount := 0
	var totalSizeBytes int64 = 0
	// NOTE(review): append may write into ownedCollections' backing array
	// if it has spare capacity; harmless as long as ownedCollections is not
	// reused after this call — confirm ListCollections returns a fresh slice.
	allCollections := append(ownedCollections, sharedCollections...)
	for _, coll := range allCollections {
		totalFiles += coll.TotalFiles
	}
	// Check local file repository for files with decrypted content available.
	// We check for FilePath (decrypted file) since that's what we copy during export.
	localFiles, err := a.mustGetFileRepo().List()
	if err == nil {
		for _, f := range localFiles {
			if f.FilePath != "" {
				localFilesCount++
				totalSizeBytes += f.DecryptedSizeInBytes
			}
		}
	}
	// Clamp: the local repo can contain more files with content than the
	// backend reported (e.g. counts out of sync), so never go negative.
	cloudOnlyCount = totalFiles - localFilesCount
	if cloudOnlyCount < 0 {
		cloudOnlyCount = 0
	}
	// Note: Dashboard has storage in formatted units (e.g. "1.5 GB").
	// We use our calculated totalSizeBytes instead for accuracy.
	_ = dashResp // Suppress unused variable warning if dashboard call failed
	// Estimate time based on file count: roughly 1 cloud download per second.
	estimatedTime := "Less than a minute"
	if cloudOnlyCount > 0 {
		seconds := cloudOnlyCount
		if seconds > 60 {
			minutes := seconds / 60
			if minutes > 60 {
				estimatedTime = fmt.Sprintf("About %d hours", minutes/60)
			} else {
				estimatedTime = fmt.Sprintf("About %d minutes", minutes)
			}
		} else {
			estimatedTime = fmt.Sprintf("About %d seconds", seconds)
		}
	}
	estimate := &ExportEstimate{
		TotalCollections:  len(allCollections),
		OwnedCollections:  len(ownedCollections),
		SharedCollections: len(sharedCollections),
		TotalFiles:        totalFiles,
		TotalSizeBytes:    totalSizeBytes,
		LocalFilesCount:   localFilesCount,
		CloudOnlyCount:    cloudOnlyCount,
		EstimatedTime:     estimatedTime,
	}
	a.logger.Info("Export estimate calculated",
		zap.Int("total_collections", estimate.TotalCollections),
		zap.Int("total_files", estimate.TotalFiles),
		zap.Int("local_files", estimate.LocalFilesCount),
		zap.Int("cloud_only", estimate.CloudOnlyCount))
	return estimate, nil
}
// CreateExportDirectory creates a timestamped export directory under
// basePath (MapleFile_Export_<timestamp>) along with its "files"
// subdirectory, and returns the export directory's path. The timestamp
// keeps successive exports from colliding.
func (a *Application) CreateExportDirectory(basePath string) (string, error) {
	stamp := time.Now().Format("2006-01-02_15-04-05")
	exportDir := filepath.Join(basePath, fmt.Sprintf("MapleFile_Export_%s", stamp))
	if err := os.MkdirAll(exportDir, 0755); err != nil {
		return "", fmt.Errorf("failed to create export directory: %w", err)
	}
	// Subdirectory where decrypted file contents will be written.
	if err := os.MkdirAll(filepath.Join(exportDir, "files"), 0755); err != nil {
		return "", fmt.Errorf("failed to create files directory: %w", err)
	}
	return exportDir, nil
}
// OpenExportFolder opens the export folder in the system file manager.
// The path is validated (absolute, cleaned, existing directory) before
// being handed to exec.Command, as a defense against injected paths.
func (a *Application) OpenExportFolder(path string) error {
	if path == "" {
		return fmt.Errorf("path cannot be empty")
	}
	// Normalize to a clean absolute path before any filesystem checks.
	absPath, err := filepath.Abs(path)
	if err != nil {
		return fmt.Errorf("invalid path: %w", err)
	}
	absPath = filepath.Clean(absPath)
	// The target must exist and be a directory.
	info, statErr := os.Stat(absPath)
	switch {
	case statErr != nil && os.IsNotExist(statErr):
		return fmt.Errorf("path does not exist: %s", absPath)
	case statErr != nil:
		return fmt.Errorf("failed to access path: %w", statErr)
	case !info.IsDir():
		return fmt.Errorf("path is not a directory: %s", absPath)
	}
	a.logger.Info("Opening export folder",
		zap.String("path", absPath))
	// Pick the platform-specific opener command.
	openers := map[string]string{
		"darwin":  "open",
		"windows": "explorer",
		"linux":   "xdg-open",
	}
	opener, ok := openers[sysRuntime.GOOS]
	if !ok {
		return fmt.Errorf("unsupported operating system: %s", sysRuntime.GOOS)
	}
	// Start (don't wait for) the file manager so the UI is not blocked.
	return exec.Command(opener, absPath).Start()
}
// =============================================================================
// HELPER FUNCTIONS
// =============================================================================
// sanitizeFilename removes or replaces characters that are invalid in filenames.
// This function provides defense-in-depth against path traversal attacks by:
//  1. Extracting only the base filename (removing any path components)
//  2. Handling special directory references (. and ..)
//  3. Removing control characters
//  4. Replacing invalid filesystem characters
//  5. Handling Windows reserved names
//  6. Limiting filename length (on UTF-8 rune boundaries)
func sanitizeFilename(name string) string {
	// Step 1: Extract only the base filename to prevent path traversal.
	// This handles cases like "../../../etc/passwd" -> "passwd".
	name = filepath.Base(name)
	// Step 2: Handle special directory references.
	if name == "." || name == ".." || name == "" {
		return "unnamed"
	}
	// Step 3: Trim leading/trailing whitespace and dots.
	// Windows doesn't allow filenames ending with dots or spaces.
	name = strings.TrimSpace(name)
	name = strings.Trim(name, ".")
	if name == "" {
		return "unnamed"
	}
	// Step 4: Remove control characters (ASCII 0-31) and other
	// non-printable runes.
	result := make([]rune, 0, len(name))
	for _, r := range name {
		if r < 32 || !unicode.IsPrint(r) {
			continue // Skip control characters
		}
		result = append(result, r)
	}
	name = string(result)
	// Step 5: Replace invalid filesystem characters.
	// These are invalid on Windows: \ / : * ? " < > |
	// Forward/back slashes are also dangerous for path traversal.
	replacer := map[rune]rune{
		'/':  '-',
		'\\': '-',
		':':  '-',
		'*':  '-',
		'?':  '-',
		'"':  '\'',
		'<':  '(',
		'>':  ')',
		'|':  '-',
	}
	result = make([]rune, 0, len(name))
	for _, r := range name {
		if replacement, ok := replacer[r]; ok {
			result = append(result, replacement)
		} else {
			result = append(result, r)
		}
	}
	name = string(result)
	// Step 6: Handle Windows reserved names.
	// These names are reserved regardless of extension: CON, PRN, AUX, NUL,
	// COM1-COM9, LPT1-LPT9.
	upperName := strings.ToUpper(name)
	// Extract name without extension for comparison.
	nameWithoutExt := upperName
	if idx := strings.LastIndex(upperName, "."); idx > 0 {
		nameWithoutExt = upperName[:idx]
	}
	reservedNames := map[string]bool{
		"CON": true, "PRN": true, "AUX": true, "NUL": true,
		"COM1": true, "COM2": true, "COM3": true, "COM4": true,
		"COM5": true, "COM6": true, "COM7": true, "COM8": true, "COM9": true,
		"LPT1": true, "LPT2": true, "LPT3": true, "LPT4": true,
		"LPT5": true, "LPT6": true, "LPT7": true, "LPT8": true, "LPT9": true,
	}
	if reservedNames[nameWithoutExt] {
		name = "_" + name
	}
	// Step 7: Limit filename length.
	// Most filesystems support 255 bytes; we use 200 to leave room for path.
	// BUGFIX: truncate on rune boundaries — plain byte slicing (name[:200])
	// could split a multi-byte UTF-8 character, producing an invalid name.
	const maxFilenameLength = 200
	if len(name) > maxFilenameLength {
		// Try to preserve the extension.
		ext := filepath.Ext(name)
		if len(ext) < maxFilenameLength-10 {
			nameWithoutExt := name[:len(name)-len(ext)]
			if len(nameWithoutExt) > maxFilenameLength-len(ext) {
				nameWithoutExt = truncateToRuneBoundary(nameWithoutExt, maxFilenameLength-len(ext))
			}
			name = nameWithoutExt + ext
		} else {
			name = truncateToRuneBoundary(name, maxFilenameLength)
		}
	}
	// Final check
	if name == "" {
		return "unnamed"
	}
	return name
}

// truncateToRuneBoundary shortens s to at most max bytes without splitting a
// multi-byte UTF-8 rune; the result may be a few bytes shorter than max.
func truncateToRuneBoundary(s string, max int) string {
	if len(s) <= max {
		return s
	}
	cut := 0
	// Ranging over a string yields the byte index of each rune start, so
	// the largest start index <= max is a safe cut point.
	for i := range s {
		if i > max {
			break
		}
		cut = i
	}
	return s[:cut]
}
// copyFile copies a file from src to dst, fsyncing the destination before
// returning so the copied bytes are durable on disk.
func copyFile(src, dst string) error {
	in, err := os.Open(src)
	if err != nil {
		return err
	}
	defer in.Close()
	out, err := os.Create(dst)
	if err != nil {
		return err
	}
	defer out.Close()
	if _, err := io.Copy(out, in); err != nil {
		return err
	}
	return out.Sync()
}

View file

@ -0,0 +1,204 @@
package app
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"time"
"go.uber.org/zap"
)
// =============================================================================
// EXPORT DATA OPERATIONS (Profile, Collections, Metadata)
// =============================================================================
// ExportUserProfile exports the user's profile data.
//
// Fetches the profile from the backend, converts it to UserProfileExport,
// writes it as indented JSON to <exportPath>/profile.json, and returns the
// exported struct. Requires an authenticated session.
func (a *Application) ExportUserProfile(exportPath string) (*UserProfileExport, error) {
	a.logger.Info("Exporting user profile", zap.String("export_path", exportPath))
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil {
		return nil, fmt.Errorf("not authenticated: %w", err)
	}
	apiClient := a.authService.GetAPIClient()
	apiClient.SetTokens(session.AccessToken, session.RefreshToken)
	// Get user profile from the backend
	me, err := apiClient.GetMe(a.ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to get user profile: %w", err)
	}
	profile := &UserProfileExport{
		ID:         me.ID,
		Email:      me.Email,
		FirstName:  me.FirstName,
		LastName:   me.LastName,
		Name:       me.Name,
		Phone:      me.Phone,
		Country:    me.Country,
		Timezone:   me.Timezone,
		CreatedAt:  me.CreatedAt.Format(time.RFC3339),
		ExportedAt: time.Now().Format(time.RFC3339),
	}
	// Save to file.
	// NOTE(review): profile.json contains PII (email, phone) and is written
	// world-readable (0644); consider 0600 if the export dir isn't private.
	profilePath := filepath.Join(exportPath, "profile.json")
	data, err := json.MarshalIndent(profile, "", "  ")
	if err != nil {
		return nil, fmt.Errorf("failed to marshal profile: %w", err)
	}
	if err := os.WriteFile(profilePath, data, 0644); err != nil {
		return nil, fmt.Errorf("failed to write profile file: %w", err)
	}
	a.logger.Info("User profile exported successfully", zap.String("path", profilePath))
	return profile, nil
}
// ExportCollections exports all collections (owned and shared)
func (a *Application) ExportCollections(exportPath string) (*CollectionsExport, error) {
a.logger.Info("Exporting collections", zap.String("export_path", exportPath))
// Get owned collections
ownedCollections, err := a.ListCollections()
if err != nil {
return nil, fmt.Errorf("failed to list owned collections: %w", err)
}
// Get shared collections
sharedCollections, err := a.listSharedCollections()
if err != nil {
a.logger.Warn("Failed to list shared collections", zap.Error(err))
sharedCollections = []*CollectionData{}
}
// Convert to export format
ownedExport := make([]*CollectionExportData, len(ownedCollections))
for i, c := range ownedCollections {
ownedExport[i] = &CollectionExportData{
ID: c.ID,
Name: c.Name,
CollectionType: c.CollectionType,
ParentID: c.ParentID,
FileCount: c.TotalFiles,
CreatedAt: c.CreatedAt,
ModifiedAt: c.ModifiedAt,
IsShared: false,
}
}
sharedExport := make([]*CollectionExportData, len(sharedCollections))
for i, c := range sharedCollections {
sharedExport[i] = &CollectionExportData{
ID: c.ID,
Name: c.Name,
CollectionType: c.CollectionType,
ParentID: c.ParentID,
FileCount: c.TotalFiles,
CreatedAt: c.CreatedAt,
ModifiedAt: c.ModifiedAt,
IsShared: true,
}
}
export := &CollectionsExport{
OwnedCollections: ownedExport,
SharedCollections: sharedExport,
TotalCount: len(ownedExport) + len(sharedExport),
ExportedAt: time.Now().Format(time.RFC3339),
}
// Save to file
collectionsPath := filepath.Join(exportPath, "collections.json")
data, err := json.MarshalIndent(export, "", " ")
if err != nil {
return nil, fmt.Errorf("failed to marshal collections: %w", err)
}
if err := os.WriteFile(collectionsPath, data, 0644); err != nil {
return nil, fmt.Errorf("failed to write collections file: %w", err)
}
a.logger.Info("Collections exported successfully",
zap.String("path", collectionsPath),
zap.Int("owned", len(ownedExport)),
zap.Int("shared", len(sharedExport)))
return export, nil
}
// ExportAllFilesMetadata exports metadata for all files in all collections
// (owned and shared) to files_metadata.json under exportPath.
//
// Shared-collection listing and per-collection file listings are
// best-effort: failures are logged and that collection is skipped, so a
// single bad collection cannot abort the whole export.
func (a *Application) ExportAllFilesMetadata(exportPath string) (*FilesMetadataExport, error) {
	a.logger.Info("Exporting all files metadata", zap.String("export_path", exportPath))
	// Get all collections
	ownedCollections, err := a.ListCollections()
	if err != nil {
		return nil, fmt.Errorf("failed to list owned collections: %w", err)
	}
	sharedCollections, err := a.listSharedCollections()
	if err != nil {
		a.logger.Warn("Failed to list shared collections", zap.Error(err))
		sharedCollections = []*CollectionData{}
	}
	// BUGFIX: combine into a freshly allocated slice. The previous
	// `append(ownedCollections, sharedCollections...)` could write the
	// shared entries into ownedCollections' backing array whenever it had
	// spare capacity, silently mutating the slice returned by
	// ListCollections.
	allCollections := make([]*CollectionData, 0, len(ownedCollections)+len(sharedCollections))
	allCollections = append(allCollections, ownedCollections...)
	allCollections = append(allCollections, sharedCollections...)
	allFiles := make([]*FileExportData, 0)
	var totalSize int64 = 0
	// Get files for each collection; a listing failure skips the collection.
	for _, coll := range allCollections {
		files, err := a.ListFilesByCollection(coll.ID)
		if err != nil {
			a.logger.Warn("Failed to list files for collection",
				zap.String("collection_id", coll.ID),
				zap.Error(err))
			continue
		}
		for _, f := range files {
			allFiles = append(allFiles, &FileExportData{
				ID:             f.ID,
				Filename:       f.Filename,
				MimeType:       f.ContentType,
				SizeBytes:      f.Size,
				CreatedAt:      f.CreatedAt,
				ModifiedAt:     f.ModifiedAt,
				CollectionID:   coll.ID,
				CollectionName: coll.Name,
			})
			totalSize += f.Size
		}
	}
	export := &FilesMetadataExport{
		Files:      allFiles,
		TotalCount: len(allFiles),
		TotalSize:  totalSize,
		ExportedAt: time.Now().Format(time.RFC3339),
	}
	// Save to file
	metadataPath := filepath.Join(exportPath, "files_metadata.json")
	data, err := json.MarshalIndent(export, "", " ")
	if err != nil {
		return nil, fmt.Errorf("failed to marshal files metadata: %w", err)
	}
	if err := os.WriteFile(metadataPath, data, 0644); err != nil {
		return nil, fmt.Errorf("failed to write files metadata file: %w", err)
	}
	a.logger.Info("Files metadata exported successfully",
		zap.String("path", metadataPath),
		zap.Int("total_files", len(allFiles)),
		zap.Int64("total_size", totalSize))
	return export, nil
}

View file

@ -0,0 +1,346 @@
package app
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"time"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/maplefile/e2ee"
)
// =============================================================================
// EXPORT FILE CONTENT OPERATIONS
// =============================================================================
// ExportFileContent exports a single file - copies from local if available, otherwise downloads from cloud.
//
// Per-file failures are reported via the result struct (Success=false plus
// ErrorMessage) with a nil error, so bulk exports can continue past
// individual files.
func (a *Application) ExportFileContent(fileID string, collectionName string, exportPath string) (*FileExportResult, error) {
	a.logger.Debug("Exporting file content",
		zap.String("file_id", fileID),
		zap.String("collection_name", collectionName))

	result := &FileExportResult{
		FileID:  fileID,
		Success: false,
	}

	// Each collection gets its own filesystem-safe folder under files/.
	dir := filepath.Join(exportPath, "files", sanitizeFilename(collectionName))
	if err := os.MkdirAll(dir, 0755); err != nil {
		result.ErrorMessage = fmt.Sprintf("failed to create directory: %v", err)
		return result, nil
	}

	// Prefer the locally cached plaintext when present. HasLocalContent()
	// also covers encrypted-only copies, so we check FilePath (the decrypted
	// path) directly instead.
	localFile, err := a.mustGetFileRepo().Get(fileID)
	hasDecryptedLocal := err == nil && localFile != nil && localFile.FilePath != ""

	if !hasDecryptedLocal {
		// No decrypted local copy - download, decrypt, and save straight
		// into the export directory.
		result.SourceType = "cloud"
		filename, size, dlErr := a.downloadFileToPath(fileID, dir)
		if dlErr != nil {
			result.ErrorMessage = fmt.Sprintf("failed to download file: %v", dlErr)
			return result, nil
		}
		result.Filename = filename
		result.SizeBytes = size
		result.DestPath = filepath.Join(dir, sanitizeFilename(filename))
		result.Success = true
		a.logger.Debug("File downloaded from cloud",
			zap.String("file_id", fileID),
			zap.String("dest", result.DestPath))
		return result, nil
	}

	// Local decrypted copy exists - just copy it into the export folder.
	result.Filename = localFile.Name
	result.SourceType = "local"
	result.SizeBytes = localFile.DecryptedSizeInBytes
	result.DestPath = filepath.Join(dir, sanitizeFilename(localFile.Name))
	if copyErr := copyFile(localFile.FilePath, result.DestPath); copyErr != nil {
		result.ErrorMessage = fmt.Sprintf("failed to copy local file: %v", copyErr)
		return result, nil
	}
	result.Success = true
	a.logger.Debug("File copied from local storage",
		zap.String("file_id", fileID),
		zap.String("dest", result.DestPath))
	return result, nil
}
// downloadFileToPath downloads and decrypts a file directly to a specified directory.
// Returns the filename, file size, and error. This is used for bulk exports without user dialog.
//
// The E2EE key chain is unwrapped in order: the cached master key decrypts
// the collection key, which decrypts the per-file key, which decrypts the
// file's metadata (yielding the real filename) and finally its content.
// The decrypted plaintext is written with 0600 permissions.
func (a *Application) downloadFileToPath(fileID string, destDir string) (string, int64, error) {
	// Get current session for authentication
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil {
		return "", 0, fmt.Errorf("not authenticated: %w", err)
	}
	// Get master key from cache
	email := session.Email
	masterKey, cleanupMasterKey, err := a.keyCache.GetMasterKey(email)
	if err != nil {
		return "", 0, fmt.Errorf("encryption key not available: %w", err)
	}
	// Release the cached key reference when done (cleanup is owned by keyCache).
	defer cleanupMasterKey()
	// Use SDK client which has automatic token refresh on 401
	apiClient := a.authService.GetAPIClient()
	// Step 1: Get file metadata using SDK (has automatic 401 retry)
	fileData, err := apiClient.GetFile(a.ctx, fileID)
	if err != nil {
		return "", 0, fmt.Errorf("failed to get file: %w", err)
	}
	// Step 2: Get collection to decrypt collection key using SDK (has automatic 401 retry)
	collData, err := apiClient.GetCollection(a.ctx, fileData.CollectionID)
	if err != nil {
		return "", 0, fmt.Errorf("failed to get collection: %w", err)
	}
	// Step 3: Decrypt collection key with master key
	// SDK returns EncryptedCollectionKey as an object with Ciphertext and Nonce fields
	// Use tryDecodeBase64 to handle multiple base64 encoding formats
	collKeyNonce, err := tryDecodeBase64(collData.EncryptedCollectionKey.Nonce)
	if err != nil {
		return "", 0, fmt.Errorf("failed to decode collection key nonce: %w", err)
	}
	collKeyCiphertext, err := tryDecodeBase64(collData.EncryptedCollectionKey.Ciphertext)
	if err != nil {
		return "", 0, fmt.Errorf("failed to decode collection key ciphertext: %w", err)
	}
	// Handle web frontend combined ciphertext format (nonce + encrypted_data)
	actualCollKeyCiphertext := extractActualCiphertext(collKeyCiphertext, collKeyNonce)
	collectionKey, err := e2ee.DecryptCollectionKey(&e2ee.EncryptedKey{
		Ciphertext: actualCollKeyCiphertext,
		Nonce:      collKeyNonce,
	}, masterKey)
	if err != nil {
		return "", 0, fmt.Errorf("failed to decrypt collection key: %w", err)
	}
	// Step 4: Decrypt file key with collection key
	// NOTE: The web frontend may send combined ciphertext (nonce + encrypted_data)
	// or separate fields. We handle both formats.
	// Use tryDecodeBase64 to handle multiple base64 encoding formats
	fileKeyNonce, err := tryDecodeBase64(fileData.EncryptedFileKey.Nonce)
	if err != nil {
		return "", 0, fmt.Errorf("failed to decode file key nonce: %w", err)
	}
	fileKeyCiphertext, err := tryDecodeBase64(fileData.EncryptedFileKey.Ciphertext)
	if err != nil {
		return "", 0, fmt.Errorf("failed to decode file key ciphertext: %w", err)
	}
	// Handle web frontend combined ciphertext format (nonce + encrypted_data)
	actualFileKeyCiphertext := extractActualCiphertext(fileKeyCiphertext, fileKeyNonce)
	fileKey, err := e2ee.DecryptFileKey(&e2ee.EncryptedKey{
		Ciphertext: actualFileKeyCiphertext,
		Nonce:      fileKeyNonce,
	}, collectionKey)
	if err != nil {
		return "", 0, fmt.Errorf("failed to decrypt file key: %w", err)
	}
	// Step 5: Decrypt metadata to get filename
	// Use tryDecodeBase64 to handle multiple base64 encoding formats
	encryptedMetadataBytes, err := tryDecodeBase64(fileData.EncryptedMetadata)
	if err != nil {
		return "", 0, fmt.Errorf("failed to decode metadata: %w", err)
	}
	// Metadata is stored as nonce||ciphertext; the splitter auto-detects the nonce size.
	metadataNonce, metadataCiphertext, err := e2ee.SplitNonceAndCiphertextAuto(encryptedMetadataBytes)
	if err != nil {
		return "", 0, fmt.Errorf("failed to parse metadata: %w", err)
	}
	decryptedMetadata, err := e2ee.DecryptWithAlgorithm(metadataCiphertext, metadataNonce, fileKey)
	if err != nil {
		return "", 0, fmt.Errorf("failed to decrypt metadata: %w", err)
	}
	// Only the fields needed here are decoded; other metadata keys are ignored.
	var metadata struct {
		Filename string `json:"name"`
		MimeType string `json:"mime_type"`
	}
	if err := json.Unmarshal(decryptedMetadata, &metadata); err != nil {
		return "", 0, fmt.Errorf("failed to parse metadata: %w", err)
	}
	// Step 6: Get presigned download URL using SDK (has automatic 401 retry)
	downloadResp, err := apiClient.GetPresignedDownloadURL(a.ctx, fileID)
	if err != nil {
		return "", 0, fmt.Errorf("failed to get download URL: %w", err)
	}
	// Step 7: Download encrypted file from S3 using SDK helper
	// NOTE(review): the whole encrypted blob is buffered in memory — confirm
	// this is acceptable for the largest files the app supports.
	encryptedContent, err := apiClient.DownloadFromPresignedURL(a.ctx, downloadResp.FileURL)
	if err != nil {
		return "", 0, fmt.Errorf("failed to download file: %w", err)
	}
	// Step 8: Decrypt file content
	decryptedContent, err := e2ee.DecryptFile(encryptedContent, fileKey)
	if err != nil {
		return "", 0, fmt.Errorf("failed to decrypt file: %w", err)
	}
	// Step 9: Write decrypted content to destination
	// The decrypted filename is sanitized before being used as a path component.
	destPath := filepath.Join(destDir, sanitizeFilename(metadata.Filename))
	if err := os.WriteFile(destPath, decryptedContent, 0600); err != nil {
		return "", 0, fmt.Errorf("failed to save file: %w", err)
	}
	return metadata.Filename, int64(len(decryptedContent)), nil
}
// ExportAllFiles exports all files from all collections (owned and shared)
// into exportPath, reporting per-file progress through progressCallback
// (which may be nil).
//
// Individual file failures are recorded in the returned summary's Errors
// slice rather than aborting the export. A manifest
// (export_manifest.json) is written next to the exported files on a
// best-effort basis.
func (a *Application) ExportAllFiles(exportPath string, progressCallback func(current, total int, filename string)) (*ExportSummary, error) {
	a.logger.Info("Exporting all files", zap.String("export_path", exportPath))
	summary := &ExportSummary{
		ExportedAt: time.Now().Format(time.RFC3339),
		ExportPath: exportPath,
		Errors:     make([]ExportError, 0),
	}
	// Get all collections. Shared collections are best-effort: a failure is
	// logged and the export continues with owned collections only.
	ownedCollections, err := a.ListCollections()
	if err != nil {
		return nil, fmt.Errorf("failed to list owned collections: %w", err)
	}
	summary.OwnedCollections = len(ownedCollections)
	sharedCollections, err := a.listSharedCollections()
	if err != nil {
		a.logger.Warn("Failed to list shared collections", zap.Error(err))
		sharedCollections = []*CollectionData{}
	}
	summary.SharedCollections = len(sharedCollections)
	summary.TotalCollections = summary.OwnedCollections + summary.SharedCollections
	// Build the flat list of all files up front so progress reporting can
	// show an accurate total.
	type fileToExport struct {
		fileID         string
		collectionID   string
		collectionName string
	}
	allFilesToExport := make([]fileToExport, 0)
	// Collect files from owned collections
	for _, coll := range ownedCollections {
		files, err := a.ListFilesByCollection(coll.ID)
		if err != nil {
			a.logger.Warn("Failed to list files for collection",
				zap.String("collection_id", coll.ID),
				zap.Error(err))
			continue
		}
		for _, f := range files {
			allFilesToExport = append(allFilesToExport, fileToExport{
				fileID:         f.ID,
				collectionID:   coll.ID,
				collectionName: coll.Name,
			})
		}
	}
	// Collect files from shared collections
	for _, coll := range sharedCollections {
		files, err := a.ListFilesByCollection(coll.ID)
		if err != nil {
			a.logger.Warn("Failed to list files for shared collection",
				zap.String("collection_id", coll.ID),
				zap.Error(err))
			continue
		}
		for _, f := range files {
			// Prefix shared collection names to distinguish them
			collName := "Shared - " + coll.Name
			allFilesToExport = append(allFilesToExport, fileToExport{
				fileID:         f.ID,
				collectionID:   coll.ID,
				collectionName: collName,
			})
		}
	}
	summary.TotalFiles = len(allFilesToExport)
	// Export each file
	for i, f := range allFilesToExport {
		if progressCallback != nil {
			// NOTE(review): the callback's third parameter is named
			// "filename" but receives the collection name — confirm this is
			// the intended display value.
			progressCallback(i+1, summary.TotalFiles, f.collectionName)
		}
		result, err := a.ExportFileContent(f.fileID, f.collectionName, exportPath)
		if err != nil || result == nil || !result.Success {
			// BUGFIX: guard against a nil result. Previously result.Filename
			// was read unconditionally, which would panic if
			// ExportFileContent ever returned (nil, err).
			summary.FilesFailed++
			errMsg := "unknown error"
			filename := ""
			if err != nil {
				errMsg = err.Error()
			} else if result != nil && result.ErrorMessage != "" {
				errMsg = result.ErrorMessage
			}
			if result != nil {
				filename = result.Filename
			}
			summary.Errors = append(summary.Errors, ExportError{
				FileID:       f.fileID,
				Filename:     filename,
				CollectionID: f.collectionID,
				ErrorMessage: errMsg,
				Timestamp:    time.Now().Format(time.RFC3339),
			})
			continue
		}
		summary.FilesExported++
		summary.TotalSizeBytes += result.SizeBytes
		if result.SourceType == "local" {
			summary.FilesCopiedLocal++
		} else {
			summary.FilesDownloaded++
		}
	}
	// Save export manifest (best effort: failures are logged, not returned)
	manifestPath := filepath.Join(exportPath, "export_manifest.json")
	manifestData, err := json.MarshalIndent(summary, "", " ")
	if err != nil {
		a.logger.Warn("Failed to marshal export manifest", zap.Error(err))
	} else {
		if err := os.WriteFile(manifestPath, manifestData, 0644); err != nil {
			a.logger.Warn("Failed to write export manifest", zap.Error(err))
		}
	}
	a.logger.Info("Export completed",
		zap.Int("files_exported", summary.FilesExported),
		zap.Int("files_failed", summary.FilesFailed),
		zap.Int("copied_local", summary.FilesCopiedLocal),
		zap.Int("downloaded", summary.FilesDownloaded))
	return summary, nil
}

View file

@ -0,0 +1,610 @@
package app
import (
"encoding/json"
"fmt"
"io"
"net/http"
"time"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/maplefile/client"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/maplefile/e2ee"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/file"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/inputvalidation"
)
// =============================================================================
// FILE QUERY OPERATIONS
// =============================================================================
// EmbeddedTagData represents a tag attached to a file (for display purposes).
// Name and Color hold the decrypted values (see the tag decryption in
// ListFilesByCollection); the encrypted forms are never exposed to the frontend.
type EmbeddedTagData struct {
	ID    string `json:"id"`    // tag identifier
	Name  string `json:"name"`  // decrypted tag name
	Color string `json:"color"` // decrypted tag color
}
// FileDetailData represents detailed file information for the frontend.
// Filename, MimeType and Size reflect decrypted metadata;
// EncryptedFileSizeInBytes is the size of the ciphertext as stored.
type FileDetailData struct {
	ID                       string `json:"id"`
	CollectionID             string `json:"collection_id"`
	Filename                 string `json:"filename"`  // decrypted filename
	MimeType                 string `json:"mime_type"` // decrypted MIME type
	Size                     int64  `json:"size"`      // decrypted (plaintext) size
	EncryptedFileSizeInBytes int64  `json:"encrypted_file_size_in_bytes"`
	CreatedAt                string `json:"created_at"`
	ModifiedAt               string `json:"modified_at"`
	Version                  uint64 `json:"version"`
	State                    string `json:"state"`
	// Sync status fields
	SyncStatus      string `json:"sync_status"`
	HasLocalContent bool   `json:"has_local_content"`
	LocalFilePath   string `json:"local_file_path,omitempty"` // empty when no local copy exists
	// Tags assigned to this file
	Tags []*EmbeddedTagData `json:"tags"`
}
// ListFilesByCollection lists all files in a collection.
//
// File entries come from the cloud API. For each file, locally cached
// decrypted metadata is preferred when available; otherwise the encrypted
// metadata is decrypted on the fly with the collection key, which is fetched
// and decrypted lazily (at most once per call). Files in the deleted state
// are filtered out. Any decryption failure for a file falls back to
// placeholder values ("Encrypted File") rather than failing the whole listing.
func (a *Application) ListFilesByCollection(collectionID string) ([]*FileData, error) {
	// Validate input
	if err := inputvalidation.ValidateCollectionID(collectionID); err != nil {
		return nil, err
	}
	apiClient := a.authService.GetAPIClient()
	files, err := apiClient.ListFilesByCollection(a.ctx, collectionID)
	if err != nil {
		a.logger.Error("Failed to list files",
			zap.String("collection_id", collectionID),
			zap.Error(err))
		return nil, fmt.Errorf("failed to list files: %w", err)
	}
	// Get collection key for decrypting file metadata (needed for cloud-only files)
	var collectionKey []byte
	collectionKeyReady := false
	// Lazy-load collection key only when needed
	// (avoids the extra HTTP round-trip when every file is cached locally).
	getCollectionKey := func() ([]byte, error) {
		if collectionKeyReady {
			return collectionKey, nil
		}
		// Get session for master key
		session, err := a.authService.GetCurrentSession(a.ctx)
		if err != nil {
			return nil, fmt.Errorf("failed to get session: %w", err)
		}
		// Get master key from cache
		masterKey, cleanup, err := a.keyCache.GetMasterKey(session.Email)
		if err != nil {
			return nil, fmt.Errorf("failed to get master key: %w", err)
		}
		defer cleanup()
		// Define a custom response struct that matches the actual backend API
		// (the client SDK's Collection struct has EncryptedCollectionKey as string)
		type collectionAPIResponse struct {
			EncryptedCollectionKey struct {
				Ciphertext string `json:"ciphertext"`
				Nonce      string `json:"nonce"`
			} `json:"encrypted_collection_key"`
		}
		// Make direct HTTP request to get collection
		req, err := http.NewRequestWithContext(a.ctx, "GET", apiClient.GetBaseURL()+"/api/v1/collections/"+collectionID, nil)
		if err != nil {
			return nil, fmt.Errorf("failed to create request: %w", err)
		}
		// Only the access token is needed for this request.
		accessToken, _ := apiClient.GetTokens()
		req.Header.Set("Authorization", "Bearer "+accessToken)
		resp, err := a.httpClient.Do(req)
		if err != nil {
			return nil, fmt.Errorf("failed to fetch collection: %w", err)
		}
		defer resp.Body.Close()
		if resp.StatusCode != http.StatusOK {
			return nil, fmt.Errorf("failed to fetch collection: status %d", resp.StatusCode)
		}
		var collection collectionAPIResponse
		if err := json.NewDecoder(resp.Body).Decode(&collection); err != nil {
			return nil, fmt.Errorf("failed to decode collection response: %w", err)
		}
		// Decode collection key components
		// Use tryDecodeBase64 to handle multiple base64 encoding formats
		keyCiphertext, err := tryDecodeBase64(collection.EncryptedCollectionKey.Ciphertext)
		if err != nil {
			return nil, fmt.Errorf("failed to decode collection key ciphertext: %w", err)
		}
		keyNonce, err := tryDecodeBase64(collection.EncryptedCollectionKey.Nonce)
		if err != nil {
			return nil, fmt.Errorf("failed to decode collection key nonce: %w", err)
		}
		// Handle web frontend combined ciphertext format (nonce + encrypted_data)
		actualKeyCiphertext := extractActualCiphertext(keyCiphertext, keyNonce)
		// Decrypt collection key with master key
		collectionKey, err = e2ee.DecryptCollectionKey(&e2ee.EncryptedKey{
			Ciphertext: actualKeyCiphertext,
			Nonce:      keyNonce,
		}, masterKey)
		if err != nil {
			return nil, fmt.Errorf("failed to decrypt collection key: %w", err)
		}
		collectionKeyReady = true
		return collectionKey, nil
	}
	result := make([]*FileData, 0, len(files))
	for _, cloudFile := range files {
		// Skip deleted files - don't show them in the GUI
		if cloudFile.State == file.StateDeleted {
			continue
		}
		// Default values, used whenever metadata cannot be decrypted below.
		filename := "Encrypted File"
		contentType := "application/octet-stream"
		fileSize := cloudFile.EncryptedSizeInBytes
		// Check local repository for sync status
		syncStatus := file.SyncStatusCloudOnly // Default: cloud only (from API)
		hasLocalContent := false
		localFilePath := ""
		localFile, err := a.mustGetFileRepo().Get(cloudFile.ID)
		if err == nil && localFile != nil {
			// Skip if local file is marked as deleted
			if localFile.State == file.StateDeleted {
				continue
			}
			// File exists in local repo - use local data
			syncStatus = localFile.SyncStatus
			hasLocalContent = localFile.HasLocalContent()
			localFilePath = localFile.FilePath
			// Use decrypted data from local storage
			if localFile.Name != "" {
				filename = localFile.Name
			}
			if localFile.MimeType != "" {
				contentType = localFile.MimeType
			}
			if localFile.DecryptedSizeInBytes > 0 {
				fileSize = localFile.DecryptedSizeInBytes
			}
		} else {
			// File not in local repo - decrypt metadata from cloud.
			// Each step below logs a warning and falls through to the
			// placeholder defaults on failure; nothing here aborts the listing.
			colKey, err := getCollectionKey()
			if err != nil {
				a.logger.Warn("Failed to get collection key for metadata decryption",
					zap.String("file_id", cloudFile.ID),
					zap.Error(err))
				// Continue with placeholder values
			} else {
				// Decrypt file key
				// NOTE: The web frontend may send combined ciphertext (nonce + encrypted_data)
				// or separate fields. We handle both formats.
				// Use tryDecodeBase64 to handle multiple base64 encoding formats
				fileKeyCiphertext, err := tryDecodeBase64(cloudFile.EncryptedFileKey.Ciphertext)
				if err != nil {
					a.logger.Warn("Failed to decode file key ciphertext",
						zap.String("file_id", cloudFile.ID),
						zap.Error(err))
				} else {
					fileKeyNonce, err := tryDecodeBase64(cloudFile.EncryptedFileKey.Nonce)
					if err != nil {
						a.logger.Warn("Failed to decode file key nonce",
							zap.String("file_id", cloudFile.ID),
							zap.Error(err))
					} else {
						// Handle web frontend combined ciphertext format (nonce + encrypted_data)
						actualFileKeyCiphertext := extractActualCiphertext(fileKeyCiphertext, fileKeyNonce)
						fileKey, err := e2ee.DecryptFileKey(&e2ee.EncryptedKey{
							Ciphertext: actualFileKeyCiphertext,
							Nonce:      fileKeyNonce,
						}, colKey)
						if err != nil {
							a.logger.Warn("Failed to decrypt file key",
								zap.String("file_id", cloudFile.ID),
								zap.Error(err))
						} else {
							// Decrypt metadata
							// Use tryDecodeBase64 to handle multiple base64 encoding formats
							encryptedMetadataBytes, err := tryDecodeBase64(cloudFile.EncryptedMetadata)
							if err != nil {
								a.logger.Warn("Failed to decode encrypted metadata",
									zap.String("file_id", cloudFile.ID),
									zap.Error(err))
							} else {
								metadataNonce, metadataCiphertext, err := e2ee.SplitNonceAndCiphertextAuto(encryptedMetadataBytes)
								if err != nil {
									a.logger.Warn("Failed to split metadata nonce/ciphertext",
										zap.String("file_id", cloudFile.ID),
										zap.Error(err))
								} else {
									decryptedMetadata, err := e2ee.DecryptWithAlgorithm(metadataCiphertext, metadataNonce, fileKey)
									if err != nil {
										a.logger.Warn("Failed to decrypt metadata",
											zap.String("file_id", cloudFile.ID),
											zap.Error(err))
									} else {
										// Parse decrypted metadata JSON
										var metadata struct {
											Filename string `json:"name"`
											MimeType string `json:"mime_type"`
											Size     int64  `json:"size"`
										}
										if err := json.Unmarshal(decryptedMetadata, &metadata); err != nil {
											a.logger.Warn("Failed to parse metadata JSON",
												zap.String("file_id", cloudFile.ID),
												zap.Error(err))
										} else {
											// Successfully decrypted - use actual values
											if metadata.Filename != "" {
												filename = metadata.Filename
											}
											if metadata.MimeType != "" {
												contentType = metadata.MimeType
											}
											if metadata.Size > 0 {
												fileSize = metadata.Size
											}
										}
									}
								}
							}
						}
					}
				}
			}
		}
		// Process embedded tags from the API response
		// The backend includes tags in the list response, so we decrypt them here
		// instead of making separate API calls per file
		embeddedTags := make([]*EmbeddedTagData, 0, len(cloudFile.Tags))
		if len(cloudFile.Tags) > 0 {
			// Get master key for tag decryption (we need it for each file with tags)
			// Note: This is inside the file loop, so we get a fresh key reference for each file
			// NOTE(review): session + master key are re-fetched for every tagged
			// file; consider hoisting outside the loop — confirm keyCache cost.
			session, err := a.authService.GetCurrentSession(a.ctx)
			if err == nil {
				masterKey, cleanup, err := a.keyCache.GetMasterKey(session.Email)
				if err == nil {
					// Decrypt each embedded tag
					for _, tagData := range cloudFile.Tags {
						// Convert to client.Tag format for decryption
						clientTag := &client.Tag{
							ID:              tagData.ID,
							EncryptedName:   tagData.EncryptedName,
							EncryptedColor:  tagData.EncryptedColor,
							EncryptedTagKey: tagData.EncryptedTagKey,
						}
						// Decrypt the tag
						decryptedTag, err := a.decryptTag(clientTag, masterKey)
						if err != nil {
							a.logger.Warn("Failed to decrypt embedded tag for file, skipping",
								zap.String("file_id", cloudFile.ID),
								zap.String("tag_id", tagData.ID),
								zap.Error(err))
							continue
						}
						embeddedTags = append(embeddedTags, &EmbeddedTagData{
							ID:    decryptedTag.ID,
							Name:  decryptedTag.Name,
							Color: decryptedTag.Color,
						})
					}
					cleanup()
				} else {
					a.logger.Debug("Failed to get master key for tag decryption, skipping tags",
						zap.String("file_id", cloudFile.ID),
						zap.Error(err))
				}
			} else {
				a.logger.Debug("Failed to get session for tag decryption, skipping tags",
					zap.String("file_id", cloudFile.ID),
					zap.Error(err))
			}
		}
		result = append(result, &FileData{
			ID:              cloudFile.ID,
			CollectionID:    cloudFile.CollectionID,
			Filename:        filename,
			Size:            fileSize,
			ContentType:     contentType,
			CreatedAt:       cloudFile.CreatedAt.Format(time.RFC3339),
			ModifiedAt:      cloudFile.ModifiedAt.Format(time.RFC3339),
			SyncStatus:      syncStatus.String(),
			HasLocalContent: hasLocalContent,
			LocalFilePath:   localFilePath,
			Tags:            embeddedTags,
		})
	}
	a.logger.Info("Listed files",
		zap.String("collection_id", collectionID),
		zap.Int("count", len(result)))
	return result, nil
}
// GetFile retrieves a single file's details by ID
func (a *Application) GetFile(fileID string) (*FileDetailData, error) {
// Validate input
if err := inputvalidation.ValidateFileID(fileID); err != nil {
return nil, err
}
// Get current session
session, err := a.authService.GetCurrentSession(a.ctx)
if err != nil {
a.logger.Error("Failed to get current session", zap.Error(err))
return nil, fmt.Errorf("not authenticated: %w", err)
}
// Get the cached master key for decryption
masterKey, cleanup, err := a.keyCache.GetMasterKey(session.Email)
if err != nil {
a.logger.Error("Failed to get master key", zap.Error(err))
return nil, fmt.Errorf("failed to get master key: %w", err)
}
defer cleanup()
apiClient := a.authService.GetAPIClient()
apiClient.SetTokens(session.AccessToken, session.RefreshToken)
// Make HTTP request to get file details
// Note: Backend uses /api/v1/file/{id} (singular) not /api/v1/files/{id}
req, err := http.NewRequestWithContext(a.ctx, "GET", apiClient.GetBaseURL()+"/api/v1/file/"+fileID, nil)
if err != nil {
a.logger.Error("Failed to create get file request", zap.Error(err))
return nil, fmt.Errorf("failed to create request: %w", err)
}
req.Header.Set("Authorization", "Bearer "+session.AccessToken)
resp, err := a.httpClient.Do(req)
if err != nil {
a.logger.Error("Failed to send get file request", zap.Error(err))
return nil, fmt.Errorf("failed to send request: %w", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(resp.Body)
a.logger.Error("Failed to get file",
zap.Int("status", resp.StatusCode),
zap.String("body", string(body)))
return nil, fmt.Errorf("failed to get file: %s", string(body))
}
// Parse response
var fileResp struct {
ID string `json:"id"`
CollectionID string `json:"collection_id"`
EncryptedMetadata string `json:"encrypted_metadata"`
EncryptedFileKey struct {
Ciphertext string `json:"ciphertext"`
Nonce string `json:"nonce"`
} `json:"encrypted_file_key"`
EncryptedFileSizeInBytes int64 `json:"encrypted_file_size_in_bytes"`
CreatedAt string `json:"created_at"`
ModifiedAt string `json:"modified_at"`
Version uint64 `json:"version"`
State string `json:"state"`
Tags []struct {
ID string `json:"id"`
EncryptedName string `json:"encrypted_name"`
EncryptedColor string `json:"encrypted_color"`
EncryptedTagKey struct {
Ciphertext string `json:"ciphertext"`
Nonce string `json:"nonce"`
} `json:"encrypted_tag_key"`
} `json:"tags,omitempty"`
}
if err := json.NewDecoder(resp.Body).Decode(&fileResp); err != nil {
a.logger.Error("Failed to decode file response", zap.Error(err))
return nil, fmt.Errorf("failed to decode response: %w", err)
}
// Now we need to get the collection to decrypt the file key
// First get the collection's encrypted collection key
collReq, err := http.NewRequestWithContext(a.ctx, "GET", apiClient.GetBaseURL()+"/api/v1/collections/"+fileResp.CollectionID, nil)
if err != nil {
a.logger.Error("Failed to create get collection request", zap.Error(err))
return nil, fmt.Errorf("failed to create request: %w", err)
}
collReq.Header.Set("Authorization", "Bearer "+session.AccessToken)
collResp, err := a.httpClient.Do(collReq)
if err != nil {
a.logger.Error("Failed to get collection for file", zap.Error(err))
return nil, fmt.Errorf("failed to get collection: %w", err)
}
defer collResp.Body.Close()
if collResp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(collResp.Body)
a.logger.Error("Failed to get collection",
zap.Int("status", collResp.StatusCode),
zap.String("body", string(body)))
return nil, fmt.Errorf("failed to get collection: %s", string(body))
}
var collData struct {
EncryptedCollectionKey struct {
Ciphertext string `json:"ciphertext"`
Nonce string `json:"nonce"`
} `json:"encrypted_collection_key"`
}
if err := json.NewDecoder(collResp.Body).Decode(&collData); err != nil {
a.logger.Error("Failed to decode collection response", zap.Error(err))
return nil, fmt.Errorf("failed to decode collection: %w", err)
}
// Decrypt collection key with master key
// Use tryDecodeBase64 to handle multiple base64 encoding formats
collKeyNonce, err := tryDecodeBase64(collData.EncryptedCollectionKey.Nonce)
if err != nil {
a.logger.Error("Failed to decode collection key nonce", zap.Error(err))
return nil, fmt.Errorf("failed to decode key nonce: %w", err)
}
collKeyCiphertext, err := tryDecodeBase64(collData.EncryptedCollectionKey.Ciphertext)
if err != nil {
a.logger.Error("Failed to decode collection key ciphertext", zap.Error(err))
return nil, fmt.Errorf("failed to decode key ciphertext: %w", err)
}
// Handle web frontend combined ciphertext format (nonce + encrypted_data)
actualCollKeyCiphertext := extractActualCiphertext(collKeyCiphertext, collKeyNonce)
collectionKey, err := e2ee.DecryptCollectionKey(&e2ee.EncryptedKey{
Ciphertext: actualCollKeyCiphertext,
Nonce: collKeyNonce,
}, masterKey)
if err != nil {
a.logger.Error("Failed to decrypt collection key", zap.Error(err))
return nil, fmt.Errorf("failed to decrypt collection key: %w", err)
}
// Decrypt file key with collection key
// NOTE: The web frontend may send combined ciphertext (nonce + encrypted_data)
// or separate fields. We handle both formats.
// Use tryDecodeBase64 to handle multiple base64 encoding formats
fileKeyNonce, err := tryDecodeBase64(fileResp.EncryptedFileKey.Nonce)
if err != nil {
a.logger.Error("Failed to decode file key nonce", zap.Error(err))
return nil, fmt.Errorf("failed to decode file key nonce: %w", err)
}
fileKeyCiphertext, err := tryDecodeBase64(fileResp.EncryptedFileKey.Ciphertext)
if err != nil {
a.logger.Error("Failed to decode file key ciphertext", zap.Error(err))
return nil, fmt.Errorf("failed to decode file key ciphertext: %w", err)
}
// Handle web frontend combined ciphertext format (nonce + encrypted_data)
actualFileKeyCiphertext := extractActualCiphertext(fileKeyCiphertext, fileKeyNonce)
fileKey, err := e2ee.DecryptFileKey(&e2ee.EncryptedKey{
Ciphertext: actualFileKeyCiphertext,
Nonce: fileKeyNonce,
}, collectionKey)
if err != nil {
a.logger.Error("Failed to decrypt file key", zap.Error(err))
return nil, fmt.Errorf("failed to decrypt file key: %w", err)
}
// Decrypt file metadata with file key
// Use tryDecodeBase64 to handle multiple base64 encoding formats
encryptedMetadataBytes, err := tryDecodeBase64(fileResp.EncryptedMetadata)
if err != nil {
a.logger.Error("Failed to decode encrypted metadata", zap.Error(err))
return nil, fmt.Errorf("failed to decode metadata: %w", err)
}
// Split nonce and ciphertext (auto-detect nonce size: 12 for ChaCha20, 24 for XSalsa20)
metadataNonce, metadataCiphertext, err := e2ee.SplitNonceAndCiphertextAuto(encryptedMetadataBytes)
if err != nil {
a.logger.Error("Failed to split metadata nonce/ciphertext", zap.Error(err))
return nil, fmt.Errorf("failed to parse metadata: %w", err)
}
decryptedMetadata, err := e2ee.DecryptWithAlgorithm(metadataCiphertext, metadataNonce, fileKey)
if err != nil {
a.logger.Error("Failed to decrypt file metadata", zap.Error(err))
return nil, fmt.Errorf("failed to decrypt metadata: %w", err)
}
// Parse decrypted metadata JSON
var metadata struct {
Filename string `json:"name"`
MimeType string `json:"mime_type"`
Size int64 `json:"size"`
}
if err := json.Unmarshal(decryptedMetadata, &metadata); err != nil {
a.logger.Error("Failed to parse file metadata", zap.Error(err))
return nil, fmt.Errorf("failed to parse metadata: %w", err)
}
// Check local repository for sync status
syncStatus := file.SyncStatusCloudOnly // Default: cloud only
hasLocalContent := false
localFilePath := ""
localFile, err := a.mustGetFileRepo().Get(fileResp.ID)
if err == nil && localFile != nil {
syncStatus = localFile.SyncStatus
hasLocalContent = localFile.HasLocalContent()
localFilePath = localFile.FilePath
}
// Process embedded tags from the API response
embeddedTags := make([]*EmbeddedTagData, 0, len(fileResp.Tags))
for _, tagData := range fileResp.Tags {
// Convert the embedded tag structure to client.Tag format for decryption
clientTag := &client.Tag{
ID: tagData.ID,
EncryptedName: tagData.EncryptedName,
EncryptedColor: tagData.EncryptedColor,
EncryptedTagKey: &client.EncryptedTagKey{
Ciphertext: tagData.EncryptedTagKey.Ciphertext,
Nonce: tagData.EncryptedTagKey.Nonce,
},
}
// Decrypt the tag using the existing decryptTag helper
decryptedTag, err := a.decryptTag(clientTag, masterKey)
if err != nil {
a.logger.Warn("Failed to decrypt embedded tag, skipping",
zap.String("file_id", fileResp.ID),
zap.String("tag_id", tagData.ID),
zap.Error(err))
continue
}
embeddedTags = append(embeddedTags, &EmbeddedTagData{
ID: decryptedTag.ID,
Name: decryptedTag.Name,
Color: decryptedTag.Color,
})
a.logger.Debug("Decrypted embedded tag for file",
zap.String("file_id", fileResp.ID),
zap.String("tag_id", decryptedTag.ID),
zap.String("name", decryptedTag.Name),
zap.String("color", decryptedTag.Color))
}
return &FileDetailData{
ID: fileResp.ID,
CollectionID: fileResp.CollectionID,
Filename: metadata.Filename,
MimeType: metadata.MimeType,
Size: metadata.Size,
EncryptedFileSizeInBytes: fileResp.EncryptedFileSizeInBytes,
CreatedAt: fileResp.CreatedAt,
ModifiedAt: fileResp.ModifiedAt,
Version: fileResp.Version,
State: fileResp.State,
SyncStatus: syncStatus.String(),
HasLocalContent: hasLocalContent,
LocalFilePath: localFilePath,
Tags: embeddedTags,
}, nil
}

View file

@ -0,0 +1,191 @@
package app
import (
"fmt"
"os"
"time"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/file"
)
// =============================================================================
// FILE CLEANUP OPERATIONS
// =============================================================================
// DeleteFile soft-deletes a file from both the cloud and local storage.
// The cloud delete runs first; local cleanup only happens after the server
// confirms, so a network failure never leaves cloud state out of sync.
func (a *Application) DeleteFile(fileID string) error {
	a.logger.Info("DeleteFile called", zap.String("file_id", fileID))

	// The SDK client transparently refreshes the access token on a 401
	// and retries, so no manual auth handling is needed here.
	sdk := a.authService.GetAPIClient()
	if err := sdk.DeleteFile(a.ctx, fileID); err != nil {
		a.logger.Error("Failed to delete file from cloud",
			zap.String("file_id", fileID),
			zap.Error(err))
		return fmt.Errorf("failed to delete file: %w", err)
	}

	// Cloud delete confirmed - remove local binaries and mark metadata deleted.
	a.cleanupLocalFile(fileID)
	a.logger.Info("File deleted successfully", zap.String("file_id", fileID))
	return nil
}
// cleanupLocalFile removes physical binary files immediately and marks the metadata as deleted.
// The metadata record is kept for background cleanup later (see cleanupDeletedFiles).
func (a *Application) cleanupLocalFile(fileID string) {
	// Get the local file record; nothing to do if this file was never cached locally.
	localFile, err := a.mustGetFileRepo().Get(fileID)
	if err != nil || localFile == nil {
		a.logger.Debug("No local file record to clean up", zap.String("file_id", fileID))
		return
	}

	// IMMEDIATELY delete all physical artifacts: the decrypted copy, the
	// encrypted copy, and the thumbnail. Missing files are not an error -
	// they may already have been removed by a previous cleanup pass.
	a.removeLocalArtifact(fileID, localFile.FilePath, "decrypted file")
	a.removeLocalArtifact(fileID, localFile.EncryptedFilePath, "encrypted file")
	a.removeLocalArtifact(fileID, localFile.ThumbnailPath, "thumbnail")

	// Mark the metadata record as deleted (will be cleaned up later by the
	// background process) and clear the paths, since the physical files are
	// now gone.
	localFile.State = file.StateDeleted
	localFile.FilePath = ""
	localFile.EncryptedFilePath = ""
	localFile.ThumbnailPath = ""
	localFile.ModifiedAt = time.Now()
	if err := a.mustGetFileRepo().Update(localFile); err != nil {
		a.logger.Warn("Failed to mark local file metadata as deleted",
			zap.String("file_id", fileID),
			zap.Error(err))
	} else {
		a.logger.Info("Marked local file metadata as deleted (will be cleaned up later)",
			zap.String("file_id", fileID))
		// Remove from search index so deleted files stop appearing in results.
		if err := a.searchService.DeleteFile(fileID); err != nil {
			a.logger.Warn("Failed to remove file from search index",
				zap.String("file_id", fileID),
				zap.Error(err))
		}
	}
}

// removeLocalArtifact deletes a single physical file belonging to fileID.
// An empty path or an already-missing file is silently ignored; any other
// removal error is logged but not propagated (cleanup is best-effort).
// label names the artifact kind for log messages, e.g. "decrypted file".
func (a *Application) removeLocalArtifact(fileID, path, label string) {
	if path == "" {
		return
	}
	if err := os.Remove(path); err != nil {
		if !os.IsNotExist(err) {
			a.logger.Warn("Failed to delete local "+label,
				zap.String("file_id", fileID),
				zap.String("path", path),
				zap.Error(err))
		}
		return
	}
	a.logger.Info("Deleted local "+label,
		zap.String("file_id", fileID),
		zap.String("path", path))
}
// purgeDeletedFileMetadata permanently removes a deleted file's metadata record.
// Invoked by the background cleanup process once the retention period has elapsed.
func (a *Application) purgeDeletedFileMetadata(fileID string) {
	err := a.mustGetFileRepo().Delete(fileID)
	if err == nil {
		a.logger.Info("Purged deleted file metadata",
			zap.String("file_id", fileID))
		return
	}
	a.logger.Warn("Failed to purge deleted file metadata",
		zap.String("file_id", fileID),
		zap.Error(err))
}
// deletedFileRetentionPeriod is how long to keep deleted file metadata before purging.
// This allows for potential recovery or sync conflict resolution.
const deletedFileRetentionPeriod = 7 * 24 * time.Hour // 7 days
// cleanupDeletedFiles runs in the background to clean up deleted files.
// Each pass handles two situations:
//  1. deleted records that still reference physical files: binaries are removed now
//  2. deleted records past the retention period: metadata is purged
func (a *Application) cleanupDeletedFiles() {
	a.logger.Info("Starting background cleanup of deleted files")

	localFiles, err := a.mustGetFileRepo().List()
	if err != nil {
		a.logger.Error("Failed to list local files for cleanup", zap.Error(err))
		return
	}

	var binariesCleaned, metadataPurged int
	now := time.Now()
	for _, lf := range localFiles {
		// Only deleted records are eligible for cleanup.
		if lf.State != file.StateDeleted {
			continue
		}

		// Physical artifacts still present? Remove them immediately and
		// leave the metadata purge for a later pass.
		if lf.FilePath != "" || lf.EncryptedFilePath != "" || lf.ThumbnailPath != "" {
			a.logger.Info("Cleaning up orphaned binary files for deleted record",
				zap.String("file_id", lf.ID))
			a.cleanupLocalFile(lf.ID)
			binariesCleaned++
			continue
		}

		// Metadata with no remaining binaries is purged once it has aged
		// past the retention period.
		if now.Sub(lf.ModifiedAt) > deletedFileRetentionPeriod {
			a.logger.Info("Purging deleted file metadata (past retention period)",
				zap.String("file_id", lf.ID),
				zap.Time("deleted_at", lf.ModifiedAt))
			a.purgeDeletedFileMetadata(lf.ID)
			metadataPurged++
		}
	}

	if binariesCleaned == 0 && metadataPurged == 0 {
		a.logger.Debug("Background cleanup completed, no cleanup needed")
		return
	}
	a.logger.Info("Background cleanup completed",
		zap.Int("binaries_cleaned", binariesCleaned),
		zap.Int("metadata_purged", metadataPurged))
}

View file

@ -0,0 +1,880 @@
package app
import (
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"os/exec"
"path/filepath"
sysRuntime "runtime"
"strings"
"github.com/wailsapp/wails/v2/pkg/runtime"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/maplefile/e2ee"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/file"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/inputvalidation"
)
// =============================================================================
// FILE DOWNLOAD OPERATIONS
// =============================================================================
// tryDecodeBase64 attempts to decode a base64 string using multiple encoding variants.
// The web frontend uses URL-safe base64 without padding (libsodium default),
// while Go typically uses standard base64 with padding, so the variants are
// tried in descending order of likelihood. The returned error carries the
// failure from the last variant attempted.
func tryDecodeBase64(s string) ([]byte, error) {
	// Ordered by likelihood: libsodium's URLSAFE_NO_PADDING (used by the web
	// frontend) first, then Go's default, then the remaining combinations.
	variants := []*base64.Encoding{
		base64.RawURLEncoding, // URL-safe, no padding
		base64.StdEncoding,    // standard, padded
		base64.RawStdEncoding, // standard, no padding
		base64.URLEncoding,    // URL-safe, padded
	}

	var lastErr error
	for _, enc := range variants {
		data, err := enc.DecodeString(s)
		if err == nil {
			return data, nil
		}
		lastErr = err
	}
	return nil, fmt.Errorf("failed to decode base64 with any encoding variant (input length: %d, first 50 chars: %s, last error: %w)", len(s), truncateString(s, 50), lastErr)
}
// truncateString truncates a string to at most maxLen bytes, appending "..."
// when truncation occurs. Strings already within the limit are returned as-is.
func truncateString(s string, maxLen int) string {
	if len(s) > maxLen {
		return s[:maxLen] + "..."
	}
	return s
}
// GetFileDownloadURL gets a presigned download URL for a file.
// It requires an authenticated session and returns the backend-issued S3 URL
// the caller can fetch the encrypted content from.
func (a *Application) GetFileDownloadURL(fileID string) (string, error) {
	// Reject malformed IDs before touching the network.
	if err := inputvalidation.ValidateFileID(fileID); err != nil {
		return "", err
	}

	// An authenticated session supplies the bearer token.
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil {
		a.logger.Error("Failed to get current session", zap.Error(err))
		return "", fmt.Errorf("not authenticated: %w", err)
	}

	// Note: Backend uses singular "file" not plural "files" in the path.
	endpoint := a.authService.GetAPIClient().GetBaseURL() + "/api/v1/file/" + fileID + "/download-url"
	req, err := http.NewRequestWithContext(a.ctx, "GET", endpoint, nil)
	if err != nil {
		a.logger.Error("Failed to create download URL request", zap.Error(err))
		return "", fmt.Errorf("failed to create request: %w", err)
	}
	req.Header.Set("Authorization", "Bearer "+session.AccessToken)

	resp, err := a.httpClient.Do(req)
	if err != nil {
		a.logger.Error("Failed to send download URL request", zap.Error(err))
		return "", fmt.Errorf("failed to send request: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp.Body)
		a.logger.Error("Failed to get download URL",
			zap.Int("status", resp.StatusCode),
			zap.String("body", string(body)))
		return "", fmt.Errorf("failed to get download URL: %s", string(body))
	}

	// Response structure matches backend's GetPresignedDownloadURLResponseDTO.
	var urlResp struct {
		PresignedDownloadURL      string `json:"presigned_download_url"`
		DownloadURLExpirationTime string `json:"download_url_expiration_time"`
		Success                   bool   `json:"success"`
		Message                   string `json:"message"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&urlResp); err != nil {
		a.logger.Error("Failed to decode download URL response", zap.Error(err))
		return "", fmt.Errorf("failed to decode response: %w", err)
	}
	return urlResp.PresignedDownloadURL, nil
}
// DownloadFile downloads, decrypts, and saves a file to the user's chosen location.
// If the file already exists locally, it copies from local storage instead of re-downloading.
//
// Returns the chosen save path, or ("", nil) when the user cancels the save
// dialog. Decryption follows the E2EE key chain: master key -> collection key
// -> file key -> file content/metadata.
func (a *Application) DownloadFile(fileID string) (string, error) {
	// Validate input before doing any work.
	if err := inputvalidation.ValidateFileID(fileID); err != nil {
		return "", err
	}
	a.logger.Info("Starting file download", zap.String("file_id", fileID))

	// Fast path: if a decrypted copy already exists on disk, copy it to the
	// user's chosen destination instead of re-downloading from the cloud.
	localFile, err := a.mustGetFileRepo().Get(fileID)
	if err == nil && localFile != nil && localFile.FilePath != "" {
		// Check if local file actually exists on disk
		if _, statErr := os.Stat(localFile.FilePath); statErr == nil {
			a.logger.Info("File exists locally, using local copy",
				zap.String("file_id", fileID),
				zap.String("local_path", localFile.FilePath))
			// Open save dialog for user to choose location
			savePath, dialogErr := runtime.SaveFileDialog(a.ctx, runtime.SaveDialogOptions{
				Title:           "Save File As",
				DefaultFilename: localFile.Name,
			})
			if dialogErr != nil {
				return "", fmt.Errorf("failed to open save dialog: %w", dialogErr)
			}
			// User cancelled the dialog; not an error.
			if savePath == "" {
				a.logger.Info("User cancelled save dialog")
				return "", nil
			}
			// Copy local file to chosen location
			srcFile, copyErr := os.Open(localFile.FilePath)
			if copyErr != nil {
				return "", fmt.Errorf("failed to open local file: %w", copyErr)
			}
			defer srcFile.Close()
			dstFile, copyErr := os.Create(savePath)
			if copyErr != nil {
				return "", fmt.Errorf("failed to create destination file: %w", copyErr)
			}
			defer dstFile.Close()
			if _, copyErr := io.Copy(dstFile, srcFile); copyErr != nil {
				return "", fmt.Errorf("failed to copy file: %w", copyErr)
			}
			a.logger.Info("File saved from local copy",
				zap.String("file_id", fileID),
				zap.String("save_path", savePath))
			return savePath, nil
		}
	}

	// File not available locally, download from cloud
	a.logger.Info("File not available locally, downloading from cloud", zap.String("file_id", fileID))

	// Get current session for authentication
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil {
		a.logger.Error("Failed to get current session", zap.Error(err))
		return "", fmt.Errorf("not authenticated: %w", err)
	}
	// Get master key from cache; cleanup zeroes the key material when done.
	email := session.Email
	masterKey, cleanupMasterKey, err := a.keyCache.GetMasterKey(email)
	if err != nil {
		a.logger.Error("Failed to get master key from cache", zap.Error(err))
		return "", fmt.Errorf("encryption key not available: %w", err)
	}
	defer cleanupMasterKey()
	apiClient := a.authService.GetAPIClient()

	// Step 1: Get file metadata (encrypted metadata + encrypted file key).
	fileReq, err := http.NewRequestWithContext(a.ctx, "GET", apiClient.GetBaseURL()+"/api/v1/file/"+fileID, nil)
	if err != nil {
		a.logger.Error("Failed to create get file request", zap.Error(err))
		return "", fmt.Errorf("failed to create request: %w", err)
	}
	fileReq.Header.Set("Authorization", "Bearer "+session.AccessToken)
	fileResp, err := a.httpClient.Do(fileReq)
	if err != nil {
		a.logger.Error("Failed to get file metadata", zap.Error(err))
		return "", fmt.Errorf("failed to get file: %w", err)
	}
	defer fileResp.Body.Close()
	if fileResp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(fileResp.Body)
		a.logger.Error("Failed to get file", zap.Int("status", fileResp.StatusCode), zap.String("body", string(body)))
		return "", fmt.Errorf("failed to get file: %s", string(body))
	}
	var fileData struct {
		ID                string `json:"id"`
		CollectionID      string `json:"collection_id"`
		EncryptedMetadata string `json:"encrypted_metadata"`
		EncryptedFileKey  struct {
			Ciphertext string `json:"ciphertext"`
			Nonce      string `json:"nonce"`
		} `json:"encrypted_file_key"`
	}
	if err := json.NewDecoder(fileResp.Body).Decode(&fileData); err != nil {
		a.logger.Error("Failed to decode file response", zap.Error(err))
		return "", fmt.Errorf("failed to decode response: %w", err)
	}

	// Step 2: Get collection to decrypt collection key
	collReq, err := http.NewRequestWithContext(a.ctx, "GET", apiClient.GetBaseURL()+"/api/v1/collections/"+fileData.CollectionID, nil)
	if err != nil {
		a.logger.Error("Failed to create get collection request", zap.Error(err))
		return "", fmt.Errorf("failed to create request: %w", err)
	}
	collReq.Header.Set("Authorization", "Bearer "+session.AccessToken)
	collResp, err := a.httpClient.Do(collReq)
	if err != nil {
		a.logger.Error("Failed to get collection", zap.Error(err))
		return "", fmt.Errorf("failed to get collection: %w", err)
	}
	defer collResp.Body.Close()
	if collResp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(collResp.Body)
		a.logger.Error("Failed to get collection", zap.Int("status", collResp.StatusCode), zap.String("body", string(body)))
		return "", fmt.Errorf("failed to get collection: %s", string(body))
	}
	var collData struct {
		EncryptedCollectionKey struct {
			Ciphertext string `json:"ciphertext"`
			Nonce      string `json:"nonce"`
		} `json:"encrypted_collection_key"`
	}
	if err := json.NewDecoder(collResp.Body).Decode(&collData); err != nil {
		a.logger.Error("Failed to decode collection response", zap.Error(err))
		return "", fmt.Errorf("failed to decode collection: %w", err)
	}

	// Step 3: Decrypt collection key with master key
	// Use tryDecodeBase64 to handle multiple base64 encoding formats
	collKeyNonce, err := tryDecodeBase64(collData.EncryptedCollectionKey.Nonce)
	if err != nil {
		return "", fmt.Errorf("failed to decode collection key nonce: %w", err)
	}
	collKeyCiphertext, err := tryDecodeBase64(collData.EncryptedCollectionKey.Ciphertext)
	if err != nil {
		return "", fmt.Errorf("failed to decode collection key ciphertext: %w", err)
	}
	// Handle web frontend combined ciphertext format (nonce + encrypted_data)
	actualCollKeyCiphertext := extractActualCiphertext(collKeyCiphertext, collKeyNonce)
	collectionKey, err := e2ee.DecryptCollectionKey(&e2ee.EncryptedKey{
		Ciphertext: actualCollKeyCiphertext,
		Nonce:      collKeyNonce,
	}, masterKey)
	if err != nil {
		a.logger.Error("Failed to decrypt collection key", zap.Error(err))
		return "", fmt.Errorf("failed to decrypt collection key: %w", err)
	}

	// Step 4: Decrypt file key with collection key
	// NOTE: The web frontend may send combined ciphertext (nonce + encrypted_data)
	// or separate fields. We handle both formats.
	// Use tryDecodeBase64 to handle multiple base64 encoding formats
	fileKeyNonce, err := tryDecodeBase64(fileData.EncryptedFileKey.Nonce)
	if err != nil {
		return "", fmt.Errorf("failed to decode file key nonce: %w", err)
	}
	fileKeyCiphertext, err := tryDecodeBase64(fileData.EncryptedFileKey.Ciphertext)
	if err != nil {
		return "", fmt.Errorf("failed to decode file key ciphertext: %w", err)
	}
	// Handle web frontend combined ciphertext format (nonce + encrypted_data)
	actualFileKeyCiphertext := extractActualCiphertext(fileKeyCiphertext, fileKeyNonce)
	a.logger.Info("Decrypting file key",
		zap.Int("nonce_size", len(fileKeyNonce)),
		zap.Int("ciphertext_size", len(actualFileKeyCiphertext)),
		zap.Int("collection_key_size", len(collectionKey)))
	fileKey, err := e2ee.DecryptFileKey(&e2ee.EncryptedKey{
		Ciphertext: actualFileKeyCiphertext,
		Nonce:      fileKeyNonce,
	}, collectionKey)
	if err != nil {
		a.logger.Error("Failed to decrypt file key", zap.Error(err))
		return "", fmt.Errorf("failed to decrypt file key: %w", err)
	}
	a.logger.Info("File key decrypted successfully", zap.Int("file_key_size", len(fileKey)))

	// Step 5: Decrypt metadata to get filename
	// Use tryDecodeBase64 to handle URL-safe base64 without padding (libsodium format)
	encryptedMetadataBytes, err := tryDecodeBase64(fileData.EncryptedMetadata)
	if err != nil {
		return "", fmt.Errorf("failed to decode metadata: %w", err)
	}
	// Split nonce and ciphertext (nonce size auto-detected by the helper).
	metadataNonce, metadataCiphertext, err := e2ee.SplitNonceAndCiphertextAuto(encryptedMetadataBytes)
	if err != nil {
		return "", fmt.Errorf("failed to parse metadata: %w", err)
	}
	decryptedMetadata, err := e2ee.DecryptWithAlgorithm(metadataCiphertext, metadataNonce, fileKey)
	if err != nil {
		return "", fmt.Errorf("failed to decrypt metadata: %w", err)
	}
	var metadata struct {
		Filename string `json:"name"`
		MimeType string `json:"mime_type"`
	}
	if err := json.Unmarshal(decryptedMetadata, &metadata); err != nil {
		return "", fmt.Errorf("failed to parse metadata: %w", err)
	}

	// Step 6: Get presigned download URL
	downloadURL, err := a.GetFileDownloadURL(fileID)
	if err != nil {
		return "", fmt.Errorf("failed to get download URL: %w", err)
	}
	// Step 6.5: Validate download URL before use (SSRF protection)
	if err := inputvalidation.ValidateDownloadURL(downloadURL); err != nil {
		a.logger.Error("Download URL validation failed",
			zap.String("file_id", fileID),
			zap.Error(err))
		return "", fmt.Errorf("download URL validation failed: %w", err)
	}

	// Step 7: Download encrypted file from S3 (use large download client - no timeout for big files)
	a.logger.Info("Downloading encrypted file from S3", zap.String("filename", metadata.Filename))
	downloadResp, err := a.httpClient.GetLargeDownload(downloadURL)
	if err != nil {
		a.logger.Error("Failed to download file from S3", zap.Error(err))
		return "", fmt.Errorf("failed to download file: %w", err)
	}
	defer downloadResp.Body.Close()
	if downloadResp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(downloadResp.Body)
		a.logger.Error("S3 download failed", zap.Int("status", downloadResp.StatusCode), zap.String("body", string(body)))
		return "", fmt.Errorf("failed to download file from storage: status %d", downloadResp.StatusCode)
	}
	encryptedContent, err := io.ReadAll(downloadResp.Body)
	if err != nil {
		a.logger.Error("Failed to read encrypted content", zap.Error(err))
		return "", fmt.Errorf("failed to read file content: %w", err)
	}
	a.logger.Info("Downloaded encrypted file", zap.Int("encrypted_size", len(encryptedContent)))

	// Step 8: Decrypt file content.
	// Guard against an empty body: the original code logged
	// encryptedContent[0], which panics with index-out-of-range when S3
	// returns 200 with zero bytes (and also leaked a ciphertext byte to logs).
	if len(encryptedContent) == 0 {
		a.logger.Error("Downloaded encrypted content is empty", zap.String("file_id", fileID))
		return "", fmt.Errorf("downloaded file is empty")
	}
	a.logger.Info("Decrypting file content",
		zap.Int("encrypted_size", len(encryptedContent)),
		zap.Int("file_key_size", len(fileKey)))
	decryptedContent, err := e2ee.DecryptFile(encryptedContent, fileKey)
	if err != nil {
		a.logger.Error("Failed to decrypt file content",
			zap.Error(err),
			zap.Int("encrypted_size", len(encryptedContent)),
			zap.Int("file_key_size", len(fileKey)))
		return "", fmt.Errorf("failed to decrypt file: %w", err)
	}
	// Single success log (the original emitted two redundant lines here).
	a.logger.Info("File content decrypted successfully",
		zap.Int("decrypted_size", len(decryptedContent)))

	// Step 9: Open save dialog for user to choose location
	savePath, err := runtime.SaveFileDialog(a.ctx, runtime.SaveDialogOptions{
		Title:           "Save File As",
		DefaultFilename: metadata.Filename,
	})
	if err != nil {
		a.logger.Error("Failed to open save dialog", zap.Error(err))
		return "", fmt.Errorf("failed to open save dialog: %w", err)
	}
	// User cancelled the dialog
	if savePath == "" {
		a.logger.Info("User cancelled save dialog")
		return "", nil
	}

	// Step 10: Write decrypted content to file (0600 = owner read/write only for security)
	if err := os.WriteFile(savePath, decryptedContent, 0600); err != nil {
		a.logger.Error("Failed to write file", zap.Error(err), zap.String("path", savePath))
		return "", fmt.Errorf("failed to save file: %w", err)
	}
	a.logger.Info("File downloaded and decrypted successfully",
		zap.String("file_id", fileID),
		zap.String("filename", metadata.Filename),
		zap.String("save_path", savePath),
		zap.Int("size", len(decryptedContent)))
	return savePath, nil
}
// OnloadFileResult represents the result of onloading a file for offline access.
// It is serialized to JSON for the frontend, mirroring the fields written by
// OnloadFile.
type OnloadFileResult struct {
	// FileID is the cloud identifier of the onloaded file.
	FileID string `json:"file_id"`
	// Filename is the decrypted, human-readable file name.
	Filename string `json:"filename"`
	// LocalFilePath is where the decrypted copy was stored on disk.
	LocalFilePath string `json:"local_file_path"`
	// Size is the decrypted size in bytes.
	Size int64 `json:"size"`
	// Success reports whether the onload completed.
	Success bool `json:"success"`
	// Message is a human-readable status/description for the frontend.
	Message string `json:"message"`
}
// OnloadFile downloads and stores a file locally for offline access (no save dialog)
func (a *Application) OnloadFile(fileID string) (*OnloadFileResult, error) {
// Validate input
if err := inputvalidation.ValidateFileID(fileID); err != nil {
return nil, err
}
a.logger.Info("Onloading file for offline access", zap.String("file_id", fileID))
// Get current session for authentication
session, err := a.authService.GetCurrentSession(a.ctx)
if err != nil {
a.logger.Error("Failed to get current session", zap.Error(err))
return nil, fmt.Errorf("not authenticated: %w", err)
}
// Get master key from cache
email := session.Email
masterKey, cleanupMasterKey, err := a.keyCache.GetMasterKey(email)
if err != nil {
a.logger.Error("Failed to get master key from cache", zap.Error(err))
return nil, fmt.Errorf("encryption key not available: %w", err)
}
defer cleanupMasterKey()
apiClient := a.authService.GetAPIClient()
// Step 1: Get file metadata
fileReq, err := http.NewRequestWithContext(a.ctx, "GET", apiClient.GetBaseURL()+"/api/v1/file/"+fileID, nil)
if err != nil {
return nil, fmt.Errorf("failed to create request: %w", err)
}
fileReq.Header.Set("Authorization", "Bearer "+session.AccessToken)
fileResp, err := a.httpClient.Do(fileReq)
if err != nil {
return nil, fmt.Errorf("failed to get file: %w", err)
}
defer fileResp.Body.Close()
if fileResp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(fileResp.Body)
return nil, fmt.Errorf("failed to get file: %s", string(body))
}
var fileData struct {
ID string `json:"id"`
CollectionID string `json:"collection_id"`
EncryptedMetadata string `json:"encrypted_metadata"`
FileNonce string `json:"file_nonce"`
EncryptedSizeInBytes int64 `json:"encrypted_file_size_in_bytes"`
EncryptedFileKey struct {
Ciphertext string `json:"ciphertext"`
Nonce string `json:"nonce"`
} `json:"encrypted_file_key"`
}
if err := json.NewDecoder(fileResp.Body).Decode(&fileData); err != nil {
return nil, fmt.Errorf("failed to decode response: %w", err)
}
// Step 2: Get collection to decrypt collection key
collReq, err := http.NewRequestWithContext(a.ctx, "GET", apiClient.GetBaseURL()+"/api/v1/collections/"+fileData.CollectionID, nil)
if err != nil {
return nil, fmt.Errorf("failed to create request: %w", err)
}
collReq.Header.Set("Authorization", "Bearer "+session.AccessToken)
collResp, err := a.httpClient.Do(collReq)
if err != nil {
return nil, fmt.Errorf("failed to get collection: %w", err)
}
defer collResp.Body.Close()
if collResp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(collResp.Body)
return nil, fmt.Errorf("failed to get collection: %s", string(body))
}
var collData struct {
EncryptedCollectionKey struct {
Ciphertext string `json:"ciphertext"`
Nonce string `json:"nonce"`
} `json:"encrypted_collection_key"`
}
if err := json.NewDecoder(collResp.Body).Decode(&collData); err != nil {
return nil, fmt.Errorf("failed to decode collection: %w", err)
}
// Step 3: Decrypt collection key with master key
// Use tryDecodeBase64 to handle multiple base64 encoding formats
collKeyNonce, err := tryDecodeBase64(collData.EncryptedCollectionKey.Nonce)
if err != nil {
return nil, fmt.Errorf("failed to decode collection key nonce: %w", err)
}
collKeyCiphertext, err := tryDecodeBase64(collData.EncryptedCollectionKey.Ciphertext)
if err != nil {
return nil, fmt.Errorf("failed to decode collection key ciphertext: %w", err)
}
// Handle web frontend combined ciphertext format (nonce + encrypted_data)
actualCollKeyCiphertext := extractActualCiphertext(collKeyCiphertext, collKeyNonce)
collectionKey, err := e2ee.DecryptCollectionKey(&e2ee.EncryptedKey{
Ciphertext: actualCollKeyCiphertext,
Nonce: collKeyNonce,
}, masterKey)
if err != nil {
return nil, fmt.Errorf("failed to decrypt collection key: %w", err)
}
// Step 4: Decrypt file key with collection key
// NOTE: The web frontend may send combined ciphertext (nonce + encrypted_data)
// or separate fields. We handle both formats.
// Use tryDecodeBase64 to handle multiple base64 encoding formats
fileKeyNonce, err := tryDecodeBase64(fileData.EncryptedFileKey.Nonce)
if err != nil {
return nil, fmt.Errorf("failed to decode file key nonce: %w", err)
}
fileKeyCiphertext, err := tryDecodeBase64(fileData.EncryptedFileKey.Ciphertext)
if err != nil {
return nil, fmt.Errorf("failed to decode file key ciphertext: %w", err)
}
// Handle web frontend combined ciphertext format (nonce + encrypted_data)
actualFileKeyCiphertext := extractActualCiphertext(fileKeyCiphertext, fileKeyNonce)
fileKey, err := e2ee.DecryptFileKey(&e2ee.EncryptedKey{
Ciphertext: actualFileKeyCiphertext,
Nonce: fileKeyNonce,
}, collectionKey)
if err != nil {
return nil, fmt.Errorf("failed to decrypt file key: %w", err)
}
// Step 5: Decrypt metadata to get filename
// Use tryDecodeBase64 to handle URL-safe base64 without padding (libsodium format)
encryptedMetadataBytes, err := tryDecodeBase64(fileData.EncryptedMetadata)
if err != nil {
return nil, fmt.Errorf("failed to decode metadata: %w", err)
}
metadataNonce, metadataCiphertext, err := e2ee.SplitNonceAndCiphertextAuto(encryptedMetadataBytes)
if err != nil {
return nil, fmt.Errorf("failed to parse metadata: %w", err)
}
decryptedMetadata, err := e2ee.DecryptWithAlgorithm(metadataCiphertext, metadataNonce, fileKey)
if err != nil {
return nil, fmt.Errorf("failed to decrypt metadata: %w", err)
}
var metadata struct {
Filename string `json:"name"`
MimeType string `json:"mime_type"`
}
if err := json.Unmarshal(decryptedMetadata, &metadata); err != nil {
return nil, fmt.Errorf("failed to parse metadata: %w", err)
}
// Step 6: Get presigned download URL
downloadURL, err := a.GetFileDownloadURL(fileID)
if err != nil {
return nil, fmt.Errorf("failed to get download URL: %w", err)
}
// Step 6.5: Validate download URL before use (SSRF protection)
if err := inputvalidation.ValidateDownloadURL(downloadURL); err != nil {
a.logger.Error("Download URL validation failed",
zap.String("file_id", fileID),
zap.Error(err))
return nil, fmt.Errorf("download URL validation failed: %w", err)
}
// Step 7: Download encrypted file from S3 (use large download client - no timeout for big files)
a.logger.Info("Downloading encrypted file from S3", zap.String("filename", metadata.Filename))
downloadResp, err := a.httpClient.GetLargeDownload(downloadURL)
if err != nil {
return nil, fmt.Errorf("failed to download file: %w", err)
}
defer downloadResp.Body.Close()
if downloadResp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("failed to download file from storage: status %d", downloadResp.StatusCode)
}
encryptedContent, err := io.ReadAll(downloadResp.Body)
if err != nil {
return nil, fmt.Errorf("failed to read file content: %w", err)
}
// Step 8: Decrypt file content
decryptedContent, err := e2ee.DecryptFile(encryptedContent, fileKey)
if err != nil {
return nil, fmt.Errorf("failed to decrypt file: %w", err)
}
// Step 9: Create local storage directory structure
dataDir, err := a.config.GetAppDataDirPath(a.ctx)
if err != nil {
return nil, fmt.Errorf("failed to get data directory: %w", err)
}
// Create files directory: <data_dir>/files/<collection_id>/
filesDir := filepath.Join(dataDir, "files", fileData.CollectionID)
if err := os.MkdirAll(filesDir, 0700); err != nil {
return nil, fmt.Errorf("failed to create files directory: %w", err)
}
// Save decrypted file
localFilePath := filepath.Join(filesDir, metadata.Filename)
if err := os.WriteFile(localFilePath, decryptedContent, 0600); err != nil {
return nil, fmt.Errorf("failed to save file: %w", err)
}
// Step 10: Update local file repository with sync status
localFile := &file.File{
ID: fileID,
CollectionID: fileData.CollectionID,
Name: metadata.Filename,
MimeType: metadata.MimeType,
FilePath: localFilePath,
DecryptedSizeInBytes: int64(len(decryptedContent)),
EncryptedSizeInBytes: fileData.EncryptedSizeInBytes,
SyncStatus: file.SyncStatusSynced,
EncryptedFileKey: file.EncryptedFileKeyData{
Ciphertext: fileData.EncryptedFileKey.Ciphertext,
Nonce: fileData.EncryptedFileKey.Nonce,
},
EncryptedMetadata: fileData.EncryptedMetadata,
FileNonce: fileData.FileNonce,
}
// Check if file already exists in local repo
existingFile, _ := a.mustGetFileRepo().Get(fileID)
if existingFile != nil {
if err := a.mustGetFileRepo().Update(localFile); err != nil {
a.logger.Warn("Failed to update local file record", zap.Error(err))
}
} else {
if err := a.mustGetFileRepo().Create(localFile); err != nil {
a.logger.Warn("Failed to create local file record", zap.Error(err))
}
}
a.logger.Info("File onloaded successfully",
zap.String("file_id", fileID),
zap.String("filename", metadata.Filename),
zap.String("local_path", localFilePath),
zap.Int("size", len(decryptedContent)))
return &OnloadFileResult{
FileID: fileID,
Filename: metadata.Filename,
LocalFilePath: localFilePath,
Size: int64(len(decryptedContent)),
Success: true,
Message: "File downloaded for offline access",
}, nil
}
// OffloadFile removes the local copy of a file while keeping it in the cloud
func (a *Application) OffloadFile(fileID string) error {
// Validate input
if err := inputvalidation.ValidateFileID(fileID); err != nil {
return err
}
a.logger.Info("Offloading file to cloud-only", zap.String("file_id", fileID))
// Get the file from local repository
localFile, err := a.mustGetFileRepo().Get(fileID)
if err != nil {
a.logger.Error("Failed to get file from local repo", zap.Error(err))
return fmt.Errorf("file not found locally: %w", err)
}
if localFile == nil {
return fmt.Errorf("file not found in local storage")
}
if !localFile.HasLocalContent() {
a.logger.Info("File already cloud-only, nothing to offload")
return nil
}
// Delete the local file from disk
if localFile.FilePath != "" {
if err := os.Remove(localFile.FilePath); err != nil && !os.IsNotExist(err) {
a.logger.Warn("Failed to delete local file", zap.Error(err), zap.String("path", localFile.FilePath))
// Continue anyway - we'll update the metadata
} else {
a.logger.Info("Deleted local file", zap.String("path", localFile.FilePath))
}
}
// Delete encrypted file if it exists
if localFile.EncryptedFilePath != "" {
if err := os.Remove(localFile.EncryptedFilePath); err != nil && !os.IsNotExist(err) {
a.logger.Warn("Failed to delete encrypted file", zap.Error(err), zap.String("path", localFile.EncryptedFilePath))
}
}
// Delete thumbnail if it exists
if localFile.ThumbnailPath != "" {
if err := os.Remove(localFile.ThumbnailPath); err != nil && !os.IsNotExist(err) {
a.logger.Warn("Failed to delete thumbnail", zap.Error(err), zap.String("path", localFile.ThumbnailPath))
}
}
// Update the local file record to cloud-only status
localFile.FilePath = ""
localFile.EncryptedFilePath = ""
localFile.ThumbnailPath = ""
localFile.SyncStatus = file.SyncStatusCloudOnly
if err := a.mustGetFileRepo().Update(localFile); err != nil {
a.logger.Error("Failed to update file record", zap.Error(err))
return fmt.Errorf("failed to update file record: %w", err)
}
a.logger.Info("File offloaded successfully",
zap.String("file_id", fileID),
zap.String("filename", localFile.Name))
return nil
}
// OpenFile opens a locally stored file with the system's default application
// OpenFile opens a locally stored file with the system's default application.
// The stored path is validated to be inside the application data directory
// before it is handed to any external command.
func (a *Application) OpenFile(fileID string) error {
	// Reject malformed IDs up front.
	if err := inputvalidation.ValidateFileID(fileID); err != nil {
		return err
	}
	a.logger.Info("Opening file", zap.String("file_id", fileID))

	// Fetch the local record; the file must have been downloaded already.
	rec, err := a.mustGetFileRepo().Get(fileID)
	if err != nil {
		a.logger.Error("Failed to get file from local repo", zap.Error(err))
		return fmt.Errorf("file not found locally: %w", err)
	}
	if rec == nil {
		return fmt.Errorf("file not found in local storage")
	}
	if rec.FilePath == "" {
		return fmt.Errorf("file has not been downloaded for offline access")
	}

	// Security: only paths inside the application data directory may be opened.
	appDataDir, err := a.config.GetAppDataDirPath(a.ctx)
	if err != nil {
		a.logger.Error("Failed to get app data directory", zap.Error(err))
		return fmt.Errorf("failed to validate file path: %w", err)
	}
	if err := validatePathWithinDirectory(rec.FilePath, appDataDir); err != nil {
		a.logger.Error("File path validation failed",
			zap.String("file_path", rec.FilePath),
			zap.String("expected_dir", appDataDir),
			zap.Error(err))
		return fmt.Errorf("invalid file path: %w", err)
	}

	// The record may be stale: confirm the file is still on disk.
	if _, err := os.Stat(rec.FilePath); os.IsNotExist(err) {
		return fmt.Errorf("file no longer exists at %s", rec.FilePath)
	}

	// Build the platform-specific "open with default app" command.
	var launcher *exec.Cmd
	switch sysRuntime.GOOS {
	case "darwin":
		launcher = exec.Command("open", rec.FilePath)
	case "windows":
		launcher = exec.Command("cmd", "/c", "start", "", rec.FilePath)
	case "linux":
		launcher = exec.Command("xdg-open", rec.FilePath)
	default:
		return fmt.Errorf("unsupported operating system: %s", sysRuntime.GOOS)
	}

	// Start without waiting: the viewer application owns its own lifetime.
	if err := launcher.Start(); err != nil {
		a.logger.Error("Failed to open file", zap.Error(err), zap.String("path", rec.FilePath))
		return fmt.Errorf("failed to open file: %w", err)
	}

	a.logger.Info("File opened successfully",
		zap.String("file_id", fileID),
		zap.String("path", rec.FilePath))
	return nil
}
// validatePathWithinDirectory checks that a file path is within the expected
// directory. This is a defense-in-depth measure to prevent path traversal
// attacks (e.g. a stored path containing ".." components).
//
// The check is purely lexical: both paths are made absolute (which also
// cleans them), then compared with filepath.Rel. Symlinks are NOT resolved
// here, so a symlink inside expectedDir that points elsewhere would still
// pass — callers needing symlink safety should additionally resolve paths
// with filepath.EvalSymlinks.
//
// Returns nil when filePath equals expectedDir or lies beneath it, and an
// error otherwise.
func validatePathWithinDirectory(filePath, expectedDir string) error {
	// Resolve to absolute paths so relative components cannot confuse the
	// comparison; filepath.Abs applies filepath.Clean as part of its work.
	absFilePath, err := filepath.Abs(filePath)
	if err != nil {
		return fmt.Errorf("failed to resolve file path: %w", err)
	}
	absExpectedDir, err := filepath.Abs(expectedDir)
	if err != nil {
		return fmt.Errorf("failed to resolve expected directory: %w", err)
	}
	// filepath.Rel yields the path from expectedDir to filePath. If that
	// relative path is ".." (or starts with "../"), the target lies outside
	// the directory. Using Rel instead of a raw string-prefix test avoids
	// the classic pitfall where /app/data would match /app/data-other/file.
	rel, err := filepath.Rel(absExpectedDir, absFilePath)
	if err != nil {
		return fmt.Errorf("path is outside application data directory")
	}
	if rel == ".." || strings.HasPrefix(rel, ".."+string(filepath.Separator)) {
		return fmt.Errorf("path is outside application data directory")
	}
	return nil
}

View file

@ -0,0 +1,401 @@
package app
import (
"bytes"
"crypto/sha256"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"path/filepath"
"time"
"github.com/google/uuid"
"github.com/wailsapp/wails/v2/pkg/runtime"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/maplefile/e2ee"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/file"
)
// =============================================================================
// FILE UPLOAD OPERATIONS
// =============================================================================
// SelectFile opens a native file dialog and returns the selected file path.
func (a *Application) SelectFile() (string, error) {
	// Offer a few convenience filters alongside the catch-all pattern.
	opts := runtime.OpenDialogOptions{
		Title: "Select File to Upload",
		Filters: []runtime.FileFilter{
			{DisplayName: "All Files", Pattern: "*.*"},
			{DisplayName: "Images", Pattern: "*.jpg;*.jpeg;*.png;*.gif;*.webp;*.bmp"},
			{DisplayName: "Documents", Pattern: "*.pdf;*.doc;*.docx;*.txt;*.md"},
			{DisplayName: "Videos", Pattern: "*.mp4;*.mov;*.avi;*.mkv;*.webm"},
		},
	}
	selection, err := runtime.OpenFileDialog(a.ctx, opts)
	if err != nil {
		a.logger.Error("Failed to open file dialog", zap.Error(err))
		return "", fmt.Errorf("failed to open file dialog: %w", err)
	}
	return selection, nil
}
// FileUploadInput represents the input for uploading a file.
// It is the payload the frontend passes to UploadFile.
type FileUploadInput struct {
	FilePath     string   `json:"file_path"`     // Path of the local file to read and encrypt
	CollectionID string   `json:"collection_id"` // Target collection that will own the file
	TagIDs       []string `json:"tag_ids,omitempty"` // Tag IDs to assign to this file
}
// FileUploadResult represents the result of a file upload,
// returned to the frontend by UploadFile.
type FileUploadResult struct {
	FileID   string `json:"file_id"`  // Server-assigned file identifier
	Filename string `json:"filename"` // Base name of the uploaded file
	Size     int64  `json:"size"`     // Decrypted (original) size in bytes
	Success  bool   `json:"success"`  // True when the upload completed
	Message  string `json:"message"`  // Human-readable status message
}
// UploadFile encrypts and uploads a file to a collection.
//
// Flow: read the file from disk, fetch and decrypt the collection key with
// the cached master key, generate a fresh file key, encrypt content/metadata/
// file-key with SecretBox (XSalsa20-Poly1305, for web-frontend compatibility),
// register a pending file with the backend, PUT the ciphertext to the returned
// presigned S3 URL, confirm completion, and finally record the file locally
// and index it for search.
//
// Fixes over the previous revision:
//   - logging the presigned URL no longer slices [:50] unconditionally,
//     which panicked when the URL was shorter than 50 bytes;
//   - the "complete upload" response's success flag is now checked instead
//     of being decoded and ignored, so a server-side completion failure is
//     reported as an error rather than a successful upload.
func (a *Application) UploadFile(input FileUploadInput) (*FileUploadResult, error) {
	a.logger.Info("Starting file upload",
		zap.String("file_path", input.FilePath),
		zap.String("collection_id", input.CollectionID))
	// Get current session for authentication
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil {
		a.logger.Error("Failed to get current session", zap.Error(err))
		return nil, fmt.Errorf("not authenticated: %w", err)
	}
	// Get master key from key cache; cleanup must run to release it.
	masterKey, cleanup, err := a.keyCache.GetMasterKey(session.Email)
	if err != nil {
		a.logger.Error("Failed to get master key from cache", zap.Error(err))
		return nil, fmt.Errorf("master key not available - please log in again: %w", err)
	}
	defer cleanup()
	apiClient := a.authService.GetAPIClient()
	// Step 1: Read the file from disk
	fileContent, err := os.ReadFile(input.FilePath)
	if err != nil {
		a.logger.Error("Failed to read file", zap.Error(err))
		return nil, fmt.Errorf("failed to read file: %w", err)
	}
	filename := filepath.Base(input.FilePath)
	fileSize := int64(len(fileContent))
	mimeType := http.DetectContentType(fileContent)
	a.logger.Info("File read successfully",
		zap.String("filename", filename),
		zap.Int64("size", fileSize),
		zap.String("mime_type", mimeType))
	// Step 2: Get collection key (need to fetch collection first)
	a.logger.Info("Step 2: Fetching collection for upload",
		zap.String("collection_id", input.CollectionID),
		zap.String("api_url", apiClient.GetBaseURL()+"/api/v1/collections/"+input.CollectionID))
	collectionReq, err := http.NewRequestWithContext(a.ctx, "GET",
		apiClient.GetBaseURL()+"/api/v1/collections/"+input.CollectionID, nil)
	if err != nil {
		a.logger.Error("Failed to create collection request", zap.Error(err))
		return nil, fmt.Errorf("failed to create collection request: %w", err)
	}
	collectionReq.Header.Set("Authorization", "Bearer "+session.AccessToken)
	collectionResp, err := a.httpClient.Do(collectionReq)
	if err != nil {
		a.logger.Error("Failed to fetch collection", zap.Error(err))
		return nil, fmt.Errorf("failed to fetch collection: %w", err)
	}
	defer collectionResp.Body.Close()
	a.logger.Info("Step 2a: Collection fetch response", zap.Int("status", collectionResp.StatusCode))
	if collectionResp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(collectionResp.Body)
		a.logger.Error("Failed to fetch collection - bad status",
			zap.Int("status", collectionResp.StatusCode),
			zap.String("body", string(body)))
		return nil, fmt.Errorf("failed to fetch collection: %s", string(body))
	}
	var collectionData struct {
		EncryptedCollectionKey struct {
			Ciphertext string `json:"ciphertext"`
			Nonce      string `json:"nonce"`
		} `json:"encrypted_collection_key"`
	}
	if err := json.NewDecoder(collectionResp.Body).Decode(&collectionData); err != nil {
		a.logger.Error("Failed to decode collection response", zap.Error(err))
		return nil, fmt.Errorf("failed to decode collection: %w", err)
	}
	a.logger.Info("Step 2b: Collection data decoded",
		zap.Int("ciphertext_len", len(collectionData.EncryptedCollectionKey.Ciphertext)),
		zap.Int("nonce_len", len(collectionData.EncryptedCollectionKey.Nonce)))
	// Decrypt collection key
	collectionKeyCiphertext, err := base64.StdEncoding.DecodeString(collectionData.EncryptedCollectionKey.Ciphertext)
	if err != nil {
		a.logger.Error("Failed to decode collection key ciphertext", zap.Error(err))
		return nil, fmt.Errorf("failed to decode collection key ciphertext: %w", err)
	}
	collectionKeyNonce, err := base64.StdEncoding.DecodeString(collectionData.EncryptedCollectionKey.Nonce)
	if err != nil {
		a.logger.Error("Failed to decode collection key nonce", zap.Error(err))
		return nil, fmt.Errorf("failed to decode collection key nonce: %w", err)
	}
	// Handle web frontend combined ciphertext format (nonce + encrypted_data)
	actualCollectionKeyCiphertext := extractActualCiphertext(collectionKeyCiphertext, collectionKeyNonce)
	a.logger.Info("Step 2c: Decrypting collection key",
		zap.Int("ciphertext_bytes", len(actualCollectionKeyCiphertext)),
		zap.Int("nonce_bytes", len(collectionKeyNonce)),
		zap.Int("master_key_bytes", len(masterKey)))
	collectionKey, err := e2ee.DecryptCollectionKey(&e2ee.EncryptedKey{
		Ciphertext: actualCollectionKeyCiphertext,
		Nonce:      collectionKeyNonce,
	}, masterKey)
	if err != nil {
		a.logger.Error("Failed to decrypt collection key", zap.Error(err))
		return nil, fmt.Errorf("failed to decrypt collection key: %w", err)
	}
	a.logger.Info("Collection key decrypted successfully", zap.Int("key_length", len(collectionKey)))
	// Step 3: Generate a new file key
	fileKey, err := e2ee.GenerateFileKey()
	if err != nil {
		return nil, fmt.Errorf("failed to generate file key: %w", err)
	}
	// Step 4: Encrypt file content using SecretBox (XSalsa20-Poly1305) for web frontend compatibility
	encryptedContent, err := e2ee.EncryptFileSecretBox(fileContent, fileKey)
	if err != nil {
		return nil, fmt.Errorf("failed to encrypt file: %w", err)
	}
	// Step 5: Encrypt metadata using SecretBox (XSalsa20-Poly1305) for web frontend compatibility
	metadata := &e2ee.FileMetadata{
		Name:     filename,
		MimeType: mimeType,
		Size:     fileSize,
	}
	encryptedMetadata, err := e2ee.EncryptMetadataSecretBox(metadata, fileKey)
	if err != nil {
		return nil, fmt.Errorf("failed to encrypt metadata: %w", err)
	}
	// Step 6: Encrypt file key with collection key using SecretBox for web frontend compatibility
	encryptedFileKey, err := e2ee.EncryptFileKeySecretBox(fileKey, collectionKey)
	if err != nil {
		return nil, fmt.Errorf("failed to encrypt file key: %w", err)
	}
	// Step 7: Compute encrypted hash
	hash := sha256.Sum256(encryptedContent)
	encryptedHash := base64.StdEncoding.EncodeToString(hash[:])
	// Step 8: Generate client-side file ID
	fileID := uuid.New().String()
	// Step 9: Create pending file request
	// NOTE: The web frontend sends ciphertext and nonce as SEPARATE fields (not combined).
	// The ciphertext field contains only the encrypted data (from crypto_secretbox_easy),
	// and the nonce field contains the nonce separately.
	pendingFileReq := map[string]interface{}{
		"id":                 fileID,
		"collection_id":      input.CollectionID,
		"encrypted_metadata": encryptedMetadata,
		"encrypted_file_key": map[string]string{
			"ciphertext": base64.StdEncoding.EncodeToString(encryptedFileKey.Ciphertext),
			"nonce":      base64.StdEncoding.EncodeToString(encryptedFileKey.Nonce),
		},
		"encryption_version":          "xsalsa20-poly1305-v1",
		"encrypted_hash":              encryptedHash,
		"expected_file_size_in_bytes": int64(len(encryptedContent)),
		"content_type":                mimeType,
	}
	// Add tag IDs if provided
	if len(input.TagIDs) > 0 {
		pendingFileReq["tag_ids"] = input.TagIDs
		a.logger.Info("Adding tags to file upload",
			zap.Int("tag_count", len(input.TagIDs)))
	}
	pendingBody, err := json.Marshal(pendingFileReq)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal pending file request: %w", err)
	}
	req, err := http.NewRequestWithContext(a.ctx, "POST",
		apiClient.GetBaseURL()+"/api/v1/files/pending",
		bytes.NewReader(pendingBody))
	if err != nil {
		return nil, fmt.Errorf("failed to create pending file request: %w", err)
	}
	req.Header.Set("Authorization", "Bearer "+session.AccessToken)
	req.Header.Set("Content-Type", "application/json")
	resp, err := a.httpClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("failed to create pending file: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
		body, _ := io.ReadAll(resp.Body)
		a.logger.Error("Failed to create pending file",
			zap.Int("status", resp.StatusCode),
			zap.String("body", string(body)))
		return nil, fmt.Errorf("failed to create pending file: %s", string(body))
	}
	var pendingResp struct {
		File struct {
			ID string `json:"id"`
		} `json:"file"`
		PresignedUploadURL      string `json:"presigned_upload_url"`
		UploadURLExpirationTime string `json:"upload_url_expiration_time"`
		Success                 bool   `json:"success"`
		Message                 string `json:"message"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&pendingResp); err != nil {
		return nil, fmt.Errorf("failed to decode pending file response: %w", err)
	}
	if !pendingResp.Success {
		return nil, fmt.Errorf("failed to create pending file: %s", pendingResp.Message)
	}
	// BUGFIX: truncate the presigned URL for logging only when it is long
	// enough; the previous unconditional [:50] slice panicked on short URLs.
	urlPreview := pendingResp.PresignedUploadURL
	if len(urlPreview) > 50 {
		urlPreview = urlPreview[:50] + "..."
	}
	a.logger.Info("Pending file created, uploading to S3",
		zap.String("file_id", pendingResp.File.ID),
		zap.String("presigned_url", urlPreview))
	// Step 10: Upload encrypted content to S3
	uploadReq, err := http.NewRequestWithContext(a.ctx, "PUT",
		pendingResp.PresignedUploadURL,
		bytes.NewReader(encryptedContent))
	if err != nil {
		return nil, fmt.Errorf("failed to create upload request: %w", err)
	}
	uploadReq.Header.Set("Content-Type", "application/octet-stream")
	uploadReq.ContentLength = int64(len(encryptedContent))
	// Use the large-transfer (no-timeout) client so big uploads are not cut
	// off by the default client timeout.
	uploadResp, err := a.httpClient.DoLargeDownload(uploadReq)
	if err != nil {
		return nil, fmt.Errorf("failed to upload to S3: %w", err)
	}
	defer uploadResp.Body.Close()
	if uploadResp.StatusCode != http.StatusOK && uploadResp.StatusCode != http.StatusCreated {
		body, _ := io.ReadAll(uploadResp.Body)
		a.logger.Error("Failed to upload to S3",
			zap.Int("status", uploadResp.StatusCode),
			zap.String("body", string(body)))
		return nil, fmt.Errorf("failed to upload to S3: status %d", uploadResp.StatusCode)
	}
	a.logger.Info("File uploaded to S3, completing upload")
	// Step 11: Complete the upload
	completeReq := map[string]interface{}{
		"actual_file_size_in_bytes": int64(len(encryptedContent)),
		"upload_confirmed":          true,
	}
	completeBody, err := json.Marshal(completeReq)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal complete request: %w", err)
	}
	completeHTTPReq, err := http.NewRequestWithContext(a.ctx, "POST",
		apiClient.GetBaseURL()+"/api/v1/file/"+pendingResp.File.ID+"/complete",
		bytes.NewReader(completeBody))
	if err != nil {
		return nil, fmt.Errorf("failed to create complete request: %w", err)
	}
	completeHTTPReq.Header.Set("Authorization", "Bearer "+session.AccessToken)
	completeHTTPReq.Header.Set("Content-Type", "application/json")
	completeResp, err := a.httpClient.Do(completeHTTPReq)
	if err != nil {
		return nil, fmt.Errorf("failed to complete upload: %w", err)
	}
	defer completeResp.Body.Close()
	if completeResp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(completeResp.Body)
		a.logger.Error("Failed to complete upload",
			zap.Int("status", completeResp.StatusCode),
			zap.String("body", string(body)))
		return nil, fmt.Errorf("failed to complete upload: %s", string(body))
	}
	var completeRespData struct {
		Success bool   `json:"success"`
		Message string `json:"message"`
	}
	if err := json.NewDecoder(completeResp.Body).Decode(&completeRespData); err != nil {
		return nil, fmt.Errorf("failed to decode complete response: %w", err)
	}
	// BUGFIX: previously the decoded success flag was never checked, so a
	// server-reported completion failure was silently treated as success.
	if !completeRespData.Success {
		return nil, fmt.Errorf("failed to complete upload: %s", completeRespData.Message)
	}
	// Save file metadata to local repository so it appears in dashboard and file list
	localFile := &file.File{
		ID:                   pendingResp.File.ID,
		CollectionID:         input.CollectionID,
		OwnerID:              session.UserID,
		Name:                 filename,
		MimeType:             mimeType,
		DecryptedSizeInBytes: fileSize,
		EncryptedSizeInBytes: int64(len(encryptedContent)),
		FilePath:             input.FilePath, // Original file path
		SyncStatus:           file.SyncStatusSynced,
		State:                file.StateActive,
		CreatedAt:            time.Now(),
		ModifiedAt:           time.Now(),
		LastSyncedAt:         time.Now(),
	}
	if err := a.mustGetFileRepo().Create(localFile); err != nil {
		// Log but don't fail - the upload succeeded, just local tracking failed
		a.logger.Warn("Failed to save file to local repository",
			zap.String("file_id", pendingResp.File.ID),
			zap.Error(err))
	} else {
		a.logger.Info("File saved to local repository",
			zap.String("file_id", pendingResp.File.ID),
			zap.String("filename", filename))
		// Index the file in the search index
		if err := a.indexFileForSearch(pendingResp.File.ID, input.CollectionID, filename, input.TagIDs, fileSize); err != nil {
			a.logger.Warn("Failed to index file in search",
				zap.String("file_id", pendingResp.File.ID),
				zap.Error(err))
		}
	}
	a.logger.Info("File upload completed successfully",
		zap.String("file_id", pendingResp.File.ID),
		zap.String("filename", filename),
		zap.Int64("size", fileSize))
	return &FileUploadResult{
		FileID:   pendingResp.File.ID,
		Filename: filename,
		Size:     fileSize,
		Success:  true,
		Message:  "File uploaded successfully",
	}, nil
}

View file

@ -0,0 +1,225 @@
package app
import (
"encoding/base64"
"fmt"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/maplefile/e2ee"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/utils"
)
// VerifyPassword verifies a password against stored encrypted data.
//
// It derives a KEK from the supplied password using the session's stored salt
// and KDF algorithm, then attempts to decrypt the session's encrypted master
// key; a successful decryption proves the password is correct. On success the
// decrypted master key is also cached for the session so later operations
// don't need to re-run the slow KDF.
//
// Returns (false, nil) for a wrong password; a non-nil error only signals a
// missing session or malformed stored data.
func (a *Application) VerifyPassword(password string) (bool, error) {
	sess, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil || sess == nil {
		return false, fmt.Errorf("no active session")
	}
	// Verification is impossible without the stored salt and ciphertext.
	if sess.Salt == "" || sess.EncryptedMasterKey == "" {
		return false, fmt.Errorf("session missing encrypted data for password verification")
	}

	salt, err := base64.StdEncoding.DecodeString(sess.Salt)
	if err != nil {
		a.logger.Error("Failed to decode salt", zap.Error(err))
		return false, fmt.Errorf("invalid salt encoding")
	}
	encryptedKeyBlob, err := base64.StdEncoding.DecodeString(sess.EncryptedMasterKey)
	if err != nil {
		a.logger.Error("Failed to decode encrypted master key", zap.Error(err))
		return false, fmt.Errorf("invalid master key encoding")
	}

	// Fall back to PBKDF2 for sessions without explicit KDF metadata.
	algorithm := sess.KDFAlgorithm
	if algorithm == "" {
		algorithm = e2ee.PBKDF2Algorithm
	}

	// A derivation failure is treated as "wrong password", not an error.
	keychain, err := e2ee.NewSecureKeyChainWithAlgorithm(password, salt, algorithm)
	if err != nil {
		a.logger.Debug("Password verification failed - could not derive key", zap.String("email", utils.MaskEmail(sess.Email)))
		return false, nil
	}
	defer keychain.Clear()

	// Auto-detect the nonce length so both ChaCha20 (12-byte nonce) and
	// XSalsa20 (24-byte nonce) layouts are accepted.
	nonce, ciphertext, err := e2ee.SplitNonceAndCiphertextAuto(encryptedKeyBlob)
	if err != nil {
		a.logger.Error("Failed to split encrypted master key", zap.Error(err))
		return false, fmt.Errorf("invalid master key format")
	}

	// Decryption failure means the password was wrong.
	masterKey, err := keychain.DecryptMasterKeySecure(&e2ee.EncryptedKey{
		Ciphertext: ciphertext,
		Nonce:      nonce,
	})
	if err != nil {
		a.logger.Debug("Password verification failed - incorrect password", zap.String("email", utils.MaskEmail(sess.Email)))
		return false, nil
	}

	// Copy the key bytes out of the locked buffer BEFORE destroying it;
	// the buffer's memory is invalid after Destroy.
	keyCopy := make([]byte, masterKey.Size())
	copy(keyCopy, masterKey.Bytes())
	masterKey.Destroy()

	// Cache the already-decrypted key; verification succeeds even if the
	// cache write fails.
	if err := a.keyCache.StoreMasterKey(sess.Email, keyCopy); err != nil {
		a.logger.Warn("Failed to cache master key during password verification", zap.Error(err))
	} else {
		a.logger.Info("Master key cached successfully during password verification", zap.String("email", utils.MaskEmail(sess.Email)))
	}
	a.logger.Info("Password verified successfully", zap.String("email", utils.MaskEmail(sess.Email)))
	return true, nil
}
// StorePasswordForSession stores password for current session (used by PasswordPrompt).
func (a *Application) StorePasswordForSession(password string) error {
	sess, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil || sess == nil {
		return fmt.Errorf("no active session")
	}
	if err := a.passwordStore.StorePassword(sess.Email, password); err != nil {
		a.logger.Error("Failed to store password for session", zap.String("email", utils.MaskEmail(sess.Email)), zap.Error(err))
		return err
	}
	a.logger.Info("Password re-stored in secure RAM after app restart", zap.String("email", utils.MaskEmail(sess.Email)))
	// Master key caching intentionally happens in VerifyPassword() instead:
	// verification already derives the KEK and decrypts the master key, so
	// caching there avoids running the slow KDF a second time here.
	return nil
}
// GetStoredPassword retrieves the stored password for current session.
func (a *Application) GetStoredPassword() (string, error) {
	sess, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil || sess == nil {
		return "", fmt.Errorf("no active session")
	}
	return a.passwordStore.GetPassword(sess.Email)
}
// HasStoredPassword checks if password is stored for current session.
// Returns false when there is no active session.
func (a *Application) HasStoredPassword() bool {
	sess, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil || sess == nil {
		return false
	}
	return a.passwordStore.HasPassword(sess.Email)
}
// ClearStoredPassword clears the stored password (optional, for
// security-sensitive operations).
func (a *Application) ClearStoredPassword() error {
	sess, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil || sess == nil {
		return fmt.Errorf("no active session")
	}
	return a.passwordStore.ClearPassword(sess.Email)
}
// cacheMasterKeyFromPassword decrypts and caches the master key for the session
// This is an internal helper method used by CompleteLogin and StorePasswordForSession
//
// Flow: derive a KEK from (password, salt) via the given KDF algorithm,
// split the base64-decoded encrypted master key into nonce + ciphertext
// (24-byte XSalsa20 secretbox layout), decrypt into a locked buffer, copy
// the plaintext key out, destroy the buffer, and store the copy in the
// in-memory key cache keyed by email.
//
// Parameters:
//   - email: key under which the decrypted master key is cached
//   - password: user password used to derive the KEK
//   - saltBase64, encryptedMasterKeyBase64: std-base64-encoded inputs
//   - kdfAlgorithm: KDF identifier; empty string defaults to PBKDF2-SHA256
func (a *Application) cacheMasterKeyFromPassword(email, password, saltBase64, encryptedMasterKeyBase64, kdfAlgorithm string) error {
	// Default to PBKDF2-SHA256
	if kdfAlgorithm == "" {
		kdfAlgorithm = e2ee.PBKDF2Algorithm
	}
	// Decode base64 inputs
	salt, err := base64.StdEncoding.DecodeString(saltBase64)
	if err != nil {
		return fmt.Errorf("invalid salt encoding: %w", err)
	}
	encryptedMasterKeyBytes, err := base64.StdEncoding.DecodeString(encryptedMasterKeyBase64)
	if err != nil {
		return fmt.Errorf("invalid master key encoding: %w", err)
	}
	// Create secure keychain to derive KEK using the correct KDF algorithm
	keychain, err := e2ee.NewSecureKeyChainWithAlgorithm(password, salt, kdfAlgorithm)
	if err != nil {
		return fmt.Errorf("failed to derive KEK: %w", err)
	}
	// Clear derived key material when this function returns.
	defer keychain.Clear()
	// Split nonce and ciphertext using 24-byte nonce (XSalsa20 secretbox format from web frontend)
	masterKeyNonce, masterKeyCiphertext, err := e2ee.SplitNonceAndCiphertextSecretBox(encryptedMasterKeyBytes)
	if err != nil {
		return fmt.Errorf("invalid master key format: %w", err)
	}
	encryptedMasterKeyStruct := &e2ee.EncryptedKey{
		Ciphertext: masterKeyCiphertext,
		Nonce:      masterKeyNonce,
	}
	// Decrypt master key into secure buffer (auto-detects cipher based on nonce size)
	masterKey, err := keychain.DecryptMasterKeySecure(encryptedMasterKeyStruct)
	if err != nil {
		return fmt.Errorf("failed to decrypt master key: %w", err)
	}
	// CRITICAL: Copy bytes BEFORE destroying the buffer to avoid SIGBUS fault
	// masterKey.Bytes() returns a pointer to LockedBuffer memory which becomes
	// invalid after Destroy() is called
	masterKeyBytes := make([]byte, masterKey.Size())
	copy(masterKeyBytes, masterKey.Bytes())
	// Now safely destroy the secure buffer
	masterKey.Destroy()
	// Store the copied bytes in cache
	if err := a.keyCache.StoreMasterKey(email, masterKeyBytes); err != nil {
		return fmt.Errorf("failed to cache master key: %w", err)
	}
	a.logger.Info("Master key cached successfully for session", zap.String("email", utils.MaskEmail(email)))
	return nil
}
// GetCachedMasterKey retrieves the cached master key for the current session.
// It is exported so the frontend can drive file operations that need the key.
// Returns the key bytes and a cleanup function that MUST be called when done.
func (a *Application) GetCachedMasterKey() ([]byte, func(), error) {
	sess, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil || sess == nil {
		return nil, nil, fmt.Errorf("no active session")
	}
	return a.keyCache.GetMasterKey(sess.Email)
}
// HasCachedMasterKey checks if a master key is cached for the current session.
// Returns false when there is no active session.
func (a *Application) HasCachedMasterKey() bool {
	sess, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil || sess == nil {
		return false
	}
	return a.keyCache.HasMasterKey(sess.Email)
}

View file

@ -0,0 +1,324 @@
// app_search.go contains the search-related application layer code.
//
// This file provides:
// - Search index initialization and rebuild logic
// - Wails bindings for frontend search functionality
// - File and collection indexing helpers
//
// The search feature uses Bleve for local full-text search. Each user has their
// own isolated search index stored in their local application data directory.
// Search results are deduplicated by filename to avoid showing the same file
// multiple times when it exists in multiple collections.
package app
import (
"fmt"
"time"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/file"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/search"
)
// =============================================================================
// SEARCH INDEXING OPERATIONS
// These functions are called internally when files/collections are created,
// updated, or when the index needs to be rebuilt.
// =============================================================================
// indexFileForSearch indexes a single file in the search index.
// This is called when a new file is uploaded so it becomes searchable immediately.
func (a *Application) indexFileForSearch(fileID, collectionID, filename string, tags []string, size int64) error {
	// Denormalize the collection name into the document so results can show
	// it without a second lookup. Best-effort: if the collection cannot be
	// resolved, the document simply carries an empty name.
	var collectionName string
	if repo := a.getCollectionRepo(); repo != nil {
		if col, err := repo.Get(collectionID); err == nil && col != nil {
			collectionName = col.Name
		}
	}

	doc := &search.FileDocument{
		ID:             fileID,
		Filename:       filename,
		Description:    "", // No description field in current implementation
		CollectionID:   collectionID,
		CollectionName: collectionName,
		Tags:           tags,
		Size:           size,
		CreatedAt:      time.Now(),
		Type:           "file",
	}
	return a.searchService.IndexFile(doc)
}
// indexCollectionForSearch indexes a collection in the search index
func (a *Application) indexCollectionForSearch(collectionID, name string, tags []string, fileCount int) error {
	// Build the search document for this collection and hand it to the
	// search service for indexing.
	doc := &search.CollectionDocument{
		ID:          collectionID,
		Name:        name,
		Description: "", // No description field in current implementation
		Tags:        tags,
		FileCount:   fileCount,
		CreatedAt:   time.Now(),
		Type:        "collection",
	}
	return a.searchService.IndexCollection(doc)
}
// InitializeSearchIndex initializes the search index for the current user.
// This can be called manually if the index needs to be initialized.
func (a *Application) InitializeSearchIndex() error {
	a.logger.Info("Manually initializing search index")
	// The index is per-user, so we need the logged-in user's email to know
	// which index to open.
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil || session == nil {
		return fmt.Errorf("no active session found")
	}
	// Open/create the index itself.
	if initErr := a.searchService.Initialize(a.ctx, session.Email); initErr != nil {
		a.logger.Error("Failed to initialize search index", zap.Error(initErr))
		return fmt.Errorf("failed to initialize search index: %w", initErr)
	}
	a.logger.Info("Search index initialized successfully")
	// Populate the fresh index from whatever files exist locally.
	if rebuildErr := a.rebuildSearchIndexForUser(session.Email); rebuildErr != nil {
		a.logger.Warn("Failed to rebuild search index", zap.Error(rebuildErr))
		return fmt.Errorf("failed to rebuild search index: %w", rebuildErr)
	}
	return nil
}
// rebuildSearchIndexForUser rebuilds the entire search index from the local file repository.
// This is called on app startup and after login to ensure the search index is up-to-date.
// The rebuild process:
//  1. Lists all files from the local repository
//  2. Deduplicates files by ID (in case of repository corruption)
//  3. Skips deleted files
//  4. Passes all files to the search service for batch indexing
func (a *Application) rebuildSearchIndexForUser(userEmail string) error {
	a.logger.Info("Rebuilding search index from local data", zap.String("email", userEmail))
	fileRepo := a.getFileRepo()
	if fileRepo == nil {
		return fmt.Errorf("file repository not available")
	}
	// Get all local files
	localFiles, err := fileRepo.List()
	if err != nil {
		a.logger.Error("Failed to list files for search index rebuild", zap.Error(err))
		return fmt.Errorf("failed to list files: %w", err)
	}
	// The collection repo lookup is hoisted out of the loop, and collection
	// names are memoized so each collection is fetched at most once instead
	// of once per file (many files typically share a collection).
	collectionRepo := a.getCollectionRepo()
	collectionNames := make(map[string]string)
	lookupCollectionName := func(collectionID string) string {
		if name, ok := collectionNames[collectionID]; ok {
			return name
		}
		name := ""
		if collectionRepo != nil {
			if collection, getErr := collectionRepo.Get(collectionID); getErr == nil && collection != nil {
				name = collection.Name
			}
		}
		collectionNames[collectionID] = name
		return name
	}
	// Convert to search documents - use map to deduplicate by ID
	fileDocumentsMap := make(map[string]*search.FileDocument, len(localFiles))
	for _, f := range localFiles {
		// Skip deleted files
		if f.State == file.StateDeleted {
			continue
		}
		// Check for duplicates in local file repo
		if _, exists := fileDocumentsMap[f.ID]; exists {
			a.logger.Warn("Duplicate file found in local repository",
				zap.String("id", f.ID),
				zap.String("name", f.Name))
			continue
		}
		fileDocumentsMap[f.ID] = &search.FileDocument{
			ID:             f.ID,
			Filename:       f.Name,
			Description:    "",
			CollectionID:   f.CollectionID,
			CollectionName: lookupCollectionName(f.CollectionID),
			Tags:           []string{}, // Tags not stored in file entity currently
			Size:           f.DecryptedSizeInBytes,
			CreatedAt:      f.CreatedAt,
			Type:           "file",
		}
	}
	// Convert map to slice
	fileDocuments := make([]*search.FileDocument, 0, len(fileDocumentsMap))
	for _, doc := range fileDocumentsMap {
		fileDocuments = append(fileDocuments, doc)
	}
	a.logger.Info("Prepared files for indexing",
		zap.Int("total_from_repo", len(localFiles)),
		zap.Int("unique_files", len(fileDocuments)))
	// For now, we don't index collections separately since they're fetched from cloud
	// Collections will be indexed when they're explicitly created/updated
	collectionDocuments := []*search.CollectionDocument{}
	// Rebuild the index
	if err := a.searchService.RebuildIndex(userEmail, fileDocuments, collectionDocuments); err != nil {
		a.logger.Error("Failed to rebuild search index", zap.Error(err))
		return fmt.Errorf("failed to rebuild search index: %w", err)
	}
	a.logger.Info("Search index rebuilt successfully",
		zap.Int("files_indexed", len(fileDocuments)),
		zap.Int("collections_indexed", len(collectionDocuments)))
	return nil
}
// =============================================================================
// WAILS BINDINGS - Exposed to Frontend
// =============================================================================
// SearchInput represents the input for search
type SearchInput struct {
	Query string `json:"query"`           // Bleve query string; must be non-empty
	Limit int    `json:"limit,omitempty"` // maximum results per type; 0 means "use default"
}
// SearchResultData represents search results for the frontend
type SearchResultData struct {
	Files            []FileSearchResult       `json:"files"`             // matching files (deduplicated by filename)
	Collections      []CollectionSearchResult `json:"collections"`       // matching collections
	TotalFiles       int                      `json:"total_files"`       // len(Files) after deduplication
	TotalCollections int                      `json:"total_collections"` // len(Collections)
	TotalHits        uint64                   `json:"total_hits"`        // raw hit count reported by the search service
	MaxScore         float64                  `json:"max_score"`         // highest relevance score in the result set
	Query            string                   `json:"query"`             // the query that produced these results
}
// FileSearchResult represents a file in search results
type FileSearchResult struct {
	ID             string   `json:"id"`
	Filename       string   `json:"filename"`
	CollectionID   string   `json:"collection_id"`
	CollectionName string   `json:"collection_name"` // denormalized for display; may be empty
	Tags           []string `json:"tags"`
	Size           int64    `json:"size"`       // decrypted size in bytes
	CreatedAt      string   `json:"created_at"` // RFC 3339 timestamp
}
// CollectionSearchResult represents a collection in search results
type CollectionSearchResult struct {
	ID        string   `json:"id"`
	Name      string   `json:"name"`
	Tags      []string `json:"tags"`
	FileCount int      `json:"file_count"`
	CreatedAt string   `json:"created_at"` // RFC 3339 timestamp
}
// Search performs a full-text search across files and collections.
// This is the main Wails binding exposed to the frontend for search functionality.
//
// Features:
// - Case-insensitive substring matching (e.g., "mesh" finds "meshtastic")
// - Deduplication by filename (same filename in multiple collections shows once)
// - Auto-initialization if search index is not ready
// - Support for Bleve query syntax (+, -, "", *, ?)
func (a *Application) Search(input SearchInput) (*SearchResultData, error) {
	a.logger.Info("Performing search", zap.String("query", input.Query))
	// Validate input
	if input.Query == "" {
		return nil, fmt.Errorf("search query cannot be empty")
	}
	// Clamp non-positive limits to the default: 0 means "unset" from the
	// frontend, and a negative limit is meaningless to pass to Bleve.
	limit := input.Limit
	if limit <= 0 {
		limit = 50
	}
	// Perform search
	result, err := a.searchService.Search(input.Query, limit)
	if err != nil {
		// If search index is not initialized, try to initialize it automatically.
		// NOTE(review): matching on the error string is fragile; if the search
		// package exports a sentinel error, prefer errors.Is here.
		if err.Error() == "search index not initialized" {
			a.logger.Warn("Search index not initialized, attempting to initialize now")
			if initErr := a.InitializeSearchIndex(); initErr != nil {
				a.logger.Error("Failed to auto-initialize search index", zap.Error(initErr))
				return nil, fmt.Errorf("search index not initialized. Please log out and log back in, or contact support")
			}
			// Retry search after initialization
			result, err = a.searchService.Search(input.Query, limit)
			if err != nil {
				a.logger.Error("Search failed after auto-initialization", zap.String("query", input.Query), zap.Error(err))
				return nil, fmt.Errorf("search failed: %w", err)
			}
		} else {
			a.logger.Error("Search failed", zap.String("query", input.Query), zap.Error(err))
			return nil, fmt.Errorf("search failed: %w", err)
		}
	}
	// Convert to frontend format with deduplication by filename.
	// Only show one file per unique filename (first occurrence wins).
	files := make([]FileSearchResult, 0, len(result.Files))
	seenFilenames := make(map[string]bool)
	for _, f := range result.Files {
		// Skip if we've already seen this filename
		if seenFilenames[f.Filename] {
			continue
		}
		seenFilenames[f.Filename] = true
		files = append(files, FileSearchResult{
			ID:             f.ID,
			Filename:       f.Filename,
			CollectionID:   f.CollectionID,
			CollectionName: f.CollectionName,
			Tags:           f.Tags,
			Size:           f.Size,
			CreatedAt:      f.CreatedAt.Format(time.RFC3339),
		})
	}
	collections := make([]CollectionSearchResult, 0, len(result.Collections))
	for _, c := range result.Collections {
		collections = append(collections, CollectionSearchResult{
			ID:        c.ID,
			Name:      c.Name,
			Tags:      c.Tags,
			FileCount: c.FileCount,
			CreatedAt: c.CreatedAt.Format(time.RFC3339),
		})
	}
	a.logger.Info("Search completed",
		zap.String("query", input.Query),
		zap.Int("files_found", len(files)),
		zap.Int("collections_found", len(collections)))
	return &SearchResultData{
		Files:            files,
		Collections:      collections,
		TotalFiles:       len(files),
		TotalCollections: len(collections),
		TotalHits:        result.TotalHits,
		MaxScore:         result.MaxScore,
		Query:            input.Query,
	}, nil
}

View file

@ -0,0 +1,38 @@
package app
// GetTheme returns the current theme setting.
// Thin Wails binding that delegates to the config service using the
// application context.
func (a *Application) GetTheme() (string, error) {
	return a.config.GetTheme(a.ctx)
}
// SetTheme updates the theme setting.
// Thin Wails binding; validation (if any) is the config service's concern.
func (a *Application) SetTheme(theme string) error {
	return a.config.SetTheme(a.ctx, theme)
}
// GetWindowSize returns the configured window size as a map with
// "width" and "height" keys, in the shape the frontend expects.
func (a *Application) GetWindowSize() (map[string]int, error) {
	w, h, err := a.config.GetWindowSize(a.ctx)
	if err != nil {
		return nil, err
	}
	return map[string]int{"width": w, "height": h}, nil
}
// SetWindowSize updates the window size configuration.
// Thin Wails binding that delegates to the config service.
func (a *Application) SetWindowSize(width, height int) error {
	return a.config.SetWindowSize(a.ctx, width, height)
}
// GetCloudProviderAddress returns the backend API URL.
// Thin Wails binding that delegates to the config service.
func (a *Application) GetCloudProviderAddress() (string, error) {
	return a.config.GetCloudProviderAddress(a.ctx)
}
// SetCloudProviderAddress updates the backend API URL.
// Thin Wails binding; URL validation (if any) is the config service's concern.
func (a *Application) SetCloudProviderAddress(address string) error {
	return a.config.SetCloudProviderAddress(a.ctx, address)
}

View file

@ -0,0 +1,148 @@
package app
import (
"fmt"
"time"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/sync"
)
// SyncStatusData represents the current sync status for the frontend
type SyncStatusData struct {
	IsSyncing         bool   `json:"is_syncing"`                // NOTE(review): currently always false in GetSyncStatus — confirm intent
	LastSyncTime      string `json:"last_sync_time,omitempty"`  // RFC 3339 timestamp
	LastSyncSuccess   bool   `json:"last_sync_success"`
	LastSyncError     string `json:"last_sync_error,omitempty"`
	CollectionsSynced bool   `json:"collections_synced"`
	FilesSynced       bool   `json:"files_synced"`
	FullySynced       bool   `json:"fully_synced"` // both collections and files fully synced
}
// SyncResultData represents the result of a sync operation
type SyncResultData struct {
	CollectionsProcessed int      `json:"collections_processed"`
	CollectionsAdded     int      `json:"collections_added"`
	CollectionsUpdated   int      `json:"collections_updated"`
	CollectionsDeleted   int      `json:"collections_deleted"`
	FilesProcessed       int      `json:"files_processed"`
	FilesAdded           int      `json:"files_added"`
	FilesUpdated         int      `json:"files_updated"`
	FilesDeleted         int      `json:"files_deleted"`
	Errors               []string `json:"errors,omitempty"` // per-item errors; sync may partially succeed
}
// GetSyncStatus returns the current sync status
func (a *Application) GetSyncStatus() (*SyncStatusData, error) {
	status, err := a.syncService.GetSyncStatus(a.ctx)
	if err != nil {
		a.logger.Error("Failed to get sync status", zap.Error(err))
		return nil, fmt.Errorf("failed to get sync status: %w", err)
	}
	// NOTE(review): IsSyncing, LastSyncTime, LastSyncSuccess and
	// LastSyncError are hard-coded placeholders (false / now / true / "")
	// rather than values from the sync service, so the frontend always sees
	// "a successful sync happened just now". Confirm whether the sync
	// service can expose the real values and wire them through.
	return &SyncStatusData{
		IsSyncing:         false,
		LastSyncTime:      time.Now().Format(time.RFC3339),
		LastSyncSuccess:   true,
		LastSyncError:     "",
		CollectionsSynced: status.CollectionsSynced,
		FilesSynced:       status.FilesSynced,
		FullySynced:       status.FullySynced,
	}, nil
}
// TriggerSync triggers a full sync of collections and files.
//
// It delegates to TriggerSyncWithResult so that the session lookup,
// password retrieval, and sync parameters live in exactly one place
// (previously the two methods duplicated ~30 lines of identical logic),
// then logs a summary of the outcome.
func (a *Application) TriggerSync() error {
	a.logger.Info("Manual sync triggered")
	result, err := a.TriggerSyncWithResult()
	if err != nil {
		// Already logged and wrapped by TriggerSyncWithResult.
		return err
	}
	a.logger.Info("Sync completed",
		zap.Int("collections_added", result.CollectionsAdded),
		zap.Int("files_added", result.FilesAdded),
		zap.Int("errors", len(result.Errors)))
	return nil
}
// TriggerSyncWithResult triggers a full sync and returns the result
func (a *Application) TriggerSyncWithResult() (*SyncResultData, error) {
	a.logger.Info("Manual sync with result triggered")
	// Syncing requires an authenticated session.
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil {
		a.logger.Error("Failed to get session for sync", zap.Error(err))
		return nil, fmt.Errorf("not authenticated: %w", err)
	}
	// Decryption during sync needs the user's stored password; without it
	// the sync still runs but skips decryption.
	var password string
	if a.passwordStore.HasPassword(session.Email) {
		if password, err = a.passwordStore.GetPassword(session.Email); err != nil {
			a.logger.Warn("Failed to get stored password, sync will skip decryption", zap.Error(err))
		}
	} else {
		a.logger.Warn("No stored password, sync will skip decryption")
	}
	syncInput := &sync.SyncInput{
		BatchSize:  50,
		MaxBatches: 100,
		Password:   password,
	}
	outcome, err := a.syncService.SyncAll(a.ctx, syncInput)
	if err != nil {
		a.logger.Error("Sync failed", zap.Error(err))
		return nil, fmt.Errorf("sync failed: %w", err)
	}
	// Copy the service result into the frontend-facing shape.
	return &SyncResultData{
		CollectionsProcessed: outcome.CollectionsProcessed,
		CollectionsAdded:     outcome.CollectionsAdded,
		CollectionsUpdated:   outcome.CollectionsUpdated,
		CollectionsDeleted:   outcome.CollectionsDeleted,
		FilesProcessed:       outcome.FilesProcessed,
		FilesAdded:           outcome.FilesAdded,
		FilesUpdated:         outcome.FilesUpdated,
		FilesDeleted:         outcome.FilesDeleted,
		Errors:               outcome.Errors,
	}, nil
}
// ResetSync resets all sync state for a fresh sync.
// Delegates to the sync service; the next sync after this runs from scratch.
func (a *Application) ResetSync() error {
	a.logger.Info("Resetting sync state")
	return a.syncService.ResetSync(a.ctx)
}

View file

@ -0,0 +1,861 @@
package app
import (
"bytes"
"encoding/base64"
"fmt"
"time"
"github.com/google/uuid"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/maplefile/client"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/maplefile/e2ee"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/inputvalidation"
)
// ============================================================================
// Tag Management
// ============================================================================
// TagData represents a decrypted tag for the frontend
type TagData struct {
	ID         string `json:"id"`
	Name       string `json:"name"`  // decrypted plaintext name
	Color      string `json:"color"` // decrypted plaintext color value
	CreatedAt  string `json:"created_at"`  // RFC 3339 timestamp
	ModifiedAt string `json:"modified_at"` // RFC 3339 timestamp
	Version    uint64 `json:"version"`
	State      string `json:"state"` // e.g. "active"
}
// ListTags fetches all tags for the current user and decrypts them
func (a *Application) ListTags() ([]*TagData, error) {
	apiClient := a.authService.GetAPIClient()
	if apiClient == nil {
		return nil, fmt.Errorf("API client not available")
	}
	// A live, unexpired session is required to authenticate the API call.
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil || session == nil {
		return nil, fmt.Errorf("no active session - please log in")
	}
	if !session.IsValid() {
		return nil, fmt.Errorf("session expired - please log in again")
	}
	apiClient.SetTokens(session.AccessToken, session.RefreshToken)
	// Fetch the (still encrypted) tags from the backend.
	encryptedTags, err := apiClient.ListTags(a.ctx)
	if err != nil {
		a.logger.Error("Failed to fetch tags from API", zap.Error(err))
		return nil, fmt.Errorf("failed to fetch tags: %w", err)
	}
	if len(encryptedTags) == 0 {
		return []*TagData{}, nil
	}
	// The master key unwraps each per-tag key during decryption.
	masterKey, cleanup, err := a.keyCache.GetMasterKey(session.Email)
	if err != nil {
		a.logger.Error("Failed to get master key", zap.Error(err))
		return nil, fmt.Errorf("failed to get master key: %w", err)
	}
	defer cleanup()
	out := make([]*TagData, 0, len(encryptedTags))
	for _, t := range encryptedTags {
		plain, decErr := a.decryptTag(t, masterKey)
		if decErr != nil {
			// Skip tags that fail to decrypt rather than failing the list.
			a.logger.Error("Failed to decrypt tag",
				zap.String("tag_id", t.ID),
				zap.Error(decErr))
			continue
		}
		out = append(out, plain)
	}
	a.logger.Info("Tags fetched and decrypted successfully",
		zap.Int("total", len(encryptedTags)),
		zap.Int("decrypted", len(out)))
	return out, nil
}
// decryptTag decrypts a single tag using the master key.
//
// Decryption is two-layered: the master key unwraps the per-tag key
// (XSalsa20-Poly1305 SecretBox), and the tag key then decrypts the
// individual name/color fields via decryptTagField.
//
// Base64 handling is lenient: both nonce and ciphertext are tried as
// standard base64 first, then URL-safe unpadded, because different
// clients have produced both encodings.
func (a *Application) decryptTag(tag *client.Tag, masterKey []byte) (*TagData, error) {
	// Decode encrypted tag key
	if tag.EncryptedTagKey == nil {
		return nil, fmt.Errorf("tag has no encrypted tag key")
	}
	// Decode base64 nonce and ciphertext
	keyNonce, err := base64.StdEncoding.DecodeString(tag.EncryptedTagKey.Nonce)
	if err != nil {
		// Try URL-safe encoding without padding
		keyNonce, err = base64.RawURLEncoding.DecodeString(tag.EncryptedTagKey.Nonce)
		if err != nil {
			return nil, fmt.Errorf("failed to decode tag key nonce: %w", err)
		}
	}
	keyCiphertext, err := base64.StdEncoding.DecodeString(tag.EncryptedTagKey.Ciphertext)
	if err != nil {
		// Try URL-safe encoding without padding
		keyCiphertext, err = base64.RawURLEncoding.DecodeString(tag.EncryptedTagKey.Ciphertext)
		if err != nil {
			return nil, fmt.Errorf("failed to decode tag key ciphertext: %w", err)
		}
	}
	// Extract actual ciphertext (skip nonce if it's prepended).
	// CreateTag stores base64(nonce || box), so the prefix check strips the
	// nonce copy; other producers may store the box alone.
	var actualCiphertext []byte
	if len(keyCiphertext) > len(keyNonce) && bytes.Equal(keyCiphertext[:len(keyNonce)], keyNonce) {
		// Nonce is prepended to ciphertext
		actualCiphertext = keyCiphertext[len(keyNonce):]
	} else {
		actualCiphertext = keyCiphertext
	}
	// Decrypt tag key using XSalsa20-Poly1305 (SecretBox)
	tagKey, err := e2ee.DecryptTagKey(&e2ee.EncryptedKey{
		Ciphertext: actualCiphertext,
		Nonce:      keyNonce,
	}, masterKey)
	if err != nil {
		return nil, fmt.Errorf("failed to decrypt tag key: %w", err)
	}
	// Decrypt tag name
	name, err := decryptTagField(tag.EncryptedName, tagKey)
	if err != nil {
		return nil, fmt.Errorf("failed to decrypt tag name: %w", err)
	}
	// Decrypt tag color
	color, err := decryptTagField(tag.EncryptedColor, tagKey)
	if err != nil {
		return nil, fmt.Errorf("failed to decrypt tag color: %w", err)
	}
	return &TagData{
		ID:         tag.ID,
		Name:       name,
		Color:      color,
		CreatedAt:  tag.CreatedAt.Format(time.RFC3339),
		ModifiedAt: tag.ModifiedAt.Format(time.RFC3339),
		Version:    tag.Version,
		State:      tag.State,
	}, nil
}
// decryptTagField decrypts an encrypted tag field (name or color).
// Format: "ciphertext:nonce" both in base64.
func decryptTagField(encryptedField string, tagKey []byte) (string, error) {
	// The field must split into exactly two base64 segments.
	segments := bytes.Split([]byte(encryptedField), []byte(":"))
	if len(segments) != 2 {
		return "", fmt.Errorf("invalid encrypted field format (expected 'ciphertext:nonce')")
	}
	// Both halves may be URL-safe unpadded base64 (preferred, matches the
	// web app) or standard base64; try URL-safe first.
	decode := func(s string) ([]byte, error) {
		if raw, err := base64.RawURLEncoding.DecodeString(s); err == nil {
			return raw, nil
		}
		return base64.StdEncoding.DecodeString(s)
	}
	ciphertext, err := decode(string(segments[0]))
	if err != nil {
		return "", fmt.Errorf("failed to decode ciphertext: %w", err)
	}
	nonce, err := decode(string(segments[1]))
	if err != nil {
		return "", fmt.Errorf("failed to decode nonce: %w", err)
	}
	// Decrypt using XSalsa20-Poly1305
	plaintext, err := e2ee.DecryptWithSecretBox(ciphertext, nonce, tagKey)
	if err != nil {
		return "", fmt.Errorf("failed to decrypt field: %w", err)
	}
	return string(plaintext), nil
}
// CreateTag creates a new tag with encrypted name and color.
//
// E2EE flow: a fresh per-tag key encrypts the name/color fields, and the
// master key wraps the tag key before anything is sent to the backend, so
// the server never sees plaintext.
func (a *Application) CreateTag(name, color string) (*TagData, error) {
	// Get API client from auth service
	apiClient := a.authService.GetAPIClient()
	if apiClient == nil {
		return nil, fmt.Errorf("API client not available")
	}
	// Ensure we have a valid session with tokens
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil || session == nil {
		return nil, fmt.Errorf("no active session - please log in")
	}
	if !session.IsValid() {
		return nil, fmt.Errorf("session expired - please log in again")
	}
	// Ensure tokens are set in the API client
	apiClient.SetTokens(session.AccessToken, session.RefreshToken)
	// Get master key for encryption
	masterKey, cleanup, err := a.keyCache.GetMasterKey(session.Email)
	if err != nil {
		a.logger.Error("Failed to get master key", zap.Error(err))
		return nil, fmt.Errorf("failed to get master key: %w", err)
	}
	defer cleanup()
	// Generate new tag key (32 bytes for XSalsa20-Poly1305)
	tagKey := e2ee.GenerateKey()
	// Encrypt tag name
	encryptedName, err := encryptTagField(name, tagKey)
	if err != nil {
		return nil, fmt.Errorf("failed to encrypt tag name: %w", err)
	}
	// Encrypt tag color
	encryptedColor, err := encryptTagField(color, tagKey)
	if err != nil {
		return nil, fmt.Errorf("failed to encrypt tag color: %w", err)
	}
	// Encrypt tag key with master key
	encryptedTagKey, err := e2ee.EncryptTagKeySecretBox(tagKey, masterKey)
	if err != nil {
		return nil, fmt.Errorf("failed to encrypt tag key: %w", err)
	}
	// Prepare API request. The tag ID and timestamps are client-generated.
	tagID := uuid.New().String()
	now := time.Now()
	input := &client.CreateTagInput{
		ID:             tagID,
		EncryptedName:  encryptedName,
		EncryptedColor: encryptedColor,
		EncryptedTagKey: &client.EncryptedTagKey{
			// Ciphertext is base64(nonce || box); decryptTag detects and
			// strips the prepended nonce on the way back.
			Ciphertext: base64.StdEncoding.EncodeToString(append(encryptedTagKey.Nonce, encryptedTagKey.Ciphertext...)),
			Nonce:      base64.StdEncoding.EncodeToString(encryptedTagKey.Nonce),
			KeyVersion: 1,
		},
		CreatedAt:  now.Format(time.RFC3339),
		ModifiedAt: now.Format(time.RFC3339),
		Version:    1,
		State:      "active",
	}
	// Create tag via API
	tag, err := apiClient.CreateTag(a.ctx, input)
	if err != nil {
		a.logger.Error("Failed to create tag", zap.Error(err))
		return nil, fmt.Errorf("failed to create tag: %w", err)
	}
	a.logger.Info("Tag created successfully", zap.String("tag_id", tag.ID))
	// Return the plaintext name/color we already have; no need to decrypt
	// the server's response.
	return &TagData{
		ID:         tag.ID,
		Name:       name,
		Color:      color,
		CreatedAt:  tag.CreatedAt.Format(time.RFC3339),
		ModifiedAt: tag.ModifiedAt.Format(time.RFC3339),
		Version:    tag.Version,
		State:      tag.State,
	}, nil
}
// encryptTagField encrypts a tag field (name or color) with the tag key.
// Returns format: "ciphertext:nonce" both in base64.
func encryptTagField(plaintext string, tagKey []byte) (string, error) {
	box, err := e2ee.EncryptWithSecretBox([]byte(plaintext), tagKey)
	if err != nil {
		return "", err
	}
	// URL-safe, unpadded base64 to match the web app's encoding.
	return base64.RawURLEncoding.EncodeToString(box.Ciphertext) +
		":" +
		base64.RawURLEncoding.EncodeToString(box.Nonce), nil
}
// UpdateTag updates an existing tag's name and/or color.
//
// The existing per-tag key is reused: it is fetched (wrapped) from the
// server, unwrapped with the master key, used to encrypt the new field
// values, and sent back unchanged.
//
// NOTE(review): the key-unwrap logic below duplicates decryptTag's
// handling (dual base64 decode + prepended-nonce detection); consider
// extracting a shared helper.
func (a *Application) UpdateTag(tagID, name, color string) (*TagData, error) {
	// Get API client from auth service
	apiClient := a.authService.GetAPIClient()
	if apiClient == nil {
		return nil, fmt.Errorf("API client not available")
	}
	// Ensure we have a valid session with tokens
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil || session == nil {
		return nil, fmt.Errorf("no active session - please log in")
	}
	if !session.IsValid() {
		return nil, fmt.Errorf("session expired - please log in again")
	}
	// Ensure tokens are set in the API client
	apiClient.SetTokens(session.AccessToken, session.RefreshToken)
	// Get current tag to retrieve encrypted tag key
	currentTag, err := apiClient.GetTag(a.ctx, tagID)
	if err != nil {
		a.logger.Error("Failed to get current tag", zap.String("tag_id", tagID), zap.Error(err))
		return nil, fmt.Errorf("failed to get current tag: %w", err)
	}
	// Get master key for encryption
	masterKey, cleanup, err := a.keyCache.GetMasterKey(session.Email)
	if err != nil {
		a.logger.Error("Failed to get master key", zap.Error(err))
		return nil, fmt.Errorf("failed to get master key: %w", err)
	}
	defer cleanup()
	// Decrypt tag key: standard base64 first, then URL-safe unpadded.
	keyNonce, err := base64.StdEncoding.DecodeString(currentTag.EncryptedTagKey.Nonce)
	if err != nil {
		keyNonce, err = base64.RawURLEncoding.DecodeString(currentTag.EncryptedTagKey.Nonce)
		if err != nil {
			return nil, fmt.Errorf("failed to decode tag key nonce: %w", err)
		}
	}
	keyCiphertext, err := base64.StdEncoding.DecodeString(currentTag.EncryptedTagKey.Ciphertext)
	if err != nil {
		keyCiphertext, err = base64.RawURLEncoding.DecodeString(currentTag.EncryptedTagKey.Ciphertext)
		if err != nil {
			return nil, fmt.Errorf("failed to decode tag key ciphertext: %w", err)
		}
	}
	// Extract actual ciphertext (strip the nonce when it is prepended,
	// as produced by CreateTag).
	var actualCiphertext []byte
	if len(keyCiphertext) > len(keyNonce) && bytes.Equal(keyCiphertext[:len(keyNonce)], keyNonce) {
		actualCiphertext = keyCiphertext[len(keyNonce):]
	} else {
		actualCiphertext = keyCiphertext
	}
	tagKey, err := e2ee.DecryptTagKey(&e2ee.EncryptedKey{
		Ciphertext: actualCiphertext,
		Nonce:      keyNonce,
	}, masterKey)
	if err != nil {
		return nil, fmt.Errorf("failed to decrypt tag key: %w", err)
	}
	// Encrypt new name and color with the (unchanged) tag key.
	encryptedName, err := encryptTagField(name, tagKey)
	if err != nil {
		return nil, fmt.Errorf("failed to encrypt tag name: %w", err)
	}
	encryptedColor, err := encryptTagField(color, tagKey)
	if err != nil {
		return nil, fmt.Errorf("failed to encrypt tag color: %w", err)
	}
	// Prepare update request (must include encrypted_tag_key and other required fields)
	input := &client.UpdateTagInput{
		EncryptedName:   encryptedName,
		EncryptedColor:  encryptedColor,
		EncryptedTagKey: currentTag.EncryptedTagKey, // Keep existing key
		CreatedAt:       currentTag.CreatedAt.Format(time.RFC3339),
		ModifiedAt:      time.Now().Format(time.RFC3339),
		Version:         currentTag.Version,
		State:           currentTag.State,
	}
	// Update tag via API
	tag, err := apiClient.UpdateTag(a.ctx, tagID, input)
	if err != nil {
		a.logger.Error("Failed to update tag", zap.String("tag_id", tagID), zap.Error(err))
		return nil, fmt.Errorf("failed to update tag: %w", err)
	}
	a.logger.Info("Tag updated successfully", zap.String("tag_id", tag.ID))
	// Return the plaintext values we already have.
	return &TagData{
		ID:         tag.ID,
		Name:       name,
		Color:      color,
		CreatedAt:  tag.CreatedAt.Format(time.RFC3339),
		ModifiedAt: tag.ModifiedAt.Format(time.RFC3339),
		Version:    tag.Version,
		State:      tag.State,
	}, nil
}
// DeleteTag deletes a tag
func (a *Application) DeleteTag(tagID string) error {
	a.logger.Info("DeleteTag called", zap.String("tag_id", tagID))
	apiClient := a.authService.GetAPIClient()
	if apiClient == nil {
		a.logger.Error("API client not available")
		return fmt.Errorf("API client not available")
	}
	// A valid, unexpired session is required before calling the backend.
	session, err := a.authService.GetCurrentSession(a.ctx)
	if session == nil || err != nil {
		a.logger.Error("No active session", zap.Error(err))
		return fmt.Errorf("no active session - please log in")
	}
	if !session.IsValid() {
		a.logger.Error("Session expired")
		return fmt.Errorf("session expired - please log in again")
	}
	a.logger.Info("Session valid, setting tokens", zap.String("tag_id", tagID))
	apiClient.SetTokens(session.AccessToken, session.RefreshToken)
	a.logger.Info("Calling API DeleteTag", zap.String("tag_id", tagID))
	// Delete the tag server-side.
	if delErr := apiClient.DeleteTag(a.ctx, tagID); delErr != nil {
		a.logger.Error("Failed to delete tag", zap.String("tag_id", tagID), zap.Error(delErr))
		return fmt.Errorf("failed to delete tag: %w", delErr)
	}
	a.logger.Info("Tag deleted successfully", zap.String("tag_id", tagID))
	return nil
}
// ============================================================================
// Tag Assignment Operations
// ============================================================================
// AssignTagToFile assigns a tag to a file
func (a *Application) AssignTagToFile(tagID, fileID string) error {
	// Reject malformed identifiers before touching the network.
	if err := inputvalidation.ValidateTagID(tagID); err != nil {
		return err
	}
	if err := inputvalidation.ValidateFileID(fileID); err != nil {
		return err
	}
	apiClient := a.authService.GetAPIClient()
	if apiClient == nil {
		return fmt.Errorf("API client not available")
	}
	// A valid, unexpired session is required for the API call.
	session, err := a.authService.GetCurrentSession(a.ctx)
	if session == nil || err != nil {
		return fmt.Errorf("no active session - please log in")
	}
	if !session.IsValid() {
		return fmt.Errorf("session expired - please log in again")
	}
	apiClient.SetTokens(session.AccessToken, session.RefreshToken)
	// Tag assignments are plaintext metadata; no encryption involved here.
	assignment := &client.CreateTagAssignmentInput{
		TagID:      tagID,
		EntityID:   fileID,
		EntityType: "file",
	}
	if _, err = apiClient.AssignTag(a.ctx, assignment); err != nil {
		a.logger.Error("Failed to assign tag to file",
			zap.String("tag_id", tagID),
			zap.String("file_id", fileID),
			zap.Error(err))
		return fmt.Errorf("failed to assign tag: %w", err)
	}
	a.logger.Info("Tag assigned to file",
		zap.String("tag_id", tagID),
		zap.String("file_id", fileID))
	return nil
}
// UnassignTagFromFile removes a tag from a file
func (a *Application) UnassignTagFromFile(tagID, fileID string) error {
	// Reject malformed identifiers before touching the network.
	if err := inputvalidation.ValidateTagID(tagID); err != nil {
		return err
	}
	if err := inputvalidation.ValidateFileID(fileID); err != nil {
		return err
	}
	apiClient := a.authService.GetAPIClient()
	if apiClient == nil {
		return fmt.Errorf("API client not available")
	}
	// A valid, unexpired session is required for the API call.
	session, err := a.authService.GetCurrentSession(a.ctx)
	if session == nil || err != nil {
		return fmt.Errorf("no active session - please log in")
	}
	if !session.IsValid() {
		return fmt.Errorf("session expired - please log in again")
	}
	apiClient.SetTokens(session.AccessToken, session.RefreshToken)
	// Remove the assignment server-side.
	if err = apiClient.UnassignTag(a.ctx, tagID, fileID, "file"); err != nil {
		a.logger.Error("Failed to unassign tag from file",
			zap.String("tag_id", tagID),
			zap.String("file_id", fileID),
			zap.Error(err))
		return fmt.Errorf("failed to unassign tag: %w", err)
	}
	a.logger.Info("Tag unassigned from file",
		zap.String("tag_id", tagID),
		zap.String("file_id", fileID))
	return nil
}
// GetTagsForFile returns all tags assigned to a file (decrypted)
func (a *Application) GetTagsForFile(fileID string) ([]*TagData, error) {
	// Reject malformed identifiers before touching the network.
	if err := inputvalidation.ValidateFileID(fileID); err != nil {
		return nil, err
	}
	apiClient := a.authService.GetAPIClient()
	if apiClient == nil {
		return nil, fmt.Errorf("API client not available")
	}
	// A valid, unexpired session is required for the API call.
	session, err := a.authService.GetCurrentSession(a.ctx)
	if session == nil || err != nil {
		return nil, fmt.Errorf("no active session - please log in")
	}
	if !session.IsValid() {
		return nil, fmt.Errorf("session expired - please log in again")
	}
	apiClient.SetTokens(session.AccessToken, session.RefreshToken)
	// Acquire the master key (needed to unwrap each tag's key) before the
	// fetch, so a missing key fails fast.
	masterKey, cleanup, err := a.keyCache.GetMasterKey(session.Email)
	if err != nil {
		a.logger.Error("Failed to get master key", zap.Error(err))
		return nil, fmt.Errorf("failed to get master key: %w", err)
	}
	defer cleanup()
	// Fetch the encrypted tags assigned to this file.
	encryptedTags, err := apiClient.GetTagsForEntity(a.ctx, fileID, "file")
	if err != nil {
		a.logger.Error("Failed to get tags for file",
			zap.String("file_id", fileID),
			zap.Error(err))
		return nil, fmt.Errorf("failed to get tags: %w", err)
	}
	if len(encryptedTags) == 0 {
		return []*TagData{}, nil
	}
	out := make([]*TagData, 0, len(encryptedTags))
	for _, t := range encryptedTags {
		plain, decErr := a.decryptTag(t, masterKey)
		if decErr != nil {
			// Best effort: skip undecryptable tags instead of failing.
			a.logger.Warn("Failed to decrypt tag, skipping",
				zap.String("tag_id", t.ID),
				zap.Error(decErr))
			continue
		}
		out = append(out, plain)
	}
	a.logger.Info("Tags fetched for file",
		zap.String("file_id", fileID),
		zap.Int("count", len(out)))
	return out, nil
}
// ============================================================================
// Collection Tag Assignment Operations
// ============================================================================
// AssignTagToCollection assigns a tag to a collection.
// Both IDs are validated locally before the backend is contacted.
func (a *Application) AssignTagToCollection(tagID, collectionID string) error {
	// Fail fast on malformed IDs.
	if err := inputvalidation.ValidateTagID(tagID); err != nil {
		return err
	}
	if err := inputvalidation.ValidateCollectionID(collectionID); err != nil {
		return err
	}
	apiClient := a.authService.GetAPIClient()
	if apiClient == nil {
		return fmt.Errorf("API client not available")
	}
	// Authenticated endpoint: require a live, unexpired session.
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil || session == nil {
		return fmt.Errorf("no active session - please log in")
	}
	if !session.IsValid() {
		return fmt.Errorf("session expired - please log in again")
	}
	apiClient.SetTokens(session.AccessToken, session.RefreshToken)
	// Ask the backend to create the tag-to-collection assignment.
	assignment := &client.CreateTagAssignmentInput{
		TagID:      tagID,
		EntityID:   collectionID,
		EntityType: "collection",
	}
	if _, err = apiClient.AssignTag(a.ctx, assignment); err != nil {
		a.logger.Error("Failed to assign tag to collection",
			zap.String("tag_id", tagID),
			zap.String("collection_id", collectionID),
			zap.Error(err))
		return fmt.Errorf("failed to assign tag: %w", err)
	}
	a.logger.Info("Tag assigned to collection",
		zap.String("tag_id", tagID),
		zap.String("collection_id", collectionID))
	return nil
}
// UnassignTagFromCollection removes a tag from a collection.
// Both IDs are validated locally before the backend is contacted.
func (a *Application) UnassignTagFromCollection(tagID, collectionID string) error {
	// Fail fast on malformed IDs.
	if err := inputvalidation.ValidateTagID(tagID); err != nil {
		return err
	}
	if err := inputvalidation.ValidateCollectionID(collectionID); err != nil {
		return err
	}
	apiClient := a.authService.GetAPIClient()
	if apiClient == nil {
		return fmt.Errorf("API client not available")
	}
	// Authenticated endpoint: require a live, unexpired session.
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil || session == nil {
		return fmt.Errorf("no active session - please log in")
	}
	if !session.IsValid() {
		return fmt.Errorf("session expired - please log in again")
	}
	apiClient.SetTokens(session.AccessToken, session.RefreshToken)
	// Ask the backend to drop the assignment.
	if err = apiClient.UnassignTag(a.ctx, tagID, collectionID, "collection"); err != nil {
		a.logger.Error("Failed to unassign tag from collection",
			zap.String("tag_id", tagID),
			zap.String("collection_id", collectionID),
			zap.Error(err))
		return fmt.Errorf("failed to unassign tag: %w", err)
	}
	a.logger.Info("Tag unassigned from collection",
		zap.String("tag_id", tagID),
		zap.String("collection_id", collectionID))
	return nil
}
// GetTagsForCollection returns all tags assigned to a collection, with
// their encrypted fields decrypted using the current user's master key.
// Tags that fail to decrypt are logged and omitted from the result.
func (a *Application) GetTagsForCollection(collectionID string) ([]*TagData, error) {
	// Reject malformed IDs before any network call.
	if err := inputvalidation.ValidateCollectionID(collectionID); err != nil {
		return nil, err
	}
	apiClient := a.authService.GetAPIClient()
	if apiClient == nil {
		return nil, fmt.Errorf("API client not available")
	}
	// Require an active, unexpired session before talking to the backend.
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil || session == nil {
		return nil, fmt.Errorf("no active session - please log in")
	}
	if !session.IsValid() {
		return nil, fmt.Errorf("session expired - please log in again")
	}
	apiClient.SetTokens(session.AccessToken, session.RefreshToken)
	// The master key is needed to decrypt tag contents; cleanup releases it.
	masterKey, cleanup, err := a.keyCache.GetMasterKey(session.Email)
	if err != nil {
		a.logger.Error("Failed to get master key", zap.Error(err))
		return nil, fmt.Errorf("failed to get master key: %w", err)
	}
	defer cleanup()
	// Fetch the encrypted tag records attached to this collection.
	tags, err := apiClient.GetTagsForEntity(a.ctx, collectionID, "collection")
	if err != nil {
		a.logger.Error("Failed to get tags for collection",
			zap.String("collection_id", collectionID),
			zap.Error(err))
		return nil, fmt.Errorf("failed to get tags: %w", err)
	}
	if len(tags) == 0 {
		return []*TagData{}, nil
	}
	// Decrypt each tag; undecryptable tags are skipped rather than
	// failing the whole request.
	decrypted := make([]*TagData, 0, len(tags))
	for _, encryptedTag := range tags {
		tagData, decErr := a.decryptTag(encryptedTag, masterKey)
		if decErr != nil {
			a.logger.Warn("Failed to decrypt tag, skipping",
				zap.String("tag_id", encryptedTag.ID),
				zap.Error(decErr))
			continue
		}
		decrypted = append(decrypted, tagData)
	}
	a.logger.Info("Tags fetched for collection",
		zap.String("collection_id", collectionID),
		zap.Int("count", len(decrypted)))
	return decrypted, nil
}
// ============================================================================
// Tag Search Operations
// ============================================================================
// SearchByTagsResult represents the result of a multi-tag search
type SearchByTagsResult struct {
	CollectionIDs   []string `json:"collection_ids"`   // IDs of matching collections (details fetched separately by the frontend)
	FileIDs         []string `json:"file_ids"`         // IDs of matching files (details fetched separately by the frontend)
	TagCount        int      `json:"tag_count"`        // number of tags applied in the search, as reported by the backend
	CollectionCount int      `json:"collection_count"` // len(CollectionIDs), precomputed for the frontend
	FileCount       int      `json:"file_count"`       // len(FileIDs), precomputed for the frontend
}
// SearchByTags searches for collections and files that have ALL the
// specified tags. Only the matching IDs are returned; the frontend
// fetches full details for each ID as needed.
//
// Parameters:
//   - tagIDs: one or more tag IDs; each one is validated locally, the
//     same way the other tag operations validate their IDs.
//   - limit: maximum number of results requested from the backend.
//
// Returns a SearchByTagsResult, or an error if validation, the session
// check, or the backend call fails.
func (a *Application) SearchByTags(tagIDs []string, limit int) (*SearchByTagsResult, error) {
	a.logger.Info("SearchByTags called",
		zap.Int("tag_count", len(tagIDs)),
		zap.Int("limit", limit))
	// Validate inputs
	if len(tagIDs) == 0 {
		return nil, fmt.Errorf("at least one tag ID is required")
	}
	// Validate every individual tag ID, consistent with the other tag
	// operations (previously only the slice's non-emptiness was checked).
	for _, tagID := range tagIDs {
		if err := inputvalidation.ValidateTagID(tagID); err != nil {
			return nil, err
		}
	}
	// Get API client from auth service
	apiClient := a.authService.GetAPIClient()
	if apiClient == nil {
		return nil, fmt.Errorf("API client not available")
	}
	// Ensure we have a valid session with tokens
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil || session == nil {
		return nil, fmt.Errorf("no active session - please log in")
	}
	if !session.IsValid() {
		return nil, fmt.Errorf("session expired - please log in again")
	}
	// Ensure tokens are set in the API client
	apiClient.SetTokens(session.AccessToken, session.RefreshToken)
	// Call backend API
	resp, err := apiClient.SearchByTags(a.ctx, tagIDs, limit)
	if err != nil {
		a.logger.Error("Failed to search by tags", zap.Error(err))
		return nil, fmt.Errorf("failed to search by tags: %w", err)
	}
	// Extract IDs only - frontend will fetch full details as needed
	collectionIDs := make([]string, 0, len(resp.Collections))
	for _, coll := range resp.Collections {
		collectionIDs = append(collectionIDs, coll.ID)
	}
	fileIDs := make([]string, 0, len(resp.Files))
	for _, file := range resp.Files {
		fileIDs = append(fileIDs, file.ID)
	}
	result := &SearchByTagsResult{
		CollectionIDs:   collectionIDs,
		FileIDs:         fileIDs,
		TagCount:        resp.TagCount,
		CollectionCount: len(collectionIDs),
		FileCount:       len(fileIDs),
	}
	a.logger.Info("SearchByTags completed",
		zap.Int("collections", len(collectionIDs)),
		zap.Int("files", len(fileIDs)))
	return result, nil
}

View file

@ -0,0 +1,253 @@
package app
import (
"fmt"
"time"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/maplefile/client"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/utils"
)
// GetUserProfile fetches the current user's profile from the backend.
func (a *Application) GetUserProfile() (*client.User, error) {
	apiClient := a.authService.GetAPIClient()
	if apiClient == nil {
		return nil, fmt.Errorf("API client not available")
	}
	// Authenticated endpoint: require a live, unexpired session first.
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil || session == nil {
		return nil, fmt.Errorf("no active session - please log in")
	}
	if !session.IsValid() {
		return nil, fmt.Errorf("session expired - please log in again")
	}
	apiClient.SetTokens(session.AccessToken, session.RefreshToken)
	// Ask the backend for the authenticated user's record.
	profile, err := apiClient.GetMe(a.ctx)
	if err != nil {
		a.logger.Error("Failed to fetch user profile", zap.Error(err))
		return nil, fmt.Errorf("failed to fetch profile: %w", err)
	}
	// Emails are masked in logs to avoid leaking PII.
	a.logger.Info("User profile fetched successfully",
		zap.String("user_id", profile.ID),
		zap.String("email", utils.MaskEmail(profile.Email)))
	return profile, nil
}
// UpdateUserProfile updates the current user's profile.
//
// Parameters:
//   - input: the profile fields to update; must be non-nil.
//
// Returns the updated user as stored by the backend.
func (a *Application) UpdateUserProfile(input *client.UpdateUserInput) (*client.User, error) {
	// Guard against a nil payload from the frontend bridge instead of
	// forwarding it to the backend.
	if input == nil {
		return nil, fmt.Errorf("update input is required")
	}
	// Get API client from auth service
	apiClient := a.authService.GetAPIClient()
	if apiClient == nil {
		return nil, fmt.Errorf("API client not available")
	}
	// Ensure we have a valid session with tokens
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil || session == nil {
		return nil, fmt.Errorf("no active session - please log in")
	}
	if !session.IsValid() {
		return nil, fmt.Errorf("session expired - please log in again")
	}
	// Ensure tokens are set in the API client
	apiClient.SetTokens(session.AccessToken, session.RefreshToken)
	// Update user profile
	user, err := apiClient.UpdateMe(a.ctx, input)
	if err != nil {
		a.logger.Error("Failed to update user profile", zap.Error(err))
		return nil, fmt.Errorf("failed to update profile: %w", err)
	}
	// Emails are masked in logs to avoid leaking PII.
	a.logger.Info("User profile updated successfully",
		zap.String("user_id", user.ID),
		zap.String("email", utils.MaskEmail(user.Email)))
	return user, nil
}
// ============================================================================
// Blocked Emails Management
// ============================================================================
// BlockedEmailData represents a blocked email entry for the frontend
type BlockedEmailData struct {
	BlockedEmail string `json:"blocked_email"` // the blocked address itself
	Reason       string `json:"reason"`        // free-form reason supplied when blocking
	CreatedAt    string `json:"created_at"`    // block creation time, RFC 3339 formatted
}
// GetBlockedEmails fetches the list of blocked emails from the backend
// and converts each entry into the frontend-facing BlockedEmailData shape.
func (a *Application) GetBlockedEmails() ([]*BlockedEmailData, error) {
	apiClient := a.authService.GetAPIClient()
	if apiClient == nil {
		return nil, fmt.Errorf("API client not available")
	}
	// Authenticated endpoint: require a live, unexpired session.
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil || session == nil {
		return nil, fmt.Errorf("no active session - please log in")
	}
	if !session.IsValid() {
		return nil, fmt.Errorf("session expired - please log in again")
	}
	apiClient.SetTokens(session.AccessToken, session.RefreshToken)
	resp, err := apiClient.ListBlockedEmails(a.ctx)
	if err != nil {
		a.logger.Error("Failed to fetch blocked emails", zap.Error(err))
		return nil, fmt.Errorf("failed to fetch blocked emails: %w", err)
	}
	// Map backend records into the frontend shape, formatting timestamps
	// as RFC 3339 strings.
	result := make([]*BlockedEmailData, 0, len(resp.BlockedEmails))
	for _, entry := range resp.BlockedEmails {
		result = append(result, &BlockedEmailData{
			BlockedEmail: entry.BlockedEmail,
			Reason:       entry.Reason,
			CreatedAt:    entry.CreatedAt.Format(time.RFC3339),
		})
	}
	a.logger.Info("Blocked emails fetched successfully",
		zap.Int("count", len(result)))
	return result, nil
}
// AddBlockedEmail adds an email to the blocked list.
//
// Parameters:
//   - email: the address to block; must be non-empty.
//   - reason: free-form reason stored with the block entry.
//
// Returns the created entry in frontend-facing form.
func (a *Application) AddBlockedEmail(email, reason string) (*BlockedEmailData, error) {
	// Fail fast locally rather than sending an empty address to the backend.
	if email == "" {
		return nil, fmt.Errorf("email is required")
	}
	// Get API client from auth service
	apiClient := a.authService.GetAPIClient()
	if apiClient == nil {
		return nil, fmt.Errorf("API client not available")
	}
	// Ensure we have a valid session with tokens
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil || session == nil {
		return nil, fmt.Errorf("no active session - please log in")
	}
	if !session.IsValid() {
		return nil, fmt.Errorf("session expired - please log in again")
	}
	// Ensure tokens are set in the API client
	apiClient.SetTokens(session.AccessToken, session.RefreshToken)
	// Call backend API
	blocked, err := apiClient.CreateBlockedEmail(a.ctx, email, reason)
	if err != nil {
		// Emails are masked in logs to avoid leaking PII.
		a.logger.Error("Failed to add blocked email",
			zap.String("email", utils.MaskEmail(email)),
			zap.Error(err))
		return nil, fmt.Errorf("failed to block email: %w", err)
	}
	a.logger.Info("Email blocked successfully",
		zap.String("blocked_email", utils.MaskEmail(email)))
	return &BlockedEmailData{
		BlockedEmail: blocked.BlockedEmail,
		Reason:       blocked.Reason,
		CreatedAt:    blocked.CreatedAt.Format(time.RFC3339),
	}, nil
}
// RemoveBlockedEmail removes an email from the blocked list.
//
// Parameters:
//   - email: the address to unblock; must be non-empty.
func (a *Application) RemoveBlockedEmail(email string) error {
	// Fail fast locally rather than sending an empty address to the backend.
	if email == "" {
		return fmt.Errorf("email is required")
	}
	// Get API client from auth service
	apiClient := a.authService.GetAPIClient()
	if apiClient == nil {
		return fmt.Errorf("API client not available")
	}
	// Ensure we have a valid session with tokens
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil || session == nil {
		return fmt.Errorf("no active session - please log in")
	}
	if !session.IsValid() {
		return fmt.Errorf("session expired - please log in again")
	}
	// Ensure tokens are set in the API client
	apiClient.SetTokens(session.AccessToken, session.RefreshToken)
	// Call backend API
	_, err = apiClient.DeleteBlockedEmail(a.ctx, email)
	if err != nil {
		// Emails are masked in logs to avoid leaking PII.
		a.logger.Error("Failed to remove blocked email",
			zap.String("email", utils.MaskEmail(email)),
			zap.Error(err))
		return fmt.Errorf("failed to unblock email: %w", err)
	}
	a.logger.Info("Email unblocked successfully",
		zap.String("blocked_email", utils.MaskEmail(email)))
	return nil
}
// ============================================================================
// Account Deletion
// ============================================================================
// DeleteAccount deletes the current user's account.
//
// The backend requires the user's password to authorize the deletion.
// On success the local session is logged out, which (per Logout) also
// deletes all local data.
func (a *Application) DeleteAccount(password string) error {
	// The backend needs the password to authorize deletion; fail fast locally.
	if password == "" {
		return fmt.Errorf("password is required")
	}
	// Get API client from auth service
	apiClient := a.authService.GetAPIClient()
	if apiClient == nil {
		return fmt.Errorf("API client not available")
	}
	// Ensure we have a valid session with tokens
	session, err := a.authService.GetCurrentSession(a.ctx)
	if err != nil || session == nil {
		return fmt.Errorf("no active session - please log in")
	}
	if !session.IsValid() {
		return fmt.Errorf("session expired - please log in again")
	}
	// Ensure tokens are set in the API client
	apiClient.SetTokens(session.AccessToken, session.RefreshToken)
	// Call backend API to delete account
	err = apiClient.DeleteMe(a.ctx, password)
	if err != nil {
		a.logger.Error("Failed to delete account", zap.Error(err))
		return fmt.Errorf("failed to delete account: %w", err)
	}
	// Fix: this field carries a masked email, so label it "email"
	// (it was previously mislabeled "user_id").
	a.logger.Info("Account deleted successfully",
		zap.String("email", utils.MaskEmail(session.Email)))
	// Logout after successful deletion (best-effort; the account is gone
	// server-side regardless of whether local logout succeeds).
	_ = a.Logout()
	return nil
}

View file

@ -0,0 +1,294 @@
package app
import (
"context"
"fmt"
"time"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/config"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/collection"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/file"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/session"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/auth"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/httpclient"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/keycache"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/passwordstore"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/ratelimiter"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/search"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/securitylog"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/storagemanager"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/sync"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/tokenmanager"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/utils"
)
// Application is the main Wails application struct
type Application struct {
	ctx            context.Context        // Wails runtime context, assigned in Startup
	logger         *zap.Logger            // structured application logger
	config         config.ConfigService   // app configuration access
	authService    *auth.Service          // login/session handling and API client ownership
	tokenManager   *tokenmanager.Manager  // background access-token refresh
	passwordStore  *passwordstore.Service // passwords held in secure RAM (see Shutdown cleanup)
	keyCache       *keycache.Service      // cached master keys in secure memory
	rateLimiter    *ratelimiter.Service   // per-operation client-side rate limiting
	httpClient     *httpclient.Service    // shared HTTP client service
	syncService    sync.Service           // collection/file synchronization
	storageManager *storagemanager.Manager // per-user local storage lifecycle
	securityLog    *securitylog.Service   // audit logging of security-relevant events
	searchService  search.SearchService   // local search index
}
// ProvideApplication creates the Application for Wire.
// Note: ctx is deliberately left unset here; Wails supplies it via the
// Startup lifecycle hook.
func ProvideApplication(
	logger *zap.Logger,
	configService config.ConfigService,
	authService *auth.Service,
	tokenManager *tokenmanager.Manager,
	passwordStore *passwordstore.Service,
	keyCache *keycache.Service,
	rateLimiter *ratelimiter.Service,
	httpClient *httpclient.Service,
	syncService sync.Service,
	storageManager *storagemanager.Manager,
	securityLog *securitylog.Service,
	searchService search.SearchService,
) *Application {
	return &Application{
		logger:         logger,
		config:         configService,
		authService:    authService,
		tokenManager:   tokenManager,
		passwordStore:  passwordStore,
		keyCache:       keyCache,
		rateLimiter:    rateLimiter,
		httpClient:     httpClient,
		syncService:    syncService,
		storageManager: storageManager,
		securityLog:    securityLog,
		searchService:  searchService,
	}
}
// getFileRepo returns the file repository for the current user.
// Returns nil if no user is logged in (storage not initialized).
// Callers must nil-check the result; mustGetFileRepo provides a
// never-nil alternative.
func (a *Application) getFileRepo() file.Repository {
	return a.storageManager.GetFileRepository()
}
// mustGetFileRepo returns the file repository for the current user.
// Logs an error and returns a no-op repository if storage is not initialized.
// Use this in places where you expect the user to be logged in.
// The returned repository will never be nil - it returns a safe no-op implementation
// if the actual repository is not available.
func (a *Application) mustGetFileRepo() file.Repository {
	if repo := a.storageManager.GetFileRepository(); repo != nil {
		return repo
	}
	// Fall back to the no-op repository so callers never hit a nil
	// pointer, at the cost of every operation returning an error.
	a.logger.Error("File repository not available - user storage not initialized")
	return &noOpFileRepository{}
}
// getCollectionRepo returns the collection repository for the current user.
// Returns nil if no user is logged in (storage not initialized).
// Callers must nil-check the result before use.
func (a *Application) getCollectionRepo() collection.Repository {
	return a.storageManager.GetCollectionRepository()
}
// noOpFileRepository is a safe no-op implementation of file.Repository
// that returns empty results instead of causing nil pointer dereferences.
// This is used when the actual repository is not available (user not logged in).
type noOpFileRepository struct{}

// Compile-time check that the no-op type keeps satisfying the interface.
var _ file.Repository = (*noOpFileRepository)(nil)

// errStorageNotInitialized is the single error returned by every no-op
// method, replacing eight duplicated error strings with one comparable value.
var errStorageNotInitialized = fmt.Errorf("storage not initialized - user must be logged in")

// Get always fails: no storage is available.
func (r *noOpFileRepository) Get(id string) (*file.File, error) {
	return nil, errStorageNotInitialized
}

// List returns an empty (non-nil) slice plus the error, so callers that
// only range over the result remain safe.
func (r *noOpFileRepository) List() ([]*file.File, error) {
	return []*file.File{}, errStorageNotInitialized
}

// ListByCollection returns an empty (non-nil) slice plus the error.
func (r *noOpFileRepository) ListByCollection(collectionID string) ([]*file.File, error) {
	return []*file.File{}, errStorageNotInitialized
}

// Create always fails: no storage is available.
func (r *noOpFileRepository) Create(f *file.File) error {
	return errStorageNotInitialized
}

// Update always fails: no storage is available.
func (r *noOpFileRepository) Update(f *file.File) error {
	return errStorageNotInitialized
}

// Delete always fails: no storage is available.
func (r *noOpFileRepository) Delete(id string) error {
	return errStorageNotInitialized
}

// ListByStatus returns an empty (non-nil) slice plus the error.
func (r *noOpFileRepository) ListByStatus(status file.SyncStatus) ([]*file.File, error) {
	return []*file.File{}, errStorageNotInitialized
}

// Exists always reports false with the error.
func (r *noOpFileRepository) Exists(id string) (bool, error) {
	return false, errStorageNotInitialized
}
// Startup is called when the app starts (Wails lifecycle hook).
// It stores the Wails context, then attempts to resume a session saved by a
// previous run. The restore sequence is order-sensitive: local validity check,
// token restore, server-side revocation check, user storage init, search index
// init, token manager start, then background cleanup.
func (a *Application) Startup(ctx context.Context) {
	a.ctx = ctx
	a.logger.Info("MapleFile desktop application started")
	a.securityLog.LogAppLifecycle(securitylog.EventAppStart)
	// Check if there's a valid session from a previous run
	session, err := a.authService.GetCurrentSession(ctx)
	if err != nil {
		a.logger.Debug("No existing session on startup", zap.Error(err))
		return
	}
	if session == nil {
		a.logger.Info("No session found on startup")
		return
	}
	if !session.IsValid() {
		// Locally expired: clear it rather than attempting a server call.
		a.logger.Info("Session expired on startup, clearing",
			zap.Time("expired_at", session.ExpiresAt))
		_ = a.authService.Logout(ctx)
		return
	}
	// Valid session found - restore it
	a.logger.Info("Resuming valid session from previous run",
		zap.String("user_id", session.UserID),
		zap.String("email", utils.MaskEmail(session.Email)),
		zap.Time("expires_at", session.ExpiresAt))
	// Restore tokens to API client
	if err := a.authService.RestoreSession(ctx, session); err != nil {
		a.logger.Error("Failed to restore session", zap.Error(err))
		return
	}
	// SECURITY: Validate session with server before fully restoring
	// This prevents using stale/revoked sessions from previous runs
	if err := a.validateSessionWithServer(ctx, session); err != nil {
		a.logger.Warn("Session validation with server failed, clearing session",
			zap.String("email", utils.MaskEmail(session.Email)),
			zap.Error(err))
		_ = a.authService.Logout(ctx)
		return
	}
	a.logger.Info("Session validated with server successfully")
	// Initialize user-specific storage for the logged-in user
	if err := a.storageManager.InitializeForUser(session.Email); err != nil {
		// Without storage the app cannot function for this user; force re-login.
		a.logger.Error("Failed to initialize user storage", zap.Error(err))
		_ = a.authService.Logout(ctx)
		return
	}
	a.logger.Info("User storage initialized",
		zap.String("email", utils.MaskEmail(session.Email)))
	// Initialize search index for the logged-in user
	if err := a.searchService.Initialize(ctx, session.Email); err != nil {
		a.logger.Error("Failed to initialize search index", zap.Error(err))
		// Don't fail startup if search initialization fails - it's not critical
		// The app can still function without search
	} else {
		a.logger.Info("Search index initialized",
			zap.String("email", utils.MaskEmail(session.Email)))
		// Rebuild search index from local data in the background
		userEmail := session.Email // Capture email before goroutine
		go func() {
			if err := a.rebuildSearchIndexForUser(userEmail); err != nil {
				a.logger.Warn("Failed to rebuild search index on startup", zap.Error(err))
			}
		}()
	}
	// Start token manager for automatic refresh
	a.tokenManager.Start()
	a.logger.Info("Token manager started for resumed session")
	// Run background cleanup of deleted files
	go a.cleanupDeletedFiles()
}
// validateSessionWithServer validates the stored session by making a request to the server.
// This is a security measure to ensure the session hasn't been revoked server-side.
func (a *Application) validateSessionWithServer(ctx context.Context, sess *session.Session) error {
	apiClient := a.authService.GetAPIClient()
	if apiClient == nil {
		return fmt.Errorf("API client not available")
	}
	// Install the stored tokens so the probe request is authenticated.
	apiClient.SetTokens(sess.AccessToken, sess.RefreshToken)
	// GetMe is a cheap authenticated endpoint: if it succeeds, the server
	// still accepts this session's tokens.
	if _, err := apiClient.GetMe(ctx); err != nil {
		return fmt.Errorf("server validation failed: %w", err)
	}
	return nil
}
// Shutdown is called when the app shuts down (Wails lifecycle hook).
// Teardown order matters: the token manager is stopped first, in-memory
// secrets (passwords, master keys) are wiped next, then the search index
// and user storage are closed, and finally buffered logs are flushed.
func (a *Application) Shutdown(ctx context.Context) {
	a.logger.Info("MapleFile desktop application shutting down")
	a.securityLog.LogAppLifecycle(securitylog.EventAppShutdown)
	// Calculate timeout from Wails context
	timeout := 3 * time.Second
	if deadline, ok := ctx.Deadline(); ok {
		remaining := time.Until(deadline)
		if remaining > 500*time.Millisecond {
			// Leave 500ms buffer for other cleanup
			timeout = remaining - 500*time.Millisecond
		} else if remaining > 0 {
			timeout = remaining
		} else {
			// Deadline already passed: allow a token grace period.
			timeout = 100 * time.Millisecond
		}
	}
	// Stop token manager gracefully
	stopCtx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	if err := a.tokenManager.Stop(stopCtx); err != nil {
		a.logger.Error("Token manager shutdown error", zap.Error(err))
	}
	// Cleanup password store (destroy RAM enclaves)
	a.logger.Info("Clearing all passwords from secure RAM")
	a.passwordStore.Cleanup()
	a.logger.Info("Password cleanup completed")
	// Cleanup key cache (destroy cached master keys)
	a.logger.Info("Clearing all cached master keys from secure memory")
	a.keyCache.Cleanup()
	a.logger.Info("Key cache cleanup completed")
	// Cleanup search index
	a.logger.Info("Closing search index")
	if err := a.searchService.Close(); err != nil {
		a.logger.Error("Search index close error", zap.Error(err))
	} else {
		a.logger.Info("Search index closed successfully")
	}
	// Cleanup user-specific storage
	a.logger.Info("Cleaning up user storage")
	a.storageManager.Cleanup()
	a.logger.Info("User storage cleanup completed")
	// Flush buffered log entries. The error is deliberately discarded:
	// Sync commonly fails harmlessly on stderr sinks, and there is no
	// logger left to report the failure to anyway.
	_ = a.logger.Sync()
}

View file

@ -0,0 +1,227 @@
//go:build wireinject
// +build wireinject
package app
import (
"context"
"os"
"strings"
"github.com/google/wire"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/maplefile/client"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/config"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/pkg/storage/leveldb"
// Domain imports
sessionDomain "codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/session"
// Repository imports
sessionRepo "codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/repo/session"
// Service imports
authService "codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/auth"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/httpclient"
keyCache "codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/keycache"
passwordStore "codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/passwordstore"
rateLimiter "codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/ratelimiter"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/search"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/securitylog"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/storagemanager"
syncService "codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/sync"
tokenManager "codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/tokenmanager"
// Use case imports
sessionUC "codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/usecase/session"
)
// InitializeApplication creates a fully configured Application using Wire DI.
// This is a Wire injector declaration: the wire.Build call and the trailing
// `return nil, nil` are placeholders that the wire tool replaces with
// generated code (wire_gen.go, built without the wireinject tag); this body
// is never executed at runtime.
func InitializeApplication() (*Application, error) {
	wire.Build(
		// Infrastructure
		ProvideLogger,
		config.New,
		ProvideMapleFileClient,
		// Session Repository (global - not user-specific)
		ProvideSessionRepository,
		// Storage Manager (handles user-specific storage lifecycle)
		storagemanager.ProvideManager,
		// Bind *storagemanager.Manager to sync.RepositoryProvider interface
		wire.Bind(new(syncService.RepositoryProvider), new(*storagemanager.Manager)),
		// Use Case Layer
		sessionUC.ProvideCreateUseCase,
		sessionUC.ProvideGetByIdUseCase,
		sessionUC.ProvideDeleteUseCase,
		sessionUC.ProvideSaveUseCase,
		// Service Layer
		authService.ProvideService,
		tokenManager.ProvideManager,
		passwordStore.ProvideService,
		keyCache.ProvideService,
		rateLimiter.ProvideService,
		httpclient.ProvideService,
		securitylog.ProvideService,
		search.New,
		// Sync Services
		syncService.ProvideCollectionSyncService,
		syncService.ProvideFileSyncService,
		syncService.ProvideService,
		// Application
		ProvideApplication,
	)
	return nil, nil
}
// ProvideLogger creates the application logger with environment-aware configuration.
// Defaults to production mode for security. Development mode must be explicitly enabled.
func ProvideLogger() (*zap.Logger, error) {
	switch os.Getenv("MAPLEFILE_MODE") {
	case "dev", "development":
		// Development: console format, debug level, with caller and stacktrace.
		return zap.NewDevelopment()
	}
	// Secure default: production JSON logging at info level, with caller
	// info and stacktraces suppressed.
	prodCfg := zap.NewProductionConfig()
	prodCfg.Level = zap.NewAtomicLevelAt(zap.InfoLevel)
	prodCfg.DisableCaller = true
	prodCfg.DisableStacktrace = true
	return prodCfg.Build()
}
// ProvideSessionRepository creates the session repository with its storage.
// Session storage is GLOBAL (not user-specific) because it stores the current login session.
func ProvideSessionRepository(logger *zap.Logger) (sessionDomain.Repository, error) {
	cfgProvider, err := config.NewLevelDBConfigurationProviderForSession()
	if err != nil {
		return nil, err
	}
	// Back the repository with an on-disk LevelDB store.
	store := leveldb.NewDiskStorage(cfgProvider, logger.Named("session-storage"))
	return sessionRepo.ProvideRepository(store), nil
}
// zapLoggerAdapter adapts *zap.Logger to client.Logger interface
type zapLoggerAdapter struct {
	logger *zap.Logger // destination logger (typically named "api-client")
}

// Compile-time check that the adapter keeps satisfying client.Logger,
// so a signature drift in the client package is caught at build time.
var _ client.Logger = (*zapLoggerAdapter)(nil)

// Debug logs at debug level, converting key/value pairs to zap fields.
func (a *zapLoggerAdapter) Debug(msg string, keysAndValues ...interface{}) {
	a.logger.Debug(msg, keysAndValuesToZapFields(keysAndValues...)...)
}

// Info logs at info level, converting key/value pairs to zap fields.
func (a *zapLoggerAdapter) Info(msg string, keysAndValues ...interface{}) {
	a.logger.Info(msg, keysAndValuesToZapFields(keysAndValues...)...)
}

// Warn logs at warn level, converting key/value pairs to zap fields.
func (a *zapLoggerAdapter) Warn(msg string, keysAndValues ...interface{}) {
	a.logger.Warn(msg, keysAndValuesToZapFields(keysAndValues...)...)
}

// Error logs at error level, converting key/value pairs to zap fields.
func (a *zapLoggerAdapter) Error(msg string, keysAndValues ...interface{}) {
	a.logger.Error(msg, keysAndValuesToZapFields(keysAndValues...)...)
}
// keysAndValuesToZapFields converts alternating key-value pairs to zap
// fields. Keys must be strings. Instead of silently dropping malformed
// input (the previous behavior), a non-string key is surfaced under
// "invalid_key"/"invalid_key_value" fields and a trailing value with no
// key under "dangling_value", mirroring how zap's sugared logger keeps
// malformed call sites visible.
func keysAndValuesToZapFields(keysAndValues ...interface{}) []zap.Field {
	fields := make([]zap.Field, 0, (len(keysAndValues)+1)/2)
	i := 0
	for ; i+1 < len(keysAndValues); i += 2 {
		key, ok := keysAndValues[i].(string)
		if !ok {
			// Preserve the pair under marker keys rather than losing it.
			fields = append(fields,
				zap.Any("invalid_key", keysAndValues[i]),
				zap.Any("invalid_key_value", keysAndValues[i+1]))
			continue
		}
		fields = append(fields, zap.Any(key, keysAndValues[i+1]))
	}
	if i < len(keysAndValues) {
		// Odd argument count: surface the dangling trailing value.
		fields = append(fields, zap.Any("dangling_value", keysAndValues[i]))
	}
	return fields
}
// BuildMode is set at compile time via -ldflags and acts as the fallback
// run mode when the MAPLEFILE_MODE environment variable is unset
// (see ProvideMapleFileClient). Empty by default.
// Example: go build -ldflags "-X codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/app.BuildMode=dev"
var BuildMode string
// ProvideMapleFileClient creates the backend API client.
// Run-mode resolution priority: MAPLEFILE_MODE env var, then the
// compile-time BuildMode, then "production" as the secure default.
func ProvideMapleFileClient(configService config.ConfigService, logger *zap.Logger) (*client.Client, error) {
	ctx := context.Background()
	mode := os.Getenv("MAPLEFILE_MODE")
	// Log the detected mode sources for startup diagnostics.
	logger.Info("Startup: checking mode configuration",
		zap.String("MAPLEFILE_MODE_env", mode),
		zap.String("BuildMode_compile_time", BuildMode),
	)
	if mode == "" {
		if BuildMode == "" {
			mode = "production" // Default to production (secure default)
			logger.Info("Startup: no mode set, defaulting to production", zap.String("mode", mode))
		} else {
			mode = BuildMode
			logger.Info("Startup: using compile-time BuildMode", zap.String("mode", mode))
		}
	}
	// Map the resolved mode onto a backend base URL.
	var baseURL string
	switch mode {
	case "production":
		baseURL = client.ProductionURL // https://maplefile.ca
	case "dev", "development":
		baseURL = client.LocalURL // http://localhost:8000
	default:
		// Unknown mode: fall back to the custom URL from the config file.
		cfg, err := configService.GetConfig(ctx)
		if err != nil {
			return nil, err
		}
		baseURL = cfg.CloudProviderAddress
	}
	// Route the API client's internal logging through zap.
	adapter := &zapLoggerAdapter{logger: logger.Named("api-client")}
	apiClient := client.New(client.Config{
		BaseURL: baseURL,
		Logger:  adapter,
	})
	logger.Info("MapleFile API client initialized",
		zap.String("mode", mode),
		zap.String("base_url", baseURL),
	)
	// Security: Warn if using unencrypted HTTP (should only happen in dev mode)
	if strings.HasPrefix(baseURL, "http://") {
		logger.Warn("SECURITY WARNING: Using unencrypted HTTP connection",
			zap.String("mode", mode),
			zap.String("base_url", baseURL),
			zap.String("recommendation", "This should only be used for local development"),
		)
	}
	// Persist the resolved URL back to the config; skipped in production,
	// where the address is immutable.
	if mode != "production" {
		if err := configService.SetCloudProviderAddress(ctx, baseURL); err != nil {
			logger.Warn("Failed to update cloud provider address in config", zap.Error(err))
		}
	}
	return apiClient, nil
}

View file

@ -0,0 +1,197 @@
// Code generated by Wire. DO NOT EDIT.
//go:generate go run -mod=mod github.com/google/wire/cmd/wire
//go:build !wireinject
// +build !wireinject
package app
import (
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/maplefile/client"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/pkg/storage/leveldb"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/config"
session2 "codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/domain/session"
session3 "codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/repo/session"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/auth"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/httpclient"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/keycache"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/passwordstore"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/ratelimiter"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/search"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/securitylog"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/storagemanager"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/sync"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/service/tokenmanager"
"codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/usecase/session"
"context"
"go.uber.org/zap"
"os"
"strings"
)
// Injectors from wire.go:

// InitializeApplication creates a fully configured Application using Wire DI
// NOTE(review): this function lives in a Wire-generated file (see the
// "DO NOT EDIT" header); make changes in wire.go and regenerate.
func InitializeApplication() (*Application, error) {
	// Infrastructure: logger, config service, backend API client.
	logger, err := ProvideLogger()
	if err != nil {
		return nil, err
	}
	configService, err := config.New()
	if err != nil {
		return nil, err
	}
	// NOTE: this local variable shadows the imported "client" package
	// from here on (a Wire-generated naming quirk).
	client, err := ProvideMapleFileClient(configService, logger)
	if err != nil {
		return nil, err
	}
	// Session persistence plus its CRUD use cases.
	repository, err := ProvideSessionRepository(logger)
	if err != nil {
		return nil, err
	}
	createUseCase := session.ProvideCreateUseCase(repository)
	getByIdUseCase := session.ProvideGetByIdUseCase(repository)
	deleteUseCase := session.ProvideDeleteUseCase(repository)
	saveUseCase := session.ProvideSaveUseCase(repository)
	// Auth service and token manager built on the session use cases.
	service := auth.ProvideService(client, createUseCase, getByIdUseCase, deleteUseCase, saveUseCase, logger)
	manager := tokenmanager.ProvideManager(client, service, getByIdUseCase, logger)
	// Remaining application services.
	passwordstoreService := passwordstore.ProvideService(logger)
	keycacheService := keycache.ProvideService(logger)
	ratelimiterService := ratelimiter.ProvideService()
	httpclientService := httpclient.ProvideService()
	storagemanagerManager := storagemanager.ProvideManager(logger)
	collectionSyncService := sync.ProvideCollectionSyncService(logger, client, storagemanagerManager)
	fileSyncService := sync.ProvideFileSyncService(logger, client, storagemanagerManager)
	syncService := sync.ProvideService(logger, collectionSyncService, fileSyncService, storagemanagerManager)
	securitylogService := securitylog.ProvideService(logger)
	searchService := search.New(configService, logger)
	// Assemble the final Application from all constructed dependencies.
	application := ProvideApplication(logger, configService, service, manager, passwordstoreService, keycacheService, ratelimiterService, httpclientService, syncService, storagemanagerManager, securitylogService, searchService)
	return application, nil
}
// wire.go:
// ProvideLogger creates the application logger with environment-aware configuration.
// Defaults to production mode for security. Development mode must be explicitly enabled.
func ProvideLogger() (*zap.Logger, error) {
	mode := os.Getenv("MAPLEFILE_MODE")
	// Development: human-readable console logger with debug level.
	if mode == "dev" || mode == "development" {
		return zap.NewDevelopment()
	}
	// Production: info level, with caller info and stacktraces disabled to
	// keep log volume (and leaked internal detail) down.
	cfg := zap.NewProductionConfig()
	cfg.Level = zap.NewAtomicLevelAt(zap.InfoLevel)
	cfg.DisableCaller = true
	cfg.DisableStacktrace = true
	return cfg.Build()
}
// ProvideSessionRepository creates the session repository with its storage.
// Session storage is GLOBAL (not user-specific) because it stores the current login session.
func ProvideSessionRepository(logger *zap.Logger) (session2.Repository, error) {
	// Sessions get their own LevelDB configuration, separate from user data.
	provider, err := config.NewLevelDBConfigurationProviderForSession()
	if err != nil {
		return nil, err
	}
	sessionStorage := leveldb.NewDiskStorage(provider, logger.Named("session-storage"))
	return session3.ProvideRepository(sessionStorage), nil
}
// zapLoggerAdapter adapts *zap.Logger to client.Logger interface
// (a key/value-style logging interface expected by the backend API client).
type zapLoggerAdapter struct {
	logger *zap.Logger
}
// Debug forwards a debug-level message, converting key/value pairs to zap fields.
func (a *zapLoggerAdapter) Debug(msg string, keysAndValues ...interface{}) {
	fields := keysAndValuesToZapFields(keysAndValues...)
	a.logger.Debug(msg, fields...)
}
// Info forwards an info-level message, converting key/value pairs to zap fields.
func (a *zapLoggerAdapter) Info(msg string, keysAndValues ...interface{}) {
	fields := keysAndValuesToZapFields(keysAndValues...)
	a.logger.Info(msg, fields...)
}
// Warn forwards a warn-level message, converting key/value pairs to zap fields.
func (a *zapLoggerAdapter) Warn(msg string, keysAndValues ...interface{}) {
	fields := keysAndValuesToZapFields(keysAndValues...)
	a.logger.Warn(msg, fields...)
}
// Error forwards an error-level message, converting key/value pairs to zap fields.
func (a *zapLoggerAdapter) Error(msg string, keysAndValues ...interface{}) {
	fields := keysAndValuesToZapFields(keysAndValues...)
	a.logger.Error(msg, fields...)
}
// keysAndValuesToZapFields converts key-value pairs to zap fields
// Pairs are consumed two at a time; a pair whose key is not a string is
// skipped, and a trailing value with no matching key is ignored.
func keysAndValuesToZapFields(keysAndValues ...interface{}) []zap.Field {
	fields := make([]zap.Field, 0, len(keysAndValues)/2)
	for i := 0; i+1 < len(keysAndValues); i += 2 {
		key, ok := keysAndValues[i].(string)
		if !ok {
			// Non-string key: drop this pair rather than panic.
			continue
		}
		fields = append(fields, zap.Any(key, keysAndValues[i+1]))
	}
	return fields
}
// BuildMode is set at compile time via -ldflags
// Example: go build -ldflags "-X codeberg.org/mapleopentech/monorepo/native/desktop/maplefile/internal/app.BuildMode=dev"
// Used by ProvideMapleFileClient when the MAPLEFILE_MODE environment
// variable is unset; when both are empty, "production" is assumed.
var BuildMode string
// ProvideMapleFileClient creates the backend API client
// URL resolution priority: MAPLEFILE_MODE env var, then compile-time
// BuildMode, then a "production" default. Unknown modes fall back to the
// custom address stored in the config file.
// NOTE(review): Wire-generated copy; keep in sync with wire.go.
func ProvideMapleFileClient(configService config.ConfigService, logger *zap.Logger) (*client.Client, error) {
	ctx := context.Background()
	mode := os.Getenv("MAPLEFILE_MODE")
	logger.Info("Startup: checking mode configuration", zap.String("MAPLEFILE_MODE_env", mode), zap.String("BuildMode_compile_time", BuildMode))
	if mode == "" {
		if BuildMode != "" {
			mode = BuildMode
			logger.Info("Startup: using compile-time BuildMode", zap.String("mode", mode))
		} else {
			mode = "production" // Secure default when nothing is configured.
			logger.Info("Startup: no mode set, defaulting to production", zap.String("mode", mode))
		}
	}
	var baseURL string
	switch mode {
	case "production":
		baseURL = client.ProductionURL
	case "dev", "development":
		baseURL = client.LocalURL
	default:
		// Unknown mode: use the custom URL stored in the config file.
		cfg, err := configService.GetConfig(ctx)
		if err != nil {
			return nil, err
		}
		baseURL = cfg.CloudProviderAddress
	}
	// Adapter exposes the zap logger through the client.Logger interface.
	clientLogger := &zapLoggerAdapter{logger: logger.Named("api-client")}
	apiClient := client.New(client.Config{
		BaseURL: baseURL,
		Logger:  clientLogger,
	})
	logger.Info("MapleFile API client initialized", zap.String("mode", mode), zap.String("base_url", baseURL))
	// Plain HTTP is expected only for local development.
	if strings.HasPrefix(baseURL, "http://") {
		logger.Warn("SECURITY WARNING: Using unencrypted HTTP connection", zap.String("mode", mode), zap.String("base_url", baseURL), zap.String("recommendation", "This should only be used for local development"))
	}
	// Persist the resolved URL except in production, where it is immutable.
	if mode != "production" {
		if err := configService.SetCloudProviderAddress(ctx, baseURL); err != nil {
			logger.Warn("Failed to update cloud provider address in config", zap.Error(err))
		}
	}
	return apiClient, nil
}