Initial commit: Open sourcing all of the Maple Open Technologies code.

This commit is contained in:
Bartlomiej Mika 2025-12-02 14:33:08 -05:00
commit 755d54a99d
2010 changed files with 448675 additions and 0 deletions

View file

@ -0,0 +1,372 @@
// cloud/maplefile-backend/internal/maplefile/service/dashboard/get_dashboard.go
package dashboard
import (
"context"
"encoding/base64"
"math"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/config/constants"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/storagedailyusage"
dom_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/domain/user"
file_service "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/service/file"
uc_collection "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/collection"
uc_filemetadata "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/filemetadata"
uc_storagedailyusage "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/storagedailyusage"
uc_user "codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/internal/usecase/user"
"codeberg.org/mapleopentech/monorepo/cloud/maplefile-backend/pkg/httperror"
"github.com/gocql/gocql"
)
// GetDashboardService assembles the per-user dashboard payload (summary
// counts, storage trend, recent files, and collection keys) for the user
// identified in the request context.
type GetDashboardService interface {
// Execute builds and returns the dashboard for the authenticated user
// taken from ctx (constants.SessionUserID).
Execute(ctx context.Context) (*GetDashboardResponseDTO, error)
}
// getDashboardServiceImpl is the default GetDashboardService implementation.
// It aggregates several use cases so the dashboard can be served from a
// single call.
type getDashboardServiceImpl struct {
config *config.Configuration
logger *zap.Logger
listRecentFilesService file_service.ListRecentFilesService // supplies the "recent files" panel
userGetByIDUseCase uc_user.UserGetByIDUseCase
countUserFilesUseCase uc_filemetadata.CountUserFilesUseCase
countUserFoldersUseCase uc_collection.CountUserFoldersUseCase // folders only, not albums (see Execute STEP 5)
getStorageTrendUseCase uc_storagedailyusage.GetStorageDailyUsageTrendUseCase
getCollectionUseCase uc_collection.GetCollectionUseCase // used to fetch encrypted collection keys
}
// NewGetDashboardService wires up a GetDashboardService from its
// dependencies. The supplied logger is scoped with a service name so all
// emitted entries are attributable to this service.
func NewGetDashboardService(
config *config.Configuration,
logger *zap.Logger,
listRecentFilesService file_service.ListRecentFilesService,
userGetByIDUseCase uc_user.UserGetByIDUseCase,
countUserFilesUseCase uc_filemetadata.CountUserFilesUseCase,
countUserFoldersUseCase uc_collection.CountUserFoldersUseCase,
getStorageTrendUseCase uc_storagedailyusage.GetStorageDailyUsageTrendUseCase,
getCollectionUseCase uc_collection.GetCollectionUseCase,
) GetDashboardService {
return &getDashboardServiceImpl{
config:                  config,
logger:                  logger.Named("GetDashboardService"),
listRecentFilesService:  listRecentFilesService,
userGetByIDUseCase:      userGetByIDUseCase,
countUserFilesUseCase:   countUserFilesUseCase,
countUserFoldersUseCase: countUserFoldersUseCase,
getStorageTrendUseCase:  getStorageTrendUseCase,
getCollectionUseCase:    getCollectionUseCase,
}
}
// Execute assembles the dashboard payload for the authenticated user: file and
// folder counts, a 7-day storage-usage trend, the five most recent files, and
// the encrypted collection keys clients need to decrypt those files locally.
//
// Failure policy: user lookup and count failures abort the request; trend and
// recent-file failures are degraded gracefully (empty data) so one flaky
// subsystem cannot take down the whole dashboard.
func (svc *getDashboardServiceImpl) Execute(ctx context.Context) (*GetDashboardResponseDTO, error) {
	//
	// STEP 1: Get user ID from context
	//
	userID, ok := ctx.Value(constants.SessionUserID).(gocql.UUID)
	if !ok {
		svc.logger.Error("Failed getting user ID from context")
		return nil, httperror.NewForInternalServerErrorWithSingleField("message", "Authentication context error")
	}
	//
	// STEP 2: Validation
	//
	// BUGFIX: gocql.UUID.String() always returns a fully-formatted UUID (the
	// zero value renders as "00000000-0000-0000-0000-000000000000"), so the
	// previous `userID.String() == ""` check could never fire. Compare against
	// the zero-value UUID instead so an unset ID is actually rejected.
	e := make(map[string]string)
	if userID == (gocql.UUID{}) {
		e["user_id"] = "User ID is required"
	}
	if len(e) != 0 {
		svc.logger.Warn("Failed validating get dashboard",
			zap.Any("error", e))
		return nil, httperror.NewForBadRequest(&e)
	}
	//
	// STEP 3: Get user information for storage data
	//
	user, err := svc.userGetByIDUseCase.Execute(ctx, userID)
	if err != nil {
		svc.logger.Error("Failed to get user for dashboard",
			zap.String("user_id", userID.String()),
			zap.Error(err))
		return nil, err
	}
	if user == nil {
		svc.logger.Warn("User not found for dashboard",
			zap.String("user_id", userID.String()))
		return nil, httperror.NewForNotFoundWithSingleField("user_id", "User not found")
	}
	//
	// STEP 4: Get file count
	//
	fileCountResp, err := svc.countUserFilesUseCase.Execute(ctx, userID)
	if err != nil {
		svc.logger.Error("Failed to count user files for dashboard",
			zap.String("user_id", userID.String()),
			zap.Error(err))
		return nil, err
	}
	//
	// STEP 5: Get folder count (folders only, not albums)
	//
	folderCountResp, err := svc.countUserFoldersUseCase.Execute(ctx, userID)
	if err != nil {
		svc.logger.Error("Failed to count user folders for dashboard",
			zap.String("user_id", userID.String()),
			zap.Error(err))
		return nil, err
	}
	// Debug logging for folder count
	svc.logger.Debug("Folder count debug info",
		zap.String("user_id", userID.String()),
		zap.Int("total_folders_returned", folderCountResp.TotalFolders))
	//
	// STEP 6: Get storage usage trend (last 7 days)
	//
	trendReq := &uc_storagedailyusage.GetStorageDailyUsageTrendRequest{
		UserID:      userID,
		TrendPeriod: "7days",
	}
	storageTrend, err := svc.getStorageTrendUseCase.Execute(ctx, trendReq)
	if err != nil {
		svc.logger.Warn("Failed to get storage trend for dashboard, using empty trend",
			zap.String("user_id", userID.String()),
			zap.Error(err))
		// Don't fail the entire dashboard for trend data
		storageTrend = nil
	}
	//
	// STEP 7: Get recent files using the working Recent Files Service
	//
	var recentFiles []file_service.RecentFileResponseDTO
	recentFilesResp, err := svc.listRecentFilesService.Execute(ctx, nil, 5)
	if err != nil {
		svc.logger.Warn("Failed to get recent files for dashboard, using empty list",
			zap.String("user_id", userID.String()),
			zap.Error(err))
		// Don't fail the entire dashboard for recent files
		recentFiles = []file_service.RecentFileResponseDTO{}
	} else {
		recentFiles = recentFilesResp.Files
	}
	//
	// STEP 8: Fetch collection keys for recent files
	// This allows clients to decrypt file metadata without making additional API calls
	//
	collectionKeys := svc.fetchCollectionKeysForFiles(ctx, recentFiles)
	//
	// STEP 9: Build dashboard response
	//
	dashboard := &DashboardDataDTO{
		Summary:           svc.buildSummary(user, fileCountResp.TotalFiles, folderCountResp.TotalFolders, storageTrend), // Pass storageTrend to calculate actual storage
		StorageUsageTrend: svc.buildStorageUsageTrend(storageTrend),
		RecentFiles:       recentFiles,
		CollectionKeys:    collectionKeys,
	}
	response := &GetDashboardResponseDTO{
		Dashboard: dashboard,
		Success:   true,
		Message:   "Dashboard data retrieved successfully",
	}
	svc.logger.Info("Dashboard data retrieved successfully",
		zap.String("user_id", userID.String()),
		zap.Int("total_files", fileCountResp.TotalFiles),
		zap.Int("total_folders", folderCountResp.TotalFolders),
		zap.Int("recent_files_count", len(recentFiles)))
	return response, nil
}
// buildSummary assembles the dashboard summary card: file/folder counts plus
// storage usage derived from the most recent entry in the daily-usage trend.
//
// NOTE(review): the user parameter is currently unused here and the 10GB
// limit is hard-coded — presumably the limit should eventually come from the
// user's plan; confirm before relying on it.
func (svc *getDashboardServiceImpl) buildSummary(user *dom_user.User, totalFiles, totalFolders int, storageTrend *storagedailyusage.StorageUsageTrend) SummaryDTO {
	var usedBytes int64 // stays 0 when no trend data is available

	if storageTrend == nil {
		svc.logger.Debug("Storage trend is nil")
	} else {
		svc.logger.Debug("Storage trend received in buildSummary",
			zap.Int("daily_usages_count", len(storageTrend.DailyUsages)),
			zap.Int64("total_added", storageTrend.TotalAdded),
			zap.Int64("net_change", storageTrend.NetChange))

		if n := len(storageTrend.DailyUsages); n == 0 {
			svc.logger.Debug("No daily usage entries found in storage trend")
		} else {
			// DailyUsages is sorted, so the last element is the latest day.
			latest := storageTrend.DailyUsages[n-1]
			usedBytes = latest.TotalBytes

			// Deletion events can push the tracked total below zero (edge
			// case in storage tracking); never report negative usage.
			if usedBytes < 0 {
				svc.logger.Warn("Storage used bytes is negative, resetting to 0",
					zap.Int64("negative_value", usedBytes),
					zap.Time("usage_day", latest.UsageDay))
				usedBytes = 0
			}

			svc.logger.Debug("Using storage from most recent day",
				zap.Time("usage_day", latest.UsageDay),
				zap.Int64("total_bytes", latest.TotalBytes),
				zap.Int64("total_add_bytes", latest.TotalAddBytes),
				zap.Int64("total_remove_bytes", latest.TotalRemoveBytes))
		}
	}

	const limitBytes int64 = 10 * 1024 * 1024 * 1024 // 10GB default limit

	usedAmount := svc.convertBytesToStorageAmount(usedBytes)
	limitAmount := svc.convertBytesToStorageAmount(limitBytes)

	// Round to the nearest whole percent, but never display 0% when there is
	// any real usage.
	percent := 0
	if limitBytes > 0 {
		percent = int(math.Round(float64(usedBytes) / float64(limitBytes) * 100))
		if percent == 0 && usedBytes > 0 {
			percent = 1
		}
	}

	svc.logger.Debug("Storage calculation debug",
		zap.Int64("storage_used_bytes", usedBytes),
		zap.Int64("storage_limit_bytes", limitBytes),
		zap.Int("calculated_percentage", percent))

	return SummaryDTO{
		TotalFiles:             totalFiles,
		TotalFolders:           totalFolders,
		StorageUsed:            usedAmount,
		StorageLimit:           limitAmount,
		StorageUsagePercentage: percent,
	}
}
// buildStorageUsageTrend converts the raw daily-usage trend into the DTO the
// dashboard expects: one data point per day, each with a formatted date and a
// human-readable storage amount. A nil or empty trend yields an empty (but
// non-nil) data-point list so the JSON encodes as [] rather than null.
func (svc *getDashboardServiceImpl) buildStorageUsageTrend(trend *storagedailyusage.StorageUsageTrend) StorageUsageTrendDTO {
	result := StorageUsageTrendDTO{
		Period:     "Last 7 days",
		DataPoints: []DataPointDTO{},
	}
	if trend == nil {
		return result
	}
	for _, daily := range trend.DailyUsages {
		result.DataPoints = append(result.DataPoints, DataPointDTO{
			Date:  daily.UsageDay.Format("2006-01-02"),
			Usage: svc.convertBytesToStorageAmount(daily.TotalBytes),
		})
	}
	return result
}
// convertBytesToStorageAmount renders a raw byte count as a value/unit pair
// using binary (1024-based) units, picking the largest unit whose threshold
// the count reaches. Counts below 1 KB (including zero) are reported in "B".
func (svc *getDashboardServiceImpl) convertBytesToStorageAmount(bytes int64) StorageAmountDTO {
	// Thresholds ordered largest-first so the first match is the right unit.
	units := []struct {
		threshold int64
		label     string
	}{
		{1 << 40, "TB"},
		{1 << 30, "GB"},
		{1 << 20, "MB"},
		{1 << 10, "KB"},
	}
	for _, u := range units {
		if bytes >= u.threshold {
			return StorageAmountDTO{
				Value: float64(bytes) / float64(u.threshold),
				Unit:  u.label,
			}
		}
	}
	return StorageAmountDTO{
		Value: float64(bytes),
		Unit:  "B",
	}
}
// fetchCollectionKeysForFiles fetches the encrypted collection keys for the
// collections referenced by the recent files. This allows clients to decrypt
// file metadata without making additional API calls for each collection.
//
// Collections that fail to load, are missing, or have no encrypted key are
// skipped (logged, not fatal), so the returned slice may contain fewer
// entries than there are unique collections. Returns nil when files is empty.
// NOTE: the result order follows Go map iteration and is not deterministic.
func (svc *getDashboardServiceImpl) fetchCollectionKeysForFiles(ctx context.Context, files []file_service.RecentFileResponseDTO) []CollectionKeyDTO {
	if len(files) == 0 {
		return nil
	}
	// Collect unique collection IDs from the files. A plain map assignment
	// deduplicates on its own — no need for a separate existence check
	// (the previous if/exists dance did two lookups per file for no benefit).
	collectionIDSet := make(map[string]gocql.UUID, len(files))
	for _, f := range files {
		collectionIDSet[f.CollectionID.String()] = f.CollectionID
	}
	// Fetch each unique collection and extract its encrypted key.
	collectionKeys := make([]CollectionKeyDTO, 0, len(collectionIDSet))
	for collectionIDStr, collectionID := range collectionIDSet {
		collection, err := svc.getCollectionUseCase.Execute(ctx, collectionID)
		if err != nil {
			svc.logger.Warn("Failed to fetch collection for dashboard collection keys",
				zap.String("collection_id", collectionIDStr),
				zap.Error(err))
			continue
		}
		if collection == nil {
			svc.logger.Warn("Collection not found for dashboard collection keys",
				zap.String("collection_id", collectionIDStr))
			continue
		}
		// Only include if we have the encrypted collection key.
		if collection.EncryptedCollectionKey != nil && len(collection.EncryptedCollectionKey.Ciphertext) > 0 {
			collectionKeys = append(collectionKeys, CollectionKeyDTO{
				CollectionID:                collectionIDStr,
				EncryptedCollectionKey:      base64.StdEncoding.EncodeToString(collection.EncryptedCollectionKey.Ciphertext),
				EncryptedCollectionKeyNonce: base64.StdEncoding.EncodeToString(collection.EncryptedCollectionKey.Nonce),
			})
		}
	}
	svc.logger.Debug("Fetched collection keys for dashboard",
		zap.Int("unique_collections", len(collectionIDSet)),
		zap.Int("keys_returned", len(collectionKeys)))
	return collectionKeys
}