Initial commit: Open sourcing all of the Maple Open Technologies code.

This commit is contained in:
Bartlomiej Mika 2025-12-02 14:33:08 -05:00
commit 755d54a99d
2010 changed files with 448675 additions and 0 deletions

View file

@ -0,0 +1,33 @@
package cache
import (
"context"
"fmt"
"codeberg.org/mapleopentech/monorepo/cloud/maplepress-backend/config"
"github.com/redis/go-redis/v9"
"go.uber.org/zap"
)
// ProvideRedisClient creates a new Redis client and verifies connectivity
// with a PING before returning it.
//
// Returns a connected client, or an error if the initial PING fails; in the
// failure case the client is closed so its connection pool is not leaked.
func ProvideRedisClient(cfg *config.Config, logger *zap.Logger) (*redis.Client, error) {
	logger.Info("connecting to Redis",
		zap.String("host", cfg.Cache.Host),
		zap.Int("port", cfg.Cache.Port))

	client := redis.NewClient(&redis.Options{
		Addr:     fmt.Sprintf("%s:%d", cfg.Cache.Host, cfg.Cache.Port),
		Password: cfg.Cache.Password,
		DB:       cfg.Cache.DB,
	})

	// Test connection up-front so dependency wiring fails fast on bad config.
	ctx := context.Background()
	if err := client.Ping(ctx).Err(); err != nil {
		// Fix: close the client on failure so the pool is not leaked.
		_ = client.Close()
		return nil, fmt.Errorf("failed to connect to Redis: %w", err)
	}

	logger.Info("successfully connected to Redis")
	return client, nil
}

View file

@ -0,0 +1,121 @@
// File Path: monorepo/cloud/maplepress-backend/pkg/storage/database/cassandra/cassandra.go
package database
import (
"fmt"
"strings"
"time"
"codeberg.org/mapleopentech/monorepo/cloud/maplepress-backend/config"
"github.com/gocql/gocql"
"go.uber.org/zap"
)
// gocqlLogger adapts a zap logger to gocql's Logger interface while
// filtering out noisy "Found invalid peer" warnings emitted by Cassandra
// gossip (harmless artifacts of Docker networking).
type gocqlLogger struct {
	logger *zap.Logger
}

// emit applies the shared filter and logs everything else at debug level.
// Fix: the filter was previously triplicated across Print/Printf/Println;
// centralizing it keeps the three methods consistent.
func (l *gocqlLogger) emit(msg string) {
	// Filter out noisy "invalid peer" warnings from Cassandra gossip.
	if strings.Contains(msg, "Found invalid peer") {
		return
	}
	l.logger.Debug(msg)
}

// Print implements gocql's Logger interface.
func (l *gocqlLogger) Print(v ...interface{}) {
	l.emit(fmt.Sprint(v...))
}

// Printf implements gocql's Logger interface.
func (l *gocqlLogger) Printf(format string, v ...interface{}) {
	l.emit(fmt.Sprintf(format, v...))
}

// Println implements gocql's Logger interface.
func (l *gocqlLogger) Println(v ...interface{}) {
	l.emit(fmt.Sprintln(v...))
}
// ProvideCassandraSession creates a new Cassandra session from application
// configuration and returns it once the cluster connection is established.
//
// Hosts, keyspace and consistency come from cfg; protocol version, timeouts
// and connection count are fixed here.
func ProvideCassandraSession(cfg *config.Config, logger *zap.Logger) (*gocql.Session, error) {
	logger.Info("⏳ Connecting to Cassandra...",
		zap.Strings("hosts", cfg.Database.Hosts),
		zap.String("keyspace", cfg.Database.Keyspace))
	// Create cluster configuration
	cluster := gocql.NewCluster(cfg.Database.Hosts...)
	cluster.Keyspace = cfg.Database.Keyspace
	cluster.Consistency = parseConsistency(cfg.Database.Consistency)
	cluster.ProtoVersion = 4
	cluster.ConnectTimeout = 10 * time.Second
	cluster.Timeout = 10 * time.Second
	cluster.NumConns = 2 // two connections per host
	// Set custom logger to filter out noisy "invalid peer" warnings
	cluster.Logger = &gocqlLogger{logger: logger.Named("gocql")}
	// Retry policy: up to 3 retries with exponential backoff between 1s and 10s.
	cluster.RetryPolicy = &gocql.ExponentialBackoffRetryPolicy{
		NumRetries: 3,
		Min:        1 * time.Second,
		Max:        10 * time.Second,
	}
	// Create session (this performs the actual network connection).
	session, err := cluster.CreateSession()
	if err != nil {
		return nil, fmt.Errorf("failed to connect to Cassandra: %w", err)
	}
	logger.Info("✓ Cassandra connected",
		zap.String("consistency", cfg.Database.Consistency),
		zap.Int("connections", cluster.NumConns))
	return session, nil
}
// parseConsistency converts a string consistency level to gocql.Consistency.
//
// Matching is case-insensitive (e.g. "quorum" and "QUORUM" are equivalent),
// which generalizes the previous exact-uppercase matching while remaining
// backward compatible. Unrecognized or empty input falls back to QUORUM.
func parseConsistency(consistency string) gocql.Consistency {
	switch strings.ToUpper(consistency) {
	case "ANY":
		return gocql.Any
	case "ONE":
		return gocql.One
	case "TWO":
		return gocql.Two
	case "THREE":
		return gocql.Three
	case "QUORUM":
		return gocql.Quorum
	case "ALL":
		return gocql.All
	case "LOCAL_QUORUM":
		return gocql.LocalQuorum
	case "EACH_QUORUM":
		return gocql.EachQuorum
	case "LOCAL_ONE":
		return gocql.LocalOne
	default:
		return gocql.Quorum // Default to QUORUM
	}
}

View file

@ -0,0 +1,199 @@
package database
import (
"fmt"
"github.com/gocql/gocql"
"github.com/golang-migrate/migrate/v4"
_ "github.com/golang-migrate/migrate/v4/database/cassandra"
_ "github.com/golang-migrate/migrate/v4/source/file"
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplepress-backend/config"
)
// silentGocqlLogger satisfies gocql's Logger interface while discarding
// every message, including the noisy "invalid peer" warnings that gocql
// emits under Docker networking.
type silentGocqlLogger struct{}

// Print discards the message.
func (*silentGocqlLogger) Print(_ ...interface{}) {}

// Printf discards the message.
func (*silentGocqlLogger) Printf(_ string, _ ...interface{}) {}

// Println discards the message.
func (*silentGocqlLogger) Println(_ ...interface{}) {}
// Migrator handles database schema migrations.
// This encapsulates all migration logic and makes it testable.
type Migrator struct {
	config *config.Config // source of hosts, keyspace, consistency, migrations path
	logger *zap.Logger    // never nil: NewMigrator substitutes a no-op logger
}
// NewMigrator creates a new migration manager.
// A nil logger is replaced with a no-op logger so older call sites that
// never passed one keep working.
func NewMigrator(cfg *config.Config, logger *zap.Logger) *Migrator {
	if logger == nil {
		logger = zap.NewNop()
	}
	migrator := &Migrator{config: cfg, logger: logger}
	return migrator
}
// Up runs all pending migrations with dirty state recovery.
//
// Order of operations:
//  1. ensure the keyspace exists (golang-migrate requires it up-front),
//  2. if the migration table reports a dirty state, force it back to the
//     recorded version so Up can proceed,
//  3. apply all pending migrations (ErrNoChange is treated as success).
func (m *Migrator) Up() error {
	// Ensure keyspace exists before running migrations
	m.logger.Debug("Ensuring keyspace exists...")
	if err := m.ensureKeyspaceExists(); err != nil {
		return fmt.Errorf("failed to ensure keyspace exists: %w", err)
	}
	m.logger.Debug("Creating migrator...")
	migrateInstance, err := m.createMigrate()
	if err != nil {
		return fmt.Errorf("failed to create migrator: %w", err)
	}
	defer migrateInstance.Close()
	m.logger.Debug("Checking migration version...")
	// ErrNilVersion just means no migration has run yet; not an error here.
	version, dirty, err := migrateInstance.Version()
	if err != nil && err != migrate.ErrNilVersion {
		return fmt.Errorf("failed to get migration version: %w", err)
	}
	if dirty {
		// A dirty flag means a previous migration was interrupted mid-run.
		// Forcing the recorded version clears the flag; the interrupted
		// migration itself is NOT rolled back here.
		m.logger.Warn("Database is in dirty state, attempting to force clean state",
			zap.Uint("version", uint(version)))
		if err := migrateInstance.Force(int(version)); err != nil {
			return fmt.Errorf("failed to force clean migration state: %w", err)
		}
	}
	// Run migrations; ErrNoChange means everything was already applied.
	if err := migrateInstance.Up(); err != nil && err != migrate.ErrNoChange {
		return fmt.Errorf("failed to run migrations: %w", err)
	}
	// Get final version purely for logging; failures here are non-fatal.
	finalVersion, _, err := migrateInstance.Version()
	if err != nil && err != migrate.ErrNilVersion {
		m.logger.Warn("Could not get final migration version", zap.Error(err))
	} else if err != migrate.ErrNilVersion {
		m.logger.Debug("Database migrations completed successfully",
			zap.Uint("version", uint(finalVersion)))
	} else {
		m.logger.Debug("Database migrations completed successfully (no migrations applied)")
	}
	return nil
}
// Down rolls back the last migration.
// Useful for development and rollback scenarios.
func (m *Migrator) Down() error {
	migrateInstance, err := m.createMigrate()
	if err != nil {
		return fmt.Errorf("failed to create migrator: %w", err)
	}
	defer migrateInstance.Close()
	// Steps(-1) undoes exactly one migration (the most recent).
	if err := migrateInstance.Steps(-1); err != nil {
		return fmt.Errorf("failed to rollback migration: %w", err)
	}
	return nil
}
// Version returns the current migration version together with the dirty
// flag, as reported by the underlying migrate instance.
func (m *Migrator) Version() (uint, bool, error) {
	mi, err := m.createMigrate()
	if err != nil {
		return 0, false, fmt.Errorf("failed to create migrator: %w", err)
	}
	defer mi.Close()
	return mi.Version()
}
// ForceVersion forces the migration version (useful for fixing dirty states).
func (m *Migrator) ForceVersion(version int) error {
	mi, err := m.createMigrate()
	if err != nil {
		return fmt.Errorf("failed to create migrator: %w", err)
	}
	defer mi.Close()
	if forceErr := mi.Force(version); forceErr != nil {
		return fmt.Errorf("failed to force version %d: %w", version, forceErr)
	}
	m.logger.Info("Successfully forced migration version", zap.Int("version", version))
	return nil
}
// createMigrate creates a migrate instance with proper configuration.
//
// Only the first configured host is used — golang-migrate needs a single
// coordinator. Returns an error (rather than panicking) when no hosts are
// configured.
func (m *Migrator) createMigrate() (*migrate.Migrate, error) {
	// Set global gocql logger to suppress "invalid peer" warnings.
	// This affects the internal gocql connections used by golang-migrate.
	gocql.Logger = &silentGocqlLogger{}
	// Fix: the original indexed Hosts[0] unconditionally, which panics on
	// an empty host list.
	if len(m.config.Database.Hosts) == 0 {
		return nil, fmt.Errorf("no Cassandra hosts configured")
	}
	// Build Cassandra connection string.
	// Format: cassandra://host:port/keyspace?consistency=level
	databaseURL := fmt.Sprintf("cassandra://%s/%s?consistency=%s",
		m.config.Database.Hosts[0], // Use first host for migrations
		m.config.Database.Keyspace,
		m.config.Database.Consistency,
	)
	// Create migrate instance
	migrateInstance, err := migrate.New(m.config.Database.MigrationsPath, databaseURL)
	if err != nil {
		return nil, fmt.Errorf("failed to initialize migrate: %w", err)
	}
	return migrateInstance, nil
}
// ensureKeyspaceExists creates the keyspace if it doesn't exist.
// This must be done before running migrations since golang-migrate requires the keyspace to exist.
//
// NOTE(review): the port (9042) and the replication datacenter name
// ("datacenter1") are hard-coded here — confirm they match the deployed
// cluster topology. The keyspace name is interpolated directly into the CQL
// (keyspace names cannot be bound parameters); it comes from application
// config, which is assumed trusted.
func (m *Migrator) ensureKeyspaceExists() error {
	// Create cluster configuration without keyspace — we connect at the
	// system level in order to create our own keyspace.
	cluster := gocql.NewCluster(m.config.Database.Hosts...)
	cluster.Port = 9042
	cluster.Consistency = gocql.Quorum
	cluster.ProtoVersion = 4
	// Suppress noisy "invalid peer" warnings from gocql
	// (harmless Docker networking artifacts).
	cluster.Logger = &silentGocqlLogger{}
	// Create session to system keyspace
	session, err := cluster.CreateSession()
	if err != nil {
		return fmt.Errorf("failed to connect to Cassandra: %w", err)
	}
	defer session.Close()
	// Create keyspace if it doesn't exist
	replicationFactor := m.config.Database.Replication
	createKeyspaceQuery := fmt.Sprintf(`
CREATE KEYSPACE IF NOT EXISTS %s
WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1': %d}
AND durable_writes = true
`, m.config.Database.Keyspace, replicationFactor)
	m.logger.Debug("Creating keyspace if it doesn't exist",
		zap.String("keyspace", m.config.Database.Keyspace))
	if err := session.Query(createKeyspaceQuery).Exec(); err != nil {
		return fmt.Errorf("failed to create keyspace: %w", err)
	}
	m.logger.Debug("Keyspace is ready", zap.String("keyspace", m.config.Database.Keyspace))
	return nil
}

View file

@ -0,0 +1,54 @@
package s3
// S3ObjectStorageConfigurationProvider exposes the settings needed to
// connect to an S3 bucket: credentials, endpoint, region, bucket name and
// the default object visibility.
type S3ObjectStorageConfigurationProvider interface {
	GetAccessKey() string    // access key ID
	GetSecretKey() string    // secret access key
	GetEndpoint() string     // endpoint URL
	GetRegion() string       // bucket region
	GetBucketName() string   // target bucket name
	GetIsPublicBucket() bool // whether objects default to public visibility
}
// s3ObjectStorageConfigurationProviderImpl is the immutable value-holder
// implementation of S3ObjectStorageConfigurationProvider; all fields are
// set once by the constructor and exposed via getters.
type s3ObjectStorageConfigurationProviderImpl struct {
	accessKey      string
	secretKey      string
	endpoint       string
	region         string
	bucketName     string
	isPublicBucket bool
}
// NewS3ObjectStorageConfigurationProvider builds a configuration provider
// from the given S3 connection settings.
func NewS3ObjectStorageConfigurationProvider(accessKey, secretKey, endpoint, region, bucketName string, isPublicBucket bool) S3ObjectStorageConfigurationProvider {
	cfg := s3ObjectStorageConfigurationProviderImpl{
		accessKey:      accessKey,
		secretKey:      secretKey,
		endpoint:       endpoint,
		region:         region,
		bucketName:     bucketName,
		isPublicBucket: isPublicBucket,
	}
	return &cfg
}
// GetAccessKey returns the configured access key ID.
func (s *s3ObjectStorageConfigurationProviderImpl) GetAccessKey() string {
	return s.accessKey
}

// GetSecretKey returns the configured secret access key.
func (s *s3ObjectStorageConfigurationProviderImpl) GetSecretKey() string {
	return s.secretKey
}

// GetEndpoint returns the configured endpoint URL.
func (s *s3ObjectStorageConfigurationProviderImpl) GetEndpoint() string {
	return s.endpoint
}

// GetRegion returns the configured bucket region.
func (s *s3ObjectStorageConfigurationProviderImpl) GetRegion() string {
	return s.region
}

// GetBucketName returns the configured bucket name.
func (s *s3ObjectStorageConfigurationProviderImpl) GetBucketName() string {
	return s.bucketName
}

// GetIsPublicBucket reports whether objects default to public visibility.
func (s *s3ObjectStorageConfigurationProviderImpl) GetIsPublicBucket() bool {
	return s.isPublicBucket
}

View file

@ -0,0 +1,23 @@
package s3
import (
"go.uber.org/zap"
"codeberg.org/mapleopentech/monorepo/cloud/maplepress-backend/config"
)
// ProvideS3ObjectStorage provides an S3 object storage instance wired from
// the application's AWS configuration. The bucket is treated as private by
// default; callers that need public objects use the *WithVisibility methods.
func ProvideS3ObjectStorage(cfg *config.Config, logger *zap.Logger) S3ObjectStorage {
	// Create configuration provider from the AWS section of the app config.
	configProvider := NewS3ObjectStorageConfigurationProvider(
		cfg.AWS.AccessKey,
		cfg.AWS.SecretKey,
		cfg.AWS.Endpoint,
		cfg.AWS.Region,
		cfg.AWS.BucketName,
		false, // Default to private bucket
	)
	// Return new S3 storage instance (connects and verifies the bucket).
	return NewObjectStorage(configProvider, logger)
}

View file

@ -0,0 +1,508 @@
// monorepo/cloud/maplefileapps-backend/pkg/storage/object/s3/s3.go
package s3
import (
"bytes"
"context"
"errors"
"io"
"mime/multipart"
"os"
"strings"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/aws/smithy-go"
"go.uber.org/zap"
)
// ACL constants for public and private objects.
// These are the S3 canned-ACL string values applied on upload/copy.
const (
	ACLPrivate    = "private"
	ACLPublicRead = "public-read"
)
// S3ObjectStorage is the bucket-scoped storage API used by the backend:
// uploads (byte slices or multipart files, with optional per-call
// visibility), copy/move, delete, download, listing, presigned URLs, and
// existence/size checks. All operations target the bucket the instance was
// constructed with.
type S3ObjectStorage interface {
	UploadContent(ctx context.Context, objectKey string, content []byte) error
	UploadContentWithVisibility(ctx context.Context, objectKey string, content []byte, isPublic bool) error
	UploadContentFromMulipart(ctx context.Context, objectKey string, file multipart.File) error
	UploadContentFromMulipartWithVisibility(ctx context.Context, objectKey string, file multipart.File, isPublic bool) error
	BucketExists(ctx context.Context, bucketName string) (bool, error)
	DeleteByKeys(ctx context.Context, key []string) error
	Cut(ctx context.Context, sourceObjectKey string, destinationObjectKey string) error
	CutWithVisibility(ctx context.Context, sourceObjectKey string, destinationObjectKey string, isPublic bool) error
	Copy(ctx context.Context, sourceObjectKey string, destinationObjectKey string) error
	CopyWithVisibility(ctx context.Context, sourceObjectKey string, destinationObjectKey string, isPublic bool) error
	GetBinaryData(ctx context.Context, objectKey string) (io.ReadCloser, error)
	DownloadToLocalfile(ctx context.Context, objectKey string, filePath string) (string, error)
	ListAllObjects(ctx context.Context) (*s3.ListObjectsOutput, error)
	FindMatchingObjectKey(s3Objects *s3.ListObjectsOutput, partialKey string) string
	IsPublicBucket() bool
	// GeneratePresignedUploadURL creates a presigned URL for uploading objects
	GeneratePresignedUploadURL(ctx context.Context, key string, duration time.Duration) (string, error)
	GetDownloadablePresignedURL(ctx context.Context, key string, duration time.Duration) (string, error)
	ObjectExists(ctx context.Context, key string) (bool, error)
	GetObjectSize(ctx context.Context, key string) (int64, error)
}
// s3ObjectStorage is the AWS SDK v2-backed implementation of S3ObjectStorage.
type s3ObjectStorage struct {
	S3Client      *s3.Client        // low-level S3 API client
	PresignClient *s3.PresignClient // used for presigned upload/download URLs
	Logger        *zap.Logger       // named "s3-object-storage" by NewObjectStorage
	BucketName    string            // bucket all operations target
	IsPublic      bool              // default object visibility for uploads/copies
}
// NewObjectStorage connects to a specific S3 bucket instance and returns a connected
// instance structure.
//
// On any setup failure (config load, bucket check, missing bucket) this
// calls logger.Fatal and terminates the process — the wire-based dependency
// graph has no error channel for providers, so failing fast at startup is
// deliberate.
//
// NOTE(review): EndpointResolverWithOptionsFunc is the older SDK v2
// endpoint mechanism — confirm against the pinned aws-sdk-go-v2 version
// before migrating to BaseEndpoint.
func NewObjectStorage(s3Config S3ObjectStorageConfigurationProvider, logger *zap.Logger) S3ObjectStorage {
	logger = logger.Named("s3-object-storage")
	// DEVELOPERS NOTE:
	// How can I use the AWS SDK v2 for Go with DigitalOcean Spaces? via https://stackoverflow.com/a/74284205
	logger.Info("⏳ Connecting to S3-compatible storage...",
		zap.String("endpoint", s3Config.GetEndpoint()),
		zap.String("bucket", s3Config.GetBucketName()),
		zap.String("region", s3Config.GetRegion()))
	// STEP 1: initialize the custom `endpoint` we will connect to.
	customResolver := aws.EndpointResolverWithOptionsFunc(func(service, region string, options ...any) (aws.Endpoint, error) {
		return aws.Endpoint{
			URL: s3Config.GetEndpoint(),
		}, nil
	})
	// STEP 2: Configure region, endpoint and static credentials.
	sdkConfig, err := config.LoadDefaultConfig(
		context.TODO(), config.WithRegion(s3Config.GetRegion()),
		config.WithEndpointResolverWithOptions(customResolver),
		config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(s3Config.GetAccessKey(), s3Config.GetSecretKey(), "")),
	)
	if err != nil {
		logger.Fatal("S3ObjectStorage failed loading default config", zap.Error(err)) // We need to crash the program at start to satisfy google wire requirement of having no errors.
	}
	// STEP 3: Load up s3 instance.
	s3Client := s3.NewFromConfig(sdkConfig)
	// Create our storage handler.
	s3Storage := &s3ObjectStorage{
		S3Client:      s3Client,
		PresignClient: s3.NewPresignClient(s3Client),
		Logger:        logger,
		BucketName:    s3Config.GetBucketName(),
		IsPublic:      s3Config.GetIsPublicBucket(),
	}
	logger.Debug("Verifying bucket exists...")
	// STEP 4: Connect to the s3 bucket instance and confirm that bucket exists.
	doesExist, err := s3Storage.BucketExists(context.TODO(), s3Config.GetBucketName())
	if err != nil {
		logger.Fatal("S3ObjectStorage failed checking if bucket exists",
			zap.String("bucket", s3Config.GetBucketName()),
			zap.Error(err)) // We need to crash the program at start to satisfy google wire requirement of having no errors.
	}
	if !doesExist {
		logger.Fatal("S3ObjectStorage failed - bucket does not exist",
			zap.String("bucket", s3Config.GetBucketName())) // We need to crash the program at start to satisfy google wire requirement of having no errors.
	}
	logger.Info("✓ S3-compatible storage connected",
		zap.String("bucket", s3Config.GetBucketName()),
		zap.Bool("public", s3Config.GetIsPublicBucket()))
	// Return our s3 storage handler.
	return s3Storage
}
// IsPublicBucket returns whether the bucket is configured as public by default.
// This default is used by UploadContent, Cut and Copy when the caller does
// not pick a visibility explicitly.
func (s *s3ObjectStorage) IsPublicBucket() bool {
	return s.IsPublic
}
// UploadContent uploads content using the default bucket visibility setting
// (see IsPublicBucket); delegates to UploadContentWithVisibility.
func (s *s3ObjectStorage) UploadContent(ctx context.Context, objectKey string, content []byte) error {
	return s.UploadContentWithVisibility(ctx, objectKey, content, s.IsPublic)
}
// UploadContentWithVisibility uploads content with specified visibility
// (public or private), applying the corresponding canned ACL to the object.
// Returns the raw SDK error on failure.
func (s *s3ObjectStorage) UploadContentWithVisibility(ctx context.Context, objectKey string, content []byte, isPublic bool) error {
	acl := ACLPrivate
	if isPublic {
		acl = ACLPublicRead
	}
	s.Logger.Debug("Uploading content with visibility",
		zap.String("objectKey", objectKey),
		zap.Bool("isPublic", isPublic),
		zap.String("acl", acl))
	_, err := s.S3Client.PutObject(ctx, &s3.PutObjectInput{
		Bucket: aws.String(s.BucketName),
		Key:    aws.String(objectKey),
		Body:   bytes.NewReader(content),
		ACL:    types.ObjectCannedACL(acl),
	})
	if err != nil {
		// Fix: use the typed zap.Error field instead of zap.Any("error", err),
		// consistent with the other methods in this file.
		s.Logger.Error("Failed to upload content",
			zap.String("objectKey", objectKey),
			zap.Bool("isPublic", isPublic),
			zap.Error(err))
		return err
	}
	return nil
}
// UploadContentFromMulipart uploads file using the default bucket visibility setting.
// NOTE(review): "Mulipart" (sic) is misspelled, but the name is part of the
// S3ObjectStorage interface — renaming would break callers.
func (s *s3ObjectStorage) UploadContentFromMulipart(ctx context.Context, objectKey string, file multipart.File) error {
	return s.UploadContentFromMulipartWithVisibility(ctx, objectKey, file, s.IsPublic)
}
// UploadContentFromMulipartWithVisibility uploads a multipart file with
// specified visibility, applying the corresponding canned ACL.
// Returns the raw SDK error on failure.
func (s *s3ObjectStorage) UploadContentFromMulipartWithVisibility(ctx context.Context, objectKey string, file multipart.File, isPublic bool) error {
	acl := ACLPrivate
	if isPublic {
		acl = ACLPublicRead
	}
	s.Logger.Debug("Uploading multipart file with visibility",
		zap.String("objectKey", objectKey),
		zap.Bool("isPublic", isPublic),
		zap.String("acl", acl))
	// Create the S3 upload input parameters
	params := &s3.PutObjectInput{
		Bucket: aws.String(s.BucketName),
		Key:    aws.String(objectKey),
		Body:   file,
		ACL:    types.ObjectCannedACL(acl),
	}
	// Perform the file upload to S3
	_, err := s.S3Client.PutObject(ctx, params)
	if err != nil {
		// Fix: use the typed zap.Error field instead of zap.Any("error", err),
		// consistent with the other methods in this file.
		s.Logger.Error("Failed to upload multipart file",
			zap.String("objectKey", objectKey),
			zap.Bool("isPublic", isPublic),
			zap.Error(err))
		return err
	}
	return nil
}
// BucketExists reports whether the named bucket exists and is reachable,
// using HeadBucket.
//
// Returns (false, nil) when the bucket is confirmed absent, (true, nil)
// when it exists, and (true, err) for any other failure — the caller cannot
// distinguish "exists" from "forbidden/unreachable" in that last case.
func (s *s3ObjectStorage) BucketExists(ctx context.Context, bucketName string) (bool, error) {
	// Note: https://docs.aws.amazon.com/code-library/latest/ug/go_2_s3_code_examples.html#actions
	_, err := s.S3Client.HeadBucket(ctx, &s3.HeadBucketInput{
		Bucket: aws.String(bucketName),
	})
	if err == nil {
		return true, nil
	}
	// errors.As directly against the concrete NotFound type replaces the
	// original smithy.APIError + type-switch indirection.
	var notFound *types.NotFound
	if errors.As(err, &notFound) {
		s.Logger.Debug("Bucket is available", zap.String("bucket", bucketName))
		return false, nil
	}
	// Fix: non-API errors (e.g. network failures) were previously returned
	// without being logged; log all unexpected errors uniformly.
	s.Logger.Error("Either you don't have access to bucket or another error occurred",
		zap.String("bucket", bucketName),
		zap.Error(err))
	return true, err
}
// GetDownloadablePresignedURL returns a presigned GET URL for the given key,
// valid for the given duration. The Content-Disposition is set to
// "attachment" so browsers download the file directly.
func (s *s3ObjectStorage) GetDownloadablePresignedURL(ctx context.Context, key string, duration time.Duration) (string, error) {
	// DEVELOPERS NOTE:
	// AWS S3 Bucket — presigned URL APIs with Go (2022) via https://ronen-niv.medium.com/aws-s3-handling-presigned-urls-2718ab247d57
	// Fix: use the caller-supplied ctx; the original ignored it and called
	// context.Background(), defeating caller cancellation/timeouts.
	presignedUrl, err := s.PresignClient.PresignGetObject(ctx,
		&s3.GetObjectInput{
			Bucket:                     aws.String(s.BucketName),
			Key:                        aws.String(key),
			ResponseContentDisposition: aws.String("attachment"), // This field allows the file to download it directly from your browser
		},
		s3.WithPresignExpires(duration))
	if err != nil {
		return "", err
	}
	return presignedUrl.URL, nil
}
// DeleteByKeys removes the given objects from the bucket in a single
// DeleteObjects call, bounded by a 15-second timeout. Returns the SDK error
// (also logged) on failure.
func (s *s3ObjectStorage) DeleteByKeys(ctx context.Context, objectKeys []string) error {
	// Fix: short-circuit on an empty key list — a DeleteObjects request
	// with no objects is malformed.
	if len(objectKeys) == 0 {
		return nil
	}
	ctx, cancel := context.WithTimeout(ctx, 15*time.Second)
	defer cancel()
	// Pre-size the identifier slice; the final length is known.
	objectIds := make([]types.ObjectIdentifier, 0, len(objectKeys))
	for _, key := range objectKeys {
		objectIds = append(objectIds, types.ObjectIdentifier{Key: aws.String(key)})
	}
	_, err := s.S3Client.DeleteObjects(ctx, &s3.DeleteObjectsInput{
		Bucket: aws.String(s.BucketName),
		Delete: &types.Delete{Objects: objectIds},
	})
	if err != nil {
		s.Logger.Error("Couldn't delete objects from bucket",
			zap.String("bucket", s.BucketName),
			zap.Error(err))
	}
	return err
}
// Cut moves a file using the default bucket visibility setting
// (see IsPublicBucket); delegates to CutWithVisibility.
func (s *s3ObjectStorage) Cut(ctx context.Context, sourceObjectKey string, destinationObjectKey string) error {
	return s.CutWithVisibility(ctx, sourceObjectKey, destinationObjectKey, s.IsPublic)
}
// CutWithVisibility moves a file with specified visibility: copy to the
// destination key, then delete the source. Not atomic — if the delete
// fails, the object exists at both keys.
func (s *s3ObjectStorage) CutWithVisibility(ctx context.Context, sourceObjectKey string, destinationObjectKey string, isPublic bool) error {
	ctx, cancel := context.WithTimeout(ctx, 60*time.Second) // Extended timeout: server-side copies of large objects can run longer than usual.
	defer cancel()
	// First copy the object with the desired visibility
	if err := s.CopyWithVisibility(ctx, sourceObjectKey, destinationObjectKey, isPublic); err != nil {
		return err
	}
	// Delete the original object
	_, deleteErr := s.S3Client.DeleteObject(ctx, &s3.DeleteObjectInput{
		Bucket: aws.String(s.BucketName),
		Key:    aws.String(sourceObjectKey),
	})
	if deleteErr != nil {
		s.Logger.Error("Failed to delete original object:", zap.Any("deleteErr", deleteErr))
		return deleteErr
	}
	s.Logger.Debug("Original object deleted.")
	return nil
}
// Copy copies a file using the default bucket visibility setting
// (see IsPublicBucket); delegates to CopyWithVisibility.
func (s *s3ObjectStorage) Copy(ctx context.Context, sourceObjectKey string, destinationObjectKey string) error {
	return s.CopyWithVisibility(ctx, sourceObjectKey, destinationObjectKey, s.IsPublic)
}
// CopyWithVisibility copies an object to a new key within the same bucket,
// applying the requested canned ACL (public-read or private) on the
// destination object.
func (s *s3ObjectStorage) CopyWithVisibility(ctx context.Context, sourceObjectKey string, destinationObjectKey string, isPublic bool) error {
	// Extended timeout: server-side copies of large objects can run well
	// past the usual request budget.
	ctx, cancel := context.WithTimeout(ctx, 60*time.Second)
	defer cancel()

	visibility := ACLPrivate
	if isPublic {
		visibility = ACLPublicRead
	}

	s.Logger.Debug("Copying object with visibility",
		zap.String("sourceKey", sourceObjectKey),
		zap.String("destinationKey", destinationObjectKey),
		zap.Bool("isPublic", isPublic),
		zap.String("acl", visibility))

	copyInput := &s3.CopyObjectInput{
		Bucket:     aws.String(s.BucketName),
		CopySource: aws.String(s.BucketName + "/" + sourceObjectKey),
		Key:        aws.String(destinationObjectKey),
		ACL:        types.ObjectCannedACL(visibility),
	}
	if _, copyErr := s.S3Client.CopyObject(ctx, copyInput); copyErr != nil {
		s.Logger.Error("Failed to copy object:",
			zap.String("sourceKey", sourceObjectKey),
			zap.String("destinationKey", destinationObjectKey),
			zap.Bool("isPublic", isPublic),
			zap.Any("copyErr", copyErr))
		return copyErr
	}
	s.Logger.Debug("Object copied successfully.")
	return nil
}
// GetBinaryData returns a streaming reader over the stored object's bytes
// for the given key. The caller is responsible for closing the returned
// reader.
func (s *s3ObjectStorage) GetBinaryData(ctx context.Context, objectKey string) (io.ReadCloser, error) {
	s3object, err := s.S3Client.GetObject(ctx, &s3.GetObjectInput{
		Bucket: aws.String(s.BucketName),
		Key:    aws.String(objectKey),
	})
	if err != nil {
		return nil, err
	}
	return s3object.Body, nil
}
// DownloadToLocalfile streams the object at objectKey into a local file at
// filePath, creating (or truncating) the file. Returns filePath along with
// any error; on error, a partially written file may remain at filePath.
func (s *s3ObjectStorage) DownloadToLocalfile(ctx context.Context, objectKey string, filePath string) (string, error) {
	body, err := s.GetBinaryData(ctx, objectKey)
	if err != nil {
		return filePath, err
	}
	// Fix: the response body was never closed, leaking the underlying
	// HTTP connection on every download.
	defer body.Close()
	out, err := os.Create(filePath)
	if err != nil {
		return filePath, err
	}
	defer out.Close()
	if _, err := io.Copy(out, body); err != nil {
		// Fix: return filePath consistently (the original returned "" only
		// on this branch); callers must check err before using the path.
		return filePath, err
	}
	return filePath, nil
}
// ListAllObjects returns the bucket's object listing via the ListObjects API.
// NOTE(review): this is a single ListObjects (v1) call — results beyond the
// first page are not fetched here.
func (s *s3ObjectStorage) ListAllObjects(ctx context.Context) (*s3.ListObjectsOutput, error) {
	listInput := &s3.ListObjectsInput{Bucket: aws.String(s.BucketName)}
	return s.S3Client.ListObjects(ctx, listInput)
}
// FindMatchingObjectKey iterates over the listed S3 objects and returns the
// first actual key that contains partialKey as a substring, or "" when no
// object matches.
func (s *s3ObjectStorage) FindMatchingObjectKey(s3Objects *s3.ListObjectsOutput, partialKey string) string {
	for _, obj := range s3Objects.Contents {
		// A substring match means we have found the ACTUAL KEY in the
		// s3 objects inside the bucket.
		// Fix: dropped the non-idiomatic `match == true` comparison.
		if strings.Contains(*obj.Key, partialKey) {
			return *obj.Key
		}
	}
	return ""
}
// GeneratePresignedUploadURL creates a presigned URL for uploading objects
// to S3, valid for the given duration. No ACL is attached to the request,
// so clients need not send an x-amz-acl header and uploaded objects inherit
// the bucket's default privacy settings.
func (s *s3ObjectStorage) GeneratePresignedUploadURL(ctx context.Context, key string, duration time.Duration) (string, error) {
	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()

	req := &s3.PutObjectInput{
		Bucket: aws.String(s.BucketName),
		Key:    aws.String(key),
	}
	signed, err := s.PresignClient.PresignPutObject(ctx, req, s3.WithPresignExpires(duration))
	if err != nil {
		s.Logger.Error("Failed to generate presigned upload URL",
			zap.String("key", key),
			zap.Duration("duration", duration),
			zap.Error(err))
		return "", err
	}
	s.Logger.Debug("Generated presigned upload URL",
		zap.String("key", key),
		zap.Duration("duration", duration))
	return signed.URL, nil
}
// ObjectExists checks if an object exists at the given key using HeadObject.
//
// Returns (true, nil) when the object exists, (false, nil) when S3 reports
// it absent (NotFound/NoSuchKey), and (false, err) on any other failure.
func (s *s3ObjectStorage) ObjectExists(ctx context.Context, key string) (bool, error) {
	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()
	_, err := s.S3Client.HeadObject(ctx, &s3.HeadObjectInput{
		Bucket: aws.String(s.BucketName),
		Key:    aws.String(key),
	})
	if err == nil {
		s.Logger.Debug("Object exists",
			zap.String("key", key))
		return true, nil
	}
	// errors.As against the concrete "not found" types replaces the original
	// smithy.APIError + type-switch; it also removes the duplicated
	// error-logging branches for API vs non-API errors.
	var notFound *types.NotFound
	var noSuchKey *types.NoSuchKey
	if errors.As(err, &notFound) || errors.As(err, &noSuchKey) {
		s.Logger.Debug("Object does not exist",
			zap.String("key", key))
		return false, nil
	}
	s.Logger.Error("Error checking object existence",
		zap.String("key", key),
		zap.Error(err))
	return false, err
}
// GetObjectSize returns the size in bytes of the object at the given key
// using HeadObject.
//
// Returns a generic "object not found" error when S3 reports the key absent
// (NotFound/NoSuchKey), and the raw SDK error for any other failure.
func (s *s3ObjectStorage) GetObjectSize(ctx context.Context, key string) (int64, error) {
	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()
	result, err := s.S3Client.HeadObject(ctx, &s3.HeadObjectInput{
		Bucket: aws.String(s.BucketName),
		Key:    aws.String(key),
	})
	if err != nil {
		// errors.As against the concrete "not found" types replaces the
		// original smithy.APIError + type-switch and the duplicated
		// error-logging branches.
		var notFound *types.NotFound
		var noSuchKey *types.NoSuchKey
		if errors.As(err, &notFound) || errors.As(err, &noSuchKey) {
			s.Logger.Debug("Object not found when getting size",
				zap.String("key", key))
			return 0, errors.New("object not found")
		}
		s.Logger.Error("Error getting object size",
			zap.String("key", key),
			zap.Error(err))
		return 0, err
	}
	// aws.ToInt64 safely dereferences the *int64 ContentLength (nil -> 0).
	size := aws.ToInt64(result.ContentLength)
	s.Logger.Debug("Retrieved object size",
		zap.String("key", key),
		zap.Int64("size", size))
	return size, nil
}