gitignore fix + missing storage management
All checks were successful
ci/woodpecker/push/bff Pipeline was successful
ci/woodpecker/push/billing_fees Pipeline was successful
ci/woodpecker/push/db Pipeline was successful
ci/woodpecker/push/chain_gateway Pipeline was successful
ci/woodpecker/push/fx/1 Pipeline was successful
ci/woodpecker/push/fx/2 Pipeline was successful
ci/woodpecker/push/nats Pipeline was successful
ci/woodpecker/push/ledger Pipeline was successful
ci/woodpecker/push/notification Pipeline was successful
ci/woodpecker/push/payments_orchestrator Pipeline was successful
All checks were successful
ci/woodpecker/push/bff Pipeline was successful
ci/woodpecker/push/billing_fees Pipeline was successful
ci/woodpecker/push/db Pipeline was successful
ci/woodpecker/push/chain_gateway Pipeline was successful
ci/woodpecker/push/fx/1 Pipeline was successful
ci/woodpecker/push/fx/2 Pipeline was successful
ci/woodpecker/push/nats Pipeline was successful
ci/woodpecker/push/ledger Pipeline was successful
ci/woodpecker/push/notification Pipeline was successful
ci/woodpecker/push/payments_orchestrator Pipeline was successful
This commit is contained in:
2
api/billing/fees/.gitignore
vendored
2
api/billing/fees/.gitignore
vendored
@@ -1,3 +1,3 @@
|
|||||||
internal/generated
|
internal/generated
|
||||||
.gocache
|
.gocache
|
||||||
app
|
/app
|
||||||
|
|||||||
6
api/server/.gitignore
vendored
6
api/server/.gitignore
vendored
@@ -1,3 +1,3 @@
|
|||||||
storage
|
/app
|
||||||
app
|
/server
|
||||||
server
|
/storage
|
||||||
|
|||||||
136
api/server/internal/server/fileserviceimp/storage/awss3.go
Normal file
136
api/server/internal/server/fileserviceimp/storage/awss3.go
Normal file
@@ -0,0 +1,136 @@
|
|||||||
|
package storage
|
||||||
|
|
||||||
|
import (
	"bytes"
	"context"
	"fmt"
	"io"
	"net/http"
	"os"
	"path/filepath"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/credentials"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/tech/sendico/pkg/api/http/response"
	"github.com/tech/sendico/pkg/mlogger"
	"github.com/tech/sendico/pkg/mservice"
	storageconfig "github.com/tech/sendico/server/internal/server/fileserviceimp/storage/config"
	"go.uber.org/zap"
)
|
||||||
|
|
||||||
|
// AWSS3Storage stores objects in a single AWS S3 bucket under a fixed
// key prefix (directory). It implements the FileManager interface.
type AWSS3Storage struct {
	logger     mlogger.Logger // named "aws_s3.<directory>" at construction
	s3Client   *s3.Client
	bucketName string // target bucket, resolved from env at construction
	directory  string // key prefix prepended to every object ID
	service    mservice.Type
}
|
||||||
|
|
||||||
|
func (storage *AWSS3Storage) Delete(ctx context.Context, objID string) error {
|
||||||
|
fullPath := filepath.Join(storage.directory, objID)
|
||||||
|
_, err := storage.s3Client.DeleteObject(ctx, &s3.DeleteObjectInput{
|
||||||
|
Bucket: aws.String(storage.bucketName),
|
||||||
|
Key: aws.String(fullPath),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
storage.logger.Warn("Failed to delete file from AWS S3", zap.Error(err), zap.String("obj_ref", objID))
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait for object to be deleted
|
||||||
|
waiter := s3.NewObjectNotExistsWaiter(storage.s3Client)
|
||||||
|
err = waiter.Wait(ctx, &s3.HeadObjectInput{
|
||||||
|
Bucket: aws.String(storage.bucketName),
|
||||||
|
Key: aws.String(fullPath),
|
||||||
|
}, 30) // 30 second timeout
|
||||||
|
if err != nil {
|
||||||
|
storage.logger.Warn("Error occurred while waiting for S3 file deletion", zap.Error(err), zap.String("obj_ref", objID))
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (storage *AWSS3Storage) s3URL(fullPath string) string {
|
||||||
|
return fmt.Sprintf("https://%s.s3.amazonaws.com/%s", storage.bucketName, fullPath)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (storage *AWSS3Storage) Save(ctx context.Context, file io.Reader, objID string) (string, error) {
|
||||||
|
fullPath := filepath.Join(storage.directory, objID)
|
||||||
|
buf := new(bytes.Buffer)
|
||||||
|
_, err := io.Copy(buf, file)
|
||||||
|
if err != nil {
|
||||||
|
storage.logger.Warn("Failed to read file content", zap.Error(err), zap.String("obj_ref", objID))
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = storage.s3Client.PutObject(ctx, &s3.PutObjectInput{
|
||||||
|
Bucket: aws.String(storage.bucketName),
|
||||||
|
Key: aws.String(fullPath),
|
||||||
|
Body: bytes.NewReader(buf.Bytes()),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
storage.logger.Warn("Failed to upload file to S3", zap.Error(err), zap.String("obj_ref", objID))
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
s3URL := storage.s3URL(fullPath)
|
||||||
|
storage.logger.Info("File upload complete", zap.String("obj_ref", objID), zap.String("s3_url", s3URL))
|
||||||
|
return s3URL, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (storage *AWSS3Storage) Get(ctx context.Context, objID string) http.HandlerFunc {
|
||||||
|
storage.logger.Warn("Indirect access to the object should be avoided", zap.String("obj_ref", objID))
|
||||||
|
fullPath := filepath.Join(storage.directory, objID)
|
||||||
|
_, err := storage.s3Client.GetObject(ctx, &s3.GetObjectInput{
|
||||||
|
Bucket: aws.String(storage.bucketName),
|
||||||
|
Key: aws.String(fullPath),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
storage.logger.Warn("Failed to get file from S3", zap.Error(err), zap.String("obj_ref", objID))
|
||||||
|
return response.NotFound(storage.logger, storage.service, err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
res := func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
http.Redirect(w, r, storage.s3URL(fullPath), http.StatusFound)
|
||||||
|
}
|
||||||
|
return res
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateAWSS3Storage builds an AWSS3Storage. Region, credentials and
// bucket name are read from the environment variables whose *names* are
// carried in cfg. Returns an error if the AWS config cannot be built.
func CreateAWSS3Storage(logger mlogger.Logger, service mservice.Type, directory string, cfg storageconfig.AWSS3SConfig) (*AWSS3Storage, error) {
	// Resolve settings from the env-var names carried by the config.
	region := os.Getenv(cfg.RegionEnv)
	accessKeyID := os.Getenv(cfg.AccessKeyIDEnv)
	secretAccessKey := os.Getenv(cfg.SecretAccessKeyEnv)
	bucketName := os.Getenv(cfg.BucketNameEnv)

	// Create AWS config with static credentials
	awsConfig, err := config.LoadDefaultConfig(context.Background(),
		config.WithRegion(region),
		config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(
			accessKeyID,
			secretAccessKey,
			"", // no session token for static credentials
		)),
	)
	if err != nil {
		// access_key_id is an identifier, not a secret; the secret key is
		// deliberately never logged.
		logger.Warn("Failed to create AWS config", zap.Error(err), zap.String("bucket", bucketName),
			zap.String("access_key_id", accessKeyID), zap.String("region", region))
		return nil, err
	}

	// Create S3 client
	s3Client := s3.NewFromConfig(awsConfig)

	res := &AWSS3Storage{
		logger:     logger.Named("aws_s3").Named(directory),
		s3Client:   s3Client,
		bucketName: bucketName,
		directory:  directory,
		service:    service,
	}
	res.logger.Info("Storage installed", zap.String("bucket", bucketName), zap.String("region", region),
		zap.String("access_key_id", accessKeyID))
	return res, nil
}
|
||||||
@@ -0,0 +1,8 @@
|
|||||||
|
package config

// AWSS3SConfig names the environment variables that hold AWS S3
// credentials and bucket settings. The values themselves are resolved
// from the environment when the storage is created, so secrets never
// live in the config file.
type AWSS3SConfig struct {
	AccessKeyIDEnv     string `mapstructure:"access_key_id_env" yaml:"access_key_id_env"`
	SecretAccessKeyEnv string `mapstructure:"secret_access_key_env" yaml:"secret_access_key_env"`
	RegionEnv          string `mapstructure:"region_env" yaml:"region_env"`
	BucketNameEnv      string `mapstructure:"bucket_name_env" yaml:"bucket_name_env"`
}
|
||||||
@@ -0,0 +1,5 @@
|
|||||||
|
package config

// LocalFSSConfig configures the local-filesystem storage driver.
type LocalFSSConfig struct {
	// RootPath is the directory under which per-storage subdirectories
	// are created.
	RootPath string `mapstructure:"root_path" yaml:"root_path"`
}
|
||||||
29
api/server/internal/server/fileserviceimp/storage/factory.go
Normal file
29
api/server/internal/server/fileserviceimp/storage/factory.go
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
package storage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/mitchellh/mapstructure"
|
||||||
|
"github.com/tech/sendico/pkg/merrors"
|
||||||
|
"github.com/tech/sendico/pkg/mlogger"
|
||||||
|
"github.com/tech/sendico/pkg/mservice"
|
||||||
|
"github.com/tech/sendico/server/interface/api"
|
||||||
|
fsc "github.com/tech/sendico/server/interface/services/fileservice/config"
|
||||||
|
"github.com/tech/sendico/server/internal/server/fileserviceimp/storage/config"
|
||||||
|
)
|
||||||
|
|
||||||
|
func Create(logger mlogger.Logger, a api.API, service mservice.Type, directory, subDir string) (FileManager, error) {
|
||||||
|
if a.Config().Storage.Driver == fsc.LocalFS {
|
||||||
|
var conf config.LocalFSSConfig
|
||||||
|
if err := mapstructure.Decode(a.Config().Storage.Settings, &conf); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return CreateLocalFileStorage(logger, service, directory, subDir, a.DomainProvider(), conf)
|
||||||
|
}
|
||||||
|
if a.Config().Storage.Driver == fsc.AwsS3 {
|
||||||
|
var conf config.AWSS3SConfig
|
||||||
|
if err := mapstructure.Decode(a.Config().Storage.Settings, &conf); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return CreateAWSS3Storage(logger, service, directory, conf)
|
||||||
|
}
|
||||||
|
return nil, merrors.Internal("Unknown storage driver: " + string(a.Config().Storage.Driver))
|
||||||
|
}
|
||||||
147
api/server/internal/server/fileserviceimp/storage/localfs.go
Normal file
147
api/server/internal/server/fileserviceimp/storage/localfs.go
Normal file
@@ -0,0 +1,147 @@
|
|||||||
|
package storage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
|
||||||
|
"github.com/tech/sendico/pkg/api/http/response"
|
||||||
|
"github.com/tech/sendico/pkg/domainprovider"
|
||||||
|
"github.com/tech/sendico/pkg/merrors"
|
||||||
|
"github.com/tech/sendico/pkg/mlogger"
|
||||||
|
"github.com/tech/sendico/pkg/mservice"
|
||||||
|
"github.com/tech/sendico/pkg/mutil/fr"
|
||||||
|
"github.com/tech/sendico/server/internal/server/fileserviceimp/storage/config"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
)
|
||||||
|
|
||||||
|
// LocalStorage keeps objects as plain files under storageDir and serves
// them through the application's own HTTP API. It implements the
// FileManager interface.
type LocalStorage struct {
	logger     mlogger.Logger
	storageDir string // absolute storage root: cfg.RootPath/directory
	subDir     string // extra path element used when building API links
	directory  string // logical storage name (also the FS subdirectory)
	dp         domainprovider.DomainProvider // builds public links for saved objects
	service    mservice.Type
}
|
||||||
|
|
||||||
|
func (storage *LocalStorage) Delete(ctx context.Context, objID string) error {
|
||||||
|
// Check if context is cancelled
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return ctx.Err()
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
|
||||||
|
filePath := filepath.Join(storage.storageDir, objID)
|
||||||
|
if err := os.Remove(filePath); err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
storage.logger.Debug("File not found", zap.String("obj_ref", objID))
|
||||||
|
return merrors.NoData("file_not_found")
|
||||||
|
}
|
||||||
|
storage.logger.Warn("Error occurred while accesing file", zap.Error(err), zap.String("storage", storage.storageDir), zap.String("obj_ref", objID))
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save writes the file content to storageDir/objID and returns the
// public API link for the object. Cancellation of ctx aborts the write
// and removes the partial file.
func (storage *LocalStorage) Save(ctx context.Context, file io.Reader, objID string) (string, error) {
	// Bail out early if the caller already cancelled.
	select {
	case <-ctx.Done():
		return "", ctx.Err()
	default:
	}

	// NOTE(review): objID is joined into the storage path unchecked — a
	// caller-supplied "../" could escape storageDir. Confirm objID is
	// validated/sanitized upstream.
	filePath := filepath.Join(storage.storageDir, objID)
	dst, err := os.Create(filePath)
	if err != nil {
		storage.logger.Warn("Error occurred while creating file", zap.Error(err), zap.String("storage", storage.storageDir), zap.String("obj_ref", objID))
		return "", err
	}
	defer fr.CloseFile(storage.logger, dst)

	// Use a goroutine to copy the file and monitor context cancellation.
	// The buffer of 1 guarantees the copier can always deliver its result
	// and exit, even if we stop listening after a cancellation.
	errCh := make(chan error, 1)
	go func() {
		_, err := io.Copy(dst, file)
		errCh <- err
	}()

	// Wait for either completion or context cancellation.
	select {
	case err := <-errCh:
		if err != nil {
			storage.logger.Warn("Error occurred while saving file", zap.Error(err), zap.String("obj_ref", objID))
			return "", err
		}
	case <-ctx.Done():
		// Context was cancelled, clean up the partial file.
		// NOTE(review): the copier goroutine may still be writing while
		// the deferred close and this Remove run — the copier's write will
		// just error out, but the ordering is racy; confirm acceptable.
		os.Remove(filePath)
		return "", ctx.Err()
	}

	// The public link is built by the domain provider, not from the FS path.
	return storage.dp.GetAPILink(storage.directory, storage.subDir, objID)
}
|
||||||
|
|
||||||
|
func (storage *LocalStorage) Get(ctx context.Context, objRef string) http.HandlerFunc {
|
||||||
|
// Check if context is cancelled
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return response.Internal(storage.logger, storage.service, ctx.Err())
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
|
||||||
|
filePath := filepath.Join(storage.storageDir, objRef)
|
||||||
|
if _, err := os.Stat(filePath); err != nil {
|
||||||
|
storage.logger.Warn("Failed to access file", zap.Error(err), zap.String("storage", storage.storageDir), zap.String("obj_ref", objRef))
|
||||||
|
return response.Internal(storage.logger, storage.service, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
res := func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
// Check if the request context is cancelled
|
||||||
|
select {
|
||||||
|
case <-r.Context().Done():
|
||||||
|
storage.logger.Warn("Request canceleed", zap.Error(r.Context().Err()), zap.String("obj_ref", objRef))
|
||||||
|
http.Error(w, "Request cancelled", http.StatusRequestTimeout)
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
http.ServeFile(w, r, filePath)
|
||||||
|
}
|
||||||
|
|
||||||
|
return res
|
||||||
|
}
|
||||||
|
|
||||||
|
func ensureDir(dirName string) error {
|
||||||
|
info, err := os.Stat(dirName)
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
return os.MkdirAll(dirName, 0o755)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !info.IsDir() {
|
||||||
|
return &os.PathError{Op: "mkdir", Path: dirName, Err: os.ErrExist}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateLocalFileStorage builds a LocalStorage rooted at
// cfg.RootPath/directory, creating that directory when missing.
// dp is used to turn saved object IDs into public API links.
func CreateLocalFileStorage(logger mlogger.Logger, service mservice.Type, directory, subDir string, dp domainprovider.DomainProvider, cfg config.LocalFSSConfig) (*LocalStorage, error) {
	dir := filepath.Join(cfg.RootPath, directory)
	if err := ensureDir(dir); err != nil {
		logger.Warn("Failed to check directory availability", zap.Error(err), zap.String("dir", dir))
		return nil, err
	}
	res := &LocalStorage{
		logger:     logger.Named("lfs").Named(directory),
		storageDir: dir,
		directory:  directory,
		subDir:     subDir,
		dp:         dp,
		service:    service,
	}
	res.logger.Info("Storage installed", zap.String("root_path", cfg.RootPath), zap.String("directory", directory))
	return res, nil
}
|
||||||
@@ -0,0 +1,544 @@
|
|||||||
|
package storage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptest"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
"github.com/tech/sendico/pkg/mservice"
|
||||||
|
"github.com/tech/sendico/server/internal/server/fileserviceimp/storage/config"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Mock domain provider for testing
|
||||||
|
// mockDomainProvider is a stub domain provider used by the tests: it
// builds deterministic links from the last path element only.
type mockDomainProvider struct{}

// lastElem returns the final element, or an error when none is given.
func lastElem(elems []string) (string, error) {
	if len(elems) == 0 {
		return "", fmt.Errorf("no link elements provided")
	}
	return elems[len(elems)-1], nil
}

func (m *mockDomainProvider) GetAPILink(linkElem ...string) (string, error) {
	last, err := lastElem(linkElem)
	if err != nil {
		return "", err
	}
	return "/api/v1/files/" + last, nil
}

func (m *mockDomainProvider) GetFullLink(linkElem ...string) (string, error) {
	last, err := lastElem(linkElem)
	if err != nil {
		return "", err
	}
	return "https://test.local/api/v1/files/" + last, nil
}
|
||||||
|
|
||||||
|
// setupTestStorage creates a LocalStorage over a fresh temporary
// directory. It returns the storage, the temp-dir path, and a cleanup
// function the caller must defer.
func setupTestStorage(t *testing.T) (*LocalStorage, string, func()) {
	// Create temporary directory for testing
	tempDir, err := os.MkdirTemp("", "storage_test")
	require.NoError(t, err)

	// No-op logger keeps test output quiet.
	logger := zap.NewNop()

	// Create storage wired to the mock domain provider.
	storage := &LocalStorage{
		logger:     logger.Named("lfs").Named("test"),
		storageDir: tempDir,
		subDir:     "test",
		directory:  "test",
		dp:         &mockDomainProvider{},
		service:    mservice.Storage,
	}

	// Return cleanup function
	cleanup := func() {
		os.RemoveAll(tempDir)
	}

	return storage, tempDir, cleanup
}

// setupBenchmarkStorage mirrors setupTestStorage for benchmarks.
// NOTE(review): near-duplicate of setupTestStorage — the two could be
// folded into one helper taking testing.TB.
func setupBenchmarkStorage(b *testing.B) (*LocalStorage, string, func()) {
	// Create temporary directory for testing
	tempDir, err := os.MkdirTemp("", "storage_bench")
	require.NoError(b, err)

	// Create logger
	logger := zap.NewNop()

	// Create storage
	storage := &LocalStorage{
		logger:     logger.Named("lfs").Named("test"),
		storageDir: tempDir,
		subDir:     "test",
		directory:  "test",
		dp:         &mockDomainProvider{},
		service:    mservice.Storage,
	}

	// Return cleanup function
	cleanup := func() {
		os.RemoveAll(tempDir)
	}

	return storage, tempDir, cleanup
}
|
||||||
|
|
||||||
|
// TestLocalStorage_Save verifies that Save writes the content to disk
// and returns the API link produced by the domain provider.
func TestLocalStorage_Save(t *testing.T) {
	storage, tempDir, cleanup := setupTestStorage(t)
	defer cleanup()

	tests := []struct {
		name    string
		content string
		objID   string
		wantURL string
		wantErr bool
	}{
		{
			name:    "save simple file",
			content: "test content",
			objID:   "test.txt",
			wantURL: "/api/v1/files/test.txt",
			wantErr: false,
		},
		{
			name:    "save with special characters",
			content: "special content",
			objID:   "test-file_123.txt",
			wantURL: "/api/v1/files/test-file_123.txt",
			wantErr: false,
		},
		{
			name:    "save empty file",
			content: "",
			objID:   "empty.txt",
			wantURL: "/api/v1/files/empty.txt",
			wantErr: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ctx := context.Background()
			reader := strings.NewReader(tt.content)

			url, err := storage.Save(ctx, reader, tt.objID)

			if tt.wantErr {
				assert.Error(t, err)
				return
			}

			assert.NoError(t, err)
			assert.Equal(t, tt.wantURL, url)

			// Verify file was actually saved
			filePath := filepath.Join(tempDir, tt.objID)
			content, err := os.ReadFile(filePath)
			assert.NoError(t, err)
			assert.Equal(t, tt.content, string(content))
		})
	}
}

// TestLocalStorage_Save_ContextCancellation: a pre-cancelled context
// must abort Save with context.Canceled before anything is written.
func TestLocalStorage_Save_ContextCancellation(t *testing.T) {
	storage, _, cleanup := setupTestStorage(t)
	defer cleanup()

	// Create a context that's already cancelled
	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	reader := strings.NewReader("test content")
	url, err := storage.Save(ctx, reader, "test.txt")

	assert.Error(t, err)
	assert.Equal(t, context.Canceled, err)
	assert.Empty(t, url)
}

// TestLocalStorage_Save_ContextTimeout: an expired deadline surfaces as
// context.DeadlineExceeded.
func TestLocalStorage_Save_ContextTimeout(t *testing.T) {
	storage, _, cleanup := setupTestStorage(t)
	defer cleanup()

	// Create a context with a very short timeout
	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
	defer cancel()

	// Wait a bit to ensure timeout
	time.Sleep(1 * time.Millisecond)

	reader := strings.NewReader("test content")
	url, err := storage.Save(ctx, reader, "test.txt")

	assert.Error(t, err)
	assert.Equal(t, context.DeadlineExceeded, err)
	assert.Empty(t, url)
}
|
||||||
|
|
||||||
|
// TestLocalStorage_Delete checks removal of existing files and the
// error path for a missing file.
func TestLocalStorage_Delete(t *testing.T) {
	storage, tempDir, cleanup := setupTestStorage(t)
	defer cleanup()

	// Create a test file
	testFile := filepath.Join(tempDir, "test.txt")
	err := os.WriteFile(testFile, []byte("test content"), 0o644)
	require.NoError(t, err)

	tests := []struct {
		name    string
		objID   string
		wantErr bool
	}{
		{
			name:    "delete existing file",
			objID:   "test.txt",
			wantErr: false,
		},
		{
			name:    "delete non-existent file",
			objID:   "nonexistent.txt",
			wantErr: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ctx := context.Background()
			err := storage.Delete(ctx, tt.objID)

			if tt.wantErr {
				assert.Error(t, err)
				return
			}

			assert.NoError(t, err)

			// Verify file was actually deleted
			filePath := filepath.Join(tempDir, tt.objID)
			_, err = os.Stat(filePath)
			assert.True(t, os.IsNotExist(err))
		})
	}
}

// TestLocalStorage_Delete_ContextCancellation: a cancelled context must
// abort Delete and leave the file intact.
func TestLocalStorage_Delete_ContextCancellation(t *testing.T) {
	storage, tempDir, cleanup := setupTestStorage(t)
	defer cleanup()

	// Create a test file
	testFile := filepath.Join(tempDir, "test.txt")
	err := os.WriteFile(testFile, []byte("test content"), 0o644)
	require.NoError(t, err)

	// Create a context that's already cancelled
	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	err = storage.Delete(ctx, "test.txt")

	assert.Error(t, err)
	assert.Equal(t, context.Canceled, err)

	// File should still exist since operation was cancelled
	_, err = os.Stat(testFile)
	assert.NoError(t, err)
}
|
||||||
|
|
||||||
|
// TestLocalStorage_Get serves an existing file and checks the error
// status for a missing one.
// NOTE(review): a missing file answers 500 here, while the S3 backend
// answers 404 for the same case — confirm the asymmetry is intended.
func TestLocalStorage_Get(t *testing.T) {
	storage, tempDir, cleanup := setupTestStorage(t)
	defer cleanup()

	// Create a test file
	testContent := "test file content"
	testFile := filepath.Join(tempDir, "test.txt")
	err := os.WriteFile(testFile, []byte(testContent), 0o644)
	require.NoError(t, err)

	tests := []struct {
		name           string
		objID          string
		wantStatusCode int
		wantContent    string
	}{
		{
			name:           "get existing file",
			objID:          "test.txt",
			wantStatusCode: http.StatusOK,
			wantContent:    testContent,
		},
		{
			name:           "get non-existent file",
			objID:          "nonexistent.txt",
			wantStatusCode: http.StatusInternalServerError,
			wantContent:    "",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ctx := context.Background()
			handler := storage.Get(ctx, tt.objID)

			// Create test request
			req := httptest.NewRequest("GET", "/files/"+tt.objID, nil)
			w := httptest.NewRecorder()

			handler.ServeHTTP(w, req)

			assert.Equal(t, tt.wantStatusCode, w.Code)
			if tt.wantContent != "" {
				assert.Equal(t, tt.wantContent, w.Body.String())
			}
		})
	}
}

// TestLocalStorage_Get_ContextCancellation: a cancelled creation-time
// context yields an internal-error handler.
func TestLocalStorage_Get_ContextCancellation(t *testing.T) {
	storage, tempDir, cleanup := setupTestStorage(t)
	defer cleanup()

	// Create a test file
	testFile := filepath.Join(tempDir, "test.txt")
	err := os.WriteFile(testFile, []byte("test content"), 0o644)
	require.NoError(t, err)

	// Create a context that's already cancelled
	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	handler := storage.Get(ctx, "test.txt")

	// Create test request
	req := httptest.NewRequest("GET", "/files/test.txt", nil)
	w := httptest.NewRecorder()

	handler.ServeHTTP(w, req)

	assert.Equal(t, http.StatusInternalServerError, w.Code)
}

// TestLocalStorage_Get_RequestContextCancellation: a cancelled
// *request* context makes the handler answer 408 with "Request
// cancelled" in the body.
func TestLocalStorage_Get_RequestContextCancellation(t *testing.T) {
	storage, tempDir, cleanup := setupTestStorage(t)
	defer cleanup()

	// Create a test file
	testFile := filepath.Join(tempDir, "test.txt")
	err := os.WriteFile(testFile, []byte("test content"), 0o644)
	require.NoError(t, err)

	ctx := context.Background()
	handler := storage.Get(ctx, "test.txt")

	// Create test request with cancelled context
	req := httptest.NewRequest("GET", "/files/test.txt", nil)
	reqCtx, cancel := context.WithCancel(req.Context())
	req = req.WithContext(reqCtx)
	cancel() // Cancel the request context

	w := httptest.NewRecorder()

	handler.ServeHTTP(w, req)

	assert.Equal(t, http.StatusRequestTimeout, w.Code)
	assert.Contains(t, w.Body.String(), "Request cancelled")
}
|
||||||
|
|
||||||
|
// TestCreateLocalFileStorage: the constructor creates the per-storage
// subdirectory and wires the fields as given.
func TestCreateLocalFileStorage(t *testing.T) {
	// Create temporary directory for testing
	tempDir, err := os.MkdirTemp("", "storage_test")
	require.NoError(t, err)
	defer os.RemoveAll(tempDir)

	logger := zap.NewNop()
	cfg := config.LocalFSSConfig{
		RootPath: tempDir,
	}

	storage, err := CreateLocalFileStorage(logger, mservice.Storage, "test", "sub", &mockDomainProvider{}, cfg)

	assert.NoError(t, err)
	assert.NotNil(t, storage)
	assert.Equal(t, filepath.Join(tempDir, "test"), storage.storageDir)
	assert.Equal(t, "test", storage.directory)
	assert.Equal(t, "sub", storage.subDir)
}

// TestCreateLocalFileStorage_InvalidPath: an uncreatable root makes the
// constructor fail and return a nil storage.
// NOTE(review): MkdirAll may succeed on this path when the test runs as
// root — the "should fail" assumption depends on filesystem permissions.
func TestCreateLocalFileStorage_InvalidPath(t *testing.T) {
	logger := zap.NewNop()
	cfg := config.LocalFSSConfig{
		RootPath: "/invalid/path/that/does/not/exist/and/should/fail",
	}

	storage, err := CreateLocalFileStorage(logger, mservice.Storage, "test", "sub", &mockDomainProvider{}, cfg)

	assert.Error(t, err)
	assert.Nil(t, storage)
}
|
||||||
|
|
||||||
|
// TestLocalStorage_ConcurrentOperations exercises parallel Save and
// Delete calls on distinct object IDs (no shared files, so no expected
// contention — this is a smoke test for data races under -race).
func TestLocalStorage_ConcurrentOperations(t *testing.T) {
	storage, tempDir, cleanup := setupTestStorage(t)
	defer cleanup()

	// Test concurrent saves
	t.Run("concurrent saves", func(t *testing.T) {
		const numGoroutines = 10
		errCh := make(chan error, numGoroutines)

		for i := 0; i < numGoroutines; i++ {
			go func(id int) {
				ctx := context.Background()
				content := strings.NewReader(fmt.Sprintf("content %d", id))
				_, err := storage.Save(ctx, content, fmt.Sprintf("file_%d.txt", id))
				errCh <- err
			}(i)
		}

		// Collect results
		for i := 0; i < numGoroutines; i++ {
			err := <-errCh
			assert.NoError(t, err)
		}

		// Verify all files were created
		for i := 0; i < numGoroutines; i++ {
			filePath := filepath.Join(tempDir, fmt.Sprintf("file_%d.txt", i))
			_, err := os.Stat(filePath)
			assert.NoError(t, err)
		}
	})

	// Test concurrent deletes
	t.Run("concurrent deletes", func(t *testing.T) {
		// Create files to delete
		for i := 0; i < 5; i++ {
			filePath := filepath.Join(tempDir, fmt.Sprintf("delete_%d.txt", i))
			err := os.WriteFile(filePath, []byte("content"), 0o644)
			require.NoError(t, err)
		}

		const numGoroutines = 5
		errCh := make(chan error, numGoroutines)

		for i := 0; i < numGoroutines; i++ {
			go func(id int) {
				ctx := context.Background()
				err := storage.Delete(ctx, fmt.Sprintf("delete_%d.txt", id))
				errCh <- err
			}(i)
		}

		// Collect results
		for i := 0; i < numGoroutines; i++ {
			err := <-errCh
			assert.NoError(t, err)
		}

		// Verify all files were deleted
		for i := 0; i < numGoroutines; i++ {
			filePath := filepath.Join(tempDir, fmt.Sprintf("delete_%d.txt", i))
			_, err := os.Stat(filePath)
			assert.True(t, os.IsNotExist(err))
		}
	})
}
|
||||||
|
|
||||||
|
// TestLocalStorage_LargeFile saves a 1 MiB payload and checks the
// on-disk size matches.
func TestLocalStorage_LargeFile(t *testing.T) {
	storage, tempDir, cleanup := setupTestStorage(t)
	defer cleanup()

	// Create a large content (1MB)
	largeContent := strings.Repeat("a", 1024*1024)
	reader := strings.NewReader(largeContent)

	ctx := context.Background()
	url, err := storage.Save(ctx, reader, "large.txt")

	assert.NoError(t, err)
	assert.Equal(t, "/api/v1/files/large.txt", url)

	// Verify file size
	filePath := filepath.Join(tempDir, "large.txt")
	info, err := os.Stat(filePath)
	assert.NoError(t, err)
	assert.Equal(t, int64(1024*1024), info.Size())
}

// TestLocalStorage_SpecialCharacters saves files whose names carry
// shell/URL-hostile characters and verifies they land on disk.
func TestLocalStorage_SpecialCharacters(t *testing.T) {
	storage, tempDir, cleanup := setupTestStorage(t)
	defer cleanup()

	// Test with special characters in filename
	specialNames := []string{
		"file with spaces.txt",
		"file-with-dashes.txt",
		"file_with_underscores.txt",
		"file.with.dots.txt",
		"file(1).txt",
		"file[1].txt",
		"file{1}.txt",
		"file@#$%.txt",
	}

	for _, name := range specialNames {
		t.Run("special characters: "+name, func(t *testing.T) {
			ctx := context.Background()
			content := strings.NewReader("test content")
			url, err := storage.Save(ctx, content, name)

			assert.NoError(t, err)
			assert.Equal(t, "/api/v1/files/"+name, url)

			// Verify file exists
			filePath := filepath.Join(tempDir, name)
			_, err = os.Stat(filePath)
			assert.NoError(t, err)
		})
	}
}
|
||||||
|
|
||||||
|
// Benchmark tests
|
||||||
|
// Benchmark tests
// BenchmarkLocalStorage_Save measures Save throughput on a ~13KB
// payload; the reader is reset each iteration so every Save reads the
// full content.
func BenchmarkLocalStorage_Save(b *testing.B) {
	storage, _, cleanup := setupBenchmarkStorage(b)
	defer cleanup()

	content := strings.Repeat("test content ", 1000) // ~13KB
	reader := strings.NewReader(content)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		ctx := context.Background()
		_, err := storage.Save(ctx, reader, fmt.Sprintf("bench_%d.txt", i))
		if err != nil {
			b.Fatal(err)
		}
		reader.Reset(content)
	}
}

// BenchmarkLocalStorage_Delete measures Delete on pre-created files;
// file creation happens before ResetTimer so it is not measured.
func BenchmarkLocalStorage_Delete(b *testing.B) {
	storage, tempDir, cleanup := setupBenchmarkStorage(b)
	defer cleanup()

	// Pre-create files for deletion
	for i := 0; i < b.N; i++ {
		filePath := filepath.Join(tempDir, fmt.Sprintf("bench_delete_%d.txt", i))
		err := os.WriteFile(filePath, []byte("content"), 0o644)
		if err != nil {
			b.Fatal(err)
		}
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		ctx := context.Background()
		err := storage.Delete(ctx, fmt.Sprintf("bench_delete_%d.txt", i))
		if err != nil {
			b.Fatal(err)
		}
	}
}
|
||||||
13
api/server/internal/server/fileserviceimp/storage/storage.go
Normal file
13
api/server/internal/server/fileserviceimp/storage/storage.go
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
package storage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
// FileManager abstracts object storage for the file service: save a
// stream under an ID, serve it over HTTP, delete it. Implemented by
// LocalStorage and AWSS3Storage.
type FileManager interface {
	// Save stores the file content under objID and returns a public URL.
	Save(ctx context.Context, file io.Reader, objID string) (string, error)
	// Get returns a handler serving the object, or an error-responding
	// handler when the object is unavailable.
	Get(ctx context.Context, objID string) http.HandlerFunc
	// Delete removes the object identified by objID.
	Delete(ctx context.Context, objID string) error
}
|
||||||
Reference in New Issue
Block a user