callbacks service draft
This commit is contained in:
28
api/edge/callbacks/internal/appversion/version.go
Normal file
28
api/edge/callbacks/internal/appversion/version.go
Normal file
@@ -0,0 +1,28 @@
|
||||
package appversion
|
||||
|
||||
import (
|
||||
"github.com/tech/sendico/pkg/version"
|
||||
vf "github.com/tech/sendico/pkg/version/factory"
|
||||
)
|
||||
|
||||
// Build information. Populated at build-time.
|
||||
var (
|
||||
Version string
|
||||
Revision string
|
||||
Branch string
|
||||
BuildUser string
|
||||
BuildDate string
|
||||
)
|
||||
|
||||
func Create() version.Printer {
|
||||
info := version.Info{
|
||||
Program: "Sendico Edge Callbacks Service",
|
||||
Revision: Revision,
|
||||
Branch: Branch,
|
||||
BuildUser: BuildUser,
|
||||
BuildDate: BuildDate,
|
||||
Version: Version,
|
||||
}
|
||||
|
||||
return vf.Create(&info)
|
||||
}
|
||||
182
api/edge/callbacks/internal/config/module.go
Normal file
182
api/edge/callbacks/internal/config/module.go
Normal file
@@ -0,0 +1,182 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/tech/sendico/pkg/db"
|
||||
"github.com/tech/sendico/pkg/messaging"
|
||||
)
|
||||
|
||||
// Service-wide fallback values applied when the YAML config omits or
// zeroes a setting (see applyDefaults in service.go).
const (
	// Process lifecycle.
	defaultShutdownTimeoutSeconds = 15

	// Observability endpoint.
	defaultMetricsAddress = ":9420"

	// JetStream ingestion.
	defaultIngestStream         = "CALLBACKS"
	defaultIngestSubject        = "callbacks.events"
	defaultIngestDurable        = "callbacks-ingest"
	defaultIngestBatchSize      = 32
	defaultIngestFetchTimeoutMS = 2000
	defaultIngestIdleSleepMS    = 500

	// MongoDB-style collection names.
	defaultTaskCollection      = "callback_tasks"
	defaultInboxCollection     = "callback_inbox"
	defaultEndpointsCollection = "webhook_endpoints"

	// Delivery dispatcher and retry backoff.
	defaultWorkerConcurrency    = 8
	defaultWorkerPollIntervalMS = 200
	defaultLockTTLSeconds       = 30
	defaultRequestTimeoutMS     = 10000
	defaultMaxAttempts          = 8
	defaultMinDelayMS           = 1000
	defaultMaxDelayMS           = 300000
	defaultJitterRatio          = 0.20

	// Outbound URL safety checks.
	defaultDNSResolveTimeoutMS = 2000

	// Vault KV field read when a secret reference names none.
	defaultSecretsVaultField = "value"
)
|
||||
|
||||
// Loader parses callbacks service configuration.
type Loader interface {
	// Load reads the YAML file at path, applies defaults, validates the
	// result, and returns the fully-populated configuration.
	Load(path string) (*Config, error)
}

// Config is the full callbacks service configuration.
// Pointer fields are optional in YAML (nil when omitted); value fields
// get their zero members filled by the loader's defaulting pass.
type Config struct {
	Runtime   *RuntimeConfig    `yaml:"runtime"`   // process lifecycle; defaulted when nil
	Metrics   *MetricsConfig    `yaml:"metrics"`   // observability endpoint; defaulted when nil
	Database  *db.Config        `yaml:"database"`  // required (validated)
	Messaging *messaging.Config `yaml:"messaging"` // required (validated)
	Ingest    IngestConfig      `yaml:"ingest"`    // JetStream consumption
	Delivery  DeliveryConfig    `yaml:"delivery"`  // dispatcher / retry behavior
	Security  SecurityConfig    `yaml:"security"`  // outbound URL safety checks
	Secrets   SecretsConfig     `yaml:"secrets"`   // signing-secret resolution
}
|
||||
|
||||
// RuntimeConfig contains process lifecycle settings.
|
||||
type RuntimeConfig struct {
|
||||
ShutdownTimeoutSeconds int `yaml:"shutdown_timeout_seconds"`
|
||||
}
|
||||
|
||||
func (c *RuntimeConfig) ShutdownTimeout() time.Duration {
|
||||
if c == nil || c.ShutdownTimeoutSeconds <= 0 {
|
||||
return defaultShutdownTimeoutSeconds * time.Second
|
||||
}
|
||||
return time.Duration(c.ShutdownTimeoutSeconds) * time.Second
|
||||
}
|
||||
|
||||
// MetricsConfig configures observability endpoints.
|
||||
type MetricsConfig struct {
|
||||
Address string `yaml:"address"`
|
||||
}
|
||||
|
||||
func (c *MetricsConfig) ListenAddress() string {
|
||||
if c == nil || c.Address == "" {
|
||||
return defaultMetricsAddress
|
||||
}
|
||||
return c.Address
|
||||
}
|
||||
|
||||
// IngestConfig configures JetStream ingestion.
|
||||
type IngestConfig struct {
|
||||
Stream string `yaml:"stream"`
|
||||
Subject string `yaml:"subject"`
|
||||
Durable string `yaml:"durable"`
|
||||
BatchSize int `yaml:"batch_size"`
|
||||
FetchTimeoutMS int `yaml:"fetch_timeout_ms"`
|
||||
IdleSleepMS int `yaml:"idle_sleep_ms"`
|
||||
}
|
||||
|
||||
func (c *IngestConfig) FetchTimeout() time.Duration {
|
||||
if c.FetchTimeoutMS <= 0 {
|
||||
return time.Duration(defaultIngestFetchTimeoutMS) * time.Millisecond
|
||||
}
|
||||
return time.Duration(c.FetchTimeoutMS) * time.Millisecond
|
||||
}
|
||||
|
||||
func (c *IngestConfig) IdleSleep() time.Duration {
|
||||
if c.IdleSleepMS <= 0 {
|
||||
return time.Duration(defaultIngestIdleSleepMS) * time.Millisecond
|
||||
}
|
||||
return time.Duration(c.IdleSleepMS) * time.Millisecond
|
||||
}
|
||||
|
||||
// DeliveryConfig controls dispatcher behavior.
|
||||
type DeliveryConfig struct {
|
||||
WorkerConcurrency int `yaml:"worker_concurrency"`
|
||||
WorkerPollMS int `yaml:"worker_poll_ms"`
|
||||
LockTTLSeconds int `yaml:"lock_ttl_seconds"`
|
||||
RequestTimeoutMS int `yaml:"request_timeout_ms"`
|
||||
MaxAttempts int `yaml:"max_attempts"`
|
||||
MinDelayMS int `yaml:"min_delay_ms"`
|
||||
MaxDelayMS int `yaml:"max_delay_ms"`
|
||||
JitterRatio float64 `yaml:"jitter_ratio"`
|
||||
}
|
||||
|
||||
func (c *DeliveryConfig) WorkerPollInterval() time.Duration {
|
||||
if c.WorkerPollMS <= 0 {
|
||||
return time.Duration(defaultWorkerPollIntervalMS) * time.Millisecond
|
||||
}
|
||||
return time.Duration(c.WorkerPollMS) * time.Millisecond
|
||||
}
|
||||
|
||||
func (c *DeliveryConfig) LockTTL() time.Duration {
|
||||
if c.LockTTLSeconds <= 0 {
|
||||
return time.Duration(defaultLockTTLSeconds) * time.Second
|
||||
}
|
||||
return time.Duration(c.LockTTLSeconds) * time.Second
|
||||
}
|
||||
|
||||
func (c *DeliveryConfig) RequestTimeout() time.Duration {
|
||||
if c.RequestTimeoutMS <= 0 {
|
||||
return time.Duration(defaultRequestTimeoutMS) * time.Millisecond
|
||||
}
|
||||
return time.Duration(c.RequestTimeoutMS) * time.Millisecond
|
||||
}
|
||||
|
||||
func (c *DeliveryConfig) MinDelay() time.Duration {
|
||||
if c.MinDelayMS <= 0 {
|
||||
return time.Duration(defaultMinDelayMS) * time.Millisecond
|
||||
}
|
||||
return time.Duration(c.MinDelayMS) * time.Millisecond
|
||||
}
|
||||
|
||||
func (c *DeliveryConfig) MaxDelay() time.Duration {
|
||||
if c.MaxDelayMS <= 0 {
|
||||
return time.Duration(defaultMaxDelayMS) * time.Millisecond
|
||||
}
|
||||
return time.Duration(c.MaxDelayMS) * time.Millisecond
|
||||
}
|
||||
|
||||
// SecurityConfig controls outbound callback safety checks.
|
||||
type SecurityConfig struct {
|
||||
RequireHTTPS bool `yaml:"require_https"`
|
||||
AllowedHosts []string `yaml:"allowed_hosts"`
|
||||
AllowedPorts []int `yaml:"allowed_ports"`
|
||||
DNSResolveTimeout int `yaml:"dns_resolve_timeout_ms"`
|
||||
}
|
||||
|
||||
func (c *SecurityConfig) DNSResolveTimeoutMS() time.Duration {
|
||||
if c.DNSResolveTimeout <= 0 {
|
||||
return time.Duration(defaultDNSResolveTimeoutMS) * time.Millisecond
|
||||
}
|
||||
return time.Duration(c.DNSResolveTimeout) * time.Millisecond
|
||||
}
|
||||
|
||||
// SecretsConfig controls secret lookup behavior.
|
||||
type SecretsConfig struct {
|
||||
CacheTTLSeconds int `yaml:"cache_ttl_seconds"`
|
||||
Static map[string]string `yaml:"static"`
|
||||
Vault VaultSecretsConfig `yaml:"vault"`
|
||||
}
|
||||
|
||||
// VaultSecretsConfig controls Vault KV secret resolution.
|
||||
type VaultSecretsConfig struct {
|
||||
Address string `yaml:"address"`
|
||||
TokenEnv string `yaml:"token_env"`
|
||||
Namespace string `yaml:"namespace"`
|
||||
MountPath string `yaml:"mount_path"`
|
||||
DefaultField string `yaml:"default_field"`
|
||||
}
|
||||
|
||||
func (c *SecretsConfig) CacheTTL() time.Duration {
|
||||
if c == nil || c.CacheTTLSeconds <= 0 {
|
||||
return 0
|
||||
}
|
||||
return time.Duration(c.CacheTTLSeconds) * time.Second
|
||||
}
|
||||
162
api/edge/callbacks/internal/config/service.go
Normal file
162
api/edge/callbacks/internal/config/service.go
Normal file
@@ -0,0 +1,162 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/tech/sendico/pkg/merrors"
|
||||
"github.com/tech/sendico/pkg/mlogger"
|
||||
"go.uber.org/zap"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
type service struct {
|
||||
logger mlogger.Logger
|
||||
}
|
||||
|
||||
// New creates a configuration loader.
|
||||
func New(logger mlogger.Logger) Loader {
|
||||
if logger == nil {
|
||||
logger = zap.NewNop()
|
||||
}
|
||||
return &service{logger: logger.Named("config")}
|
||||
}
|
||||
|
||||
func (s *service) Load(path string) (*Config, error) {
|
||||
if strings.TrimSpace(path) == "" {
|
||||
return nil, merrors.InvalidArgument("config path is required", "path")
|
||||
}
|
||||
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
s.logger.Error("Failed to read config file", zap.String("path", path), zap.Error(err))
|
||||
return nil, merrors.InternalWrap(err, "failed to read callbacks config")
|
||||
}
|
||||
|
||||
cfg := &Config{}
|
||||
if err := yaml.Unmarshal(data, cfg); err != nil {
|
||||
s.logger.Error("Failed to parse config yaml", zap.String("path", path), zap.Error(err))
|
||||
return nil, merrors.InternalWrap(err, "failed to parse callbacks config")
|
||||
}
|
||||
|
||||
s.applyDefaults(cfg)
|
||||
if err := s.validate(cfg); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
func (s *service) applyDefaults(cfg *Config) {
|
||||
if cfg.Runtime == nil {
|
||||
cfg.Runtime = &RuntimeConfig{ShutdownTimeoutSeconds: defaultShutdownTimeoutSeconds}
|
||||
}
|
||||
|
||||
if cfg.Metrics == nil {
|
||||
cfg.Metrics = &MetricsConfig{Address: defaultMetricsAddress}
|
||||
} else if strings.TrimSpace(cfg.Metrics.Address) == "" {
|
||||
cfg.Metrics.Address = defaultMetricsAddress
|
||||
}
|
||||
|
||||
if strings.TrimSpace(cfg.Ingest.Stream) == "" {
|
||||
cfg.Ingest.Stream = defaultIngestStream
|
||||
}
|
||||
if strings.TrimSpace(cfg.Ingest.Subject) == "" {
|
||||
cfg.Ingest.Subject = defaultIngestSubject
|
||||
}
|
||||
if strings.TrimSpace(cfg.Ingest.Durable) == "" {
|
||||
cfg.Ingest.Durable = defaultIngestDurable
|
||||
}
|
||||
if cfg.Ingest.BatchSize <= 0 {
|
||||
cfg.Ingest.BatchSize = defaultIngestBatchSize
|
||||
}
|
||||
if cfg.Ingest.FetchTimeoutMS <= 0 {
|
||||
cfg.Ingest.FetchTimeoutMS = defaultIngestFetchTimeoutMS
|
||||
}
|
||||
if cfg.Ingest.IdleSleepMS <= 0 {
|
||||
cfg.Ingest.IdleSleepMS = defaultIngestIdleSleepMS
|
||||
}
|
||||
|
||||
if cfg.Delivery.WorkerConcurrency <= 0 {
|
||||
cfg.Delivery.WorkerConcurrency = defaultWorkerConcurrency
|
||||
}
|
||||
if cfg.Delivery.WorkerPollMS <= 0 {
|
||||
cfg.Delivery.WorkerPollMS = defaultWorkerPollIntervalMS
|
||||
}
|
||||
if cfg.Delivery.LockTTLSeconds <= 0 {
|
||||
cfg.Delivery.LockTTLSeconds = defaultLockTTLSeconds
|
||||
}
|
||||
if cfg.Delivery.RequestTimeoutMS <= 0 {
|
||||
cfg.Delivery.RequestTimeoutMS = defaultRequestTimeoutMS
|
||||
}
|
||||
if cfg.Delivery.MaxAttempts <= 0 {
|
||||
cfg.Delivery.MaxAttempts = defaultMaxAttempts
|
||||
}
|
||||
if cfg.Delivery.MinDelayMS <= 0 {
|
||||
cfg.Delivery.MinDelayMS = defaultMinDelayMS
|
||||
}
|
||||
if cfg.Delivery.MaxDelayMS <= 0 {
|
||||
cfg.Delivery.MaxDelayMS = defaultMaxDelayMS
|
||||
}
|
||||
if cfg.Delivery.JitterRatio <= 0 {
|
||||
cfg.Delivery.JitterRatio = defaultJitterRatio
|
||||
}
|
||||
if cfg.Delivery.JitterRatio > 1 {
|
||||
cfg.Delivery.JitterRatio = 1
|
||||
}
|
||||
|
||||
if cfg.Security.DNSResolveTimeout <= 0 {
|
||||
cfg.Security.DNSResolveTimeout = defaultDNSResolveTimeoutMS
|
||||
}
|
||||
if len(cfg.Security.AllowedPorts) == 0 {
|
||||
cfg.Security.AllowedPorts = []int{443}
|
||||
}
|
||||
if !cfg.Security.RequireHTTPS {
|
||||
cfg.Security.RequireHTTPS = true
|
||||
}
|
||||
|
||||
if cfg.Secrets.Static == nil {
|
||||
cfg.Secrets.Static = map[string]string{}
|
||||
}
|
||||
if strings.TrimSpace(cfg.Secrets.Vault.DefaultField) == "" {
|
||||
cfg.Secrets.Vault.DefaultField = defaultSecretsVaultField
|
||||
}
|
||||
}
|
||||
|
||||
func (s *service) validate(cfg *Config) error {
|
||||
if cfg.Database == nil {
|
||||
return merrors.InvalidArgument("database configuration is required", "database")
|
||||
}
|
||||
if cfg.Messaging == nil {
|
||||
return merrors.InvalidArgument("messaging configuration is required", "messaging")
|
||||
}
|
||||
if strings.TrimSpace(string(cfg.Messaging.Driver)) == "" {
|
||||
return merrors.InvalidArgument("messaging.driver is required", "messaging.driver")
|
||||
}
|
||||
if cfg.Delivery.MinDelay() > cfg.Delivery.MaxDelay() {
|
||||
return merrors.InvalidArgument("delivery min delay must be <= max delay", "delivery.min_delay_ms", "delivery.max_delay_ms")
|
||||
}
|
||||
if cfg.Delivery.MaxAttempts < 1 {
|
||||
return merrors.InvalidArgument("delivery.max_attempts must be > 0", "delivery.max_attempts")
|
||||
}
|
||||
if cfg.Ingest.BatchSize < 1 {
|
||||
return merrors.InvalidArgument("ingest.batch_size must be > 0", "ingest.batch_size")
|
||||
}
|
||||
vaultAddress := strings.TrimSpace(cfg.Secrets.Vault.Address)
|
||||
vaultTokenEnv := strings.TrimSpace(cfg.Secrets.Vault.TokenEnv)
|
||||
vaultMountPath := strings.TrimSpace(cfg.Secrets.Vault.MountPath)
|
||||
hasVault := vaultAddress != "" || vaultTokenEnv != "" || vaultMountPath != ""
|
||||
if hasVault {
|
||||
if vaultAddress == "" {
|
||||
return merrors.InvalidArgument("secrets.vault.address is required when vault settings are configured", "secrets.vault.address")
|
||||
}
|
||||
if vaultTokenEnv == "" {
|
||||
return merrors.InvalidArgument("secrets.vault.token_env is required when vault settings are configured", "secrets.vault.token_env")
|
||||
}
|
||||
if vaultMountPath == "" {
|
||||
return merrors.InvalidArgument("secrets.vault.mount_path is required when vault settings are configured", "secrets.vault.mount_path")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
27
api/edge/callbacks/internal/delivery/classifier.go
Normal file
27
api/edge/callbacks/internal/delivery/classifier.go
Normal file
@@ -0,0 +1,27 @@
|
||||
package delivery
|
||||
|
||||
import "net/http"
|
||||
|
||||
type outcome string
|
||||
|
||||
const (
|
||||
outcomeDelivered outcome = "delivered"
|
||||
outcomeRetry outcome = "retry"
|
||||
outcomeFailed outcome = "failed"
|
||||
)
|
||||
|
||||
func classify(statusCode int, reqErr error) outcome {
|
||||
if reqErr != nil {
|
||||
return outcomeRetry
|
||||
}
|
||||
if statusCode >= http.StatusOK && statusCode < http.StatusMultipleChoices {
|
||||
return outcomeDelivered
|
||||
}
|
||||
if statusCode == http.StatusTooManyRequests || statusCode == http.StatusRequestTimeout {
|
||||
return outcomeRetry
|
||||
}
|
||||
if statusCode >= http.StatusInternalServerError {
|
||||
return outcomeRetry
|
||||
}
|
||||
return outcomeFailed
|
||||
}
|
||||
48
api/edge/callbacks/internal/delivery/module.go
Normal file
48
api/edge/callbacks/internal/delivery/module.go
Normal file
@@ -0,0 +1,48 @@
|
||||
package delivery
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/tech/sendico/edge/callbacks/internal/retry"
|
||||
"github.com/tech/sendico/edge/callbacks/internal/security"
|
||||
"github.com/tech/sendico/edge/callbacks/internal/signing"
|
||||
"github.com/tech/sendico/edge/callbacks/internal/storage"
|
||||
"github.com/tech/sendico/pkg/mlogger"
|
||||
)
|
||||
|
||||
// Observer captures delivery metrics.
type Observer interface {
	// ObserveDelivery records one delivery attempt's terminal result
	// ("delivered", "retry", "failed", "blocked", ...), the upstream HTTP
	// status (0 when no response was received), and the attempt duration.
	ObserveDelivery(result string, statusCode int, duration time.Duration)
}

// Config controls delivery worker runtime.
type Config struct {
	WorkerConcurrency int           // number of parallel delivery workers
	WorkerPoll        time.Duration // wait between polls when no task is available
	LockTTL           time.Duration // lease duration for a locked task
	RequestTimeout    time.Duration // fallback per-callback HTTP timeout
	JitterRatio       float64       // backoff jitter fraction passed to the retry policy
}

// Dependencies configure delivery dispatcher.
type Dependencies struct {
	Logger   mlogger.Logger   // optional; a no-op logger is substituted when nil
	Config   Config           // worker runtime settings; zero values get sane fallbacks
	Tasks    storage.TaskRepo // required: task locking and state transitions
	Retry    retry.Policy     // required: computes the next attempt time
	Security security.Validator // required: outbound URL safety checks
	Signer   signing.Signer   // required: signs outbound payloads
	Observer Observer         // optional metrics sink
}

// Service executes callback delivery tasks.
type Service interface {
	// Start launches the worker pool; idempotent (first call wins).
	Start(ctx context.Context)
	// Stop cancels the workers and blocks until they exit; idempotent.
	Stop()
}

// New creates delivery service.
func New(deps Dependencies) (Service, error) {
	return newService(deps)
}
|
||||
263
api/edge/callbacks/internal/delivery/service.go
Normal file
263
api/edge/callbacks/internal/delivery/service.go
Normal file
@@ -0,0 +1,263 @@
|
||||
package delivery
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/tech/sendico/edge/callbacks/internal/signing"
|
||||
"github.com/tech/sendico/edge/callbacks/internal/storage"
|
||||
"github.com/tech/sendico/pkg/merrors"
|
||||
"github.com/tech/sendico/pkg/mlogger"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// responseDrainLimit caps how many response-body bytes are drained per
// attempt so connections can be reused without reading unbounded bodies.
const responseDrainLimit = 64 * 1024

// service is the delivery dispatcher: a pool of workers that lock
// pending tasks, POST signed payloads, and record the outcome.
type service struct {
	logger mlogger.Logger
	cfg    Config
	tasks  storage.TaskRepo
	// retry computes the next attempt time; declared as a local
	// single-method interface rather than retry.Policy.
	retry interface {
		NextAttempt(attempt int, now time.Time, minDelay, maxDelay time.Duration, jitterRatio float64) time.Time
	}
	// security validates outbound URLs before any request is made.
	security interface {
		ValidateURL(ctx context.Context, target string) error
	}
	signer signing.Signer
	obs    Observer     // optional metrics sink; may be nil
	client *http.Client // shared HTTP client; redirects are not followed

	cancel context.CancelFunc // stops all workers
	once   sync.Once          // guards Start
	stop   sync.Once          // guards Stop
	wg     sync.WaitGroup     // tracks running workers
}
|
||||
|
||||
func newService(deps Dependencies) (Service, error) {
|
||||
if deps.Tasks == nil {
|
||||
return nil, merrors.InvalidArgument("delivery: task repo is required", "tasks")
|
||||
}
|
||||
if deps.Retry == nil {
|
||||
return nil, merrors.InvalidArgument("delivery: retry policy is required", "retry")
|
||||
}
|
||||
if deps.Security == nil {
|
||||
return nil, merrors.InvalidArgument("delivery: security validator is required", "security")
|
||||
}
|
||||
if deps.Signer == nil {
|
||||
return nil, merrors.InvalidArgument("delivery: signer is required", "signer")
|
||||
}
|
||||
|
||||
logger := deps.Logger
|
||||
if logger == nil {
|
||||
logger = zap.NewNop()
|
||||
}
|
||||
|
||||
cfg := deps.Config
|
||||
if cfg.WorkerConcurrency <= 0 {
|
||||
cfg.WorkerConcurrency = 1
|
||||
}
|
||||
if cfg.WorkerPoll <= 0 {
|
||||
cfg.WorkerPoll = 200 * time.Millisecond
|
||||
}
|
||||
if cfg.LockTTL <= 0 {
|
||||
cfg.LockTTL = 30 * time.Second
|
||||
}
|
||||
if cfg.RequestTimeout <= 0 {
|
||||
cfg.RequestTimeout = 10 * time.Second
|
||||
}
|
||||
|
||||
transport := &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
MaxIdleConns: 200,
|
||||
MaxIdleConnsPerHost: 32,
|
||||
IdleConnTimeout: 90 * time.Second,
|
||||
TLSHandshakeTimeout: 5 * time.Second,
|
||||
ExpectContinueTimeout: time.Second,
|
||||
}
|
||||
|
||||
client := &http.Client{
|
||||
Transport: transport,
|
||||
CheckRedirect: func(*http.Request, []*http.Request) error {
|
||||
return http.ErrUseLastResponse
|
||||
},
|
||||
}
|
||||
|
||||
return &service{
|
||||
logger: logger.Named("delivery"),
|
||||
cfg: cfg,
|
||||
tasks: deps.Tasks,
|
||||
retry: deps.Retry,
|
||||
security: deps.Security,
|
||||
signer: deps.Signer,
|
||||
obs: deps.Observer,
|
||||
client: client,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *service) Start(ctx context.Context) {
|
||||
s.once.Do(func() {
|
||||
runCtx := ctx
|
||||
if runCtx == nil {
|
||||
runCtx = context.Background()
|
||||
}
|
||||
runCtx, s.cancel = context.WithCancel(runCtx)
|
||||
|
||||
for i := 0; i < s.cfg.WorkerConcurrency; i++ {
|
||||
workerID := "worker-" + strconv.Itoa(i+1)
|
||||
s.wg.Add(1)
|
||||
go func(id string) {
|
||||
defer s.wg.Done()
|
||||
s.runWorker(runCtx, id)
|
||||
}(workerID)
|
||||
}
|
||||
s.logger.Info("Delivery workers started", zap.Int("workers", s.cfg.WorkerConcurrency))
|
||||
})
|
||||
}
|
||||
|
||||
func (s *service) Stop() {
|
||||
s.stop.Do(func() {
|
||||
if s.cancel != nil {
|
||||
s.cancel()
|
||||
}
|
||||
s.wg.Wait()
|
||||
s.logger.Info("Delivery workers stopped")
|
||||
})
|
||||
}
|
||||
|
||||
func (s *service) runWorker(ctx context.Context, workerID string) {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
now := time.Now().UTC()
|
||||
task, err := s.tasks.LockNextTask(ctx, now, workerID, s.cfg.LockTTL)
|
||||
if err != nil {
|
||||
s.logger.Warn("Failed to lock next task", zap.String("worker_id", workerID), zap.Error(err))
|
||||
time.Sleep(s.cfg.WorkerPoll)
|
||||
continue
|
||||
}
|
||||
if task == nil {
|
||||
time.Sleep(s.cfg.WorkerPoll)
|
||||
continue
|
||||
}
|
||||
|
||||
s.handleTask(ctx, workerID, task)
|
||||
}
|
||||
}
|
||||
|
||||
// handleTask performs one delivery attempt for a locked task: validate
// the target URL, sign the payload, POST it, classify the response, and
// persist the resulting state transition. Marker-write failures are
// logged but do not abort the attempt; the lock TTL eventually frees
// the task for re-processing.
func (s *service) handleTask(ctx context.Context, workerID string, task *storage.Task) {
	started := time.Now()
	statusCode := 0
	// result is reported to the Observer in the deferred hook below; it
	// is overwritten on every path, so "failed" only survives a panic.
	result := "failed"
	// This attempt's 1-based number (task.Attempt counts completed tries).
	attempt := task.Attempt + 1

	defer func() {
		if s.obs != nil {
			s.obs.ObserveDelivery(result, statusCode, time.Since(started))
		}
	}()

	// SSRF / policy gate: a blocked URL fails the task immediately with
	// no retry — the URL will not become safe on a later attempt.
	if err := s.security.ValidateURL(ctx, task.EndpointURL); err != nil {
		result = "blocked"
		// Best-effort mark; the task stays locked until TTL on failure.
		_ = s.tasks.MarkFailed(ctx, task.ID, attempt, err.Error(), statusCode, time.Now().UTC())
		return
	}

	// Per-task timeout overrides the service default when positive.
	timeout := task.RequestTimeout
	if timeout <= 0 {
		timeout = s.cfg.RequestTimeout
	}

	// Produce the signed body plus signature headers for this attempt.
	signed, err := s.signer.Sign(ctx, task.SigningMode, task.SecretRef, task.Payload, time.Now().UTC())
	if err != nil {
		result = "sign_error"
		_ = s.tasks.MarkFailed(ctx, task.ID, attempt, err.Error(), statusCode, time.Now().UTC())
		return
	}

	reqCtx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	req, err := http.NewRequestWithContext(reqCtx, http.MethodPost, task.EndpointURL, bytes.NewReader(signed.Body))
	if err != nil {
		result = "request_error"
		_ = s.tasks.MarkFailed(ctx, task.ID, attempt, err.Error(), statusCode, time.Now().UTC())
		return
	}
	// Header precedence: Content-Type < task headers < signature headers
	// (later Set calls overwrite earlier ones).
	req.Header.Set("Content-Type", "application/json")
	for key, val := range task.Headers {
		req.Header.Set(key, val)
	}
	for key, val := range signed.Headers {
		req.Header.Set(key, val)
	}

	resp, reqErr := s.client.Do(req)
	if resp != nil {
		statusCode = resp.StatusCode
		// Drain (bounded) and close so the transport can reuse the connection.
		_, _ = io.Copy(io.Discard, io.LimitReader(resp.Body, responseDrainLimit))
		_ = resp.Body.Close()
	}

	out := classify(statusCode, reqErr)
	now := time.Now().UTC()
	switch out {
	case outcomeDelivered:
		result = string(outcomeDelivered)
		if err := s.tasks.MarkDelivered(ctx, task.ID, statusCode, time.Since(started), now); err != nil {
			s.logger.Warn("Failed to mark task delivered", zap.String("worker_id", workerID), zap.String("task_id", task.ID.Hex()), zap.Error(err))
		}
	case outcomeRetry:
		if attempt < task.MaxAttempts {
			// Schedule the next attempt via the backoff policy.
			next := s.retry.NextAttempt(attempt, now, task.MinDelay, task.MaxDelay, s.cfg.JitterRatio)
			result = string(outcomeRetry)
			lastErr := stringifyErr(reqErr)
			if reqErr == nil && statusCode > 0 {
				lastErr = "upstream returned retryable status"
			}
			if err := s.tasks.MarkRetry(ctx, task.ID, attempt, next, lastErr, statusCode, now); err != nil {
				s.logger.Warn("Failed to mark task retry", zap.String("worker_id", workerID), zap.String("task_id", task.ID.Hex()), zap.Error(err))
			}
		} else {
			// Retryable outcome but the attempt budget is exhausted.
			result = string(outcomeFailed)
			lastErr := stringifyErr(reqErr)
			if reqErr == nil && statusCode > 0 {
				lastErr = "upstream returned retryable status but max attempts reached"
			}
			if err := s.tasks.MarkFailed(ctx, task.ID, attempt, lastErr, statusCode, now); err != nil {
				s.logger.Warn("Failed to mark task failed", zap.String("worker_id", workerID), zap.String("task_id", task.ID.Hex()), zap.Error(err))
			}
		}
	default:
		// Non-retryable status (e.g. 4xx other than 408/429): permanent failure.
		result = string(outcomeFailed)
		lastErr := stringifyErr(reqErr)
		if reqErr == nil && statusCode > 0 {
			lastErr = "upstream returned non-retryable status"
		}
		if err := s.tasks.MarkFailed(ctx, task.ID, attempt, lastErr, statusCode, now); err != nil {
			s.logger.Warn("Failed to mark task failed", zap.String("worker_id", workerID), zap.String("task_id", task.ID.Hex()), zap.Error(err))
		}
	}
}
|
||||
|
||||
func stringifyErr(err error) string {
|
||||
if err == nil {
|
||||
return ""
|
||||
}
|
||||
if errors.Is(err, context.Canceled) {
|
||||
return "request canceled"
|
||||
}
|
||||
if errors.Is(err, context.DeadlineExceeded) {
|
||||
return "request timeout"
|
||||
}
|
||||
return err.Error()
|
||||
}
|
||||
33
api/edge/callbacks/internal/events/module.go
Normal file
33
api/edge/callbacks/internal/events/module.go
Normal file
@@ -0,0 +1,33 @@
|
||||
package events
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Envelope is the canonical incoming event envelope.
type Envelope struct {
	EventID     string          `json:"event_id"`  // unique event identifier; required
	Type        string          `json:"type"`      // event type name; required
	ClientID    string          `json:"client_id"` // owning client; required
	OccurredAt  time.Time       `json:"occurred_at"`            // required; normalized to UTC by Parse
	PublishedAt time.Time       `json:"published_at,omitempty"` // optional publish timestamp
	Data        json.RawMessage `json:"data"`                   // opaque payload; defaulted to "{}" by Parse
}

// Service parses incoming messages and builds outbound payload bytes.
type Service interface {
	// Parse decodes and validates a raw message into an Envelope.
	Parse(data []byte) (*Envelope, error)
	// BuildPayload serializes an Envelope into the stable outbound JSON body.
	BuildPayload(ctx context.Context, envelope *Envelope) ([]byte, error)
}

// Payload is the stable outbound JSON body.
// Timestamps are formatted as RFC 3339 strings (see BuildPayload) so
// the wire format is independent of time.Time JSON encoding.
type Payload struct {
	EventID     string          `json:"event_id"`
	Type        string          `json:"type"`
	ClientID    string          `json:"client_id"`
	OccurredAt  string          `json:"occurred_at"`
	PublishedAt string          `json:"published_at,omitempty"`
	Data        json.RawMessage `json:"data"`
}
|
||||
86
api/edge/callbacks/internal/events/service.go
Normal file
86
api/edge/callbacks/internal/events/service.go
Normal file
@@ -0,0 +1,86 @@
|
||||
package events
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/tech/sendico/pkg/merrors"
|
||||
"github.com/tech/sendico/pkg/mlogger"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type parserService struct {
|
||||
logger mlogger.Logger
|
||||
}
|
||||
|
||||
// New creates event parser/payload builder service.
|
||||
func New(logger mlogger.Logger) Service {
|
||||
if logger == nil {
|
||||
logger = zap.NewNop()
|
||||
}
|
||||
return &parserService{logger: logger.Named("events")}
|
||||
}
|
||||
|
||||
func (s *parserService) Parse(data []byte) (*Envelope, error) {
|
||||
if len(data) == 0 {
|
||||
return nil, merrors.InvalidArgument("event payload is empty", "data")
|
||||
}
|
||||
|
||||
var envelope Envelope
|
||||
if err := json.Unmarshal(data, &envelope); err != nil {
|
||||
return nil, merrors.InvalidArgumentWrap(err, "event payload is not valid JSON", "data")
|
||||
}
|
||||
|
||||
if strings.TrimSpace(envelope.EventID) == "" {
|
||||
return nil, merrors.InvalidArgument("event_id is required", "event_id")
|
||||
}
|
||||
if strings.TrimSpace(envelope.Type) == "" {
|
||||
return nil, merrors.InvalidArgument("type is required", "type")
|
||||
}
|
||||
if strings.TrimSpace(envelope.ClientID) == "" {
|
||||
return nil, merrors.InvalidArgument("client_id is required", "client_id")
|
||||
}
|
||||
if envelope.OccurredAt.IsZero() {
|
||||
return nil, merrors.InvalidArgument("occurred_at is required", "occurred_at")
|
||||
}
|
||||
if len(envelope.Data) == 0 {
|
||||
envelope.Data = []byte("{}")
|
||||
}
|
||||
|
||||
envelope.EventID = strings.TrimSpace(envelope.EventID)
|
||||
envelope.Type = strings.TrimSpace(envelope.Type)
|
||||
envelope.ClientID = strings.TrimSpace(envelope.ClientID)
|
||||
envelope.OccurredAt = envelope.OccurredAt.UTC()
|
||||
if !envelope.PublishedAt.IsZero() {
|
||||
envelope.PublishedAt = envelope.PublishedAt.UTC()
|
||||
}
|
||||
|
||||
return &envelope, nil
|
||||
}
|
||||
|
||||
func (s *parserService) BuildPayload(_ context.Context, envelope *Envelope) ([]byte, error) {
|
||||
if envelope == nil {
|
||||
return nil, merrors.InvalidArgument("event envelope is required", "envelope")
|
||||
}
|
||||
|
||||
payload := Payload{
|
||||
EventID: envelope.EventID,
|
||||
Type: envelope.Type,
|
||||
ClientID: envelope.ClientID,
|
||||
OccurredAt: envelope.OccurredAt.UTC().Format(time.RFC3339Nano),
|
||||
Data: envelope.Data,
|
||||
}
|
||||
if !envelope.PublishedAt.IsZero() {
|
||||
payload.PublishedAt = envelope.PublishedAt.UTC().Format(time.RFC3339Nano)
|
||||
}
|
||||
|
||||
data, err := json.Marshal(payload)
|
||||
if err != nil {
|
||||
s.logger.Warn("Failed to marshal callback payload", zap.Error(err), zap.String("event_id", envelope.EventID))
|
||||
return nil, merrors.InternalWrap(err, "failed to marshal callback payload")
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
|
||||
51
api/edge/callbacks/internal/ingest/module.go
Normal file
51
api/edge/callbacks/internal/ingest/module.go
Normal file
@@ -0,0 +1,51 @@
|
||||
package ingest
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/nats-io/nats.go"
|
||||
"github.com/tech/sendico/edge/callbacks/internal/events"
|
||||
"github.com/tech/sendico/edge/callbacks/internal/storage"
|
||||
"github.com/tech/sendico/edge/callbacks/internal/subscriptions"
|
||||
"github.com/tech/sendico/pkg/mlogger"
|
||||
)
|
||||
|
||||
// Observer captures ingest metrics.
type Observer interface {
	// ObserveIngest records the result of processing one message and how
	// long it took.
	ObserveIngest(result string, duration time.Duration)
}

// Config contains JetStream ingest settings.
type Config struct {
	Stream       string        // JetStream stream name
	Subject      string        // subject to consume; required
	Durable      string        // durable consumer name; required
	BatchSize    int           // messages per pull fetch; clamped to >= 1
	FetchTimeout time.Duration // pull fetch wait; defaulted when non-positive
	IdleSleep    time.Duration // pause when no messages; defaulted when non-positive
}

// Dependencies configure the ingest service.
type Dependencies struct {
	Logger       mlogger.Logger          // optional; no-op substituted when nil
	JetStream    nats.JetStreamContext   // required message source
	Config       Config                  // consumption settings
	Events       events.Service          // required: parses messages / builds payloads
	Resolver     subscriptions.Resolver  // required: maps events to endpoints
	InboxRepo    storage.InboxRepo       // required: idempotency inbox
	TaskRepo     storage.TaskRepo        // required: delivery task persistence
	TaskDefaults storage.TaskDefaults    // default retry/timeout values for new tasks
	Observer     Observer                // optional metrics sink
}

// Service runs JetStream ingest workers.
type Service interface {
	// Start launches ingestion; idempotent (first call wins).
	Start(ctx context.Context)
	// Stop cancels ingestion and waits for the worker to exit; idempotent.
	Stop()
}

// New creates ingest service.
func New(deps Dependencies) (Service, error) {
	return newService(deps)
}
|
||||
204
api/edge/callbacks/internal/ingest/service.go
Normal file
204
api/edge/callbacks/internal/ingest/service.go
Normal file
@@ -0,0 +1,204 @@
|
||||
package ingest
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/nats-io/nats.go"
|
||||
"github.com/tech/sendico/pkg/merrors"
|
||||
"github.com/tech/sendico/pkg/mlogger"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// service pulls batches from JetStream and turns events into delivery
// tasks via the dependencies captured at construction.
type service struct {
	logger mlogger.Logger
	js     nats.JetStreamContext
	cfg    Config       // normalized copy of Dependencies.Config
	deps   Dependencies // full dependency set used by the run loop

	cancel context.CancelFunc // stops the run loop
	wg     sync.WaitGroup     // tracks the run goroutine
	once   sync.Once          // guards Start
	stop   sync.Once          // guards Stop
}
|
||||
|
||||
func newService(deps Dependencies) (Service, error) {
|
||||
if deps.JetStream == nil {
|
||||
return nil, merrors.InvalidArgument("ingest: jetstream context is required", "jetstream")
|
||||
}
|
||||
if deps.Events == nil {
|
||||
return nil, merrors.InvalidArgument("ingest: events service is required", "events")
|
||||
}
|
||||
if deps.Resolver == nil {
|
||||
return nil, merrors.InvalidArgument("ingest: subscriptions resolver is required", "resolver")
|
||||
}
|
||||
if deps.InboxRepo == nil {
|
||||
return nil, merrors.InvalidArgument("ingest: inbox repo is required", "inboxRepo")
|
||||
}
|
||||
if deps.TaskRepo == nil {
|
||||
return nil, merrors.InvalidArgument("ingest: task repo is required", "taskRepo")
|
||||
}
|
||||
if strings.TrimSpace(deps.Config.Subject) == "" {
|
||||
return nil, merrors.InvalidArgument("ingest: subject is required", "config.subject")
|
||||
}
|
||||
if strings.TrimSpace(deps.Config.Durable) == "" {
|
||||
return nil, merrors.InvalidArgument("ingest: durable is required", "config.durable")
|
||||
}
|
||||
if deps.Config.BatchSize <= 0 {
|
||||
deps.Config.BatchSize = 1
|
||||
}
|
||||
if deps.Config.FetchTimeout <= 0 {
|
||||
deps.Config.FetchTimeout = 2 * time.Second
|
||||
}
|
||||
if deps.Config.IdleSleep <= 0 {
|
||||
deps.Config.IdleSleep = 500 * time.Millisecond
|
||||
}
|
||||
|
||||
logger := deps.Logger
|
||||
if logger == nil {
|
||||
logger = zap.NewNop()
|
||||
}
|
||||
|
||||
return &service{
|
||||
logger: logger.Named("ingest"),
|
||||
js: deps.JetStream,
|
||||
cfg: deps.Config,
|
||||
deps: deps,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Start launches the single run goroutine. Subsequent calls are no-ops
// (sync.Once). A nil ctx is replaced by context.Background().
func (s *service) Start(ctx context.Context) {
	s.once.Do(func() {
		runCtx := ctx
		if runCtx == nil {
			runCtx = context.Background()
		}
		// Wrap so Stop can cancel the loop independently of the caller's ctx.
		runCtx, s.cancel = context.WithCancel(runCtx)

		s.wg.Add(1)
		go func() {
			defer s.wg.Done()
			s.run(runCtx)
		}()
	})
}
|
||||
|
||||
func (s *service) Stop() {
|
||||
s.stop.Do(func() {
|
||||
if s.cancel != nil {
|
||||
s.cancel()
|
||||
}
|
||||
s.wg.Wait()
|
||||
})
|
||||
}
|
||||
|
||||
// run is the consumer loop: it creates a pull subscription and fetches
// batches until ctx is cancelled. Fetch timeouts and transient errors are
// absorbed with an idle sleep rather than terminating the loop; only a
// failed subscription setup ends the goroutine early.
func (s *service) run(ctx context.Context) {
	subOpts := []nats.SubOpt{}
	if stream := strings.TrimSpace(s.cfg.Stream); stream != "" {
		// Bind to the configured stream instead of letting the client
		// discover it from the subject.
		subOpts = append(subOpts, nats.BindStream(stream))
	}

	sub, err := s.js.PullSubscribe(strings.TrimSpace(s.cfg.Subject), strings.TrimSpace(s.cfg.Durable), subOpts...)
	if err != nil {
		s.logger.Error("Failed to start JetStream subscription", zap.String("subject", s.cfg.Subject), zap.String("durable", s.cfg.Durable), zap.Error(err))
		return
	}

	s.logger.Info("Ingest consumer started", zap.String("subject", s.cfg.Subject), zap.String("durable", s.cfg.Durable), zap.Int("batch_size", s.cfg.BatchSize))

	for {
		// Non-blocking cancellation check before each fetch.
		select {
		case <-ctx.Done():
			s.logger.Info("Ingest consumer stopped")
			return
		default:
		}

		msgs, err := sub.Fetch(s.cfg.BatchSize, nats.MaxWait(s.cfg.FetchTimeout))
		if err != nil {
			if errors.Is(err, nats.ErrTimeout) {
				// Empty stream: back off briefly and poll again.
				time.Sleep(s.cfg.IdleSleep)
				continue
			}
			if ctx.Err() != nil {
				// Fetch failed because we were cancelled mid-wait.
				return
			}
			s.logger.Warn("Failed to fetch JetStream messages", zap.Error(err))
			time.Sleep(s.cfg.IdleSleep)
			continue
		}

		for _, msg := range msgs {
			s.handleMessage(ctx, msg)
		}
	}
}
|
||||
|
||||
// handleMessage processes a single event: parse -> idempotency insert ->
// endpoint resolution -> payload build -> task fan-out.
//
// Acking strategy: the deferred block acks or naks exactly once based on the
// `nak` flag, and reports the `result` label to the observer. Permanent
// failures (invalid event, duplicate, no endpoints) are ACKed so the message
// is not redelivered; transient failures (repo/resolver/payload errors) are
// NAKed for redelivery. Early `return`s inside the body rely on this defer.
func (s *service) handleMessage(ctx context.Context, msg *nats.Msg) {
	start := time.Now()
	result := "ok"
	nak := false

	defer func() {
		if s.deps.Observer != nil {
			s.deps.Observer.ObserveIngest(result, time.Since(start))
		}

		var ackErr error
		if nak {
			ackErr = msg.Nak()
		} else {
			ackErr = msg.Ack()
		}
		if ackErr != nil {
			s.logger.Warn("Failed to ack ingest message", zap.Bool("nak", nak), zap.Error(ackErr))
		}
	}()

	// Malformed events can never succeed: ACK to drop them.
	envelope, err := s.deps.Events.Parse(msg.Data)
	if err != nil {
		result = "invalid_event"
		nak = false
		return
	}

	// Idempotency gate: only the first insert for this event ID proceeds.
	inserted, err := s.deps.InboxRepo.TryInsert(ctx, envelope.EventID, envelope.ClientID, envelope.Type, time.Now().UTC())
	if err != nil {
		result = "inbox_error"
		nak = true
		return
	}
	if !inserted {
		result = "duplicate"
		nak = false
		return
	}

	endpoints, err := s.deps.Resolver.Resolve(ctx, envelope.ClientID, envelope.Type)
	if err != nil {
		result = "resolve_error"
		nak = true
		return
	}
	if len(endpoints) == 0 {
		// No subscribers for this event type: nothing to deliver, drop it.
		result = "no_endpoints"
		nak = false
		return
	}

	payload, err := s.deps.Events.BuildPayload(ctx, envelope)
	if err != nil {
		result = "payload_error"
		nak = true
		return
	}

	// One delivery task per endpoint; NOTE(review): on NAK redelivery the
	// inbox row already exists, so redelivered messages after a task error
	// will take the "duplicate" path — presumably UpsertTasks/TryInsert
	// semantics account for this; verify against the repo implementation.
	if err := s.deps.TaskRepo.UpsertTasks(ctx, envelope.EventID, endpoints, payload, s.deps.TaskDefaults, time.Now().UTC()); err != nil {
		result = "task_error"
		nak = true
		return
	}
}
|
||||
36
api/edge/callbacks/internal/ops/module.go
Normal file
36
api/edge/callbacks/internal/ops/module.go
Normal file
@@ -0,0 +1,36 @@
|
||||
package ops
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/tech/sendico/pkg/api/routers/health"
|
||||
"github.com/tech/sendico/pkg/mlogger"
|
||||
)
|
||||
|
||||
// Observer records service metrics.
type Observer interface {
	// ObserveIngest records one ingest attempt by result label.
	ObserveIngest(result string, duration time.Duration)
	// ObserveDelivery records one delivery attempt by result and HTTP status.
	ObserveDelivery(result string, statusCode int, duration time.Duration)
}

// HTTPServer exposes /metrics and /health.
type HTTPServer interface {
	// SetStatus updates the health endpoint's reported status.
	SetStatus(status health.ServiceStatus)
	// Close marks the service terminating and shuts the server down.
	Close(ctx context.Context)
}

// HTTPServerConfig configures observability endpoint.
type HTTPServerConfig struct {
	Address string // listen address; a default is applied when empty
}

// NewObserver creates process metrics observer.
func NewObserver() Observer {
	return newObserver()
}

// NewHTTPServer creates observability HTTP server.
func NewHTTPServer(logger mlogger.Logger, cfg HTTPServerConfig) (HTTPServer, error) {
	return newHTTPServer(logger, cfg)
}
|
||||
119
api/edge/callbacks/internal/ops/server.go
Normal file
119
api/edge/callbacks/internal/ops/server.go
Normal file
@@ -0,0 +1,119 @@
|
||||
package ops
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"github.com/tech/sendico/pkg/api/routers"
|
||||
"github.com/tech/sendico/pkg/api/routers/health"
|
||||
"github.com/tech/sendico/pkg/merrors"
|
||||
"github.com/tech/sendico/pkg/mlogger"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
const (
	defaultAddress        = ":9420"          // fallback metrics listen address
	readHeaderTimeout     = 5 * time.Second  // slow-loris protection
	defaultShutdownWindow = 5 * time.Second  // max time allowed for graceful shutdown
)

// httpServer serves Prometheus metrics and health probes.
type httpServer struct {
	logger  mlogger.Logger
	server  *http.Server
	health  routers.Health // may be nil if the health router failed to initialise
	timeout time.Duration  // graceful-shutdown window applied in Close
}

// newHTTPServer builds the ops endpoint and starts listening in a background
// goroutine immediately. A failed health-router setup is logged but not
// fatal: metrics are still served.
func newHTTPServer(logger mlogger.Logger, cfg HTTPServerConfig) (HTTPServer, error) {
	if logger == nil {
		return nil, merrors.InvalidArgument("ops: logger is nil")
	}

	address := strings.TrimSpace(cfg.Address)
	if address == "" {
		address = defaultAddress
	}

	r := chi.NewRouter()
	r.Handle("/metrics", promhttp.Handler())

	metricsLogger := logger.Named("ops")
	var healthRouter routers.Health
	hr, err := routers.NewHealthRouter(metricsLogger, r, "")
	if err != nil {
		// Degrade gracefully: keep serving metrics without health probes.
		metricsLogger.Warn("Failed to initialise health router", zap.Error(err))
	} else {
		hr.SetStatus(health.SSStarting)
		healthRouter = hr
	}

	httpSrv := &http.Server{
		Addr:              address,
		Handler:           r,
		ReadHeaderTimeout: readHeaderTimeout,
	}

	wrapper := &httpServer{
		logger:  metricsLogger,
		server:  httpSrv,
		health:  healthRouter,
		timeout: defaultShutdownWindow,
	}

	// Serve in the background; ErrServerClosed is the expected result of a
	// graceful Close and is not treated as a failure.
	go func() {
		metricsLogger.Info("Prometheus endpoint listening", zap.String("address", address))
		serveErr := httpSrv.ListenAndServe()
		if serveErr != nil && !errors.Is(serveErr, http.ErrServerClosed) {
			metricsLogger.Error("Prometheus endpoint stopped unexpectedly", zap.Error(serveErr))
			if healthRouter != nil {
				healthRouter.SetStatus(health.SSTerminating)
			}
		}
	}()

	return wrapper, nil
}
|
||||
|
||||
func (s *httpServer) SetStatus(status health.ServiceStatus) {
|
||||
if s == nil || s.health == nil {
|
||||
return
|
||||
}
|
||||
s.health.SetStatus(status)
|
||||
}
|
||||
|
||||
// Close marks the service terminating on the health endpoint, then shuts
// the HTTP server down gracefully within the configured timeout window.
// Order matters: health status flips before the listener stops so probes
// observe the transition.
func (s *httpServer) Close(ctx context.Context) {
	if s == nil {
		return
	}

	if s.health != nil {
		s.health.SetStatus(health.SSTerminating)
		s.health.Finish()
		s.health = nil
	}

	if s.server == nil {
		return
	}

	shutdownCtx := ctx
	if shutdownCtx == nil {
		shutdownCtx = context.Background()
	}
	if s.timeout > 0 {
		// Bound the graceful drain so Close cannot hang indefinitely.
		var cancel context.CancelFunc
		shutdownCtx, cancel = context.WithTimeout(shutdownCtx, s.timeout)
		defer cancel()
	}

	if err := s.server.Shutdown(shutdownCtx); err != nil && !errors.Is(err, http.ErrServerClosed) {
		s.logger.Warn("Failed to stop metrics server", zap.Error(err))
	} else {
		s.logger.Info("Metrics server stopped")
	}
}
|
||||
75
api/edge/callbacks/internal/ops/service.go
Normal file
75
api/edge/callbacks/internal/ops/service.go
Normal file
@@ -0,0 +1,75 @@
|
||||
package ops
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
// Package-level collectors, registered once via metricsOnce so that creating
// multiple observers never double-registers with the default registry
// (promauto panics on duplicate registration).
var (
	metricsOnce     sync.Once
	ingestTotal     *prometheus.CounterVec
	ingestLatency   *prometheus.HistogramVec
	deliveryTotal   *prometheus.CounterVec
	deliveryLatency *prometheus.HistogramVec
)

// observer is a stateless facade over the package-level collectors.
type observer struct{}

// newObserver ensures collectors are registered and returns the facade.
func newObserver() Observer {
	initMetrics()
	return observer{}
}

// initMetrics registers all callback-service collectors exactly once.
func initMetrics() {
	metricsOnce.Do(func() {
		ingestTotal = promauto.NewCounterVec(prometheus.CounterOpts{
			Namespace: "sendico",
			Subsystem: "callbacks",
			Name:      "ingest_total",
			Help:      "Total ingest attempts by result",
		}, []string{"result"})

		ingestLatency = promauto.NewHistogramVec(prometheus.HistogramOpts{
			Namespace: "sendico",
			Subsystem: "callbacks",
			Name:      "ingest_duration_seconds",
			Help:      "Ingest latency in seconds",
			Buckets:   prometheus.DefBuckets,
		}, []string{"result"})

		deliveryTotal = promauto.NewCounterVec(prometheus.CounterOpts{
			Namespace: "sendico",
			Subsystem: "callbacks",
			Name:      "delivery_total",
			Help:      "Total delivery attempts by result and status code",
		}, []string{"result", "status_code"})

		// NOTE(review): latency is labelled by result only (no status_code),
		// presumably to bound histogram cardinality — confirm intent.
		deliveryLatency = promauto.NewHistogramVec(prometheus.HistogramOpts{
			Namespace: "sendico",
			Subsystem: "callbacks",
			Name:      "delivery_duration_seconds",
			Help:      "Delivery latency in seconds",
			Buckets:   prometheus.DefBuckets,
		}, []string{"result"})
	})
}

// ObserveIngest records one ingest attempt; empty results are normalized.
func (observer) ObserveIngest(result string, duration time.Duration) {
	if result == "" {
		result = "unknown"
	}
	ingestTotal.WithLabelValues(result).Inc()
	ingestLatency.WithLabelValues(result).Observe(duration.Seconds())
}

// ObserveDelivery records one delivery attempt; empty results are normalized.
func (observer) ObserveDelivery(result string, statusCode int, duration time.Duration) {
	if result == "" {
		result = "unknown"
	}
	deliveryTotal.WithLabelValues(result, strconv.Itoa(statusCode)).Inc()
	deliveryLatency.WithLabelValues(result).Observe(duration.Seconds())
}
|
||||
8
api/edge/callbacks/internal/retry/module.go
Normal file
8
api/edge/callbacks/internal/retry/module.go
Normal file
@@ -0,0 +1,8 @@
|
||||
package retry
|
||||
|
||||
import "time"
|
||||
|
||||
// Policy computes retry schedules.
type Policy interface {
	// NextAttempt returns the time at which the given attempt (1-based)
	// should run, applying exponential backoff between minDelay and
	// maxDelay with symmetric jitter of up to jitterRatio of the delay.
	NextAttempt(attempt int, now time.Time, minDelay, maxDelay time.Duration, jitterRatio float64) time.Time
}
|
||||
59
api/edge/callbacks/internal/retry/service.go
Normal file
59
api/edge/callbacks/internal/retry/service.go
Normal file
@@ -0,0 +1,59 @@
|
||||
package retry
|
||||
|
||||
import (
|
||||
"math"
|
||||
"math/rand"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
type service struct {
|
||||
mu sync.Mutex
|
||||
rnd *rand.Rand
|
||||
}
|
||||
|
||||
// New creates retry policy service.
|
||||
func New() Policy {
|
||||
return &service{rnd: rand.New(rand.NewSource(time.Now().UnixNano()))}
|
||||
}
|
||||
|
||||
func (s *service) NextAttempt(attempt int, now time.Time, minDelay, maxDelay time.Duration, jitterRatio float64) time.Time {
|
||||
if attempt < 1 {
|
||||
attempt = 1
|
||||
}
|
||||
if minDelay <= 0 {
|
||||
minDelay = time.Second
|
||||
}
|
||||
if maxDelay < minDelay {
|
||||
maxDelay = minDelay
|
||||
}
|
||||
|
||||
base := float64(minDelay)
|
||||
delay := time.Duration(base * math.Pow(2, float64(attempt-1)))
|
||||
if delay > maxDelay {
|
||||
delay = maxDelay
|
||||
}
|
||||
|
||||
if jitterRatio > 0 {
|
||||
if jitterRatio > 1 {
|
||||
jitterRatio = 1
|
||||
}
|
||||
maxJitter := int64(float64(delay) * jitterRatio)
|
||||
if maxJitter > 0 {
|
||||
s.mu.Lock()
|
||||
jitter := s.rnd.Int63n((maxJitter * 2) + 1)
|
||||
s.mu.Unlock()
|
||||
delta := jitter - maxJitter
|
||||
delay += time.Duration(delta)
|
||||
}
|
||||
}
|
||||
|
||||
if delay < minDelay {
|
||||
delay = minDelay
|
||||
}
|
||||
if delay > maxDelay {
|
||||
delay = maxDelay
|
||||
}
|
||||
|
||||
return now.UTC().Add(delay)
|
||||
}
|
||||
33
api/edge/callbacks/internal/secrets/module.go
Normal file
33
api/edge/callbacks/internal/secrets/module.go
Normal file
@@ -0,0 +1,33 @@
|
||||
package secrets
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/tech/sendico/pkg/mlogger"
|
||||
"github.com/tech/sendico/pkg/vault/kv"
|
||||
)
|
||||
|
||||
// Provider resolves secrets by reference.
type Provider interface {
	// GetSecret resolves ref to a secret value; the reference format
	// (static key, "env:NAME", or vault path) is decided by the provider.
	GetSecret(ctx context.Context, ref string) (string, error)
}

// VaultOptions configure Vault KV secret resolution.
type VaultOptions struct {
	Config       kv.Config
	DefaultField string // KV field used when a reference names none
}

// Options configure secret lookup behavior.
type Options struct {
	Logger   mlogger.Logger    // optional
	Static   map[string]string // exact-match secrets supplied via config
	CacheTTL time.Duration     // TTL for resolved values; <= 0 disables caching
	Vault    VaultOptions
}

// New creates secrets provider.
func New(opts Options) (Provider, error) {
	return newProvider(opts)
}
|
||||
224
api/edge/callbacks/internal/secrets/service.go
Normal file
224
api/edge/callbacks/internal/secrets/service.go
Normal file
@@ -0,0 +1,224 @@
|
||||
package secrets
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/tech/sendico/pkg/merrors"
|
||||
"github.com/tech/sendico/pkg/mlogger"
|
||||
"github.com/tech/sendico/pkg/vault/kv"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
const (
	// defaultVaultField is the KV field read when a reference names none.
	defaultVaultField = "value"
	// vaultRefPrefix marks a reference that must be resolved via Vault.
	vaultRefPrefix = "vault:"
)

// cacheEntry is a resolved secret with its expiry deadline.
type cacheEntry struct {
	value     string
	expiresAt time.Time
}

// provider resolves secret references from static config, environment
// variables, and (optionally) Vault KV, with a TTL cache.
type provider struct {
	logger        mlogger.Logger
	static        map[string]string // exact-match secrets from config
	ttl           time.Duration     // cache TTL; <= 0 disables caching
	vault         kv.Client         // nil unless vaultEnabled
	vaultEnabled  bool
	vaultDefField string // default KV field for refs without "#field"

	mu    sync.RWMutex
	cache map[string]cacheEntry // guarded by mu
}
|
||||
|
||||
// newProvider builds the provider. Vault support is enabled heuristically:
// only when at least one of address / token env / mount path is configured
// is a KV client constructed; otherwise vault references fail at lookup.
func newProvider(opts Options) (Provider, error) {
	logger := opts.Logger
	if logger == nil {
		logger = zap.NewNop()
	}

	// Copy static secrets, dropping entries with blank keys.
	static := map[string]string{}
	for k, v := range opts.Static {
		key := strings.TrimSpace(k)
		if key == "" {
			continue
		}
		static[key] = v
	}

	vaultField := strings.TrimSpace(opts.Vault.DefaultField)
	if vaultField == "" {
		vaultField = defaultVaultField
	}

	var vaultClient kv.Client
	vaultEnabled := false
	hasVaultConfig := strings.TrimSpace(opts.Vault.Config.Address) != "" ||
		strings.TrimSpace(opts.Vault.Config.TokenEnv) != "" ||
		strings.TrimSpace(opts.Vault.Config.MountPath) != ""
	if hasVaultConfig {
		client, err := kv.New(kv.Options{
			Logger:    logger.Named("vault"),
			Config:    opts.Vault.Config,
			Component: "callbacks secrets",
		})
		if err != nil {
			return nil, err
		}
		vaultClient = client
		vaultEnabled = true
	}

	return &provider{
		logger:        logger.Named("secrets"),
		static:        static,
		ttl:           opts.CacheTTL,
		vault:         vaultClient,
		vaultEnabled:  vaultEnabled,
		vaultDefField: vaultField,
		cache:         map[string]cacheEntry{},
	}, nil
}
|
||||
|
||||
func (p *provider) GetSecret(ctx context.Context, ref string) (string, error) {
|
||||
key := strings.TrimSpace(ref)
|
||||
if key == "" {
|
||||
return "", merrors.InvalidArgument("secret reference is required", "secret_ref")
|
||||
}
|
||||
if ctx == nil {
|
||||
ctx = context.Background()
|
||||
}
|
||||
|
||||
if value, ok := p.fromCache(key); ok {
|
||||
return value, nil
|
||||
}
|
||||
|
||||
value, err := p.resolve(ctx, key)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if strings.TrimSpace(value) == "" {
|
||||
return "", merrors.NoData("secret reference resolved to empty value")
|
||||
}
|
||||
|
||||
p.toCache(key, value)
|
||||
return value, nil
|
||||
}
|
||||
|
||||
// resolve looks a reference up in precedence order:
//  1. static map (exact match),
//  2. "env:NAME" environment variables,
//  3. Vault KV (explicit "vault:" prefix or path-shaped references).
// An explicit vault reference with Vault disabled is an error rather than a
// silent miss.
func (p *provider) resolve(ctx context.Context, key string) (string, error) {
	if value, ok := p.static[key]; ok {
		return value, nil
	}
	if strings.HasPrefix(key, "env:") {
		envKey := strings.TrimSpace(strings.TrimPrefix(key, "env:"))
		if envKey == "" {
			return "", merrors.InvalidArgument("secret env reference is invalid", "secret_ref")
		}
		value := strings.TrimSpace(os.Getenv(envKey))
		if value == "" {
			return "", merrors.NoData("secret env variable not set: " + envKey)
		}
		return value, nil
	}

	if strings.HasPrefix(strings.ToLower(key), vaultRefPrefix) && !p.vaultEnabled {
		return "", merrors.InvalidArgument("vault secret reference provided but vault is not configured", "secret_ref")
	}
	if p.vaultEnabled {
		// resolved is false when the reference does not look vault-shaped;
		// fall through to the generic not-found error in that case.
		value, resolved, err := p.resolveVault(ctx, key)
		if err != nil {
			return "", err
		}
		if resolved {
			return value, nil
		}
	}

	return "", merrors.NoData("secret reference not found: " + key)
}
|
||||
|
||||
// resolveVault parses ref as a vault path/field pair and reads it from KV.
// The second return value is false when ref is not vault-shaped, letting
// the caller fall through to other resolution strategies.
func (p *provider) resolveVault(ctx context.Context, ref string) (string, bool, error) {
	path, field, resolved, err := parseVaultRef(ref, p.vaultDefField)
	if err != nil {
		return "", false, err
	}
	if !resolved {
		return "", false, nil
	}

	value, err := p.vault.GetString(ctx, path, field)
	if err != nil {
		p.logger.Warn("Failed to resolve vault secret", zap.String("path", path), zap.String("field", field), zap.Error(err))
		// resolved=true: the ref was vault-shaped, so the error is final.
		return "", true, err
	}
	return value, true, nil
}
|
||||
|
||||
func parseVaultRef(ref, defaultField string) (string, string, bool, error) {
|
||||
raw := strings.TrimSpace(ref)
|
||||
lowered := strings.ToLower(raw)
|
||||
explicit := false
|
||||
if strings.HasPrefix(lowered, vaultRefPrefix) {
|
||||
explicit = true
|
||||
raw = strings.TrimSpace(raw[len(vaultRefPrefix):])
|
||||
}
|
||||
|
||||
if !explicit && !strings.Contains(raw, "/") && !strings.Contains(raw, "#") {
|
||||
return "", "", false, nil
|
||||
}
|
||||
|
||||
field := strings.TrimSpace(defaultField)
|
||||
if field == "" {
|
||||
field = defaultVaultField
|
||||
}
|
||||
|
||||
if idx := strings.Index(raw, "#"); idx >= 0 {
|
||||
field = strings.TrimSpace(raw[idx+1:])
|
||||
raw = strings.TrimSpace(raw[:idx])
|
||||
if field == "" {
|
||||
return "", "", false, merrors.InvalidArgument("vault secret field is required", "secret_ref")
|
||||
}
|
||||
}
|
||||
|
||||
path := strings.Trim(strings.TrimSpace(raw), "/")
|
||||
if path == "" {
|
||||
return "", "", false, merrors.InvalidArgument("vault secret path is required", "secret_ref")
|
||||
}
|
||||
|
||||
return path, field, true, nil
|
||||
}
|
||||
|
||||
func (p *provider) fromCache(key string) (string, bool) {
|
||||
if p.ttl <= 0 {
|
||||
return "", false
|
||||
}
|
||||
p.mu.RLock()
|
||||
entry, ok := p.cache[key]
|
||||
p.mu.RUnlock()
|
||||
if !ok {
|
||||
return "", false
|
||||
}
|
||||
if time.Now().After(entry.expiresAt) {
|
||||
p.mu.Lock()
|
||||
delete(p.cache, key)
|
||||
p.mu.Unlock()
|
||||
return "", false
|
||||
}
|
||||
return entry.value, true
|
||||
}
|
||||
|
||||
func (p *provider) toCache(key, value string) {
|
||||
if p.ttl <= 0 {
|
||||
return
|
||||
}
|
||||
p.mu.Lock()
|
||||
p.cache[key] = cacheEntry{
|
||||
value: value,
|
||||
expiresAt: time.Now().Add(p.ttl),
|
||||
}
|
||||
p.mu.Unlock()
|
||||
}
|
||||
16
api/edge/callbacks/internal/security/module.go
Normal file
16
api/edge/callbacks/internal/security/module.go
Normal file
@@ -0,0 +1,16 @@
|
||||
package security
|
||||
|
||||
import "context"
|
||||
|
||||
// Config controls URL validation and SSRF checks.
type Config struct {
	RequireHTTPS      bool     // reject non-HTTPS callback URLs
	AllowedHosts      []string // if non-empty, only these hostnames pass
	AllowedPorts      []int    // if non-empty, only these ports pass
	DNSResolveTimeout int      // DNS lookup timeout in milliseconds
}

// Validator validates outbound callback URLs.
type Validator interface {
	// ValidateURL returns nil when target is safe to call back.
	ValidateURL(ctx context.Context, target string) error
}
|
||||
163
api/edge/callbacks/internal/security/service.go
Normal file
163
api/edge/callbacks/internal/security/service.go
Normal file
@@ -0,0 +1,163 @@
|
||||
package security
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"net/netip"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/tech/sendico/pkg/merrors"
|
||||
)
|
||||
|
||||
// service implements Validator with allowlists and blocked-range checks.
type service struct {
	requireHTTPS bool
	allowedHosts map[string]struct{} // lowercase hostnames; empty = allow all
	allowedPorts map[int]struct{}    // empty = allow all
	dnsTimeout   time.Duration
	resolver     *net.Resolver
}

// New creates URL validator.
func New(cfg Config) Validator {
	// Normalize the host allowlist to lowercase, dropping blanks.
	hosts := make(map[string]struct{}, len(cfg.AllowedHosts))
	for _, host := range cfg.AllowedHosts {
		h := strings.ToLower(strings.TrimSpace(host))
		if h == "" {
			continue
		}
		hosts[h] = struct{}{}
	}
	// Keep only positive ports.
	ports := make(map[int]struct{}, len(cfg.AllowedPorts))
	for _, port := range cfg.AllowedPorts {
		if port > 0 {
			ports[port] = struct{}{}
		}
	}

	timeout := time.Duration(cfg.DNSResolveTimeout) * time.Millisecond
	if timeout <= 0 {
		timeout = 2 * time.Second
	}

	return &service{
		requireHTTPS: cfg.RequireHTTPS,
		allowedHosts: hosts,
		allowedPorts: ports,
		dnsTimeout:   timeout,
		resolver:     net.DefaultResolver,
	}
}
|
||||
|
||||
// ValidateURL performs SSRF-oriented validation of an outbound callback URL:
// syntax, no embedded credentials, optional HTTPS requirement, host/port
// allowlists, and blocked-IP-range checks for both literal IPs and every
// address the hostname resolves to.
//
// NOTE(review): DNS is checked here but the actual HTTP dial resolves again,
// so a rebinding attacker could swap records between check and use — confirm
// whether the delivery client pins resolved addresses.
func (s *service) ValidateURL(ctx context.Context, target string) error {
	parsed, err := url.Parse(strings.TrimSpace(target))
	if err != nil {
		return merrors.InvalidArgumentWrap(err, "invalid callback URL", "url")
	}
	if parsed == nil || parsed.Host == "" {
		return merrors.InvalidArgument("callback URL host is required", "url")
	}
	if parsed.User != nil {
		// user:pass@host URLs are a credential-leak vector.
		return merrors.InvalidArgument("callback URL credentials are not allowed", "url")
	}
	if s.requireHTTPS && !strings.EqualFold(parsed.Scheme, "https") {
		return merrors.InvalidArgument("callback URL must use HTTPS", "url")
	}

	host := strings.ToLower(strings.TrimSpace(parsed.Hostname()))
	if host == "" {
		return merrors.InvalidArgument("callback URL host is empty", "url")
	}
	if len(s.allowedHosts) > 0 {
		if _, ok := s.allowedHosts[host]; !ok {
			return merrors.InvalidArgument("callback host is not in allowlist", "url.host")
		}
	}

	port, err := resolvePort(parsed)
	if err != nil {
		return err
	}
	if len(s.allowedPorts) > 0 {
		if _, ok := s.allowedPorts[port]; !ok {
			return merrors.InvalidArgument("callback URL port is not allowed", "url.port")
		}
	}

	// Literal IP hosts: check the blocked ranges directly, no DNS needed.
	if addr, addrErr := netip.ParseAddr(host); addrErr == nil {
		if isBlocked(addr) {
			return merrors.InvalidArgument("callback URL resolves to blocked IP range", "url")
		}
		return nil
	}

	// Hostname: resolve with a bounded timeout and check every address.
	lookupCtx := ctx
	if lookupCtx == nil {
		lookupCtx = context.Background()
	}
	lookupCtx, cancel := context.WithTimeout(lookupCtx, s.dnsTimeout)
	defer cancel()

	ips, err := s.resolver.LookupIPAddr(lookupCtx, host)
	if err != nil {
		return merrors.InternalWrap(err, "failed to resolve callback host")
	}
	if len(ips) == 0 {
		return merrors.InvalidArgument("callback host did not resolve", "url.host")
	}
	for _, ip := range ips {
		if ip.IP == nil {
			continue
		}
		addr, ok := netip.AddrFromSlice(ip.IP)
		if ok && isBlocked(addr) {
			// One blocked address is enough to reject the whole host.
			return merrors.InvalidArgument("callback URL resolves to blocked IP range", "url.host")
		}
	}

	return nil
}
|
||||
|
||||
func resolvePort(parsed *url.URL) (int, error) {
|
||||
if parsed == nil {
|
||||
return 0, merrors.InvalidArgument("callback URL is required", "url")
|
||||
}
|
||||
portStr := strings.TrimSpace(parsed.Port())
|
||||
if portStr == "" {
|
||||
if strings.EqualFold(parsed.Scheme, "https") {
|
||||
return 443, nil
|
||||
}
|
||||
if strings.EqualFold(parsed.Scheme, "http") {
|
||||
return 80, nil
|
||||
}
|
||||
return 0, merrors.InvalidArgument("callback URL scheme is not supported", "url.scheme")
|
||||
}
|
||||
|
||||
port, err := strconv.Atoi(portStr)
|
||||
if err != nil || port <= 0 || port > 65535 {
|
||||
return 0, merrors.InvalidArgument("callback URL port is invalid", "url.port")
|
||||
}
|
||||
|
||||
return port, nil
|
||||
}
|
||||
|
||||
func isBlocked(ip netip.Addr) bool {
|
||||
if !ip.IsValid() {
|
||||
return true
|
||||
}
|
||||
if ip.IsLoopback() || ip.IsPrivate() || ip.IsMulticast() || ip.IsUnspecified() {
|
||||
return true
|
||||
}
|
||||
if ip.IsLinkLocalUnicast() || ip.IsLinkLocalMulticast() {
|
||||
return true
|
||||
}
|
||||
|
||||
// Block common cloud metadata endpoint.
|
||||
if ip.Is4() && ip.String() == "169.254.169.254" {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
271
api/edge/callbacks/internal/server/internal/serverimp.go
Normal file
271
api/edge/callbacks/internal/server/internal/serverimp.go
Normal file
@@ -0,0 +1,271 @@
|
||||
package serverimp
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/nats-io/nats.go"
|
||||
"github.com/tech/sendico/edge/callbacks/internal/config"
|
||||
"github.com/tech/sendico/edge/callbacks/internal/delivery"
|
||||
"github.com/tech/sendico/edge/callbacks/internal/events"
|
||||
"github.com/tech/sendico/edge/callbacks/internal/ingest"
|
||||
"github.com/tech/sendico/edge/callbacks/internal/ops"
|
||||
"github.com/tech/sendico/edge/callbacks/internal/retry"
|
||||
"github.com/tech/sendico/edge/callbacks/internal/secrets"
|
||||
"github.com/tech/sendico/edge/callbacks/internal/security"
|
||||
"github.com/tech/sendico/edge/callbacks/internal/signing"
|
||||
"github.com/tech/sendico/edge/callbacks/internal/storage"
|
||||
"github.com/tech/sendico/edge/callbacks/internal/subscriptions"
|
||||
"github.com/tech/sendico/pkg/api/routers/health"
|
||||
"github.com/tech/sendico/pkg/db"
|
||||
"github.com/tech/sendico/pkg/merrors"
|
||||
msg "github.com/tech/sendico/pkg/messaging"
|
||||
"github.com/tech/sendico/pkg/mlogger"
|
||||
"github.com/tech/sendico/pkg/vault/kv"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// defaultShutdownTimeout bounds Mongo disconnect when no runtime config
// is available.
const defaultShutdownTimeout = 15 * time.Second

// jetStreamProvider is satisfied by messaging brokers that expose their
// underlying JetStream context; the ingest service requires it.
type jetStreamProvider interface {
	JetStream() nats.JetStreamContext
}

// Create builds the server implementation; no resources are acquired
// until Start is called.
func Create(logger mlogger.Logger, file string, debug bool) (*Imp, error) {
	return &Imp{
		logger: logger.Named("server"),
		file:   file,
		debug:  debug,
	}, nil
}
|
||||
|
||||
// Start boots the whole callbacks service: it loads config, wires metrics,
// storage, security, secrets, signing, retry, messaging, ingest, and
// delivery, then blocks until a stop signal. On any wiring error it tears
// down everything already started via shutdownRuntime and returns the error.
func (i *Imp) Start() error {
	i.initStopChannels()
	// Always signal Shutdown waiters when Start returns, on any path.
	defer i.closeDone()

	loader := config.New(i.logger)
	cfg, err := loader.Load(i.file)
	if err != nil {
		return err
	}
	i.config = cfg

	// Metrics/health endpoint comes up first so probes see "starting".
	observer := ops.NewObserver()
	metricsSrv, err := ops.NewHTTPServer(i.logger, ops.HTTPServerConfig{Address: cfg.Metrics.ListenAddress()})
	if err != nil {
		return err
	}
	i.opServer = metricsSrv
	i.opServer.SetStatus(health.SSStarting)

	conn, err := db.ConnectMongo(i.logger.Named("mongo"), cfg.Database)
	if err != nil {
		i.shutdownRuntime(context.Background())
		return err
	}
	i.mongoConn = conn

	repo, err := storage.New(i.logger, conn)
	if err != nil {
		i.shutdownRuntime(context.Background())
		return err
	}

	resolver, err := subscriptions.New(subscriptions.Dependencies{EndpointRepo: repo.Endpoints()})
	if err != nil {
		i.shutdownRuntime(context.Background())
		return err
	}

	// SSRF / URL validation for outbound deliveries.
	securityValidator := security.New(security.Config{
		RequireHTTPS:      cfg.Security.RequireHTTPS,
		AllowedHosts:      cfg.Security.AllowedHosts,
		AllowedPorts:      cfg.Security.AllowedPorts,
		DNSResolveTimeout: int(cfg.Security.DNSResolveTimeoutMS() / time.Millisecond),
	})

	// Secret resolution (static / env / Vault) feeding the signer.
	secretProvider, err := secrets.New(secrets.Options{
		Logger:   i.logger,
		Static:   cfg.Secrets.Static,
		CacheTTL: cfg.Secrets.CacheTTL(),
		Vault: secrets.VaultOptions{
			Config: kv.Config{
				Address:   cfg.Secrets.Vault.Address,
				TokenEnv:  cfg.Secrets.Vault.TokenEnv,
				Namespace: cfg.Secrets.Vault.Namespace,
				MountPath: cfg.Secrets.Vault.MountPath,
			},
			DefaultField: cfg.Secrets.Vault.DefaultField,
		},
	})
	if err != nil {
		i.shutdownRuntime(context.Background())
		return err
	}
	signer, err := signing.New(signing.Dependencies{Logger: i.logger, Provider: secretProvider})
	if err != nil {
		i.shutdownRuntime(context.Background())
		return err
	}

	retryPolicy := retry.New()
	eventSvc := events.New(i.logger)

	broker, err := msg.CreateMessagingBroker(i.logger.Named("messaging"), cfg.Messaging)
	if err != nil {
		i.shutdownRuntime(context.Background())
		return err
	}
	i.broker = broker

	// Ingest requires raw JetStream access; fail fast if the configured
	// broker cannot provide it.
	jsProvider, ok := broker.(jetStreamProvider)
	if !ok || jsProvider.JetStream() == nil {
		i.shutdownRuntime(context.Background())
		return merrors.Internal("callbacks: messaging broker does not provide JetStream")
	}

	ingestSvc, err := ingest.New(ingest.Dependencies{
		Logger:    i.logger,
		JetStream: jsProvider.JetStream(),
		Config: ingest.Config{
			Stream:       cfg.Ingest.Stream,
			Subject:      cfg.Ingest.Subject,
			Durable:      cfg.Ingest.Durable,
			BatchSize:    cfg.Ingest.BatchSize,
			FetchTimeout: cfg.Ingest.FetchTimeout(),
			IdleSleep:    cfg.Ingest.IdleSleep(),
		},
		Events:       eventSvc,
		Resolver:     resolver,
		InboxRepo:    repo.Inbox(),
		TaskRepo:     repo.Tasks(),
		TaskDefaults: deliveryTaskDefaults(cfg),
		Observer:     observer,
	})
	if err != nil {
		i.shutdownRuntime(context.Background())
		return err
	}
	i.ingest = ingestSvc

	deliverySvc, err := delivery.New(delivery.Dependencies{
		Logger: i.logger,
		Config: delivery.Config{
			WorkerConcurrency: cfg.Delivery.WorkerConcurrency,
			WorkerPoll:        cfg.Delivery.WorkerPollInterval(),
			LockTTL:           cfg.Delivery.LockTTL(),
			RequestTimeout:    cfg.Delivery.RequestTimeout(),
			JitterRatio:       cfg.Delivery.JitterRatio,
		},
		Tasks:    repo.Tasks(),
		Retry:    retryPolicy,
		Security: securityValidator,
		Signer:   signer,
		Observer: observer,
	})
	if err != nil {
		i.shutdownRuntime(context.Background())
		return err
	}
	i.delivery = deliverySvc

	// Launch the workers and flip health to running only once both are up.
	runCtx, cancel := context.WithCancel(context.Background())
	i.runCancel = cancel
	i.ingest.Start(runCtx)
	i.delivery.Start(runCtx)
	i.opServer.SetStatus(health.SSRunning)

	i.logger.Info("Callbacks service ready",
		zap.String("subject", cfg.Ingest.Subject),
		zap.String("stream", cfg.Ingest.Stream),
		zap.Int("workers", cfg.Delivery.WorkerConcurrency),
	)

	// Block until Shutdown (or another stop source) closes stopCh.
	<-i.stopCh
	i.logger.Info("Callbacks service stop signal received")
	i.shutdownRuntime(context.Background())

	return nil
}
|
||||
|
||||
func (i *Imp) Shutdown() {
|
||||
i.signalStop()
|
||||
if i.doneCh != nil {
|
||||
<-i.doneCh
|
||||
}
|
||||
}
|
||||
|
||||
func (i *Imp) initStopChannels() {
|
||||
if i.stopCh == nil {
|
||||
i.stopCh = make(chan struct{})
|
||||
}
|
||||
if i.doneCh == nil {
|
||||
i.doneCh = make(chan struct{})
|
||||
}
|
||||
}
|
||||
|
||||
func (i *Imp) signalStop() {
|
||||
i.stopOnce.Do(func() {
|
||||
if i.stopCh != nil {
|
||||
close(i.stopCh)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func (i *Imp) closeDone() {
|
||||
i.doneOnce.Do(func() {
|
||||
if i.doneCh != nil {
|
||||
close(i.doneCh)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// shutdownRuntime tears down the running service exactly once (guarded by
// i.shutdown). Order matters: flip health status to terminating, cancel the
// shared run context, stop the ingest and delivery loops, close the ops
// server, and finally drop the MongoDB connection under a bounded timeout.
func (i *Imp) shutdownRuntime(ctx context.Context) {
	i.shutdown.Do(func() {
		if i.opServer != nil {
			// Report terminating first so probes stop routing traffic here
			// while the components wind down.
			i.opServer.SetStatus(health.SSTerminating)
		}
		if i.runCancel != nil {
			// Cancels the run context passed to ingest.Start/delivery.Start.
			i.runCancel()
		}
		if i.ingest != nil {
			i.ingest.Stop()
		}
		if i.delivery != nil {
			i.delivery.Stop()
		}
		if i.opServer != nil {
			i.opServer.Close(ctx)
			i.opServer = nil
		}

		if i.mongoConn != nil {
			// Bound the disconnect so shutdown cannot hang on an unreachable
			// cluster.
			timeout := i.shutdownTimeout()
			shutdownCtx, cancel := context.WithTimeout(ctx, timeout)
			defer cancel()
			if err := i.mongoConn.Disconnect(shutdownCtx); err != nil {
				// Best effort: log and continue, the process is exiting anyway.
				i.logger.Warn("Failed to close MongoDB connection", zap.Error(err))
			}
			i.mongoConn = nil
		}
	})
}
|
||||
|
||||
func (i *Imp) shutdownTimeout() time.Duration {
|
||||
if i.config != nil && i.config.Runtime != nil {
|
||||
return i.config.Runtime.ShutdownTimeout()
|
||||
}
|
||||
return defaultShutdownTimeout
|
||||
}
|
||||
|
||||
func deliveryTaskDefaults(cfg *config.Config) storage.TaskDefaults {
|
||||
if cfg == nil {
|
||||
return storage.TaskDefaults{}
|
||||
}
|
||||
return storage.TaskDefaults{
|
||||
MaxAttempts: cfg.Delivery.MaxAttempts,
|
||||
MinDelay: cfg.Delivery.MinDelay(),
|
||||
MaxDelay: cfg.Delivery.MaxDelay(),
|
||||
RequestTimeout: cfg.Delivery.RequestTimeout(),
|
||||
}
|
||||
}
|
||||
37
api/edge/callbacks/internal/server/internal/types.go
Normal file
37
api/edge/callbacks/internal/server/internal/types.go
Normal file
@@ -0,0 +1,37 @@
|
||||
package serverimp
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"github.com/tech/sendico/edge/callbacks/internal/config"
|
||||
"github.com/tech/sendico/edge/callbacks/internal/delivery"
|
||||
"github.com/tech/sendico/edge/callbacks/internal/ingest"
|
||||
"github.com/tech/sendico/edge/callbacks/internal/ops"
|
||||
"github.com/tech/sendico/pkg/db"
|
||||
mb "github.com/tech/sendico/pkg/messaging/broker"
|
||||
"github.com/tech/sendico/pkg/mlogger"
|
||||
)
|
||||
|
||||
// Imp is the callbacks service application implementation. It owns the
// process-level wiring: configuration, infrastructure connections, the
// ingest/delivery subsystems, and shutdown coordination.
type Imp struct {
	logger mlogger.Logger
	file   string // presumably the configuration file path passed to Create — confirm
	debug  bool   // verbose-diagnostics flag

	config *config.Config

	mongoConn *db.MongoConnection
	broker    mb.Broker

	ingest   ingest.Service
	delivery delivery.Service

	opServer ops.HTTPServer // health/metrics endpoint

	runCancel context.CancelFunc // cancels the context shared by ingest/delivery
	shutdown  sync.Once          // guards shutdownRuntime
	stopOnce  sync.Once          // guards closing stopCh
	doneOnce  sync.Once          // guards closing doneCh
	stopCh    chan struct{}      // closed to request a stop
	doneCh    chan struct{}      // closed when the run loop has exited
}
|
||||
11
api/edge/callbacks/internal/server/server.go
Normal file
11
api/edge/callbacks/internal/server/server.go
Normal file
@@ -0,0 +1,11 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
serverimp "github.com/tech/sendico/edge/callbacks/internal/server/internal"
|
||||
"github.com/tech/sendico/pkg/mlogger"
|
||||
"github.com/tech/sendico/pkg/server"
|
||||
)
|
||||
|
||||
// Create builds the callbacks service application by delegating to the
// internal server implementation. file is forwarded as-is (presumably the
// configuration file path — confirm with serverimp.Create); debug enables
// verbose diagnostics.
func Create(logger mlogger.Logger, file string, debug bool) (server.Application, error) {
	return serverimp.Create(logger, file, debug)
}
|
||||
36
api/edge/callbacks/internal/signing/module.go
Normal file
36
api/edge/callbacks/internal/signing/module.go
Normal file
@@ -0,0 +1,36 @@
|
||||
package signing
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/tech/sendico/edge/callbacks/internal/secrets"
|
||||
"github.com/tech/sendico/pkg/mlogger"
|
||||
)
|
||||
|
||||
// Supported signing modes for outgoing callback payloads.
const (
	// ModeNone sends the payload unsigned.
	ModeNone = "none"
	// ModeHMACSHA256 signs "<RFC3339Nano timestamp>.<payload>" with HMAC-SHA256.
	ModeHMACSHA256 = "hmac_sha256"
)

// SignedPayload is what gets sent over HTTP.
type SignedPayload struct {
	Body    []byte            // payload bytes to transmit (a defensive copy)
	Headers map[string]string // signing-related headers to attach to the request
}

// Signer signs callback payloads.
type Signer interface {
	// Sign produces the body and headers to send for the given signing mode.
	// secretRef selects the key for modes that need one; now supplies the
	// signing timestamp.
	Sign(ctx context.Context, mode, secretRef string, payload []byte, now time.Time) (*SignedPayload, error)
}

// Dependencies configures signer service.
type Dependencies struct {
	Logger   mlogger.Logger   // optional; a no-op logger is used when nil
	Provider secrets.Provider // required; resolves secretRef to key material
}

// New creates signer service.
func New(deps Dependencies) (Signer, error) {
	return newService(deps)
}
|
||||
80
api/edge/callbacks/internal/signing/service.go
Normal file
80
api/edge/callbacks/internal/signing/service.go
Normal file
@@ -0,0 +1,80 @@
|
||||
package signing
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/hmac"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/tech/sendico/edge/callbacks/internal/secrets"
|
||||
"github.com/tech/sendico/pkg/merrors"
|
||||
"github.com/tech/sendico/pkg/mlogger"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type service struct {
|
||||
logger mlogger.Logger
|
||||
provider secrets.Provider
|
||||
}
|
||||
|
||||
func newService(deps Dependencies) (Signer, error) {
|
||||
if deps.Provider == nil {
|
||||
return nil, merrors.InvalidArgument("signing: secrets provider is required", "provider")
|
||||
}
|
||||
logger := deps.Logger
|
||||
if logger == nil {
|
||||
logger = zap.NewNop()
|
||||
}
|
||||
|
||||
return &service{
|
||||
logger: logger.Named("signing"),
|
||||
provider: deps.Provider,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Sign produces the outgoing body and signing headers for the requested mode.
// An empty/blank mode is treated as ModeNone. The payload is always copied so
// callers may reuse their buffer.
func (s *service) Sign(ctx context.Context, mode, secretRef string, payload []byte, now time.Time) (*SignedPayload, error) {
	normalizedMode := strings.ToLower(strings.TrimSpace(mode))
	if normalizedMode == "" {
		normalizedMode = ModeNone
	}

	switch normalizedMode {
	case ModeNone:
		// Unsigned: body only, no extra headers.
		return &SignedPayload{
			Body:    append([]byte(nil), payload...),
			Headers: map[string]string{},
		}, nil
	case ModeHMACSHA256:
		if strings.TrimSpace(secretRef) == "" {
			return nil, merrors.InvalidArgument("signing: secret reference is required for hmac", "secret_ref")
		}
		secret, err := s.provider.GetSecret(ctx, secretRef)
		if err != nil {
			s.logger.Warn("Failed to load signing secret", zap.String("secret_ref", secretRef), zap.Error(err))
			return nil, err
		}

		// Signature covers "<timestamp>.<payload>" so receivers can reject
		// replays by checking X-Callback-Timestamp against the signature.
		ts := now.UTC().Format(time.RFC3339Nano)
		mac := hmac.New(sha256.New, []byte(secret))
		message := append([]byte(ts+"."), payload...)
		if _, err := mac.Write(message); err != nil {
			return nil, merrors.InternalWrap(err, "signing: failed to compute hmac")
		}
		signature := hex.EncodeToString(mac.Sum(nil))

		return &SignedPayload{
			Body: append([]byte(nil), payload...),
			Headers: map[string]string{
				"X-Callback-Timestamp": ts,
				"X-Callback-Signature": signature,
				"X-Callback-Algorithm": "hmac-sha256",
				// NOTE(review): setting Content-Length as an application
				// header is unusual — the HTTP transport normally computes
				// it; confirm the delivery client honors or strips this.
				"Content-Length": strconv.Itoa(len(payload)),
			},
		}, nil
	default:
		return nil, merrors.InvalidArgument("signing: unsupported mode", "mode")
	}
}
|
||||
99
api/edge/callbacks/internal/storage/module.go
Normal file
99
api/edge/callbacks/internal/storage/module.go
Normal file
@@ -0,0 +1,99 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/tech/sendico/pkg/db"
|
||||
"github.com/tech/sendico/pkg/mlogger"
|
||||
"go.mongodb.org/mongo-driver/v2/bson"
|
||||
)
|
||||
|
||||
// TaskStatus tracks delivery task lifecycle.
type TaskStatus string

// Lifecycle: PENDING -> (DELIVERED | RETRY) and RETRY -> (DELIVERED | FAILED)
// as attempts are exhausted.
const (
	TaskStatusPending   TaskStatus = "PENDING"
	TaskStatusRetry     TaskStatus = "RETRY"
	TaskStatusDelivered TaskStatus = "DELIVERED"
	TaskStatusFailed    TaskStatus = "FAILED"
)

// Endpoint describes one target callback endpoint.
type Endpoint struct {
	ID             bson.ObjectID
	ClientID       string
	URL            string
	SigningMode    string // see the signing package modes
	SecretRef      string // key reference for signed modes
	Headers        map[string]string
	MaxAttempts    int           // 0 means "use defaults"
	MinDelay       time.Duration // 0 means "use defaults"
	MaxDelay       time.Duration // 0 means "use defaults"
	RequestTimeout time.Duration // 0 means "use defaults"
}

// Task is one callback delivery job: a snapshot of the endpoint's delivery
// policy plus the payload and retry state.
type Task struct {
	ID             bson.ObjectID
	EventID        string
	EndpointID     bson.ObjectID
	EndpointURL    string
	SigningMode    string
	SecretRef      string
	Headers        map[string]string
	Payload        []byte
	Attempt        int
	MaxAttempts    int
	MinDelay       time.Duration
	MaxDelay       time.Duration
	RequestTimeout time.Duration
	Status         TaskStatus
	NextAttemptAt  time.Time
}

// TaskDefaults are applied when creating tasks.
type TaskDefaults struct {
	MaxAttempts    int
	MinDelay       time.Duration
	MaxDelay       time.Duration
	RequestTimeout time.Duration
}

// Options configures mongo collections.
// NOTE(review): Options is not consumed by New below — collection names come
// from package constants in service.go. Confirm whether this is dead config
// or intended for a future constructor overload.
type Options struct {
	InboxCollection     string
	TasksCollection     string
	EndpointsCollection string
}

// InboxRepo controls event dedupe state.
type InboxRepo interface {
	// TryInsert records a first-seen marker; it reports true when this call
	// created the marker and false when the event was already recorded.
	TryInsert(ctx context.Context, eventID, clientID, eventType string, at time.Time) (bool, error)
}

// EndpointRepo resolves endpoints for events.
type EndpointRepo interface {
	FindActiveByClientAndType(ctx context.Context, clientID, eventType string) ([]Endpoint, error)
}

// TaskRepo manages callback tasks.
type TaskRepo interface {
	UpsertTasks(ctx context.Context, eventID string, endpoints []Endpoint, payload []byte, defaults TaskDefaults, at time.Time) error
	LockNextTask(ctx context.Context, now time.Time, workerID string, lockTTL time.Duration) (*Task, error)
	MarkDelivered(ctx context.Context, taskID bson.ObjectID, httpCode int, latency time.Duration, at time.Time) error
	MarkRetry(ctx context.Context, taskID bson.ObjectID, attempt int, nextAttemptAt time.Time, lastError string, httpCode int, at time.Time) error
	MarkFailed(ctx context.Context, taskID bson.ObjectID, attempt int, lastError string, httpCode int, at time.Time) error
}

// Repository is the callbacks persistence contract.
type Repository interface {
	Inbox() InboxRepo
	Endpoints() EndpointRepo
	Tasks() TaskRepo
}

// New creates a Mongo-backed callbacks repository.
func New(logger mlogger.Logger, conn *db.MongoConnection) (Repository, error) {
	return newMongoRepository(logger, conn)
}
|
||||
513
api/edge/callbacks/internal/storage/service.go
Normal file
513
api/edge/callbacks/internal/storage/service.go
Normal file
@@ -0,0 +1,513 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/tech/sendico/pkg/db"
|
||||
"github.com/tech/sendico/pkg/db/repository"
|
||||
"github.com/tech/sendico/pkg/db/repository/builder"
|
||||
ri "github.com/tech/sendico/pkg/db/repository/index"
|
||||
"github.com/tech/sendico/pkg/db/storable"
|
||||
"github.com/tech/sendico/pkg/merrors"
|
||||
"github.com/tech/sendico/pkg/mlogger"
|
||||
mutil "github.com/tech/sendico/pkg/mutil/db"
|
||||
"go.mongodb.org/mongo-driver/v2/bson"
|
||||
"go.mongodb.org/mongo-driver/v2/mongo"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// Mongo collection names used by this repository.
const (
	inboxCollection     string = "inbox"
	tasksCollection     string = "tasks"
	endpointsCollection string = "endpoints"
)

// mongoRepository bundles the three collection-backed stores behind the
// Repository interface.
type mongoRepository struct {
	logger mlogger.Logger

	inboxRepo     repository.Repository
	tasksRepo     repository.Repository
	endpointsRepo repository.Repository

	inbox     InboxRepo
	endpoints EndpointRepo
	tasks     TaskRepo
}

// inboxDoc is the dedupe marker persisted once per ingested event.
type inboxDoc struct {
	storable.Base `bson:",inline"`
	EventID       string `bson:"event_id"`
	ClientID      string `bson:"client_id"`
	EventType     string `bson:"event_type"`
}

// Collection names the mongo collection for inbox documents.
func (d *inboxDoc) Collection() string {
	return inboxCollection
}

// delayConfig holds retry backoff bounds in milliseconds.
type delayConfig struct {
	MinDelayMS int `bson:"min_ms"`
	MaxDelayMS int `bson:"max_ms"`
}

// deliveryPolicy is the delivery configuration shared by endpoint documents
// and task snapshots: signing, extra headers, and retry limits.
type deliveryPolicy struct {
	delayConfig      `bson:",inline"`
	SigningMode      string            `bson:"signing_mode"`
	SecretRef        string            `bson:"secret_ref"`
	Headers          map[string]string `bson:"headers"`
	MaxAttempts      int               `bson:"max_attempts"`
	RequestTimeoutMS int               `bson:"request_timeout_ms"`
}

// endpointDoc is the persisted webhook endpoint registration.
type endpointDoc struct {
	storable.Base  `bson:",inline"`
	deliveryPolicy `bson:"retry_policy"`
	ClientID       string   `bson:"client_id"`
	Status         string   `bson:"status"`
	URL            string   `bson:"url"`
	EventTypes     []string `bson:"event_types"`
}

// Collection names the mongo collection for endpoint documents.
func (d *endpointDoc) Collection() string {
	return endpointsCollection
}

// taskDoc is the persisted delivery job. Its retry_policy is snapshotted from
// the endpoint at task-creation time so later endpoint edits do not affect
// in-flight tasks.
type taskDoc struct {
	storable.Base  `bson:",inline"`
	deliveryPolicy `bson:"retry_policy"`
	EventID        string        `bson:"event_id"`
	EndpointID     bson.ObjectID `bson:"endpoint_id"`
	EndpointURL    string        `bson:"endpoint_url"`
	Payload        []byte        `bson:"payload"`
	Status         TaskStatus    `bson:"status"`
	Attempt        int           `bson:"attempt"`
	LastError      string        `bson:"last_error,omitempty"`
	LastHTTPCode   int           `bson:"last_http_code,omitempty"`
	NextAttemptAt  time.Time     `bson:"next_attempt_at"`
	LockedUntil    *time.Time    `bson:"locked_until,omitempty"` // worker lease expiry; nil/absent when unlocked
	WorkerID       string        `bson:"worker_id,omitempty"`    // owner of the current lease
	DeliveredAt    *time.Time    `bson:"delivered_at,omitempty"`
}

// Collection names the mongo collection for task documents.
func (d *taskDoc) Collection() string {
	return tasksCollection
}
|
||||
|
||||
// newMongoRepository wires the three collection stores, ensuring indexes
// exist before the repository is handed out. A nil logger is replaced with a
// no-op logger; a nil connection is an error.
func newMongoRepository(logger mlogger.Logger, conn *db.MongoConnection) (Repository, error) {
	if logger == nil {
		logger = zap.NewNop()
	}
	if conn == nil {
		return nil, merrors.InvalidArgument("callbacks storage: mongo connection is required", "conn")
	}

	repo := &mongoRepository{
		logger:        logger.Named("storage"),
		inboxRepo:     repository.CreateMongoRepository(conn.Database(), inboxCollection),
		tasksRepo:     repository.CreateMongoRepository(conn.Database(), tasksCollection),
		endpointsRepo: repository.CreateMongoRepository(conn.Database(), endpointsCollection),
	}

	// Fail construction when indexes cannot be created: dedupe and lock
	// semantics below depend on the unique indexes existing.
	if err := repo.ensureIndexes(); err != nil {
		return nil, err
	}

	repo.inbox = &inboxStore{logger: repo.logger.Named(repo.inboxRepo.Collection()), repo: repo.inboxRepo}
	repo.endpoints = &endpointStore{logger: repo.logger.Named(repo.endpointsRepo.Collection()), repo: repo.endpointsRepo}
	repo.tasks = &taskStore{logger: repo.logger.Named(repo.tasksRepo.Collection()), repo: repo.tasksRepo}

	return repo, nil
}
|
||||
|
||||
// Inbox returns the event dedupe store.
func (m *mongoRepository) Inbox() InboxRepo {
	return m.inbox
}

// Endpoints returns the webhook endpoint lookup store.
func (m *mongoRepository) Endpoints() EndpointRepo {
	return m.endpoints
}

// Tasks returns the delivery task store.
func (m *mongoRepository) Tasks() TaskRepo {
	return m.tasks
}
|
||||
|
||||
// ensureIndexes creates the indexes the repository's semantics rely on:
//   - inbox uq_event_id: unique, makes TryInsert an atomic first-seen check;
//   - tasks uq_event_endpoint: unique, makes UpsertTasks idempotent per
//     (event, endpoint) pair;
//   - tasks idx_dispatch_scan: supports LockNextTask's candidate scan;
//   - endpoints idx_client_event: supports FindActiveByClientAndType.
func (m *mongoRepository) ensureIndexes() error {
	if err := m.inboxRepo.CreateIndex(&ri.Definition{
		Name:   "uq_event_id",
		Unique: true,
		Keys: []ri.Key{
			{Field: "event_id", Sort: ri.Asc},
		},
	}); err != nil {
		return merrors.InternalWrap(err, "callbacks storage: failed to create inbox indexes")
	}

	for _, def := range []*ri.Definition{
		{
			Name:   "uq_event_endpoint",
			Unique: true,
			Keys: []ri.Key{
				{Field: "event_id", Sort: ri.Asc},
				{Field: "endpoint_id", Sort: ri.Asc},
			},
		},
		{
			Name: "idx_dispatch_scan",
			Keys: []ri.Key{
				{Field: "status", Sort: ri.Asc},
				{Field: "next_attempt_at", Sort: ri.Asc},
				{Field: "locked_until", Sort: ri.Asc},
			},
		},
	} {
		if err := m.tasksRepo.CreateIndex(def); err != nil {
			return merrors.InternalWrap(err, "callbacks storage: failed to create tasks indexes")
		}
	}

	if err := m.endpointsRepo.CreateIndex(&ri.Definition{
		Name: "idx_client_event",
		Keys: []ri.Key{
			{Field: "client_id", Sort: ri.Asc},
			{Field: "status", Sort: ri.Asc},
			{Field: "event_types", Sort: ri.Asc},
		},
	}); err != nil {
		return merrors.InternalWrap(err, "callbacks storage: failed to create endpoint indexes")
	}

	return nil
}
|
||||
|
||||
// inboxStore persists event dedupe markers.
type inboxStore struct {
	logger mlogger.Logger
	repo   repository.Repository
}

// TryInsert records a first-seen marker for eventID. It returns true when
// this call created the marker, and false with a nil error when the event was
// already recorded (duplicate delivery). Any other storage failure is wrapped
// as an internal error.
func (r *inboxStore) TryInsert(ctx context.Context, eventID, clientID, eventType string, at time.Time) (bool, error) {
	// NOTE(review): the `at` parameter is unused here — the marker's
	// timestamp presumably comes from storable.Base; confirm.
	doc := &inboxDoc{
		EventID:   strings.TrimSpace(eventID),
		ClientID:  strings.TrimSpace(clientID),
		EventType: strings.TrimSpace(eventType),
	}

	filter := repository.Filter("event_id", doc.EventID)
	if err := r.repo.Insert(ctx, doc, filter); err != nil {
		// A data conflict means another worker (or an earlier redelivery of
		// the same event) already claimed this event ID: not an error.
		if errors.Is(err, merrors.ErrDataConflict) {
			return false, nil
		}
		r.logger.Warn("Failed to insert inbox dedupe marker", zap.String("event_id", eventID), zap.Error(err))
		return false, merrors.InternalWrap(err, "callbacks inbox insert failed")
	}

	return true, nil
}
|
||||
|
||||
// endpointStore resolves registered webhook endpoints.
type endpointStore struct {
	logger mlogger.Logger
	repo   repository.Repository
}

// FindActiveByClientAndType returns the client's active endpoints that are
// subscribed to eventType. Status filtering ("active"/"enabled") happens in
// the query; event-type matching and empty-URL filtering happen in memory on
// the decoded documents. An empty result is returned as an empty slice, and
// a "no data" storage result is treated as an empty result, not an error.
func (r *endpointStore) FindActiveByClientAndType(ctx context.Context, clientID, eventType string) ([]Endpoint, error) {
	clientID = strings.TrimSpace(clientID)
	eventType = strings.TrimSpace(eventType)
	if clientID == "" {
		return nil, merrors.InvalidArgument("client_id is required", "client_id")
	}
	if eventType == "" {
		return nil, merrors.InvalidArgument("event type is required", "event_type")
	}

	query := repository.Query().
		Filter(repository.Field("client_id"), clientID).
		In(repository.Field("status"), "active", "enabled")

	out := make([]Endpoint, 0)
	err := r.repo.FindManyByFilter(ctx, query, func(cur *mongo.Cursor) error {
		doc := &endpointDoc{}
		if err := cur.Decode(doc); err != nil {
			return err
		}
		// Skip unusable registrations rather than failing the whole lookup.
		if strings.TrimSpace(doc.URL) == "" {
			return nil
		}
		if !supportsEventType(doc.EventTypes, eventType) {
			return nil
		}
		out = append(out, Endpoint{
			ID:             doc.ID,
			ClientID:       doc.ClientID,
			URL:            strings.TrimSpace(doc.URL),
			SigningMode:    strings.TrimSpace(doc.SigningMode),
			SecretRef:      strings.TrimSpace(doc.SecretRef),
			Headers:        cloneHeaders(doc.Headers),
			MaxAttempts:    doc.MaxAttempts,
			MinDelay:       time.Duration(doc.MinDelayMS) * time.Millisecond,
			MaxDelay:       time.Duration(doc.MaxDelayMS) * time.Millisecond,
			RequestTimeout: time.Duration(doc.RequestTimeoutMS) * time.Millisecond,
		})
		return nil
	})
	if err != nil && !errors.Is(err, merrors.ErrNoData) {
		return nil, merrors.InternalWrap(err, "callbacks endpoint lookup failed")
	}

	return out, nil
}
|
||||
|
||||
// supportsEventType reports whether an endpoint's subscription list covers
// eventType. An empty list means "subscribed to everything", "*" acts as a
// wildcard entry, blank entries are ignored, and matching is done on the
// whitespace-trimmed values.
func supportsEventType(eventTypes []string, eventType string) bool {
	if len(eventTypes) == 0 {
		return true
	}

	want := strings.TrimSpace(eventType)
	for _, raw := range eventTypes {
		switch trimmed := strings.TrimSpace(raw); trimmed {
		case "":
			// Ignore blank subscription entries.
		case "*", want:
			return true
		}
	}
	return false
}
|
||||
|
||||
// taskStore manages delivery task documents.
type taskStore struct {
	logger mlogger.Logger
	repo   repository.Repository
}

// UpsertTasks creates one delivery task per endpoint for the given event,
// snapshotting each endpoint's delivery policy (with defaults applied and
// bounds clamped) into the task. Despite the name it is insert-or-skip: a
// conflicting (event_id, endpoint_id) pair is left untouched, which makes the
// call idempotent for redelivered events. Endpoints with a nil ID are
// skipped. The `at` timestamp (UTC) becomes the first next_attempt_at.
func (r *taskStore) UpsertTasks(ctx context.Context, eventID string, endpoints []Endpoint, payload []byte, defaults TaskDefaults, at time.Time) error {
	eventID = strings.TrimSpace(eventID)
	if eventID == "" {
		return merrors.InvalidArgument("event id is required", "event_id")
	}
	if len(endpoints) == 0 {
		return nil
	}

	now := at.UTC()
	for _, endpoint := range endpoints {
		if endpoint.ID == bson.NilObjectID {
			continue
		}

		// Resolve effective retry policy: endpoint value, else defaults,
		// else a safe floor.
		maxAttempts := endpoint.MaxAttempts
		if maxAttempts <= 0 {
			maxAttempts = defaults.MaxAttempts
		}
		if maxAttempts <= 0 {
			maxAttempts = 1
		}

		minDelay := endpoint.MinDelay
		if minDelay <= 0 {
			minDelay = defaults.MinDelay
		}
		if minDelay <= 0 {
			minDelay = time.Second
		}

		maxDelay := endpoint.MaxDelay
		if maxDelay <= 0 {
			maxDelay = defaults.MaxDelay
		}
		// Keep the backoff window well-formed.
		if maxDelay < minDelay {
			maxDelay = minDelay
		}

		requestTimeout := endpoint.RequestTimeout
		if requestTimeout <= 0 {
			requestTimeout = defaults.RequestTimeout
		}

		doc := &taskDoc{}
		doc.EventID = eventID
		doc.EndpointID = endpoint.ID
		doc.EndpointURL = strings.TrimSpace(endpoint.URL)
		doc.SigningMode = strings.TrimSpace(endpoint.SigningMode)
		doc.SecretRef = strings.TrimSpace(endpoint.SecretRef)
		doc.Headers = cloneHeaders(endpoint.Headers)
		// Copy the payload so each task owns its bytes.
		doc.Payload = append([]byte(nil), payload...)
		doc.Status = TaskStatusPending
		doc.Attempt = 0
		doc.MaxAttempts = maxAttempts
		doc.MinDelayMS = int(minDelay / time.Millisecond)
		doc.MaxDelayMS = int(maxDelay / time.Millisecond)
		doc.RequestTimeoutMS = int(requestTimeout / time.Millisecond)
		doc.NextAttemptAt = now

		filter := repository.Filter("event_id", eventID).And(repository.Filter("endpoint_id", endpoint.ID))
		if err := r.repo.Insert(ctx, doc, filter); err != nil {
			// Conflict: a task for this (event, endpoint) already exists —
			// expected on redelivery, so skip silently.
			if errors.Is(err, merrors.ErrDataConflict) {
				continue
			}
			return merrors.InternalWrap(err, "callbacks task upsert failed")
		}
	}

	return nil
}
|
||||
|
||||
// LockNextTask claims the next due task for workerID, or returns (nil, nil)
// when nothing is ready. Claiming is a two-phase protocol:
//  1. scan up to 32 due, unlocked PENDING/RETRY tasks ordered by due time;
//  2. for each candidate, apply a conditional patch that re-checks the same
//     due/unlocked predicate, so only one worker can win a given task even
//     under concurrent pollers; then reload and verify worker_id to guard
//     against a racing overwrite between the patch and the read.
//
// The lock is a lease: locked_until = now + lockTTL, after which the task
// becomes claimable again.
func (r *taskStore) LockNextTask(ctx context.Context, now time.Time, workerID string, lockTTL time.Duration) (*Task, error) {
	workerID = strings.TrimSpace(workerID)
	if workerID == "" {
		return nil, merrors.InvalidArgument("worker id is required", "worker_id")
	}

	now = now.UTC()
	limit := int64(32)
	// "Unlocked" means: no locked_until field, a null one, or a lease that
	// has already expired.
	// NOTE(review): lockFilter is a builder value reused in two queries below;
	// confirm the builder is read-only after construction (no mutation on use).
	lockFilter := repository.Query().Or(
		repository.Query().Comparison(repository.Field("locked_until"), builder.Exists, false),
		repository.Query().Filter(repository.Field("locked_until"), nil),
		repository.Query().Comparison(repository.Field("locked_until"), builder.Lte, now),
	)

	query := repository.Query().
		In(repository.Field("status"), string(TaskStatusPending), string(TaskStatusRetry)).
		Comparison(repository.Field("next_attempt_at"), builder.Lte, now).
		And(lockFilter).
		Sort(repository.Field("next_attempt_at"), true).
		Sort(repository.Field("created_at"), true).
		Limit(&limit)

	candidates, err := mutil.GetObjects[taskDoc](ctx, r.logger, query, nil, r.repo)
	if err != nil {
		if errors.Is(err, merrors.ErrNoData) {
			return nil, nil
		}
		return nil, merrors.InternalWrap(err, "callbacks task query failed")
	}

	lockedUntil := now.Add(lockTTL)
	for _, candidate := range candidates {
		patch := repository.Patch().
			Set(repository.Field("locked_until"), lockedUntil).
			Set(repository.Field("worker_id"), workerID)

		// Re-assert the claimability predicate inside the update so the
		// patch is a compare-and-swap, not a blind write.
		conditional := repository.IDFilter(candidate.ID).And(
			repository.Query().In(repository.Field("status"), string(TaskStatusPending), string(TaskStatusRetry)),
			repository.Query().Comparison(repository.Field("next_attempt_at"), builder.Lte, now),
			lockFilter,
		)

		updated, err := r.repo.PatchMany(ctx, conditional, patch)
		if err != nil {
			return nil, merrors.InternalWrap(err, "callbacks task lock update failed")
		}
		if updated == 0 {
			// Another worker won this candidate; try the next one.
			continue
		}

		locked := &taskDoc{}
		if err := r.repo.Get(ctx, candidate.ID, locked); err != nil {
			if errors.Is(err, merrors.ErrNoData) {
				continue
			}
			return nil, merrors.InternalWrap(err, "callbacks task lock reload failed")
		}
		// Final ownership check: if someone else overwrote the lease between
		// the patch and the reload, give the task up.
		if strings.TrimSpace(locked.WorkerID) != workerID {
			continue
		}

		return mapTaskDoc(locked), nil
	}

	return nil, nil
}
|
||||
|
||||
func (r *taskStore) MarkDelivered(ctx context.Context, taskID bson.ObjectID, httpCode int, latency time.Duration, at time.Time) error {
|
||||
_ = latency
|
||||
if taskID == bson.NilObjectID {
|
||||
return merrors.InvalidArgument("task id is required", "task_id")
|
||||
}
|
||||
|
||||
patch := repository.Patch().
|
||||
Set(repository.Field("status"), TaskStatusDelivered).
|
||||
Set(repository.Field("last_http_code"), httpCode).
|
||||
Set(repository.Field("delivered_at"), time.Now()).
|
||||
Set(repository.Field("locked_until"), nil).
|
||||
Set(repository.Field("worker_id"), "").
|
||||
Set(repository.Field("last_error"), "")
|
||||
|
||||
if err := r.repo.Patch(ctx, taskID, patch); err != nil {
|
||||
return merrors.InternalWrap(err, "callbacks task mark delivered failed")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarkRetry schedules another delivery attempt: bumps the attempt counter,
// records the failure details, sets the next due time (UTC), and releases the
// worker lease so any worker can pick the task up again.
// NOTE(review): the `at` parameter is unused — no "last attempted" timestamp
// is persisted; confirm whether that is intentional.
func (r *taskStore) MarkRetry(ctx context.Context, taskID bson.ObjectID, attempt int, nextAttemptAt time.Time, lastError string, httpCode int, at time.Time) error {
	if taskID == bson.NilObjectID {
		return merrors.InvalidArgument("task id is required", "task_id")
	}

	patch := repository.Patch().
		Set(repository.Field("status"), TaskStatusRetry).
		Set(repository.Field("attempt"), attempt).
		Set(repository.Field("next_attempt_at"), nextAttemptAt.UTC()).
		Set(repository.Field("last_error"), strings.TrimSpace(lastError)).
		Set(repository.Field("last_http_code"), httpCode).
		Set(repository.Field("locked_until"), nil).
		Set(repository.Field("worker_id"), "")

	if err := r.repo.Patch(ctx, taskID, patch); err != nil {
		return merrors.InternalWrap(err, "callbacks task mark retry failed")
	}
	return nil
}
|
||||
|
||||
// MarkFailed terminally fails a task (attempts exhausted or permanent error):
// records the final attempt count and failure details, and releases the
// worker lease. Failed tasks are no longer claimable by LockNextTask.
// NOTE(review): like MarkRetry, the `at` parameter is unused — confirm.
func (r *taskStore) MarkFailed(ctx context.Context, taskID bson.ObjectID, attempt int, lastError string, httpCode int, at time.Time) error {
	if taskID == bson.NilObjectID {
		return merrors.InvalidArgument("task id is required", "task_id")
	}

	patch := repository.Patch().
		Set(repository.Field("status"), TaskStatusFailed).
		Set(repository.Field("attempt"), attempt).
		Set(repository.Field("last_error"), strings.TrimSpace(lastError)).
		Set(repository.Field("last_http_code"), httpCode).
		Set(repository.Field("locked_until"), nil).
		Set(repository.Field("worker_id"), "")

	if err := r.repo.Patch(ctx, taskID, patch); err != nil {
		return merrors.InternalWrap(err, "callbacks task mark failed failed")
	}
	return nil
}
|
||||
|
||||
// mapTaskDoc converts a persisted task document into the public storage.Task
// view, converting millisecond fields back to durations and defensively
// copying the payload and headers so callers cannot mutate the document.
// Returns nil for a nil input.
func mapTaskDoc(doc *taskDoc) *Task {
	if doc == nil {
		return nil
	}
	return &Task{
		ID:             doc.ID,
		EventID:        doc.EventID,
		EndpointID:     doc.EndpointID,
		EndpointURL:    doc.EndpointURL,
		SigningMode:    doc.SigningMode,
		SecretRef:      doc.SecretRef,
		Headers:        cloneHeaders(doc.Headers),
		Payload:        append([]byte(nil), doc.Payload...),
		Attempt:        doc.Attempt,
		MaxAttempts:    doc.MaxAttempts,
		MinDelay:       time.Duration(doc.MinDelayMS) * time.Millisecond,
		MaxDelay:       time.Duration(doc.MaxDelayMS) * time.Millisecond,
		RequestTimeout: time.Duration(doc.RequestTimeoutMS) * time.Millisecond,
		Status:         doc.Status,
		NextAttemptAt:  doc.NextAttemptAt,
	}
}
|
||||
|
||||
// cloneHeaders returns a defensive copy of in. The result is never nil, so
// callers can read or write it without a nil check.
func cloneHeaders(in map[string]string) map[string]string {
	out := make(map[string]string, len(in))
	for k, v := range in {
		out[k] = v
	}
	return out
}
|
||||
17
api/edge/callbacks/internal/subscriptions/module.go
Normal file
17
api/edge/callbacks/internal/subscriptions/module.go
Normal file
@@ -0,0 +1,17 @@
|
||||
package subscriptions
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/tech/sendico/edge/callbacks/internal/storage"
|
||||
)
|
||||
|
||||
// Resolver resolves active webhook endpoints for an event.
type Resolver interface {
	// Resolve returns the active endpoints registered by clientID that are
	// subscribed to eventType.
	Resolve(ctx context.Context, clientID, eventType string) ([]storage.Endpoint, error)
}

// Dependencies defines subscriptions resolver dependencies.
type Dependencies struct {
	// EndpointRepo is the storage-layer lookup the resolver delegates to.
	EndpointRepo storage.EndpointRepo
}
|
||||
38
api/edge/callbacks/internal/subscriptions/service.go
Normal file
38
api/edge/callbacks/internal/subscriptions/service.go
Normal file
@@ -0,0 +1,38 @@
|
||||
package subscriptions
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
|
||||
"github.com/tech/sendico/edge/callbacks/internal/storage"
|
||||
"github.com/tech/sendico/pkg/merrors"
|
||||
)
|
||||
|
||||
type service struct {
|
||||
repo storage.EndpointRepo
|
||||
}
|
||||
|
||||
// New creates endpoint resolver service.
|
||||
func New(deps Dependencies) (Resolver, error) {
|
||||
if deps.EndpointRepo == nil {
|
||||
return nil, merrors.InvalidArgument("subscriptions: endpoint repo is required", "endpointRepo")
|
||||
}
|
||||
|
||||
return &service{repo: deps.EndpointRepo}, nil
|
||||
}
|
||||
|
||||
func (s *service) Resolve(ctx context.Context, clientID, eventType string) ([]storage.Endpoint, error) {
|
||||
if strings.TrimSpace(clientID) == "" {
|
||||
return nil, merrors.InvalidArgument("subscriptions: client id is required", "clientID")
|
||||
}
|
||||
if strings.TrimSpace(eventType) == "" {
|
||||
return nil, merrors.InvalidArgument("subscriptions: event type is required", "eventType")
|
||||
}
|
||||
|
||||
endpoints, err := s.repo.FindActiveByClientAndType(ctx, clientID, eventType)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return endpoints, nil
|
||||
}
|
||||
Reference in New Issue
Block a user