refactored orchestrator and callbacks service to use pkg messaging + envelope factory / handler

This commit is contained in:
Stephan D
2026-02-28 20:56:26 +01:00
parent 363d6474f2
commit 12c67361dd
14 changed files with 316 additions and 311 deletions

View File

@@ -28,15 +28,6 @@ messaging:
reconnect_wait: 5
buffer_size: 1024
ingest:
stream: CALLBACKS
subject: callbacks.events
durable: callbacks-ingest
batch_size: 32
fetch_timeout_ms: 2000
idle_sleep_ms: 500
delivery:
worker_concurrency: 8
worker_poll_ms: 200

View File

@@ -28,14 +28,6 @@ messaging:
reconnect_wait: 5
buffer_size: 1024
ingest:
stream: CALLBACKS
subject: callbacks.events
durable: callbacks-ingest
batch_size: 32
fetch_timeout_ms: 2000
idle_sleep_ms: 500
delivery:
worker_concurrency: 8
worker_poll_ms: 200

View File

@@ -10,15 +10,6 @@ import (
const (
defaultShutdownTimeoutSeconds = 15
defaultMetricsAddress = ":9420"
defaultIngestStream = "CALLBACKS"
defaultIngestSubject = "callbacks.events"
defaultIngestDurable = "callbacks-ingest"
defaultIngestBatchSize = 32
defaultIngestFetchTimeoutMS = 2000
defaultIngestIdleSleepMS = 500
defaultTaskCollection = "callback_tasks"
defaultInboxCollection = "callback_inbox"
defaultEndpointsCollection = "webhook_endpoints"
defaultWorkerConcurrency = 8
defaultWorkerPollIntervalMS = 200
defaultLockTTLSeconds = 30
@@ -42,7 +33,6 @@ type Config struct {
Metrics *MetricsConfig `yaml:"metrics"`
Database *db.Config `yaml:"database"`
Messaging *messaging.Config `yaml:"messaging"`
Ingest IngestConfig `yaml:"ingest"`
Delivery DeliveryConfig `yaml:"delivery"`
Security SecurityConfig `yaml:"security"`
Secrets SecretsConfig `yaml:"secrets"`
@@ -72,30 +62,6 @@ func (c *MetricsConfig) ListenAddress() string {
return c.Address
}
// IngestConfig configures JetStream ingestion.
type IngestConfig struct {
Stream string `yaml:"stream"`
Subject string `yaml:"subject"`
Durable string `yaml:"durable"`
BatchSize int `yaml:"batch_size"`
FetchTimeoutMS int `yaml:"fetch_timeout_ms"`
IdleSleepMS int `yaml:"idle_sleep_ms"`
}
func (c *IngestConfig) FetchTimeout() time.Duration {
if c.FetchTimeoutMS <= 0 {
return time.Duration(defaultIngestFetchTimeoutMS) * time.Millisecond
}
return time.Duration(c.FetchTimeoutMS) * time.Millisecond
}
func (c *IngestConfig) IdleSleep() time.Duration {
if c.IdleSleepMS <= 0 {
return time.Duration(defaultIngestIdleSleepMS) * time.Millisecond
}
return time.Duration(c.IdleSleepMS) * time.Millisecond
}
// DeliveryConfig controls dispatcher behavior.
type DeliveryConfig struct {
WorkerConcurrency int `yaml:"worker_concurrency"`

View File

@@ -1,6 +1,7 @@
package config
import (
"bytes"
"os"
"strings"
@@ -34,7 +35,9 @@ func (s *service) Load(path string) (*Config, error) {
}
cfg := &Config{}
if err := yaml.Unmarshal(data, cfg); err != nil {
decoder := yaml.NewDecoder(bytes.NewReader(data))
decoder.KnownFields(true)
if err := decoder.Decode(cfg); err != nil {
s.logger.Error("Failed to parse config yaml", zap.String("path", path), zap.Error(err))
return nil, merrors.InternalWrap(err, "failed to parse callbacks config")
}
@@ -58,25 +61,6 @@ func (s *service) applyDefaults(cfg *Config) {
cfg.Metrics.Address = defaultMetricsAddress
}
if strings.TrimSpace(cfg.Ingest.Stream) == "" {
cfg.Ingest.Stream = defaultIngestStream
}
if strings.TrimSpace(cfg.Ingest.Subject) == "" {
cfg.Ingest.Subject = defaultIngestSubject
}
if strings.TrimSpace(cfg.Ingest.Durable) == "" {
cfg.Ingest.Durable = defaultIngestDurable
}
if cfg.Ingest.BatchSize <= 0 {
cfg.Ingest.BatchSize = defaultIngestBatchSize
}
if cfg.Ingest.FetchTimeoutMS <= 0 {
cfg.Ingest.FetchTimeoutMS = defaultIngestFetchTimeoutMS
}
if cfg.Ingest.IdleSleepMS <= 0 {
cfg.Ingest.IdleSleepMS = defaultIngestIdleSleepMS
}
if cfg.Delivery.WorkerConcurrency <= 0 {
cfg.Delivery.WorkerConcurrency = defaultWorkerConcurrency
}
@@ -139,9 +123,6 @@ func (s *service) validate(cfg *Config) error {
if cfg.Delivery.MaxAttempts < 1 {
return merrors.InvalidArgument("delivery.max_attempts must be > 0", "delivery.max_attempts")
}
if cfg.Ingest.BatchSize < 1 {
return merrors.InvalidArgument("ingest.batch_size must be > 0", "ingest.batch_size")
}
vaultAddress := strings.TrimSpace(cfg.Secrets.Vault.Address)
vaultTokenEnv := strings.TrimSpace(cfg.Secrets.Vault.TokenEnv)
vaultMountPath := strings.TrimSpace(cfg.Secrets.Vault.MountPath)

View File

@@ -4,10 +4,10 @@ import (
"context"
"time"
"github.com/nats-io/nats.go"
"github.com/tech/sendico/edge/callbacks/internal/events"
"github.com/tech/sendico/edge/callbacks/internal/storage"
"github.com/tech/sendico/edge/callbacks/internal/subscriptions"
mb "github.com/tech/sendico/pkg/messaging/broker"
"github.com/tech/sendico/pkg/mlogger"
)
@@ -16,21 +16,10 @@ type Observer interface {
ObserveIngest(result string, duration time.Duration)
}
// Config contains JetStream ingest settings.
type Config struct {
Stream string
Subject string
Durable string
BatchSize int
FetchTimeout time.Duration
IdleSleep time.Duration
}
// Dependencies configure the ingest service.
type Dependencies struct {
Logger mlogger.Logger
JetStream nats.JetStreamContext
Config Config
Broker mb.Broker
Events events.Service
Resolver subscriptions.Resolver
InboxRepo storage.InboxRepo
@@ -39,7 +28,7 @@ type Dependencies struct {
Observer Observer
}
// Service runs JetStream ingest workers.
// Service runs ingest workers.
type Service interface {
Start(ctx context.Context)
Stop()

View File

@@ -2,59 +2,86 @@ package ingest
import (
"context"
"encoding/json"
"errors"
"strings"
"sync"
"time"
"github.com/nats-io/nats.go"
"github.com/tech/sendico/edge/callbacks/internal/events"
"github.com/tech/sendico/pkg/merrors"
pkgmsg "github.com/tech/sendico/pkg/messaging"
cons "github.com/tech/sendico/pkg/messaging/consumer"
me "github.com/tech/sendico/pkg/messaging/envelope"
pon "github.com/tech/sendico/pkg/messaging/notifications/paymentorchestrator"
np "github.com/tech/sendico/pkg/messaging/notifications/processor"
"github.com/tech/sendico/pkg/mlogger"
"github.com/tech/sendico/pkg/model"
"go.uber.org/zap"
)
const (
loggerNameIngest = "ingest"
logFieldSubject = "subject"
errBrokerRequired = "ingest: broker is required"
errEventsRequired = "ingest: events service is required"
errResolverRequired = "ingest: subscriptions resolver is required"
errInboxRepoRequired = "ingest: inbox repo is required"
errTaskRepoRequired = "ingest: task repo is required"
configFieldBroker = "broker"
configFieldEvents = "events"
configFieldResolver = "resolver"
configFieldInboxRepo = "inboxRepo"
configFieldTaskRepo = "taskRepo"
logFailedStartConsumer = "Failed to start messaging consumer"
logIngestConsumerStarted = "Ingest consumer started"
logIngestConsumerStopped = "Ingest consumer stopped"
logIngestConsumerWarn = "Ingest consumer stopped with error"
ingestResultOK = "ok"
ingestResultEmptyPayload = "empty_payload"
ingestResultInvalidEvent = "invalid_event"
ingestResultPayloadError = "payload_error"
ingestResultInboxError = "inbox_error"
ingestResultDuplicate = "duplicate"
ingestResultResolveError = "resolve_error"
ingestResultNoEndpoints = "no_endpoints"
ingestResultTaskError = "task_error"
)
type service struct {
logger mlogger.Logger
js nats.JetStreamContext
cfg Config
deps Dependencies
event model.NotificationEvent
cancel context.CancelFunc
wg sync.WaitGroup
once sync.Once
stop sync.Once
mu sync.Mutex
consumer pkgmsg.Consumer
processor np.EnvelopeProcessor
}
func newService(deps Dependencies) (Service, error) {
if deps.JetStream == nil {
return nil, merrors.InvalidArgument("ingest: jetstream context is required", "jetstream")
if deps.Broker == nil {
return nil, merrors.InvalidArgument(errBrokerRequired, configFieldBroker)
}
if deps.Events == nil {
return nil, merrors.InvalidArgument("ingest: events service is required", "events")
return nil, merrors.InvalidArgument(errEventsRequired, configFieldEvents)
}
if deps.Resolver == nil {
return nil, merrors.InvalidArgument("ingest: subscriptions resolver is required", "resolver")
return nil, merrors.InvalidArgument(errResolverRequired, configFieldResolver)
}
if deps.InboxRepo == nil {
return nil, merrors.InvalidArgument("ingest: inbox repo is required", "inboxRepo")
return nil, merrors.InvalidArgument(errInboxRepoRequired, configFieldInboxRepo)
}
if deps.TaskRepo == nil {
return nil, merrors.InvalidArgument("ingest: task repo is required", "taskRepo")
}
if strings.TrimSpace(deps.Config.Subject) == "" {
return nil, merrors.InvalidArgument("ingest: subject is required", "config.subject")
}
if strings.TrimSpace(deps.Config.Durable) == "" {
return nil, merrors.InvalidArgument("ingest: durable is required", "config.durable")
}
if deps.Config.BatchSize <= 0 {
deps.Config.BatchSize = 1
}
if deps.Config.FetchTimeout <= 0 {
deps.Config.FetchTimeout = 2 * time.Second
}
if deps.Config.IdleSleep <= 0 {
deps.Config.IdleSleep = 500 * time.Millisecond
return nil, merrors.InvalidArgument(errTaskRepoRequired, configFieldTaskRepo)
}
logger := deps.Logger
@@ -62,12 +89,14 @@ func newService(deps Dependencies) (Service, error) {
logger = zap.NewNop()
}
return &service{
logger: logger.Named("ingest"),
js: deps.JetStream,
cfg: deps.Config,
svc := &service{
logger: logger.Named(loggerNameIngest),
deps: deps,
}, nil
}
svc.processor = pon.NewPaymentStatusUpdatedProcessor(svc.logger, svc.handlePaymentStatusUpdated)
svc.event = svc.processor.GetSubject()
return svc, nil
}
func (s *service) Start(ctx context.Context) {
@@ -91,114 +120,119 @@ func (s *service) Stop() {
if s.cancel != nil {
s.cancel()
}
s.closeConsumer()
s.wg.Wait()
})
}
func (s *service) run(ctx context.Context) {
subOpts := []nats.SubOpt{}
if stream := strings.TrimSpace(s.cfg.Stream); stream != "" {
subOpts = append(subOpts, nats.BindStream(stream))
}
sub, err := s.js.PullSubscribe(strings.TrimSpace(s.cfg.Subject), strings.TrimSpace(s.cfg.Durable), subOpts...)
consumer, err := cons.NewConsumer(s.logger, s.deps.Broker, s.event)
if err != nil {
s.logger.Error("Failed to start JetStream subscription", zap.String("subject", s.cfg.Subject), zap.String("durable", s.cfg.Durable), zap.Error(err))
s.logger.Error(logFailedStartConsumer, zap.String(logFieldSubject, s.event.ToString()), zap.Error(err))
return
}
s.setConsumer(consumer)
defer s.closeConsumer()
s.logger.Info("Ingest consumer started", zap.String("subject", s.cfg.Subject), zap.String("durable", s.cfg.Durable), zap.Int("batch_size", s.cfg.BatchSize))
for {
s.logger.Info(logIngestConsumerStarted, zap.String(logFieldSubject, s.event.ToString()))
if err := consumer.ConsumeMessages(func(messageCtx context.Context, envelope me.Envelope) error {
select {
case <-ctx.Done():
s.logger.Info("Ingest consumer stopped")
return
return ctx.Err()
default:
}
return s.processor.Process(messageCtx, envelope)
}); err != nil && !errors.Is(err, context.Canceled) {
s.logger.Warn(logIngestConsumerWarn, zap.String(logFieldSubject, s.event.ToString()), zap.Error(err))
}
s.logger.Info(logIngestConsumerStopped, zap.String(logFieldSubject, s.event.ToString()))
}
msgs, err := sub.Fetch(s.cfg.BatchSize, nats.MaxWait(s.cfg.FetchTimeout))
if err != nil {
if errors.Is(err, nats.ErrTimeout) {
time.Sleep(s.cfg.IdleSleep)
continue
}
if ctx.Err() != nil {
return
}
s.logger.Warn("Failed to fetch JetStream messages", zap.Error(err))
time.Sleep(s.cfg.IdleSleep)
continue
}
func (s *service) setConsumer(consumer pkgmsg.Consumer) {
s.mu.Lock()
s.consumer = consumer
s.mu.Unlock()
}
for _, msg := range msgs {
s.handleMessage(ctx, msg)
}
func (s *service) closeConsumer() {
s.mu.Lock()
consumer := s.consumer
s.consumer = nil
s.mu.Unlock()
if consumer != nil {
consumer.Close()
}
}
func (s *service) handleMessage(ctx context.Context, msg *nats.Msg) {
func (s *service) handlePaymentStatusUpdated(ctx context.Context, msg *model.PaymentStatusUpdated) error {
start := time.Now()
result := "ok"
nak := false
result := ingestResultOK
defer func() {
if s.deps.Observer != nil {
s.deps.Observer.ObserveIngest(result, time.Since(start))
}
var ackErr error
if nak {
ackErr = msg.Nak()
} else {
ackErr = msg.Ack()
}
if ackErr != nil {
s.logger.Warn("Failed to ack ingest message", zap.Bool("nak", nak), zap.Error(ackErr))
}
}()
envelope, err := s.deps.Events.Parse(msg.Data)
if err != nil {
result = "invalid_event"
nak = false
return
if msg == nil {
result = ingestResultEmptyPayload
return nil
}
if strings.TrimSpace(msg.EventID) == "" || strings.TrimSpace(msg.ClientID) == "" || msg.OccurredAt.IsZero() {
result = ingestResultInvalidEvent
return nil
}
inserted, err := s.deps.InboxRepo.TryInsert(ctx, envelope.EventID, envelope.ClientID, envelope.Type, time.Now().UTC())
eventType := strings.TrimSpace(msg.Type)
if eventType == "" {
eventType = model.PaymentStatusUpdatedType
}
data, err := json.Marshal(msg.Data)
if err != nil {
result = "inbox_error"
nak = true
return
result = ingestResultPayloadError
return err
}
parsed := &events.Envelope{
EventID: strings.TrimSpace(msg.EventID),
Type: eventType,
ClientID: strings.TrimSpace(msg.ClientID),
OccurredAt: msg.OccurredAt.UTC(),
PublishedAt: msg.PublishedAt.UTC(),
Data: data,
}
inserted, err := s.deps.InboxRepo.TryInsert(ctx, parsed.EventID, parsed.ClientID, parsed.Type, time.Now().UTC())
if err != nil {
result = ingestResultInboxError
return err
}
if !inserted {
result = "duplicate"
nak = false
return
result = ingestResultDuplicate
return nil
}
endpoints, err := s.deps.Resolver.Resolve(ctx, envelope.ClientID, envelope.Type)
endpoints, err := s.deps.Resolver.Resolve(ctx, parsed.ClientID, parsed.Type)
if err != nil {
result = "resolve_error"
nak = true
return
result = ingestResultResolveError
return err
}
if len(endpoints) == 0 {
result = "no_endpoints"
nak = false
return
result = ingestResultNoEndpoints
return nil
}
payload, err := s.deps.Events.BuildPayload(ctx, envelope)
payload, err := s.deps.Events.BuildPayload(ctx, parsed)
if err != nil {
result = "payload_error"
nak = true
return
result = ingestResultPayloadError
return err
}
if err := s.deps.TaskRepo.UpsertTasks(ctx, envelope.EventID, endpoints, payload, s.deps.TaskDefaults, time.Now().UTC()); err != nil {
result = "task_error"
nak = true
return
if err := s.deps.TaskRepo.UpsertTasks(ctx, parsed.EventID, endpoints, payload, s.deps.TaskDefaults, time.Now().UTC()); err != nil {
result = ingestResultTaskError
return err
}
return nil
}

View File

@@ -4,7 +4,6 @@ import (
"context"
"time"
"github.com/nats-io/nats.go"
"github.com/tech/sendico/edge/callbacks/internal/config"
"github.com/tech/sendico/edge/callbacks/internal/delivery"
"github.com/tech/sendico/edge/callbacks/internal/events"
@@ -18,7 +17,6 @@ import (
"github.com/tech/sendico/edge/callbacks/internal/subscriptions"
"github.com/tech/sendico/pkg/api/routers/health"
"github.com/tech/sendico/pkg/db"
"github.com/tech/sendico/pkg/merrors"
msg "github.com/tech/sendico/pkg/messaging"
"github.com/tech/sendico/pkg/mlogger"
"github.com/tech/sendico/pkg/vault/kv"
@@ -27,10 +25,6 @@ import (
const defaultShutdownTimeout = 15 * time.Second
type jetStreamProvider interface {
JetStream() nats.JetStreamContext
}
func Create(logger mlogger.Logger, file string, debug bool) (*Imp, error) {
return &Imp{
logger: logger.Named("server"),
@@ -118,23 +112,9 @@ func (i *Imp) Start() error {
}
i.broker = broker
jsProvider, ok := broker.(jetStreamProvider)
if !ok || jsProvider.JetStream() == nil {
i.shutdownRuntime(context.Background())
return merrors.Internal("callbacks: messaging broker does not provide JetStream")
}
ingestSvc, err := ingest.New(ingest.Dependencies{
Logger: i.logger,
JetStream: jsProvider.JetStream(),
Config: ingest.Config{
Stream: cfg.Ingest.Stream,
Subject: cfg.Ingest.Subject,
Durable: cfg.Ingest.Durable,
BatchSize: cfg.Ingest.BatchSize,
FetchTimeout: cfg.Ingest.FetchTimeout(),
IdleSleep: cfg.Ingest.IdleSleep(),
},
Logger: i.logger,
Broker: broker,
Events: eventSvc,
Resolver: resolver,
InboxRepo: repo.Inbox(),
@@ -176,8 +156,6 @@ func (i *Imp) Start() error {
i.opServer.SetStatus(health.SSRunning)
i.logger.Info("Callbacks service ready",
zap.String("subject", cfg.Ingest.Subject),
zap.String("stream", cfg.Ingest.Stream),
zap.Int("workers", cfg.Delivery.WorkerConcurrency),
)