callbacks service draft
This commit is contained in:
36
api/edge/callbacks/internal/ops/module.go
Normal file
36
api/edge/callbacks/internal/ops/module.go
Normal file
@@ -0,0 +1,36 @@
|
||||
package ops
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/tech/sendico/pkg/api/routers/health"
|
||||
"github.com/tech/sendico/pkg/mlogger"
|
||||
)
|
||||
|
||||
// Observer records service metrics.
//
// Implementations are expected to be cheap and safe to call from request
// handlers; the package-level constructor NewObserver returns the
// Prometheus-backed implementation.
type Observer interface {
	// ObserveIngest records one ingest attempt: its result label and its
	// end-to-end latency.
	ObserveIngest(result string, duration time.Duration)
	// ObserveDelivery records one delivery attempt: its result label, the
	// HTTP status code received, and its end-to-end latency.
	ObserveDelivery(result string, statusCode int, duration time.Duration)
}
// HTTPServer exposes /metrics and /health.
//
// It represents the background observability listener created by
// NewHTTPServer; callers drive the health status and shut it down via Close.
type HTTPServer interface {
	// SetStatus updates the reported /health status (best-effort: a no-op
	// when the health router failed to initialise).
	SetStatus(status health.ServiceStatus)
	// Close marks the service terminating and gracefully stops the listener;
	// ctx bounds the shutdown (see server implementation for the default
	// timeout applied on top of it).
	Close(ctx context.Context)
}
// HTTPServerConfig configures observability endpoint.
type HTTPServerConfig struct {
	// Address is the listen address (host:port). Leading/trailing whitespace
	// is trimmed; when empty, the server falls back to its default address.
	Address string
}
// NewObserver creates process metrics observer.
|
||||
func NewObserver() Observer {
|
||||
return newObserver()
|
||||
}
|
||||
|
||||
// NewHTTPServer creates observability HTTP server.
|
||||
func NewHTTPServer(logger mlogger.Logger, cfg HTTPServerConfig) (HTTPServer, error) {
|
||||
return newHTTPServer(logger, cfg)
|
||||
}
|
||||
119
api/edge/callbacks/internal/ops/server.go
Normal file
119
api/edge/callbacks/internal/ops/server.go
Normal file
@@ -0,0 +1,119 @@
|
||||
package ops
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"github.com/tech/sendico/pkg/api/routers"
|
||||
"github.com/tech/sendico/pkg/api/routers/health"
|
||||
"github.com/tech/sendico/pkg/merrors"
|
||||
"github.com/tech/sendico/pkg/mlogger"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
const (
	// defaultAddress is the listen address used when cfg.Address is blank.
	defaultAddress = ":9420"
	// readHeaderTimeout bounds how long the server waits for request
	// headers, protecting against slow-header clients.
	readHeaderTimeout = 5 * time.Second
	// defaultShutdownWindow caps graceful shutdown in Close, layered on top
	// of whatever deadline the caller's context carries.
	defaultShutdownWindow = 5 * time.Second
)
// httpServer implements HTTPServer: it owns the observability listener
// (/metrics plus, when available, /health) and shuts it down on Close.
type httpServer struct {
	// logger is the "ops"-named sub-logger used for lifecycle messages.
	logger mlogger.Logger
	// server is the underlying listener, stopped via Shutdown in Close.
	server *http.Server
	// health is the /health router; nil when its initialisation failed.
	health routers.Health
	// timeout caps graceful shutdown (set to defaultShutdownWindow).
	timeout time.Duration
}
func newHTTPServer(logger mlogger.Logger, cfg HTTPServerConfig) (HTTPServer, error) {
|
||||
if logger == nil {
|
||||
return nil, merrors.InvalidArgument("ops: logger is nil")
|
||||
}
|
||||
|
||||
address := strings.TrimSpace(cfg.Address)
|
||||
if address == "" {
|
||||
address = defaultAddress
|
||||
}
|
||||
|
||||
r := chi.NewRouter()
|
||||
r.Handle("/metrics", promhttp.Handler())
|
||||
|
||||
metricsLogger := logger.Named("ops")
|
||||
var healthRouter routers.Health
|
||||
hr, err := routers.NewHealthRouter(metricsLogger, r, "")
|
||||
if err != nil {
|
||||
metricsLogger.Warn("Failed to initialise health router", zap.Error(err))
|
||||
} else {
|
||||
hr.SetStatus(health.SSStarting)
|
||||
healthRouter = hr
|
||||
}
|
||||
|
||||
httpSrv := &http.Server{
|
||||
Addr: address,
|
||||
Handler: r,
|
||||
ReadHeaderTimeout: readHeaderTimeout,
|
||||
}
|
||||
|
||||
wrapper := &httpServer{
|
||||
logger: metricsLogger,
|
||||
server: httpSrv,
|
||||
health: healthRouter,
|
||||
timeout: defaultShutdownWindow,
|
||||
}
|
||||
|
||||
go func() {
|
||||
metricsLogger.Info("Prometheus endpoint listening", zap.String("address", address))
|
||||
serveErr := httpSrv.ListenAndServe()
|
||||
if serveErr != nil && !errors.Is(serveErr, http.ErrServerClosed) {
|
||||
metricsLogger.Error("Prometheus endpoint stopped unexpectedly", zap.Error(serveErr))
|
||||
if healthRouter != nil {
|
||||
healthRouter.SetStatus(health.SSTerminating)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return wrapper, nil
|
||||
}
|
||||
|
||||
func (s *httpServer) SetStatus(status health.ServiceStatus) {
|
||||
if s == nil || s.health == nil {
|
||||
return
|
||||
}
|
||||
s.health.SetStatus(status)
|
||||
}
|
||||
|
||||
func (s *httpServer) Close(ctx context.Context) {
|
||||
if s == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if s.health != nil {
|
||||
s.health.SetStatus(health.SSTerminating)
|
||||
s.health.Finish()
|
||||
s.health = nil
|
||||
}
|
||||
|
||||
if s.server == nil {
|
||||
return
|
||||
}
|
||||
|
||||
shutdownCtx := ctx
|
||||
if shutdownCtx == nil {
|
||||
shutdownCtx = context.Background()
|
||||
}
|
||||
if s.timeout > 0 {
|
||||
var cancel context.CancelFunc
|
||||
shutdownCtx, cancel = context.WithTimeout(shutdownCtx, s.timeout)
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
if err := s.server.Shutdown(shutdownCtx); err != nil && !errors.Is(err, http.ErrServerClosed) {
|
||||
s.logger.Warn("Failed to stop metrics server", zap.Error(err))
|
||||
} else {
|
||||
s.logger.Info("Metrics server stopped")
|
||||
}
|
||||
}
|
||||
75
api/edge/callbacks/internal/ops/service.go
Normal file
75
api/edge/callbacks/internal/ops/service.go
Normal file
@@ -0,0 +1,75 @@
|
||||
package ops
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
// Prometheus collectors shared by every observer value. They are nil until
// initMetrics runs; initialisation is guarded by metricsOnce so repeated
// newObserver calls register each collector with promauto exactly once.
var (
	metricsOnce sync.Once
	// ingest_total and ingest_duration_seconds, labelled by result.
	ingestTotal *prometheus.CounterVec
	ingestLatency *prometheus.HistogramVec
	// delivery_total (result, status_code) and delivery_duration_seconds
	// (result only — see initMetrics).
	deliveryTotal *prometheus.CounterVec
	deliveryLatency *prometheus.HistogramVec
)
type observer struct{}
|
||||
|
||||
func newObserver() Observer {
|
||||
initMetrics()
|
||||
return observer{}
|
||||
}
|
||||
|
||||
func initMetrics() {
|
||||
metricsOnce.Do(func() {
|
||||
ingestTotal = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Namespace: "sendico",
|
||||
Subsystem: "callbacks",
|
||||
Name: "ingest_total",
|
||||
Help: "Total ingest attempts by result",
|
||||
}, []string{"result"})
|
||||
|
||||
ingestLatency = promauto.NewHistogramVec(prometheus.HistogramOpts{
|
||||
Namespace: "sendico",
|
||||
Subsystem: "callbacks",
|
||||
Name: "ingest_duration_seconds",
|
||||
Help: "Ingest latency in seconds",
|
||||
Buckets: prometheus.DefBuckets,
|
||||
}, []string{"result"})
|
||||
|
||||
deliveryTotal = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Namespace: "sendico",
|
||||
Subsystem: "callbacks",
|
||||
Name: "delivery_total",
|
||||
Help: "Total delivery attempts by result and status code",
|
||||
}, []string{"result", "status_code"})
|
||||
|
||||
deliveryLatency = promauto.NewHistogramVec(prometheus.HistogramOpts{
|
||||
Namespace: "sendico",
|
||||
Subsystem: "callbacks",
|
||||
Name: "delivery_duration_seconds",
|
||||
Help: "Delivery latency in seconds",
|
||||
Buckets: prometheus.DefBuckets,
|
||||
}, []string{"result"})
|
||||
})
|
||||
}
|
||||
|
||||
func (observer) ObserveIngest(result string, duration time.Duration) {
|
||||
if result == "" {
|
||||
result = "unknown"
|
||||
}
|
||||
ingestTotal.WithLabelValues(result).Inc()
|
||||
ingestLatency.WithLabelValues(result).Observe(duration.Seconds())
|
||||
}
|
||||
|
||||
func (observer) ObserveDelivery(result string, statusCode int, duration time.Duration) {
|
||||
if result == "" {
|
||||
result = "unknown"
|
||||
}
|
||||
deliveryTotal.WithLabelValues(result, strconv.Itoa(statusCode)).Inc()
|
||||
deliveryLatency.WithLabelValues(result).Observe(duration.Seconds())
|
||||
}
|
||||
Reference in New Issue
Block a user