7 Commits

Author SHA1 Message Date
4cacc0ee2d connection
All checks were successful
Build and Publish / Metadata (push) Successful in 4s
Build and Publish / BuildAndDeployAmd64 (push) Successful in 50s
Build and Publish / BuildAndDeployArm64 (push) Successful in 4m10s
2025-10-11 10:29:44 +02:00
24cd0b6ad7 major refactor
All checks were successful
Build and Publish / Metadata (push) Successful in 4s
Build and Publish / BuildAndDeployAmd64 (push) Successful in 49s
Build and Publish / BuildAndDeployArm64 (push) Successful in 3m49s
2025-10-11 10:22:47 +02:00
matst80
e48a2590bd change ids
All checks were successful
Build and Publish / Metadata (push) Successful in 3s
Build and Publish / BuildAndDeployAmd64 (push) Successful in 50s
Build and Publish / BuildAndDeployArm64 (push) Successful in 4m25s
2025-10-10 21:50:18 +00:00
matst80
b0e6c8eca8 add tests and grafana dashboard
All checks were successful
Build and Publish / Metadata (push) Successful in 7s
Build and Publish / BuildAndDeployAmd64 (push) Successful in 47s
Build and Publish / BuildAndDeployArm64 (push) Successful in 4m4s
2025-10-10 20:45:42 +00:00
matst80
7814f33a06 some strange storage stuff
Some checks failed
Build and Publish / Metadata (push) Successful in 4s
Build and Publish / BuildAndDeployAmd64 (push) Successful in 48s
Build and Publish / BuildAndDeployArm64 (push) Failing after 26m40s
2025-10-10 19:31:06 +00:00
matst80
fb111ebf97 cleanup and http proxy
All checks were successful
Build and Publish / Metadata (push) Successful in 4s
Build and Publish / BuildAndDeployAmd64 (push) Successful in 47s
Build and Publish / BuildAndDeployArm64 (push) Successful in 8m9s
2025-10-10 18:44:31 +00:00
matst80
5525e91ecc refactor once again
All checks were successful
Build and Publish / Metadata (push) Successful in 4s
Build and Publish / BuildAndDeployAmd64 (push) Successful in 52s
Build and Publish / BuildAndDeployArm64 (push) Successful in 8m23s
2025-10-10 18:34:46 +00:00
28 changed files with 1902 additions and 4741 deletions

View File

@@ -19,7 +19,7 @@
 MODULE_PATH := git.tornberg.me/go-cart-actor
 PROTO_DIR := proto
-PROTOS := $(PROTO_DIR)/messages.proto $(PROTO_DIR)/cart_actor.proto $(PROTO_DIR)/control_plane.proto
+PROTOS := $(PROTO_DIR)/messages.proto $(PROTO_DIR)/control_plane.proto
 # Allow override: make PROTOC=/path/to/protoc
 PROTOC ?= protoc

View File

@@ -4,41 +4,13 @@ import (
     "encoding/json"
     "fmt"
     "sync"
+    "time"

     messages "git.tornberg.me/go-cart-actor/proto"
 )

-type CartId [16]byte
-
-// String returns the cart id as a trimmed UTF-8 string (trailing zero bytes removed).
-func (id CartId) String() string {
-    n := 0
-    for n < len(id) && id[n] != 0 {
-        n++
-    }
-    return string(id[:n])
-}
-
-// ToCartId converts an arbitrary string to a fixed-size CartId (truncating or padding with zeros).
-func ToCartId(s string) CartId {
-    var id CartId
-    copy(id[:], []byte(s))
-    return id
-}
-
-func (id CartId) MarshalJSON() ([]byte, error) {
-    return json.Marshal(id.String())
-}
-
-func (id *CartId) UnmarshalJSON(data []byte) error {
-    var str string
-    err := json.Unmarshal(data, &str)
-    if err != nil {
-        return err
-    }
-    copy(id[:], []byte(str))
-    return nil
-}
+// Legacy padded [16]byte CartId and its helper methods removed.
+// Unified CartId (uint64 with base62 string form) now defined in cart_id.go.

 type StockStatus int

@@ -89,6 +61,7 @@ type CartGrain struct {
     mu             sync.RWMutex
     lastItemId     int
     lastDeliveryId int
+    lastChange     int64 // unix seconds of last successful mutation (replay sets from event ts)
     Id             CartId      `json:"id"`
     Items          []*CartItem `json:"items"`
     TotalPrice     int64       `json:"totalPrice"`

@@ -112,8 +85,7 @@ func (c *CartGrain) GetId() CartId {
 }

 func (c *CartGrain) GetLastChange() int64 {
-    // Legacy event log removed; return 0 to indicate no persisted mutation history.
-    return 0
+    return c.lastChange
 }

 func (c *CartGrain) GetCurrentState() (*CartGrain, error) {

@@ -269,6 +241,13 @@ func (c *CartGrain) Apply(content interface{}, isReplay bool) (*CartGrain, error
         }
         return nil, err
     }
+    // Sliding TTL: update lastChange only for non-replay successful mutations.
+    if updated != nil && !isReplay {
+        c.lastChange = time.Now().Unix()
+        _ = AppendCartEvent(c.Id, content)
+    }
     return updated, nil
 }
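The lastChange field gives each grain a sliding inactivity window: it is bumped on every successful non-replay mutation and, during replay, is set from the event timestamp. A minimal illustration of how that could be consumed is sketched below; the isExpired helper is hypothetical and not part of this commit, only GetLastChange comes from the diff above.

```go
// isExpired reports whether a grain has been idle longer than ttl,
// judged by the last successful mutation time recorded in lastChange.
func isExpired(g *CartGrain, ttl time.Duration) bool {
	last := g.GetLastChange() // unix seconds of the most recent mutation, 0 if none recorded
	if last == 0 {
		// Never-mutated grains are treated as fresh in this sketch.
		return false
	}
	return time.Since(time.Unix(last, 0)) > ttl
}
```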

View File

@@ -2,63 +2,43 @@ package main
 import (
     "crypto/rand"
-    "encoding/binary"
-    "errors"
+    "encoding/json"
     "fmt"
-    "strings"
 )

 // cart_id.go
 //
-// Compact CartID implementation using 64 bits of cryptographic randomness,
-// base62 encoded (0-9 A-Z a-z). Typical length is 11 characters (since 62^11 > 2^64).
+// Breaking change:
+// Unified cart identifier as a raw 64-bit unsigned integer (type CartId uint64).
+// External textual representation: base62 (0-9 A-Z a-z), shortest possible
+// encoding for 64 bits (max 11 characters, since 62^11 > 2^64).
 //
-// Motivation:
-//   * Shorter identifiers for cookies / URLs than legacy padded 16-byte CartId
-//   * O(1) hashing (raw uint64) for consistent hashing ring integration
-//   * Extremely low collision probability (birthday bound negligible at scale)
+// Rationale:
+//   - Replaces legacy fixed [16]byte padded string and transitional CartID wrapper.
+//   - Provides compact, URL/cookie-friendly identifiers.
+//   - O(1) hashing and minimal memory footprint.
+//   - 64 bits of crypto randomness => negligible collision probability at realistic scale.
 //
-// Backward Compatibility Strategy (Phased):
-//   Phase 1: Introduce CartID helpers while continuing to accept legacy CartId.
-//   Phase 2: Internally migrate maps to key by uint64 (CartID.Raw()).
-//   Phase 3: Canonicalize all inbound IDs to short base62; reissue Set-Cart-Id header.
+// Public API:
+//   type CartId uint64
+//   func NewCartId() (CartId, error)
+//   func MustNewCartId() CartId
+//   func ParseCartId(string) (CartId, bool)
+//   func MustParseCartId(string) CartId
+//   (CartId).String() string
+//   (CartId).MarshalJSON() / UnmarshalJSON()
 //
 // NOTE:
-// The legacy type `CartId [16]byte` is still present elsewhere; helper
-// UpgradeLegacyCartId bridges that representation to the new form without
-// breaking deterministic mapping for existing carts.
-//
-// Security / Predictability:
-// Uses crypto/rand for generation. If ever required, you can layer an
-// HMAC-based derivation for additional secrecy. Current approach already
-// provides 64 bits of entropy (brute force infeasible for practical risk).
-//
-// Future Extensions:
-//   * Time-sortable IDs: prepend a 48-bit timestamp field and encode 80 bits.
-//   * Add metrics counters for: generated_new, parsed_existing, legacy_fallback.
-//   * Add a pool of pre-generated IDs for ultra-low-latency hot paths (rarely needed).
-//
-// Public Surface Summary:
-//   NewCartID() (CartID, error)
-//   ParseCartID(string) (CartID, bool)
-//   FallbackFromString(string) CartID
-//   UpgradeLegacyCartId(CartId) CartID
-//   CanonicalizeIncoming(string) (CartID, bool /*wasGenerated*/, error)
-//
-// Encoding Details:
-// encodeBase62 / decodeBase62 maintain a stable alphabet. DO NOT change
-// alphabet order once IDs are in circulation, or previously issued IDs
-// will change meaning.
-//
-// Zero Values:
-// The zero value CartID{} has raw=0, txt="0". Treat it as valid but
-// usually you will call NewCartID instead.
+// All legacy helpers (UpgradeLegacyCartId, Fallback hashing, Canonicalize variants,
+// CartIDToLegacy, LegacyToCartID) have been removed as part of the breaking change.
 //
 // ---------------------------------------------------------------------------

+type CartId uint64
+
 const base62Alphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"

-// Precomputed reverse lookup table for decode (255 = invalid).
+// Reverse lookup (0xFF marks invalid)
 var base62Rev [256]byte

 func init() {
@@ -70,136 +50,90 @@ func init() {
     }
 }

-// CartID is the compact representation of a cart identifier.
-// raw: 64-bit entropy (also used directly for consistent hashing).
-// txt: cached base62 textual form.
-type CartID struct {
-    raw uint64
-    txt string
-}
-
-// String returns the canonical base62 encoded ID.
-func (c CartID) String() string {
-    if c.txt == "" { // lazily encode if constructed manually
-        c.txt = encodeBase62(c.raw)
-    }
-    return c.txt
-}
-
-// Raw returns the 64-bit numeric value (useful for hashing / ring lookup).
-func (c CartID) Raw() uint64 {
-    return c.raw
-}
-
-// IsZero reports whether this CartID is the zero value.
-func (c CartID) IsZero() bool {
-    return c.raw == 0
-}
-
-// NewCartID generates a new cryptographically random 64-bit ID.
-func NewCartID() (CartID, error) {
+// String returns the canonical base62 encoding of the 64-bit id.
+func (id CartId) String() string {
+    return encodeBase62(uint64(id))
+}
+
+// MarshalJSON encodes the cart id as a JSON string.
+func (id CartId) MarshalJSON() ([]byte, error) {
+    return json.Marshal(id.String())
+}
+
+// UnmarshalJSON decodes a cart id from a JSON string containing base62 text.
+func (id *CartId) UnmarshalJSON(data []byte) error {
+    var s string
+    if err := json.Unmarshal(data, &s); err != nil {
+        return err
+    }
+    parsed, ok := ParseCartId(s)
+    if !ok {
+        return fmt.Errorf("invalid cart id: %q", s)
+    }
+    *id = parsed
+    return nil
+}
+
+// NewCartId generates a new cryptographically random non-zero 64-bit id.
+func NewCartId() (CartId, error) {
     var b [8]byte
     if _, err := rand.Read(b[:]); err != nil {
-        return CartID{}, fmt.Errorf("NewCartID: %w", err)
+        return 0, fmt.Errorf("NewCartId: %w", err)
     }
-    u := binary.BigEndian.Uint64(b[:])
-    // Reject zero if you want to avoid ever producing "0" (optional).
+    u := (uint64(b[0]) << 56) |
+        (uint64(b[1]) << 48) |
+        (uint64(b[2]) << 40) |
+        (uint64(b[3]) << 32) |
+        (uint64(b[4]) << 24) |
+        (uint64(b[5]) << 16) |
+        (uint64(b[6]) << 8) |
+        uint64(b[7])
     if u == 0 {
-        // Extremely unlikely; recurse once.
-        return NewCartID()
+        // Extremely unlikely; regenerate once to avoid "0" identifier if desired.
+        return NewCartId()
     }
-    return CartID{raw: u, txt: encodeBase62(u)}, nil
+    return CartId(u), nil
 }

-// MustNewCartID panics on failure (suitable for tests / initialization).
-func MustNewCartID() CartID {
-    id, err := NewCartID()
+// MustNewCartId panics if generation fails.
+func MustNewCartId() CartId {
+    id, err := NewCartId()
     if err != nil {
         panic(err)
     }
     return id
 }

-// ParseCartID attempts to parse a base62 canonical ID.
-// Returns (id, true) if fully valid; (zero, false) otherwise.
-func ParseCartID(s string) (CartID, bool) {
-    if len(s) == 0 {
-        return CartID{}, false
-    }
-    // Basic length sanity; allow a bit of headroom for future timestamp variant.
-    if len(s) > 16 {
-        return CartID{}, false
+// ParseCartId parses a base62 string into a CartId.
+// Returns (0,false) for invalid input.
+func ParseCartId(s string) (CartId, bool) {
+    // Accept length 1..11 (11 sufficient for 64 bits). Reject >11 immediately.
+    // Provide a slightly looser upper bound (<=16) only if you anticipate future
+    // extensions; here we stay strict.
+    if len(s) == 0 || len(s) > 11 {
+        return 0, false
     }
     u, ok := decodeBase62(s)
     if !ok {
-        return CartID{}, false
+        return 0, false
     }
-    return CartID{raw: u, txt: s}, true
+    return CartId(u), true
 }

-// FallbackFromString produces a deterministic CartID from arbitrary input
-// using a 64-bit FNV-1a hash. This allows legacy or malformed IDs to map
-// consistently into the new scheme (collision probability still low).
-func FallbackFromString(s string) CartID {
-    const (
-        offset64 = 1469598103934665603
-        prime64  = 1099511628211
-    )
-    h := uint64(offset64)
-    for i := 0; i < len(s); i++ {
-        h ^= uint64(s[i])
-        h *= prime64
-    }
-    return CartID{raw: h, txt: encodeBase62(h)}
-}
-
-// UpgradeLegacyCartId converts the old 16-byte CartId (padded) to CartID
-// by hashing its trimmed string form. Keeps stable mapping across restarts.
-func UpgradeLegacyCartId(old CartId) CartID {
-    return FallbackFromString(old.String())
-}
-
-// CanonicalizeIncoming normalizes user-provided ID strings.
-// Behavior:
-//
-//	Empty string  -> generate new ID (wasGenerated = true)
-//	Valid base62  -> parse and return (wasGenerated = false)
-//	Anything else -> fallback deterministic hash (wasGenerated = false)
-//
-// Errors only occur if crypto/rand fails during generation.
-func CanonicalizeIncoming(s string) (CartID, bool, error) {
-    if s == "" {
-        id, err := NewCartID()
-        return id, true, err
-    }
-    if cid, ok := ParseCartID(s); ok {
-        return cid, false, nil
-    }
-    // Legacy heuristic: if length == 16 and contains non-base62 chars, treat as legacy padded ID.
-    if len(s) == 16 && !isAllBase62(s) {
-        return FallbackFromString(strings.TrimRight(s, "\x00")), false, nil
-    }
-    return FallbackFromString(s), false, nil
-}
-
-// isAllBase62 returns true if every byte is in the base62 alphabet.
-func isAllBase62(s string) bool {
-    for i := 0; i < len(s); i++ {
-        if base62Rev[s[i]] == 0xFF {
-            return false
-        }
-    }
-    return true
-}
-
-// encodeBase62 turns a uint64 into base62 text.
-// Complexity: O(log_62 n) ~ at most 11 iterations for 64 bits.
+// MustParseCartId panics on invalid base62 input.
+func MustParseCartId(s string) CartId {
+    id, ok := ParseCartId(s)
+    if !ok {
+        panic(fmt.Sprintf("invalid cart id: %q", s))
+    }
+    return id
+}
+
+// encodeBase62 converts a uint64 to base62 (shortest form).
 func encodeBase62(u uint64) string {
     if u == 0 {
         return "0"
     }
-    // 62^11 = 743008370688 > 2^39; 62^11 > 2^64? Actually 62^11 ~= 5.18e19 < 2^64 (1.84e19)? 2^64 ≈ 1.84e19.
-    // 62^11 ≈ 5.18e19 > 2^64? Correction: 2^64 ≈ 1.844e19, so 62^11 > 2^64. Thus 11 chars suffice.
     var buf [11]byte
     i := len(buf)
     for u > 0 {
@@ -210,8 +144,7 @@ func encodeBase62(u uint64) string {
     return string(buf[i:])
 }

-// decodeBase62 converts a base62 string to uint64.
-// Returns (value, false) if any invalid character appears.
+// decodeBase62 converts base62 text to uint64.
 func decodeBase62(s string) (uint64, bool) {
     var v uint64
     for i := 0; i < len(s); i++ {
@@ -224,104 +157,3 @@ func decodeBase62(s string) (uint64, bool) {
     }
     return v, true
 }
-
-// ErrInvalidCartID can be returned by higher-level validation layers if you decide
-// to reject fallback-derived IDs (currently unused here).
-var ErrInvalidCartID = errors.New("invalid cart id")
-
-// ---------------------------------------------------------------------------
-// Legacy / Compatibility Conversion Helpers
-// ---------------------------------------------------------------------------
-
-// CartIDToLegacy converts a CartID (base62) into the legacy fixed-size CartId
-// ([16]byte) by copying the textual form (truncated or zero-padded).
-// NOTE: If the base62 string is longer than 16 (should not happen with current
-// 64-bit space), it will be truncated.
-func CartIDToLegacy(c CartID) CartId {
-    var id CartId
-    txt := c.String()
-    copy(id[:], []byte(txt))
-    return id
-}
-
-// LegacyToCartID upgrades a legacy CartId (padded) to a CartID by hashing its
-// trimmed string form (deterministic). This preserves stable mapping without
-// depending on original randomness.
-func LegacyToCartID(old CartId) CartID {
-    return UpgradeLegacyCartId(old)
-}
-
-// CartIDToKey returns the numeric key representation (uint64) for map indexing.
-func CartIDToKey(c CartID) uint64 {
-    return c.Raw()
-}
-
-// LegacyToCartKey converts a legacy CartId to the numeric key via deterministic
-// fallback hashing. (Uses the same logic as LegacyToCartID then returns raw.)
-func LegacyToCartKey(old CartId) uint64 {
-    return LegacyToCartID(old).Raw()
-}
-
-// ---------------------- Optional Helper Utilities ----------------------------
-
-// CartIDOrNew tries to parse s; if empty OR invalid returns a fresh ID.
-func CartIDOrNew(s string) (CartID, bool /*wasParsed*/, error) {
-    if cid, ok := ParseCartID(s); ok {
-        return cid, true, nil
-    }
-    id, err := NewCartID()
-    return id, false, err
-}
-
-// MustParseCartID panics if s is not a valid base62 ID (useful in tests).
-func MustParseCartID(s string) CartID {
-    if cid, ok := ParseCartID(s); ok {
-        return cid
-    }
-    panic(fmt.Sprintf("invalid CartID: %s", s))
-}
-
-// DebugString returns a verbose description (for logging / diagnostics).
-func (c CartID) DebugString() string {
-    return fmt.Sprintf("CartID(raw=%d txt=%s)", c.raw, c.String())
-}
-
-// Equal compares two CartIDs by raw value.
-func (c CartID) Equal(other CartID) bool {
-    return c.raw == other.raw
-}
-
-// CanonicalizeOrLegacy preserves legacy (non-base62) IDs without altering their
-// textual form, avoiding the previous behavior where fallback hashing replaced
-// the original string with a base62-encoded hash (which broke deterministic
-// key derivation across mixed call paths).
-//
-// Behavior:
-//   - s == ""   -> generate new CartID (generatedNew = true, wasBase62 = true)
-//   - base62 ok -> return parsed CartID (generatedNew = false, wasBase62 = true)
-//   - otherwise -> treat as legacy: raw = hash(s), txt = original s
-//
-// Returns:
-//
-//	cid          - CartID (txt preserved for legacy inputs)
-//	generatedNew - true only when a brand new ID was created due to empty input
-//	wasBase62    - true if the input was already canonical base62 (or generated)
-//	err          - only set if crypto/rand fails when generating a new ID
-func CanonicalizeOrLegacy(s string) (cid CartID, generatedNew bool, wasBase62 bool, err error) {
-    if s == "" {
-        id, e := NewCartID()
-        if e != nil {
-            return CartID{}, false, false, e
-        }
-        return id, true, true, nil
-    }
-    if parsed, ok := ParseCartID(s); ok {
-        return parsed, false, true, nil
-    }
-    // Legacy path: keep original text so downstream legacy-to-key hashing
-    // (which uses the visible string) yields consistent keys across code paths.
-    hashCID := FallbackFromString(s)
-    // Preserve original textual form
-    hashCID.txt = s
-    return hashCID, false, false, nil
-}
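A small usage sketch of the new CartId surface defined above. The cookie-handling wrapper is illustrative only; how the service actually treats empty or legacy identifiers is not shown in this diff, so the "issue a fresh id" branch is an assumption.

```go
// cartIdFromCookie resolves a raw cookie/header value into a CartId.
// Returns the id and whether a brand-new id was generated.
func cartIdFromCookie(raw string) (CartId, bool, error) {
	if raw == "" {
		id, err := NewCartId() // 64 bits of crypto randomness; base62 text via id.String()
		return id, true, err
	}
	if id, ok := ParseCartId(raw); ok {
		return id, false, nil
	}
	// With the legacy fallback hashing removed, non-base62 input can no longer be
	// mapped deterministically; this sketch simply issues a fresh id instead.
	id, err := NewCartId()
	return id, true, err
}
```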

View File

@@ -1,168 +1,155 @@
 package main

 import (
-    "crypto/rand"
-    "encoding/binary"
+    "encoding/json"
     "fmt"
-    mrand "math/rand"
     "testing"
 )

-// TestEncodeDecodeBase62RoundTrip verifies encodeBase62/decodeBase62 are inverse.
-func TestEncodeDecodeBase62RoundTrip(t *testing.T) {
-    mrand.Seed(42)
-    for i := 0; i < 1000; i++ {
-        // Random 64-bit value
-        v := mrand.Uint64()
-        s := encodeBase62(v)
-        dec, ok := decodeBase62(s)
-        if !ok {
-            t.Fatalf("decodeBase62 failed for %d encoded=%s", v, s)
-        }
-        if dec != v {
-            t.Fatalf("round trip mismatch: have %d got %d (encoded=%s)", v, dec, s)
-        }
-    }
-    // Explicit zero test
-    if s := encodeBase62(0); s != "0" {
-        t.Fatalf("expected encodeBase62(0) == \"0\", got %q", s)
-    }
-    if v, ok := decodeBase62("0"); !ok || v != 0 {
-        t.Fatalf("decodeBase62(0) unexpected result v=%d ok=%v", v, ok)
-    }
-}
-
-// TestNewCartIDUniqueness generates a number of IDs and checks for duplicates.
-func TestNewCartIDUniqueness(t *testing.T) {
-    const n = 10000
+// TestNewCartIdUniqueness generates many ids and checks for collisions.
+func TestNewCartIdUniqueness(t *testing.T) {
+    const n = 20000
     seen := make(map[string]struct{}, n)
     for i := 0; i < n; i++ {
-        id, err := NewCartID()
+        id, err := NewCartId()
         if err != nil {
-            t.Fatalf("NewCartID error: %v", err)
+            t.Fatalf("NewCartId error: %v", err)
         }
         s := id.String()
         if _, exists := seen[s]; exists {
-            t.Fatalf("duplicate CartID generated: %s", s)
+            t.Fatalf("duplicate id encountered: %s", s)
         }
         seen[s] = struct{}{}
-        if id.IsZero() {
-            t.Fatalf("NewCartID returned zero value")
+        if s == "" {
+            t.Fatalf("empty string representation for id %d", id)
+        }
+        if len(s) > 11 {
+            t.Fatalf("encoded id length exceeds 11 chars: %s (%d)", s, len(s))
+        }
+        if id == 0 {
+            // We force regeneration on zero, extremely unlikely but test guards intent.
+            t.Fatalf("zero id generated (should be regenerated)")
         }
     }
 }

-// TestParseCartIDValidation tests parsing of valid and invalid base62 strings.
-func TestParseCartIDValidation(t *testing.T) {
-    id, err := NewCartID()
-    if err != nil {
-        t.Fatalf("NewCartID error: %v", err)
-    }
-    parsed, ok := ParseCartID(id.String())
+// TestParseCartIdRoundTrip ensures parse -> string -> parse is stable.
+func TestParseCartIdRoundTrip(t *testing.T) {
+    id := MustNewCartId()
+    txt := id.String()
+    parsed, ok := ParseCartId(txt)
     if !ok {
-        t.Fatalf("ParseCartID failed for valid id %s", id)
+        t.Fatalf("ParseCartId failed for valid text %q", txt)
     }
-    if parsed.raw != id.raw {
-        t.Fatalf("parsed raw mismatch: %d vs %d", parsed.raw, id.raw)
-    }
-    if _, ok := ParseCartID(""); ok {
-        t.Fatalf("expected empty string to be invalid")
-    }
-    // Invalid char ('-')
-    if _, ok := ParseCartID("abc-123"); ok {
-        t.Fatalf("expected invalid chars to fail parse")
-    }
-    // Overly long ( >16 )
-    if _, ok := ParseCartID("1234567890abcdefg"); ok {
-        t.Fatalf("expected overly long string to fail parse")
+    if parsed != id {
+        t.Fatalf("round trip mismatch: original=%d parsed=%d txt=%s", id, parsed, txt)
     }
 }

-// TestFallbackDeterminism ensures fallback hashing is deterministic.
-func TestFallbackDeterminism(t *testing.T) {
-    inputs := []string{
-        "legacy-cart-1",
-        "legacy-cart-2",
-        "UPPER_lower_123",
-        "🚀unicode", // unicode bytes (will hash byte sequence)
+// TestParseCartIdInvalid covers invalid inputs.
+func TestParseCartIdInvalid(t *testing.T) {
+    invalid := []string{
+        "",               // empty
+        " ",              // space
+        "01234567890abc", // >11 chars
+        "!!!!",           // invalid chars
+        "-underscore-",   // invalid chars
+        "abc_def",        // underscore invalid for base62
+        "0123456789ABCD", // 14 chars
     }
-    for _, in := range inputs {
-        a := FallbackFromString(in)
-        b := FallbackFromString(in)
-        if a.raw != b.raw || a.String() != b.String() {
-            t.Fatalf("fallback mismatch for %q: %+v vs %+v", in, a, b)
+    for _, s := range invalid {
+        if _, ok := ParseCartId(s); ok {
+            t.Fatalf("expected parse failure for %q", s)
         }
     }
-    // Distinct inputs should almost always differ; sample check
-    a := FallbackFromString("distinct-A")
-    b := FallbackFromString("distinct-B")
-    if a.raw == b.raw {
-        t.Fatalf("unexpected identical fallback hashes for distinct inputs")
-    }
 }

-// TestCanonicalizeIncomingBehavior covers main control flow branches.
-func TestCanonicalizeIncomingBehavior(t *testing.T) {
-    // Empty => new id
-    id1, generated, err := CanonicalizeIncoming("")
-    if err != nil || !generated || id1.IsZero() {
-        t.Fatalf("CanonicalizeIncoming empty failed: id=%v gen=%v err=%v", id1, generated, err)
-    }
-
-    // Valid base62 => parse; no generation
-    id2, gen2, err := CanonicalizeIncoming(id1.String())
-    if err != nil || gen2 || id2.raw != id1.raw {
-        t.Fatalf("CanonicalizeIncoming parse mismatch: id2=%v gen2=%v err=%v", id2, gen2, err)
-    }
-
-    // Legacy-like random containing invalid chars -> fallback
-    fallbackInput := "legacy\x00\x00padding"
-    id3, gen3, err := CanonicalizeIncoming(fallbackInput)
-    if err != nil || gen3 {
-        t.Fatalf("CanonicalizeIncoming fallback unexpected: id3=%v gen3=%v err=%v", id3, gen3, err)
-    }
-
-    // Deterministic fallback
-    id4, _, _ := CanonicalizeIncoming(fallbackInput)
-    if id3.raw != id4.raw {
-        t.Fatalf("fallback canonicalization not deterministic")
-    }
-}
-
-// TestUpgradeLegacyCartId ensures mapping of old CartId is stable.
-func TestUpgradeLegacyCartId(t *testing.T) {
-    var legacy CartId
-    copy(legacy[:], []byte("legacy-123456789")) // 15 bytes + padding
-    up1 := UpgradeLegacyCartId(legacy)
-    up2 := UpgradeLegacyCartId(legacy)
-    if up1.raw != up2.raw {
-        t.Fatalf("UpgradeLegacyCartId not deterministic: %v vs %v", up1, up2)
-    }
-    if up1.String() != up2.String() {
-        t.Fatalf("UpgradeLegacyCartId string mismatch: %s vs %s", up1, up2)
-    }
-}
-
-// BenchmarkNewCartID gives a rough idea of generation cost.
-func BenchmarkNewCartID(b *testing.B) {
+// TestMustParseCartIdPanics verifies panic behavior for invalid input.
+func TestMustParseCartIdPanics(t *testing.T) {
+    defer func() {
+        if r := recover(); r == nil {
+            t.Fatalf("expected panic for invalid MustParseCartId input")
+        }
+    }()
+    _ = MustParseCartId("not*base62")
+}
+
+// TestJSONMarshalUnmarshalCartId verifies JSON round trip.
+func TestJSONMarshalUnmarshalCartId(t *testing.T) {
+    id := MustNewCartId()
+    data, err := json.Marshal(struct {
+        Cart CartId `json:"cart"`
+    }{Cart: id})
+    if err != nil {
+        t.Fatalf("marshal error: %v", err)
+    }
+    var out struct {
+        Cart CartId `json:"cart"`
+    }
+    if err := json.Unmarshal(data, &out); err != nil {
+        t.Fatalf("unmarshal error: %v", err)
+    }
+    if out.Cart != id {
+        t.Fatalf("JSON round trip mismatch: have %d got %d", id, out.Cart)
+    }
+}
+
+// TestBase62LengthBound checks worst-case length (near max uint64).
+func TestBase62LengthBound(t *testing.T) {
+    // Largest uint64
+    const maxU64 = ^uint64(0)
+    s := encodeBase62(maxU64)
+    if len(s) > 11 {
+        t.Fatalf("max uint64 encoded length > 11: %d (%s)", len(s), s)
+    }
+    dec, ok := decodeBase62(s)
+    if !ok || dec != maxU64 {
+        t.Fatalf("decode failed for max uint64: ok=%v dec=%d want=%d", ok, dec, maxU64)
+    }
+}
+
+// TestZeroEncoding ensures zero value encodes to "0" and parses back.
+func TestZeroEncoding(t *testing.T) {
+    if s := encodeBase62(0); s != "0" {
+        t.Fatalf("encodeBase62(0) expected '0', got %q", s)
+    }
+    v, ok := decodeBase62("0")
+    if !ok || v != 0 {
+        t.Fatalf("decodeBase62('0') failed: ok=%v v=%d", ok, v)
+    }
+    if _, ok := ParseCartId("0"); !ok {
+        t.Fatalf("ParseCartId(\"0\") should succeed")
+    }
+}
+
+// TestSequentialParse ensures sequentially generated ids parse correctly.
+func TestSequentialParse(t *testing.T) {
+    for i := 0; i < 1000; i++ {
+        id := MustNewCartId()
+        txt := id.String()
+        parsed, ok := ParseCartId(txt)
+        if !ok || parsed != id {
+            t.Fatalf("sequential parse mismatch: idx=%d orig=%d parsed=%d txt=%s", i, id, parsed, txt)
+        }
+    }
+}
+
+// BenchmarkNewCartId measures generation performance.
+func BenchmarkNewCartId(b *testing.B) {
     for i := 0; i < b.N; i++ {
-        if _, err := NewCartID(); err != nil {
-            b.Fatalf("error: %v", err)
+        if _, err := NewCartId(); err != nil {
+            b.Fatalf("NewCartId error: %v", err)
         }
     }
 }

-// BenchmarkEncodeBase62 measures encode speed in isolation.
+// BenchmarkEncodeBase62 measures encoding performance.
 func BenchmarkEncodeBase62(b *testing.B) {
-    // Random sample of values
+    // Precompute sample values
     samples := make([]uint64, 1024)
     for i := range samples {
-        var buf [8]byte
-        if _, err := rand.Read(buf[:]); err != nil {
-            b.Fatalf("rand: %v", err)
-        }
-        samples[i] = binary.BigEndian.Uint64(buf[:])
+        // Spread bits without crypto randomness overhead
+        samples[i] = (uint64(i) << 53) ^ (uint64(i) * 0x9E3779B185EBCA87)
     }
     b.ResetTimer()
     var sink string

@@ -172,88 +159,27 @@ func BenchmarkEncodeBase62(b *testing.B) {
     _ = sink
 }

-// BenchmarkDecodeBase62 measures decode speed.
+// BenchmarkDecodeBase62 measures decoding performance.
 func BenchmarkDecodeBase62(b *testing.B) {
-    // Pre-encode
     encoded := make([]string, 1024)
     for i := range encoded {
-        encoded[i] = encodeBase62(uint64(i)<<32 | uint64(i))
+        encoded[i] = encodeBase62((uint64(i) << 32) | uint64(i))
     }
     b.ResetTimer()
     var sum uint64
     for i := 0; i < b.N; i++ {
         v, ok := decodeBase62(encoded[i%len(encoded)])
         if !ok {
-            b.Fatalf("decode failed")
+            b.Fatalf("decode failure for %s", encoded[i%len(encoded)])
         }
         sum ^= v
     }
     _ = sum
 }

-// TestLookupNDeterminism (ring integration smoke test) ensures LookupN
-// returns distinct hosts and stable ordering for a fixed ring.
-func TestLookupNDeterminism(t *testing.T) {
-    rb := NewRingBuilder().WithEpoch(1).WithVnodesPerHost(8).WithHosts([]string{"a", "b", "c"})
-    ring := rb.Build()
-    if ring.Empty() {
-        t.Fatalf("expected non-empty ring")
-    }
-    id := MustNewCartID()
-    owners1 := ring.LookupN(id.Raw(), 3)
-    owners2 := ring.LookupN(id.Raw(), 3)
-    if len(owners1) != len(owners2) {
-        t.Fatalf("LookupN length mismatch")
-    }
-    for i := range owners1 {
-        if owners1[i].Host != owners2[i].Host {
-            t.Fatalf("LookupN ordering instability at %d: %v vs %v", i, owners1[i], owners2[i])
-        }
-    }
-    // Distinct host constraint
-    seen := map[string]struct{}{}
-    for _, v := range owners1 {
-        if _, ok := seen[v.Host]; ok {
-            t.Fatalf("duplicate host in LookupN result: %v", owners1)
-        }
-        seen[v.Host] = struct{}{}
-    }
-}
-
-// TestRingFingerprintChanges ensures fingerprint updates with membership changes.
-func TestRingFingerprintChanges(t *testing.T) {
-    b1 := NewRingBuilder().WithEpoch(1).WithHosts([]string{"node1", "node2"})
-    r1 := b1.Build()
-    b2 := NewRingBuilder().WithEpoch(2).WithHosts([]string{"node1", "node2", "node3"})
-    r2 := b2.Build()
-    if r1.Fingerprint() == r2.Fingerprint() {
-        t.Fatalf("expected differing fingerprints after host set change")
-    }
-}
-
-// TestRingDiffHosts verifies added/removed host detection.
-func TestRingDiffHosts(t *testing.T) {
-    r1 := NewRingBuilder().WithEpoch(1).WithHosts([]string{"a", "b"}).Build()
-    r2 := NewRingBuilder().WithEpoch(2).WithHosts([]string{"b", "c"}).Build()
-    added, removed := r1.DiffHosts(r2)
-    if fmt.Sprintf("%v", added) != "[c]" {
-        t.Fatalf("expected added [c], got %v", added)
-    }
-    if fmt.Sprintf("%v", removed) != "[a]" {
-        t.Fatalf("expected removed [a], got %v", removed)
-    }
-}
-
-// TestRingLookupConsistency ensures direct Lookup and LookupID are aligned.
-func TestRingLookupConsistency(t *testing.T) {
-    ring := NewRingBuilder().WithEpoch(1).WithHosts([]string{"alpha", "beta"}).WithVnodesPerHost(4).Build()
-    id, _ := ParseCartID("1")
-    if id.IsZero() {
-        t.Fatalf("expected parsed id non-zero")
-    }
-    v1 := ring.Lookup(id.Raw())
-    v2 := ring.LookupID(id)
-    if v1.Host != v2.Host || v1.Hash != v2.Hash {
-        t.Fatalf("Lookup vs LookupID mismatch: %+v vs %+v", v1, v2)
-    }
+// ExampleCartIdString documents usage of CartId string form.
+func ExampleCartId_string() {
+    id := MustNewCartId()
+    fmt.Println(len(id.String()) <= 11) // outputs true
+    // Output: true
 }

View File

@@ -1,212 +0,0 @@
package main
import (
messages "git.tornberg.me/go-cart-actor/proto"
)
// cart_state_mapper.go
//
// Utilities to translate between internal CartGrain state and the gRPC
// (typed) protobuf representation CartState. This replaces the previous
// JSON blob framing and enables type-safe replies over gRPC, as well as
// internal reuse for HTTP handlers without an extra marshal / unmarshal
// hop (you can marshal CartState directly for JSON responses if desired).
//
// Only the oneway mapping (CartGrain -> CartState) is strictly required
// for mutation / state replies. A reverse helper is included in case
// future features (e.g. snapshot import, replay, or migration) need it.
// ToCartState converts the inmemory CartGrain into a protobuf CartState.
func ToCartState(c *CartGrain) *messages.CartState {
if c == nil {
return nil
}
items := make([]*messages.CartItemState, 0, len(c.Items))
for _, it := range c.Items {
if it == nil {
continue
}
itemDiscountPerUnit := max(0, it.OrgPrice-it.Price)
itemTotalDiscount := itemDiscountPerUnit * int64(it.Quantity)
items = append(items, &messages.CartItemState{
Id: int64(it.Id),
ItemId: int64(it.ItemId),
Sku: it.Sku,
Name: it.Name,
Price: it.Price,
Qty: int32(it.Quantity),
TotalPrice: it.TotalPrice,
TotalTax: it.TotalTax,
OrgPrice: it.OrgPrice,
TaxRate: int32(it.TaxRate),
TotalDiscount: itemTotalDiscount,
Brand: it.Brand,
Category: it.Category,
Category2: it.Category2,
Category3: it.Category3,
Category4: it.Category4,
Category5: it.Category5,
Image: it.Image,
Type: it.ArticleType,
SellerId: it.SellerId,
SellerName: it.SellerName,
Disclaimer: it.Disclaimer,
Outlet: deref(it.Outlet),
StoreId: deref(it.StoreId),
Stock: int32(it.Stock),
})
}
deliveries := make([]*messages.DeliveryState, 0, len(c.Deliveries))
for _, d := range c.Deliveries {
if d == nil {
continue
}
itemIds := make([]int64, 0, len(d.Items))
for _, id := range d.Items {
itemIds = append(itemIds, int64(id))
}
var pp *messages.PickupPoint
if d.PickupPoint != nil {
// Copy to avoid accidental shared mutation (proto points are fine but explicit).
pp = &messages.PickupPoint{
Id: d.PickupPoint.Id,
Name: d.PickupPoint.Name,
Address: d.PickupPoint.Address,
City: d.PickupPoint.City,
Zip: d.PickupPoint.Zip,
Country: d.PickupPoint.Country,
}
}
deliveries = append(deliveries, &messages.DeliveryState{
Id: int64(d.Id),
Provider: d.Provider,
Price: d.Price,
Items: itemIds,
PickupPoint: pp,
})
}
return &messages.CartState{
Id: c.Id.String(),
Items: items,
TotalPrice: c.TotalPrice,
TotalTax: c.TotalTax,
TotalDiscount: c.TotalDiscount,
Deliveries: deliveries,
PaymentInProgress: c.PaymentInProgress,
OrderReference: c.OrderReference,
PaymentStatus: c.PaymentStatus,
}
}
// FromCartState merges a protobuf CartState into an existing CartGrain.
// This is optional and primarily useful for snapshot import or testing.
func FromCartState(cs *messages.CartState, g *CartGrain) *CartGrain {
if cs == nil {
return g
}
if g == nil {
g = &CartGrain{}
}
g.Id = ToCartId(cs.Id)
g.TotalPrice = cs.TotalPrice
g.TotalTax = cs.TotalTax
g.TotalDiscount = cs.TotalDiscount
g.PaymentInProgress = cs.PaymentInProgress
g.OrderReference = cs.OrderReference
g.PaymentStatus = cs.PaymentStatus
// Items
g.Items = g.Items[:0]
for _, it := range cs.Items {
if it == nil {
continue
}
outlet := toPtr(it.Outlet)
storeId := toPtr(it.StoreId)
g.Items = append(g.Items, &CartItem{
Id: int(it.Id),
ItemId: int(it.ItemId),
Sku: it.Sku,
Name: it.Name,
Price: it.Price,
Quantity: int(it.Qty),
TotalPrice: it.TotalPrice,
TotalTax: it.TotalTax,
OrgPrice: it.OrgPrice,
TaxRate: int(it.TaxRate),
Brand: it.Brand,
Category: it.Category,
Category2: it.Category2,
Category3: it.Category3,
Category4: it.Category4,
Category5: it.Category5,
Image: it.Image,
ArticleType: it.Type,
SellerId: it.SellerId,
SellerName: it.SellerName,
Disclaimer: it.Disclaimer,
Outlet: outlet,
StoreId: storeId,
Stock: StockStatus(it.Stock),
// Tax, TaxRate already set via Price / Totals if needed
})
if it.Id > int64(g.lastItemId) {
g.lastItemId = int(it.Id)
}
}
// Deliveries
g.Deliveries = g.Deliveries[:0]
for _, d := range cs.Deliveries {
if d == nil {
continue
}
intIds := make([]int, 0, len(d.Items))
for _, id := range d.Items {
intIds = append(intIds, int(id))
}
var pp *messages.PickupPoint
if d.PickupPoint != nil {
pp = &messages.PickupPoint{
Id: d.PickupPoint.Id,
Name: d.PickupPoint.Name,
Address: d.PickupPoint.Address,
City: d.PickupPoint.City,
Zip: d.PickupPoint.Zip,
Country: d.PickupPoint.Country,
}
}
g.Deliveries = append(g.Deliveries, &CartDelivery{
Id: int(d.Id),
Provider: d.Provider,
Price: d.Price,
Items: intIds,
PickupPoint: pp,
})
if d.Id > int64(g.lastDeliveryId) {
g.lastDeliveryId = int(d.Id)
}
}
return g
}
// Helper to safely de-reference optional string pointers to value or "".
func deref(p *string) string {
if p == nil {
return ""
}
return *p
}
func toPtr(s string) *string {
if s == "" {
return nil
}
return &s
}

View File

@@ -228,10 +228,10 @@ metadata:
   name: cart-ingress
   annotations:
     cert-manager.io/cluster-issuer: letsencrypt-prod
-    # nginx.ingress.kubernetes.io/affinity: "cookie"
-    # nginx.ingress.kubernetes.io/session-cookie-name: "cart-affinity"
-    # nginx.ingress.kubernetes.io/session-cookie-expires: "172800"
-    # nginx.ingress.kubernetes.io/session-cookie-max-age: "172800"
+    nginx.ingress.kubernetes.io/affinity: "cookie"
+    nginx.ingress.kubernetes.io/session-cookie-name: "cart-affinity"
+    nginx.ingress.kubernetes.io/session-cookie-expires: "172800"
+    nginx.ingress.kubernetes.io/session-cookie-max-age: "172800"
     nginx.ingress.kubernetes.io/proxy-body-size: 4m
 spec:
   ingressClassName: nginx

View File

@@ -7,16 +7,21 @@ import (
"time" "time"
) )
func init() {
gob.Register(map[uint64]int64{})
}
type DiskStorage struct { type DiskStorage struct {
stateFile string stateFile string
lastSave int64 lastSave int64
LastSaves map[CartId]int64 LastSaves map[uint64]int64
} }
func NewDiskStorage(stateFile string) (*DiskStorage, error) { func NewDiskStorage(stateFile string) (*DiskStorage, error) {
ret := &DiskStorage{ ret := &DiskStorage{
stateFile: stateFile, stateFile: stateFile,
LastSaves: make(map[CartId]int64), LastSaves: make(map[uint64]int64),
} }
err := ret.loadState() err := ret.loadState()
return ret, err return ret, err
@@ -64,7 +69,7 @@ func (s *DiskStorage) loadState() error {
func (s *DiskStorage) Store(id CartId, _ *CartGrain) error { func (s *DiskStorage) Store(id CartId, _ *CartGrain) error {
// With the removal of the legacy message log, we only update the timestamp. // With the removal of the legacy message log, we only update the timestamp.
ts := time.Now().Unix() ts := time.Now().Unix()
s.LastSaves[id] = ts s.LastSaves[uint64(id)] = ts
s.lastSave = ts s.lastSave = ts
return nil return nil
} }
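For context on the added gob.Register call: registration is only required by encoding/gob when a concrete type travels behind an interface value, which suggests the persisted state is encoded that way. The snippet below is a minimal sketch of that standard-library behavior, not the project's actual loadState/saveState.

```go
// gobRoundTrip shows why gob.Register(map[uint64]int64{}) is needed when the
// map is stored inside an interface{}: without registration, Encode fails with
// a "type not registered" error.
func gobRoundTrip() (map[uint64]int64, error) {
	// Assumes: import "bytes", "encoding/gob", "time"
	var buf bytes.Buffer
	var payload interface{} = map[uint64]int64{42: time.Now().Unix()}
	if err := gob.NewEncoder(&buf).Encode(&payload); err != nil {
		return nil, err
	}
	var decoded interface{}
	if err := gob.NewDecoder(&buf).Decode(&decoded); err != nil {
		return nil, err
	}
	return decoded.(map[uint64]int64), nil
}
```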

event_log.go (new file, 288 lines)
View File

@@ -0,0 +1,288 @@
package main
import (
"bufio"
"encoding/json"
"errors"
"fmt"
"os"
"path/filepath"
"reflect"
"sync"
"time"
messages "git.tornberg.me/go-cart-actor/proto"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
/*
event_log.go
Append-only cart event log (per cart id) with replay + metrics.
Rationale:
- Enables recovery of in-memory cart state after process restarts or TTL eviction.
- Provides a chronological mutation trail for auditing / debugging.
- Avoids write amplification of full snapshots on every mutation.
Format:
One JSON object per line:
{
"ts": 1700000000,
"type": "AddRequest",
"payload": { ... mutation fields ... }
}
Concurrency:
- Appends: synchronized per-cart via an in-process mutex map to avoid partial writes.
- Replay: sequential read of entire file; mutations applied in order.
Usage Integration (to be wired by caller):
1. After successful mutation application (non-replay), invoke:
AppendCartEvent(grain.GetId(), mutation)
2. During grain spawn, call:
ReplayCartEvents(grain, grain.GetId())
BEFORE serving requests, so state is reconstructed.
Metrics:
- cart_event_log_appends_total
- cart_event_log_replay_total
- cart_event_log_replay_failures_total
- cart_event_log_bytes_written_total
- cart_event_log_files_existing (gauge)
- cart_event_log_last_append_unix (gauge)
- cart_event_log_replay_duration_seconds (histogram)
Rotation / Compaction:
- Not implemented. If needed, implement size checks and snapshot+truncate later.
Caveats:
- Mutation schema changes may break replay unless backward-compatible.
- Missing / unknown event types are skipped (metric incremented).
- If a mutation fails during replay, replay continues (logged + metric).
*/
var (
eventLogDir = "data"
eventAppendsTotal = promauto.NewCounter(prometheus.CounterOpts{
Name: "cart_event_log_appends_total",
Help: "Total number of cart mutation events appended to event logs.",
})
eventReplayTotal = promauto.NewCounter(prometheus.CounterOpts{
Name: "cart_event_log_replay_total",
Help: "Total number of successful event log replays (per cart).",
})
eventReplayFailuresTotal = promauto.NewCounter(prometheus.CounterOpts{
Name: "cart_event_log_replay_failures_total",
Help: "Total number of failed event log replay operations.",
})
eventBytesWrittenTotal = promauto.NewCounter(prometheus.CounterOpts{
Name: "cart_event_log_bytes_written_total",
Help: "Cumulative number of bytes written to all cart event logs.",
})
eventFilesExisting = promauto.NewGauge(prometheus.GaugeOpts{
Name: "cart_event_log_files_existing",
Help: "Number of cart event log files currently existing on disk.",
})
eventLastAppendUnix = promauto.NewGauge(prometheus.GaugeOpts{
Name: "cart_event_log_last_append_unix",
Help: "Unix timestamp of the last append to any cart event log.",
})
eventReplayDuration = promauto.NewHistogram(prometheus.HistogramOpts{
Name: "cart_event_log_replay_duration_seconds",
Help: "Duration of replay operations per cart in seconds.",
Buckets: prometheus.DefBuckets,
})
eventUnknownTypesTotal = promauto.NewCounter(prometheus.CounterOpts{
Name: "cart_event_log_unknown_types_total",
Help: "Total number of unknown event types encountered during replay (skipped).",
})
eventMutationErrorsTotal = promauto.NewCounter(prometheus.CounterOpts{
Name: "cart_event_log_mutation_errors_total",
Help: "Total number of errors applying mutation events during replay.",
})
)
type cartEventRecord struct {
Timestamp int64 `json:"ts"`
Type string `json:"type"`
Payload interface{} `json:"payload"`
}
// registry of supported mutation payload type constructors
var eventTypeFactories = map[string]func() interface{}{
"AddRequest": func() interface{} { return &messages.AddRequest{} },
"AddItem": func() interface{} { return &messages.AddItem{} },
"RemoveItem": func() interface{} { return &messages.RemoveItem{} },
"RemoveDelivery": func() interface{} { return &messages.RemoveDelivery{} },
"ChangeQuantity": func() interface{} { return &messages.ChangeQuantity{} },
"SetDelivery": func() interface{} { return &messages.SetDelivery{} },
"SetPickupPoint": func() interface{} { return &messages.SetPickupPoint{} },
"SetCartRequest": func() interface{} { return &messages.SetCartRequest{} },
"OrderCreated": func() interface{} { return &messages.OrderCreated{} },
"InitializeCheckout": func() interface{} { return &messages.InitializeCheckout{} },
}
// Per-cart mutexes to serialize append operations (avoid partial overlapping writes)
var (
eventLogMu sync.Map // map[string]*sync.Mutex
)
// getCartEventMutex returns a mutex for a specific cart id string.
func getCartEventMutex(id string) *sync.Mutex {
if v, ok := eventLogMu.Load(id); ok {
return v.(*sync.Mutex)
}
m := &sync.Mutex{}
actual, _ := eventLogMu.LoadOrStore(id, m)
return actual.(*sync.Mutex)
}
// EventLogPath returns the path to the cart's event log file.
func EventLogPath(id CartId) string {
return filepath.Join(eventLogDir, fmt.Sprintf("%s.events.log", id.String()))
}
// EnsureEventLogDirectory ensures base directory exists and updates gauge.
func EnsureEventLogDirectory() error {
if _, err := os.Stat(eventLogDir); errors.Is(err, os.ErrNotExist) {
if err2 := os.MkdirAll(eventLogDir, 0755); err2 != nil {
return err2
}
}
// Update files existing gauge (approximate; counts matching *.events.log)
pattern := filepath.Join(eventLogDir, "*.events.log")
matches, _ := filepath.Glob(pattern)
eventFilesExisting.Set(float64(len(matches)))
return nil
}
// AppendCartEvent appends a mutation event to the cart's log (JSON line).
func AppendCartEvent(id CartId, mutation interface{}) error {
if mutation == nil {
return errors.New("nil mutation cannot be logged")
}
if err := EnsureEventLogDirectory(); err != nil {
return err
}
typ := mutationTypeName(mutation)
rec := cartEventRecord{
Timestamp: time.Now().Unix(),
Type: typ,
Payload: mutation,
}
lineBytes, err := json.Marshal(rec)
if err != nil {
return fmt.Errorf("marshal event: %w", err)
}
lineBytes = append(lineBytes, '\n')
path := EventLogPath(id)
mtx := getCartEventMutex(id.String())
mtx.Lock()
defer mtx.Unlock()
fh, err := os.OpenFile(path, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)
if err != nil {
return fmt.Errorf("open event log: %w", err)
}
defer fh.Close()
n, werr := fh.Write(lineBytes)
if werr != nil {
return fmt.Errorf("write event log: %w", werr)
}
eventAppendsTotal.Inc()
eventBytesWrittenTotal.Add(float64(n))
eventLastAppendUnix.Set(float64(rec.Timestamp))
return nil
}
// ReplayCartEvents replays an existing cart's event log into the provided grain.
// It applies mutation payloads in order, skipping unknown types.
func ReplayCartEvents(grain *CartGrain, id CartId) error {
start := time.Now()
path := EventLogPath(id)
if _, err := os.Stat(path); errors.Is(err, os.ErrNotExist) {
// No log -> nothing to replay
return nil
}
fh, err := os.Open(path)
if err != nil {
eventReplayFailuresTotal.Inc()
return fmt.Errorf("open replay file: %w", err)
}
defer fh.Close()
scanner := bufio.NewScanner(fh)
// Increase buffer in case of large payloads
const maxLine = 256 * 1024
buf := make([]byte, 0, 64*1024)
scanner.Buffer(buf, maxLine)
for scanner.Scan() {
line := scanner.Bytes()
var raw struct {
Timestamp int64 `json:"ts"`
Type string `json:"type"`
Payload json.RawMessage `json:"payload"`
}
if err := json.Unmarshal(line, &raw); err != nil {
eventReplayFailuresTotal.Inc()
continue // skip malformed line
}
factory, ok := eventTypeFactories[raw.Type]
if !ok {
eventUnknownTypesTotal.Inc()
continue // skip unknown mutation type
}
instance := factory()
if err := json.Unmarshal(raw.Payload, instance); err != nil {
eventMutationErrorsTotal.Inc()
continue
}
// Apply mutation directly using internal registration (bypass AppendCartEvent recursion).
if _, applyErr := ApplyRegistered(grain, instance); applyErr != nil {
eventMutationErrorsTotal.Inc()
continue
} else {
// Update lastChange to the timestamp of this event (sliding inactivity window support).
grain.lastChange = raw.Timestamp
}
}
if serr := scanner.Err(); serr != nil {
eventReplayFailuresTotal.Inc()
return fmt.Errorf("scanner error: %w", serr)
}
eventReplayTotal.Inc()
eventReplayDuration.Observe(time.Since(start).Seconds())
return nil
}
// mutationTypeName returns the short struct name for a mutation (pointer aware).
func mutationTypeName(v interface{}) string {
if v == nil {
return "nil"
}
t := reflect.TypeOf(v)
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
return t.Name()
}
/*
Future enhancements:
- Compression: gzip large (> N events) logs to reduce disk usage.
- Compaction: periodic snapshot + truncate old events to bound replay latency.
- Checkpoint events: inject cart state snapshots every M mutations.
- Integrity: add checksum per line for corruption detection.
- Multi-writer safety across processes (currently only safe within one process).
*/
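A hedged sketch of the wiring described in the "Usage Integration" comment above: replay the per-cart log before the grain serves traffic, and rely on CartGrain.Apply (shown earlier in this diff) to append events for non-replay mutations. The spawnCart helper is hypothetical; only ReplayCartEvents, CartGrain, and CartId come from this commit.

```go
// spawnCart reconstructs a cart grain from its event log before it is used.
func spawnCart(id CartId) (*CartGrain, error) {
	grain := &CartGrain{Id: id}
	// Replay historical mutations so in-memory state matches the persisted trail.
	if err := ReplayCartEvents(grain, id); err != nil {
		return nil, fmt.Errorf("replay cart %s: %w", id, err)
	}
	return grain, nil
}
```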

grafana_dashboard_cart.json (new file, 327 lines)
View File

@@ -0,0 +1,327 @@
{
"uid": "cart-actors",
"title": "Cart Actor Cluster",
"timezone": "browser",
"refresh": "30s",
"schemaVersion": 38,
"version": 1,
"editable": true,
"graphTooltip": 0,
"panels": [
{
"type": "row",
"title": "Overview",
"gridPos": { "x": 0, "y": 0, "w": 24, "h": 1 },
"id": 1,
"collapsed": false
},
{
"type": "stat",
"title": "Active Grains",
"id": 2,
"gridPos": { "x": 0, "y": 1, "w": 6, "h": 4 },
"datasource": "${DS_PROMETHEUS}",
"targets": [
{ "refId": "A", "expr": "cart_active_grains" }
],
"options": {
"colorMode": "value",
"graphMode": "none",
"justifyMode": "center",
"reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false }
}
},
{
"type": "stat",
"title": "Grains In Pool",
"id": 3,
"gridPos": { "x": 6, "y": 1, "w": 6, "h": 4 },
"datasource": "${DS_PROMETHEUS}",
"targets": [
{ "refId": "A", "expr": "cart_grains_in_pool" }
],
"options": {
"colorMode": "value",
"graphMode": "none",
"justifyMode": "center",
"reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false }
}
},
{
"type": "stat",
"title": "Pool Usage %",
"id": 4,
"gridPos": { "x": 12, "y": 1, "w": 6, "h": 4 },
"datasource": "${DS_PROMETHEUS}",
"targets": [
{ "refId": "A", "expr": "cart_grain_pool_usage * 100" }
],
"units": "percent",
"options": {
"colorMode": "value",
"graphMode": "none",
"justifyMode": "center",
"reduceOptions": { "calcs": ["lastNotNull"] }
}
},
{
"type": "stat",
"title": "Connected Remotes",
"id": 5,
"gridPos": { "x": 18, "y": 1, "w": 6, "h": 4 },
"datasource": "${DS_PROMETHEUS}",
"targets": [
{ "refId": "A", "expr": "connected_remotes" }
],
"options": {
"colorMode": "value",
"graphMode": "none",
"justifyMode": "center",
"reduceOptions": { "calcs": ["lastNotNull"] }
}
},
{
"type": "row",
"title": "Mutations",
"gridPos": { "x": 0, "y": 5, "w": 24, "h": 1 },
"id": 6,
"collapsed": false
},
{
"type": "timeseries",
"title": "Mutation Rate (1m)",
"id": 7,
"gridPos": { "x": 0, "y": 6, "w": 12, "h": 8 },
"datasource": "${DS_PROMETHEUS}",
"targets": [
{ "refId": "A", "expr": "rate(cart_mutations_total[1m])", "legendFormat": "mutations/s" },
{ "refId": "B", "expr": "rate(cart_mutation_failures_total[1m])", "legendFormat": "failures/s" }
],
"fieldConfig": { "defaults": { "unit": "ops" } }
},
{
"type": "stat",
"title": "Failure % (5m)",
"id": 8,
"gridPos": { "x": 12, "y": 6, "w": 6, "h": 4 },
"datasource": "${DS_PROMETHEUS}",
"targets": [
{
"refId": "A",
"expr": "100 * (increase(cart_mutation_failures_total[5m]) / clamp_max(increase(cart_mutations_total[5m]), 1))"
}
],
"options": {
"colorMode": "value",
"graphMode": "none",
"justifyMode": "center",
"reduceOptions": { "calcs": ["lastNotNull"] }
}
},
{
"type": "timeseries",
"title": "Mutation Latency Quantiles",
"id": 9,
"gridPos": { "x": 18, "y": 6, "w": 6, "h": 8 },
"datasource": "${DS_PROMETHEUS}",
"targets": [
{
"refId": "A",
"expr": "histogram_quantile(0.50, sum(rate(cart_mutation_latency_seconds_bucket[5m])) by (le))",
"legendFormat": "p50"
},
{
"refId": "B",
"expr": "histogram_quantile(0.90, sum(rate(cart_mutation_latency_seconds_bucket[5m])) by (le))",
"legendFormat": "p90"
},
{
"refId": "C",
"expr": "histogram_quantile(0.99, sum(rate(cart_mutation_latency_seconds_bucket[5m])) by (le))",
"legendFormat": "p99"
}
],
"fieldConfig": { "defaults": { "unit": "s" } }
},
{
"type": "row",
"title": "Event Log",
"gridPos": { "x": 0, "y": 14, "w": 24, "h": 1 },
"id": 10,
"collapsed": false
},
{
"type": "timeseries",
"title": "Event Append Rate (5m)",
"id": 11,
"gridPos": { "x": 0, "y": 15, "w": 8, "h": 6 },
"datasource": "${DS_PROMETHEUS}",
"targets": [
{ "refId": "A", "expr": "rate(cart_event_log_appends_total[5m])", "legendFormat": "appends/s" }
]
},
{
"type": "timeseries",
"title": "Event Bytes Written Rate (5m)",
"id": 12,
"gridPos": { "x": 8, "y": 15, "w": 8, "h": 6 },
"datasource": "${DS_PROMETHEUS}",
"targets": [
{ "refId": "A", "expr": "rate(cart_event_log_bytes_written_total[5m])", "legendFormat": "bytes/s" }
],
"fieldConfig": { "defaults": { "unit": "Bps" } }
},
{
"type": "stat",
"title": "Existing Log Files",
"id": 13,
"gridPos": { "x": 16, "y": 15, "w": 4, "h": 3 },
"datasource": "${DS_PROMETHEUS}",
"targets": [{ "refId": "A", "expr": "cart_event_log_files_existing" }],
"options": { "reduceOptions": { "calcs": ["lastNotNull"] } }
},
{
"type": "stat",
"title": "Last Append Age (s)",
"id": 14,
"gridPos": { "x": 20, "y": 15, "w": 4, "h": 3 },
"datasource": "${DS_PROMETHEUS}",
"targets": [
{ "refId": "A", "expr": "(time() - cart_event_log_last_append_unix)" }
],
"options": { "reduceOptions": { "calcs": ["lastNotNull"] } }
},
{
"type": "stat",
"title": "Replay Failures Total",
"id": 15,
"gridPos": { "x": 16, "y": 18, "w": 4, "h": 3 },
"datasource": "${DS_PROMETHEUS}",
"targets": [{ "refId": "A", "expr": "cart_event_log_replay_failures_total" }],
"options": { "reduceOptions": { "calcs": ["lastNotNull"] } }
},
{
"type": "stat",
"title": "Replay Duration p95 (5m)",
"id": 16,
"gridPos": { "x": 20, "y": 18, "w": 4, "h": 3 },
"datasource": "${DS_PROMETHEUS}",
"targets": [
{
"refId": "A",
"expr": "histogram_quantile(0.95, sum(rate(cart_event_log_replay_duration_seconds_bucket[5m])) by (le))"
}
],
"options": { "reduceOptions": { "calcs": ["lastNotNull"] }, "fieldConfig": { "defaults": { "unit": "s" } } }
},
{
"type": "row",
"title": "Grain Lifecycle",
"gridPos": { "x": 0, "y": 21, "w": 24, "h": 1 },
"id": 17,
"collapsed": false
},
{
"type": "timeseries",
"title": "Spawn & Lookup Rates (1m)",
"id": 18,
"gridPos": { "x": 0, "y": 22, "w": 12, "h": 8 },
"datasource": "${DS_PROMETHEUS}",
"targets": [
{ "refId": "A", "expr": "rate(cart_grain_spawned_total[1m])", "legendFormat": "spawns/s" },
{ "refId": "B", "expr": "rate(cart_grain_lookups_total[1m])", "legendFormat": "lookups/s" }
]
},
{
"type": "stat",
"title": "Negotiations Rate (5m)",
"id": 19,
"gridPos": { "x": 12, "y": 22, "w": 6, "h": 4 },
"datasource": "${DS_PROMETHEUS}",
"targets": [
{ "refId": "A", "expr": "rate(cart_remote_negotiation_total[5m])" }
],
"options": { "reduceOptions": { "calcs": ["lastNotNull"] }, "orientation": "horizontal" }
},
{
"type": "stat",
"title": "Mutations Total",
"id": 20,
"gridPos": { "x": 18, "y": 22, "w": 6, "h": 4 },
"datasource": "${DS_PROMETHEUS}",
"targets": [{ "refId": "A", "expr": "cart_mutations_total" }],
"options": { "reduceOptions": { "calcs": ["lastNotNull"] } }
},
{
"type": "row",
"title": "Event Log Errors",
"gridPos": { "x": 0, "y": 30, "w": 24, "h": 1 },
"id": 21,
"collapsed": false
},
{
"type": "stat",
"title": "Unknown Event Types",
"id": 22,
"gridPos": { "x": 0, "y": 31, "w": 6, "h": 4 },
"datasource": "${DS_PROMETHEUS}",
"targets": [{ "refId": "A", "expr": "cart_event_log_unknown_types_total" }],
"options": { "reduceOptions": { "calcs": ["lastNotNull"] } }
},
{
"type": "stat",
"title": "Event Mutation Errors",
"id": 23,
"gridPos": { "x": 6, "y": 31, "w": 6, "h": 4 },
"datasource": "${DS_PROMETHEUS}",
"targets": [{ "refId": "A", "expr": "cart_event_log_mutation_errors_total" }],
"options": { "reduceOptions": { "calcs": ["lastNotNull"] } }
},
{
"type": "stat",
"title": "Replay Success Total",
"id": 24,
"gridPos": { "x": 12, "y": 31, "w": 6, "h": 4 },
"datasource": "${DS_PROMETHEUS}",
"targets": [{ "refId": "A", "expr": "cart_event_log_replay_total" }],
"options": { "reduceOptions": { "calcs": ["lastNotNull"] } }
},
{
"type": "stat",
"title": "Replay Duration p50 (5m)",
"id": 25,
"gridPos": { "x": 18, "y": 31, "w": 6, "h": 4 },
"datasource": "${DS_PROMETHEUS}",
"targets": [
{
"refId": "A",
"expr": "histogram_quantile(0.50, sum(rate(cart_event_log_replay_duration_seconds_bucket[5m])) by (le))"
}
],
"options": { "reduceOptions": { "calcs": ["lastNotNull"] }, "fieldConfig": { "defaults": { "unit": "s" } } }
}
],
"templating": {
"list": [
{
"name": "DS_PROMETHEUS",
"label": "Prometheus",
"type": "datasource",
"query": "prometheus",
"current": { "text": "Prometheus", "value": "Prometheus" }
}
]
},
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {
"refresh_intervals": ["5s","10s","30s","1m","5m","15m","30m","1h"],
"time_options": ["5m","15m","30m","1h","6h","12h","24h","2d","7d"]
}
}
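The dashboard above queries the Prometheus counters and gauges registered via promauto in event_log.go, which presumes the process exposes them over HTTP. A minimal, assumed wiring is sketched below; the handler path and address are illustrative and not taken from this commit.

```go
// serveMetrics exposes the default Prometheus registry (including the promauto
// metrics from event_log.go) so the Grafana dashboard has something to scrape.
// Assumes: import "net/http" and "github.com/prometheus/client_golang/prometheus/promhttp".
func serveMetrics(addr string) error {
	mux := http.NewServeMux()
	mux.Handle("/metrics", promhttp.Handler())
	return http.ListenAndServe(addr, mux)
}
```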

View File

@@ -3,6 +3,7 @@ package main
 import (
     "fmt"
     "log"
+    "net/http"
     "sync"
     "time"

@@ -17,7 +18,7 @@ import (
 // from the new CartID base62 representation). For backward compatibility,
 // a deprecated legacy map keyed by CartId is maintained so existing code
 // that directly indexes pool.grains with a CartId continues to compile
-// until the full refactor across SyncedPool / remoteIndex is completed.
+// until the full refactor across SyncedPool is completed.
 //
 // Authoritative storage: grains (map[uint64]*CartGrain)
 // Legacy compatibility: grainsLegacy (map[CartId]*CartGrain) - kept in sync.

@@ -46,6 +47,15 @@ var (
 type GrainPool interface {
     Apply(id CartId, mutation interface{}) (*CartGrain, error)
     Get(id CartId) (*CartGrain, error)
+    // OwnerHost returns the primary owner host for a given cart id.
+    OwnerHost(id CartId) (Host, bool)
+    // Hostname returns the hostname of the local pool implementation.
+    Hostname() string
+}
+
+type Host interface {
+    Name() string
+    Proxy(id CartId, w http.ResponseWriter, r *http.Request) (bool, error)
 }

 // Ttl keeps expiry info

@@ -84,7 +94,7 @@ func NewGrainLocalPool(size int, ttl time.Duration, spawn func(id CartId) (*Cart
 // keyFromCartId derives the uint64 key from a legacy CartId deterministically.
 func keyFromCartId(id CartId) uint64 {
-    return LegacyToCartKey(id)
+    return uint64(id)
 }

 // storeGrain indexes a grain in both maps.

@@ -153,6 +163,20 @@ func (p *GrainLocalPool) Purge() {
     }
 }

+// RefreshExpiry resets the expiry timestamp for a living grain to now + TTL.
+// Called after successful mutations to implement a sliding inactivity window.
+func (p *GrainLocalPool) RefreshExpiry(id CartId) {
+    p.mu.Lock()
+    defer p.mu.Unlock()
+    for i := range p.expiry {
+        g := p.expiry[i].Grain
+        if g != nil && g.Id == id {
+            p.expiry[i].Expires = time.Now().Add(p.Ttl)
+            break
+        }
+    }
+}
+
 // GetGrains returns a legacy view of grains (copy) for compatibility.
 func (p *GrainLocalPool) GetGrains() map[CartId]*CartGrain {
     p.mu.RLock()

@@ -221,7 +245,12 @@ func (p *GrainLocalPool) Apply(id CartId, mutation interface{}) (*CartGrain, err
     if err != nil || grain == nil {
         return nil, err
     }
-    return grain.Apply(mutation, false)
+    result, applyErr := grain.Apply(mutation, false)
+    // Sliding TTL: refresh expiry on successful non-replay mutation (Apply always non-replay here)
+    if applyErr == nil && result != nil {
+        p.RefreshExpiry(id)
+    }
+    return result, applyErr
 }

 // Get returns current state (legacy wrapper).

@@ -242,3 +271,16 @@ func (p *GrainLocalPool) UnsafePointerToLegacyMap() uintptr {
     // Legacy map removed; retained only to satisfy any transitional callers.
     return 0
 }
+
+// OwnerHost implements the extended GrainPool interface for the standalone
+// local pool. Since the local pool has no concept of multi-host ownership,
+// it returns an empty string. Callers can treat empty as "local host".
+func (p *GrainLocalPool) OwnerHost(id CartId) (Host, bool) {
+    return nil, false
+}
+
+// Hostname returns a blank string because GrainLocalPool does not track a node
+// identity. (SyncedPool will return the real hostname.)
+func (p *GrainLocalPool) Hostname() string {
+    return ""
+}

View File

@@ -1,115 +0,0 @@
package main
import (
"context"
"fmt"
"testing"
"time"
messages "git.tornberg.me/go-cart-actor/proto"
"google.golang.org/grpc"
)
// TestCartActorMutationAndState validates end-to-end gRPC mutation + state retrieval
// against a locally started gRPC server (single-node scenario).
// This test uses the new per-mutation AddItem RPC (breaking v2 API) to avoid the external product
// fetching logic (FetchItem), which would require network I/O.
func TestCartActorMutationAndState(t *testing.T) {
// Setup local grain pool + synced pool (no discovery, single host)
pool := NewGrainLocalPool(1024, time.Minute, spawn)
synced, err := NewSyncedPool(pool, "127.0.0.1", nil)
if err != nil {
t.Fatalf("NewSyncedPool error: %v", err)
}
// Start gRPC server (CartActor + ControlPlane) on :1337
grpcSrv, err := StartGRPCServer(":1337", pool, synced)
if err != nil {
t.Fatalf("StartGRPCServer error: %v", err)
}
defer grpcSrv.GracefulStop()
// Dial the local server
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
conn, err := grpc.DialContext(ctx, "127.0.0.1:1337",
grpc.WithInsecure(),
grpc.WithBlock(),
)
if err != nil {
t.Fatalf("grpc.Dial error: %v", err)
}
defer conn.Close()
cartClient := messages.NewCartActorClient(conn)
// Create a short cart id (<=16 chars so it fits into the fixed CartId 16-byte array cleanly)
cartID := fmt.Sprintf("cart-%d", time.Now().UnixNano())
// Build an AddItem payload (bypasses FetchItem to keep test deterministic)
addItem := &messages.AddItem{
ItemId: 1,
Quantity: 1,
Price: 1000,
OrgPrice: 1000,
Sku: "test-sku",
Name: "Test SKU",
Image: "/img.png",
Stock: 2, // InStock
Tax: 2500,
Country: "se",
}
// Issue AddItem RPC directly (breaking v2 API)
addResp, err := cartClient.AddItem(context.Background(), &messages.AddItemRequest{
CartId: cartID,
ClientTimestamp: time.Now().Unix(),
Payload: addItem,
})
if err != nil {
t.Fatalf("AddItem RPC error: %v", err)
}
if addResp.StatusCode != 200 {
t.Fatalf("AddItem returned non-200 status: %d, error: %s", addResp.StatusCode, addResp.GetError())
}
// Validate the response state (from AddItem)
state := addResp.GetState()
if state == nil {
t.Fatalf("AddItem response state is nil")
}
// (Removed obsolete Mutate response handling)
if len(state.Items) != 1 {
t.Fatalf("Expected 1 item after AddItem, got %d", len(state.Items))
}
if state.Items[0].Sku != "test-sku" {
t.Fatalf("Unexpected item SKU: %s", state.Items[0].Sku)
}
// Issue GetState RPC
getResp, err := cartClient.GetState(context.Background(), &messages.StateRequest{
CartId: cartID,
})
if err != nil {
t.Fatalf("GetState RPC error: %v", err)
}
if getResp.StatusCode != 200 {
t.Fatalf("GetState returned non-200 status: %d, error: %s", getResp.StatusCode, getResp.GetError())
}
state2 := getResp.GetState()
if state2 == nil {
t.Fatalf("GetState response state is nil")
}
if len(state2.Items) != 1 {
t.Fatalf("Expected 1 item in GetState, got %d", len(state2.Items))
}
if state2.Items[0].Sku != "test-sku" {
t.Fatalf("Unexpected SKU in GetState: %s", state2.Items[0].Sku)
}
}
// Legacy serialization helper removed (oneof envelope used directly)

View File

@@ -12,194 +12,41 @@ import (
"google.golang.org/grpc/reflection" "google.golang.org/grpc/reflection"
) )
// cartActorGRPCServer implements the CartActor and ControlPlane gRPC services. // cartActorGRPCServer implements the ControlPlane gRPC services.
// It delegates cart operations to a grain pool and cluster operations to a synced pool. // It delegates cart operations to a grain pool and cluster operations to a synced pool.
type cartActorGRPCServer struct { type cartActorGRPCServer struct {
messages.UnimplementedCartActorServer
messages.UnimplementedControlPlaneServer messages.UnimplementedControlPlaneServer
pool GrainPool // For cart state mutations and queries //pool GrainPool // For cart state mutations and queries
syncedPool *SyncedPool // For cluster membership and control syncedPool *SyncedPool // For cluster membership and control
} }
// NewCartActorGRPCServer creates and initializes the server. // NewCartActorGRPCServer creates and initializes the server.
func NewCartActorGRPCServer(pool GrainPool, syncedPool *SyncedPool) *cartActorGRPCServer { func NewCartActorGRPCServer(syncedPool *SyncedPool) *cartActorGRPCServer {
return &cartActorGRPCServer{ return &cartActorGRPCServer{
pool: pool, //pool: pool,
syncedPool: syncedPool, syncedPool: syncedPool,
} }
} }
// applyMutation routes a single cart mutation to the target grain (used by per-mutation RPC handlers). func (s *cartActorGRPCServer) AnnounceOwnership(ctx context.Context, req *messages.OwnershipAnnounce) (*messages.OwnerChangeAck, error) {
func (s *cartActorGRPCServer) applyMutation(cartID string, mutation interface{}) *messages.CartMutationReply { for _, cartId := range req.CartIds {
// Canonicalize or preserve legacy id (do NOT hash-rewrite legacy textual ids) s.syncedPool.removeLocalGrain(CartId(cartId))
cid, _, wasBase62, cerr := CanonicalizeOrLegacy(cartID)
if cerr != nil {
return &messages.CartMutationReply{
StatusCode: 500,
Result: &messages.CartMutationReply_Error{Error: fmt.Sprintf("cart_id canonicalization failed: %v", cerr)},
ServerTimestamp: time.Now().Unix(),
} }
} log.Printf("Ack count: %d", len(req.CartIds))
_ = wasBase62 // placeholder; future: propagate canonical id in reply metadata return &messages.OwnerChangeAck{
legacy := CartIDToLegacy(cid) Accepted: true,
grain, err := s.pool.Apply(legacy, mutation) Message: "ownership announced",
if err != nil {
return &messages.CartMutationReply{
StatusCode: 500,
Result: &messages.CartMutationReply_Error{Error: err.Error()},
ServerTimestamp: time.Now().Unix(),
}
}
cartState := ToCartState(grain)
return &messages.CartMutationReply{
StatusCode: 200,
Result: &messages.CartMutationReply_State{State: cartState},
ServerTimestamp: time.Now().Unix(),
}
}
func (s *cartActorGRPCServer) AddRequest(ctx context.Context, req *messages.AddRequestRequest) (*messages.CartMutationReply, error) {
if req.GetCartId() == "" {
return &messages.CartMutationReply{
StatusCode: 400,
Result: &messages.CartMutationReply_Error{Error: "cart_id is required"},
ServerTimestamp: time.Now().Unix(),
}, nil
}
return s.applyMutation(req.GetCartId(), req.GetPayload()), nil
}
func (s *cartActorGRPCServer) AddItem(ctx context.Context, req *messages.AddItemRequest) (*messages.CartMutationReply, error) {
if req.GetCartId() == "" {
return &messages.CartMutationReply{
StatusCode: 400,
Result: &messages.CartMutationReply_Error{Error: "cart_id is required"},
ServerTimestamp: time.Now().Unix(),
}, nil
}
return s.applyMutation(req.GetCartId(), req.GetPayload()), nil
}
func (s *cartActorGRPCServer) RemoveItem(ctx context.Context, req *messages.RemoveItemRequest) (*messages.CartMutationReply, error) {
if req.GetCartId() == "" {
return &messages.CartMutationReply{
StatusCode: 400,
Result: &messages.CartMutationReply_Error{Error: "cart_id is required"},
ServerTimestamp: time.Now().Unix(),
}, nil
}
return s.applyMutation(req.GetCartId(), req.GetPayload()), nil
}
func (s *cartActorGRPCServer) RemoveDelivery(ctx context.Context, req *messages.RemoveDeliveryRequest) (*messages.CartMutationReply, error) {
if req.GetCartId() == "" {
return &messages.CartMutationReply{
StatusCode: 400,
Result: &messages.CartMutationReply_Error{Error: "cart_id is required"},
ServerTimestamp: time.Now().Unix(),
}, nil
}
return s.applyMutation(req.GetCartId(), req.GetPayload()), nil
}
func (s *cartActorGRPCServer) ChangeQuantity(ctx context.Context, req *messages.ChangeQuantityRequest) (*messages.CartMutationReply, error) {
if req.GetCartId() == "" {
return &messages.CartMutationReply{
StatusCode: 400,
Result: &messages.CartMutationReply_Error{Error: "cart_id is required"},
ServerTimestamp: time.Now().Unix(),
}, nil
}
return s.applyMutation(req.GetCartId(), req.GetPayload()), nil
}
func (s *cartActorGRPCServer) SetDelivery(ctx context.Context, req *messages.SetDeliveryRequest) (*messages.CartMutationReply, error) {
if req.GetCartId() == "" {
return &messages.CartMutationReply{
StatusCode: 400,
Result: &messages.CartMutationReply_Error{Error: "cart_id is required"},
ServerTimestamp: time.Now().Unix(),
}, nil
}
return s.applyMutation(req.GetCartId(), req.GetPayload()), nil
}
func (s *cartActorGRPCServer) SetPickupPoint(ctx context.Context, req *messages.SetPickupPointRequest) (*messages.CartMutationReply, error) {
if req.GetCartId() == "" {
return &messages.CartMutationReply{
StatusCode: 400,
Result: &messages.CartMutationReply_Error{Error: "cart_id is required"},
ServerTimestamp: time.Now().Unix(),
}, nil
}
return s.applyMutation(req.GetCartId(), req.GetPayload()), nil
}
/*
Checkout RPC removed. Checkout is handled at the HTTP layer (PoolServer.HandleCheckout).
*/
func (s *cartActorGRPCServer) SetCartItems(ctx context.Context, req *messages.SetCartItemsRequest) (*messages.CartMutationReply, error) {
if req.GetCartId() == "" {
return &messages.CartMutationReply{
StatusCode: 400,
Result: &messages.CartMutationReply_Error{Error: "cart_id is required"},
ServerTimestamp: time.Now().Unix(),
}, nil
}
return s.applyMutation(req.GetCartId(), req.GetPayload()), nil
}
func (s *cartActorGRPCServer) OrderCompleted(ctx context.Context, req *messages.OrderCompletedRequest) (*messages.CartMutationReply, error) {
if req.GetCartId() == "" {
return &messages.CartMutationReply{
StatusCode: 400,
Result: &messages.CartMutationReply_Error{Error: "cart_id is required"},
ServerTimestamp: time.Now().Unix(),
}, nil
}
return s.applyMutation(req.GetCartId(), req.GetPayload()), nil
}
// GetState retrieves the current state of a cart grain.
func (s *cartActorGRPCServer) GetState(ctx context.Context, req *messages.StateRequest) (*messages.StateReply, error) {
if req.GetCartId() == "" {
return &messages.StateReply{
StatusCode: 400,
Result: &messages.StateReply_Error{Error: "cart_id is required"},
}, nil
}
// Canonicalize / upgrade incoming cart id (preserve legacy strings)
cid, _, _, cerr := CanonicalizeOrLegacy(req.GetCartId())
if cerr != nil {
return &messages.StateReply{
StatusCode: 500,
Result: &messages.StateReply_Error{Error: fmt.Sprintf("cart_id canonicalization failed: %v", cerr)},
}, nil
}
legacy := CartIDToLegacy(cid)
grain, err := s.pool.Get(legacy)
if err != nil {
return &messages.StateReply{
StatusCode: 500,
Result: &messages.StateReply_Error{Error: err.Error()},
}, nil
}
cartState := ToCartState(grain)
return &messages.StateReply{
StatusCode: 200,
Result: &messages.StateReply_State{State: cartState},
}, nil }, nil
} }
// ControlPlane: Ping // ControlPlane: Ping
func (s *cartActorGRPCServer) Ping(ctx context.Context, _ *messages.Empty) (*messages.PingReply, error) { func (s *cartActorGRPCServer) Ping(ctx context.Context, _ *messages.Empty) (*messages.PingReply, error) {
// Expose cart owner cookie (first-touch owner = this host) for HTTP gateways translating gRPC metadata.
// Gateways that propagate Set-Cookie can help establish sticky sessions at the edge.
//_ = grpc.SendHeader(ctx, metadata.Pairs("set-cookie", fmt.Sprintf("cartowner=%s; Path=/; HttpOnly", s.syncedPool.Hostname())))
return &messages.PingReply{ return &messages.PingReply{
Host: s.syncedPool.Hostname, Host: s.syncedPool.Hostname(),
UnixTime: time.Now().Unix(), UnixTime: time.Now().Unix(),
}, nil }, nil
} }
@@ -214,7 +61,7 @@ func (s *cartActorGRPCServer) Negotiate(ctx context.Context, req *messages.Negot
} }
} }
// This host // This host
hostSet[s.syncedPool.Hostname] = struct{}{} hostSet[s.syncedPool.Hostname()] = struct{}{}
// Known remotes // Known remotes
s.syncedPool.mu.RLock() s.syncedPool.mu.RLock()
for h := range s.syncedPool.remoteHosts { for h := range s.syncedPool.remoteHosts {
@@ -232,12 +79,12 @@ func (s *cartActorGRPCServer) Negotiate(ctx context.Context, req *messages.Negot
// ControlPlane: GetCartIds (locally owned carts only) // ControlPlane: GetCartIds (locally owned carts only)
func (s *cartActorGRPCServer) GetCartIds(ctx context.Context, _ *messages.Empty) (*messages.CartIdsReply, error) { func (s *cartActorGRPCServer) GetCartIds(ctx context.Context, _ *messages.Empty) (*messages.CartIdsReply, error) {
s.syncedPool.local.mu.RLock() s.syncedPool.local.mu.RLock()
ids := make([]string, 0, len(s.syncedPool.local.grains)) ids := make([]uint64, 0, len(s.syncedPool.local.grains))
for _, g := range s.syncedPool.local.grains { for _, g := range s.syncedPool.local.grains {
if g == nil { if g == nil {
continue continue
} }
ids = append(ids, g.GetId().String()) ids = append(ids, uint64(g.GetId()))
} }
s.syncedPool.local.mu.RUnlock() s.syncedPool.local.mu.RUnlock()
return &messages.CartIdsReply{CartIds: ids}, nil return &messages.CartIdsReply{CartIds: ids}, nil
@@ -256,16 +103,15 @@ func (s *cartActorGRPCServer) Closing(ctx context.Context, req *messages.Closing
// StartGRPCServer configures and starts the unified gRPC server on the given address. // StartGRPCServer configures and starts the unified gRPC server on the given address.
// It registers both the CartActor and ControlPlane services. // It registers both the CartActor and ControlPlane services.
func StartGRPCServer(addr string, pool GrainPool, syncedPool *SyncedPool) (*grpc.Server, error) { func StartGRPCServer(addr string, syncedPool *SyncedPool) (*grpc.Server, error) {
lis, err := net.Listen("tcp", addr) lis, err := net.Listen("tcp", addr)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to listen: %w", err) return nil, fmt.Errorf("failed to listen: %w", err)
} }
grpcServer := grpc.NewServer() grpcServer := grpc.NewServer()
server := NewCartActorGRPCServer(pool, syncedPool) server := NewCartActorGRPCServer(syncedPool)
messages.RegisterCartActorServer(grpcServer, server)
messages.RegisterControlPlaneServer(grpcServer, server) messages.RegisterControlPlaneServer(grpcServer, server)
reflection.Register(grpcServer) reflection.Register(grpcServer)

174
k6/README.md Normal file
View File

@@ -0,0 +1,174 @@
# k6 Load Tests for Cart API
This directory contains a k6 script (`cart_load_test.js`) to stress and observe the cart actor HTTP API.
## Contents
- `cart_load_test.js` primary k6 scenario script
- `README.md` this file
## Prerequisites
- Node.js is not required (k6 runs standalone)
- k6 installed (>= v0.43 recommended)
- Prometheus + Grafana (optional) if you want to correlate results with the accompanying Grafana dashboard
- A running cart service exposing HTTP endpoints at `http://localhost:8080/cart` (default)
## Endpoints Exercised
The script exercises (per iteration):
1. `GET /cart/` - ensure/fetch the cart state (creates the cart if missing; sets the `cartid` & `cartowner` cookies)
2. `POST /cart/` - add-item mutation (random SKU & quantity)
3. `GET /cart/` - fetch again after the mutations
4. `GET /cart/checkout` - occasionally (~2% of iterations) to simulate the start of checkout
You can extend it easily to hit deliveries, quantity changes, or removal endpoints.
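A minimal, self-contained sketch of the basic flow above (an illustration only, not the full script: it hardcodes one SKU from the list below and relies on k6's built-in per-VU cookie jar instead of the script's explicit cookie handling):

```js
import http from "k6/http";
import { check, sleep } from "k6";

const BASE = __ENV.BASE_URL || "http://localhost:8080/cart";

export default function () {
  // 1. Create / fetch the cart (sets cartid & cartowner cookies on first call)
  const getRes = http.get(`${BASE}/`);
  check(getRes, { "cart fetched": (r) => r.status === 200 });

  // 2. Add an item (payload shape matches cart_load_test.js)
  const payload = JSON.stringify({ sku: "778290", quantity: 1, country: "no" });
  const addRes = http.post(`${BASE}/`, payload, {
    headers: { "Content-Type": "application/json" },
  });
  check(addRes, { "item added": (r) => r.status === 200 });

  // 3. Read the state back after the mutation
  http.get(`${BASE}/`);
  sleep(0.3);
}
```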
## Environment Variables
| Variable | Purpose | Default |
|-----------------|----------------------------------------------|-------------------------|
| `BASE_URL` | Base URL root (either host or host/cart) | `http://localhost:8080/cart` |
| `VUS` | VUs for steady_mutations scenario | `20` |
| `DURATION` | Duration for steady_mutations scenario | `5m` |
| `RAMP_TARGET` | Peak VUs for ramp_up scenario | `50` |
You can also disable one scenario by editing `options.scenarios` inside the script.
Example run:
```bash
k6 run \
-e BASE_URL=https://cart.prod.example.com/cart \
-e VUS=40 \
-e DURATION=10m \
-e RAMP_TARGET=120 \
k6/cart_load_test.js
```
## Metrics (Custom)
The script defines additional k6 metrics:
- `cart_add_item_duration` (Trend): latency of POST add item
- `cart_fetch_duration` (Trend): latency of GET cart state
- `cart_checkout_duration` (Trend): latency of checkout
- `cart_items_added` (Counter): successful add-item operations
- `cart_checkout_calls` (Counter): successful checkout calls
Thresholds (in `options.thresholds`) enforce basic SLOs:
- Mutation failure rate < 2%
- p90 mutation latency < 800 ms
- p99 overall HTTP latency < 1500 ms
Adjust thresholds to your environment if they trigger prematurely.
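For reference, this is the pattern that ties a custom metric to its threshold (a trimmed sketch of what `cart_load_test.js` does; only the metric name and tag are taken from the script):

```js
import http from "k6/http";
import { Trend } from "k6/metrics";

// Time-based Trend: the second argument marks the values as durations (ms)
const addItemTrend = new Trend("cart_add_item_duration", true);

export const options = {
  thresholds: {
    // Fail the run if p90 of samples tagged op:add exceeds 800 ms
    "cart_add_item_duration{op:add}": ["p(90)<800"],
  },
};

export default function () {
  const start = Date.now();
  http.post("http://localhost:8080/cart", "{}", {
    headers: { "Content-Type": "application/json" },
  });
  addItemTrend.add(Date.now() - start, { op: "add" }); // record with the op tag
}
```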
## Cookies & Stickiness
The script preserves:
- `cartid`: cart identity (server sets expiry separately)
- `cartowner`: owning host for sticky routing
If your load balancer or ingress enforces affinity based on these cookies, traffic will naturally concentrate on the originally claimed host for each cart under test.
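The cookie bookkeeping itself is only a few lines; a simplified version of the helpers used in the script looks like this:

```js
// Per-VU cookie state, captured from responses and replayed on every request
let cartState = { cartid: null, cartowner: null };

function updateCookies(res) {
  const cid = res.cookies["cartid"];
  if (cid && cid.length > 0) cartState.cartid = cid[0].value;
  const owner = res.cookies["cartowner"];
  if (owner && owner.length > 0) cartState.cartowner = owner[0].value;
}

function headers() {
  const h = { "Content-Type": "application/json" };
  const jar = [];
  if (cartState.cartid) jar.push(`cartid=${cartState.cartid}`);
  if (cartState.cartowner) jar.push(`cartowner=${cartState.cartowner}`);
  return jar.length > 0 ? { ...h, Cookie: jar.join("; ") } : h;
}
```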
## SKU Set
SKUs used (randomly selected each mutation):
```
778290 778345 778317 778277 778267 778376 778244 778384
778365 778377 778255 778286 778246 778270 778266 778285
778329 778425 778407 778418 778430 778469 778358 778351
778319 778307 778278 778251 778253 778261 778263 778273
778281 778294 778297 778302
```
To add/remove SKUs, edit the `SKUS` array. Keeping it non-empty and moderately sized helps randomization.
## Extending the Script
### Add Quantity Change
```js
function changeQuantity(itemId, newQty) {
const payload = JSON.stringify({ Id: itemId, Qty: newQty });
http.put(baseUrl() + '/', payload, { headers: headers() });
}
```
### Remove Item
```js
function removeItem(itemId) {
http.del(baseUrl() + '/' + itemId, null, { headers: headers() });
}
```
### Add Delivery
```js
function addDelivery(itemIds) {
const payload = JSON.stringify({ provider: "POSTNORD", items: itemIds });
http.post(baseUrl() + '/delivery', payload, { headers: headers() });
}
```
You can integrate these into the iteration loop with probabilities.
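For example (a sketch that assumes the helpers above plus the ones already defined in `cart_load_test.js`; `lastItemId` is a placeholder for the line id you would read from the add-item response):

```js
export default function () {
  ensureCart();
  addRandomItem();
  const lastItemId = 1; // placeholder: first line id in a fresh cart
  if (Math.random() < 0.10) changeQuantity(lastItemId, randomQty()); // ~10% of iterations
  if (Math.random() < 0.05) removeItem(lastItemId);                  // ~5%
  if (Math.random() < 0.03) addDelivery([lastItemId]);               // ~3%
  fetchCart();
  sleep(Math.random() * 0.5);
}
```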
## Output Summary
`handleSummary` outputs a JSON summary to stdout:
- Average & p95 mutation latencies (if present)
- Fetch p95
- Checkout count
- Check statuses
Redirect or parse that output for CI pipelines.
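If a file artifact is easier to consume than stdout, `handleSummary` can also return additional keys; k6 writes each non-`stdout`/`stderr` key's value to that file path. A minimal variant:

```js
export function handleSummary(data) {
  const addItem = data.metrics.cart_add_item_duration;
  return {
    stdout: JSON.stringify(
      { mutations_p95: addItem && addItem.values["p(95)"] },
      null,
      2,
    ),
    "k6-summary.json": JSON.stringify(data, null, 2), // full raw summary for CI archiving
  };
}
```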
## Running in CI
Use shorter durations (e.g. `DURATION=2m VUS=10`) to keep builds fast. Fail the build on threshold breaches:
```bash
k6 run -e BASE_URL=$TARGET -e VUS=10 -e DURATION=2m k6/cart_load_test.js || exit 1
```
## Correlating with Prometheus / Grafana
During load, observe:
- `cart_mutations_total` growth and latency histograms
- Event log write rate (`cart_event_log_appends_total`)
- Pool usage (`cart_grain_pool_usage`) and spawn rate (`cart_grain_spawned_total`)
- Failure counters (`cart_mutation_failures_total`) should remain low
If mutation latency spikes without high error rate, inspect external dependencies (e.g., product fetcher or Klarna endpoints).
## Common Tuning Tips
| Symptom | Potential Adjustment |
|------------------------------------|---------------------------------------------------|
| High latency p99 | Increase CPU/memory, optimize mutation handlers |
| Pool at capacity | Raise pool size argument or TTL |
| Frequent cart eviction mid-test | Confirm TTL is sliding (now 2h on mutation) |
| High replay duration | Consider snapshot + truncate event logs |
| Uneven host load | Verify `cartowner` cookie is respected upstream |
## Safety / Load Guardrails
- Start with low VUs (5-10) and a short duration.
- Scale incrementally to find saturation points.
- If using production endpoints, coordinate off-peak runs.
## License / Attribution
This test script is tailored for your internal cart actor system; adapt freely. k6 is open-source (AGPL v3). Ensure compliance if redistributing.
---
Feel free to request:
- A variant script for spike tests
- WebSocket / long poll integration (if added later)
- Synthetic error injection harness
Happy load testing!

248
k6/cart_load_test.js Normal file
View File

@@ -0,0 +1,248 @@
import http from "k6/http";
import { check, sleep, group } from "k6";
import { Counter, Trend } from "k6/metrics";
// ---------------- Configuration ----------------
export const options = {
// Adjust vus/duration for your environment
scenarios: {
steady_mutations: {
executor: "constant-vus",
vus: __ENV.VUS ? parseInt(__ENV.VUS, 10) : 20,
duration: __ENV.DURATION || "5m",
gracefulStop: "30s",
},
ramp_up: {
executor: "ramping-vus",
startVUs: 0,
stages: [
{
duration: "1m",
target: __ENV.RAMP_TARGET
? parseInt(__ENV.RAMP_TARGET, 10)
: 50,
},
{
duration: "1m",
target: __ENV.RAMP_TARGET
? parseInt(__ENV.RAMP_TARGET, 10)
: 50,
},
{ duration: "1m", target: 0 },
],
gracefulStop: "30s",
startTime: "30s",
},
},
thresholds: {
http_req_failed: ["rate<0.02"], // < 2% failures
http_req_duration: ["p(90)<800", "p(99)<1500"], // latency SLO
"cart_add_item_duration{op:add}": ["p(90)<800"],
"cart_fetch_duration{op:get}": ["p(90)<600"],
},
summaryTrendStats: ["avg", "min", "med", "max", "p(90)", "p(95)", "p(99)"],
};
// ---------------- Metrics ----------------
const addItemTrend = new Trend("cart_add_item_duration", true);
const fetchTrend = new Trend("cart_fetch_duration", true);
const checkoutTrend = new Trend("cart_checkout_duration", true);
const addedItemsCounter = new Counter("cart_items_added");
const checkoutCounter = new Counter("cart_checkout_calls");
// ---------------- SKUs ----------------
const SKUS = [
"778290",
"778345",
"778317",
"778277",
"778267",
"778376",
"778244",
"778384",
"778365",
"778377",
"778255",
"778286",
"778246",
"778270",
"778266",
"778285",
"778329",
"778425",
"778407",
"778418",
"778430",
"778469",
"778358",
"778351",
"778319",
"778307",
"778278",
"778251",
"778253",
"778261",
"778263",
"778273",
"778281",
"778294",
"778297",
"778302",
];
// ---------------- Helpers ----------------
function randomSku() {
return SKUS[Math.floor(Math.random() * SKUS.length)];
}
function randomQty() {
return 1 + Math.floor(Math.random() * 3); // 1..3
}
function baseUrl() {
const u = __ENV.BASE_URL || "http://localhost:8080/cart";
// Allow user to pass either root host or full /cart path
return u.endsWith("/cart") ? u : u.replace(/\/+$/, "") + "/cart";
}
function extractCookie(res, name) {
const cookies = res.cookies[name];
if (!cookies || cookies.length === 0) return null;
return cookies[0].value;
}
function withCookies(headers, cookieJar) {
if (!cookieJar || Object.keys(cookieJar).length === 0) return headers;
const cookieStr = Object.entries(cookieJar)
.map(([k, v]) => `${k}=${v}`)
.join("; ");
return { ...headers, Cookie: cookieStr };
}
// Maintain cart + owner cookies per VU
let cartState = {
cartid: null,
cartowner: null,
};
// Refresh cookies from response
function updateCookies(res) {
const cid = extractCookie(res, "cartid");
if (cid) cartState.cartid = cid;
const owner = extractCookie(res, "cartowner");
if (owner) cartState.cartowner = owner;
}
// Build headers
function headers() {
const h = { "Content-Type": "application/json" };
const jar = {};
if (cartState.cartid) jar["cartid"] = cartState.cartid;
if (cartState.cartowner) jar["cartowner"] = cartState.cartowner;
return withCookies(h, jar);
}
// Ensure cart exists (GET /)
function ensureCart() {
if (cartState.cartid) return;
const res = http.get(baseUrl() + "/", { headers: headers() });
updateCookies(res);
check(res, {
"ensure cart status 200": (r) => r.status === 200,
"ensure cart has id": () => !!cartState.cartid,
});
}
// Add random item
function addRandomItem() {
const payload = JSON.stringify({
sku: randomSku(),
quantity: randomQty(),
country: "no",
});
const start = Date.now();
const res = http.post(baseUrl(), payload, { headers: headers() });
const dur = Date.now() - start;
addItemTrend.add(dur, { op: "add" });
if (res.status === 200) {
addedItemsCounter.add(1);
}
updateCookies(res);
check(res, {
"add item status ok": (r) => r.status === 200,
});
}
// Fetch cart state
function fetchCart() {
const start = Date.now();
const res = http.get(baseUrl() + "/", { headers: headers() });
const dur = Date.now() - start;
fetchTrend.add(dur, { op: "get" });
updateCookies(res);
check(res, { "fetch status ok": (r) => r.status === 200 });
}
// Occasional checkout trigger
function maybeCheckout() {
if (!cartState.cartid) return;
// // Small probability
// if (Math.random() < 0.02) {
// const start = Date.now();
// const res = http.get(baseUrl() + "/checkout", { headers: headers() });
// const dur = Date.now() - start;
// checkoutTrend.add(dur, { op: "checkout" });
// updateCookies(res);
// if (res.status === 200) checkoutCounter.add(1);
// check(res, { "checkout status ok": (r) => r.status === 200 });
// }
}
// ---------------- k6 lifecycle ----------------
export function setup() {
// Provide SKU list length for summary
return { skuCount: SKUS.length };
}
export default function (data) {
group("cart flow", () => {
// Create or reuse cart
ensureCart();
// Random number of item mutations per iteration (1..5)
const ops = 1 + Math.floor(Math.random() * 5);
for (let i = 0; i < ops; i++) {
addRandomItem();
}
// Fetch state
fetchCart();
// Optional checkout attempt
maybeCheckout();
});
// Small think time
sleep(Math.random() * 0.5);
}
export function teardown(data) {
// Optionally we could GET confirmation or clear cart cookie
// Not implemented for load purpose.
console.log(`Test complete. SKU count: ${data.skuCount}`);
}
// ---------------- Summary ----------------
export function handleSummary(data) {
return {
stdout: JSON.stringify(
{
metrics: {
          mutations_avg: data.metrics.cart_add_item_duration?.values?.avg,
          mutations_p95: data.metrics.cart_add_item_duration?.values?.["p(95)"],
          fetch_p95: data.metrics.cart_fetch_duration?.values?.["p(95)"],
          checkout_count: data.metrics.cart_checkout_calls?.values?.count,
        },
        checks: data.root_group?.checks,
},
null,
2,
),
};
}

44
main.go
View File

@@ -43,11 +43,16 @@ func spawn(id CartId) (*CartGrain, error) {
Deliveries: []*CartDelivery{}, Deliveries: []*CartDelivery{},
Id: id, Id: id,
Items: []*CartItem{}, Items: []*CartItem{},
// storageMessages removed (legacy event log deprecated)
TotalPrice: 0, TotalPrice: 0,
} }
err := loadMessages(ret, id) // Set baseline lastChange at spawn; replay may update it to last event timestamp.
return ret, err ret.lastChange = time.Now().Unix()
// Legacy loadMessages (no-op) retained; then replay append-only event log
_ = loadMessages(ret, id)
_ = ReplayCartEvents(ret, id)
return ret, nil
} }
func init() { func init() {
@@ -67,7 +72,7 @@ func (a *App) Save() error {
if grain == nil { if grain == nil {
continue continue
} }
if grain.GetLastChange() > a.storage.LastSaves[id] { if grain.GetLastChange() > a.storage.LastSaves[uint64(id)] {
hasChanges = true hasChanges = true
err := a.storage.Store(id, grain) err := a.storage.Store(id, grain)
if err != nil { if err != nil {
@@ -155,23 +160,24 @@ func GetDiscovery() Discovery {
func main() { func main() {
storage, err := NewDiskStorage(fmt.Sprintf("data/%s_state.gob", name)) storage, err := NewDiskStorage(fmt.Sprintf("data/s_%s.gob", name))
if err != nil { if err != nil {
log.Printf("Error loading state: %v\n", err) log.Printf("Error loading state: %v\n", err)
} }
localPool := NewGrainLocalPool(2*65535, 15*time.Minute, spawn)
app := &App{ app := &App{
pool: NewGrainLocalPool(65535, 5*time.Minute, spawn), pool: localPool,
storage: storage, storage: storage,
} }
syncedPool, err := NewSyncedPool(app.pool, podIp, GetDiscovery()) syncedPool, err := NewSyncedPool(localPool, podIp, GetDiscovery())
if err != nil { if err != nil {
log.Fatalf("Error creating synced pool: %v\n", err) log.Fatalf("Error creating synced pool: %v\n", err)
} }
// Start unified gRPC server (CartActor + ControlPlane) replacing legacy RPC server on :1337 // Start unified gRPC server (CartActor + ControlPlane) replacing legacy RPC server on :1337
// TODO: Remove any remaining legacy RPC server references and deprecated frame-based code after full gRPC migration is validated. // TODO: Remove any remaining legacy RPC server references and deprecated frame-based code after full gRPC migration is validated.
grpcSrv, err := StartGRPCServer(":1337", app.pool, syncedPool) grpcSrv, err := StartGRPCServer(":1337", syncedPool)
if err != nil { if err != nil {
log.Fatalf("Error starting gRPC server: %v\n", err) log.Fatalf("Error starting gRPC server: %v\n", err)
} }
@@ -248,7 +254,13 @@ func main() {
w.Write([]byte("no cart id to checkout is empty")) w.Write([]byte("no cart id to checkout is empty"))
return return
} }
cartId := ToCartId(cookie.Value) parsed, ok := ParseCartId(cookie.Value)
if !ok {
w.WriteHeader(http.StatusBadRequest)
w.Write([]byte("invalid cart id format"))
return
}
cartId := parsed
order, err = syncedServer.CreateOrUpdateCheckout(r.Host, cartId) order, err = syncedServer.CreateOrUpdateCheckout(r.Host, cartId)
if err != nil { if err != nil {
w.WriteHeader(http.StatusInternalServerError) w.WriteHeader(http.StatusInternalServerError)
@@ -384,11 +396,19 @@ func main() {
} }
func triggerOrderCompleted(err error, syncedServer *PoolServer, order *CheckoutOrder) error { func triggerOrderCompleted(err error, syncedServer *PoolServer, order *CheckoutOrder) error {
_, err = syncedServer.pool.Apply(ToCartId(order.MerchantReference1), &messages.OrderCreated{ mutation := &messages.OrderCreated{
OrderId: order.ID, OrderId: order.ID,
Status: order.Status, Status: order.Status,
}) }
return err cid, ok := ParseCartId(order.MerchantReference1)
if !ok {
return fmt.Errorf("invalid cart id in order reference: %s", order.MerchantReference1)
}
_, applyErr := syncedServer.pool.Apply(cid, mutation)
if applyErr == nil {
_ = AppendCartEvent(cid, mutation)
}
return applyErr
} }
func confirmOrder(order *CheckoutOrder, orderHandler *AmqpOrderHandler) error { func confirmOrder(order *CheckoutOrder, orderHandler *AmqpOrderHandler) error {

View File

@@ -1,182 +0,0 @@
package main
import (
"context"
"fmt"
"testing"
"time"
messages "git.tornberg.me/go-cart-actor/proto"
"google.golang.org/grpc"
)
// TestMultiNodeOwnershipNegotiation spins up two gRPC servers (nodeA, nodeB),
// manually links their SyncedPools (bypassing AddRemote's fixed port assumption),
// and verifies that only one node becomes the owner of a new cart while the
// other can still apply a mutation via the remote proxy path.
//
// NOTE:
// - We manually inject RemoteHostGRPC entries because AddRemote() hard-codes
// port 1337; to run two distinct servers concurrently we need distinct ports.
// - This test asserts single ownership consistency rather than the complete
// quorum semantics (which depend on real discovery + AddRemote).
func TestMultiNodeOwnershipNegotiation(t *testing.T) {
// Allocate distinct ports for the two nodes.
const (
addrA = "127.0.0.1:18081"
addrB = "127.0.0.1:18082"
hostA = "nodeA"
hostB = "nodeB"
)
// Create local grain pools.
poolA := NewGrainLocalPool(1024, time.Minute, spawn)
poolB := NewGrainLocalPool(1024, time.Minute, spawn)
// Create synced pools (no discovery).
syncedA, err := NewSyncedPool(poolA, hostA, nil)
if err != nil {
t.Fatalf("nodeA NewSyncedPool error: %v", err)
}
syncedB, err := NewSyncedPool(poolB, hostB, nil)
if err != nil {
t.Fatalf("nodeB NewSyncedPool error: %v", err)
}
// Start gRPC servers (CartActor + ControlPlane) on different ports.
grpcSrvA, err := StartGRPCServer(addrA, poolA, syncedA)
if err != nil {
t.Fatalf("StartGRPCServer A error: %v", err)
}
defer grpcSrvA.GracefulStop()
grpcSrvB, err := StartGRPCServer(addrB, poolB, syncedB)
if err != nil {
t.Fatalf("StartGRPCServer B error: %v", err)
}
defer grpcSrvB.GracefulStop()
// Helper to connect one pool to the other's server (manual AddRemote equivalent).
link := func(src *SyncedPool, remoteHost, remoteAddr string) {
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
conn, dialErr := grpc.DialContext(ctx, remoteAddr, grpc.WithInsecure(), grpc.WithBlock())
if dialErr != nil {
t.Fatalf("dial %s (%s) failed: %v", remoteHost, remoteAddr, dialErr)
}
cartClient := messages.NewCartActorClient(conn)
controlClient := messages.NewControlPlaneClient(conn)
src.mu.Lock()
src.remoteHosts[remoteHost] = &RemoteHostGRPC{
Host: remoteHost,
Conn: conn,
CartClient: cartClient,
ControlClient: controlClient,
}
src.mu.Unlock()
}
// Cross-link the two pools.
link(syncedA, hostB, addrB)
link(syncedB, hostA, addrA)
// Rebuild rings after manual cross-link so deterministic ownership works immediately.
syncedA.ForceRingRefresh()
syncedB.ForceRingRefresh()
// Allow brief stabilization (control plane pings / no real negotiation needed here).
time.Sleep(200 * time.Millisecond)
// Create a deterministic cart id for test readability.
cartID := ToCartId(fmt.Sprintf("cart-%d", time.Now().UnixNano()))
// Mutation payload (ring-determined ownership; no assumption about which node owns).
addItem := &messages.AddItem{
ItemId: 1,
Quantity: 1,
Price: 1500,
OrgPrice: 1500,
Sku: "sku-test-multi",
Name: "Multi Node Test",
Image: "/test.png",
Stock: 2,
Tax: 2500,
Country: "se",
}
// Determine ring owner and set primary / secondary references.
ownerHost := syncedA.DebugOwnerHost(cartID)
var ownerSynced, otherSynced *SyncedPool
var ownerPool, otherPool *GrainLocalPool
switch ownerHost {
case hostA:
ownerSynced, ownerPool = syncedA, poolA
otherSynced, otherPool = syncedB, poolB
case hostB:
ownerSynced, ownerPool = syncedB, poolB
otherSynced, otherPool = syncedA, poolA
default:
t.Fatalf("unexpected ring owner %s (expected %s or %s)", ownerHost, hostA, hostB)
}
// Apply mutation on the ring-designated owner.
if _, err := ownerSynced.Apply(cartID, addItem); err != nil {
t.Fatalf("owner %s Apply addItem error: %v", ownerHost, err)
}
// Validate owner pool has the grain and the other does not.
if _, ok := ownerPool.GetGrains()[cartID]; !ok {
t.Fatalf("expected owner %s to have local grain", ownerHost)
}
if _, ok := otherPool.GetGrains()[cartID]; ok {
t.Fatalf("non-owner unexpectedly holds local grain")
}
// Prepare change mutation to be applied from the non-owner (should route remotely).
change := &messages.ChangeQuantity{
Id: 1, // line id after first AddItem
Quantity: 2,
}
// Apply remotely via the non-owner.
if _, err := otherSynced.Apply(cartID, change); err != nil {
t.Fatalf("non-owner remote Apply changeQuantity error: %v", err)
}
// Remote re-mutation already performed via otherSynced; removed duplicate block.
// NodeB local grain assertion:
// Only assert absence if nodeB is NOT the ring-designated owner. If nodeB is the owner,
// it is expected to have a local grain (previous generic ownership assertions already ran).
if ownerHost != hostB {
if _, local := poolB.GetGrains()[cartID]; local {
t.Fatalf("nodeB unexpectedly created local grain (ownership duplication)")
}
}
// Fetch state from nodeB to ensure we see updated quantity (2).
grainStateB, err := syncedB.Get(cartID)
if err != nil {
t.Fatalf("nodeB Get error: %v", err)
}
if len(grainStateB.Items) != 1 || grainStateB.Items[0].Quantity != 2 {
t.Fatalf("nodeB observed inconsistent state: items=%d qty=%d (expected 1 / 2)",
len(grainStateB.Items),
func() int {
if len(grainStateB.Items) == 0 {
return -1
}
return grainStateB.Items[0].Quantity
}(),
)
}
// Cross-check from nodeA (authoritative) to ensure state matches.
grainStateA, err := syncedA.Get(cartID)
if err != nil {
t.Fatalf("nodeA Get error: %v", err)
}
if grainStateA.Items[0].Quantity != 2 {
t.Fatalf("nodeA authoritative state mismatch: expected qty=2 got %d", grainStateA.Items[0].Quantity)
}
}

View File

@@ -1,304 +0,0 @@
package main
import (
"context"
"fmt"
"testing"
"time"
messages "git.tornberg.me/go-cart-actor/proto"
"google.golang.org/grpc"
)
// TestThreeNodeMajorityOwnership validates ring-determined ownership and routing
// in a 3-node cluster (A,B,C) using the consistent hashing ring (no quorum RPC).
// The previous ConfirmOwner / quorum semantics have been removed; ownership is
// deterministic and derived from the ring.
//
// It validates:
// 1. The ring selects exactly one primary owner for a new cart.
// 2. Other nodes (B,C) do NOT create local grains for the cart.
// 3. Remote proxies are installed lazily so remote mutations can route.
// 4. A remote mutation from one non-owner updates state visible on another.
// 5. Authoritative state on the owner matches remote observations.
// 6. (Future) This scaffolds replication tests when RF>1 is enabled.
//
// (Legacy comments about ConfirmOwner acceptance thresholds have been removed.)
// (Function name retained for historical continuity.)
func TestThreeNodeMajorityOwnership(t *testing.T) {
const (
addrA = "127.0.0.1:18181"
addrB = "127.0.0.1:18182"
addrC = "127.0.0.1:18183"
hostA = "nodeA3"
hostB = "nodeB3"
hostC = "nodeC3"
)
// Local grain pools
poolA := NewGrainLocalPool(1024, time.Minute, spawn)
poolB := NewGrainLocalPool(1024, time.Minute, spawn)
poolC := NewGrainLocalPool(1024, time.Minute, spawn)
// Synced pools (no discovery)
syncedA, err := NewSyncedPool(poolA, hostA, nil)
if err != nil {
t.Fatalf("nodeA NewSyncedPool error: %v", err)
}
syncedB, err := NewSyncedPool(poolB, hostB, nil)
if err != nil {
t.Fatalf("nodeB NewSyncedPool error: %v", err)
}
syncedC, err := NewSyncedPool(poolC, hostC, nil)
if err != nil {
t.Fatalf("nodeC NewSyncedPool error: %v", err)
}
// Start gRPC servers
grpcSrvA, err := StartGRPCServer(addrA, poolA, syncedA)
if err != nil {
t.Fatalf("StartGRPCServer A error: %v", err)
}
defer grpcSrvA.GracefulStop()
grpcSrvB, err := StartGRPCServer(addrB, poolB, syncedB)
if err != nil {
t.Fatalf("StartGRPCServer B error: %v", err)
}
defer grpcSrvB.GracefulStop()
grpcSrvC, err := StartGRPCServer(addrC, poolC, syncedC)
if err != nil {
t.Fatalf("StartGRPCServer C error: %v", err)
}
defer grpcSrvC.GracefulStop()
// Helper for manual cross-link (since AddRemote assumes fixed port)
link := func(src *SyncedPool, remoteHost, remoteAddr string) {
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
conn, dialErr := grpc.DialContext(ctx, remoteAddr, grpc.WithInsecure(), grpc.WithBlock())
if dialErr != nil {
t.Fatalf("dial %s (%s) failed: %v", remoteHost, remoteAddr, dialErr)
}
cartClient := messages.NewCartActorClient(conn)
controlClient := messages.NewControlPlaneClient(conn)
src.mu.Lock()
src.remoteHosts[remoteHost] = &RemoteHostGRPC{
Host: remoteHost,
Conn: conn,
CartClient: cartClient,
ControlClient: controlClient,
}
src.mu.Unlock()
}
// Full mesh (each node knows all others)
link(syncedA, hostB, addrB)
link(syncedA, hostC, addrC)
link(syncedB, hostA, addrA)
link(syncedB, hostC, addrC)
link(syncedC, hostA, addrA)
link(syncedC, hostB, addrB)
// Rebuild rings after manual linking so ownership resolution is immediate.
syncedA.ForceRingRefresh()
syncedB.ForceRingRefresh()
syncedC.ForceRingRefresh()
// Allow brief stabilization
time.Sleep(200 * time.Millisecond)
// Deterministic-ish cart id
cartID := ToCartId(fmt.Sprintf("cart3-%d", time.Now().UnixNano()))
addItem := &messages.AddItem{
ItemId: 10,
Quantity: 1,
Price: 5000,
OrgPrice: 5000,
Sku: "sku-3node",
Name: "Three Node Test",
Image: "/t.png",
Stock: 10,
Tax: 2500,
Country: "se",
}
// Determine ring-designated owner (may be any of the three hosts)
ownerPre := syncedA.DebugOwnerHost(cartID)
if ownerPre != hostA && ownerPre != hostB && ownerPre != hostC {
t.Fatalf("ring returned unexpected owner %s (not in set {%s,%s,%s})", ownerPre, hostA, hostB, hostC)
}
var ownerSynced *SyncedPool
var ownerPool *GrainLocalPool
switch ownerPre {
case hostA:
ownerSynced, ownerPool = syncedA, poolA
case hostB:
ownerSynced, ownerPool = syncedB, poolB
case hostC:
ownerSynced, ownerPool = syncedC, poolC
}
// Pick two distinct non-owner nodes for remote mutation assertions
var remote1Synced, remote2Synced *SyncedPool
switch ownerPre {
case hostA:
remote1Synced, remote2Synced = syncedB, syncedC
case hostB:
remote1Synced, remote2Synced = syncedA, syncedC
case hostC:
remote1Synced, remote2Synced = syncedA, syncedB
}
// Apply on the ring-designated owner
if _, err := ownerSynced.Apply(cartID, addItem); err != nil {
t.Fatalf("owner %s Apply addItem error: %v", ownerPre, err)
}
// Small wait for remote proxy spawn (ring ownership already deterministic)
time.Sleep(150 * time.Millisecond)
// Assert only nodeA has local grain
localCount := 0
if _, ok := poolA.GetGrains()[cartID]; ok {
localCount++
}
if _, ok := poolB.GetGrains()[cartID]; ok {
localCount++
}
if _, ok := poolC.GetGrains()[cartID]; ok {
localCount++
}
if localCount != 1 {
t.Fatalf("expected exactly 1 local grain, got %d", localCount)
}
if _, ok := ownerPool.GetGrains()[cartID]; !ok {
t.Fatalf("expected owner %s to hold local grain", ownerPre)
}
// Remote proxies may not pre-exist; first remote mutation will trigger SpawnRemoteGrain lazily.
// Issue remote mutation from one non-owner -> ChangeQuantity (increase)
change := &messages.ChangeQuantity{
Id: 1,
Quantity: 3,
}
if _, err := remote1Synced.Apply(cartID, change); err != nil {
t.Fatalf("remote mutation (remote1) changeQuantity error: %v", err)
}
// Validate updated state visible via nodeC
stateC, err := remote2Synced.Get(cartID)
if err != nil {
t.Fatalf("nodeC Get error: %v", err)
}
if len(stateC.Items) != 1 || stateC.Items[0].Quantity != 3 {
t.Fatalf("nodeC observed state mismatch: items=%d qty=%d (expected 1 / 3)",
len(stateC.Items),
func() int {
if len(stateC.Items) == 0 {
return -1
}
return stateC.Items[0].Quantity
}(),
)
}
// Cross-check authoritative nodeA
stateA, err := syncedA.Get(cartID)
if err != nil {
t.Fatalf("nodeA Get error: %v", err)
}
if stateA.Items[0].Quantity != 3 {
t.Fatalf("nodeA authoritative state mismatch: expected qty=3 got %d", stateA.Items[0].Quantity)
}
}
// TestThreeNodeDiscoveryMajorityOwnership (placeholder)
// This test is a scaffold demonstrating how a MockDiscovery would be wired
// once AddRemote supports host:port (currently hard-coded to :1337).
// It is skipped to avoid flakiness / false negatives until the production
// AddRemote logic is enhanced to parse dynamic ports or the test harness
// provides consistent port mapping.
func TestThreeNodeDiscoveryMajorityOwnership(t *testing.T) {
t.Skip("Pending enhancement: AddRemote needs host:port support to fully exercise discovery-based multi-node linking")
// Example skeleton (non-functional with current AddRemote implementation):
//
// md := NewMockDiscovery([]string{"nodeB3", "nodeC3"})
// poolA := NewGrainLocalPool(1024, time.Minute, spawn)
// syncedA, err := NewSyncedPool(poolA, "nodeA3", md)
// if err != nil {
// t.Fatalf("NewSyncedPool with mock discovery error: %v", err)
// }
// // Start server for nodeA (would also need servers for nodeB3/nodeC3 on expected ports)
// // grpcSrvA, _ := StartGRPCServer(":1337", poolA, syncedA)
// // defer grpcSrvA.GracefulStop()
//
// // Dynamically add a host via discovery
// // md.AddHost("nodeB3")
// // time.Sleep(100 * time.Millisecond) // allow AddRemote attempt
//
// // Assertions would verify syncedA.remoteHosts contains "nodeB3"
}
// TestHostRemovalAndErrorWithMockDiscovery validates behavior when:
// 1. Discovery reports a host that cannot be dialed (AddRemote error path)
// 2. That host is then removed (Deleted event) without leaving residual state
// 3. A second failing host is added afterward (ensuring watcher still processes events)
//
// NOTE: Because AddRemote currently hard-codes :1337 and we are NOT starting a
// real server for the bogus hosts, the dial will fail and the remote host should
// never appear in remoteHosts. This intentionally exercises the error logging
// path: "AddRemote: dial ... failed".
func TestHostRemovalAndErrorWithMockDiscovery(t *testing.T) {
// Start a real node A (acts as the observing node)
const addrA = "127.0.0.1:18281"
hostA := "nodeA-md"
poolA := NewGrainLocalPool(128, time.Minute, spawn)
// Mock discovery starts with one bogus host that will fail to connect.
md := NewMockDiscovery([]string{"bogus-host-1"})
syncedA, err := NewSyncedPool(poolA, hostA, md)
if err != nil {
t.Fatalf("NewSyncedPool error: %v", err)
}
grpcSrvA, err := StartGRPCServer(addrA, poolA, syncedA)
if err != nil {
t.Fatalf("StartGRPCServer A error: %v", err)
}
defer grpcSrvA.GracefulStop()
// Kick off watch processing by starting Watch() (NewSyncedPool does this internally
// when discovery is non-nil, but we ensure events channel is active).
// The initial bogus host should trigger AddRemote -> dial failure.
time.Sleep(300 * time.Millisecond)
syncedA.mu.RLock()
if len(syncedA.remoteHosts) != 0 {
syncedA.mu.RUnlock()
t.Fatalf("expected 0 remoteHosts after failing dial, got %d", len(syncedA.remoteHosts))
}
syncedA.mu.RUnlock()
// Remove the bogus host (should not panic; no entry to clean up).
md.RemoveHost("bogus-host-1")
time.Sleep(100 * time.Millisecond)
// Add another bogus host to ensure watcher still alive.
md.AddHost("bogus-host-2")
time.Sleep(300 * time.Millisecond)
syncedA.mu.RLock()
if len(syncedA.remoteHosts) != 0 {
syncedA.mu.RUnlock()
t.Fatalf("expected 0 remoteHosts after second failing dial, got %d", len(syncedA.remoteHosts))
}
syncedA.mu.RUnlock()
// Clean up discovery
md.Close()
}

View File

@@ -5,7 +5,6 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"log" "log"
"math/rand"
"net/http" "net/http"
"strconv" "strconv"
"time" "time"
@@ -25,12 +24,12 @@ func NewPoolServer(pool GrainPool, pod_name string) *PoolServer {
} }
} }
func (s *PoolServer) process(id CartId, mutation interface{}) (*messages.CartState, error) { func (s *PoolServer) process(id CartId, mutation interface{}) (*CartGrain, error) {
grain, err := s.pool.Apply(id, mutation) grain, err := s.pool.Apply(id, mutation)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return ToCartState(grain), nil return grain, nil
} }
func (s *PoolServer) HandleGet(w http.ResponseWriter, r *http.Request, id CartId) error { func (s *PoolServer) HandleGet(w http.ResponseWriter, r *http.Request, id CartId) error {
@@ -39,7 +38,7 @@ func (s *PoolServer) HandleGet(w http.ResponseWriter, r *http.Request, id CartId
return err return err
} }
return s.WriteResult(w, ToCartState(grain)) return s.WriteResult(w, grain)
} }
func (s *PoolServer) HandleAddSku(w http.ResponseWriter, r *http.Request, id CartId) error { func (s *PoolServer) HandleAddSku(w http.ResponseWriter, r *http.Request, id CartId) error {
@@ -62,7 +61,7 @@ func ErrorHandler(fn func(w http.ResponseWriter, r *http.Request) error) func(w
} }
} }
func (s *PoolServer) WriteResult(w http.ResponseWriter, result *messages.CartState) error { func (s *PoolServer) WriteResult(w http.ResponseWriter, result *CartGrain) error {
w.Header().Set("Content-Type", "application/json") w.Header().Set("Content-Type", "application/json")
w.Header().Set("Cache-Control", "no-cache") w.Header().Set("Cache-Control", "no-cache")
w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Origin", "*")
@@ -266,73 +265,64 @@ func (s *PoolServer) HandleCheckout(w http.ResponseWriter, r *http.Request, id C
return json.NewEncoder(w).Encode(klarnaOrder) return json.NewEncoder(w).Encode(klarnaOrder)
} }
func NewCartId() CartId { /*
// Deprecated: legacy random/time based cart id generator. Legacy wrapper NewCartId removed.
// Retained for compatibility; new code should prefer canonical CartID path. Use the unified generator in cart_id.go:
cid, err := NewCartID() id, err := NewCartId()
if err != nil { or panic-on-error helper:
// Fallback to legacy method only if crypto/rand fails id := MustNewCartId()
id := time.Now().UnixNano() + rand.Int63() */
return ToCartId(fmt.Sprintf("%d", id))
func CookieCartIdHandler(fn func(cartId CartId, w http.ResponseWriter, r *http.Request) error) func(w http.ResponseWriter, r *http.Request) error {
return func(w http.ResponseWriter, r *http.Request) error {
var id CartId
cookie, err := r.Cookie("cartid")
if err != nil || cookie.Value == "" {
id = MustNewCartId()
http.SetCookie(w, &http.Cookie{
Name: "cartid",
Value: id.String(),
Secure: r.TLS != nil,
HttpOnly: true,
Path: "/",
Expires: time.Now().AddDate(0, 0, 14),
SameSite: http.SameSiteLaxMode,
})
w.Header().Set("Set-Cart-Id", id.String())
} else {
parsed, ok := ParseCartId(cookie.Value)
if !ok {
id = MustNewCartId()
http.SetCookie(w, &http.Cookie{
Name: "cartid",
Value: id.String(),
Secure: r.TLS != nil,
HttpOnly: true,
Path: "/",
Expires: time.Now().AddDate(0, 0, 14),
SameSite: http.SameSiteLaxMode,
})
w.Header().Set("Set-Cart-Id", id.String())
} else {
id = parsed
}
}
// if ownershipProxyAfterExtraction != nil {
// if handled, err := ownershipProxyAfterExtraction(id, w, r); handled || err != nil {
// return err
// }
// }
return fn(id, w, r)
} }
return CartIDToLegacy(cid)
} }
func CookieCartIdHandler(fn func(w http.ResponseWriter, r *http.Request, cartId CartId) error) func(w http.ResponseWriter, r *http.Request) error { // Removed leftover legacy block after CookieCartIdHandler (obsolete code referencing cid/legacy)
return func(w http.ResponseWriter, r *http.Request) error {
// Extract / normalize cookie (preserve legacy textual IDs without rewriting).
var legacy CartId
cookies := r.CookiesNamed("cartid")
if len(cookies) == 0 {
// No cookie -> generate new canonical base62 id.
cid, generated, _, err := CanonicalizeOrLegacy("")
if err != nil {
return fmt.Errorf("failed to generate cart id: %w", err)
}
legacy = CartIDToLegacy(cid)
if generated {
http.SetCookie(w, &http.Cookie{
Name: "cartid",
Value: cid.String(),
Secure: r.TLS != nil,
HttpOnly: true,
Path: "/",
Expires: time.Now().AddDate(0, 0, 14),
SameSite: http.SameSiteLaxMode,
})
w.Header().Set("Set-Cart-Id", cid.String())
}
} else {
raw := cookies[0].Value
cid, generated, wasBase62, err := CanonicalizeOrLegacy(raw)
if err != nil {
return fmt.Errorf("failed to canonicalize cart id: %w", err)
}
legacy = CartIDToLegacy(cid)
// Only set a new cookie if we actually generated a brand-new ID (empty input).
// For legacy (non-base62) ids we preserve the original text and do not overwrite.
if generated && wasBase62 {
http.SetCookie(w, &http.Cookie{
Name: "cartid",
Value: cid.String(),
Secure: r.TLS != nil,
HttpOnly: true,
Path: "/",
Expires: time.Now().AddDate(0, 0, 14),
SameSite: http.SameSiteLaxMode,
})
w.Header().Set("Set-Cart-Id", cid.String())
}
}
return fn(w, r, legacy)
}
}
func (s *PoolServer) RemoveCartCookie(w http.ResponseWriter, r *http.Request, cartId CartId) error { func (s *PoolServer) RemoveCartCookie(w http.ResponseWriter, r *http.Request, cartId CartId) error {
cartId = NewCartId() // Clear cart cookie (breaking change: do not issue a new legacy id here)
http.SetCookie(w, &http.Cookie{ http.SetCookie(w, &http.Cookie{
Name: "cartid", Name: "cartid",
Value: cartId.String(), Value: "",
Path: "/", Path: "/",
Secure: r.TLS != nil, Secure: r.TLS != nil,
HttpOnly: true, HttpOnly: true,
@@ -343,26 +333,115 @@ func (s *PoolServer) RemoveCartCookie(w http.ResponseWriter, r *http.Request, ca
return nil return nil
} }
func CartIdHandler(fn func(w http.ResponseWriter, r *http.Request, cartId CartId) error) func(w http.ResponseWriter, r *http.Request) error { func CartIdHandler(fn func(cartId CartId, w http.ResponseWriter, r *http.Request) error) func(w http.ResponseWriter, r *http.Request) error {
return func(w http.ResponseWriter, r *http.Request) error { return func(w http.ResponseWriter, r *http.Request) error {
raw := r.PathValue("id") raw := r.PathValue("id")
cid, generated, wasBase62, err := CanonicalizeOrLegacy(raw) // If no id supplied, generate a new one
if err != nil { if raw == "" {
return fmt.Errorf("invalid cart id: %w", err) id := MustNewCartId()
w.Header().Set("Set-Cart-Id", id.String())
return fn(id, w, r)
} }
legacy := CartIDToLegacy(cid) // Parse base62 cart id
// Only emit Set-Cart-Id header if we produced a brand-new canonical id id, ok := ParseCartId(raw)
// AND it is base62 (avoid rewriting legacy textual identifiers). if !ok {
if generated && wasBase62 { return fmt.Errorf("invalid cart id format")
w.Header().Set("Set-Cart-Id", cid.String())
} }
return fn(w, r, legacy)
return fn(id, w, r)
} }
} }
func (s *PoolServer) ProxyHandler(fn func(w http.ResponseWriter, r *http.Request, cartId CartId) error) func(cartId CartId, w http.ResponseWriter, r *http.Request) error {
return func(cartId CartId, w http.ResponseWriter, r *http.Request) error {
if ownerHost, ok := s.pool.OwnerHost(cartId); ok {
ok, err := ownerHost.Proxy(cartId, w, r)
if ok || err != nil {
log.Printf("proxy failed: %v", err)
// todo take ownership!!
} else {
return nil
}
}
// Local ownership or no owner known, proceed with local handling
return fn(w, r, cartId)
}
}
//var ownershipProxyAfterExtraction func(cartId CartId, w http.ResponseWriter, r *http.Request) (handled bool, err error)
func (s *PoolServer) Serve() *http.ServeMux { func (s *PoolServer) Serve() *http.ServeMux {
// // Install ownership proxy hook that runs AFTER id extraction (cookie OR path)
// ownershipProxyAfterExtraction = func(cartId CartId, w http.ResponseWriter, r *http.Request) (bool, error) {
// if cartId.String() == "" {
// return false, nil
// }
// owner := s.pool.OwnerHost(cartId)
// if owner == "" || owner == s.pool.Hostname() {
// // Set / refresh cartowner cookie pointing to the local host (claim or already owned).
// localHost := owner
// if localHost == "" {
// localHost = s.pool.Hostname()
// }
// http.SetCookie(w, &http.Cookie{
// Name: "cartowner",
// Value: localHost,
// Path: "/",
// HttpOnly: true,
// SameSite: http.SameSiteLaxMode,
// })
// return false, nil
// }
// // For remote ownership set cartowner cookie to remote host for sticky sessions.
// http.SetCookie(w, &http.Cookie{
// Name: "cartowner",
// Value: owner,
// Path: "/",
// HttpOnly: true,
// SameSite: http.SameSiteLaxMode,
// })
// // Proxy logic (simplified): reuse existing request to owning host on same port.
// target := "http://" + owner + r.URL.Path
// if q := r.URL.RawQuery; q != "" {
// target += "?" + q
// }
// req, err := http.NewRequestWithContext(r.Context(), r.Method, target, r.Body)
// if err != nil {
// http.Error(w, "proxy build error", http.StatusBadGateway)
// return true, err
// }
// for k, v := range r.Header {
// for _, vv := range v {
// req.Header.Add(k, vv)
// }
// }
// req.Header.Set("X-Forwarded-Host", r.Host)
// req.Header.Set("X-Cart-Id", cartId.String())
// req.Header.Set("X-Cart-Owner", owner)
// resp, err := http.DefaultClient.Do(req)
// if err != nil {
// http.Error(w, "proxy upstream error", http.StatusBadGateway)
// return true, err
// }
// defer resp.Body.Close()
// for k, v := range resp.Header {
// for _, vv := range v {
// w.Header().Add(k, vv)
// }
// }
// w.Header().Set("X-Cart-Owner-Routed", "true")
// w.WriteHeader(resp.StatusCode)
// _, copyErr := io.Copy(w, resp.Body)
// if copyErr != nil {
// return true, copyErr
// }
// return true, nil
// }
mux := http.NewServeMux() mux := http.NewServeMux()
//mux.HandleFunc("/", s.RewritePath)
mux.HandleFunc("OPTIONS /", func(w http.ResponseWriter, r *http.Request) { mux.HandleFunc("OPTIONS /", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "GET, PUT, POST, DELETE") w.Header().Set("Access-Control-Allow-Methods", "GET, PUT, POST, DELETE")
@@ -370,29 +449,29 @@ func (s *PoolServer) Serve() *http.ServeMux {
w.WriteHeader(http.StatusOK)
})
-mux.HandleFunc("GET /", ErrorHandler(CookieCartIdHandler(s.HandleGet)))
+mux.HandleFunc("GET /", ErrorHandler(CookieCartIdHandler(s.ProxyHandler(s.HandleGet))))
-mux.HandleFunc("GET /add/{sku}", ErrorHandler(CookieCartIdHandler(s.HandleAddSku)))
+mux.HandleFunc("GET /add/{sku}", ErrorHandler(CookieCartIdHandler(s.ProxyHandler(s.HandleAddSku))))
-mux.HandleFunc("POST /", ErrorHandler(CookieCartIdHandler(s.HandleAddRequest)))
+mux.HandleFunc("POST /", ErrorHandler(CookieCartIdHandler(s.ProxyHandler(s.HandleAddRequest))))
-mux.HandleFunc("POST /set", ErrorHandler(CookieCartIdHandler(s.HandleSetCartItems)))
+mux.HandleFunc("POST /set", ErrorHandler(CookieCartIdHandler(s.ProxyHandler(s.HandleSetCartItems))))
-mux.HandleFunc("DELETE /{itemId}", ErrorHandler(CookieCartIdHandler(s.HandleDeleteItem)))
+mux.HandleFunc("DELETE /{itemId}", ErrorHandler(CookieCartIdHandler(s.ProxyHandler(s.HandleDeleteItem))))
-mux.HandleFunc("PUT /", ErrorHandler(CookieCartIdHandler(s.HandleQuantityChange)))
+mux.HandleFunc("PUT /", ErrorHandler(CookieCartIdHandler(s.ProxyHandler(s.HandleQuantityChange))))
-mux.HandleFunc("DELETE /", ErrorHandler(CookieCartIdHandler(s.RemoveCartCookie)))
+mux.HandleFunc("DELETE /", ErrorHandler(CookieCartIdHandler(s.ProxyHandler(s.RemoveCartCookie))))
-mux.HandleFunc("POST /delivery", ErrorHandler(CookieCartIdHandler(s.HandleSetDelivery)))
+mux.HandleFunc("POST /delivery", ErrorHandler(CookieCartIdHandler(s.ProxyHandler(s.HandleSetDelivery))))
-mux.HandleFunc("DELETE /delivery/{deliveryId}", ErrorHandler(CookieCartIdHandler(s.HandleRemoveDelivery)))
+mux.HandleFunc("DELETE /delivery/{deliveryId}", ErrorHandler(CookieCartIdHandler(s.ProxyHandler(s.HandleRemoveDelivery))))
-mux.HandleFunc("PUT /delivery/{deliveryId}/pickupPoint", ErrorHandler(CookieCartIdHandler(s.HandleSetPickupPoint)))
+mux.HandleFunc("PUT /delivery/{deliveryId}/pickupPoint", ErrorHandler(CookieCartIdHandler(s.ProxyHandler(s.HandleSetPickupPoint))))
-mux.HandleFunc("GET /checkout", ErrorHandler(CookieCartIdHandler(s.HandleCheckout)))
+mux.HandleFunc("GET /checkout", ErrorHandler(CookieCartIdHandler(s.ProxyHandler(s.HandleCheckout))))
-mux.HandleFunc("GET /confirmation/{orderId}", ErrorHandler(CookieCartIdHandler(s.HandleConfirmation)))
+mux.HandleFunc("GET /confirmation/{orderId}", ErrorHandler(CookieCartIdHandler(s.ProxyHandler(s.HandleConfirmation))))
-mux.HandleFunc("GET /byid/{id}", ErrorHandler(CartIdHandler(s.HandleGet)))
+mux.HandleFunc("GET /byid/{id}", ErrorHandler(CartIdHandler(s.ProxyHandler(s.HandleGet))))
-mux.HandleFunc("GET /byid/{id}/add/{sku}", ErrorHandler(CartIdHandler(s.HandleAddSku)))
+mux.HandleFunc("GET /byid/{id}/add/{sku}", ErrorHandler(CartIdHandler(s.ProxyHandler(s.HandleAddSku))))
-mux.HandleFunc("POST /byid/{id}", ErrorHandler(CartIdHandler(s.HandleAddRequest)))
+mux.HandleFunc("POST /byid/{id}", ErrorHandler(CartIdHandler(s.ProxyHandler(s.HandleAddRequest))))
-mux.HandleFunc("DELETE /byid/{id}/{itemId}", ErrorHandler(CartIdHandler(s.HandleDeleteItem)))
+mux.HandleFunc("DELETE /byid/{id}/{itemId}", ErrorHandler(CartIdHandler(s.ProxyHandler(s.HandleDeleteItem))))
-mux.HandleFunc("PUT /byid/{id}", ErrorHandler(CartIdHandler(s.HandleQuantityChange)))
+mux.HandleFunc("PUT /byid/{id}", ErrorHandler(CartIdHandler(s.ProxyHandler(s.HandleQuantityChange))))
-mux.HandleFunc("POST /byid/{id}/delivery", ErrorHandler(CartIdHandler(s.HandleSetDelivery)))
+mux.HandleFunc("POST /byid/{id}/delivery", ErrorHandler(CartIdHandler(s.ProxyHandler(s.HandleSetDelivery))))
-mux.HandleFunc("DELETE /byid/{id}/delivery/{deliveryId}", ErrorHandler(CartIdHandler(s.HandleRemoveDelivery)))
+mux.HandleFunc("DELETE /byid/{id}/delivery/{deliveryId}", ErrorHandler(CartIdHandler(s.ProxyHandler(s.HandleRemoveDelivery))))
-mux.HandleFunc("PUT /byid/{id}/delivery/{deliveryId}/pickupPoint", ErrorHandler(CartIdHandler(s.HandleSetPickupPoint)))
+mux.HandleFunc("PUT /byid/{id}/delivery/{deliveryId}/pickupPoint", ErrorHandler(CartIdHandler(s.ProxyHandler(s.HandleSetPickupPoint))))
-mux.HandleFunc("GET /byid/{id}/checkout", ErrorHandler(CartIdHandler(s.HandleCheckout)))
+mux.HandleFunc("GET /byid/{id}/checkout", ErrorHandler(CartIdHandler(s.ProxyHandler(s.HandleCheckout))))
-mux.HandleFunc("GET /byid/{id}/confirmation", ErrorHandler(CartIdHandler(s.HandleConfirmation)))
+mux.HandleFunc("GET /byid/{id}/confirmation", ErrorHandler(CartIdHandler(s.ProxyHandler(s.HandleConfirmation))))
return mux
}
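// Illustrative sketch only (not the repository's actual ProxyHandler). The
// wrapped handler signature, the cartHandlerSketchFunc type and the pool's
// OwnerHost/Hostname methods are assumptions taken from the commented-out
// ownership hook above; "io" and "net/http" imports are assumed. The idea:
// handle locally owned carts in-process and forward everything else to the
// owning host, relaying its response.
type cartHandlerSketchFunc func(cartId CartId, w http.ResponseWriter, r *http.Request) error

func (s *PoolServer) proxyHandlerSketch(next cartHandlerSketchFunc) cartHandlerSketchFunc {
	return func(cartId CartId, w http.ResponseWriter, r *http.Request) error {
		owner := s.pool.OwnerHost(cartId)
		if owner == "" || owner == s.pool.Hostname() {
			// Local (or newly claimed) cart: run the wrapped handler directly.
			return next(cartId, w, r)
		}
		// Remote owner: replay the request against the owning host.
		target := "http://" + owner + r.URL.RequestURI()
		req, err := http.NewRequestWithContext(r.Context(), r.Method, target, r.Body)
		if err != nil {
			return err
		}
		req.Header = r.Header.Clone()
		req.Header.Set("X-Forwarded-Host", r.Host)
		req.Header.Set("X-Cart-Id", cartId.String())
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return err
		}
		defer resp.Body.Close()
		for k, vals := range resp.Header {
			for _, v := range vals {
				w.Header().Add(k, v)
			}
		}
		w.WriteHeader(resp.StatusCode)
		_, err = io.Copy(w, resp.Body)
		return err
	}
}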

File diff suppressed because it is too large


@@ -1,187 +0,0 @@
syntax = "proto3";
package messages;
option go_package = "git.tornberg.me/go-cart-actor/proto;messages";
import "messages.proto";
// -----------------------------------------------------------------------------
// Cart Actor gRPC API (Breaking v2 - Per-Mutation RPCs)
// -----------------------------------------------------------------------------
// This version removes the previous MutationEnvelope + Mutate RPC.
// Each mutation now has its own request wrapper and dedicated RPC method
// providing simpler, type-focused client stubs and enabling per-mutation
// metrics, auth and rate limiting.
//
// Regenerate Go code after editing:
// protoc --go_out=. --go_opt=paths=source_relative \
// --go-grpc_out=. --go-grpc_opt=paths=source_relative \
// proto/cart_actor.proto proto/messages.proto
//
// Backward compatibility: This is a breaking change (old clients must update).
// -----------------------------------------------------------------------------
// Shared reply for all mutation RPCs.
message CartMutationReply {
int32 status_code = 1; // HTTP-like status (200 success, 4xx client, 5xx server)
oneof result {
CartState state = 2; // Updated cart state on success
string error = 3; // Error message on failure
}
int64 server_timestamp = 4; // Server-assigned Unix timestamp (optional auditing)
}
// Fetch current cart state without mutation.
message StateRequest {
string cart_id = 1;
}
message StateReply {
int32 status_code = 1;
oneof result {
CartState state = 2;
string error = 3;
}
}
// Per-mutation request wrappers. We wrap the existing inner mutation
// messages (defined in messages.proto) to add cart_id + optional metadata
// without altering the inner message definitions.
message AddRequestRequest {
string cart_id = 1;
int64 client_timestamp = 2;
AddRequest payload = 10;
}
message AddItemRequest {
string cart_id = 1;
int64 client_timestamp = 2;
AddItem payload = 10;
}
message RemoveItemRequest {
string cart_id = 1;
int64 client_timestamp = 2;
RemoveItem payload = 10;
}
message RemoveDeliveryRequest {
string cart_id = 1;
int64 client_timestamp = 2;
RemoveDelivery payload = 10;
}
message ChangeQuantityRequest {
string cart_id = 1;
int64 client_timestamp = 2;
ChangeQuantity payload = 10;
}
message SetDeliveryRequest {
string cart_id = 1;
int64 client_timestamp = 2;
SetDelivery payload = 10;
}
message SetPickupPointRequest {
string cart_id = 1;
int64 client_timestamp = 2;
SetPickupPoint payload = 10;
}
message CreateCheckoutOrderRequest {
string cart_id = 1;
int64 client_timestamp = 2;
CreateCheckoutOrder payload = 10;
}
message SetCartItemsRequest {
string cart_id = 1;
int64 client_timestamp = 2;
SetCartRequest payload = 10;
}
message OrderCompletedRequest {
string cart_id = 1;
int64 client_timestamp = 2;
OrderCreated payload = 10;
}
// Excerpt: updated messages for camelCase JSON output
message CartState {
string id = 1; // was cart_id
repeated CartItemState items = 2;
int64 totalPrice = 3; // was total_price
int64 totalTax = 4; // was total_tax
int64 totalDiscount = 5; // was total_discount
repeated DeliveryState deliveries = 6;
bool paymentInProgress = 7; // was payment_in_progress
string orderReference = 8; // was order_reference
string paymentStatus = 9; // was payment_status
bool processing = 10; // NEW (mirrors legacy CartGrain.processing)
}
message CartItemState {
int64 id = 1;
int64 itemId = 2; // was source_item_id
string sku = 3;
string name = 4;
int64 price = 5; // was unit_price
int32 qty = 6; // was quantity
int64 totalPrice = 7; // was total_price
int64 totalTax = 8; // was total_tax
int64 orgPrice = 9; // was org_price
int32 taxRate = 10; // was tax_rate
int64 totalDiscount = 11;
string brand = 12;
string category = 13;
string category2 = 14;
string category3 = 15;
string category4 = 16;
string category5 = 17;
string image = 18;
string type = 19; // was article_type
string sellerId = 20; // was seller_id
string sellerName = 21; // was seller_name
string disclaimer = 22;
string outlet = 23;
string storeId = 24; // was store_id
int32 stock = 25;
}
message DeliveryState {
int64 id = 1;
string provider = 2;
int64 price = 3;
repeated int64 items = 4; // was item_ids
PickupPoint pickupPoint = 5; // was pickup_point
}
// (CheckoutRequest / CheckoutReply removed - checkout handled at HTTP layer)
// -----------------------------------------------------------------------------
// Service definition (per-mutation RPCs + checkout)
// -----------------------------------------------------------------------------
service CartActor {
rpc AddRequest(AddRequestRequest) returns (CartMutationReply);
rpc AddItem(AddItemRequest) returns (CartMutationReply);
rpc RemoveItem(RemoveItemRequest) returns (CartMutationReply);
rpc RemoveDelivery(RemoveDeliveryRequest) returns (CartMutationReply);
rpc ChangeQuantity(ChangeQuantityRequest) returns (CartMutationReply);
rpc SetDelivery(SetDeliveryRequest) returns (CartMutationReply);
rpc SetPickupPoint(SetPickupPointRequest) returns (CartMutationReply);
// (Checkout RPC removed - handled externally)
rpc SetCartItems(SetCartItemsRequest) returns (CartMutationReply);
rpc OrderCompleted(OrderCompletedRequest) returns (CartMutationReply);
rpc GetState(StateRequest) returns (StateReply);
}
// -----------------------------------------------------------------------------
// Future enhancements:
// * BatchMutate RPC (repeated heterogeneous mutations)
// * Streaming state updates (WatchState)
// * Versioning / optimistic concurrency control
// -----------------------------------------------------------------------------
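// Hypothetical caller sketch for the (now removed) CartActor service above,
// mirroring how remote_grain_grpc.go consumed it: issue a per-mutation RPC and
// branch on the CartMutationReply oneof. Assumes the generated package is
// imported as "messages" alongside context, fmt, log, time and
// google.golang.org/grpc; the AddItem payload fields live in messages.proto
// (not shown) and are left empty here.
func addItemSketch(ctx context.Context, conn *grpc.ClientConn, cartId string) error {
	client := messages.NewCartActorClient(conn)
	reply, err := client.AddItem(ctx, &messages.AddItemRequest{
		CartId:          cartId,
		ClientTimestamp: time.Now().Unix(),
		Payload:         &messages.AddItem{}, // fields omitted; see messages.proto
	})
	if err != nil {
		return err
	}
	if reply.StatusCode < 200 || reply.StatusCode >= 300 {
		return fmt.Errorf("mutation failed %d: %s", reply.StatusCode, reply.GetError())
	}
	if state := reply.GetState(); state != nil { // success branch of the oneof
		log.Printf("cart %s now has %d items", state.Id, len(state.Items))
	}
	return nil
}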


@@ -1,473 +0,0 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.5.1
// - protoc v3.21.12
// source: cart_actor.proto
package messages
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.64.0 or later.
const _ = grpc.SupportPackageIsVersion9
const (
CartActor_AddRequest_FullMethodName = "/messages.CartActor/AddRequest"
CartActor_AddItem_FullMethodName = "/messages.CartActor/AddItem"
CartActor_RemoveItem_FullMethodName = "/messages.CartActor/RemoveItem"
CartActor_RemoveDelivery_FullMethodName = "/messages.CartActor/RemoveDelivery"
CartActor_ChangeQuantity_FullMethodName = "/messages.CartActor/ChangeQuantity"
CartActor_SetDelivery_FullMethodName = "/messages.CartActor/SetDelivery"
CartActor_SetPickupPoint_FullMethodName = "/messages.CartActor/SetPickupPoint"
CartActor_SetCartItems_FullMethodName = "/messages.CartActor/SetCartItems"
CartActor_OrderCompleted_FullMethodName = "/messages.CartActor/OrderCompleted"
CartActor_GetState_FullMethodName = "/messages.CartActor/GetState"
)
// CartActorClient is the client API for CartActor service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
//
// -----------------------------------------------------------------------------
// Service definition (per-mutation RPCs + checkout)
// -----------------------------------------------------------------------------
type CartActorClient interface {
AddRequest(ctx context.Context, in *AddRequestRequest, opts ...grpc.CallOption) (*CartMutationReply, error)
AddItem(ctx context.Context, in *AddItemRequest, opts ...grpc.CallOption) (*CartMutationReply, error)
RemoveItem(ctx context.Context, in *RemoveItemRequest, opts ...grpc.CallOption) (*CartMutationReply, error)
RemoveDelivery(ctx context.Context, in *RemoveDeliveryRequest, opts ...grpc.CallOption) (*CartMutationReply, error)
ChangeQuantity(ctx context.Context, in *ChangeQuantityRequest, opts ...grpc.CallOption) (*CartMutationReply, error)
SetDelivery(ctx context.Context, in *SetDeliveryRequest, opts ...grpc.CallOption) (*CartMutationReply, error)
SetPickupPoint(ctx context.Context, in *SetPickupPointRequest, opts ...grpc.CallOption) (*CartMutationReply, error)
// (Checkout RPC removed - handled externally)
SetCartItems(ctx context.Context, in *SetCartItemsRequest, opts ...grpc.CallOption) (*CartMutationReply, error)
OrderCompleted(ctx context.Context, in *OrderCompletedRequest, opts ...grpc.CallOption) (*CartMutationReply, error)
GetState(ctx context.Context, in *StateRequest, opts ...grpc.CallOption) (*StateReply, error)
}
type cartActorClient struct {
cc grpc.ClientConnInterface
}
func NewCartActorClient(cc grpc.ClientConnInterface) CartActorClient {
return &cartActorClient{cc}
}
func (c *cartActorClient) AddRequest(ctx context.Context, in *AddRequestRequest, opts ...grpc.CallOption) (*CartMutationReply, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(CartMutationReply)
err := c.cc.Invoke(ctx, CartActor_AddRequest_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cartActorClient) AddItem(ctx context.Context, in *AddItemRequest, opts ...grpc.CallOption) (*CartMutationReply, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(CartMutationReply)
err := c.cc.Invoke(ctx, CartActor_AddItem_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cartActorClient) RemoveItem(ctx context.Context, in *RemoveItemRequest, opts ...grpc.CallOption) (*CartMutationReply, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(CartMutationReply)
err := c.cc.Invoke(ctx, CartActor_RemoveItem_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cartActorClient) RemoveDelivery(ctx context.Context, in *RemoveDeliveryRequest, opts ...grpc.CallOption) (*CartMutationReply, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(CartMutationReply)
err := c.cc.Invoke(ctx, CartActor_RemoveDelivery_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cartActorClient) ChangeQuantity(ctx context.Context, in *ChangeQuantityRequest, opts ...grpc.CallOption) (*CartMutationReply, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(CartMutationReply)
err := c.cc.Invoke(ctx, CartActor_ChangeQuantity_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cartActorClient) SetDelivery(ctx context.Context, in *SetDeliveryRequest, opts ...grpc.CallOption) (*CartMutationReply, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(CartMutationReply)
err := c.cc.Invoke(ctx, CartActor_SetDelivery_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cartActorClient) SetPickupPoint(ctx context.Context, in *SetPickupPointRequest, opts ...grpc.CallOption) (*CartMutationReply, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(CartMutationReply)
err := c.cc.Invoke(ctx, CartActor_SetPickupPoint_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cartActorClient) SetCartItems(ctx context.Context, in *SetCartItemsRequest, opts ...grpc.CallOption) (*CartMutationReply, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(CartMutationReply)
err := c.cc.Invoke(ctx, CartActor_SetCartItems_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cartActorClient) OrderCompleted(ctx context.Context, in *OrderCompletedRequest, opts ...grpc.CallOption) (*CartMutationReply, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(CartMutationReply)
err := c.cc.Invoke(ctx, CartActor_OrderCompleted_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cartActorClient) GetState(ctx context.Context, in *StateRequest, opts ...grpc.CallOption) (*StateReply, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(StateReply)
err := c.cc.Invoke(ctx, CartActor_GetState_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
// CartActorServer is the server API for CartActor service.
// All implementations must embed UnimplementedCartActorServer
// for forward compatibility.
//
// -----------------------------------------------------------------------------
// Service definition (per-mutation RPCs + checkout)
// -----------------------------------------------------------------------------
type CartActorServer interface {
AddRequest(context.Context, *AddRequestRequest) (*CartMutationReply, error)
AddItem(context.Context, *AddItemRequest) (*CartMutationReply, error)
RemoveItem(context.Context, *RemoveItemRequest) (*CartMutationReply, error)
RemoveDelivery(context.Context, *RemoveDeliveryRequest) (*CartMutationReply, error)
ChangeQuantity(context.Context, *ChangeQuantityRequest) (*CartMutationReply, error)
SetDelivery(context.Context, *SetDeliveryRequest) (*CartMutationReply, error)
SetPickupPoint(context.Context, *SetPickupPointRequest) (*CartMutationReply, error)
// (Checkout RPC removed - handled externally)
SetCartItems(context.Context, *SetCartItemsRequest) (*CartMutationReply, error)
OrderCompleted(context.Context, *OrderCompletedRequest) (*CartMutationReply, error)
GetState(context.Context, *StateRequest) (*StateReply, error)
mustEmbedUnimplementedCartActorServer()
}
// UnimplementedCartActorServer must be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedCartActorServer struct{}
func (UnimplementedCartActorServer) AddRequest(context.Context, *AddRequestRequest) (*CartMutationReply, error) {
return nil, status.Errorf(codes.Unimplemented, "method AddRequest not implemented")
}
func (UnimplementedCartActorServer) AddItem(context.Context, *AddItemRequest) (*CartMutationReply, error) {
return nil, status.Errorf(codes.Unimplemented, "method AddItem not implemented")
}
func (UnimplementedCartActorServer) RemoveItem(context.Context, *RemoveItemRequest) (*CartMutationReply, error) {
return nil, status.Errorf(codes.Unimplemented, "method RemoveItem not implemented")
}
func (UnimplementedCartActorServer) RemoveDelivery(context.Context, *RemoveDeliveryRequest) (*CartMutationReply, error) {
return nil, status.Errorf(codes.Unimplemented, "method RemoveDelivery not implemented")
}
func (UnimplementedCartActorServer) ChangeQuantity(context.Context, *ChangeQuantityRequest) (*CartMutationReply, error) {
return nil, status.Errorf(codes.Unimplemented, "method ChangeQuantity not implemented")
}
func (UnimplementedCartActorServer) SetDelivery(context.Context, *SetDeliveryRequest) (*CartMutationReply, error) {
return nil, status.Errorf(codes.Unimplemented, "method SetDelivery not implemented")
}
func (UnimplementedCartActorServer) SetPickupPoint(context.Context, *SetPickupPointRequest) (*CartMutationReply, error) {
return nil, status.Errorf(codes.Unimplemented, "method SetPickupPoint not implemented")
}
func (UnimplementedCartActorServer) SetCartItems(context.Context, *SetCartItemsRequest) (*CartMutationReply, error) {
return nil, status.Errorf(codes.Unimplemented, "method SetCartItems not implemented")
}
func (UnimplementedCartActorServer) OrderCompleted(context.Context, *OrderCompletedRequest) (*CartMutationReply, error) {
return nil, status.Errorf(codes.Unimplemented, "method OrderCompleted not implemented")
}
func (UnimplementedCartActorServer) GetState(context.Context, *StateRequest) (*StateReply, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetState not implemented")
}
func (UnimplementedCartActorServer) mustEmbedUnimplementedCartActorServer() {}
func (UnimplementedCartActorServer) testEmbeddedByValue() {}
// UnsafeCartActorServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to CartActorServer will
// result in compilation errors.
type UnsafeCartActorServer interface {
mustEmbedUnimplementedCartActorServer()
}
func RegisterCartActorServer(s grpc.ServiceRegistrar, srv CartActorServer) {
// If the following call panics, it indicates UnimplementedCartActorServer was
// embedded by pointer and is nil. This will cause panics if an
// unimplemented method is ever invoked, so we test this at initialization
// time to prevent it from happening at runtime later due to I/O.
if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
t.testEmbeddedByValue()
}
s.RegisterService(&CartActor_ServiceDesc, srv)
}
func _CartActor_AddRequest_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(AddRequestRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CartActorServer).AddRequest(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: CartActor_AddRequest_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CartActorServer).AddRequest(ctx, req.(*AddRequestRequest))
}
return interceptor(ctx, in, info, handler)
}
func _CartActor_AddItem_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(AddItemRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CartActorServer).AddItem(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: CartActor_AddItem_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CartActorServer).AddItem(ctx, req.(*AddItemRequest))
}
return interceptor(ctx, in, info, handler)
}
func _CartActor_RemoveItem_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(RemoveItemRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CartActorServer).RemoveItem(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: CartActor_RemoveItem_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CartActorServer).RemoveItem(ctx, req.(*RemoveItemRequest))
}
return interceptor(ctx, in, info, handler)
}
func _CartActor_RemoveDelivery_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(RemoveDeliveryRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CartActorServer).RemoveDelivery(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: CartActor_RemoveDelivery_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CartActorServer).RemoveDelivery(ctx, req.(*RemoveDeliveryRequest))
}
return interceptor(ctx, in, info, handler)
}
func _CartActor_ChangeQuantity_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ChangeQuantityRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CartActorServer).ChangeQuantity(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: CartActor_ChangeQuantity_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CartActorServer).ChangeQuantity(ctx, req.(*ChangeQuantityRequest))
}
return interceptor(ctx, in, info, handler)
}
func _CartActor_SetDelivery_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(SetDeliveryRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CartActorServer).SetDelivery(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: CartActor_SetDelivery_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CartActorServer).SetDelivery(ctx, req.(*SetDeliveryRequest))
}
return interceptor(ctx, in, info, handler)
}
func _CartActor_SetPickupPoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(SetPickupPointRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CartActorServer).SetPickupPoint(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: CartActor_SetPickupPoint_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CartActorServer).SetPickupPoint(ctx, req.(*SetPickupPointRequest))
}
return interceptor(ctx, in, info, handler)
}
func _CartActor_SetCartItems_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(SetCartItemsRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CartActorServer).SetCartItems(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: CartActor_SetCartItems_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CartActorServer).SetCartItems(ctx, req.(*SetCartItemsRequest))
}
return interceptor(ctx, in, info, handler)
}
func _CartActor_OrderCompleted_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(OrderCompletedRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CartActorServer).OrderCompleted(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: CartActor_OrderCompleted_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CartActorServer).OrderCompleted(ctx, req.(*OrderCompletedRequest))
}
return interceptor(ctx, in, info, handler)
}
func _CartActor_GetState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(StateRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CartActorServer).GetState(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: CartActor_GetState_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CartActorServer).GetState(ctx, req.(*StateRequest))
}
return interceptor(ctx, in, info, handler)
}
// CartActor_ServiceDesc is the grpc.ServiceDesc for CartActor service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var CartActor_ServiceDesc = grpc.ServiceDesc{
ServiceName: "messages.CartActor",
HandlerType: (*CartActorServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "AddRequest",
Handler: _CartActor_AddRequest_Handler,
},
{
MethodName: "AddItem",
Handler: _CartActor_AddItem_Handler,
},
{
MethodName: "RemoveItem",
Handler: _CartActor_RemoveItem_Handler,
},
{
MethodName: "RemoveDelivery",
Handler: _CartActor_RemoveDelivery_Handler,
},
{
MethodName: "ChangeQuantity",
Handler: _CartActor_ChangeQuantity_Handler,
},
{
MethodName: "SetDelivery",
Handler: _CartActor_SetDelivery_Handler,
},
{
MethodName: "SetPickupPoint",
Handler: _CartActor_SetPickupPoint_Handler,
},
{
MethodName: "SetCartItems",
Handler: _CartActor_SetCartItems_Handler,
},
{
MethodName: "OrderCompleted",
Handler: _CartActor_OrderCompleted_Handler,
},
{
MethodName: "GetState",
Handler: _CartActor_GetState_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "cart_actor.proto",
}
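// Minimal wiring sketch for the generated (and now removed) CartActor service,
// written as if it lived in the same "messages" package: embed
// UnimplementedCartActorServer by value for forward compatibility, override
// only what you implement, then register. The server type, listener and the
// trivial GetState body are illustrative assumptions, not repository code.
type cartActorServerSketch struct {
	UnimplementedCartActorServer
}

func (s *cartActorServerSketch) GetState(ctx context.Context, in *StateRequest) (*StateReply, error) {
	// A real implementation would resolve the grain behind in.CartId; this stub
	// just echoes an empty cart back as a successful reply.
	return &StateReply{
		StatusCode: 200,
		Result:     &StateReply_State{State: &CartState{Id: in.CartId}},
	}, nil
}

func serveCartActorSketch(lis net.Listener) error {
	srv := grpc.NewServer()
	RegisterCartActorServer(srv, &cartActorServerSketch{})
	return srv.Serve(lis)
}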


@@ -204,7 +204,7 @@ func (x *NegotiateReply) GetHosts() []string {
// CartIdsReply returns the list of cart IDs (string form) currently owned locally.
type CartIdsReply struct {
state protoimpl.MessageState `protogen:"open.v1"`
-CartIds []string `protobuf:"bytes,1,rep,name=cart_ids,json=cartIds,proto3" json:"cart_ids,omitempty"`
+CartIds []uint64 `protobuf:"varint,1,rep,packed,name=cart_ids,json=cartIds,proto3" json:"cart_ids,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
@@ -239,7 +239,7 @@ func (*CartIdsReply) Descriptor() ([]byte, []int) {
return file_control_plane_proto_rawDescGZIP(), []int{4}
}
-func (x *CartIdsReply) GetCartIds() []string {
+func (x *CartIdsReply) GetCartIds() []uint64 {
if x != nil {
return x.CartIds
}
@@ -344,6 +344,60 @@ func (x *ClosingNotice) GetHost() string {
return ""
}
// OwnershipAnnounce broadcasts first-touch ownership claims for cart IDs.
// First claim wins; receivers SHOULD NOT overwrite an existing different owner.
type OwnershipAnnounce struct {
state protoimpl.MessageState `protogen:"open.v1"`
Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` // announcing host
CartIds []uint64 `protobuf:"varint,2,rep,packed,name=cart_ids,json=cartIds,proto3" json:"cart_ids,omitempty"` // newly claimed cart ids
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *OwnershipAnnounce) Reset() {
*x = OwnershipAnnounce{}
mi := &file_control_plane_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *OwnershipAnnounce) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*OwnershipAnnounce) ProtoMessage() {}
func (x *OwnershipAnnounce) ProtoReflect() protoreflect.Message {
mi := &file_control_plane_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use OwnershipAnnounce.ProtoReflect.Descriptor instead.
func (*OwnershipAnnounce) Descriptor() ([]byte, []int) {
return file_control_plane_proto_rawDescGZIP(), []int{7}
}
func (x *OwnershipAnnounce) GetHost() string {
if x != nil {
return x.Host
}
return ""
}
func (x *OwnershipAnnounce) GetCartIds() []uint64 {
if x != nil {
return x.CartIds
}
return nil
}
var File_control_plane_proto protoreflect.FileDescriptor
const file_control_plane_proto_rawDesc = "" +
@@ -359,17 +413,21 @@ const file_control_plane_proto_rawDesc = "" +
"\x0eNegotiateReply\x12\x14\n" +
"\x05hosts\x18\x01 \x03(\tR\x05hosts\")\n" +
"\fCartIdsReply\x12\x19\n" +
-"\bcart_ids\x18\x01 \x03(\tR\acartIds\"F\n" +
+"\bcart_ids\x18\x01 \x03(\x04R\acartIds\"F\n" +
"\x0eOwnerChangeAck\x12\x1a\n" +
"\baccepted\x18\x01 \x01(\bR\baccepted\x12\x18\n" +
"\amessage\x18\x02 \x01(\tR\amessage\"#\n" +
"\rClosingNotice\x12\x12\n" +
-"\x04host\x18\x01 \x01(\tR\x04host2\xf4\x01\n" +
+"\x04host\x18\x01 \x01(\tR\x04host\"B\n" +
"\x11OwnershipAnnounce\x12\x12\n" +
"\x04host\x18\x01 \x01(\tR\x04host\x12\x19\n" +
"\bcart_ids\x18\x02 \x03(\x04R\acartIds2\xc0\x02\n" +
"\fControlPlane\x12,\n" +
"\x04Ping\x12\x0f.messages.Empty\x1a\x13.messages.PingReply\x12A\n" +
"\tNegotiate\x12\x1a.messages.NegotiateRequest\x1a\x18.messages.NegotiateReply\x125\n" +
"\n" +
-"GetCartIds\x12\x0f.messages.Empty\x1a\x16.messages.CartIdsReply\x12<\n" +
+"GetCartIds\x12\x0f.messages.Empty\x1a\x16.messages.CartIdsReply\x12J\n" +
"\x11AnnounceOwnership\x12\x1b.messages.OwnershipAnnounce\x1a\x18.messages.OwnerChangeAck\x12<\n" +
"\aClosing\x12\x17.messages.ClosingNotice\x1a\x18.messages.OwnerChangeAckB.Z,git.tornberg.me/go-cart-actor/proto;messagesb\x06proto3"
var (
@@ -384,7 +442,7 @@ func file_control_plane_proto_rawDescGZIP() []byte {
return file_control_plane_proto_rawDescData
}
-var file_control_plane_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
+var file_control_plane_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
var file_control_plane_proto_goTypes = []any{
(*Empty)(nil), // 0: messages.Empty
(*PingReply)(nil), // 1: messages.PingReply
@@ -393,18 +451,21 @@ var file_control_plane_proto_goTypes = []any{
(*CartIdsReply)(nil), // 4: messages.CartIdsReply
(*OwnerChangeAck)(nil), // 5: messages.OwnerChangeAck
(*ClosingNotice)(nil), // 6: messages.ClosingNotice
(*OwnershipAnnounce)(nil), // 7: messages.OwnershipAnnounce
}
var file_control_plane_proto_depIdxs = []int32{
0, // 0: messages.ControlPlane.Ping:input_type -> messages.Empty
2, // 1: messages.ControlPlane.Negotiate:input_type -> messages.NegotiateRequest
0, // 2: messages.ControlPlane.GetCartIds:input_type -> messages.Empty
-6, // 3: messages.ControlPlane.Closing:input_type -> messages.ClosingNotice
-1, // 4: messages.ControlPlane.Ping:output_type -> messages.PingReply
-3, // 5: messages.ControlPlane.Negotiate:output_type -> messages.NegotiateReply
-4, // 6: messages.ControlPlane.GetCartIds:output_type -> messages.CartIdsReply
-5, // 7: messages.ControlPlane.Closing:output_type -> messages.OwnerChangeAck
-4, // [4:8] is the sub-list for method output_type
-0, // [0:4] is the sub-list for method input_type
+7, // 3: messages.ControlPlane.AnnounceOwnership:input_type -> messages.OwnershipAnnounce
+6, // 4: messages.ControlPlane.Closing:input_type -> messages.ClosingNotice
+1, // 5: messages.ControlPlane.Ping:output_type -> messages.PingReply
+3, // 6: messages.ControlPlane.Negotiate:output_type -> messages.NegotiateReply
+4, // 7: messages.ControlPlane.GetCartIds:output_type -> messages.CartIdsReply
+5, // 8: messages.ControlPlane.AnnounceOwnership:output_type -> messages.OwnerChangeAck
+5, // 9: messages.ControlPlane.Closing:output_type -> messages.OwnerChangeAck
5, // [5:10] is the sub-list for method output_type
0, // [0:5] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
@@ -421,7 +482,7 @@ func file_control_plane_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_control_plane_proto_rawDesc), len(file_control_plane_proto_rawDesc)),
NumEnums: 0,
-NumMessages: 7,
+NumMessages: 8,
NumExtensions: 0,
NumServices: 1,
},


@@ -38,7 +38,7 @@ message NegotiateReply {
// CartIdsReply returns the list of cart IDs (string form) currently owned locally.
message CartIdsReply {
-repeated string cart_ids = 1;
+repeated uint64 cart_ids = 1;
}
// OwnerChangeAck retained as response type for Closing RPC (ConfirmOwner removed).
@@ -52,6 +52,13 @@ message ClosingNotice {
string host = 1;
}
// OwnershipAnnounce broadcasts first-touch ownership claims for cart IDs.
// First claim wins; receivers SHOULD NOT overwrite an existing different owner.
message OwnershipAnnounce {
string host = 1; // announcing host
repeated uint64 cart_ids = 2; // newly claimed cart ids
}
// ControlPlane defines cluster coordination and ownership operations.
service ControlPlane {
// Ping for liveness; lightweight health signal.
@@ -65,6 +72,9 @@ service ControlPlane {
// ConfirmOwner RPC removed (was legacy ownership acknowledgement; ring-based ownership now authoritative)
// Ownership announcement: first-touch claim broadcast (idempotent; best-effort).
rpc AnnounceOwnership(OwnershipAnnounce) returns (OwnerChangeAck);
// Closing announces graceful shutdown so peers can proactively adjust.
rpc Closing(ClosingNotice) returns (OwnerChangeAck);
}
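// Receiver-side sketch of the "first claim wins" rule documented above. The
// ownership map, mutex and server type are illustrative assumptions (the
// pool's real bookkeeping is not shown here); generated types are assumed to
// be imported as "messages", plus context and sync.
type ownershipServerSketch struct {
	messages.UnimplementedControlPlaneServer
	mu     sync.Mutex
	owners map[uint64]string // cart id -> owning host
}

func (s *ownershipServerSketch) AnnounceOwnership(ctx context.Context, in *messages.OwnershipAnnounce) (*messages.OwnerChangeAck, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.owners == nil {
		s.owners = make(map[uint64]string)
	}
	for _, id := range in.GetCartIds() {
		if _, taken := s.owners[id]; taken {
			continue // never overwrite an existing claim, even one from a different host
		}
		s.owners[id] = in.GetHost()
	}
	return &messages.OwnerChangeAck{Accepted: true}, nil
}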


@@ -22,6 +22,7 @@ const (
ControlPlane_Ping_FullMethodName = "/messages.ControlPlane/Ping"
ControlPlane_Negotiate_FullMethodName = "/messages.ControlPlane/Negotiate"
ControlPlane_GetCartIds_FullMethodName = "/messages.ControlPlane/GetCartIds"
ControlPlane_AnnounceOwnership_FullMethodName = "/messages.ControlPlane/AnnounceOwnership"
ControlPlane_Closing_FullMethodName = "/messages.ControlPlane/Closing"
)
@@ -37,6 +38,8 @@ type ControlPlaneClient interface {
Negotiate(ctx context.Context, in *NegotiateRequest, opts ...grpc.CallOption) (*NegotiateReply, error)
// GetCartIds lists currently owned cart IDs on this node.
GetCartIds(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*CartIdsReply, error)
// Ownership announcement: first-touch claim broadcast (idempotent; best-effort).
AnnounceOwnership(ctx context.Context, in *OwnershipAnnounce, opts ...grpc.CallOption) (*OwnerChangeAck, error)
// Closing announces graceful shutdown so peers can proactively adjust.
Closing(ctx context.Context, in *ClosingNotice, opts ...grpc.CallOption) (*OwnerChangeAck, error)
}
@@ -79,6 +82,16 @@ func (c *controlPlaneClient) GetCartIds(ctx context.Context, in *Empty, opts ...
return out, nil
}
func (c *controlPlaneClient) AnnounceOwnership(ctx context.Context, in *OwnershipAnnounce, opts ...grpc.CallOption) (*OwnerChangeAck, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(OwnerChangeAck)
err := c.cc.Invoke(ctx, ControlPlane_AnnounceOwnership_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *controlPlaneClient) Closing(ctx context.Context, in *ClosingNotice, opts ...grpc.CallOption) (*OwnerChangeAck, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(OwnerChangeAck)
@@ -101,6 +114,8 @@ type ControlPlaneServer interface {
Negotiate(context.Context, *NegotiateRequest) (*NegotiateReply, error)
// GetCartIds lists currently owned cart IDs on this node.
GetCartIds(context.Context, *Empty) (*CartIdsReply, error)
// Ownership announcement: first-touch claim broadcast (idempotent; best-effort).
AnnounceOwnership(context.Context, *OwnershipAnnounce) (*OwnerChangeAck, error)
// Closing announces graceful shutdown so peers can proactively adjust.
Closing(context.Context, *ClosingNotice) (*OwnerChangeAck, error)
mustEmbedUnimplementedControlPlaneServer()
@@ -122,6 +137,9 @@ func (UnimplementedControlPlaneServer) Negotiate(context.Context, *NegotiateRequ
func (UnimplementedControlPlaneServer) GetCartIds(context.Context, *Empty) (*CartIdsReply, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetCartIds not implemented")
}
func (UnimplementedControlPlaneServer) AnnounceOwnership(context.Context, *OwnershipAnnounce) (*OwnerChangeAck, error) {
return nil, status.Errorf(codes.Unimplemented, "method AnnounceOwnership not implemented")
}
func (UnimplementedControlPlaneServer) Closing(context.Context, *ClosingNotice) (*OwnerChangeAck, error) {
return nil, status.Errorf(codes.Unimplemented, "method Closing not implemented")
}
@@ -200,6 +218,24 @@ func _ControlPlane_GetCartIds_Handler(srv interface{}, ctx context.Context, dec
return interceptor(ctx, in, info, handler)
}
func _ControlPlane_AnnounceOwnership_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(OwnershipAnnounce)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ControlPlaneServer).AnnounceOwnership(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: ControlPlane_AnnounceOwnership_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControlPlaneServer).AnnounceOwnership(ctx, req.(*OwnershipAnnounce))
}
return interceptor(ctx, in, info, handler)
}
func _ControlPlane_Closing_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ClosingNotice)
if err := dec(in); err != nil {
@@ -237,6 +273,10 @@ var ControlPlane_ServiceDesc = grpc.ServiceDesc{
MethodName: "GetCartIds",
Handler: _ControlPlane_GetCartIds_Handler,
},
{
MethodName: "AnnounceOwnership",
Handler: _ControlPlane_AnnounceOwnership_Handler,
},
{
MethodName: "Closing",
Handler: _ControlPlane_Closing_Handler,
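// Sender-side counterpart (best-effort, illustrative): after first-touch
// claiming carts locally, announce them to every known peer. The peers map,
// timeout and logging are assumptions; errors are ignored because the
// announcement is advisory and idempotent. Assumes context, log, time and the
// generated "messages" package.
func announceOwnershipSketch(peers map[string]messages.ControlPlaneClient, selfHost string, claimed []uint64) {
	for peer, client := range peers {
		ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
		ack, err := client.AnnounceOwnership(ctx, &messages.OwnershipAnnounce{Host: selfHost, CartIds: claimed})
		cancel()
		if err != nil {
			log.Printf("announce to %s failed: %v", peer, err)
			continue
		}
		if !ack.Accepted {
			log.Printf("announce to %s rejected: %s", peer, ack.Message)
		}
	}
}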


@@ -1,341 +0,0 @@
package main
import (
"context"
"fmt"
"log"
"time"
proto "git.tornberg.me/go-cart-actor/proto" // generated package name is 'messages'; aliased as proto for consistency
"google.golang.org/grpc"
)
// RemoteGrainGRPC is the gRPC-backed implementation of a remote grain.
// It mirrors the previous RemoteGrain (TCP/frame based) while using the
// new CartActor gRPC service. It implements the Grain interface so that
// SyncedPool can remain largely unchanged when swapping transport layers.
type RemoteGrainGRPC struct {
Id CartId
Host string
client proto.CartActorClient
// Optional: keep the underlying conn so higher-level code can close if needed
conn *grpc.ClientConn
// Per-call timeout settings (tunable)
mutateTimeout time.Duration
stateTimeout time.Duration
}
// NewRemoteGrainGRPC constructs a remote grain adapter from an existing gRPC client.
func NewRemoteGrainGRPC(id CartId, host string, client proto.CartActorClient) *RemoteGrainGRPC {
return &RemoteGrainGRPC{
Id: id,
Host: host,
client: client,
mutateTimeout: 800 * time.Millisecond,
stateTimeout: 400 * time.Millisecond,
}
}
// NewRemoteGrainGRPCWithConn dials the target and creates the gRPC client.
// target should be host:port (where the CartActor service is exposed).
func NewRemoteGrainGRPCWithConn(id CartId, host string, target string, dialOpts ...grpc.DialOption) (*RemoteGrainGRPC, error) {
// NOTE: insecure for initial migration; should be replaced with TLS later.
baseOpts := []grpc.DialOption{grpc.WithInsecure(), grpc.WithBlock()}
baseOpts = append(baseOpts, dialOpts...)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
conn, err := grpc.DialContext(ctx, target, baseOpts...)
if err != nil {
return nil, err
}
client := proto.NewCartActorClient(conn)
return &RemoteGrainGRPC{
Id: id,
Host: host,
client: client,
conn: conn,
mutateTimeout: 800 * time.Millisecond,
stateTimeout: 400 * time.Millisecond,
}, nil
}
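// Usage sketch for this (now removed) adapter, using the legacy CartId helpers
// it relied on: dial the owning node, apply one typed mutation, read back the
// state, then close. The target address and the empty AddItem payload (fields
// defined in messages.proto, not shown) are placeholders.
func remoteGrainUsageSketch() error {
	grain, err := NewRemoteGrainGRPCWithConn(ToCartId("cart-123"), "node-b", "node-b:9090")
	if err != nil {
		return err
	}
	defer grain.Close()
	if _, err := grain.Apply(&proto.AddItem{}, false); err != nil { // fields omitted
		return err
	}
	state, err := grain.GetCurrentState()
	if err != nil {
		return err
	}
	log.Printf("remote cart %s has %d items", state.Id.String(), len(state.Items))
	return nil
}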
func (g *RemoteGrainGRPC) GetId() CartId {
return g.Id
}
// Apply executes a cart mutation via per-mutation RPCs (breaking v2 API)
// and returns a *CartGrain reconstructed from the CartMutationReply state.
func (g *RemoteGrainGRPC) Apply(content interface{}, isReplay bool) (*CartGrain, error) {
if isReplay {
return nil, fmt.Errorf("replay not supported for remote grains")
}
if content == nil {
return nil, fmt.Errorf("nil mutation content")
}
ts := time.Now().Unix()
var invoke func(ctx context.Context) (*proto.CartMutationReply, error)
switch m := content.(type) {
case *proto.AddRequest:
invoke = func(ctx context.Context) (*proto.CartMutationReply, error) {
return g.client.AddRequest(ctx, &proto.AddRequestRequest{
CartId: g.Id.String(),
ClientTimestamp: ts,
Payload: m,
})
}
case *proto.AddItem:
invoke = func(ctx context.Context) (*proto.CartMutationReply, error) {
return g.client.AddItem(ctx, &proto.AddItemRequest{
CartId: g.Id.String(),
ClientTimestamp: ts,
Payload: m,
})
}
case *proto.RemoveItem:
invoke = func(ctx context.Context) (*proto.CartMutationReply, error) {
return g.client.RemoveItem(ctx, &proto.RemoveItemRequest{
CartId: g.Id.String(),
ClientTimestamp: ts,
Payload: m,
})
}
case *proto.RemoveDelivery:
invoke = func(ctx context.Context) (*proto.CartMutationReply, error) {
return g.client.RemoveDelivery(ctx, &proto.RemoveDeliveryRequest{
CartId: g.Id.String(),
ClientTimestamp: ts,
Payload: m,
})
}
case *proto.ChangeQuantity:
invoke = func(ctx context.Context) (*proto.CartMutationReply, error) {
return g.client.ChangeQuantity(ctx, &proto.ChangeQuantityRequest{
CartId: g.Id.String(),
ClientTimestamp: ts,
Payload: m,
})
}
case *proto.SetDelivery:
invoke = func(ctx context.Context) (*proto.CartMutationReply, error) {
return g.client.SetDelivery(ctx, &proto.SetDeliveryRequest{
CartId: g.Id.String(),
ClientTimestamp: ts,
Payload: m,
})
}
case *proto.SetPickupPoint:
invoke = func(ctx context.Context) (*proto.CartMutationReply, error) {
return g.client.SetPickupPoint(ctx, &proto.SetPickupPointRequest{
CartId: g.Id.String(),
ClientTimestamp: ts,
Payload: m,
})
}
case *proto.CreateCheckoutOrder:
return nil, fmt.Errorf("CreateCheckoutOrder deprecated: checkout is handled via HTTP endpoint (HandleCheckout)")
case *proto.SetCartRequest:
invoke = func(ctx context.Context) (*proto.CartMutationReply, error) {
return g.client.SetCartItems(ctx, &proto.SetCartItemsRequest{
CartId: g.Id.String(),
ClientTimestamp: ts,
Payload: m,
})
}
case *proto.OrderCreated:
invoke = func(ctx context.Context) (*proto.CartMutationReply, error) {
return g.client.OrderCompleted(ctx, &proto.OrderCompletedRequest{
CartId: g.Id.String(),
ClientTimestamp: ts,
Payload: m,
})
}
default:
return nil, fmt.Errorf("unsupported mutation type %T", content)
}
if invoke == nil {
return nil, fmt.Errorf("no invocation mapped for mutation %T", content)
}
ctx, cancel := context.WithTimeout(context.Background(), g.mutateTimeout)
defer cancel()
resp, err := invoke(ctx)
if err != nil {
return nil, err
}
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
if e := resp.GetError(); e != "" {
return nil, fmt.Errorf("remote mutation failed %d: %s", resp.StatusCode, e)
}
return nil, fmt.Errorf("remote mutation failed %d", resp.StatusCode)
}
state := resp.GetState()
if state == nil {
return nil, fmt.Errorf("mutation reply missing state on success")
}
// Reconstruct a lightweight CartGrain (only fields we expose internally)
grain := &CartGrain{
Id: ToCartId(state.Id),
TotalPrice: state.TotalPrice,
TotalTax: state.TotalTax,
TotalDiscount: state.TotalDiscount,
PaymentInProgress: state.PaymentInProgress,
OrderReference: state.OrderReference,
PaymentStatus: state.PaymentStatus,
}
// Items
for _, it := range state.Items {
if it == nil {
continue
}
outlet := toPtr(it.Outlet)
storeId := toPtr(it.StoreId)
grain.Items = append(grain.Items, &CartItem{
Id: int(it.Id),
ItemId: int(it.ItemId),
Sku: it.Sku,
Name: it.Name,
Price: it.Price,
Quantity: int(it.Qty),
TotalPrice: it.TotalPrice,
TotalTax: it.TotalTax,
OrgPrice: it.OrgPrice,
TaxRate: int(it.TaxRate),
Brand: it.Brand,
Category: it.Category,
Category2: it.Category2,
Category3: it.Category3,
Category4: it.Category4,
Category5: it.Category5,
Image: it.Image,
ArticleType: it.Type,
SellerId: it.SellerId,
SellerName: it.SellerName,
Disclaimer: it.Disclaimer,
Outlet: outlet,
StoreId: storeId,
Stock: StockStatus(it.Stock),
})
}
// Deliveries
for _, d := range state.Deliveries {
if d == nil {
continue
}
intIds := make([]int, 0, len(d.Items))
for _, id := range d.Items {
intIds = append(intIds, int(id))
}
grain.Deliveries = append(grain.Deliveries, &CartDelivery{
Id: int(d.Id),
Provider: d.Provider,
Price: d.Price,
Items: intIds,
PickupPoint: d.PickupPoint,
})
}
return grain, nil
}
// GetCurrentState retrieves the current cart state using the typed StateReply oneof.
func (g *RemoteGrainGRPC) GetCurrentState() (*CartGrain, error) {
ctx, cancel := context.WithTimeout(context.Background(), g.stateTimeout)
defer cancel()
resp, err := g.client.GetState(ctx, &proto.StateRequest{CartId: g.Id.String()})
if err != nil {
return nil, err
}
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
if e := resp.GetError(); e != "" {
return nil, fmt.Errorf("remote get state failed %d: %s", resp.StatusCode, e)
}
return nil, fmt.Errorf("remote get state failed %d", resp.StatusCode)
}
state := resp.GetState()
if state == nil {
return nil, fmt.Errorf("state reply missing state on success")
}
grain := &CartGrain{
Id: ToCartId(state.Id),
TotalPrice: state.TotalPrice,
TotalTax: state.TotalTax,
TotalDiscount: state.TotalDiscount,
PaymentInProgress: state.PaymentInProgress,
OrderReference: state.OrderReference,
PaymentStatus: state.PaymentStatus,
}
for _, it := range state.Items {
if it == nil {
continue
}
outlet := toPtr(it.Outlet)
storeId := toPtr(it.StoreId)
grain.Items = append(grain.Items, &CartItem{
Id: int(it.Id),
ItemId: int(it.ItemId),
Sku: it.Sku,
Name: it.Name,
Price: it.Price,
Quantity: int(it.Qty),
TotalPrice: it.TotalPrice,
TotalTax: it.TotalTax,
OrgPrice: it.OrgPrice,
TaxRate: int(it.TaxRate),
Brand: it.Brand,
Category: it.Category,
Category2: it.Category2,
Category3: it.Category3,
Category4: it.Category4,
Category5: it.Category5,
Image: it.Image,
ArticleType: it.Type,
SellerId: it.SellerId,
SellerName: it.SellerName,
Disclaimer: it.Disclaimer,
Outlet: outlet,
StoreId: storeId,
Stock: StockStatus(it.Stock),
})
}
for _, d := range state.Deliveries {
if d == nil {
continue
}
intIds := make([]int, 0, len(d.Items))
for _, id := range d.Items {
intIds = append(intIds, int(id))
}
grain.Deliveries = append(grain.Deliveries, &CartDelivery{
Id: int(d.Id),
Provider: d.Provider,
Price: d.Price,
Items: intIds,
PickupPoint: d.PickupPoint,
})
}
return grain, nil
}
// Close closes the underlying gRPC connection if this adapter created it.
func (g *RemoteGrainGRPC) Close() error {
if g.conn != nil {
return g.conn.Close()
}
return nil
}
// Debug helper to log operations (optional).
func (g *RemoteGrainGRPC) logf(format string, args ...interface{}) {
log.Printf("[remote-grain-grpc host=%s id=%s] %s", g.Host, g.Id.String(), fmt.Sprintf(format, args...))
}

ring.go

@@ -1,344 +0,0 @@
package main
import (
"encoding/binary"
"fmt"
"hash/fnv"
"sort"
"strings"
"sync"
)
// ring.go
//
// Consistent hashing ring skeleton for future integration.
// --------------------------------------------------------
// This file introduces a minimal, allocation-light consistent hashing structure
// intended to replace per-cart ownership negotiation. It focuses on:
// * Deterministic lookup: O(log V) via binary search
// * Even(ish) distribution using virtual nodes (vnodes)
// * Epoch / fingerprint tracking to detect membership drift
//
// NOT YET WIRED:
// * SyncedPool integration (ownerForCart, lazy migration)
// * Replication factor > 1
// * Persistent state migration
//
// Safe to import now; unused until explicit integration code is added.
//
// Design Notes
// ------------
// - Hosts contribute `vnodesPerHost` virtual nodes. Higher counts smooth
// distribution at cost of memory (V = hosts * vnodesPerHost).
// - Hash of vnode = FNV1a64(host + "#" + index). For improved quality you
// can swap in xxhash or siphash later without changing API (but doing so
// will reshuffle ownership).
// - Cart ownership lookup uses either cartID.Raw() when provided (uniform
// 64-bit space) or falls back to hashing string forms (legacy).
// - Epoch is monotonically increasing; consumers can fence stale results.
//
// Future Extensions
// -----------------
// - Weighted hosts (proportionally more vnodes).
// - Replication: LookupN(h, n) to return primary + replicas.
// - Streaming / diff-based ring updates (gossip).
// - Hash function injection for deterministic test scenarios.
//
// ---------------------------------------------------------------------------
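// Usage sketch tying the notes above to the API below (host names and the
// sample cart key are placeholders; fmt is already imported by this file):
func ringUsageSketch() {
	ring := NewRingBuilder().
		WithEpoch(1).
		WithVnodesPerHost(64).
		WithHosts([]string{"node-a", "node-b", "node-c"}).
		Build()

	// Deterministic O(log V) owner lookup for a legacy string id.
	owner := ring.LookupString("cart-123")
	fmt.Printf("epoch=%d owner=%s fingerprint=%x\n", ring.Epoch, owner.Host, ring.Fingerprint())

	// Primary plus one replica candidate (distinct hosts) for the same key.
	for _, v := range ring.LookupN(hashKeyString("cart-123"), 2) {
		fmt.Println("owner candidate:", v.Host)
	}
}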
// Vnode represents a single virtual node position on the ring.
type Vnode struct {
Hash uint64 // position on the ring
Host string // physical host owning this vnode
Index int // per-host vnode index (0..vnodesPerHost-1)
}
// Ring is an immutable consistent hash ring snapshot.
type Ring struct {
Epoch uint64
Vnodes []Vnode // sorted by Hash
hosts []string
fingerprint uint64 // membership fingerprint (order-independent)
}
// RingBuilder accumulates parameters to construct a Ring.
type RingBuilder struct {
epoch uint64
vnodesPerHost int
hosts []string
}
// NewRingBuilder creates a builder with defaults.
func NewRingBuilder() *RingBuilder {
return &RingBuilder{
vnodesPerHost: 64, // a reasonable default for small clusters
}
}
func (b *RingBuilder) WithEpoch(e uint64) *RingBuilder {
b.epoch = e
return b
}
func (b *RingBuilder) WithVnodesPerHost(n int) *RingBuilder {
if n > 0 {
b.vnodesPerHost = n
}
return b
}
func (b *RingBuilder) WithHosts(hosts []string) *RingBuilder {
uniq := make(map[string]struct{}, len(hosts))
out := make([]string, 0, len(hosts))
for _, h := range hosts {
h = strings.TrimSpace(h)
if h == "" {
continue
}
if _, ok := uniq[h]; ok {
continue
}
uniq[h] = struct{}{}
out = append(out, h)
}
sort.Strings(out)
b.hosts = out
return b
}
func (b *RingBuilder) Build() *Ring {
if len(b.hosts) == 0 {
return &Ring{
Epoch: b.epoch,
Vnodes: nil,
hosts: nil,
fingerprint: 0,
}
}
totalVnodes := len(b.hosts) * b.vnodesPerHost
vnodes := make([]Vnode, 0, totalVnodes)
for _, host := range b.hosts {
for i := 0; i < b.vnodesPerHost; i++ {
h := hashVnode(host, i)
vnodes = append(vnodes, Vnode{
Hash: h,
Host: host,
Index: i,
})
}
}
sort.Slice(vnodes, func(i, j int) bool {
if vnodes[i].Hash == vnodes[j].Hash {
// Tie-break deterministically by host then index to avoid instability
if vnodes[i].Host == vnodes[j].Host {
return vnodes[i].Index < vnodes[j].Index
}
return vnodes[i].Host < vnodes[j].Host
}
return vnodes[i].Hash < vnodes[j].Hash
})
fp := fingerprintHosts(b.hosts)
return &Ring{
Epoch: b.epoch,
Vnodes: vnodes,
hosts: append([]string(nil), b.hosts...),
fingerprint: fp,
}
}
// Hosts returns a copy of the host list (sorted).
func (r *Ring) Hosts() []string {
if len(r.hosts) == 0 {
return nil
}
cp := make([]string, len(r.hosts))
copy(cp, r.hosts)
return cp
}
// Fingerprint returns a hash representing the unordered membership set.
func (r *Ring) Fingerprint() uint64 {
return r.fingerprint
}
// Empty indicates ring has no vnodes.
func (r *Ring) Empty() bool {
return len(r.Vnodes) == 0
}
// Lookup returns the vnode owning a given hash value.
func (r *Ring) Lookup(h uint64) Vnode {
if len(r.Vnodes) == 0 {
return Vnode{}
}
// Binary search: first position with Hash >= h
i := sort.Search(len(r.Vnodes), func(i int) bool {
return r.Vnodes[i].Hash >= h
})
if i == len(r.Vnodes) {
return r.Vnodes[0]
}
return r.Vnodes[i]
}
// LookupID selects owner vnode for a CartID (fast path).
func (r *Ring) LookupID(id CartID) Vnode {
return r.Lookup(id.Raw())
}
// LookupString hashes an arbitrary string and looks up owner.
func (r *Ring) LookupString(s string) Vnode {
return r.Lookup(hashKeyString(s))
}
// LookupN returns up to n distinct host vnodes in ring order
// starting from the primary owner of hash h (for replication).
func (r *Ring) LookupN(h uint64, n int) []Vnode {
if n <= 0 || len(r.Vnodes) == 0 {
return nil
}
if n > len(r.hosts) {
n = len(r.hosts)
}
owners := make([]Vnode, 0, n)
seen := make(map[string]struct{}, n)
start := r.Lookup(h)
// Find index of start (can binary search again or linear scan; since we
// already have start.Hash we do another search for clarity)
i := sort.Search(len(r.Vnodes), func(i int) bool {
return r.Vnodes[i].Hash >= start.Hash
})
if i == len(r.Vnodes) {
i = 0
}
for idx := 0; len(owners) < n && idx < len(r.Vnodes); idx++ {
v := r.Vnodes[(i+idx)%len(r.Vnodes)]
if _, ok := seen[v.Host]; ok {
continue
}
seen[v.Host] = struct{}{}
owners = append(owners, v)
}
return owners
}
// DiffHosts compares this ring's membership to another.
func (r *Ring) DiffHosts(other *Ring) (added []string, removed []string) {
if other == nil {
return r.Hosts(), nil
}
cur := make(map[string]struct{}, len(r.hosts))
for _, h := range r.hosts {
cur[h] = struct{}{}
}
oth := make(map[string]struct{}, len(other.hosts))
for _, h := range other.hosts {
oth[h] = struct{}{}
}
for h := range cur {
if _, ok := oth[h]; !ok {
removed = append(removed, h)
}
}
for h := range oth {
if _, ok := cur[h]; !ok {
added = append(added, h)
}
}
sort.Strings(added)
sort.Strings(removed)
return
}
// ---------------------------- Hash Functions ---------------------------------
func hashVnode(host string, idx int) uint64 {
h := fnv.New64a()
_, _ = h.Write([]byte(host))
_, _ = h.Write([]byte{'#'})
var buf [8]byte
binary.BigEndian.PutUint64(buf[:], uint64(idx))
_, _ = h.Write(buf[:])
return h.Sum64()
}
// hashKeyString provides a stable hash for arbitrary string keys (legacy IDs).
func hashKeyString(s string) uint64 {
h := fnv.New64a()
_, _ = h.Write([]byte(s))
return h.Sum64()
}
// fingerprintHosts produces an order-insensitive hash over the host set.
func fingerprintHosts(hosts []string) uint64 {
if len(hosts) == 0 {
return 0
}
h := fnv.New64a()
for _, host := range hosts {
_, _ = h.Write([]byte(host))
_, _ = h.Write([]byte{0})
}
return h.Sum64()
}
// --------------------------- Thread-Safe Wrapper -----------------------------
//
// RingRef offers atomic swap + read semantics. SyncedPool can embed or hold
// one of these to manage live ring updates safely.
type RingRef struct {
mu sync.RWMutex
ring *Ring
}
func NewRingRef(r *Ring) *RingRef {
return &RingRef{ring: r}
}
func (rr *RingRef) Get() *Ring {
rr.mu.RLock()
r := rr.ring
rr.mu.RUnlock()
return r
}
func (rr *RingRef) Set(r *Ring) {
rr.mu.Lock()
rr.ring = r
rr.mu.Unlock()
}
func (rr *RingRef) LookupID(id CartID) Vnode {
r := rr.Get()
if r == nil {
return Vnode{}
}
return r.LookupID(id)
}
// ----------------------------- Debug Utilities -------------------------------
func (r *Ring) String() string {
var b strings.Builder
fmt.Fprintf(&b, "Ring{epoch=%d vnodes=%d hosts=%d}\n", r.Epoch, len(r.Vnodes), len(r.hosts))
limit := len(r.Vnodes)
if limit > 16 {
limit = 16
}
for i := 0; i < limit; i++ {
v := r.Vnodes[i]
fmt.Fprintf(&b, " %02d hash=%016x host=%s idx=%d\n", i, v.Hash, v.Host, v.Index)
}
if len(r.Vnodes) > limit {
fmt.Fprintf(&b, " ... (%d more)\n", len(r.Vnodes)-limit)
}
return b.String()
}
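
For orientation, a minimal usage sketch of the ring API removed above (host names and the epoch value are placeholders; LookupN only returns replication candidates, nothing consumed them yet):

// exampleRingUsage exercises the removed ring API end to end.
func exampleRingUsage() {
	// Build an immutable snapshot from the current membership.
	ring := NewRingBuilder().
		WithHosts([]string{"cart-0", "cart-1", "cart-2"}).
		WithVnodesPerHost(64).
		WithEpoch(1).
		Build()

	// Deterministic owner lookup for a legacy string id (O(log V) binary search).
	owner := ring.LookupString("cart-abc123")
	fmt.Printf("primary owner: %s (vnode %d)\n", owner.Host, owner.Index)

	// Replication scaffolding: primary plus one distinct replica host.
	for _, v := range ring.LookupN(hashKeyString("cart-abc123"), 2) {
		fmt.Println("candidate:", v.Host)
	}

	// Thread-safe swap on membership change.
	ref := NewRingRef(ring)
	ref.Set(NewRingBuilder().WithHosts([]string{"cart-0", "cart-1"}).WithEpoch(2).Build())
	_ = ref.Get().Fingerprint()
}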

View File

@@ -3,15 +3,19 @@ package main
import ( import (
"context" "context"
"fmt" "fmt"
"io"
"log" "log"
"net/http"
"reflect" "reflect"
"sync" "sync"
"time" "time"
messages "git.tornberg.me/go-cart-actor/proto"
proto "git.tornberg.me/go-cart-actor/proto" proto "git.tornberg.me/go-cart-actor/proto"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/client_golang/prometheus/promauto"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
"k8s.io/apimachinery/pkg/watch" "k8s.io/apimachinery/pkg/watch"
) )
@@ -20,45 +24,81 @@ import (
// //
// Responsibilities: // Responsibilities:
// - Local grain access (delegates to GrainLocalPool) // - Local grain access (delegates to GrainLocalPool)
// - Remote grain proxy management (RemoteGrainGRPC)
// - Cluster membership (AddRemote via discovery + negotiation) // - Cluster membership (AddRemote via discovery + negotiation)
// - Health/ping monitoring & remote removal // - Health/ping monitoring & remote removal
// - Ring based deterministic ownership (no runtime negotiation) // - (Legacy) ring-based ownership removed in first-touch model
// - (Scaffolding) replication factor awareness via ring.LookupN
// //
// Thread-safety: public methods that mutate internal maps lock p.mu (RWMutex). // Thread-safety: public methods that mutate internal maps lock p.mu (RWMutex).
type SyncedPool struct { type SyncedPool struct {
Hostname string LocalHostname string
local *GrainLocalPool local *GrainLocalPool
// New ownership tracking (first-touch / announcement model)
// remoteOwners maps cart id -> owning host (excluding locally owned carts which live in local.grains)
remoteOwners map[CartId]*RemoteHostGRPC
mu sync.RWMutex mu sync.RWMutex
// Remote host state (gRPC only) // Remote host state (gRPC only)
remoteHosts map[string]*RemoteHostGRPC // host -> remote host remoteHosts map[string]*RemoteHostGRPC // host -> remote host
// Remote grain proxies (by cart id)
remoteIndex map[CartId]Grain
// Discovery handler for re-adding hosts after failures // Discovery handler for re-adding hosts after failures
discardedHostHandler *DiscardedHostHandler discardedHostHandler *DiscardedHostHandler
// Consistent hashing ring (immutable snapshot reference)
ringRef *RingRef
// Configuration
vnodesPerHost int
replicationFactor int // RF (>=1). Currently only primary is active; replicas are scaffolding.
} }
// RemoteHostGRPC tracks a remote host's clients & health. // RemoteHostGRPC tracks a remote host's clients & health.
type RemoteHostGRPC struct { type RemoteHostGRPC struct {
Host string Host string
Conn *grpc.ClientConn Conn *grpc.ClientConn
CartClient proto.CartActorClient Transport *http.Transport
Client *http.Client
ControlClient proto.ControlPlaneClient ControlClient proto.ControlPlaneClient
MissedPings int MissedPings int
} }
func (h *RemoteHostGRPC) Name() string {
return h.Host
}
func (h *RemoteHostGRPC) Proxy(id CartId, w http.ResponseWriter, r *http.Request) (bool, error) {
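// NOTE: h.Host is passed verbatim as the request URL, so it must already carry
// the scheme (and port) of the peer's HTTP listener; the incoming request path
// is not appended here.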
req, err := http.NewRequestWithContext(r.Context(), r.Method, h.Host, r.Body)
if err != nil {
http.Error(w, "proxy build error", http.StatusBadGateway)
return true, err
}
for k, v := range r.Header {
for _, vv := range v {
req.Header.Add(k, vv)
}
}
res, err := h.Client.Do(req)
if err != nil {
http.Error(w, "proxy request error", http.StatusBadGateway)
return true, err
}
defer res.Body.Close()
for k, v := range res.Header {
for _, vv := range v {
w.Header().Add(k, vv)
}
}
w.Header().Set("X-Cart-Owner-Routed", "true")
if res.StatusCode >= 200 && res.StatusCode <= 299 {
w.WriteHeader(res.StatusCode)
_, copyErr := io.Copy(w, res.Body)
if copyErr != nil {
return true, copyErr
}
return true, nil
}
return false, fmt.Errorf("proxy response status %d", res.StatusCode)
}
func (r *RemoteHostGRPC) IsHealthy() bool { func (r *RemoteHostGRPC) IsHealthy() bool {
return r.MissedPings < 3 return r.MissedPings < 3
} }
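
To show where Proxy fits, a hedged routing sketch at the HTTP layer. The handler is illustrative (not the project's actual handler) and assumes the Host interface exposes the Name and Proxy methods that RemoteHostGRPC implements above; OwnerHost is defined further down in this file.

// routeToOwner forwards the request to the recorded remote owner, if any.
// It reports whether a response was written; on false the caller should
// handle the cart locally (first-touch claim).
func routeToOwner(pool *SyncedPool, id CartId, w http.ResponseWriter, r *http.Request) bool {
	owner, ok := pool.OwnerHost(id)
	if !ok {
		return false // no remote owner recorded; handle locally
	}
	handled, err := owner.Proxy(id, w, r)
	if err != nil && handled {
		log.Printf("proxy to %s failed after response started: %v", owner.Name(), err)
	}
	return handled // also false on a non-2xx owner response
}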
@@ -68,61 +108,23 @@ var (
Name: "cart_remote_negotiation_total", Name: "cart_remote_negotiation_total",
Help: "The total number of remote negotiations", Help: "The total number of remote negotiations",
}) })
grainSyncCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "cart_grain_sync_total",
Help: "The total number of grain owner changes",
})
connectedRemotes = promauto.NewGauge(prometheus.GaugeOpts{ connectedRemotes = promauto.NewGauge(prometheus.GaugeOpts{
Name: "cart_connected_remotes", Name: "cart_connected_remotes",
Help: "The number of connected remotes", Help: "The number of connected remotes",
}) })
remoteLookupCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "cart_remote_lookup_total",
Help: "The total number of remote lookups (legacy counter)",
})
// Ring / ownership metrics
ringEpoch = promauto.NewGauge(prometheus.GaugeOpts{
Name: "cart_ring_epoch",
Help: "Current consistent hashing ring epoch (fingerprint-based pseudo-epoch)",
})
ringHosts = promauto.NewGauge(prometheus.GaugeOpts{
Name: "cart_ring_hosts",
Help: "Number of hosts currently in the ring",
})
ringVnodes = promauto.NewGauge(prometheus.GaugeOpts{
Name: "cart_ring_vnodes",
Help: "Number of virtual nodes in the ring",
})
ringLookupLocal = promauto.NewCounter(prometheus.CounterOpts{
Name: "cart_ring_lookup_local_total",
Help: "Ring ownership lookups resolved to the local host",
})
ringLookupRemote = promauto.NewCounter(prometheus.CounterOpts{
Name: "cart_ring_lookup_remote_total",
Help: "Ring ownership lookups resolved to a remote host",
})
ringHostShare = promauto.NewGaugeVec(prometheus.GaugeOpts{
Name: "cart_ring_host_share",
Help: "Fractional share of ring vnodes per host",
}, []string{"host"})
cartMutationsTotal = promauto.NewCounter(prometheus.CounterOpts{ cartMutationsTotal = promauto.NewCounter(prometheus.CounterOpts{
Name: "cart_mutations_total", Name: "cart_mutations_total",
Help: "Total number of cart state mutations applied (local + remote routed).", Help: "Total number of cart state mutations applied.",
}) })
cartMutationFailuresTotal = promauto.NewCounter(prometheus.CounterOpts{ cartMutationFailuresTotal = promauto.NewCounter(prometheus.CounterOpts{
Name: "cart_mutation_failures_total", Name: "cart_mutation_failures_total",
Help: "Total number of failed cart state mutations (local apply errors or remote routing failures).", Help: "Total number of failed cart state mutations.",
}) })
cartMutationLatencySeconds = promauto.NewHistogramVec(prometheus.HistogramOpts{ cartMutationLatencySeconds = promauto.NewHistogramVec(prometheus.HistogramOpts{
Name: "cart_mutation_latency_seconds", Name: "cart_mutation_latency_seconds",
Help: "Latency of cart mutations (successful or failed) in seconds.", Help: "Latency of cart mutations in seconds.",
Buckets: prometheus.DefBuckets, Buckets: prometheus.DefBuckets,
}, []string{"mutation"}) }, []string{"mutation"})
cartActiveGrains = promauto.NewGauge(prometheus.GaugeOpts{ cartActiveGrains = promauto.NewGauge(prometheus.GaugeOpts{
Name: "cart_active_grains", Name: "cart_active_grains",
Help: "Number of active (resident) local grains.", Help: "Number of active (resident) local grains.",
@@ -131,17 +133,13 @@ var (
func NewSyncedPool(local *GrainLocalPool, hostname string, discovery Discovery) (*SyncedPool, error) { func NewSyncedPool(local *GrainLocalPool, hostname string, discovery Discovery) (*SyncedPool, error) {
p := &SyncedPool{ p := &SyncedPool{
Hostname: hostname, LocalHostname: hostname,
local: local, local: local,
remoteHosts: make(map[string]*RemoteHostGRPC), remoteHosts: make(map[string]*RemoteHostGRPC),
remoteIndex: make(map[CartId]Grain), remoteOwners: make(map[CartId]*RemoteHostGRPC),
discardedHostHandler: NewDiscardedHostHandler(1338), discardedHostHandler: NewDiscardedHostHandler(1338),
vnodesPerHost: 64, // default smoothing factor; adjust if needed
replicationFactor: 1, // RF scaffold; >1 not yet activating replicas
} }
p.discardedHostHandler.SetReconnectHandler(p.AddRemote) p.discardedHostHandler.SetReconnectHandler(p.AddRemote)
// Initialize empty ring (will be rebuilt after first AddRemote or discovery event)
p.rebuildRing()
if discovery != nil { if discovery != nil {
go func() { go func() {
@@ -180,7 +178,7 @@ func NewSyncedPool(local *GrainLocalPool, hostname string, discovery Discovery)
// AddRemote dials a remote host and initializes grain proxies. // AddRemote dials a remote host and initializes grain proxies.
func (p *SyncedPool) AddRemote(host string) { func (p *SyncedPool) AddRemote(host string) {
if host == "" || host == p.Hostname { if host == "" || host == p.LocalHostname {
return return
} }
@@ -192,15 +190,14 @@ func (p *SyncedPool) AddRemote(host string) {
p.mu.Unlock() p.mu.Unlock()
target := fmt.Sprintf("%s:1337", host) target := fmt.Sprintf("%s:1337", host)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) //ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel() //defer cancel()
conn, err := grpc.DialContext(ctx, target, grpc.WithInsecure(), grpc.WithBlock()) conn, err := grpc.NewClient(target, grpc.WithTransportCredentials(insecure.NewCredentials())) //grpc.DialContext(ctx, target, grpc.WithInsecure(), grpc.WithBlock())
if err != nil { if err != nil {
log.Printf("AddRemote: dial %s failed: %v", target, err) log.Printf("AddRemote: dial %s failed: %v", target, err)
return return
} }
cartClient := proto.NewCartActorClient(conn)
controlClient := proto.NewControlPlaneClient(conn) controlClient := proto.NewControlPlaneClient(conn)
// Health check (Ping) with limited retries // Health check (Ping) with limited retries
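
Unlike the removed blocking DialContext call, grpc.NewClient returns immediately and only connects on the first RPC, so the Ping retry noted above doubles as the readiness probe. If an eager connection is ever wanted again, a hedged sketch (assuming google.golang.org/grpc/connectivity is imported):

// waitReady optionally forces the lazy client connection to become Ready.
func waitReady(conn *grpc.ClientConn, timeout time.Duration) bool {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	conn.Connect() // kick off connection establishment
	for {
		state := conn.GetState()
		if state == connectivity.Ready {
			return true
		}
		if !conn.WaitForStateChange(ctx, state) {
			return false // timed out before the channel became Ready
		}
	}
}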
@@ -220,11 +217,22 @@ func (p *SyncedPool) AddRemote(host string) {
return return
} }
} }
transport := &http.Transport{
MaxIdleConns: 100, // Maximum idle connections
MaxIdleConnsPerHost: 100, // Maximum idle connections per host
IdleConnTimeout: 120 * time.Second, // Timeout for idle connections
}
client := &http.Client{
Transport: transport,
Timeout: 10 * time.Second, // Request timeout
}
remote := &RemoteHostGRPC{ remote := &RemoteHostGRPC{
Host: host, Host: host,
Conn: conn, Conn: conn,
CartClient: cartClient, Transport: transport,
Client: client,
ControlClient: controlClient, ControlClient: controlClient,
MissedPings: 0, MissedPings: 0,
} }
@@ -234,7 +242,7 @@ func (p *SyncedPool) AddRemote(host string) {
p.mu.Unlock() p.mu.Unlock()
connectedRemotes.Set(float64(p.RemoteCount())) connectedRemotes.Set(float64(p.RemoteCount()))
// Rebuild consistent hashing ring including this new host // Rebuild consistent hashing ring including this new host
p.rebuildRing() //p.rebuildRing()
log.Printf("Connected to remote host %s", host) log.Printf("Connected to remote host %s", host)
@@ -253,14 +261,18 @@ func (p *SyncedPool) initializeRemote(remote *RemoteHostGRPC) {
return return
} }
count := 0 count := 0
for _, idStr := range reply.CartIds { // Record remote ownership (first-touch model) instead of spawning remote grain proxies.
if idStr == "" { p.mu.Lock()
continue for _, cid := range reply.CartIds {
// Only set if not already claimed (first claim wins)
if _, exists := p.remoteOwners[CartId(cid)]; !exists {
p.remoteOwners[CartId(cid)] = remote
} }
p.SpawnRemoteGrain(ToCartId(idStr), remote.Host)
count++ count++
} }
log.Printf("Remote %s reported %d grains", remote.Host, count) p.mu.Unlock()
log.Printf("Remote %s reported %d remote-owned carts (ownership cached)", remote.Host, count)
} }
// RemoveHost removes remote host and its grains. // RemoveHost removes remote host and its grains.
@@ -270,10 +282,10 @@ func (p *SyncedPool) RemoveHost(host string) {
if exists { if exists {
delete(p.remoteHosts, host) delete(p.remoteHosts, host)
} }
// remove grains pointing to host // purge remote ownership entries for this host
for id, g := range p.remoteIndex { for id, h := range p.remoteOwners {
if rg, ok := g.(*RemoteGrainGRPC); ok && rg.Host == host { if h.Host == host {
delete(p.remoteIndex, id) delete(p.remoteOwners, id)
} }
} }
p.mu.Unlock() p.mu.Unlock()
@@ -283,7 +295,7 @@ func (p *SyncedPool) RemoveHost(host string) {
} }
connectedRemotes.Set(float64(p.RemoteCount())) connectedRemotes.Set(float64(p.RemoteCount()))
// Rebuild ring after host removal // Rebuild ring after host removal
p.rebuildRing() // p.rebuildRing()
} }
// RemoteCount returns number of tracked remote hosts. // RemoteCount returns number of tracked remote hosts.
@@ -294,7 +306,7 @@ func (p *SyncedPool) RemoteCount() int {
} }
func (p *SyncedPool) IsKnown(host string) bool { func (p *SyncedPool) IsKnown(host string) bool {
if host == p.Hostname { if host == p.LocalHostname {
return true return true
} }
p.mu.RLock() p.mu.RLock()
@@ -354,7 +366,7 @@ func (p *SyncedPool) Negotiate() {
p.mu.RLock() p.mu.RLock()
hosts := make([]string, 0, len(p.remoteHosts)+1) hosts := make([]string, 0, len(p.remoteHosts)+1)
hosts = append(hosts, p.Hostname) hosts = append(hosts, p.LocalHostname)
for h := range p.remoteHosts { for h := range p.remoteHosts {
hosts = append(hosts, h) hosts = append(hosts, h)
} }
@@ -364,8 +376,6 @@ func (p *SyncedPool) Negotiate() {
} }
p.mu.RUnlock() p.mu.RUnlock()
changed := false
for _, r := range remotes { for _, r := range remotes {
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
reply, err := r.ControlClient.Negotiate(ctx, &proto.NegotiateRequest{KnownHosts: hosts}) reply, err := r.ControlClient.Negotiate(ctx, &proto.NegotiateRequest{KnownHosts: hosts})
@@ -377,49 +387,20 @@ func (p *SyncedPool) Negotiate() {
for _, h := range reply.Hosts { for _, h := range reply.Hosts {
if !p.IsKnown(h) { if !p.IsKnown(h) {
p.AddRemote(h) p.AddRemote(h)
changed = true
} }
} }
} }
// If new hosts were discovered during negotiation, rebuild the ring once at the end. // Ring rebuild removed (first-touch ownership model no longer uses ring)
if changed {
p.rebuildRing()
}
} }
// ------------------------- Grain / Ring Ownership ---------------------------- // ------------------------- Grain / Ring Ownership ----------------------------
// RemoveRemoteGrain removes a remote grain mapping. // RemoveRemoteGrain obsolete in first-touch model (no remote grain proxies retained)
func (p *SyncedPool) RemoveRemoteGrain(id CartId) {
p.mu.Lock()
delete(p.remoteIndex, id)
p.mu.Unlock()
}
// SpawnRemoteGrain creates/updates a remote grain proxy for a given host. // SpawnRemoteGrain removed (remote grain proxies eliminated in first-touch model)
func (p *SyncedPool) SpawnRemoteGrain(id CartId, host string) {
if id.String() == "" {
return
}
p.mu.Lock()
// If local grain exists (legacy key), remove from local map (ownership moved).
if g, ok := p.local.grains[LegacyToCartKey(id)]; ok && g != nil {
delete(p.local.grains, LegacyToCartKey(id))
}
remoteHost, ok := p.remoteHosts[host]
if !ok {
p.mu.Unlock()
log.Printf("SpawnRemoteGrain: host %s unknown (id=%s), attempting AddRemote", host, id)
go p.AddRemote(host)
return
}
rg := NewRemoteGrainGRPC(id, host, remoteHost.CartClient)
p.remoteIndex[id] = rg
p.mu.Unlock()
}
// GetHealthyRemotes returns a copy slice of healthy remote hosts. // GetHealthyRemotes retained (still useful for broadcasting ownership)
func (p *SyncedPool) GetHealthyRemotes() []*RemoteHostGRPC { func (p *SyncedPool) GetHealthyRemotes() []*RemoteHostGRPC {
p.mu.RLock() p.mu.RLock()
defer p.mu.RUnlock() defer p.mu.RUnlock()
@@ -432,151 +413,130 @@ func (p *SyncedPool) GetHealthyRemotes() []*RemoteHostGRPC {
return ret return ret
} }
// rebuildRing reconstructs the consistent hashing ring from current host set
// and updates ring-related metrics.
func (p *SyncedPool) rebuildRing() {
p.mu.RLock()
hosts := make([]string, 0, len(p.remoteHosts)+1)
hosts = append(hosts, p.Hostname)
for h := range p.remoteHosts {
hosts = append(hosts, h)
}
p.mu.RUnlock()
epochSeed := fingerprintHosts(hosts)
builder := NewRingBuilder().
WithHosts(hosts).
WithEpoch(epochSeed).
WithVnodesPerHost(p.vnodesPerHost)
r := builder.Build()
if p.ringRef == nil {
p.ringRef = NewRingRef(r)
} else {
p.ringRef.Set(r)
}
// Metrics
ringEpoch.Set(float64(r.Epoch))
ringHosts.Set(float64(len(r.Hosts())))
ringVnodes.Set(float64(len(r.Vnodes)))
ringHostShare.Reset()
if len(r.Vnodes) > 0 {
perHost := make(map[string]int)
for _, v := range r.Vnodes {
perHost[v.Host]++
}
total := float64(len(r.Vnodes))
for h, c := range perHost {
ringHostShare.WithLabelValues(h).Set(float64(c) / total)
}
}
}
// ForceRingRefresh exposes a manual ring rebuild hook (primarily for tests).
func (p *SyncedPool) ForceRingRefresh() {
p.rebuildRing()
}
// ownersFor returns the ordered list of primary + replica owners for a cart id
// (length min(replicationFactor, #hosts)). Currently only the first (primary)
// is used. This scaffolds future replication work.
func (p *SyncedPool) ownersFor(id CartId) []string {
if p.ringRef == nil || p.replicationFactor <= 0 {
return []string{p.Hostname}
}
r := p.ringRef.Get()
if r == nil || r.Empty() {
return []string{p.Hostname}
}
vnodes := r.LookupN(hashKeyString(id.String()), p.replicationFactor)
out := make([]string, 0, len(vnodes))
seen := make(map[string]struct{}, len(vnodes))
for _, v := range vnodes {
if _, ok := seen[v.Host]; ok {
continue
}
seen[v.Host] = struct{}{}
out = append(out, v.Host)
}
if len(out) == 0 {
out = append(out, p.Hostname)
}
return out
}
// ownerHostFor returns the primary owner host for a given id.
func (p *SyncedPool) ownerHostFor(id CartId) string {
return p.ownersFor(id)[0]
}
// DebugOwnerHost exposes (for tests) the currently computed primary owner host.
func (p *SyncedPool) DebugOwnerHost(id CartId) string {
return p.ownerHostFor(id)
}
func (p *SyncedPool) removeLocalGrain(id CartId) { func (p *SyncedPool) removeLocalGrain(id CartId) {
p.mu.Lock() p.mu.Lock()
delete(p.local.grains, LegacyToCartKey(id)) delete(p.local.grains, uint64(id))
p.mu.Unlock() p.mu.Unlock()
} }
// getGrain returns a local or remote grain. For remote ownership it performs a // ------------------------- First-Touch Ownership Resolution ------------------
// bounded readiness wait (small retries) to reduce first-call failures while
// the remote connection & proxy are initializing. // ErrNotOwner is returned when an operation is attempted on a cart that is
// owned by a different host (per the first-touch ownership mapping); it is currently only referenced by the commented-out takeover path below.
var ErrNotOwner = fmt.Errorf("not owner")
// resolveOwnerFirstTouch implements the first-touch semantics:
//  1. If a local grain exists, this host already owns the cart; nothing to do.
//  2. Else, if remoteOwners records another host, that claim is left intact
//     (currently only logged; routing to the owner is the caller's concern).
//  3. Else, claim the cart locally by spawning the grain and asynchronously
//     announce the ownership to all healthy remotes.
//
// NOTE: This does NOT (yet) reconcile conflicting announcements; first claim
// wins. Later improvements can add a tie-break via timestamp or host ordering.
func (p *SyncedPool) resolveOwnerFirstTouch(id CartId) error {
// Fast local existence check
p.local.mu.RLock()
_, existsLocal := p.local.grains[uint64(id)]
p.local.mu.RUnlock()
if existsLocal {
return nil
}
// Remote ownership map lookup
p.mu.RLock()
remoteHost, foundRemote := p.remoteOwners[id]
p.mu.RUnlock()
if foundRemote && remoteHost.Host != "" {
log.Printf("other owner exists %s", remoteHost.Host)
return nil
}
// Claim: spawn locally
_, err := p.local.GetGrain(id)
if err != nil {
return err
}
// Announce asynchronously
go p.broadcastOwnership([]CartId{id})
return nil
}
// broadcastOwnership sends an AnnounceOwnership RPC to all healthy remotes.
// Best-effort: failures are logged and ignored.
func (p *SyncedPool) broadcastOwnership(ids []CartId) {
if len(ids) == 0 {
return
}
uids := make([]uint64, 0, len(ids))
for _, id := range ids {
uids = append(uids, uint64(id))
}
p.mu.RLock()
defer p.mu.RUnlock()
for _, r := range p.remoteHosts {
if r.IsHealthy() {
go func(rh *RemoteHostGRPC) {
rh.ControlClient.AnnounceOwnership(context.Background(), &messages.OwnershipAnnounce{
Host: p.LocalHostname,
CartIds: uids,
})
}(r)
}
}
}
// AdoptRemoteOwnership processes an incoming ownership announcement for cart ids.
func (p *SyncedPool) AdoptRemoteOwnership(host string, ids []string) {
if host == "" || host == p.LocalHostname {
return
}
p.mu.Lock()
defer p.mu.Unlock()
remoteHost, ok := p.remoteHosts[host]
if !ok {
	// Unknown host: we cannot record ownership without a connection handle.
	log.Printf("AdoptRemoteOwnership: announcement from unknown host %s ignored", host)
	return
}
for _, s := range ids {
if s == "" {
continue
}
parsed, ok := ParseCartId(s)
if !ok {
continue // skip invalid cart id strings
}
id := parsed
// Do not overwrite if already claimed by another host (first wins).
if existing, ok := p.remoteOwners[id]; ok && existing != remoteHost {
continue
}
// Skip if we own locally (local wins for our own process)
p.local.mu.RLock()
_, localHas := p.local.grains[uint64(id)]
p.local.mu.RUnlock()
if localHas {
continue
}
p.remoteOwners[id] = remoteHost
}
}
// getGrain returns a local grain under the first-touch model, creating it if
// needed and recording/announcing the ownership claim as a side effect. It
// does not itself return ErrNotOwner; routing to a recorded remote owner is
// left to callers (see OwnerHost). Remote grain proxy logic and ring-based
// spawning have been removed.
func (p *SyncedPool) getGrain(id CartId) (Grain, error) { func (p *SyncedPool) getGrain(id CartId) (Grain, error) {
owner := p.ownerHostFor(id)
if owner == p.Hostname { // Owner is local (either existing or just claimed), fetch/create grain.
ringLookupLocal.Inc()
grain, err := p.local.GetGrain(id) grain, err := p.local.GetGrain(id)
if err != nil { if err != nil {
return nil, err return nil, err
} }
p.resolveOwnerFirstTouch(id)
return grain, nil return grain, nil
} }
ringLookupRemote.Inc()
// Kick off remote dial if we don't yet know the owner.
if !p.IsKnown(owner) {
go p.AddRemote(owner)
}
// Fast path existing proxy
p.mu.RLock()
if rg, ok := p.remoteIndex[id]; ok {
p.mu.RUnlock()
remoteLookupCount.Inc()
return rg, nil
}
p.mu.RUnlock()
const (
attempts = 5
sleepPerTry = 40 * time.Millisecond
)
for attempt := 0; attempt < attempts; attempt++ {
// Try to spawn (idempotent if host already known)
if p.IsKnown(owner) {
p.SpawnRemoteGrain(id, owner)
}
// Check again
p.mu.RLock()
if rg, ok := p.remoteIndex[id]; ok {
p.mu.RUnlock()
remoteLookupCount.Inc()
return rg, nil
}
p.mu.RUnlock()
// Last attempt? break to return error.
if attempt == attempts-1 {
break
}
time.Sleep(sleepPerTry)
}
return nil, fmt.Errorf("remote owner %s not yet available for cart %s (after %d attempts)", owner, id.String(), attempts)
}
// Apply applies a single mutation to a grain (local or remote). // Apply applies a single mutation to a grain (local or remote).
// Replication (RF>1) scaffolding: future enhancement will fan-out mutations // Replication (RF>1) scaffolding: future enhancement will fan-out mutations
@@ -584,8 +544,31 @@ func (p *SyncedPool) getGrain(id CartId) (Grain, error) {
func (p *SyncedPool) Apply(id CartId, mutation interface{}) (*CartGrain, error) { func (p *SyncedPool) Apply(id CartId, mutation interface{}) (*CartGrain, error) {
grain, err := p.getGrain(id) grain, err := p.getGrain(id)
if err != nil { if err != nil {
log.Printf("could not get grain %v", err)
return nil, err return nil, err
} }
// if err == ErrNotOwner {
// // Remote owner reported but either unreachable or failed earlier in stack.
// // Takeover strategy: remove remote mapping (first-touch override) and claim locally.
// p.mu.Lock()
// delete(p.remoteOwners, id)
// p.mu.Unlock()
// if owner, terr := p.resolveOwnerFirstTouch(id); terr != nil {
// return nil, terr
// } else if owner == p.LocalHostname {
// // Fetch (now-local) grain
// grain, err = p.local.GetGrain(id)
// if err != nil {
// return nil, err
// }
// } else {
// // Another host reclaimed before us; treat as not owner.
// return nil, ErrNotOwner
// }
// } else if err != nil {
// return nil, err
// }
start := time.Now() start := time.Now()
result, applyErr := grain.Apply(mutation, false) result, applyErr := grain.Apply(mutation, false)
@@ -605,10 +588,10 @@ func (p *SyncedPool) Apply(id CartId, mutation interface{}) (*CartGrain, error)
if applyErr == nil && result != nil { if applyErr == nil && result != nil {
cartMutationsTotal.Inc() cartMutationsTotal.Inc()
if p.ownerHostFor(id) == p.Hostname { //if p.ownerHostFor(id) == p.LocalHostname {
// Update active grains gauge only for local ownership // Update active grains gauge only for local ownership
cartActiveGrains.Set(float64(p.local.DebugGrainCount())) cartActiveGrains.Set(float64(p.local.DebugGrainCount()))
} //}
} else if applyErr != nil { } else if applyErr != nil {
cartMutationFailuresTotal.Inc() cartMutationFailuresTotal.Inc()
} }
@@ -620,6 +603,7 @@ func (p *SyncedPool) Apply(id CartId, mutation interface{}) (*CartGrain, error)
func (p *SyncedPool) Get(id CartId) (*CartGrain, error) { func (p *SyncedPool) Get(id CartId) (*CartGrain, error) {
grain, err := p.getGrain(id) grain, err := p.getGrain(id)
if err != nil { if err != nil {
log.Printf("could not get grain %v", err)
return nil, err return nil, err
} }
return grain.GetCurrentState() return grain.GetCurrentState()
@@ -637,7 +621,7 @@ func (p *SyncedPool) Close() {
for _, r := range remotes { for _, r := range remotes {
go func(rh *RemoteHostGRPC) { go func(rh *RemoteHostGRPC) {
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
_, err := rh.ControlClient.Closing(ctx, &proto.ClosingNotice{Host: p.Hostname}) _, err := rh.ControlClient.Closing(ctx, &proto.ClosingNotice{Host: p.LocalHostname})
cancel() cancel()
if err != nil { if err != nil {
log.Printf("Close notify to %s failed: %v", rh.Host, err) log.Printf("Close notify to %s failed: %v", rh.Host, err)
@@ -645,3 +629,14 @@ func (p *SyncedPool) Close() {
}(r) }(r)
} }
} }
// Hostname implements the GrainPool interface, returning this node's hostname.
func (p *SyncedPool) Hostname() string {
return p.LocalHostname
}
// OwnerHost returns the recorded remote owner for a given cart id, if any
// (first-touch ownership map lookup; locally owned carts are not listed here).
func (p *SyncedPool) OwnerHost(id CartId) (Host, bool) {
	p.mu.RLock()
	defer p.mu.RUnlock()
	ownerHost, ok := p.remoteOwners[id]
	return ownerHost, ok
}
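
Closing the loop on announcements: a hedged sketch of how a receiving node could feed an incoming AnnounceOwnership payload into AdoptRemoteOwnership. The handler and the uint64-to-string conversion are assumptions; only the OwnershipAnnounce fields, the CartId conversion, and AdoptRemoteOwnership come from the code above, and CartId.String is assumed to be the inverse of ParseCartId.

// onOwnershipAnnounce records the announcing host as owner of the given carts.
func onOwnershipAnnounce(pool *SyncedPool, msg *messages.OwnershipAnnounce) {
	ids := make([]string, 0, len(msg.CartIds))
	for _, raw := range msg.CartIds {
		ids = append(ids, CartId(raw).String()) // assumed inverse of ParseCartId
	}
	pool.AdoptRemoteOwnership(msg.Host, ids)
}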

View File

@@ -1,8 +0,0 @@
/*
Legacy TCP networking (GenericListener / Frame protocol) has been removed
as part of the gRPC migration. This file intentionally contains no tests.
Keeping an empty Go file (with a package declaration) lets the old
tcp-connection test target build cleanly even though it no longer runs any tests.
*/
package main