7 Commits

Author SHA1 Message Date
matst80
f8c8ad56c7 fix checkout again
All checks were successful
Build and Publish / Metadata (push) Successful in 5s
Build and Publish / BuildAndDeployAmd64 (push) Successful in 47s
Build and Publish / BuildAndDeployArm64 (push) Successful in 4m0s
2025-10-10 16:00:20 +00:00
matst80
09a68db8d5 update propertynames
All checks were successful
Build and Publish / Metadata (push) Successful in 4s
Build and Publish / BuildAndDeployAmd64 (push) Successful in 49s
Build and Publish / BuildAndDeployArm64 (push) Successful in 3m56s
2025-10-10 14:43:51 +00:00
matst80
30c89a0394 metadata on arm
All checks were successful
Build and Publish / Metadata (push) Successful in 4s
Build and Publish / BuildAndDeployArm64 (push) Successful in 4m11s
Build and Publish / BuildAndDeployAmd64 (push) Successful in 44s
2025-10-10 13:59:27 +00:00
matst80
d6563d0b3a fix docker build
Some checks failed
Build and Publish / Metadata (push) Has been cancelled
Build and Publish / BuildAndDeployAmd64 (push) Has been cancelled
Build and Publish / BuildAndDeployArm64 (push) Has been cancelled
2025-10-10 13:56:48 +00:00
matst80
2a2ce247d5 more stuff
Some checks failed
Build and Publish / BuildAndDeploy (push) Successful in 4m45s
Build and Publish / BuildAndDeployAmd64 (push) Has been cancelled
2025-10-10 13:47:42 +00:00
matst80
159253b8b0 more refactoring
Some checks failed
Build and Publish / BuildAndDeploy (push) Successful in 3m6s
Build and Publish / BuildAndDeployAmd64 (push) Has been cancelled
2025-10-10 13:22:36 +00:00
matst80
c30be581cd revert port
Some checks failed
Build and Publish / BuildAndDeploy (push) Successful in 3m2s
Build and Publish / BuildAndDeployAmd64 (push) Has been cancelled
2025-10-10 12:10:37 +00:00
27 changed files with 2790 additions and 1086 deletions

68
.dockerignore Normal file
View File

@@ -0,0 +1,68 @@
# .dockerignore for go-cart-actor
#
# Goal: Keep Docker build context lean & reproducible.
# Adjust as project structure evolves.
# Version control & CI metadata
.git
.git/
.gitignore
.github
# Local tooling / editors
.vscode
.idea
*.iml
# Build artifacts / outputs
bin/
build/
dist/
out/
coverage/
*.coverprofile
# Temporary files
*.tmp
*.log
tmp/
.tmp/
# Dependency/vendor caches (not used; rely on go modules download)
vendor/
# Examples / scripts (adjust if you actually need them in build context)
examples/
scripts/
# Docs (retain README.md explicitly)
docs/
CHANGELOG*
**/*.md
!README.md
# Tests (not needed for production build)
**/*_test.go
# Node / frontend artifacts (if any future addition)
node_modules/
# Docker / container metadata not needed inside image
Dockerfile
# Editor swap/backup files
*~
*.swp
# Go race / profiling outputs
*.pprof
# Security / secret placeholders (ensure real secrets never copied)
*.secret
*.key
*.pem
# Keep proto and generated code (do NOT ignore proto/)
!proto/
# End of file

View File

@@ -1,30 +1,77 @@
name: Build and Publish
-run-name: ${{ gitea.actor }} is building 🚀
+run-name: ${{ gitea.actor }} build 🚀
on: [push]
jobs:
+Metadata:
+runs-on: arm64
+outputs:
+version: ${{ steps.meta.outputs.version }}
+git_commit: ${{ steps.meta.outputs.git_commit }}
+build_date: ${{ steps.meta.outputs.build_date }}
+steps:
+- name: Checkout
+uses: actions/checkout@v4
+- id: meta
+name: Derive build metadata
+run: |
+GIT_COMMIT=$(git rev-parse HEAD)
+if git describe --tags --exact-match >/dev/null 2>&1; then
+VERSION=$(git describe --tags --exact-match)
+else
+VERSION=$(git rev-parse --short=12 HEAD)
+fi
+BUILD_DATE=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
+echo "git_commit=$GIT_COMMIT" >> $GITHUB_OUTPUT
+echo "version=$VERSION" >> $GITHUB_OUTPUT
+echo "build_date=$BUILD_DATE" >> $GITHUB_OUTPUT
BuildAndDeployAmd64:
+needs: Metadata
runs-on: amd64
steps:
-- name: Check out repository code
-uses: actions/checkout@v4
-- name: Build docker image
-run: docker build --progress=plain -t registry.knatofs.se/go-cart-actor-amd64:latest .
-- name: Push to registry
-run: docker push registry.knatofs.se/go-cart-actor-amd64:latest
-- name: Deploy to Kubernetes
+- uses: actions/checkout@v4
+- name: Build amd64 image
+run: |
+docker build \
+--build-arg VERSION=${{ needs.Metadata.outputs.version }} \
+--build-arg GIT_COMMIT=${{ needs.Metadata.outputs.git_commit }} \
+--build-arg BUILD_DATE=${{ needs.Metadata.outputs.build_date }} \
+--progress=plain \
+-t registry.knatofs.se/go-cart-actor-amd64:latest \
+-t registry.knatofs.se/go-cart-actor-amd64:${{ needs.Metadata.outputs.version }} \
+.
+- name: Push amd64 images
+run: |
+docker push registry.knatofs.se/go-cart-actor-amd64:latest
+docker push registry.knatofs.se/go-cart-actor-amd64:${{ needs.Metadata.outputs.version }}
+- name: Apply deployment manifests
run: kubectl apply -f deployment/deployment.yaml -n cart
-- name: Rollout amd64 deployment
-run: kubectl rollout restart deployment/cart-actor-x86 -n cart
+- name: Rollout amd64 deployment (pin to version)
+run: |
+kubectl set image deployment/cart-actor-x86 -n cart cart-actor-amd64=registry.knatofs.se/go-cart-actor-amd64:${{ needs.Metadata.outputs.version }}
+kubectl rollout status deployment/cart-actor-x86 -n cart
-BuildAndDeploy:
+BuildAndDeployArm64:
+needs: Metadata
runs-on: arm64
steps:
-- name: Check out repository code
-uses: actions/checkout@v4
-- name: Build docker image
-run: docker build --progress=plain -t registry.knatofs.se/go-cart-actor .
-- name: Push to registry
-run: docker push registry.knatofs.se/go-cart-actor
-- name: Rollout arm64 deployment
-run: kubectl rollout restart deployment/cart-actor-arm64 -n cart
+- uses: actions/checkout@v4
+- name: Build arm64 image
+run: |
+docker build \
+--build-arg VERSION=${{ needs.Metadata.outputs.version }} \
+--build-arg GIT_COMMIT=${{ needs.Metadata.outputs.git_commit }} \
+--build-arg BUILD_DATE=${{ needs.Metadata.outputs.build_date }} \
+--progress=plain \
+-t registry.knatofs.se/go-cart-actor:latest \
+-t registry.knatofs.se/go-cart-actor:${{ needs.Metadata.outputs.version }} \
+.
+- name: Push arm64 images
+run: |
+docker push registry.knatofs.se/go-cart-actor:latest
+docker push registry.knatofs.se/go-cart-actor:${{ needs.Metadata.outputs.version }}
+- name: Rollout arm64 deployment (pin to version)
+run: |
+kubectl set image deployment/cart-actor-arm64 -n cart cart-actor-arm64=registry.knatofs.se/go-cart-actor:${{ needs.Metadata.outputs.version }}
+kubectl rollout status deployment/cart-actor-arm64 -n cart

View File

@@ -1,17 +1,75 @@
-# syntax=docker/dockerfile:1
+# syntax=docker/dockerfile:1.7
+#
+# Multi-stage build:
+# 1. Build static binary with pinned Go version (matching go.mod).
+# 2. Copy into distroless static nonroot runtime image.
+#
+# Build args (optional):
+# VERSION - semantic/app version (default: dev)
+# GIT_COMMIT - git SHA (default: unknown)
+# BUILD_DATE - RFC3339 build timestamp
+#
+# Example build:
+# docker build \
+# --build-arg VERSION=$(git describe --tags --always) \
+# --build-arg GIT_COMMIT=$(git rev-parse HEAD) \
+# --build-arg BUILD_DATE=$(date -u +%Y-%m-%dT%H:%M:%SZ) \
+# -t go-cart-actor:dev .
+#
+# If you add subpackages or directories, no Dockerfile change is needed (COPY . .).
+# Ensure a .dockerignore exists to keep the context lean.
-FROM golang:alpine AS build-stage
-WORKDIR /app
+############################
+# Build Stage
+############################
+FROM golang:1.25-alpine AS build
+WORKDIR /src
+# Build metadata (can be overridden at build time)
+ARG VERSION=dev
+ARG GIT_COMMIT=unknown
+ARG BUILD_DATE=unknown
+# Ensure reproducible static build
+# Multi-arch build args (TARGETOS/TARGETARCH provided automatically by buildx)
+ARG TARGETOS
+ARG TARGETARCH
+ENV CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH}
+# Dependency caching
COPY go.mod go.sum ./
-RUN go mod download
+RUN --mount=type=cache,target=/go/pkg/mod \
+go mod download
-COPY proto ./proto
-COPY *.go ./
-RUN CGO_ENABLED=0 GOOS=linux go build -o /go-cart-actor
+# Copy full source (rely on .dockerignore to prune)
+COPY . .
+# (Optional) If you do NOT check in generated protobuf code, uncomment generation:
+# RUN go install google.golang.org/protobuf/cmd/protoc-gen-go@latest && \
+# go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest && \
+# protoc --go_out=. --go_opt=paths=source_relative \
+# --go-grpc_out=. --go-grpc_opt=paths=source_relative \
+# proto/*.proto
-FROM gcr.io/distroless/base-debian11
+# Build with minimal binary size and embedded metadata
+RUN --mount=type=cache,target=/go/build-cache \
+go build -trimpath -ldflags="-s -w \
+-X main.Version=${VERSION} \
+-X main.GitCommit=${GIT_COMMIT} \
+-X main.BuildDate=${BUILD_DATE}" \
+-o /out/go-cart-actor .
+############################
+# Runtime Stage
+############################
+# Using distroless static (nonroot) for minimal surface area.
+FROM gcr.io/distroless/static-debian12:nonroot AS runtime
WORKDIR /
-COPY --from=build-stage /go-cart-actor /go-cart-actor
+COPY --from=build /out/go-cart-actor /go-cart-actor
+# Document typical ports (EXPOSE is informational, not enforced): 8080 (HTTP), 1337 (gRPC)
+EXPOSE 8080 1337
+USER nonroot:nonroot
ENTRYPOINT ["/go-cart-actor"]
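The `-X main.Version/GitCommit/BuildDate` flags in the build stage above only take effect if the main package declares matching string variables; those variables are not shown in this diff, so the following is a hedged sketch of what they are assumed to look like (the log call is purely illustrative).

```
package main

import "log"

// Assumed targets of the Dockerfile's
// -ldflags "-X main.Version=... -X main.GitCommit=... -X main.BuildDate=...".
// The defaults apply when building without the flags (plain `go build`).
var (
	Version   = "dev"
	GitCommit = "unknown"
	BuildDate = "unknown"
)

func main() {
	log.Printf("go-cart-actor version=%s commit=%s built=%s", Version, GitCommit, BuildDate)
}
```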

View File

@@ -45,6 +45,7 @@ help:
@echo " protogen Generate protobuf & gRPC code" @echo " protogen Generate protobuf & gRPC code"
@echo " clean_proto Remove generated *.pb.go files in $(PROTO_DIR)" @echo " clean_proto Remove generated *.pb.go files in $(PROTO_DIR)"
@echo " verify_proto Ensure no root-level *.pb.go files (old layout)" @echo " verify_proto Ensure no root-level *.pb.go files (old layout)"
@echo " tidy Run go mod tidy" @echo " tidy Run go mod tidy"
@echo " build Build the module" @echo " build Build the module"
@echo " test Run tests (verbose)" @echo " test Run tests (verbose)"
@@ -89,6 +90,18 @@ verify_proto:
fi fi
@echo "$(GREEN)Proto layout OK (no root-level *.pb.go files).$(RESET)" @echo "$(GREEN)Proto layout OK (no root-level *.pb.go files).$(RESET)"
tidy: tidy:
@echo "$(YELLOW)Running go mod tidy...$(RESET)" @echo "$(YELLOW)Running go mod tidy...$(RESET)"
$(GO) mod tidy $(GO) mod tidy

View File

@@ -1,5 +1,36 @@
# Go Cart Actor
## Migration Notes (Ring-based Ownership Transition)
This release removes the legacy ConfirmOwner ownership negotiation RPC in favor of deterministic ownership via the consistent hashing ring.
Summary of changes:
- ConfirmOwner RPC removed from the ControlPlane service.
- OwnerChangeRequest message removed (was only used by ConfirmOwner).
- OwnerChangeAck retained solely as the response type for the Closing RPC.
- SyncedPool now relies exclusively on the ring for ownership (no quorum negotiation).
- Remote proxy creation includes a bounded readiness retry to reduce first-call failures.
- New Prometheus ring metrics:
- cart_ring_epoch
- cart_ring_hosts
- cart_ring_vnodes
- cart_ring_host_share{host}
- cart_ring_lookup_local_total
- cart_ring_lookup_remote_total
Action required for consumers:
1. Regenerate protobuf code after pulling (requires protoc-gen-go and protoc-gen-go-grpc installed).
2. Remove any client code or automation invoking ConfirmOwner (calls will now return UNIMPLEMENTED if using stale generated stubs).
3. Update monitoring/alerts that referenced ConfirmOwner or ownership quorum failures—use ring metrics instead.
4. If you previously interpreted “ownership flapping” via ConfirmOwner logs, now check for:
- Rapid changes in ring epoch (cart_ring_epoch)
- Host churn (cart_ring_hosts)
- Imbalance in vnode distribution (cart_ring_host_share)
No data migration is necessary; cart IDs and grain state are unaffected.
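For consumers adjusting to the ring-based model, here is a rough Go sketch of how ownership is now derived. It relies only on the ring and CartID helpers exercised in `cart_id_test.go` later in this diff (`NewRingBuilder`, `Lookup`, `Raw`); the `selfHost`/`hosts` parameters and the fixed epoch/vnode values are illustrative, not part of the PR.

```
package main

// Sketch only: deterministic ownership via the consistent hashing ring,
// replacing the removed ConfirmOwner quorum round-trip.
func ownsCart(selfHost string, hosts []string, id CartID) (bool, string) {
	ring := NewRingBuilder().
		WithEpoch(1). // epoch literal as in the tests; real code tracks membership epochs
		WithVnodesPerHost(8).
		WithHosts(hosts).
		Build()
	owner := ring.Lookup(id.Raw()) // same id + same ring => same owner on every node
	return owner.Host == selfHost, owner.Host
}
```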
---
A distributed cart management system using the actor model pattern.
## Prerequisites
@@ -240,8 +271,8 @@ Responsibilities:
1. Discovery integration (via a `Discovery` interface) adds/removes hosts.
2. Periodic ping health checks (ControlPlane.Ping).
-3. Ownership negotiation:
-- On first contention / unknown owner, node calls `ConfirmOwner` on peers to achieve quorum before making a local grain authoritative.
+3. Ring-based deterministic ownership:
+- Ownership is derived directly from the consistent hashing ring (no quorum RPC or `ConfirmOwner`).
4. Remote spawning:
- When a remote host reports its cart ids (`GetCartIds`), the pool creates remote proxies for fast routing.
@@ -270,7 +301,6 @@ Defined in `proto/control_plane.proto`:
| `Ping` | Liveness; increments missed ping counter if failing. |
| `Negotiate` | Merges membership views; used after discovery events. |
| `GetCartIds` | Enumerate locally owned carts for remote index seeding. |
-| `ConfirmOwner` | Quorum acknowledgment for ownership claim. |
| `Closing` | Graceful shutdown notice; peers remove host & associated remote grains. |
### Ownership / Quorum Rules
@@ -347,7 +377,7 @@ Defined in `proto/control_plane.proto`:
## gRPC Interfaces
- **CartActor**: Per-mutation unary RPCs + `GetState`. (Checkout logic intentionally excluded; handled at HTTP layer.)
-- **ControlPlane**: Cluster coordination (Ping, Negotiate, ConfirmOwner, etc.).
+- **ControlPlane**: Cluster coordination (Ping, Negotiate, GetCartIds, Closing) — ownership now ring-determined (no ConfirmOwner).
**Ports** (default / implied):
- CartActor & ControlPlane share the same gRPC server/listener (single port, e.g. `:1337`).
@@ -396,7 +426,7 @@ Defined in `proto/control_plane.proto`:
```
Client -> HTTP Handler -> SyncedPool -> (local?) -> Registry -> Grain State
\-> (remote?) -> RemoteGrainGRPC -> gRPC -> Remote CartActor -> Registry -> Grain
-ControlPlane: Discovery Events <-> Negotiation/Ping/ConfirmOwner <-> SyncedPool state
+ControlPlane: Discovery Events <-> Negotiation/Ping <-> SyncedPool state (ring determines ownership)
```
---
@@ -407,7 +437,7 @@ ControlPlane: Discovery Events <-> Negotiation/Ping/ConfirmOwner <-> SyncedPool
|---------|--------------|--------|
| New cart every request | Secure cookie over plain HTTP or not sending cookie jar | Disable Secure locally or use HTTPS & proper curl `-b` |
| Unsupported mutation error | Missing registry handler | Add `RegisterMutation` for that proto |
-| Ownership flapping | Quorum failing due to intermittent peers | Investigate `ConfirmOwner` errors / network |
+| Ownership imbalance | Ring host distribution skew or rapid host churn | Examine `cart_ring_host_share`, `cart_ring_hosts`, and logs for host add/remove; rebalance or investigate instability |
| Remote mutation latency | Network / serialization overhead | Consider batching or colocating hot carts |
| Checkout returns 500 | Klarna call failed | Inspect logs; no grain state mutated |

327
cart_id.go Normal file
View File

@@ -0,0 +1,327 @@
package main
import (
"crypto/rand"
"encoding/binary"
"errors"
"fmt"
"strings"
)
// cart_id.go
//
// Compact CartID implementation using 64 bits of cryptographic randomness,
// base62 encoded (0-9 A-Z a-z). Typical length is 11 characters (since 62^11 > 2^64).
//
// Motivation:
// * Shorter identifiers for cookies / URLs than legacy padded 16-byte CartId
// * O(1) hashing (raw uint64) for consistent hashing ring integration
// * Extremely low collision probability (birthday bound negligible at scale)
//
// Backward Compatibility Strategy (Phased):
// Phase 1: Introduce CartID helpers while continuing to accept legacy CartId.
// Phase 2: Internally migrate maps to key by uint64 (CartID.Raw()).
// Phase 3: Canonicalize all inbound IDs to short base62; reissue Set-Cart-Id header.
//
// NOTE:
// The legacy type `CartId [16]byte` is still present elsewhere; helper
// UpgradeLegacyCartId bridges that representation to the new form without
// breaking deterministic mapping for existing carts.
//
// Security / Predictability:
// Uses crypto/rand for generation. If ever required, you can layer an
// HMAC-based derivation for additional secrecy. Current approach already
// provides 64 bits of entropy (brute force infeasible for practical risk).
//
// Future Extensions:
// * Time-sortable IDs: prepend a 48-bit timestamp field and encode 80 bits.
// * Add metrics counters for: generated_new, parsed_existing, legacy_fallback.
// * Add a pool of pre-generated IDs for ultra-low-latency hot paths (rarely needed).
//
// Public Surface Summary:
// NewCartID() (CartID, error)
// ParseCartID(string) (CartID, bool)
// FallbackFromString(string) CartID
// UpgradeLegacyCartId(CartId) CartID
// CanonicalizeIncoming(string) (CartID, bool /*wasGenerated*/, error)
//
// Encoding Details:
// encodeBase62 / decodeBase62 maintain a stable alphabet. DO NOT change
// alphabet order once IDs are in circulation, or previously issued IDs
// will change meaning.
//
// Zero Values:
// The zero value CartID{} has raw=0, txt="0". Treat it as valid but
// usually you will call NewCartID instead.
//
// ---------------------------------------------------------------------------
const base62Alphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
// Precomputed reverse lookup table for decode (255 = invalid).
var base62Rev [256]byte
func init() {
for i := range base62Rev {
base62Rev[i] = 0xFF
}
for i := 0; i < len(base62Alphabet); i++ {
base62Rev[base62Alphabet[i]] = byte(i)
}
}
// CartID is the compact representation of a cart identifier.
// raw: 64-bit entropy (also used directly for consistent hashing).
// txt: cached base62 textual form.
type CartID struct {
raw uint64
txt string
}
// String returns the canonical base62 encoded ID.
func (c CartID) String() string {
if c.txt == "" { // lazily encode if constructed manually
c.txt = encodeBase62(c.raw)
}
return c.txt
}
// Raw returns the 64-bit numeric value (useful for hashing / ring lookup).
func (c CartID) Raw() uint64 {
return c.raw
}
// IsZero reports whether this CartID is the zero value.
func (c CartID) IsZero() bool {
return c.raw == 0
}
// NewCartID generates a new cryptographically random 64-bit ID.
func NewCartID() (CartID, error) {
var b [8]byte
if _, err := rand.Read(b[:]); err != nil {
return CartID{}, fmt.Errorf("NewCartID: %w", err)
}
u := binary.BigEndian.Uint64(b[:])
// Reject zero if you want to avoid ever producing "0" (optional).
if u == 0 {
// Extremely unlikely; recurse once.
return NewCartID()
}
return CartID{raw: u, txt: encodeBase62(u)}, nil
}
// MustNewCartID panics on failure (suitable for tests / initialization).
func MustNewCartID() CartID {
id, err := NewCartID()
if err != nil {
panic(err)
}
return id
}
// ParseCartID attempts to parse a base62 canonical ID.
// Returns (id, true) if fully valid; (zero, false) otherwise.
func ParseCartID(s string) (CartID, bool) {
if len(s) == 0 {
return CartID{}, false
}
// Basic length sanity; allow a bit of headroom for future timestamp variant.
if len(s) > 16 {
return CartID{}, false
}
u, ok := decodeBase62(s)
if !ok {
return CartID{}, false
}
return CartID{raw: u, txt: s}, true
}
// FallbackFromString produces a deterministic CartID from arbitrary input
// using a 64-bit FNV-1a hash. This allows legacy or malformed IDs to map
// consistently into the new scheme (collision probability still low).
func FallbackFromString(s string) CartID {
const (
offset64 = 14695981039346656037 // FNV-1a 64-bit offset basis
prime64 = 1099511628211
)
h := uint64(offset64)
for i := 0; i < len(s); i++ {
h ^= uint64(s[i])
h *= prime64
}
return CartID{raw: h, txt: encodeBase62(h)}
}
// UpgradeLegacyCartId converts the old 16-byte CartId (padded) to CartID
// by hashing its trimmed string form. Keeps stable mapping across restarts.
func UpgradeLegacyCartId(old CartId) CartID {
return FallbackFromString(old.String())
}
// CanonicalizeIncoming normalizes user-provided ID strings.
// Behavior:
//
// Empty string -> generate new ID (wasGenerated = true)
// Valid base62 -> parse and return (wasGenerated = false)
// Anything else -> fallback deterministic hash (wasGenerated = false)
//
// Errors only occur if crypto/rand fails during generation.
func CanonicalizeIncoming(s string) (CartID, bool, error) {
if s == "" {
id, err := NewCartID()
return id, true, err
}
if cid, ok := ParseCartID(s); ok {
return cid, false, nil
}
// Legacy heuristic: if length == 16 and contains non-base62 chars, treat as legacy padded ID.
if len(s) == 16 && !isAllBase62(s) {
return FallbackFromString(strings.TrimRight(s, "\x00")), false, nil
}
return FallbackFromString(s), false, nil
}
// isAllBase62 returns true if every byte is in the base62 alphabet.
func isAllBase62(s string) bool {
for i := 0; i < len(s); i++ {
if base62Rev[s[i]] == 0xFF {
return false
}
}
return true
}
// encodeBase62 turns a uint64 into base62 text.
// Complexity: O(log_62 n) ~ at most 11 iterations for 64 bits.
func encodeBase62(u uint64) string {
if u == 0 {
return "0"
}
// 62^11 ≈ 5.2e19 > 2^64 ≈ 1.84e19, so 11 base62 characters always suffice for a uint64.
var buf [11]byte
i := len(buf)
for u > 0 {
i--
buf[i] = base62Alphabet[u%62]
u /= 62
}
return string(buf[i:])
}
// decodeBase62 converts a base62 string to uint64.
// Returns (value, false) if any invalid character appears.
func decodeBase62(s string) (uint64, bool) {
var v uint64
for i := 0; i < len(s); i++ {
c := s[i]
d := base62Rev[c]
if d == 0xFF {
return 0, false
}
v = v*62 + uint64(d)
}
return v, true
}
// ErrInvalidCartID can be returned by higher-level validation layers if you decide
// to reject fallback-derived IDs (currently unused here).
var ErrInvalidCartID = errors.New("invalid cart id")
// ---------------------------------------------------------------------------
// Legacy / Compatibility Conversion Helpers
// ---------------------------------------------------------------------------
// CartIDToLegacy converts a CartID (base62) into the legacy fixed-size CartId
// ([16]byte) by copying the textual form (truncated or zero-padded).
// NOTE: If the base62 string is longer than 16 (should not happen with current
// 64-bit space), it will be truncated.
func CartIDToLegacy(c CartID) CartId {
var id CartId
txt := c.String()
copy(id[:], []byte(txt))
return id
}
// LegacyToCartID upgrades a legacy CartId (padded) to a CartID by hashing its
// trimmed string form (deterministic). This preserves stable mapping without
// depending on original randomness.
func LegacyToCartID(old CartId) CartID {
return UpgradeLegacyCartId(old)
}
// CartIDToKey returns the numeric key representation (uint64) for map indexing.
func CartIDToKey(c CartID) uint64 {
return c.Raw()
}
// LegacyToCartKey converts a legacy CartId to the numeric key via deterministic
// fallback hashing. (Uses the same logic as LegacyToCartID then returns raw.)
func LegacyToCartKey(old CartId) uint64 {
return LegacyToCartID(old).Raw()
}
// ---------------------- Optional Helper Utilities ----------------------------
// CartIDOrNew tries to parse s; if empty OR invalid returns a fresh ID.
func CartIDOrNew(s string) (CartID, bool /*wasParsed*/, error) {
if cid, ok := ParseCartID(s); ok {
return cid, true, nil
}
id, err := NewCartID()
return id, false, err
}
// MustParseCartID panics if s is not a valid base62 ID (useful in tests).
func MustParseCartID(s string) CartID {
if cid, ok := ParseCartID(s); ok {
return cid
}
panic(fmt.Sprintf("invalid CartID: %s", s))
}
// DebugString returns a verbose description (for logging / diagnostics).
func (c CartID) DebugString() string {
return fmt.Sprintf("CartID(raw=%d txt=%s)", c.raw, c.String())
}
// Equal compares two CartIDs by raw value.
func (c CartID) Equal(other CartID) bool {
return c.raw == other.raw
}
// CanonicalizeOrLegacy preserves legacy (non-base62) IDs without altering their
// textual form, avoiding the previous behavior where fallback hashing replaced
// the original string with a base62-encoded hash (which broke deterministic
// key derivation across mixed call paths).
//
// Behavior:
// - s == "" -> generate new CartID (generatedNew = true, wasBase62 = true)
// - base62 ok -> return parsed CartID (generatedNew = false, wasBase62 = true)
// - otherwise -> treat as legacy: raw = hash(s), txt = original s
//
// Returns:
//
// cid - CartID (txt preserved for legacy inputs)
// generatedNew - true only when a brand new ID was created due to empty input
// wasBase62 - true if the input was already canonical base62 (or generated)
// err - only set if crypto/rand fails when generating a new ID
func CanonicalizeOrLegacy(s string) (cid CartID, generatedNew bool, wasBase62 bool, err error) {
if s == "" {
id, e := NewCartID()
if e != nil {
return CartID{}, false, false, e
}
return id, true, true, nil
}
if parsed, ok := ParseCartID(s); ok {
return parsed, false, true, nil
}
// Legacy path: keep original text so downstream legacy-to-key hashing
// (which uses the visible string) yields consistent keys across code paths.
hashCID := FallbackFromString(s)
// Preserve original textual form
hashCID.txt = s
return hashCID, false, false, nil
}
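For reference, a hedged usage sketch of the helpers defined above (a fragment for the same `package main`; the cookie framing and print statements are illustrative only):

```
package main

import "fmt"

// resolveCartID shows the intended edge behavior: empty input mints a new id,
// canonical base62 parses, and legacy ids keep their original text while
// hashing deterministically to a ring/pool key.
func resolveCartID(cookieValue string) (CartID, error) {
	cid, generatedNew, wasBase62, err := CanonicalizeOrLegacy(cookieValue)
	if err != nil {
		return CartID{}, err // only possible when crypto/rand fails
	}
	switch {
	case generatedNew:
		fmt.Println("issued new cart id:", cid.String()) // caller would Set-Cookie here
	case !wasBase62:
		fmt.Printf("legacy id %q preserved, key=%d\n", cid.String(), cid.Raw())
	}
	return cid, nil
}
```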

259
cart_id_test.go Normal file
View File

@@ -0,0 +1,259 @@
package main
import (
"crypto/rand"
"encoding/binary"
"fmt"
mrand "math/rand"
"testing"
)
// TestEncodeDecodeBase62RoundTrip verifies encodeBase62/decodeBase62 are inverse.
func TestEncodeDecodeBase62RoundTrip(t *testing.T) {
mrand.Seed(42)
for i := 0; i < 1000; i++ {
// Random 64-bit value
v := mrand.Uint64()
s := encodeBase62(v)
dec, ok := decodeBase62(s)
if !ok {
t.Fatalf("decodeBase62 failed for %d encoded=%s", v, s)
}
if dec != v {
t.Fatalf("round trip mismatch: have %d got %d (encoded=%s)", v, dec, s)
}
}
// Explicit zero test
if s := encodeBase62(0); s != "0" {
t.Fatalf("expected encodeBase62(0) == \"0\", got %q", s)
}
if v, ok := decodeBase62("0"); !ok || v != 0 {
t.Fatalf("decodeBase62(0) unexpected result v=%d ok=%v", v, ok)
}
}
// TestNewCartIDUniqueness generates a number of IDs and checks for duplicates.
func TestNewCartIDUniqueness(t *testing.T) {
const n = 10000
seen := make(map[string]struct{}, n)
for i := 0; i < n; i++ {
id, err := NewCartID()
if err != nil {
t.Fatalf("NewCartID error: %v", err)
}
s := id.String()
if _, exists := seen[s]; exists {
t.Fatalf("duplicate CartID generated: %s", s)
}
seen[s] = struct{}{}
if id.IsZero() {
t.Fatalf("NewCartID returned zero value")
}
}
}
// TestParseCartIDValidation tests parsing of valid and invalid base62 strings.
func TestParseCartIDValidation(t *testing.T) {
id, err := NewCartID()
if err != nil {
t.Fatalf("NewCartID error: %v", err)
}
parsed, ok := ParseCartID(id.String())
if !ok {
t.Fatalf("ParseCartID failed for valid id %s", id)
}
if parsed.raw != id.raw {
t.Fatalf("parsed raw mismatch: %d vs %d", parsed.raw, id.raw)
}
if _, ok := ParseCartID(""); ok {
t.Fatalf("expected empty string to be invalid")
}
// Invalid char ('-')
if _, ok := ParseCartID("abc-123"); ok {
t.Fatalf("expected invalid chars to fail parse")
}
// Overly long ( >16 )
if _, ok := ParseCartID("1234567890abcdefg"); ok {
t.Fatalf("expected overly long string to fail parse")
}
}
// TestFallbackDeterminism ensures fallback hashing is deterministic.
func TestFallbackDeterminism(t *testing.T) {
inputs := []string{
"legacy-cart-1",
"legacy-cart-2",
"UPPER_lower_123",
"🚀unicode", // unicode bytes (will hash byte sequence)
}
for _, in := range inputs {
a := FallbackFromString(in)
b := FallbackFromString(in)
if a.raw != b.raw || a.String() != b.String() {
t.Fatalf("fallback mismatch for %q: %+v vs %+v", in, a, b)
}
}
// Distinct inputs should almost always differ; sample check
a := FallbackFromString("distinct-A")
b := FallbackFromString("distinct-B")
if a.raw == b.raw {
t.Fatalf("unexpected identical fallback hashes for distinct inputs")
}
}
// TestCanonicalizeIncomingBehavior covers main control flow branches.
func TestCanonicalizeIncomingBehavior(t *testing.T) {
// Empty => new id
id1, generated, err := CanonicalizeIncoming("")
if err != nil || !generated || id1.IsZero() {
t.Fatalf("CanonicalizeIncoming empty failed: id=%v gen=%v err=%v", id1, generated, err)
}
// Valid base62 => parse; no generation
id2, gen2, err := CanonicalizeIncoming(id1.String())
if err != nil || gen2 || id2.raw != id1.raw {
t.Fatalf("CanonicalizeIncoming parse mismatch: id2=%v gen2=%v err=%v", id2, gen2, err)
}
// Legacy-like random containing invalid chars -> fallback
fallbackInput := "legacy\x00\x00padding"
id3, gen3, err := CanonicalizeIncoming(fallbackInput)
if err != nil || gen3 {
t.Fatalf("CanonicalizeIncoming fallback unexpected: id3=%v gen3=%v err=%v", id3, gen3, err)
}
// Deterministic fallback
id4, _, _ := CanonicalizeIncoming(fallbackInput)
if id3.raw != id4.raw {
t.Fatalf("fallback canonicalization not deterministic")
}
}
// TestUpgradeLegacyCartId ensures mapping of old CartId is stable.
func TestUpgradeLegacyCartId(t *testing.T) {
var legacy CartId
copy(legacy[:], []byte("legacy-123456789")) // 15 bytes + padding
up1 := UpgradeLegacyCartId(legacy)
up2 := UpgradeLegacyCartId(legacy)
if up1.raw != up2.raw {
t.Fatalf("UpgradeLegacyCartId not deterministic: %v vs %v", up1, up2)
}
if up1.String() != up2.String() {
t.Fatalf("UpgradeLegacyCartId string mismatch: %s vs %s", up1, up2)
}
}
// BenchmarkNewCartID gives a rough idea of generation cost.
func BenchmarkNewCartID(b *testing.B) {
for i := 0; i < b.N; i++ {
if _, err := NewCartID(); err != nil {
b.Fatalf("error: %v", err)
}
}
}
// BenchmarkEncodeBase62 measures encode speed in isolation.
func BenchmarkEncodeBase62(b *testing.B) {
// Random sample of values
samples := make([]uint64, 1024)
for i := range samples {
var buf [8]byte
if _, err := rand.Read(buf[:]); err != nil {
b.Fatalf("rand: %v", err)
}
samples[i] = binary.BigEndian.Uint64(buf[:])
}
b.ResetTimer()
var sink string
for i := 0; i < b.N; i++ {
sink = encodeBase62(samples[i%len(samples)])
}
_ = sink
}
// BenchmarkDecodeBase62 measures decode speed.
func BenchmarkDecodeBase62(b *testing.B) {
// Pre-encode
encoded := make([]string, 1024)
for i := range encoded {
encoded[i] = encodeBase62(uint64(i)<<32 | uint64(i))
}
b.ResetTimer()
var sum uint64
for i := 0; i < b.N; i++ {
v, ok := decodeBase62(encoded[i%len(encoded)])
if !ok {
b.Fatalf("decode failed")
}
sum ^= v
}
_ = sum
}
// TestLookupNDeterminism (ring integration smoke test) ensures LookupN
// returns distinct hosts and stable ordering for a fixed ring.
func TestLookupNDeterminism(t *testing.T) {
rb := NewRingBuilder().WithEpoch(1).WithVnodesPerHost(8).WithHosts([]string{"a", "b", "c"})
ring := rb.Build()
if ring.Empty() {
t.Fatalf("expected non-empty ring")
}
id := MustNewCartID()
owners1 := ring.LookupN(id.Raw(), 3)
owners2 := ring.LookupN(id.Raw(), 3)
if len(owners1) != len(owners2) {
t.Fatalf("LookupN length mismatch")
}
for i := range owners1 {
if owners1[i].Host != owners2[i].Host {
t.Fatalf("LookupN ordering instability at %d: %v vs %v", i, owners1[i], owners2[i])
}
}
// Distinct host constraint
seen := map[string]struct{}{}
for _, v := range owners1 {
if _, ok := seen[v.Host]; ok {
t.Fatalf("duplicate host in LookupN result: %v", owners1)
}
seen[v.Host] = struct{}{}
}
}
// TestRingFingerprintChanges ensures fingerprint updates with membership changes.
func TestRingFingerprintChanges(t *testing.T) {
b1 := NewRingBuilder().WithEpoch(1).WithHosts([]string{"node1", "node2"})
r1 := b1.Build()
b2 := NewRingBuilder().WithEpoch(2).WithHosts([]string{"node1", "node2", "node3"})
r2 := b2.Build()
if r1.Fingerprint() == r2.Fingerprint() {
t.Fatalf("expected differing fingerprints after host set change")
}
}
// TestRingDiffHosts verifies added/removed host detection.
func TestRingDiffHosts(t *testing.T) {
r1 := NewRingBuilder().WithEpoch(1).WithHosts([]string{"a", "b"}).Build()
r2 := NewRingBuilder().WithEpoch(2).WithHosts([]string{"b", "c"}).Build()
added, removed := r1.DiffHosts(r2)
if fmt.Sprintf("%v", added) != "[c]" {
t.Fatalf("expected added [c], got %v", added)
}
if fmt.Sprintf("%v", removed) != "[a]" {
t.Fatalf("expected removed [a], got %v", removed)
}
}
// TestRingLookupConsistency ensures direct Lookup and LookupID are aligned.
func TestRingLookupConsistency(t *testing.T) {
ring := NewRingBuilder().WithEpoch(1).WithHosts([]string{"alpha", "beta"}).WithVnodesPerHost(4).Build()
id, _ := ParseCartID("1")
if id.IsZero() {
t.Fatalf("expected parsed id non-zero")
}
v1 := ring.Lookup(id.Raw())
v2 := ring.LookupID(id)
if v1.Host != v2.Host || v1.Hash != v2.Hash {
t.Fatalf("Lookup vs LookupID mismatch: %+v vs %+v", v1, v2)
}
}

View File

@@ -32,11 +32,11 @@ func ToCartState(c *CartGrain) *messages.CartState {
items = append(items, &messages.CartItemState{
Id: int64(it.Id),
-SourceItemId: int64(it.ItemId),
+ItemId: int64(it.ItemId),
Sku: it.Sku,
Name: it.Name,
-UnitPrice: it.Price,
-Quantity: int32(it.Quantity),
+Price: it.Price,
+Qty: int32(it.Quantity),
TotalPrice: it.TotalPrice,
TotalTax: it.TotalTax,
OrgPrice: it.OrgPrice,
@@ -49,7 +49,7 @@ func ToCartState(c *CartGrain) *messages.CartState {
Category4: it.Category4,
Category5: it.Category5,
Image: it.Image,
-ArticleType: it.ArticleType,
+Type: it.ArticleType,
SellerId: it.SellerId,
SellerName: it.SellerName,
Disclaimer: it.Disclaimer,
@@ -84,13 +84,13 @@ func ToCartState(c *CartGrain) *messages.CartState {
Id: int64(d.Id),
Provider: d.Provider,
Price: d.Price,
-ItemIds: itemIds,
+Items: itemIds,
PickupPoint: pp,
})
}
return &messages.CartState{
-CartId: c.Id.String(),
+Id: c.Id.String(),
Items: items,
TotalPrice: c.TotalPrice,
TotalTax: c.TotalTax,
@@ -111,7 +111,7 @@ func FromCartState(cs *messages.CartState, g *CartGrain) *CartGrain {
if g == nil {
g = &CartGrain{}
}
-g.Id = ToCartId(cs.CartId)
+g.Id = ToCartId(cs.Id)
g.TotalPrice = cs.TotalPrice
g.TotalTax = cs.TotalTax
g.TotalDiscount = cs.TotalDiscount
@@ -129,11 +129,11 @@ func FromCartState(cs *messages.CartState, g *CartGrain) *CartGrain {
storeId := toPtr(it.StoreId)
g.Items = append(g.Items, &CartItem{
Id: int(it.Id),
-ItemId: int(it.SourceItemId),
+ItemId: int(it.ItemId),
Sku: it.Sku,
Name: it.Name,
-Price: it.UnitPrice,
-Quantity: int(it.Quantity),
+Price: it.Price,
+Quantity: int(it.Qty),
TotalPrice: it.TotalPrice,
TotalTax: it.TotalTax,
OrgPrice: it.OrgPrice,
@@ -145,7 +145,7 @@ func FromCartState(cs *messages.CartState, g *CartGrain) *CartGrain {
Category4: it.Category4,
Category5: it.Category5,
Image: it.Image,
-ArticleType: it.ArticleType,
+ArticleType: it.Type,
SellerId: it.SellerId,
SellerName: it.SellerName,
Disclaimer: it.Disclaimer,
@@ -165,8 +165,9 @@ func FromCartState(cs *messages.CartState, g *CartGrain) *CartGrain {
if d == nil {
continue
}
-intIds := make([]int, 0, len(d.ItemIds))
-for _, id := range d.ItemIds {
+intIds := make([]int, 0, len(d.Items))
+for _, id := range d.Items {
intIds = append(intIds, int(id))
}
var pp *messages.PickupPoint

View File

@@ -1,276 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: klarna-api-credentials
data:
username: ZjQzZDY3YjEtNzA2Yy00NTk2LTliNTgtYjg1YjU2NDEwZTUw
password: a2xhcm5hX3Rlc3RfYXBpX0trUWhWVE5yYVZsV2FsTnhTRVp3Y1ZSSFF5UkVNRmxyY25Kd1AxSndQMGdzWmpRelpEWTNZakV0TnpBMll5MDBOVGsyTFRsaU5UZ3RZamcxWWpVMk5ERXdaVFV3TERFc2JUUkNjRFpWU1RsTllsSk1aMlEyVEc4MmRVODNZMkozUlRaaFdEZDViV3AwYkhGV1JqTjVNQzlaYXow
type: Opaque
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: cart-actor
arch: amd64
name: cart-actor-x86
spec:
replicas: 0
selector:
matchLabels:
app: cart-actor
arch: amd64
template:
metadata:
labels:
app: cart-actor
actor-pool: cart
arch: amd64
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/arch
operator: NotIn
values:
- arm64
volumes:
- name: data
nfs:
path: /i-data/7a8af061/nfs/cart-actor-no
server: 10.10.1.10
imagePullSecrets:
- name: regcred
serviceAccountName: default
containers:
- image: registry.knatofs.se/go-cart-actor-amd64:latest
name: cart-actor-amd64
imagePullPolicy: Always
lifecycle:
preStop:
exec:
command: ["sleep", "15"]
ports:
- containerPort: 8080
name: web
- containerPort: 1234
name: echo
- containerPort: 1337
name: rpc
- containerPort: 1338
name: quorum
livenessProbe:
httpGet:
path: /livez
port: web
failureThreshold: 1
periodSeconds: 10
readinessProbe:
httpGet:
path: /readyz
port: web
failureThreshold: 2
initialDelaySeconds: 2
periodSeconds: 10
volumeMounts:
- mountPath: "/data"
name: data
resources:
limits:
memory: "768Mi"
requests:
memory: "70Mi"
cpu: "1200m"
env:
- name: TZ
value: "Europe/Stockholm"
- name: KLARNA_API_USERNAME
valueFrom:
secretKeyRef:
name: klarna-api-credentials
key: username
- name: KLARNA_API_PASSWORD
valueFrom:
secretKeyRef:
name: klarna-api-credentials
key: password
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: AMQP_URL
value: "amqp://admin:12bananer@rabbitmq.dev:5672/"
- name: BASE_URL
value: "https://s10n-no.tornberg.me"
- name: CART_BASE_URL
value: "https://cart-no.tornberg.me"
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: cart-actor
arch: arm64
name: cart-actor-arm64
spec:
replicas: 3
selector:
matchLabels:
app: cart-actor
arch: arm64
template:
metadata:
labels:
app: cart-actor
actor-pool: cart
arch: arm64
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: NotIn
values:
- masterpi
- key: kubernetes.io/arch
operator: In
values:
- arm64
volumes:
- name: data
nfs:
path: /i-data/7a8af061/nfs/cart-actor-no
server: 10.10.1.10
imagePullSecrets:
- name: regcred
serviceAccountName: default
containers:
- image: registry.knatofs.se/go-cart-actor:latest
name: cart-actor-arm64
imagePullPolicy: Always
lifecycle:
preStop:
exec:
command: ["sleep", "15"]
ports:
- containerPort: 8080
name: web
- containerPort: 1234
name: echo
- containerPort: 1337
name: rpc
- containerPort: 1338
name: quorum
livenessProbe:
httpGet:
path: /livez
port: web
failureThreshold: 1
periodSeconds: 10
readinessProbe:
httpGet:
path: /readyz
port: web
failureThreshold: 2
initialDelaySeconds: 2
periodSeconds: 10
volumeMounts:
- mountPath: "/data"
name: data
resources:
limits:
memory: "768Mi"
requests:
memory: "70Mi"
cpu: "1200m"
env:
- name: TZ
value: "Europe/Stockholm"
- name: KLARNA_API_USERNAME
valueFrom:
secretKeyRef:
name: klarna-api-credentials
key: username
- name: KLARNA_API_PASSWORD
valueFrom:
secretKeyRef:
name: klarna-api-credentials
key: password
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: AMQP_URL
value: "amqp://admin:12bananer@rabbitmq.dev:5672/"
- name: BASE_URL
value: "https://s10n-no.tornberg.me"
- name: CART_BASE_URL
value: "https://cart-no.tornberg.me"
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
---
kind: Service
apiVersion: v1
metadata:
name: cart-echo
spec:
selector:
app: cart-actor
type: LoadBalancer
ports:
- name: echo
port: 1234
---
kind: Service
apiVersion: v1
metadata:
name: cart-actor
annotations:
prometheus.io/port: "8080"
prometheus.io/scrape: "true"
prometheus.io/path: "/metrics"
spec:
selector:
app: cart-actor
ports:
- name: web
port: 8080
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: cart-ingress
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
# nginx.ingress.kubernetes.io/affinity: "cookie"
# nginx.ingress.kubernetes.io/session-cookie-name: "cart-affinity"
# nginx.ingress.kubernetes.io/session-cookie-expires: "172800"
# nginx.ingress.kubernetes.io/session-cookie-max-age: "172800"
nginx.ingress.kubernetes.io/proxy-body-size: 4m
spec:
ingressClassName: nginx
tls:
- hosts:
- cart-no.tornberg.me
secretName: cart-actor-no-tls-secret
rules:
- host: cart-no.tornberg.me
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: cart-actor
port:
number: 8080

View File

@@ -55,12 +55,8 @@ spec:
ports:
- containerPort: 8080
name: web
-- containerPort: 1234
-name: echo
- containerPort: 1337
name: rpc
-- containerPort: 1338
-name: quorum
livenessProbe:
httpGet:
path: /livez
@@ -117,7 +113,7 @@ metadata:
arch: arm64
name: cart-actor-arm64
spec:
-replicas: 0
+replicas: 3
selector:
matchLabels:
app: cart-actor
@@ -161,12 +157,8 @@ spec:
ports:
- containerPort: 8080
name: web
-- containerPort: 1234
-name: echo
- containerPort: 1337
name: rpc
-- containerPort: 1338
-name: quorum
livenessProbe:
httpGet:
path: /livez
@@ -217,18 +209,6 @@ spec:
---
kind: Service
apiVersion: v1
-metadata:
-name: cart-echo
-spec:
-selector:
-app: cart-actor
-type: LoadBalancer
-ports:
-- name: echo
-port: 1234
----
-kind: Service
-apiVersion: v1
metadata:
name: cart-actor
annotations:

View File

@@ -1,4 +1,4 @@
-apiVersion: autoscaling/v1
+apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: cart-scaler-amd
@@ -9,9 +9,47 @@ spec:
name: cart-actor-x86
minReplicas: 3
maxReplicas: 9
-targetCPUUtilizationPercentage: 30
+behavior:
+scaleUp:
+stabilizationWindowSeconds: 60
+policies:
+- type: Percent
+value: 100
+periodSeconds: 60
+scaleDown:
+stabilizationWindowSeconds: 180
+policies:
+- type: Percent
+value: 50
+periodSeconds: 60
+metrics:
+- type: Resource
+resource:
+name: cpu
+target:
+type: Utilization
+averageUtilization: 50
+# Future custom metric (example):
+# - type: Pods
+# pods:
+# metric:
+# name: cart_mutations_per_second
+# target:
+# type: AverageValue
+# averageValue: "15"
+# - type: Object
+# object:
+# describedObject:
+# apiVersion: networking.k8s.io/v1
+# kind: Ingress
+# name: cart-ingress
+# metric:
+# name: http_requests_per_second
+# target:
+# type: Value
+# value: "100"
---
-apiVersion: autoscaling/v1
+apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: cart-scaler-arm
@@ -22,4 +60,42 @@ spec:
name: cart-actor-arm64
minReplicas: 3
maxReplicas: 9
-targetCPUUtilizationPercentage: 30
+behavior:
+scaleUp:
+stabilizationWindowSeconds: 60
+policies:
+- type: Percent
+value: 100
+periodSeconds: 60
+scaleDown:
+stabilizationWindowSeconds: 180
+policies:
+- type: Percent
+value: 50
+periodSeconds: 60
+metrics:
+- type: Resource
+resource:
+name: cpu
+target:
+type: Utilization
+averageUtilization: 50
+# Future custom metric (example):
+# - type: Pods
+# pods:
+# metric:
+# name: cart_mutations_per_second
+# target:
+# type: AverageValue
+# averageValue: "15"
+# - type: Object
+# object:
+# describedObject:
+# apiVersion: networking.k8s.io/v1
+# kind: Ingress
+# name: cart-ingress
+# metric:
+# name: http_requests_per_second
+# target:
+# type: Value
+# value: "100"

View File

@@ -2,6 +2,7 @@ package main
import (
"context"
+"sync"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -75,3 +76,97 @@ func NewK8sDiscovery(client *kubernetes.Clientset) *K8sDiscovery {
client: client,
}
}
// MockDiscovery is an in-memory Discovery implementation for tests.
// It allows deterministic injection of host additions/removals without
// depending on Kubernetes API machinery.
type MockDiscovery struct {
mu sync.RWMutex
hosts []string
events chan HostChange
closed bool
started bool
}
// NewMockDiscovery creates a mock discovery with an initial host list.
func NewMockDiscovery(initial []string) *MockDiscovery {
cp := make([]string, len(initial))
copy(cp, initial)
return &MockDiscovery{
hosts: cp,
events: make(chan HostChange, 32),
}
}
// Discover returns the current host snapshot.
func (m *MockDiscovery) Discover() ([]string, error) {
m.mu.RLock()
defer m.mu.RUnlock()
cp := make([]string, len(m.hosts))
copy(cp, m.hosts)
return cp, nil
}
// Watch returns a channel that will receive HostChange events.
// The channel is buffered (capacity 32); AddHost/RemoveHost sends only block if the buffer fills.
func (m *MockDiscovery) Watch() (<-chan HostChange, error) {
m.mu.Lock()
defer m.mu.Unlock()
if m.closed {
return nil, context.Canceled
}
m.started = true
return m.events, nil
}
// AddHost inserts a new host (if absent) and emits an Added event.
func (m *MockDiscovery) AddHost(host string) {
m.mu.Lock()
defer m.mu.Unlock()
if m.closed {
return
}
for _, h := range m.hosts {
if h == host {
return
}
}
m.hosts = append(m.hosts, host)
if m.started {
m.events <- HostChange{Host: host, Type: watch.Added}
}
}
// RemoveHost removes a host (if present) and emits a Deleted event.
func (m *MockDiscovery) RemoveHost(host string) {
m.mu.Lock()
defer m.mu.Unlock()
if m.closed {
return
}
idx := -1
for i, h := range m.hosts {
if h == host {
idx = i
break
}
}
if idx == -1 {
return
}
m.hosts = append(m.hosts[:idx], m.hosts[idx+1:]...)
if m.started {
m.events <- HostChange{Host: host, Type: watch.Deleted}
}
}
// Close closes the event channel (idempotent).
func (m *MockDiscovery) Close() {
m.mu.Lock()
defer m.mu.Unlock()
if m.closed {
return
}
m.closed = true
close(m.events)
}
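A hedged sketch of how the MockDiscovery above can drive a test flow (fragment for the same `package main`; host addresses are illustrative):

```
package main

import "fmt"

// exampleMockDiscovery exercises the mock: snapshot, watch, add/remove, close.
func exampleMockDiscovery() {
	d := NewMockDiscovery([]string{"cart-0:1337"})
	events, _ := d.Watch() // buffered HostChange channel

	d.AddHost("cart-1:1337")    // emits watch.Added
	d.RemoveHost("cart-0:1337") // emits watch.Deleted

	for i := 0; i < 2; i++ {
		ev := <-events
		fmt.Printf("host=%s type=%v\n", ev.Host, ev.Type)
	}

	hosts, _ := d.Discover()
	fmt.Println("remaining hosts:", hosts) // [cart-1:1337]
	d.Close()
}
```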

102
frames.go
View File

@@ -1,102 +0,0 @@
package main
// Minimal frame abstractions retained after removal of the legacy TCP/frame
// networking layer. These types remain only to avoid a wide cascading refactor
// across existing grain / pool logic that still constructs and passes
// FrameWithPayload objects internally.
//
// The original responsibilities this replaces:
// - Binary framing, checksums, network IO
// - Distinction between request / reply frame types
//
// What remains:
// - A lightweight container (FrameWithPayload) used as an in-process
// envelope for status code + typed marker + payload bytes (JSON or proto).
// - Message / status constants referenced in existing code paths.
//
// Recommended future cleanup (post-migration):
// - Remove FrameType entirely and replace with enumerated semantic results
// or error values.
// - Replace FrameWithPayload with a struct { Status int; Data []byte }.
// - Remove remote_* reply type branching once all callers rely on gRPC
// status + strongly typed responses.
//
// For now we keep this minimal surface to keep the gRPC migration focused.
type (
// FrameType is a symbolic identifier carried through existing code paths.
// No ordering or bit semantics are required anymore.
FrameType uint32
StatusCode uint32
)
type Frame struct {
Type FrameType
StatusCode StatusCode
Length uint32
// Checksum retained for compatibility; no longer validated.
Checksum uint32
}
// FrameWithPayload wraps a Frame with an opaque payload.
// Payload usually contains JSON encoded cart state or an error message.
type FrameWithPayload struct {
Frame
Payload []byte
}
// -----------------------------------------------------------------------------
// Legacy Frame Type Constants (minimal subset still referenced)
// -----------------------------------------------------------------------------
const (
RemoteGetState = FrameType(0x01)
RemoteHandleMutation = FrameType(0x02)
ResponseBody = FrameType(0x03) // (rarely used; kept for completeness)
RemoteGetStateReply = FrameType(0x04)
RemoteHandleMutationReply = FrameType(0x05)
RemoteCreateOrderReply = FrameType(0x06)
)
// MakeFrameWithPayload constructs an in-process frame wrapper.
// Length & Checksum are filled for backward compatibility (no validation logic
// depends on the checksum anymore).
func MakeFrameWithPayload(msg FrameType, statusCode StatusCode, payload []byte) FrameWithPayload {
length := uint32(len(payload))
return FrameWithPayload{
Frame: Frame{
Type: msg,
StatusCode: statusCode,
Length: length,
Checksum: (uint32(msg) + uint32(statusCode) + length) / 8, // simple legacy formula
},
Payload: payload,
}
}
// Clone creates a shallow copy of the frame, duplicating the payload slice.
func (f *FrameWithPayload) Clone() *FrameWithPayload {
if f == nil {
return nil
}
cp := make([]byte, len(f.Payload))
copy(cp, f.Payload)
return &FrameWithPayload{
Frame: f.Frame,
Payload: cp,
}
}
// NewErrorFrame helper for creating an error frame with a textual payload.
func NewErrorFrame(msg FrameType, code StatusCode, err error) FrameWithPayload {
var b []byte
if err != nil {
b = []byte(err.Error())
}
return MakeFrameWithPayload(msg, code, b)
}
// IsSuccess returns true if the status code indicates success in the
// conventional HTTP-style range (200-299). This mirrors previous usage patterns.
func (f *FrameWithPayload) IsSuccess() bool {
return f != nil && f.StatusCode >= 200 && f.StatusCode < 300
}

View File

@@ -10,6 +10,23 @@ import (
"github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/client_golang/prometheus/promauto"
) )
// grain-pool.go
//
// Migration Note:
// This file has been migrated to use uint64 cart keys internally (derived
// from the new CartID base62 representation). For backward compatibility,
// a deprecated legacy map keyed by CartId is maintained so existing code
// that directly indexes pool.grains with a CartId continues to compile
// until the full refactor across SyncedPool / remoteIndex is completed.
//
// Authoritative storage: grains (map[uint64]*CartGrain)
// Legacy compatibility: grainsLegacy (map[CartId]*CartGrain) - kept in sync.
//
// Once all external usages are updated to rely on helper accessors,
// grainsLegacy can be removed.
//
// ---------------------------------------------------------------------------
var ( var (
poolGrains = promauto.NewGauge(prometheus.GaugeOpts{ poolGrains = promauto.NewGauge(prometheus.GaugeOpts{
Name: "cart_grains_in_pool", Name: "cart_grains_in_pool",
@@ -25,49 +42,71 @@ var (
}) })
) )
// GrainPool interface remains legacy-compatible.
type GrainPool interface { type GrainPool interface {
Apply(id CartId, mutation interface{}) (*CartGrain, error) Apply(id CartId, mutation interface{}) (*CartGrain, error)
Get(id CartId) (*CartGrain, error) Get(id CartId) (*CartGrain, error)
} }
// Ttl keeps expiry info
type Ttl struct { type Ttl struct {
Expires time.Time Expires time.Time
Grain *CartGrain Grain *CartGrain
} }
// GrainLocalPool now stores grains keyed by uint64 (CartKey).
type GrainLocalPool struct { type GrainLocalPool struct {
mu sync.RWMutex mu sync.RWMutex
grains map[CartId]*CartGrain grains map[uint64]*CartGrain // authoritative only
expiry []Ttl expiry []Ttl
spawn func(id CartId) (*CartGrain, error) spawn func(id CartId) (*CartGrain, error)
Ttl time.Duration Ttl time.Duration
PoolSize int PoolSize int
} }
// NewGrainLocalPool constructs a new pool.
func NewGrainLocalPool(size int, ttl time.Duration, spawn func(id CartId) (*CartGrain, error)) *GrainLocalPool { func NewGrainLocalPool(size int, ttl time.Duration, spawn func(id CartId) (*CartGrain, error)) *GrainLocalPool {
ret := &GrainLocalPool{ ret := &GrainLocalPool{
spawn: spawn, spawn: spawn,
grains: make(map[CartId]*CartGrain), grains: make(map[uint64]*CartGrain),
expiry: make([]Ttl, 0), expiry: make([]Ttl, 0),
Ttl: ttl, Ttl: ttl,
PoolSize: size, PoolSize: size,
} }
cartPurge := time.NewTicker(time.Minute) cartPurge := time.NewTicker(time.Minute)
go func() { go func() {
<-cartPurge.C for range cartPurge.C {
ret.Purge() ret.Purge()
}
}() }()
return ret return ret
} }
// keyFromCartId derives the uint64 key from a legacy CartId deterministically.
func keyFromCartId(id CartId) uint64 {
return LegacyToCartKey(id)
}
// storeGrain indexes a grain in both maps.
func (p *GrainLocalPool) storeGrain(id CartId, g *CartGrain) {
k := keyFromCartId(id)
p.grains[k] = g
}
// deleteGrain removes a grain from both maps.
func (p *GrainLocalPool) deleteGrain(id CartId) {
k := keyFromCartId(id)
delete(p.grains, k)
}
// SetAvailable pre-populates placeholder entries (legacy signature).
func (p *GrainLocalPool) SetAvailable(availableWithLastChangeUnix map[CartId]int64) { func (p *GrainLocalPool) SetAvailable(availableWithLastChangeUnix map[CartId]int64) {
p.mu.Lock() p.mu.Lock()
defer p.mu.Unlock() defer p.mu.Unlock()
for id := range availableWithLastChangeUnix { for id := range availableWithLastChangeUnix {
if _, ok := p.grains[id]; !ok { k := keyFromCartId(id)
p.grains[id] = nil if _, ok := p.grains[k]; !ok {
p.grains[k] = nil
p.expiry = append(p.expiry, Ttl{ p.expiry = append(p.expiry, Ttl{
Expires: time.Now().Add(p.Ttl), Expires: time.Now().Add(p.Ttl),
Grain: nil, Grain: nil,
@@ -76,13 +115,19 @@ func (p *GrainLocalPool) SetAvailable(availableWithLastChangeUnix map[CartId]int
} }
} }
// Purge removes expired grains.
func (p *GrainLocalPool) Purge() { func (p *GrainLocalPool) Purge() {
lastChangeTime := time.Now().Add(-p.Ttl) lastChangeTime := time.Now().Add(-p.Ttl)
keepChanged := lastChangeTime.Unix() keepChanged := lastChangeTime.Unix()
p.mu.Lock() p.mu.Lock()
defer p.mu.Unlock() defer p.mu.Unlock()
for i := 0; i < len(p.expiry); i++ { for i := 0; i < len(p.expiry); i++ {
item := p.expiry[i] item := p.expiry[i]
if item.Grain == nil {
continue
}
if item.Expires.Before(time.Now()) { if item.Expires.Before(time.Now()) {
if item.Grain.GetLastChange() > keepChanged { if item.Grain.GetLastChange() > keepChanged {
log.Printf("Expired item %s changed, keeping", item.Grain.GetId()) log.Printf("Expired item %s changed, keeping", item.Grain.GetId())
@@ -90,12 +135,12 @@ func (p *GrainLocalPool) Purge() {
p.expiry = append(p.expiry[:i], p.expiry[i+1:]...) p.expiry = append(p.expiry[:i], p.expiry[i+1:]...)
p.expiry = append(p.expiry, item) p.expiry = append(p.expiry, item)
} else { } else {
// move last to end (noop)
p.expiry = append(p.expiry[:i], item) p.expiry = append(p.expiry[:i], item)
} }
} else { } else {
log.Printf("Item %s expired", item.Grain.GetId()) log.Printf("Item %s expired", item.Grain.GetId())
delete(p.grains, item.Grain.GetId()) p.deleteGrain(item.Grain.GetId())
if i < len(p.expiry)-1 { if i < len(p.expiry)-1 {
p.expiry = append(p.expiry[:i], p.expiry[i+1:]...) p.expiry = append(p.expiry[:i], p.expiry[i+1:]...)
} else { } else {
@@ -108,40 +153,69 @@ func (p *GrainLocalPool) Purge() {
} }
} }
// GetGrains returns a legacy view of grains (copy) for compatibility.
func (p *GrainLocalPool) GetGrains() map[CartId]*CartGrain { func (p *GrainLocalPool) GetGrains() map[CartId]*CartGrain {
return p.grains p.mu.RLock()
defer p.mu.RUnlock()
out := make(map[CartId]*CartGrain, len(p.grains))
for _, g := range p.grains {
if g != nil {
out[g.GetId()] = g
}
}
return out
} }
-func (p *GrainLocalPool) GetGrain(id CartId) (*CartGrain, error) {
-	var err error
-	// p.mu.RLock()
-	// defer p.mu.RUnlock()
-	grain, ok := p.grains[id]
-	grainLookups.Inc()
-	if grain == nil || !ok {
-		if len(p.grains) >= p.PoolSize {
-			if p.expiry[0].Expires.Before(time.Now()) {
-				delete(p.grains, p.expiry[0].Grain.GetId())
-				p.expiry = p.expiry[1:]
-			} else {
-				return nil, fmt.Errorf("pool is full")
-			}
-		}
-		grain, err = p.spawn(id)
-		p.mu.Lock()
-		p.grains[id] = grain
-		p.mu.Unlock()
-	}
-	go func() {
-		l := float64(len(p.grains))
+// statsUpdate updates Prometheus gauges asynchronously.
+func (p *GrainLocalPool) statsUpdate() {
+	go func(size int) {
+		l := float64(size)
		ps := float64(p.PoolSize)
		poolUsage.Set(l / ps)
		poolGrains.Set(l)
		poolSize.Set(ps)
-	}()
+	}(len(p.grains))
+}
// GetGrain retrieves or spawns a grain (legacy id signature).
func (p *GrainLocalPool) GetGrain(id CartId) (*CartGrain, error) {
grainLookups.Inc()
k := keyFromCartId(id)
p.mu.RLock()
grain, ok := p.grains[k]
p.mu.RUnlock()
var err error
if grain == nil || !ok {
p.mu.Lock()
// Re-check under write lock
grain, ok = p.grains[k]
if grain == nil || !ok {
// Capacity check
if len(p.grains) >= p.PoolSize && len(p.expiry) > 0 {
if p.expiry[0].Expires.Before(time.Now()) && p.expiry[0].Grain != nil {
oldId := p.expiry[0].Grain.GetId()
p.deleteGrain(oldId)
p.expiry = p.expiry[1:]
} else {
p.mu.Unlock()
return nil, fmt.Errorf("pool is full")
}
}
grain, err = p.spawn(id)
if err == nil {
p.storeGrain(id, grain)
}
}
p.mu.Unlock()
p.statsUpdate()
}
	return grain, err
}
// Apply applies a mutation (legacy compatibility).
func (p *GrainLocalPool) Apply(id CartId, mutation interface{}) (*CartGrain, error) {
	grain, err := p.GetGrain(id)
	if err != nil || grain == nil {
@@ -150,6 +224,21 @@ func (p *GrainLocalPool) Apply(id CartId, mutation interface{}) (*CartGrain, err
	return grain.Apply(mutation, false)
}
// Get returns current state (legacy wrapper).
func (p *GrainLocalPool) Get(id CartId) (*CartGrain, error) {
	return p.GetGrain(id)
}
// DebugGrainCount returns counts for debugging.
func (p *GrainLocalPool) DebugGrainCount() (authoritative int) {
p.mu.RLock()
defer p.mu.RUnlock()
return len(p.grains)
}
// UnsafePointerToLegacyMap exposes the legacy map pointer (for transitional
// tests that still poke the field directly). DO NOT rely on this long-term.
func (p *GrainLocalPool) UnsafePointerToLegacyMap() uintptr {
// Legacy map removed; retained only to satisfy any transitional callers.
return 0
}


@@ -32,7 +32,18 @@ func NewCartActorGRPCServer(pool GrainPool, syncedPool *SyncedPool) *cartActorGR
// applyMutation routes a single cart mutation to the target grain (used by per-mutation RPC handlers).
func (s *cartActorGRPCServer) applyMutation(cartID string, mutation interface{}) *messages.CartMutationReply {
-	grain, err := s.pool.Apply(ToCartId(cartID), mutation)
+	// Canonicalize or preserve legacy id (do NOT hash-rewrite legacy textual ids)
cid, _, wasBase62, cerr := CanonicalizeOrLegacy(cartID)
if cerr != nil {
return &messages.CartMutationReply{
StatusCode: 500,
Result: &messages.CartMutationReply_Error{Error: fmt.Sprintf("cart_id canonicalization failed: %v", cerr)},
ServerTimestamp: time.Now().Unix(),
}
}
_ = wasBase62 // placeholder; future: propagate canonical id in reply metadata
legacy := CartIDToLegacy(cid)
grain, err := s.pool.Apply(legacy, mutation)
	if err != nil {
		return &messages.CartMutationReply{
			StatusCode: 500,
@@ -159,9 +170,17 @@ func (s *cartActorGRPCServer) GetState(ctx context.Context, req *messages.StateR
			Result: &messages.StateReply_Error{Error: "cart_id is required"},
		}, nil
	}
-	cartID := ToCartId(req.GetCartId())
+	// Canonicalize / upgrade incoming cart id (preserve legacy strings)
cid, _, _, cerr := CanonicalizeOrLegacy(req.GetCartId())
if cerr != nil {
return &messages.StateReply{
StatusCode: 500,
Result: &messages.StateReply_Error{Error: fmt.Sprintf("cart_id canonicalization failed: %v", cerr)},
}, nil
}
legacy := CartIDToLegacy(cid)
-	grain, err := s.pool.Get(cartID)
+	grain, err := s.pool.Get(legacy)
	if err != nil {
		return &messages.StateReply{
			StatusCode: 500,
@@ -177,6 +196,64 @@ func (s *cartActorGRPCServer) GetState(ctx context.Context, req *messages.StateR
		}, nil
	}
// ControlPlane: Ping
func (s *cartActorGRPCServer) Ping(ctx context.Context, _ *messages.Empty) (*messages.PingReply, error) {
return &messages.PingReply{
Host: s.syncedPool.Hostname,
UnixTime: time.Now().Unix(),
}, nil
}
// ControlPlane: Negotiate (merge host views)
func (s *cartActorGRPCServer) Negotiate(ctx context.Context, req *messages.NegotiateRequest) (*messages.NegotiateReply, error) {
hostSet := make(map[string]struct{})
// Caller view
for _, h := range req.GetKnownHosts() {
if h != "" {
hostSet[h] = struct{}{}
}
}
// This host
hostSet[s.syncedPool.Hostname] = struct{}{}
// Known remotes
s.syncedPool.mu.RLock()
for h := range s.syncedPool.remoteHosts {
hostSet[h] = struct{}{}
}
s.syncedPool.mu.RUnlock()
out := make([]string, 0, len(hostSet))
for h := range hostSet {
out = append(out, h)
}
return &messages.NegotiateReply{Hosts: out}, nil
}
// ControlPlane: GetCartIds (locally owned carts only)
func (s *cartActorGRPCServer) GetCartIds(ctx context.Context, _ *messages.Empty) (*messages.CartIdsReply, error) {
s.syncedPool.local.mu.RLock()
ids := make([]string, 0, len(s.syncedPool.local.grains))
for _, g := range s.syncedPool.local.grains {
if g == nil {
continue
}
ids = append(ids, g.GetId().String())
}
s.syncedPool.local.mu.RUnlock()
return &messages.CartIdsReply{CartIds: ids}, nil
}
// ControlPlane: Closing (peer shutdown notification)
func (s *cartActorGRPCServer) Closing(ctx context.Context, req *messages.ClosingNotice) (*messages.OwnerChangeAck, error) {
if req.GetHost() != "" {
s.syncedPool.RemoveHost(req.GetHost())
}
return &messages.OwnerChangeAck{
Accepted: true,
Message: "removed host",
}, nil
}
// StartGRPCServer configures and starts the unified gRPC server on the given address.
// It registers both the CartActor and ControlPlane services.
func StartGRPCServer(addr string, pool GrainPool, syncedPool *SyncedPool) (*grpc.Server, error) {
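A minimal client-side sketch against the unified server, mirroring how the tests below dial it. The address is an assumption; PingReply and StateReply field names are taken from the handlers above.

// grpc_client_sketch.go — illustrative only.
package main

import (
	"context"
	"log"
	"time"

	messages "git.tornberg.me/go-cart-actor/proto"
	"google.golang.org/grpc"
)

func exampleControlPlaneClient() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	conn, err := grpc.DialContext(ctx, "127.0.0.1:1337", grpc.WithInsecure(), grpc.WithBlock())
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()

	control := messages.NewControlPlaneClient(conn)
	cart := messages.NewCartActorClient(conn)

	// Liveness / identity check against the control plane.
	ping, err := control.Ping(ctx, &messages.Empty{})
	if err != nil {
		log.Fatalf("ping failed: %v", err)
	}
	log.Printf("peer %s answered at %d", ping.Host, ping.UnixTime)

	// Fetch cart state through the data plane (id is canonicalized server-side).
	state, err := cart.GetState(ctx, &messages.StateRequest{CartId: "example-cart"})
	if err != nil {
		log.Fatalf("GetState failed: %v", err)
	}
	log.Printf("GetState status: %d", state.StatusCode)
}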

main.go

@@ -249,20 +249,20 @@ func main() {
			return
		}
		cartId := ToCartId(cookie.Value)
-		_, err = syncedServer.pool.Apply(cartId, getCheckoutOrder(r.Host, cartId))
+		order, err = syncedServer.CreateOrUpdateCheckout(r.Host, cartId)
		if err != nil {
			w.WriteHeader(http.StatusInternalServerError)
			w.Write([]byte(err.Error()))
		}
		// v2: Apply now returns *CartGrain; order creation handled inside grain (no payload to unmarshal)
	} else {
-		prevOrder, err := KlarnaInstance.GetOrder(orderId)
+		order, err = KlarnaInstance.GetOrder(orderId)
		if err != nil {
			w.WriteHeader(http.StatusBadRequest)
			w.Write([]byte(err.Error()))
			return
		}
-		order = prevOrder
	}
	w.Header().Set("Content-Type", "text/html; charset=utf-8")
	w.Header().Set("Permissions-Policy", "payment=(self \"https://js.stripe.com\" \"https://m.stripe.network\" \"https://js.playground.kustom.co\")")
@@ -377,8 +377,8 @@ func main() {
		done <- true
	}()
-	log.Print("Server started at port 8083")
-	go http.ListenAndServe(":8083", mux)
+	log.Print("Server started at port 8080")
+	go http.ListenAndServe(":8080", mux)
	<-done
}


@@ -0,0 +1,182 @@
package main
import (
"context"
"fmt"
"testing"
"time"
messages "git.tornberg.me/go-cart-actor/proto"
"google.golang.org/grpc"
)
// TestMultiNodeOwnershipNegotiation spins up two gRPC servers (nodeA, nodeB),
// manually links their SyncedPools (bypassing AddRemote's fixed port assumption),
// and verifies that only one node becomes the owner of a new cart while the
// other can still apply a mutation via the remote proxy path.
//
// NOTE:
// - We manually inject RemoteHostGRPC entries because AddRemote() hard-codes
// port 1337; to run two distinct servers concurrently we need distinct ports.
// - This test asserts single ownership consistency rather than the complete
// quorum semantics (which depend on real discovery + AddRemote).
func TestMultiNodeOwnershipNegotiation(t *testing.T) {
// Allocate distinct ports for the two nodes.
const (
addrA = "127.0.0.1:18081"
addrB = "127.0.0.1:18082"
hostA = "nodeA"
hostB = "nodeB"
)
// Create local grain pools.
poolA := NewGrainLocalPool(1024, time.Minute, spawn)
poolB := NewGrainLocalPool(1024, time.Minute, spawn)
// Create synced pools (no discovery).
syncedA, err := NewSyncedPool(poolA, hostA, nil)
if err != nil {
t.Fatalf("nodeA NewSyncedPool error: %v", err)
}
syncedB, err := NewSyncedPool(poolB, hostB, nil)
if err != nil {
t.Fatalf("nodeB NewSyncedPool error: %v", err)
}
// Start gRPC servers (CartActor + ControlPlane) on different ports.
grpcSrvA, err := StartGRPCServer(addrA, poolA, syncedA)
if err != nil {
t.Fatalf("StartGRPCServer A error: %v", err)
}
defer grpcSrvA.GracefulStop()
grpcSrvB, err := StartGRPCServer(addrB, poolB, syncedB)
if err != nil {
t.Fatalf("StartGRPCServer B error: %v", err)
}
defer grpcSrvB.GracefulStop()
// Helper to connect one pool to the other's server (manual AddRemote equivalent).
link := func(src *SyncedPool, remoteHost, remoteAddr string) {
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
conn, dialErr := grpc.DialContext(ctx, remoteAddr, grpc.WithInsecure(), grpc.WithBlock())
if dialErr != nil {
t.Fatalf("dial %s (%s) failed: %v", remoteHost, remoteAddr, dialErr)
}
cartClient := messages.NewCartActorClient(conn)
controlClient := messages.NewControlPlaneClient(conn)
src.mu.Lock()
src.remoteHosts[remoteHost] = &RemoteHostGRPC{
Host: remoteHost,
Conn: conn,
CartClient: cartClient,
ControlClient: controlClient,
}
src.mu.Unlock()
}
// Cross-link the two pools.
link(syncedA, hostB, addrB)
link(syncedB, hostA, addrA)
// Rebuild rings after manual cross-link so deterministic ownership works immediately.
syncedA.ForceRingRefresh()
syncedB.ForceRingRefresh()
// Allow brief stabilization (control plane pings / no real negotiation needed here).
time.Sleep(200 * time.Millisecond)
// Create a deterministic cart id for test readability.
cartID := ToCartId(fmt.Sprintf("cart-%d", time.Now().UnixNano()))
// Mutation payload (ring-determined ownership; no assumption about which node owns).
addItem := &messages.AddItem{
ItemId: 1,
Quantity: 1,
Price: 1500,
OrgPrice: 1500,
Sku: "sku-test-multi",
Name: "Multi Node Test",
Image: "/test.png",
Stock: 2,
Tax: 2500,
Country: "se",
}
// Determine ring owner and set primary / secondary references.
ownerHost := syncedA.DebugOwnerHost(cartID)
var ownerSynced, otherSynced *SyncedPool
var ownerPool, otherPool *GrainLocalPool
switch ownerHost {
case hostA:
ownerSynced, ownerPool = syncedA, poolA
otherSynced, otherPool = syncedB, poolB
case hostB:
ownerSynced, ownerPool = syncedB, poolB
otherSynced, otherPool = syncedA, poolA
default:
t.Fatalf("unexpected ring owner %s (expected %s or %s)", ownerHost, hostA, hostB)
}
// Apply mutation on the ring-designated owner.
if _, err := ownerSynced.Apply(cartID, addItem); err != nil {
t.Fatalf("owner %s Apply addItem error: %v", ownerHost, err)
}
// Validate owner pool has the grain and the other does not.
if _, ok := ownerPool.GetGrains()[cartID]; !ok {
t.Fatalf("expected owner %s to have local grain", ownerHost)
}
if _, ok := otherPool.GetGrains()[cartID]; ok {
t.Fatalf("non-owner unexpectedly holds local grain")
}
// Prepare change mutation to be applied from the non-owner (should route remotely).
change := &messages.ChangeQuantity{
Id: 1, // line id after first AddItem
Quantity: 2,
}
// Apply remotely via the non-owner.
if _, err := otherSynced.Apply(cartID, change); err != nil {
t.Fatalf("non-owner remote Apply changeQuantity error: %v", err)
}
// Remote re-mutation already performed via otherSynced; removed duplicate block.
// NodeB local grain assertion:
// Only assert absence if nodeB is NOT the ring-designated owner. If nodeB is the owner,
// it is expected to have a local grain (previous generic ownership assertions already ran).
if ownerHost != hostB {
if _, local := poolB.GetGrains()[cartID]; local {
t.Fatalf("nodeB unexpectedly created local grain (ownership duplication)")
}
}
// Fetch state from nodeB to ensure we see updated quantity (2).
grainStateB, err := syncedB.Get(cartID)
if err != nil {
t.Fatalf("nodeB Get error: %v", err)
}
if len(grainStateB.Items) != 1 || grainStateB.Items[0].Quantity != 2 {
t.Fatalf("nodeB observed inconsistent state: items=%d qty=%d (expected 1 / 2)",
len(grainStateB.Items),
func() int {
if len(grainStateB.Items) == 0 {
return -1
}
return grainStateB.Items[0].Quantity
}(),
)
}
// Cross-check from nodeA (authoritative) to ensure state matches.
grainStateA, err := syncedA.Get(cartID)
if err != nil {
t.Fatalf("nodeA Get error: %v", err)
}
if grainStateA.Items[0].Quantity != 2 {
t.Fatalf("nodeA authoritative state mismatch: expected qty=2 got %d", grainStateA.Items[0].Quantity)
}
}

multi_node_three_test.go (new file, 304 lines)

@@ -0,0 +1,304 @@
package main
import (
"context"
"fmt"
"testing"
"time"
messages "git.tornberg.me/go-cart-actor/proto"
"google.golang.org/grpc"
)
// TestThreeNodeMajorityOwnership validates ring-determined ownership and routing
// in a 3-node cluster (A,B,C) using the consistent hashing ring (no quorum RPC).
// The previous ConfirmOwner / quorum semantics have been removed; ownership is
// deterministic and derived from the ring.
//
// It validates:
// 1. The ring selects exactly one primary owner for a new cart.
// 2. Other nodes (B,C) do NOT create local grains for the cart.
// 3. Remote proxies are installed lazily so remote mutations can route.
// 4. A remote mutation from one non-owner updates state visible on another.
// 5. Authoritative state on the owner matches remote observations.
// 6. (Future) This scaffolds replication tests when RF>1 is enabled.
//
// (Legacy comments about ConfirmOwner acceptance thresholds have been removed.)
// (Function name retained for historical continuity.)
func TestThreeNodeMajorityOwnership(t *testing.T) {
const (
addrA = "127.0.0.1:18181"
addrB = "127.0.0.1:18182"
addrC = "127.0.0.1:18183"
hostA = "nodeA3"
hostB = "nodeB3"
hostC = "nodeC3"
)
// Local grain pools
poolA := NewGrainLocalPool(1024, time.Minute, spawn)
poolB := NewGrainLocalPool(1024, time.Minute, spawn)
poolC := NewGrainLocalPool(1024, time.Minute, spawn)
// Synced pools (no discovery)
syncedA, err := NewSyncedPool(poolA, hostA, nil)
if err != nil {
t.Fatalf("nodeA NewSyncedPool error: %v", err)
}
syncedB, err := NewSyncedPool(poolB, hostB, nil)
if err != nil {
t.Fatalf("nodeB NewSyncedPool error: %v", err)
}
syncedC, err := NewSyncedPool(poolC, hostC, nil)
if err != nil {
t.Fatalf("nodeC NewSyncedPool error: %v", err)
}
// Start gRPC servers
grpcSrvA, err := StartGRPCServer(addrA, poolA, syncedA)
if err != nil {
t.Fatalf("StartGRPCServer A error: %v", err)
}
defer grpcSrvA.GracefulStop()
grpcSrvB, err := StartGRPCServer(addrB, poolB, syncedB)
if err != nil {
t.Fatalf("StartGRPCServer B error: %v", err)
}
defer grpcSrvB.GracefulStop()
grpcSrvC, err := StartGRPCServer(addrC, poolC, syncedC)
if err != nil {
t.Fatalf("StartGRPCServer C error: %v", err)
}
defer grpcSrvC.GracefulStop()
// Helper for manual cross-link (since AddRemote assumes fixed port)
link := func(src *SyncedPool, remoteHost, remoteAddr string) {
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
conn, dialErr := grpc.DialContext(ctx, remoteAddr, grpc.WithInsecure(), grpc.WithBlock())
if dialErr != nil {
t.Fatalf("dial %s (%s) failed: %v", remoteHost, remoteAddr, dialErr)
}
cartClient := messages.NewCartActorClient(conn)
controlClient := messages.NewControlPlaneClient(conn)
src.mu.Lock()
src.remoteHosts[remoteHost] = &RemoteHostGRPC{
Host: remoteHost,
Conn: conn,
CartClient: cartClient,
ControlClient: controlClient,
}
src.mu.Unlock()
}
// Full mesh (each node knows all others)
link(syncedA, hostB, addrB)
link(syncedA, hostC, addrC)
link(syncedB, hostA, addrA)
link(syncedB, hostC, addrC)
link(syncedC, hostA, addrA)
link(syncedC, hostB, addrB)
// Rebuild rings after manual linking so ownership resolution is immediate.
syncedA.ForceRingRefresh()
syncedB.ForceRingRefresh()
syncedC.ForceRingRefresh()
// Allow brief stabilization
time.Sleep(200 * time.Millisecond)
// Deterministic-ish cart id
cartID := ToCartId(fmt.Sprintf("cart3-%d", time.Now().UnixNano()))
addItem := &messages.AddItem{
ItemId: 10,
Quantity: 1,
Price: 5000,
OrgPrice: 5000,
Sku: "sku-3node",
Name: "Three Node Test",
Image: "/t.png",
Stock: 10,
Tax: 2500,
Country: "se",
}
// Determine ring-designated owner (may be any of the three hosts)
ownerPre := syncedA.DebugOwnerHost(cartID)
if ownerPre != hostA && ownerPre != hostB && ownerPre != hostC {
t.Fatalf("ring returned unexpected owner %s (not in set {%s,%s,%s})", ownerPre, hostA, hostB, hostC)
}
var ownerSynced *SyncedPool
var ownerPool *GrainLocalPool
switch ownerPre {
case hostA:
ownerSynced, ownerPool = syncedA, poolA
case hostB:
ownerSynced, ownerPool = syncedB, poolB
case hostC:
ownerSynced, ownerPool = syncedC, poolC
}
// Pick two distinct non-owner nodes for remote mutation assertions
var remote1Synced, remote2Synced *SyncedPool
switch ownerPre {
case hostA:
remote1Synced, remote2Synced = syncedB, syncedC
case hostB:
remote1Synced, remote2Synced = syncedA, syncedC
case hostC:
remote1Synced, remote2Synced = syncedA, syncedB
}
// Apply on the ring-designated owner
if _, err := ownerSynced.Apply(cartID, addItem); err != nil {
t.Fatalf("owner %s Apply addItem error: %v", ownerPre, err)
}
// Small wait for remote proxy spawn (ring ownership already deterministic)
time.Sleep(150 * time.Millisecond)
// Assert only nodeA has local grain
localCount := 0
if _, ok := poolA.GetGrains()[cartID]; ok {
localCount++
}
if _, ok := poolB.GetGrains()[cartID]; ok {
localCount++
}
if _, ok := poolC.GetGrains()[cartID]; ok {
localCount++
}
if localCount != 1 {
t.Fatalf("expected exactly 1 local grain, got %d", localCount)
}
if _, ok := ownerPool.GetGrains()[cartID]; !ok {
t.Fatalf("expected owner %s to hold local grain", ownerPre)
}
// Remote proxies may not pre-exist; first remote mutation will trigger SpawnRemoteGrain lazily.
// Issue remote mutation from one non-owner -> ChangeQuantity (increase)
change := &messages.ChangeQuantity{
Id: 1,
Quantity: 3,
}
if _, err := remote1Synced.Apply(cartID, change); err != nil {
t.Fatalf("remote mutation (remote1) changeQuantity error: %v", err)
}
// Validate updated state visible via nodeC
stateC, err := remote2Synced.Get(cartID)
if err != nil {
t.Fatalf("nodeC Get error: %v", err)
}
if len(stateC.Items) != 1 || stateC.Items[0].Quantity != 3 {
t.Fatalf("nodeC observed state mismatch: items=%d qty=%d (expected 1 / 3)",
len(stateC.Items),
func() int {
if len(stateC.Items) == 0 {
return -1
}
return stateC.Items[0].Quantity
}(),
)
}
// Cross-check authoritative nodeA
stateA, err := syncedA.Get(cartID)
if err != nil {
t.Fatalf("nodeA Get error: %v", err)
}
if stateA.Items[0].Quantity != 3 {
t.Fatalf("nodeA authoritative state mismatch: expected qty=3 got %d", stateA.Items[0].Quantity)
}
}
// TestThreeNodeDiscoveryMajorityOwnership (placeholder)
// This test is a scaffold demonstrating how a MockDiscovery would be wired
// once AddRemote supports host:port (currently hard-coded to :1337).
// It is skipped to avoid flakiness / false negatives until the production
// AddRemote logic is enhanced to parse dynamic ports or the test harness
// provides consistent port mapping.
func TestThreeNodeDiscoveryMajorityOwnership(t *testing.T) {
t.Skip("Pending enhancement: AddRemote needs host:port support to fully exercise discovery-based multi-node linking")
// Example skeleton (non-functional with current AddRemote implementation):
//
// md := NewMockDiscovery([]string{"nodeB3", "nodeC3"})
// poolA := NewGrainLocalPool(1024, time.Minute, spawn)
// syncedA, err := NewSyncedPool(poolA, "nodeA3", md)
// if err != nil {
// t.Fatalf("NewSyncedPool with mock discovery error: %v", err)
// }
// // Start server for nodeA (would also need servers for nodeB3/nodeC3 on expected ports)
// // grpcSrvA, _ := StartGRPCServer(":1337", poolA, syncedA)
// // defer grpcSrvA.GracefulStop()
//
// // Dynamically add a host via discovery
// // md.AddHost("nodeB3")
// // time.Sleep(100 * time.Millisecond) // allow AddRemote attempt
//
// // Assertions would verify syncedA.remoteHosts contains "nodeB3"
}
// TestHostRemovalAndErrorWithMockDiscovery validates behavior when:
// 1. Discovery reports a host that cannot be dialed (AddRemote error path)
// 2. That host is then removed (Deleted event) without leaving residual state
// 3. A second failing host is added afterward (ensuring watcher still processes events)
//
// NOTE: Because AddRemote currently hard-codes :1337 and we are NOT starting a
// real server for the bogus hosts, the dial will fail and the remote host should
// never appear in remoteHosts. This intentionally exercises the error logging
// path: "AddRemote: dial ... failed".
func TestHostRemovalAndErrorWithMockDiscovery(t *testing.T) {
// Start a real node A (acts as the observing node)
const addrA = "127.0.0.1:18281"
hostA := "nodeA-md"
poolA := NewGrainLocalPool(128, time.Minute, spawn)
// Mock discovery starts with one bogus host that will fail to connect.
md := NewMockDiscovery([]string{"bogus-host-1"})
syncedA, err := NewSyncedPool(poolA, hostA, md)
if err != nil {
t.Fatalf("NewSyncedPool error: %v", err)
}
grpcSrvA, err := StartGRPCServer(addrA, poolA, syncedA)
if err != nil {
t.Fatalf("StartGRPCServer A error: %v", err)
}
defer grpcSrvA.GracefulStop()
// Kick off watch processing by starting Watch() (NewSyncedPool does this internally
// when discovery is non-nil, but we ensure events channel is active).
// The initial bogus host should trigger AddRemote -> dial failure.
time.Sleep(300 * time.Millisecond)
syncedA.mu.RLock()
if len(syncedA.remoteHosts) != 0 {
syncedA.mu.RUnlock()
t.Fatalf("expected 0 remoteHosts after failing dial, got %d", len(syncedA.remoteHosts))
}
syncedA.mu.RUnlock()
// Remove the bogus host (should not panic; no entry to clean up).
md.RemoveHost("bogus-host-1")
time.Sleep(100 * time.Millisecond)
// Add another bogus host to ensure watcher still alive.
md.AddHost("bogus-host-2")
time.Sleep(300 * time.Millisecond)
syncedA.mu.RLock()
if len(syncedA.remoteHosts) != 0 {
syncedA.mu.RUnlock()
t.Fatalf("expected 0 remoteHosts after second failing dial, got %d", len(syncedA.remoteHosts))
}
syncedA.mu.RUnlock()
// Clean up discovery
md.Close()
}
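One possible shape for the host:port enhancement the skipped test above refers to; a sketch only, not the repo's implementation. The default port constant simply reflects the hard-coded :1337 mentioned in the comments.

// addremote_hostport_sketch.go — illustrative only.
package main

import "net"

const defaultControlPlanePort = "1337" // current hard-coded port, per the comments above

// resolveRemoteAddr lets discovery hand back either "host" or "host:port",
// falling back to the default control-plane port when none is given.
func resolveRemoteAddr(host string) string {
	if _, _, err := net.SplitHostPort(host); err == nil {
		return host // already host:port
	}
	return net.JoinHostPort(host, defaultControlPlanePort)
}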


@@ -216,78 +216,115 @@ func (s *PoolServer) HandleConfirmation(w http.ResponseWriter, r *http.Request,
	return json.NewEncoder(w).Encode(order)
}

-func (s *PoolServer) HandleCheckout(w http.ResponseWriter, r *http.Request, id CartId) error {
+func (s *PoolServer) CreateOrUpdateCheckout(host string, id CartId) (*CheckoutOrder, error) {
+	// Build checkout meta (URLs derived from host)
	meta := &CheckoutMeta{
-		Terms:        fmt.Sprintf("https://%s/terms", r.Host),
-		Checkout:     fmt.Sprintf("https://%s/checkout?order_id={checkout.order.id}", r.Host),
-		Confirmation: fmt.Sprintf("https://%s/confirmation/{checkout.order.id}", r.Host),
-		Validation:   fmt.Sprintf("https://%s/validate", r.Host),
-		Push:         fmt.Sprintf("https://%s/push?order_id={checkout.order.id}", r.Host),
-		Country:      getCountryFromHost(r.Host),
+		Terms:        fmt.Sprintf("https://%s/terms", host),
+		Checkout:     fmt.Sprintf("https://%s/checkout?order_id={checkout.order.id}", host),
+		Confirmation: fmt.Sprintf("https://%s/confirmation/{checkout.order.id}", host),
+		Validation:   fmt.Sprintf("https://%s/validate", host),
+		Push:         fmt.Sprintf("https://%s/push?order_id={checkout.order.id}", host),
+		Country:      getCountryFromHost(host),
	}
	// Get current grain state (may be local or remote)
	grain, err := s.pool.Get(id)
	if err != nil {
-		return err
+		return nil, err
	}
	// Build pure checkout payload
	payload, _, err := BuildCheckoutOrderPayload(grain, meta)
	if err != nil {
-		return err
+		return nil, err
	}
-	// Call Klarna (create or update)
-	var klarnaOrder *CheckoutOrder
	if grain.OrderReference != "" {
-		klarnaOrder, err = KlarnaInstance.UpdateOrder(grain.OrderReference, bytes.NewReader(payload))
+		return KlarnaInstance.UpdateOrder(grain.OrderReference, bytes.NewReader(payload))
	} else {
-		klarnaOrder, err = KlarnaInstance.CreateOrder(bytes.NewReader(payload))
+		return KlarnaInstance.CreateOrder(bytes.NewReader(payload))
	}
+}
+
+func (s *PoolServer) ApplyCheckoutStarted(klarnaOrder *CheckoutOrder, id CartId) (*CartGrain, error) {
+	// Persist initialization state via mutation (best-effort)
+	return s.pool.Apply(id, &messages.InitializeCheckout{
+		OrderId:           klarnaOrder.ID,
+		Status:            klarnaOrder.Status,
+		PaymentInProgress: true,
+	})
+}
+
+func (s *PoolServer) HandleCheckout(w http.ResponseWriter, r *http.Request, id CartId) error {
+	klarnaOrder, err := s.CreateOrUpdateCheckout(r.Host, id)
	if err != nil {
		return err
	}
-	// Persist initialization state via mutation (best-effort)
-	if _, applyErr := s.pool.Apply(id, &messages.InitializeCheckout{
-		OrderId:           klarnaOrder.ID,
-		Status:            klarnaOrder.Status,
-		PaymentInProgress: true,
-	}); applyErr != nil {
-		log.Printf("InitializeCheckout apply error: %v", applyErr)
-	}
+	s.ApplyCheckoutStarted(klarnaOrder, id)
	w.Header().Set("Content-Type", "application/json")
	return json.NewEncoder(w).Encode(klarnaOrder)
}

func NewCartId() CartId {
+	// Deprecated: legacy random/time based cart id generator.
+	// Retained for compatibility; new code should prefer canonical CartID path.
+	cid, err := NewCartID()
+	if err != nil {
+		// Fallback to legacy method only if crypto/rand fails
		id := time.Now().UnixNano() + rand.Int63()
		return ToCartId(fmt.Sprintf("%d", id))
	}
+	return CartIDToLegacy(cid)
+}
func CookieCartIdHandler(fn func(w http.ResponseWriter, r *http.Request, cartId CartId) error) func(w http.ResponseWriter, r *http.Request) error {
	return func(w http.ResponseWriter, r *http.Request) error {
-		var cartId CartId
-		cartIdCookie := r.CookiesNamed("cartid")
-		if cartIdCookie == nil || len(cartIdCookie) == 0 {
-			cartId = NewCartId()
+		// Extract / normalize cookie (preserve legacy textual IDs without rewriting).
+		var legacy CartId
+		cookies := r.CookiesNamed("cartid")
+		if len(cookies) == 0 {
+			// No cookie -> generate new canonical base62 id.
+			cid, generated, _, err := CanonicalizeOrLegacy("")
+			if err != nil {
+				return fmt.Errorf("failed to generate cart id: %w", err)
+			}
+			legacy = CartIDToLegacy(cid)
+			if generated {
				http.SetCookie(w, &http.Cookie{
					Name:     "cartid",
-					Value:    cartId.String(),
+					Value:    cid.String(),
					Secure:   r.TLS != nil,
					HttpOnly: true,
					Path:     "/",
					Expires:  time.Now().AddDate(0, 0, 14),
					SameSite: http.SameSiteLaxMode,
				})
-		} else {
-			cartId = ToCartId(cartIdCookie[0].Value)
+				w.Header().Set("Set-Cart-Id", cid.String())
			}
-		return fn(w, r, cartId)
+		} else {
+			raw := cookies[0].Value
+			cid, generated, wasBase62, err := CanonicalizeOrLegacy(raw)
+			if err != nil {
+				return fmt.Errorf("failed to canonicalize cart id: %w", err)
+			}
+			legacy = CartIDToLegacy(cid)
+			// Only set a new cookie if we actually generated a brand-new ID (empty input).
+			// For legacy (non-base62) ids we preserve the original text and do not overwrite.
+			if generated && wasBase62 {
+				http.SetCookie(w, &http.Cookie{
+					Name:     "cartid",
+					Value:    cid.String(),
+					Secure:   r.TLS != nil,
+					HttpOnly: true,
+					Path:     "/",
+					Expires:  time.Now().AddDate(0, 0, 14),
+					SameSite: http.SameSiteLaxMode,
+				})
+				w.Header().Set("Set-Cart-Id", cid.String())
+			}
+		}
+		return fn(w, r, legacy)
	}
}
@@ -308,8 +345,18 @@ func (s *PoolServer) RemoveCartCookie(w http.ResponseWriter, r *http.Request, ca
func CartIdHandler(fn func(w http.ResponseWriter, r *http.Request, cartId CartId) error) func(w http.ResponseWriter, r *http.Request) error {
	return func(w http.ResponseWriter, r *http.Request) error {
-		cartId := ToCartId(r.PathValue("id"))
-		return fn(w, r, cartId)
+		raw := r.PathValue("id")
+		cid, generated, wasBase62, err := CanonicalizeOrLegacy(raw)
+		if err != nil {
+			return fmt.Errorf("invalid cart id: %w", err)
+		}
+		legacy := CartIDToLegacy(cid)
+		// Only emit Set-Cart-Id header if we produced a brand-new canonical id
+		// AND it is base62 (avoid rewriting legacy textual identifiers).
+		if generated && wasBase62 {
+			w.Header().Set("Set-Cart-Id", cid.String())
+		}
+		return fn(w, r, legacy)
	}
}
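A sketch of how these wrappers could be mounted on a mux, assuming the Go 1.22 ServeMux patterns already implied by r.PathValue; the route paths and the errToStatus helper are illustrative only, not taken from this change.

// handler_wiring_sketch.go — illustrative only.
package main

import (
	"encoding/json"
	"net/http"
)

// errToStatus adapts the error-returning handlers above to http.HandlerFunc.
func errToStatus(h func(w http.ResponseWriter, r *http.Request) error) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if err := h(w, r); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
		}
	}
}

func mountCartRoutes(mux *http.ServeMux, s *PoolServer) {
	// Cookie-based identity: browser traffic, id created/canonicalized transparently.
	mux.Handle("GET /cart", errToStatus(CookieCartIdHandler(func(w http.ResponseWriter, r *http.Request, cartId CartId) error {
		grain, err := s.pool.Get(cartId)
		if err != nil {
			return err
		}
		w.Header().Set("Content-Type", "application/json")
		return json.NewEncoder(w).Encode(grain)
	})))

	// Path-based identity: server-to-server traffic, id taken from /carts/{id}.
	mux.Handle("POST /carts/{id}/checkout", errToStatus(CartIdHandler(func(w http.ResponseWriter, r *http.Request, cartId CartId) error {
		return s.HandleCheckout(w, r, cartId)
	})))
}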


@@ -855,20 +855,19 @@ func (x *OrderCompletedRequest) GetPayload() *OrderCreated {
return nil return nil
} }
// ----------------------------------------------------------------------------- // Excerpt: updated messages for camelCase JSON output
// Cart state snapshot (unchanged from v1 except envelope removal context)
// -----------------------------------------------------------------------------
type CartState struct { type CartState struct {
state protoimpl.MessageState `protogen:"open.v1"` state protoimpl.MessageState `protogen:"open.v1"`
CartId string `protobuf:"bytes,1,opt,name=cart_id,json=cartId,proto3" json:"cart_id,omitempty"` Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` // was cart_id
Items []*CartItemState `protobuf:"bytes,2,rep,name=items,proto3" json:"items,omitempty"` Items []*CartItemState `protobuf:"bytes,2,rep,name=items,proto3" json:"items,omitempty"`
TotalPrice int64 `protobuf:"varint,3,opt,name=total_price,json=totalPrice,proto3" json:"total_price,omitempty"` TotalPrice int64 `protobuf:"varint,3,opt,name=totalPrice,proto3" json:"totalPrice,omitempty"` // was total_price
TotalTax int64 `protobuf:"varint,4,opt,name=total_tax,json=totalTax,proto3" json:"total_tax,omitempty"` TotalTax int64 `protobuf:"varint,4,opt,name=totalTax,proto3" json:"totalTax,omitempty"` // was total_tax
TotalDiscount int64 `protobuf:"varint,5,opt,name=total_discount,json=totalDiscount,proto3" json:"total_discount,omitempty"` TotalDiscount int64 `protobuf:"varint,5,opt,name=totalDiscount,proto3" json:"totalDiscount,omitempty"` // was total_discount
Deliveries []*DeliveryState `protobuf:"bytes,6,rep,name=deliveries,proto3" json:"deliveries,omitempty"` Deliveries []*DeliveryState `protobuf:"bytes,6,rep,name=deliveries,proto3" json:"deliveries,omitempty"`
PaymentInProgress bool `protobuf:"varint,7,opt,name=payment_in_progress,json=paymentInProgress,proto3" json:"payment_in_progress,omitempty"` PaymentInProgress bool `protobuf:"varint,7,opt,name=paymentInProgress,proto3" json:"paymentInProgress,omitempty"` // was payment_in_progress
OrderReference string `protobuf:"bytes,8,opt,name=order_reference,json=orderReference,proto3" json:"order_reference,omitempty"` OrderReference string `protobuf:"bytes,8,opt,name=orderReference,proto3" json:"orderReference,omitempty"` // was order_reference
PaymentStatus string `protobuf:"bytes,9,opt,name=payment_status,json=paymentStatus,proto3" json:"payment_status,omitempty"` PaymentStatus string `protobuf:"bytes,9,opt,name=paymentStatus,proto3" json:"paymentStatus,omitempty"` // was payment_status
Processing bool `protobuf:"varint,10,opt,name=processing,proto3" json:"processing,omitempty"` // NEW (mirrors legacy CartGrain.processing)
unknownFields protoimpl.UnknownFields unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache sizeCache protoimpl.SizeCache
} }
@@ -903,9 +902,9 @@ func (*CartState) Descriptor() ([]byte, []int) {
return file_cart_actor_proto_rawDescGZIP(), []int{13} return file_cart_actor_proto_rawDescGZIP(), []int{13}
} }
func (x *CartState) GetCartId() string { func (x *CartState) GetId() string {
if x != nil { if x != nil {
return x.CartId return x.Id
} }
return "" return ""
} }
@@ -966,19 +965,26 @@ func (x *CartState) GetPaymentStatus() string {
return "" return ""
} }
func (x *CartState) GetProcessing() bool {
if x != nil {
return x.Processing
}
return false
}
type CartItemState struct { type CartItemState struct {
state protoimpl.MessageState `protogen:"open.v1"` state protoimpl.MessageState `protogen:"open.v1"`
Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
SourceItemId int64 `protobuf:"varint,2,opt,name=source_item_id,json=sourceItemId,proto3" json:"source_item_id,omitempty"` ItemId int64 `protobuf:"varint,2,opt,name=itemId,proto3" json:"itemId,omitempty"` // was source_item_id
Sku string `protobuf:"bytes,3,opt,name=sku,proto3" json:"sku,omitempty"` Sku string `protobuf:"bytes,3,opt,name=sku,proto3" json:"sku,omitempty"`
Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
UnitPrice int64 `protobuf:"varint,5,opt,name=unit_price,json=unitPrice,proto3" json:"unit_price,omitempty"` Price int64 `protobuf:"varint,5,opt,name=price,proto3" json:"price,omitempty"` // was unit_price
Quantity int32 `protobuf:"varint,6,opt,name=quantity,proto3" json:"quantity,omitempty"` Qty int32 `protobuf:"varint,6,opt,name=qty,proto3" json:"qty,omitempty"` // was quantity
TotalPrice int64 `protobuf:"varint,7,opt,name=total_price,json=totalPrice,proto3" json:"total_price,omitempty"` TotalPrice int64 `protobuf:"varint,7,opt,name=totalPrice,proto3" json:"totalPrice,omitempty"` // was total_price
TotalTax int64 `protobuf:"varint,8,opt,name=total_tax,json=totalTax,proto3" json:"total_tax,omitempty"` TotalTax int64 `protobuf:"varint,8,opt,name=totalTax,proto3" json:"totalTax,omitempty"` // was total_tax
OrgPrice int64 `protobuf:"varint,9,opt,name=org_price,json=orgPrice,proto3" json:"org_price,omitempty"` OrgPrice int64 `protobuf:"varint,9,opt,name=orgPrice,proto3" json:"orgPrice,omitempty"` // was org_price
TaxRate int32 `protobuf:"varint,10,opt,name=tax_rate,json=taxRate,proto3" json:"tax_rate,omitempty"` TaxRate int32 `protobuf:"varint,10,opt,name=taxRate,proto3" json:"taxRate,omitempty"` // was tax_rate
TotalDiscount int64 `protobuf:"varint,11,opt,name=total_discount,json=totalDiscount,proto3" json:"total_discount,omitempty"` TotalDiscount int64 `protobuf:"varint,11,opt,name=totalDiscount,proto3" json:"totalDiscount,omitempty"`
Brand string `protobuf:"bytes,12,opt,name=brand,proto3" json:"brand,omitempty"` Brand string `protobuf:"bytes,12,opt,name=brand,proto3" json:"brand,omitempty"`
Category string `protobuf:"bytes,13,opt,name=category,proto3" json:"category,omitempty"` Category string `protobuf:"bytes,13,opt,name=category,proto3" json:"category,omitempty"`
Category2 string `protobuf:"bytes,14,opt,name=category2,proto3" json:"category2,omitempty"` Category2 string `protobuf:"bytes,14,opt,name=category2,proto3" json:"category2,omitempty"`
@@ -986,12 +992,12 @@ type CartItemState struct {
Category4 string `protobuf:"bytes,16,opt,name=category4,proto3" json:"category4,omitempty"` Category4 string `protobuf:"bytes,16,opt,name=category4,proto3" json:"category4,omitempty"`
Category5 string `protobuf:"bytes,17,opt,name=category5,proto3" json:"category5,omitempty"` Category5 string `protobuf:"bytes,17,opt,name=category5,proto3" json:"category5,omitempty"`
Image string `protobuf:"bytes,18,opt,name=image,proto3" json:"image,omitempty"` Image string `protobuf:"bytes,18,opt,name=image,proto3" json:"image,omitempty"`
ArticleType string `protobuf:"bytes,19,opt,name=article_type,json=articleType,proto3" json:"article_type,omitempty"` Type string `protobuf:"bytes,19,opt,name=type,proto3" json:"type,omitempty"` // was article_type
SellerId string `protobuf:"bytes,20,opt,name=seller_id,json=sellerId,proto3" json:"seller_id,omitempty"` SellerId string `protobuf:"bytes,20,opt,name=sellerId,proto3" json:"sellerId,omitempty"` // was seller_id
SellerName string `protobuf:"bytes,21,opt,name=seller_name,json=sellerName,proto3" json:"seller_name,omitempty"` SellerName string `protobuf:"bytes,21,opt,name=sellerName,proto3" json:"sellerName,omitempty"` // was seller_name
Disclaimer string `protobuf:"bytes,22,opt,name=disclaimer,proto3" json:"disclaimer,omitempty"` Disclaimer string `protobuf:"bytes,22,opt,name=disclaimer,proto3" json:"disclaimer,omitempty"`
Outlet string `protobuf:"bytes,23,opt,name=outlet,proto3" json:"outlet,omitempty"` Outlet string `protobuf:"bytes,23,opt,name=outlet,proto3" json:"outlet,omitempty"`
StoreId string `protobuf:"bytes,24,opt,name=store_id,json=storeId,proto3" json:"store_id,omitempty"` StoreId string `protobuf:"bytes,24,opt,name=storeId,proto3" json:"storeId,omitempty"` // was store_id
Stock int32 `protobuf:"varint,25,opt,name=stock,proto3" json:"stock,omitempty"` Stock int32 `protobuf:"varint,25,opt,name=stock,proto3" json:"stock,omitempty"`
unknownFields protoimpl.UnknownFields unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache sizeCache protoimpl.SizeCache
@@ -1034,9 +1040,9 @@ func (x *CartItemState) GetId() int64 {
return 0 return 0
} }
func (x *CartItemState) GetSourceItemId() int64 { func (x *CartItemState) GetItemId() int64 {
if x != nil { if x != nil {
return x.SourceItemId return x.ItemId
} }
return 0 return 0
} }
@@ -1055,16 +1061,16 @@ func (x *CartItemState) GetName() string {
return "" return ""
} }
func (x *CartItemState) GetUnitPrice() int64 { func (x *CartItemState) GetPrice() int64 {
if x != nil { if x != nil {
return x.UnitPrice return x.Price
} }
return 0 return 0
} }
func (x *CartItemState) GetQuantity() int32 { func (x *CartItemState) GetQty() int32 {
if x != nil { if x != nil {
return x.Quantity return x.Qty
} }
return 0 return 0
} }
@@ -1153,9 +1159,9 @@ func (x *CartItemState) GetImage() string {
return "" return ""
} }
func (x *CartItemState) GetArticleType() string { func (x *CartItemState) GetType() string {
if x != nil { if x != nil {
return x.ArticleType return x.Type
} }
return "" return ""
} }
@@ -1207,8 +1213,8 @@ type DeliveryState struct {
Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
Provider string `protobuf:"bytes,2,opt,name=provider,proto3" json:"provider,omitempty"` Provider string `protobuf:"bytes,2,opt,name=provider,proto3" json:"provider,omitempty"`
Price int64 `protobuf:"varint,3,opt,name=price,proto3" json:"price,omitempty"` Price int64 `protobuf:"varint,3,opt,name=price,proto3" json:"price,omitempty"`
ItemIds []int64 `protobuf:"varint,4,rep,packed,name=item_ids,json=itemIds,proto3" json:"item_ids,omitempty"` Items []int64 `protobuf:"varint,4,rep,packed,name=items,proto3" json:"items,omitempty"` // was item_ids
PickupPoint *PickupPoint `protobuf:"bytes,5,opt,name=pickup_point,json=pickupPoint,proto3" json:"pickup_point,omitempty"` // Defined in messages.proto PickupPoint *PickupPoint `protobuf:"bytes,5,opt,name=pickupPoint,proto3" json:"pickupPoint,omitempty"` // was pickup_point
unknownFields protoimpl.UnknownFields unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache sizeCache protoimpl.SizeCache
} }
@@ -1264,9 +1270,9 @@ func (x *DeliveryState) GetPrice() int64 {
return 0 return 0
} }
func (x *DeliveryState) GetItemIds() []int64 { func (x *DeliveryState) GetItems() []int64 {
if x != nil { if x != nil {
return x.ItemIds return x.Items
} }
return nil return nil
} }
@@ -1348,58 +1354,64 @@ const file_cart_actor_proto_rawDesc = "" +
"\acart_id\x18\x01 \x01(\tR\x06cartId\x12)\n" + "\acart_id\x18\x01 \x01(\tR\x06cartId\x12)\n" +
"\x10client_timestamp\x18\x02 \x01(\x03R\x0fclientTimestamp\x120\n" + "\x10client_timestamp\x18\x02 \x01(\x03R\x0fclientTimestamp\x120\n" +
"\apayload\x18\n" + "\apayload\x18\n" +
" \x01(\v2\x16.messages.OrderCreatedR\apayload\"\xf1\x02\n" + " \x01(\v2\x16.messages.OrderCreatedR\apayload\"\x81\x03\n" +
"\tCartState\x12\x17\n" + "\tCartState\x12\x0e\n" +
"\acart_id\x18\x01 \x01(\tR\x06cartId\x12-\n" + "\x02id\x18\x01 \x01(\tR\x02id\x12-\n" +
"\x05items\x18\x02 \x03(\v2\x17.messages.CartItemStateR\x05items\x12\x1f\n" + "\x05items\x18\x02 \x03(\v2\x17.messages.CartItemStateR\x05items\x12\x1e\n" +
"\vtotal_price\x18\x03 \x01(\x03R\n" + "\n" +
"totalPrice\x12\x1b\n" + "totalPrice\x18\x03 \x01(\x03R\n" +
"\ttotal_tax\x18\x04 \x01(\x03R\btotalTax\x12%\n" + "totalPrice\x12\x1a\n" +
"\x0etotal_discount\x18\x05 \x01(\x03R\rtotalDiscount\x127\n" + "\btotalTax\x18\x04 \x01(\x03R\btotalTax\x12$\n" +
"\rtotalDiscount\x18\x05 \x01(\x03R\rtotalDiscount\x127\n" +
"\n" + "\n" +
"deliveries\x18\x06 \x03(\v2\x17.messages.DeliveryStateR\n" + "deliveries\x18\x06 \x03(\v2\x17.messages.DeliveryStateR\n" +
"deliveries\x12.\n" + "deliveries\x12,\n" +
"\x13payment_in_progress\x18\a \x01(\bR\x11paymentInProgress\x12'\n" + "\x11paymentInProgress\x18\a \x01(\bR\x11paymentInProgress\x12&\n" +
"\x0forder_reference\x18\b \x01(\tR\x0eorderReference\x12%\n" + "\x0eorderReference\x18\b \x01(\tR\x0eorderReference\x12$\n" +
"\x0epayment_status\x18\t \x01(\tR\rpaymentStatus\"\xcd\x05\n" + "\rpaymentStatus\x18\t \x01(\tR\rpaymentStatus\x12\x1e\n" +
"\rCartItemState\x12\x0e\n" +
"\x02id\x18\x01 \x01(\x03R\x02id\x12$\n" +
"\x0esource_item_id\x18\x02 \x01(\x03R\fsourceItemId\x12\x10\n" +
"\x03sku\x18\x03 \x01(\tR\x03sku\x12\x12\n" +
"\x04name\x18\x04 \x01(\tR\x04name\x12\x1d\n" +
"\n" + "\n" +
"unit_price\x18\x05 \x01(\x03R\tunitPrice\x12\x1a\n" + "processing\x18\n" +
"\bquantity\x18\x06 \x01(\x05R\bquantity\x12\x1f\n" + " \x01(\bR\n" +
"\vtotal_price\x18\a \x01(\x03R\n" + "processing\"\x95\x05\n" +
"totalPrice\x12\x1b\n" + "\rCartItemState\x12\x0e\n" +
"\ttotal_tax\x18\b \x01(\x03R\btotalTax\x12\x1b\n" + "\x02id\x18\x01 \x01(\x03R\x02id\x12\x16\n" +
"\torg_price\x18\t \x01(\x03R\borgPrice\x12\x19\n" + "\x06itemId\x18\x02 \x01(\x03R\x06itemId\x12\x10\n" +
"\btax_rate\x18\n" + "\x03sku\x18\x03 \x01(\tR\x03sku\x12\x12\n" +
" \x01(\x05R\ataxRate\x12%\n" + "\x04name\x18\x04 \x01(\tR\x04name\x12\x14\n" +
"\x0etotal_discount\x18\v \x01(\x03R\rtotalDiscount\x12\x14\n" + "\x05price\x18\x05 \x01(\x03R\x05price\x12\x10\n" +
"\x03qty\x18\x06 \x01(\x05R\x03qty\x12\x1e\n" +
"\n" +
"totalPrice\x18\a \x01(\x03R\n" +
"totalPrice\x12\x1a\n" +
"\btotalTax\x18\b \x01(\x03R\btotalTax\x12\x1a\n" +
"\borgPrice\x18\t \x01(\x03R\borgPrice\x12\x18\n" +
"\ataxRate\x18\n" +
" \x01(\x05R\ataxRate\x12$\n" +
"\rtotalDiscount\x18\v \x01(\x03R\rtotalDiscount\x12\x14\n" +
"\x05brand\x18\f \x01(\tR\x05brand\x12\x1a\n" + "\x05brand\x18\f \x01(\tR\x05brand\x12\x1a\n" +
"\bcategory\x18\r \x01(\tR\bcategory\x12\x1c\n" + "\bcategory\x18\r \x01(\tR\bcategory\x12\x1c\n" +
"\tcategory2\x18\x0e \x01(\tR\tcategory2\x12\x1c\n" + "\tcategory2\x18\x0e \x01(\tR\tcategory2\x12\x1c\n" +
"\tcategory3\x18\x0f \x01(\tR\tcategory3\x12\x1c\n" + "\tcategory3\x18\x0f \x01(\tR\tcategory3\x12\x1c\n" +
"\tcategory4\x18\x10 \x01(\tR\tcategory4\x12\x1c\n" + "\tcategory4\x18\x10 \x01(\tR\tcategory4\x12\x1c\n" +
"\tcategory5\x18\x11 \x01(\tR\tcategory5\x12\x14\n" + "\tcategory5\x18\x11 \x01(\tR\tcategory5\x12\x14\n" +
"\x05image\x18\x12 \x01(\tR\x05image\x12!\n" + "\x05image\x18\x12 \x01(\tR\x05image\x12\x12\n" +
"\farticle_type\x18\x13 \x01(\tR\varticleType\x12\x1b\n" + "\x04type\x18\x13 \x01(\tR\x04type\x12\x1a\n" +
"\tseller_id\x18\x14 \x01(\tR\bsellerId\x12\x1f\n" + "\bsellerId\x18\x14 \x01(\tR\bsellerId\x12\x1e\n" +
"\vseller_name\x18\x15 \x01(\tR\n" + "\n" +
"sellerName\x18\x15 \x01(\tR\n" +
"sellerName\x12\x1e\n" + "sellerName\x12\x1e\n" +
"\n" + "\n" +
"disclaimer\x18\x16 \x01(\tR\n" + "disclaimer\x18\x16 \x01(\tR\n" +
"disclaimer\x12\x16\n" + "disclaimer\x12\x16\n" +
"\x06outlet\x18\x17 \x01(\tR\x06outlet\x12\x19\n" + "\x06outlet\x18\x17 \x01(\tR\x06outlet\x12\x18\n" +
"\bstore_id\x18\x18 \x01(\tR\astoreId\x12\x14\n" + "\astoreId\x18\x18 \x01(\tR\astoreId\x12\x14\n" +
"\x05stock\x18\x19 \x01(\x05R\x05stock\"\xa6\x01\n" + "\x05stock\x18\x19 \x01(\x05R\x05stock\"\xa0\x01\n" +
"\rDeliveryState\x12\x0e\n" + "\rDeliveryState\x12\x0e\n" +
"\x02id\x18\x01 \x01(\x03R\x02id\x12\x1a\n" + "\x02id\x18\x01 \x01(\x03R\x02id\x12\x1a\n" +
"\bprovider\x18\x02 \x01(\tR\bprovider\x12\x14\n" + "\bprovider\x18\x02 \x01(\tR\bprovider\x12\x14\n" +
"\x05price\x18\x03 \x01(\x03R\x05price\x12\x19\n" + "\x05price\x18\x03 \x01(\x03R\x05price\x12\x14\n" +
"\bitem_ids\x18\x04 \x03(\x03R\aitemIds\x128\n" + "\x05items\x18\x04 \x03(\x03R\x05items\x127\n" +
"\fpickup_point\x18\x05 \x01(\v2\x15.messages.PickupPointR\vpickupPoint2\xed\x05\n" + "\vpickupPoint\x18\x05 \x01(\v2\x15.messages.PickupPointR\vpickupPoint2\xed\x05\n" +
"\tCartActor\x12F\n" + "\tCartActor\x12F\n" +
"\n" + "\n" +
"AddRequest\x12\x1b.messages.AddRequestRequest\x1a\x1b.messages.CartMutationReply\x12@\n" + "AddRequest\x12\x1b.messages.AddRequestRequest\x1a\x1b.messages.CartMutationReply\x12@\n" +
@@ -1471,7 +1483,7 @@ var file_cart_actor_proto_depIdxs = []int32{
25, // 11: messages.OrderCompletedRequest.payload:type_name -> messages.OrderCreated 25, // 11: messages.OrderCompletedRequest.payload:type_name -> messages.OrderCreated
14, // 12: messages.CartState.items:type_name -> messages.CartItemState 14, // 12: messages.CartState.items:type_name -> messages.CartItemState
15, // 13: messages.CartState.deliveries:type_name -> messages.DeliveryState 15, // 13: messages.CartState.deliveries:type_name -> messages.DeliveryState
26, // 14: messages.DeliveryState.pickup_point:type_name -> messages.PickupPoint 26, // 14: messages.DeliveryState.pickupPoint:type_name -> messages.PickupPoint
3, // 15: messages.CartActor.AddRequest:input_type -> messages.AddRequestRequest 3, // 15: messages.CartActor.AddRequest:input_type -> messages.AddRequestRequest
4, // 16: messages.CartActor.AddItem:input_type -> messages.AddItemRequest 4, // 16: messages.CartActor.AddItem:input_type -> messages.AddItemRequest
5, // 17: messages.CartActor.RemoveItem:input_type -> messages.RemoveItemRequest 5, // 17: messages.CartActor.RemoveItem:input_type -> messages.RemoveItemRequest


@@ -109,33 +109,32 @@ message OrderCompletedRequest {
	OrderCreated payload = 10;
}

-// -----------------------------------------------------------------------------
-// Cart state snapshot (unchanged from v1 except envelope removal context)
-// -----------------------------------------------------------------------------
+// Excerpt: updated messages for camelCase JSON output
message CartState {
-	string cart_id = 1;
+	string id = 1; // was cart_id
	repeated CartItemState items = 2;
-	int64 total_price = 3;
-	int64 total_tax = 4;
-	int64 total_discount = 5;
+	int64 totalPrice = 3;    // was total_price
+	int64 totalTax = 4;      // was total_tax
+	int64 totalDiscount = 5; // was total_discount
	repeated DeliveryState deliveries = 6;
-	bool payment_in_progress = 7;
-	string order_reference = 8;
-	string payment_status = 9;
+	bool paymentInProgress = 7; // was payment_in_progress
+	string orderReference = 8;  // was order_reference
+	string paymentStatus = 9;   // was payment_status
+	bool processing = 10;       // NEW (mirrors legacy CartGrain.processing)
}

message CartItemState {
	int64 id = 1;
-	int64 source_item_id = 2;
+	int64 itemId = 2; // was source_item_id
	string sku = 3;
	string name = 4;
-	int64 unit_price = 5;
-	int32 quantity = 6;
-	int64 total_price = 7;
-	int64 total_tax = 8;
-	int64 org_price = 9;
-	int32 tax_rate = 10;
-	int64 total_discount = 11;
+	int64 price = 5;          // was unit_price
+	int32 qty = 6;            // was quantity
+	int64 totalPrice = 7;     // was total_price
+	int64 totalTax = 8;       // was total_tax
+	int64 orgPrice = 9;       // was org_price
+	int32 taxRate = 10;       // was tax_rate
+	int64 totalDiscount = 11;
	string brand = 12;
	string category = 13;
	string category2 = 14;
@@ -143,12 +142,12 @@ message CartItemState {
	string category4 = 16;
	string category5 = 17;
	string image = 18;
-	string article_type = 19;
-	string seller_id = 20;
-	string seller_name = 21;
+	string type = 19;       // was article_type
+	string sellerId = 20;   // was seller_id
+	string sellerName = 21; // was seller_name
	string disclaimer = 22;
	string outlet = 23;
-	string store_id = 24;
+	string storeId = 24; // was store_id
	int32 stock = 25;
}
@@ -156,8 +155,8 @@ message DeliveryState {
	int64 id = 1;
	string provider = 2;
	int64 price = 3;
-	repeated int64 item_ids = 4;
-	PickupPoint pickup_point = 5; // Defined in messages.proto
+	repeated int64 items = 4;    // was item_ids
+	PickupPoint pickupPoint = 5; // was pickup_point
}

// (CheckoutRequest / CheckoutReply removed - checkout handled at HTTP layer)
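A hedged sketch of the resulting wire shape, assuming the regenerated Go types above and standard protojson marshaling (note that int64 fields serialize as JSON strings); the values are made up.

// json_shape_sketch.go — illustrative only.
package main

import (
	"fmt"
	"log"

	messages "git.tornberg.me/go-cart-actor/proto"
	"google.golang.org/protobuf/encoding/protojson"
)

func exampleCamelCaseJSON() {
	state := &messages.CartState{
		Id:         "abc123",
		TotalPrice: 1500,
		TotalTax:   300,
		Processing: true,
		Items: []*messages.CartItemState{
			{Id: 1, ItemId: 42, Sku: "sku-1", Name: "Example", Price: 1500, Qty: 1},
		},
	}

	out, err := protojson.Marshal(state)
	if err != nil {
		log.Fatalf("marshal failed: %v", err)
	}
	// With the renamed fields the default protojson names are already camelCase,
	// e.g. {"id":"abc123","items":[...],"totalPrice":"1500","totalTax":"300","processing":true}
	fmt.Println(string(out))
}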


@@ -246,60 +246,7 @@ func (x *CartIdsReply) GetCartIds() []string {
	return nil
}

-// OwnerChangeRequest notifies peers that ownership of a cart moved (or is moving) to new_host.
+// OwnerChangeAck retained as response type for Closing RPC (ConfirmOwner removed).
type OwnerChangeRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
CartId string `protobuf:"bytes,1,opt,name=cart_id,json=cartId,proto3" json:"cart_id,omitempty"`
NewHost string `protobuf:"bytes,2,opt,name=new_host,json=newHost,proto3" json:"new_host,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *OwnerChangeRequest) Reset() {
*x = OwnerChangeRequest{}
mi := &file_control_plane_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *OwnerChangeRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*OwnerChangeRequest) ProtoMessage() {}
func (x *OwnerChangeRequest) ProtoReflect() protoreflect.Message {
mi := &file_control_plane_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use OwnerChangeRequest.ProtoReflect.Descriptor instead.
func (*OwnerChangeRequest) Descriptor() ([]byte, []int) {
return file_control_plane_proto_rawDescGZIP(), []int{5}
}
func (x *OwnerChangeRequest) GetCartId() string {
if x != nil {
return x.CartId
}
return ""
}
func (x *OwnerChangeRequest) GetNewHost() string {
if x != nil {
return x.NewHost
}
return ""
}
// OwnerChangeAck indicates acceptance or rejection of an ownership change.
type OwnerChangeAck struct { type OwnerChangeAck struct {
state protoimpl.MessageState `protogen:"open.v1"` state protoimpl.MessageState `protogen:"open.v1"`
Accepted bool `protobuf:"varint,1,opt,name=accepted,proto3" json:"accepted,omitempty"` Accepted bool `protobuf:"varint,1,opt,name=accepted,proto3" json:"accepted,omitempty"`
@@ -310,7 +257,7 @@ type OwnerChangeAck struct {
func (x *OwnerChangeAck) Reset() { func (x *OwnerChangeAck) Reset() {
*x = OwnerChangeAck{} *x = OwnerChangeAck{}
mi := &file_control_plane_proto_msgTypes[6] mi := &file_control_plane_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi) ms.StoreMessageInfo(mi)
} }
@@ -322,7 +269,7 @@ func (x *OwnerChangeAck) String() string {
func (*OwnerChangeAck) ProtoMessage() {} func (*OwnerChangeAck) ProtoMessage() {}
func (x *OwnerChangeAck) ProtoReflect() protoreflect.Message { func (x *OwnerChangeAck) ProtoReflect() protoreflect.Message {
mi := &file_control_plane_proto_msgTypes[6] mi := &file_control_plane_proto_msgTypes[5]
if x != nil { if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil { if ms.LoadMessageInfo() == nil {
@@ -335,7 +282,7 @@ func (x *OwnerChangeAck) ProtoReflect() protoreflect.Message {
// Deprecated: Use OwnerChangeAck.ProtoReflect.Descriptor instead. // Deprecated: Use OwnerChangeAck.ProtoReflect.Descriptor instead.
func (*OwnerChangeAck) Descriptor() ([]byte, []int) { func (*OwnerChangeAck) Descriptor() ([]byte, []int) {
return file_control_plane_proto_rawDescGZIP(), []int{6} return file_control_plane_proto_rawDescGZIP(), []int{5}
} }
func (x *OwnerChangeAck) GetAccepted() bool { func (x *OwnerChangeAck) GetAccepted() bool {
@@ -362,7 +309,7 @@ type ClosingNotice struct {
func (x *ClosingNotice) Reset() { func (x *ClosingNotice) Reset() {
*x = ClosingNotice{} *x = ClosingNotice{}
mi := &file_control_plane_proto_msgTypes[7] mi := &file_control_plane_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi) ms.StoreMessageInfo(mi)
} }
@@ -374,7 +321,7 @@ func (x *ClosingNotice) String() string {
func (*ClosingNotice) ProtoMessage() {} func (*ClosingNotice) ProtoMessage() {}
func (x *ClosingNotice) ProtoReflect() protoreflect.Message { func (x *ClosingNotice) ProtoReflect() protoreflect.Message {
mi := &file_control_plane_proto_msgTypes[7] mi := &file_control_plane_proto_msgTypes[6]
if x != nil { if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil { if ms.LoadMessageInfo() == nil {
@@ -387,7 +334,7 @@ func (x *ClosingNotice) ProtoReflect() protoreflect.Message {
// Deprecated: Use ClosingNotice.ProtoReflect.Descriptor instead. // Deprecated: Use ClosingNotice.ProtoReflect.Descriptor instead.
func (*ClosingNotice) Descriptor() ([]byte, []int) { func (*ClosingNotice) Descriptor() ([]byte, []int) {
return file_control_plane_proto_rawDescGZIP(), []int{7} return file_control_plane_proto_rawDescGZIP(), []int{6}
} }
func (x *ClosingNotice) GetHost() string { func (x *ClosingNotice) GetHost() string {
@@ -412,21 +359,17 @@ const file_control_plane_proto_rawDesc = "" +
"\x0eNegotiateReply\x12\x14\n" + "\x0eNegotiateReply\x12\x14\n" +
"\x05hosts\x18\x01 \x03(\tR\x05hosts\")\n" + "\x05hosts\x18\x01 \x03(\tR\x05hosts\")\n" +
"\fCartIdsReply\x12\x19\n" + "\fCartIdsReply\x12\x19\n" +
"\bcart_ids\x18\x01 \x03(\tR\acartIds\"H\n" + "\bcart_ids\x18\x01 \x03(\tR\acartIds\"F\n" +
"\x12OwnerChangeRequest\x12\x17\n" +
"\acart_id\x18\x01 \x01(\tR\x06cartId\x12\x19\n" +
"\bnew_host\x18\x02 \x01(\tR\anewHost\"F\n" +
"\x0eOwnerChangeAck\x12\x1a\n" + "\x0eOwnerChangeAck\x12\x1a\n" +
"\baccepted\x18\x01 \x01(\bR\baccepted\x12\x18\n" + "\baccepted\x18\x01 \x01(\bR\baccepted\x12\x18\n" +
"\amessage\x18\x02 \x01(\tR\amessage\"#\n" + "\amessage\x18\x02 \x01(\tR\amessage\"#\n" +
"\rClosingNotice\x12\x12\n" + "\rClosingNotice\x12\x12\n" +
"\x04host\x18\x01 \x01(\tR\x04host2\xbc\x02\n" + "\x04host\x18\x01 \x01(\tR\x04host2\xf4\x01\n" +
"\fControlPlane\x12,\n" + "\fControlPlane\x12,\n" +
"\x04Ping\x12\x0f.messages.Empty\x1a\x13.messages.PingReply\x12A\n" + "\x04Ping\x12\x0f.messages.Empty\x1a\x13.messages.PingReply\x12A\n" +
"\tNegotiate\x12\x1a.messages.NegotiateRequest\x1a\x18.messages.NegotiateReply\x125\n" + "\tNegotiate\x12\x1a.messages.NegotiateRequest\x1a\x18.messages.NegotiateReply\x125\n" +
"\n" + "\n" +
"GetCartIds\x12\x0f.messages.Empty\x1a\x16.messages.CartIdsReply\x12F\n" + "GetCartIds\x12\x0f.messages.Empty\x1a\x16.messages.CartIdsReply\x12<\n" +
"\fConfirmOwner\x12\x1c.messages.OwnerChangeRequest\x1a\x18.messages.OwnerChangeAck\x12<\n" +
"\aClosing\x12\x17.messages.ClosingNotice\x1a\x18.messages.OwnerChangeAckB.Z,git.tornberg.me/go-cart-actor/proto;messagesb\x06proto3" "\aClosing\x12\x17.messages.ClosingNotice\x1a\x18.messages.OwnerChangeAckB.Z,git.tornberg.me/go-cart-actor/proto;messagesb\x06proto3"
var ( var (
@@ -441,30 +384,27 @@ func file_control_plane_proto_rawDescGZIP() []byte {
return file_control_plane_proto_rawDescData return file_control_plane_proto_rawDescData
} }
var file_control_plane_proto_msgTypes = make([]protoimpl.MessageInfo, 8) var file_control_plane_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
var file_control_plane_proto_goTypes = []any{ var file_control_plane_proto_goTypes = []any{
(*Empty)(nil), // 0: messages.Empty (*Empty)(nil), // 0: messages.Empty
(*PingReply)(nil), // 1: messages.PingReply (*PingReply)(nil), // 1: messages.PingReply
(*NegotiateRequest)(nil), // 2: messages.NegotiateRequest (*NegotiateRequest)(nil), // 2: messages.NegotiateRequest
(*NegotiateReply)(nil), // 3: messages.NegotiateReply (*NegotiateReply)(nil), // 3: messages.NegotiateReply
(*CartIdsReply)(nil), // 4: messages.CartIdsReply (*CartIdsReply)(nil), // 4: messages.CartIdsReply
(*OwnerChangeRequest)(nil), // 5: messages.OwnerChangeRequest (*OwnerChangeAck)(nil), // 5: messages.OwnerChangeAck
(*OwnerChangeAck)(nil), // 6: messages.OwnerChangeAck (*ClosingNotice)(nil), // 6: messages.ClosingNotice
(*ClosingNotice)(nil), // 7: messages.ClosingNotice
} }
var file_control_plane_proto_depIdxs = []int32{ var file_control_plane_proto_depIdxs = []int32{
0, // 0: messages.ControlPlane.Ping:input_type -> messages.Empty 0, // 0: messages.ControlPlane.Ping:input_type -> messages.Empty
2, // 1: messages.ControlPlane.Negotiate:input_type -> messages.NegotiateRequest 2, // 1: messages.ControlPlane.Negotiate:input_type -> messages.NegotiateRequest
0, // 2: messages.ControlPlane.GetCartIds:input_type -> messages.Empty 0, // 2: messages.ControlPlane.GetCartIds:input_type -> messages.Empty
5, // 3: messages.ControlPlane.ConfirmOwner:input_type -> messages.OwnerChangeRequest 6, // 3: messages.ControlPlane.Closing:input_type -> messages.ClosingNotice
7, // 4: messages.ControlPlane.Closing:input_type -> messages.ClosingNotice 1, // 4: messages.ControlPlane.Ping:output_type -> messages.PingReply
1, // 5: messages.ControlPlane.Ping:output_type -> messages.PingReply 3, // 5: messages.ControlPlane.Negotiate:output_type -> messages.NegotiateReply
3, // 6: messages.ControlPlane.Negotiate:output_type -> messages.NegotiateReply 4, // 6: messages.ControlPlane.GetCartIds:output_type -> messages.CartIdsReply
4, // 7: messages.ControlPlane.GetCartIds:output_type -> messages.CartIdsReply 5, // 7: messages.ControlPlane.Closing:output_type -> messages.OwnerChangeAck
6, // 8: messages.ControlPlane.ConfirmOwner:output_type -> messages.OwnerChangeAck 4, // [4:8] is the sub-list for method output_type
6, // 9: messages.ControlPlane.Closing:output_type -> messages.OwnerChangeAck 0, // [0:4] is the sub-list for method input_type
5, // [5:10] is the sub-list for method output_type
0, // [0:5] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name 0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee 0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name 0, // [0:0] is the sub-list for field type_name
@@ -481,7 +421,7 @@ func file_control_plane_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(), GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_control_plane_proto_rawDesc), len(file_control_plane_proto_rawDesc)), RawDescriptor: unsafe.Slice(unsafe.StringData(file_control_plane_proto_rawDesc), len(file_control_plane_proto_rawDesc)),
NumEnums: 0, NumEnums: 0,
NumMessages: 8, NumMessages: 7,
NumExtensions: 0, NumExtensions: 0,
NumServices: 1, NumServices: 1,
}, },


@@ -11,7 +11,7 @@ option go_package = "git.tornberg.me/go-cart-actor/proto;messages";
// Responsibilities: // Responsibilities:
// - Liveness (Ping) // - Liveness (Ping)
// - Membership negotiation (Negotiate) // - Membership negotiation (Negotiate)
// - Cart ownership change propagation (ConfirmOwner) // - Deterministic ring-based ownership (ConfirmOwner RPC removed)
// - Cart ID listing for remote grain spawning (GetCartIds) // - Cart ID listing for remote grain spawning (GetCartIds)
// - Graceful shutdown notifications (Closing) // - Graceful shutdown notifications (Closing)
// No authentication / TLS is defined initially (can be added later). // No authentication / TLS is defined initially (can be added later).
@@ -41,13 +41,7 @@ message CartIdsReply {
repeated string cart_ids = 1; repeated string cart_ids = 1;
} }
// OwnerChangeRequest notifies peers that ownership of a cart moved (or is moving) to new_host. // OwnerChangeAck retained as response type for Closing RPC (ConfirmOwner removed).
message OwnerChangeRequest {
string cart_id = 1;
string new_host = 2;
}
// OwnerChangeAck indicates acceptance or rejection of an ownership change.
message OwnerChangeAck { message OwnerChangeAck {
bool accepted = 1; bool accepted = 1;
string message = 2; string message = 2;
@@ -69,8 +63,7 @@ service ControlPlane {
// GetCartIds lists currently owned cart IDs on this node. // GetCartIds lists currently owned cart IDs on this node.
rpc GetCartIds(Empty) returns (CartIdsReply); rpc GetCartIds(Empty) returns (CartIdsReply);
// ConfirmOwner announces/asks peers to acknowledge ownership transfer. // ConfirmOwner RPC removed (was legacy ownership acknowledgement; ring-based ownership now authoritative)
rpc ConfirmOwner(OwnerChangeRequest) returns (OwnerChangeAck);
// Closing announces graceful shutdown so peers can proactively adjust. // Closing announces graceful shutdown so peers can proactively adjust.
rpc Closing(ClosingNotice) returns (OwnerChangeAck); rpc Closing(ClosingNotice) returns (OwnerChangeAck);
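For orientation, here is a minimal client-side sketch of the surviving Closing RPC. The dial setup and the generated NewControlPlaneClient constructor are assumed (standard protoc-gen-go-grpc output); only the ClosingNotice and OwnerChangeAck shapes come from the proto definition above.

// closing_client_sketch.go -- illustrative only, not part of this commit.
package main

import (
	"context"
	"log"
	"time"

	proto "git.tornberg.me/go-cart-actor/proto"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// notifyPeerClosing announces a graceful shutdown of selfHost to one peer.
func notifyPeerClosing(peerAddr, selfHost string) error {
	conn, err := grpc.NewClient(peerAddr, grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		return err
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	ack, err := proto.NewControlPlaneClient(conn).Closing(ctx, &proto.ClosingNotice{Host: selfHost})
	if err != nil {
		return err
	}
	log.Printf("peer %s acknowledged closing: accepted=%v message=%q", peerAddr, ack.GetAccepted(), ack.GetMessage())
	return nil
}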


@@ -22,7 +22,6 @@ const (
ControlPlane_Ping_FullMethodName = "/messages.ControlPlane/Ping" ControlPlane_Ping_FullMethodName = "/messages.ControlPlane/Ping"
ControlPlane_Negotiate_FullMethodName = "/messages.ControlPlane/Negotiate" ControlPlane_Negotiate_FullMethodName = "/messages.ControlPlane/Negotiate"
ControlPlane_GetCartIds_FullMethodName = "/messages.ControlPlane/GetCartIds" ControlPlane_GetCartIds_FullMethodName = "/messages.ControlPlane/GetCartIds"
ControlPlane_ConfirmOwner_FullMethodName = "/messages.ControlPlane/ConfirmOwner"
ControlPlane_Closing_FullMethodName = "/messages.ControlPlane/Closing" ControlPlane_Closing_FullMethodName = "/messages.ControlPlane/Closing"
) )
@@ -38,8 +37,6 @@ type ControlPlaneClient interface {
Negotiate(ctx context.Context, in *NegotiateRequest, opts ...grpc.CallOption) (*NegotiateReply, error) Negotiate(ctx context.Context, in *NegotiateRequest, opts ...grpc.CallOption) (*NegotiateReply, error)
// GetCartIds lists currently owned cart IDs on this node. // GetCartIds lists currently owned cart IDs on this node.
GetCartIds(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*CartIdsReply, error) GetCartIds(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*CartIdsReply, error)
// ConfirmOwner announces/asks peers to acknowledge ownership transfer.
ConfirmOwner(ctx context.Context, in *OwnerChangeRequest, opts ...grpc.CallOption) (*OwnerChangeAck, error)
// Closing announces graceful shutdown so peers can proactively adjust. // Closing announces graceful shutdown so peers can proactively adjust.
Closing(ctx context.Context, in *ClosingNotice, opts ...grpc.CallOption) (*OwnerChangeAck, error) Closing(ctx context.Context, in *ClosingNotice, opts ...grpc.CallOption) (*OwnerChangeAck, error)
} }
@@ -82,16 +79,6 @@ func (c *controlPlaneClient) GetCartIds(ctx context.Context, in *Empty, opts ...
return out, nil return out, nil
} }
func (c *controlPlaneClient) ConfirmOwner(ctx context.Context, in *OwnerChangeRequest, opts ...grpc.CallOption) (*OwnerChangeAck, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(OwnerChangeAck)
err := c.cc.Invoke(ctx, ControlPlane_ConfirmOwner_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *controlPlaneClient) Closing(ctx context.Context, in *ClosingNotice, opts ...grpc.CallOption) (*OwnerChangeAck, error) { func (c *controlPlaneClient) Closing(ctx context.Context, in *ClosingNotice, opts ...grpc.CallOption) (*OwnerChangeAck, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(OwnerChangeAck) out := new(OwnerChangeAck)
@@ -114,8 +101,6 @@ type ControlPlaneServer interface {
Negotiate(context.Context, *NegotiateRequest) (*NegotiateReply, error) Negotiate(context.Context, *NegotiateRequest) (*NegotiateReply, error)
// GetCartIds lists currently owned cart IDs on this node. // GetCartIds lists currently owned cart IDs on this node.
GetCartIds(context.Context, *Empty) (*CartIdsReply, error) GetCartIds(context.Context, *Empty) (*CartIdsReply, error)
// ConfirmOwner announces/asks peers to acknowledge ownership transfer.
ConfirmOwner(context.Context, *OwnerChangeRequest) (*OwnerChangeAck, error)
// Closing announces graceful shutdown so peers can proactively adjust. // Closing announces graceful shutdown so peers can proactively adjust.
Closing(context.Context, *ClosingNotice) (*OwnerChangeAck, error) Closing(context.Context, *ClosingNotice) (*OwnerChangeAck, error)
mustEmbedUnimplementedControlPlaneServer() mustEmbedUnimplementedControlPlaneServer()
@@ -137,9 +122,6 @@ func (UnimplementedControlPlaneServer) Negotiate(context.Context, *NegotiateRequ
func (UnimplementedControlPlaneServer) GetCartIds(context.Context, *Empty) (*CartIdsReply, error) { func (UnimplementedControlPlaneServer) GetCartIds(context.Context, *Empty) (*CartIdsReply, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetCartIds not implemented") return nil, status.Errorf(codes.Unimplemented, "method GetCartIds not implemented")
} }
func (UnimplementedControlPlaneServer) ConfirmOwner(context.Context, *OwnerChangeRequest) (*OwnerChangeAck, error) {
return nil, status.Errorf(codes.Unimplemented, "method ConfirmOwner not implemented")
}
func (UnimplementedControlPlaneServer) Closing(context.Context, *ClosingNotice) (*OwnerChangeAck, error) { func (UnimplementedControlPlaneServer) Closing(context.Context, *ClosingNotice) (*OwnerChangeAck, error) {
return nil, status.Errorf(codes.Unimplemented, "method Closing not implemented") return nil, status.Errorf(codes.Unimplemented, "method Closing not implemented")
} }
@@ -218,24 +200,6 @@ func _ControlPlane_GetCartIds_Handler(srv interface{}, ctx context.Context, dec
return interceptor(ctx, in, info, handler) return interceptor(ctx, in, info, handler)
} }
func _ControlPlane_ConfirmOwner_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(OwnerChangeRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ControlPlaneServer).ConfirmOwner(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: ControlPlane_ConfirmOwner_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControlPlaneServer).ConfirmOwner(ctx, req.(*OwnerChangeRequest))
}
return interceptor(ctx, in, info, handler)
}
func _ControlPlane_Closing_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { func _ControlPlane_Closing_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ClosingNotice) in := new(ClosingNotice)
if err := dec(in); err != nil { if err := dec(in); err != nil {
@@ -273,10 +237,6 @@ var ControlPlane_ServiceDesc = grpc.ServiceDesc{
MethodName: "GetCartIds", MethodName: "GetCartIds",
Handler: _ControlPlane_GetCartIds_Handler, Handler: _ControlPlane_GetCartIds_Handler,
}, },
{
MethodName: "ConfirmOwner",
Handler: _ControlPlane_ConfirmOwner_Handler,
},
{ {
MethodName: "Closing", MethodName: "Closing",
Handler: _ControlPlane_Closing_Handler, Handler: _ControlPlane_Closing_Handler,
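The generated stubs above only declare the ControlPlaneServer interface. A hypothetical implementation of the Closing handler might look like the sketch below; the concrete type name and the onPeerClosing callback are assumptions, not code from this repository.

// control_plane_server_sketch.go -- illustrative server-side sketch.
package main

import (
	"context"

	proto "git.tornberg.me/go-cart-actor/proto"
)

// controlPlaneServer embeds UnimplementedControlPlaneServer to satisfy
// mustEmbedUnimplementedControlPlaneServer and stay forward compatible.
type controlPlaneServer struct {
	proto.UnimplementedControlPlaneServer
	onPeerClosing func(host string) // assumed hook, e.g. wired to SyncedPool.RemoveHost
}

// Closing acknowledges a peer's graceful shutdown notice.
func (s *controlPlaneServer) Closing(ctx context.Context, in *proto.ClosingNotice) (*proto.OwnerChangeAck, error) {
	if s.onPeerClosing != nil {
		s.onPeerClosing(in.GetHost())
	}
	return &proto.OwnerChangeAck{Accepted: true, Message: "closing noted"}, nil
}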


@@ -184,7 +184,7 @@ func (g *RemoteGrainGRPC) Apply(content interface{}, isReplay bool) (*CartGrain,
} }
// Reconstruct a lightweight CartGrain (only fields we expose internally) // Reconstruct a lightweight CartGrain (only fields we expose internally)
grain := &CartGrain{ grain := &CartGrain{
Id: ToCartId(state.CartId), Id: ToCartId(state.Id),
TotalPrice: state.TotalPrice, TotalPrice: state.TotalPrice,
TotalTax: state.TotalTax, TotalTax: state.TotalTax,
TotalDiscount: state.TotalDiscount, TotalDiscount: state.TotalDiscount,
@@ -201,11 +201,11 @@ func (g *RemoteGrainGRPC) Apply(content interface{}, isReplay bool) (*CartGrain,
storeId := toPtr(it.StoreId) storeId := toPtr(it.StoreId)
grain.Items = append(grain.Items, &CartItem{ grain.Items = append(grain.Items, &CartItem{
Id: int(it.Id), Id: int(it.Id),
ItemId: int(it.SourceItemId), ItemId: int(it.ItemId),
Sku: it.Sku, Sku: it.Sku,
Name: it.Name, Name: it.Name,
Price: it.UnitPrice, Price: it.Price,
Quantity: int(it.Quantity), Quantity: int(it.Qty),
TotalPrice: it.TotalPrice, TotalPrice: it.TotalPrice,
TotalTax: it.TotalTax, TotalTax: it.TotalTax,
OrgPrice: it.OrgPrice, OrgPrice: it.OrgPrice,
@@ -217,7 +217,7 @@ func (g *RemoteGrainGRPC) Apply(content interface{}, isReplay bool) (*CartGrain,
Category4: it.Category4, Category4: it.Category4,
Category5: it.Category5, Category5: it.Category5,
Image: it.Image, Image: it.Image,
ArticleType: it.ArticleType, ArticleType: it.Type,
SellerId: it.SellerId, SellerId: it.SellerId,
SellerName: it.SellerName, SellerName: it.SellerName,
Disclaimer: it.Disclaimer, Disclaimer: it.Disclaimer,
@@ -231,8 +231,8 @@ func (g *RemoteGrainGRPC) Apply(content interface{}, isReplay bool) (*CartGrain,
if d == nil { if d == nil {
continue continue
} }
intIds := make([]int, 0, len(d.ItemIds)) intIds := make([]int, 0, len(d.Items))
for _, id := range d.ItemIds { for _, id := range d.Items {
intIds = append(intIds, int(id)) intIds = append(intIds, int(id))
} }
grain.Deliveries = append(grain.Deliveries, &CartDelivery{ grain.Deliveries = append(grain.Deliveries, &CartDelivery{
@@ -266,7 +266,7 @@ func (g *RemoteGrainGRPC) GetCurrentState() (*CartGrain, error) {
return nil, fmt.Errorf("state reply missing state on success") return nil, fmt.Errorf("state reply missing state on success")
} }
grain := &CartGrain{ grain := &CartGrain{
Id: ToCartId(state.CartId), Id: ToCartId(state.Id),
TotalPrice: state.TotalPrice, TotalPrice: state.TotalPrice,
TotalTax: state.TotalTax, TotalTax: state.TotalTax,
TotalDiscount: state.TotalDiscount, TotalDiscount: state.TotalDiscount,
@@ -282,11 +282,11 @@ func (g *RemoteGrainGRPC) GetCurrentState() (*CartGrain, error) {
storeId := toPtr(it.StoreId) storeId := toPtr(it.StoreId)
grain.Items = append(grain.Items, &CartItem{ grain.Items = append(grain.Items, &CartItem{
Id: int(it.Id), Id: int(it.Id),
ItemId: int(it.SourceItemId), ItemId: int(it.ItemId),
Sku: it.Sku, Sku: it.Sku,
Name: it.Name, Name: it.Name,
Price: it.UnitPrice, Price: it.Price,
Quantity: int(it.Quantity), Quantity: int(it.Qty),
TotalPrice: it.TotalPrice, TotalPrice: it.TotalPrice,
TotalTax: it.TotalTax, TotalTax: it.TotalTax,
OrgPrice: it.OrgPrice, OrgPrice: it.OrgPrice,
@@ -298,7 +298,7 @@ func (g *RemoteGrainGRPC) GetCurrentState() (*CartGrain, error) {
Category4: it.Category4, Category4: it.Category4,
Category5: it.Category5, Category5: it.Category5,
Image: it.Image, Image: it.Image,
ArticleType: it.ArticleType, ArticleType: it.Type,
SellerId: it.SellerId, SellerId: it.SellerId,
SellerName: it.SellerName, SellerName: it.SellerName,
Disclaimer: it.Disclaimer, Disclaimer: it.Disclaimer,
@@ -311,8 +311,8 @@ func (g *RemoteGrainGRPC) GetCurrentState() (*CartGrain, error) {
if d == nil { if d == nil {
continue continue
} }
intIds := make([]int, 0, len(d.ItemIds)) intIds := make([]int, 0, len(d.Items))
for _, id := range d.ItemIds { for _, id := range d.Items {
intIds = append(intIds, int(id)) intIds = append(intIds, int(id))
} }
grain.Deliveries = append(grain.Deliveries, &CartDelivery{ grain.Deliveries = append(grain.Deliveries, &CartDelivery{

ring.go (new file, 344 lines)

@@ -0,0 +1,344 @@
package main
import (
"encoding/binary"
"fmt"
"hash/fnv"
"sort"
"strings"
"sync"
)
// ring.go
//
// Consistent hashing ring skeleton for future integration.
// --------------------------------------------------------
// This file introduces a minimal, allocation-light consistent hashing structure
// intended to replace per-cart ownership negotiation. It focuses on:
// * Deterministic lookup: O(log V) via binary search
// * Even(ish) distribution using virtual nodes (vnodes)
// * Epoch / fingerprint tracking to detect membership drift
//
// NOT YET WIRED:
// * SyncedPool integration (ownerForCart, lazy migration)
// * Replication factor > 1
// * Persistent state migration
//
// Safe to import now; unused until explicit integration code is added.
//
// Design Notes
// ------------
// - Hosts contribute `vnodesPerHost` virtual nodes. Higher counts smooth
// distribution at cost of memory (V = hosts * vnodesPerHost).
// - Hash of vnode = FNV1a64(host + "#" + index). For improved quality you
// can swap in xxhash or siphash later without changing the API (but doing so
// will reshuffle ownership).
// - Cart ownership lookup uses cartID.Raw() when available (uniform 64-bit
// space) and otherwise falls back to hashing string forms (legacy).
// - Epoch is monotonically increasing; consumers can fence stale results.
//
// Future Extensions
// -----------------
// - Weighted hosts (proportionally more vnodes).
// - Replication: LookupN(h, n) to return primary + replicas.
// - Streaming / diff-based ring updates (gossip).
// - Hash function injection for deterministic test scenarios.
//
// ---------------------------------------------------------------------------
// Vnode represents a single virtual node position on the ring.
type Vnode struct {
Hash uint64 // position on the ring
Host string // physical host owning this vnode
Index int // per-host vnode index (0..vnodesPerHost-1)
}
// Ring is an immutable consistent hash ring snapshot.
type Ring struct {
Epoch uint64
Vnodes []Vnode // sorted by Hash
hosts []string
fingerprint uint64 // membership fingerprint (order-independent)
}
// RingBuilder accumulates parameters to construct a Ring.
type RingBuilder struct {
epoch uint64
vnodesPerHost int
hosts []string
}
// NewRingBuilder creates a builder with defaults.
func NewRingBuilder() *RingBuilder {
return &RingBuilder{
vnodesPerHost: 64, // a reasonable default for small clusters
}
}
func (b *RingBuilder) WithEpoch(e uint64) *RingBuilder {
b.epoch = e
return b
}
func (b *RingBuilder) WithVnodesPerHost(n int) *RingBuilder {
if n > 0 {
b.vnodesPerHost = n
}
return b
}
func (b *RingBuilder) WithHosts(hosts []string) *RingBuilder {
uniq := make(map[string]struct{}, len(hosts))
out := make([]string, 0, len(hosts))
for _, h := range hosts {
h = strings.TrimSpace(h)
if h == "" {
continue
}
if _, ok := uniq[h]; ok {
continue
}
uniq[h] = struct{}{}
out = append(out, h)
}
sort.Strings(out)
b.hosts = out
return b
}
func (b *RingBuilder) Build() *Ring {
if len(b.hosts) == 0 {
return &Ring{
Epoch: b.epoch,
Vnodes: nil,
hosts: nil,
fingerprint: 0,
}
}
totalVnodes := len(b.hosts) * b.vnodesPerHost
vnodes := make([]Vnode, 0, totalVnodes)
for _, host := range b.hosts {
for i := 0; i < b.vnodesPerHost; i++ {
h := hashVnode(host, i)
vnodes = append(vnodes, Vnode{
Hash: h,
Host: host,
Index: i,
})
}
}
sort.Slice(vnodes, func(i, j int) bool {
if vnodes[i].Hash == vnodes[j].Hash {
// Tie-break deterministically by host then index to avoid instability
if vnodes[i].Host == vnodes[j].Host {
return vnodes[i].Index < vnodes[j].Index
}
return vnodes[i].Host < vnodes[j].Host
}
return vnodes[i].Hash < vnodes[j].Hash
})
fp := fingerprintHosts(b.hosts)
return &Ring{
Epoch: b.epoch,
Vnodes: vnodes,
hosts: append([]string(nil), b.hosts...),
fingerprint: fp,
}
}
// Hosts returns a copy of the host list (sorted).
func (r *Ring) Hosts() []string {
if len(r.hosts) == 0 {
return nil
}
cp := make([]string, len(r.hosts))
copy(cp, r.hosts)
return cp
}
// Fingerprint returns a hash representing the unordered membership set.
func (r *Ring) Fingerprint() uint64 {
return r.fingerprint
}
// Empty indicates ring has no vnodes.
func (r *Ring) Empty() bool {
return len(r.Vnodes) == 0
}
// Lookup returns the vnode owning a given hash value.
func (r *Ring) Lookup(h uint64) Vnode {
if len(r.Vnodes) == 0 {
return Vnode{}
}
// Binary search: first position with Hash >= h
i := sort.Search(len(r.Vnodes), func(i int) bool {
return r.Vnodes[i].Hash >= h
})
if i == len(r.Vnodes) {
return r.Vnodes[0]
}
return r.Vnodes[i]
}
// LookupID selects owner vnode for a CartID (fast path).
func (r *Ring) LookupID(id CartID) Vnode {
return r.Lookup(id.Raw())
}
// LookupString hashes an arbitrary string and looks up owner.
func (r *Ring) LookupString(s string) Vnode {
return r.Lookup(hashKeyString(s))
}
// LookupN returns up to n distinct host vnodes in ring order
// starting from the primary owner of hash h (for replication).
func (r *Ring) LookupN(h uint64, n int) []Vnode {
if n <= 0 || len(r.Vnodes) == 0 {
return nil
}
if n > len(r.hosts) {
n = len(r.hosts)
}
owners := make([]Vnode, 0, n)
seen := make(map[string]struct{}, n)
start := r.Lookup(h)
// Find the index of the primary vnode. A linear scan would also work, but
// since we already have start.Hash a second binary search is clearer.
i := sort.Search(len(r.Vnodes), func(i int) bool {
return r.Vnodes[i].Hash >= start.Hash
})
if i == len(r.Vnodes) {
i = 0
}
for idx := 0; len(owners) < n && idx < len(r.Vnodes); idx++ {
v := r.Vnodes[(i+idx)%len(r.Vnodes)]
if _, ok := seen[v.Host]; ok {
continue
}
seen[v.Host] = struct{}{}
owners = append(owners, v)
}
return owners
}
// DiffHosts compares this ring's membership to another.
func (r *Ring) DiffHosts(other *Ring) (added []string, removed []string) {
if other == nil {
return r.Hosts(), nil
}
cur := make(map[string]struct{}, len(r.hosts))
for _, h := range r.hosts {
cur[h] = struct{}{}
}
oth := make(map[string]struct{}, len(other.hosts))
for _, h := range other.hosts {
oth[h] = struct{}{}
}
for h := range cur {
if _, ok := oth[h]; !ok {
removed = append(removed, h)
}
}
for h := range oth {
if _, ok := cur[h]; !ok {
added = append(added, h)
}
}
sort.Strings(added)
sort.Strings(removed)
return
}
// ---------------------------- Hash Functions ---------------------------------
func hashVnode(host string, idx int) uint64 {
h := fnv.New64a()
_, _ = h.Write([]byte(host))
_, _ = h.Write([]byte{'#'})
var buf [8]byte
binary.BigEndian.PutUint64(buf[:], uint64(idx))
_, _ = h.Write(buf[:])
return h.Sum64()
}
// hashKeyString provides a stable hash for arbitrary string keys (legacy IDs).
func hashKeyString(s string) uint64 {
h := fnv.New64a()
_, _ = h.Write([]byte(s))
return h.Sum64()
}
// fingerprintHosts produces an order-insensitive hash over the host set.
func fingerprintHosts(hosts []string) uint64 {
if len(hosts) == 0 {
return 0
}
h := fnv.New64a()
for _, host := range hosts {
_, _ = h.Write([]byte(host))
_, _ = h.Write([]byte{0})
}
return h.Sum64()
}
// --------------------------- Thread-Safe Wrapper -----------------------------
//
// RingRef offers atomic swap + read semantics. SyncedPool can embed or hold
// one of these to manage live ring updates safely.
type RingRef struct {
mu sync.RWMutex
ring *Ring
}
func NewRingRef(r *Ring) *RingRef {
return &RingRef{ring: r}
}
func (rr *RingRef) Get() *Ring {
rr.mu.RLock()
r := rr.ring
rr.mu.RUnlock()
return r
}
func (rr *RingRef) Set(r *Ring) {
rr.mu.Lock()
rr.ring = r
rr.mu.Unlock()
}
func (rr *RingRef) LookupID(id CartID) Vnode {
r := rr.Get()
if r == nil {
return Vnode{}
}
return r.LookupID(id)
}
// ----------------------------- Debug Utilities -------------------------------
func (r *Ring) String() string {
var b strings.Builder
fmt.Fprintf(&b, "Ring{epoch=%d vnodes=%d hosts=%d}\n", r.Epoch, len(r.Vnodes), len(r.hosts))
limit := len(r.Vnodes)
if limit > 16 {
limit = 16
}
for i := 0; i < limit; i++ {
v := r.Vnodes[i]
fmt.Fprintf(&b, " %02d hash=%016x host=%s idx=%d\n", i, v.Hash, v.Host, v.Index)
}
if len(r.Vnodes) > limit {
fmt.Fprintf(&b, " ... (%d more)\n", len(r.Vnodes)-limit)
}
return b.String()
}
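A minimal usage sketch of the ring API above; the host names are illustrative, and the epoch seeding mirrors what rebuildRing does in the SyncedPool changes further down.

// ring_usage_sketch.go -- example only, same package as ring.go.
package main

import "fmt"

func ringExample() {
	hosts := []string{"cart-0:50051", "cart-1:50051", "cart-2:50051"}

	ring := NewRingBuilder().
		WithHosts(hosts).
		WithEpoch(fingerprintHosts(hosts)). // pseudo-epoch derived from membership
		WithVnodesPerHost(64).
		Build()

	// Primary owner for a legacy string cart id.
	fmt.Println("primary owner:", ring.LookupString("cart-12345").Host)

	// Primary plus one replica candidate (replication factor 2 scaffolding).
	for _, v := range ring.LookupN(hashKeyString("cart-12345"), 2) {
		fmt.Println("owner candidate:", v.Host)
	}
}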


@@ -4,6 +4,7 @@ import (
"context" "context"
"fmt" "fmt"
"log" "log"
"reflect"
"sync" "sync"
"time" "time"
@@ -15,14 +16,15 @@ import (
) )
// SyncedPool coordinates cart grain ownership across nodes using gRPC control plane // SyncedPool coordinates cart grain ownership across nodes using gRPC control plane
// and cart actor services. Legacy frame / TCP code has been removed. // and cart actor services.
// //
// Responsibilities: // Responsibilities:
// - Local grain access (delegates to GrainLocalPool) // - Local grain access (delegates to GrainLocalPool)
// - Remote grain proxy management (RemoteGrainGRPC) // - Remote grain proxy management (RemoteGrainGRPC)
// - Cluster membership (AddRemote via discovery + negotiation) // - Cluster membership (AddRemote via discovery + negotiation)
// - Ownership acquisition (quorum via ConfirmOwner RPC)
// - Health/ping monitoring & remote removal // - Health/ping monitoring & remote removal
// - Ring based deterministic ownership (no runtime negotiation)
// - (Scaffolding) replication factor awareness via ring.LookupN
// //
// Thread-safety: public methods that mutate internal maps lock p.mu (RWMutex). // Thread-safety: public methods that mutate internal maps lock p.mu (RWMutex).
type SyncedPool struct { type SyncedPool struct {
@@ -40,7 +42,12 @@ type SyncedPool struct {
// Discovery handler for re-adding hosts after failures // Discovery handler for re-adding hosts after failures
discardedHostHandler *DiscardedHostHandler discardedHostHandler *DiscardedHostHandler
// Metrics / instrumentation dependencies already declared globally // Consistent hashing ring (immutable snapshot reference)
ringRef *RingRef
// Configuration
vnodesPerHost int
replicationFactor int // RF (>=1). Currently only primary is active; replicas are scaffolding.
} }
// RemoteHostGRPC tracks a remote host's clients & health. // RemoteHostGRPC tracks a remote host's clients & health.
@@ -71,7 +78,54 @@ var (
}) })
remoteLookupCount = promauto.NewCounter(prometheus.CounterOpts{ remoteLookupCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "cart_remote_lookup_total", Name: "cart_remote_lookup_total",
Help: "The total number of remote lookups", Help: "The total number of remote lookups (legacy counter)",
})
// Ring / ownership metrics
ringEpoch = promauto.NewGauge(prometheus.GaugeOpts{
Name: "cart_ring_epoch",
Help: "Current consistent hashing ring epoch (fingerprint-based pseudo-epoch)",
})
ringHosts = promauto.NewGauge(prometheus.GaugeOpts{
Name: "cart_ring_hosts",
Help: "Number of hosts currently in the ring",
})
ringVnodes = promauto.NewGauge(prometheus.GaugeOpts{
Name: "cart_ring_vnodes",
Help: "Number of virtual nodes in the ring",
})
ringLookupLocal = promauto.NewCounter(prometheus.CounterOpts{
Name: "cart_ring_lookup_local_total",
Help: "Ring ownership lookups resolved to the local host",
})
ringLookupRemote = promauto.NewCounter(prometheus.CounterOpts{
Name: "cart_ring_lookup_remote_total",
Help: "Ring ownership lookups resolved to a remote host",
})
ringHostShare = promauto.NewGaugeVec(prometheus.GaugeOpts{
Name: "cart_ring_host_share",
Help: "Fractional share of ring vnodes per host",
}, []string{"host"})
cartMutationsTotal = promauto.NewCounter(prometheus.CounterOpts{
Name: "cart_mutations_total",
Help: "Total number of cart state mutations applied (local + remote routed).",
})
cartMutationFailuresTotal = promauto.NewCounter(prometheus.CounterOpts{
Name: "cart_mutation_failures_total",
Help: "Total number of failed cart state mutations (local apply errors or remote routing failures).",
})
cartMutationLatencySeconds = promauto.NewHistogramVec(prometheus.HistogramOpts{
Name: "cart_mutation_latency_seconds",
Help: "Latency of cart mutations (successful or failed) in seconds.",
Buckets: prometheus.DefBuckets,
}, []string{"mutation"})
cartActiveGrains = promauto.NewGauge(prometheus.GaugeOpts{
Name: "cart_active_grains",
Help: "Number of active (resident) local grains.",
}) })
) )
@@ -82,8 +136,12 @@ func NewSyncedPool(local *GrainLocalPool, hostname string, discovery Discovery)
remoteHosts: make(map[string]*RemoteHostGRPC), remoteHosts: make(map[string]*RemoteHostGRPC),
remoteIndex: make(map[CartId]Grain), remoteIndex: make(map[CartId]Grain),
discardedHostHandler: NewDiscardedHostHandler(1338), discardedHostHandler: NewDiscardedHostHandler(1338),
vnodesPerHost: 64, // default smoothing factor; adjust if needed
replicationFactor: 1, // RF scaffold; >1 not yet activating replicas
} }
p.discardedHostHandler.SetReconnectHandler(p.AddRemote) p.discardedHostHandler.SetReconnectHandler(p.AddRemote)
// Initialize empty ring (will be rebuilt after first AddRemote or discovery event)
p.rebuildRing()
if discovery != nil { if discovery != nil {
go func() { go func() {
@@ -175,6 +233,8 @@ func (p *SyncedPool) AddRemote(host string) {
p.remoteHosts[host] = remote p.remoteHosts[host] = remote
p.mu.Unlock() p.mu.Unlock()
connectedRemotes.Set(float64(p.RemoteCount())) connectedRemotes.Set(float64(p.RemoteCount()))
// Rebuild consistent hashing ring including this new host
p.rebuildRing()
log.Printf("Connected to remote host %s", host) log.Printf("Connected to remote host %s", host)
@@ -222,6 +282,8 @@ func (p *SyncedPool) RemoveHost(host string) {
remote.Conn.Close() remote.Conn.Close()
} }
connectedRemotes.Set(float64(p.RemoteCount())) connectedRemotes.Set(float64(p.RemoteCount()))
// Rebuild ring after host removal
p.rebuildRing()
} }
// RemoteCount returns number of tracked remote hosts. // RemoteCount returns number of tracked remote hosts.
@@ -302,6 +364,8 @@ func (p *SyncedPool) Negotiate() {
} }
p.mu.RUnlock() p.mu.RUnlock()
changed := false
for _, r := range remotes { for _, r := range remotes {
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
reply, err := r.ControlClient.Negotiate(ctx, &proto.NegotiateRequest{KnownHosts: hosts}) reply, err := r.ControlClient.Negotiate(ctx, &proto.NegotiateRequest{KnownHosts: hosts})
@@ -313,12 +377,18 @@ func (p *SyncedPool) Negotiate() {
for _, h := range reply.Hosts { for _, h := range reply.Hosts {
if !p.IsKnown(h) { if !p.IsKnown(h) {
p.AddRemote(h) p.AddRemote(h)
} changed = true
} }
} }
} }
// ------------------------- Grain Management ---------------------------------- // If new hosts were discovered during negotiation, rebuild the ring once at the end.
if changed {
p.rebuildRing()
}
}
// ------------------------- Grain / Ring Ownership ----------------------------
// RemoveRemoteGrain removes a remote grain mapping. // RemoveRemoteGrain removes a remote grain mapping.
func (p *SyncedPool) RemoveRemoteGrain(id CartId) { func (p *SyncedPool) RemoveRemoteGrain(id CartId) {
@@ -333,9 +403,9 @@ func (p *SyncedPool) SpawnRemoteGrain(id CartId, host string) {
return return
} }
p.mu.Lock() p.mu.Lock()
// If local grain exists, remove it (ownership changed) // If local grain exists (legacy key), remove from local map (ownership moved).
if g, ok := p.local.grains[id]; ok && g != nil { if g, ok := p.local.grains[LegacyToCartKey(id)]; ok && g != nil {
delete(p.local.grains, id) delete(p.local.grains, LegacyToCartKey(id))
} }
remoteHost, ok := p.remoteHosts[host] remoteHost, ok := p.remoteHosts[host]
if !ok { if !ok {
@@ -362,80 +432,191 @@ func (p *SyncedPool) GetHealthyRemotes() []*RemoteHostGRPC {
return ret return ret
} }
// RequestOwnership attempts to become owner of a cart, requiring quorum. // rebuildRing reconstructs the consistent hashing ring from current host set
// On success local grain is (or will be) created; peers spawn remote proxies. // and updates ring-related metrics.
func (p *SyncedPool) RequestOwnership(id CartId) error { func (p *SyncedPool) rebuildRing() {
ok := 0 p.mu.RLock()
all := 0 hosts := make([]string, 0, len(p.remoteHosts)+1)
remotes := p.GetHealthyRemotes() hosts = append(hosts, p.Hostname)
for _, r := range remotes { for h := range p.remoteHosts {
ctx, cancel := context.WithTimeout(context.Background(), 800*time.Millisecond) hosts = append(hosts, h)
reply, err := r.ControlClient.ConfirmOwner(ctx, &proto.OwnerChangeRequest{
CartId: id.String(),
NewHost: p.Hostname,
})
cancel()
all++
if err != nil || reply == nil || !reply.Accepted {
log.Printf("ConfirmOwner failure from %s for %s: %v (reply=%v)", r.Host, id, err, reply)
continue
} }
ok++ p.mu.RUnlock()
epochSeed := fingerprintHosts(hosts)
builder := NewRingBuilder().
WithHosts(hosts).
WithEpoch(epochSeed).
WithVnodesPerHost(p.vnodesPerHost)
r := builder.Build()
if p.ringRef == nil {
p.ringRef = NewRingRef(r)
} else {
p.ringRef.Set(r)
} }
// Quorum rule mirrors legacy: // Metrics
// - If fewer than 3 total, require all. ringEpoch.Set(float64(r.Epoch))
// - Else require majority (ok >= all/2). ringHosts.Set(float64(len(r.Hosts())))
if (all < 3 && ok < all) || ok < (all/2) { ringVnodes.Set(float64(len(r.Vnodes)))
p.removeLocalGrain(id) ringHostShare.Reset()
return fmt.Errorf("quorum not reached (ok=%d all=%d)", ok, all) if len(r.Vnodes) > 0 {
perHost := make(map[string]int)
for _, v := range r.Vnodes {
perHost[v.Host]++
} }
grainSyncCount.Inc() total := float64(len(r.Vnodes))
return nil for h, c := range perHost {
ringHostShare.WithLabelValues(h).Set(float64(c) / total)
}
}
}
// ForceRingRefresh exposes a manual ring rebuild hook (primarily for tests).
func (p *SyncedPool) ForceRingRefresh() {
p.rebuildRing()
}
// ownersFor returns the ordered list of primary + replica owners for a cart id
// (length min(replicationFactor, #hosts)). Currently only the first (primary)
// is used. This scaffolds future replication work.
func (p *SyncedPool) ownersFor(id CartId) []string {
if p.ringRef == nil || p.replicationFactor <= 0 {
return []string{p.Hostname}
}
r := p.ringRef.Get()
if r == nil || r.Empty() {
return []string{p.Hostname}
}
vnodes := r.LookupN(hashKeyString(id.String()), p.replicationFactor)
out := make([]string, 0, len(vnodes))
seen := make(map[string]struct{}, len(vnodes))
for _, v := range vnodes {
if _, ok := seen[v.Host]; ok {
continue
}
seen[v.Host] = struct{}{}
out = append(out, v.Host)
}
if len(out) == 0 {
out = append(out, p.Hostname)
}
return out
}
// ownerHostFor returns the primary owner host for a given id.
func (p *SyncedPool) ownerHostFor(id CartId) string {
return p.ownersFor(id)[0]
}
// DebugOwnerHost exposes (for tests) the currently computed primary owner host.
func (p *SyncedPool) DebugOwnerHost(id CartId) string {
return p.ownerHostFor(id)
} }
func (p *SyncedPool) removeLocalGrain(id CartId) { func (p *SyncedPool) removeLocalGrain(id CartId) {
p.mu.Lock() p.mu.Lock()
delete(p.local.grains, id) delete(p.local.grains, LegacyToCartKey(id))
p.mu.Unlock() p.mu.Unlock()
} }
// getGrain returns a local or remote grain; if absent, attempts ownership. // getGrain returns a local or remote grain. For remote ownership it performs a
// bounded readiness wait (small retries) to reduce first-call failures while
// the remote connection & proxy are initializing.
func (p *SyncedPool) getGrain(id CartId) (Grain, error) { func (p *SyncedPool) getGrain(id CartId) (Grain, error) {
p.mu.RLock() owner := p.ownerHostFor(id)
localGrain, isLocal := p.local.grains[id] if owner == p.Hostname {
remoteGrain, isRemote := p.remoteIndex[id] ringLookupLocal.Inc()
p.mu.RUnlock()
if isLocal && localGrain != nil {
return localGrain, nil
}
if isRemote {
remoteLookupCount.Inc()
return remoteGrain, nil
}
// Attempt to claim ownership (async semantics preserved)
go p.RequestOwnership(id)
// Create local grain (lazy spawn) - may be rolled back by quorum failure
grain, err := p.local.GetGrain(id) grain, err := p.local.GetGrain(id)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return grain, nil return grain, nil
} }
ringLookupRemote.Inc()
// Kick off remote dial if we don't yet know the owner.
if !p.IsKnown(owner) {
go p.AddRemote(owner)
}
// Fast path existing proxy
p.mu.RLock()
if rg, ok := p.remoteIndex[id]; ok {
p.mu.RUnlock()
remoteLookupCount.Inc()
return rg, nil
}
p.mu.RUnlock()
const (
attempts = 5
sleepPerTry = 40 * time.Millisecond
)
for attempt := 0; attempt < attempts; attempt++ {
// Try to spawn (idempotent if host already known)
if p.IsKnown(owner) {
p.SpawnRemoteGrain(id, owner)
}
// Check again
p.mu.RLock()
if rg, ok := p.remoteIndex[id]; ok {
p.mu.RUnlock()
remoteLookupCount.Inc()
return rg, nil
}
p.mu.RUnlock()
// Last attempt? break to return error.
if attempt == attempts-1 {
break
}
time.Sleep(sleepPerTry)
}
return nil, fmt.Errorf("remote owner %s not yet available for cart %s (after %d attempts)", owner, id.String(), attempts)
}
// Apply applies a single mutation to a grain (local or remote). // Apply applies a single mutation to a grain (local or remote).
// Replication (RF>1) scaffolding: future enhancement will fan-out mutations
// to replica owners (best-effort) and reconcile quorum on read.
func (p *SyncedPool) Apply(id CartId, mutation interface{}) (*CartGrain, error) { func (p *SyncedPool) Apply(id CartId, mutation interface{}) (*CartGrain, error) {
grain, err := p.getGrain(id) grain, err := p.getGrain(id)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return grain.Apply(mutation, false) start := time.Now()
result, applyErr := grain.Apply(mutation, false)
// Derive mutation type label (strip pointer)
mutationType := "unknown"
if mutation != nil {
if t := reflect.TypeOf(mutation); t != nil {
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
if t.Name() != "" {
mutationType = t.Name()
}
}
}
cartMutationLatencySeconds.WithLabelValues(mutationType).Observe(time.Since(start).Seconds())
if applyErr == nil && result != nil {
cartMutationsTotal.Inc()
if p.ownerHostFor(id) == p.Hostname {
// Update active grains gauge only for local ownership
cartActiveGrains.Set(float64(p.local.DebugGrainCount()))
}
} else if applyErr != nil {
cartMutationFailuresTotal.Inc()
}
return result, applyErr
} }
// Get returns current state of a grain (local or remote). // Get returns current state of a grain (local or remote).
// Future replication hook: Read-repair or quorum read can be added here.
func (p *SyncedPool) Get(id CartId) (*CartGrain, error) { func (p *SyncedPool) Get(id CartId) (*CartGrain, error) {
grain, err := p.getGrain(id) grain, err := p.getGrain(id)
if err != nil { if err != nil {