revert port
@@ -177,6 +177,93 @@ func (s *cartActorGRPCServer) GetState(ctx context.Context, req *messages.StateR
    }, nil
}

// ControlPlane: Ping
func (s *cartActorGRPCServer) Ping(ctx context.Context, _ *messages.Empty) (*messages.PingReply, error) {
    return &messages.PingReply{
        Host:     s.syncedPool.Hostname,
        UnixTime: time.Now().Unix(),
    }, nil
}

// ControlPlane: Negotiate (merge host views)
func (s *cartActorGRPCServer) Negotiate(ctx context.Context, req *messages.NegotiateRequest) (*messages.NegotiateReply, error) {
    hostSet := make(map[string]struct{})
    // Caller view
    for _, h := range req.GetKnownHosts() {
        if h != "" {
            hostSet[h] = struct{}{}
        }
    }
    // This host
    hostSet[s.syncedPool.Hostname] = struct{}{}
    // Known remotes
    s.syncedPool.mu.RLock()
    for h := range s.syncedPool.remoteHosts {
        hostSet[h] = struct{}{}
    }
    s.syncedPool.mu.RUnlock()

    out := make([]string, 0, len(hostSet))
    for h := range hostSet {
        out = append(out, h)
    }
    return &messages.NegotiateReply{Hosts: out}, nil
}
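For context, the caller side of a Negotiate round might look like the sketch below: dial a peer, offer the local host view, and adopt the merged result. This is illustrative only; the generated client constructor (messages.NewControlPlaneClient), the stub import path, and the insecure dial option are assumptions, not part of this commit.

// Illustrative only; not part of this commit.
package main

import (
    "context"
    "time"

    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"

    "example.com/cart/messages" // assumed import path for the generated stubs
)

// negotiateWith dials one peer, offers our current host view, and returns
// the merged view computed by the server-side Negotiate above.
func negotiateWith(ctx context.Context, addr string, known []string) ([]string, error) {
    conn, err := grpc.Dial(addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
    if err != nil {
        return nil, err
    }
    defer conn.Close()

    ctx, cancel := context.WithTimeout(ctx, 2*time.Second)
    defer cancel()

    cli := messages.NewControlPlaneClient(conn) // assumed protoc-generated constructor
    rep, err := cli.Negotiate(ctx, &messages.NegotiateRequest{KnownHosts: known})
    if err != nil {
        return nil, err
    }
    return rep.GetHosts(), nil
}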

// ControlPlane: GetCartIds (locally owned carts only)
func (s *cartActorGRPCServer) GetCartIds(ctx context.Context, _ *messages.Empty) (*messages.CartIdsReply, error) {
    // Take the read lock before sizing and iterating the grains map so the
    // len() read does not race concurrent writers.
    s.syncedPool.local.mu.RLock()
    ids := make([]string, 0, len(s.syncedPool.local.grains))
    for id, g := range s.syncedPool.local.grains {
        if g != nil {
            ids = append(ids, id.String())
        }
    }
    s.syncedPool.local.mu.RUnlock()
    return &messages.CartIdsReply{CartIds: ids}, nil
}

// ControlPlane: ConfirmOwner (simple always-accept implementation)
// Future enhancement: add fencing / versioning & validate current holder.
func (s *cartActorGRPCServer) ConfirmOwner(ctx context.Context, req *messages.OwnerChangeRequest) (*messages.OwnerChangeAck, error) {
    if req.GetCartId() == "" || req.GetNewHost() == "" {
        return &messages.OwnerChangeAck{
            Accepted: false,
            Message:  "cart_id and new_host required",
        }, nil
    }
    // If we are *not* the new host and currently have a local grain, we:
    //  1. Drop any local grain (relinquish ownership)
    //  2. Spawn (or refresh) a remote proxy pointing to the new owner so
    //     subsequent mutations from this node route correctly.
    if req.GetNewHost() != s.syncedPool.Hostname {
        cid := ToCartId(req.GetCartId())
        // Drop local ownership if present.
        s.syncedPool.local.mu.Lock()
        delete(s.syncedPool.local.grains, cid)
        s.syncedPool.local.mu.Unlock()

        // Ensure a remote proxy exists for the new owner. SpawnRemoteGrain will
        // no-op if host unknown and attempt AddRemote asynchronously.
        s.syncedPool.SpawnRemoteGrain(cid, req.GetNewHost())
    }
    return &messages.OwnerChangeAck{
        Accepted: true,
        Message:  "accepted",
    }, nil
}
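The initiating side of the handoff is not part of this diff. A minimal sketch, assuming the same generated client as in the Negotiate sketch plus the standard log package: the new owner broadcasts ConfirmOwner to every known peer, and each peer drops its local grain and re-points a remote proxy as implemented above.

// Illustrative only. peers maps hostnames to already-dialed ControlPlane
// clients; that bookkeeping structure is an assumption, not from this commit.
func announceOwnership(ctx context.Context, peers map[string]messages.ControlPlaneClient, cartID, newHost string) {
    req := &messages.OwnerChangeRequest{CartId: cartID, NewHost: newHost}
    for host, cli := range peers {
        ack, err := cli.ConfirmOwner(ctx, req)
        if err != nil {
            log.Printf("owner change: peer %s unreachable: %v", host, err)
            continue
        }
        if !ack.GetAccepted() {
            // The server above always accepts; a future fenced / versioned
            // implementation could reject stale requests here.
            log.Printf("owner change: peer %s rejected: %s", host, ack.GetMessage())
        }
    }
}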

// ControlPlane: Closing (peer shutdown notification)
func (s *cartActorGRPCServer) Closing(ctx context.Context, req *messages.ClosingNotice) (*messages.OwnerChangeAck, error) {
    if req.GetHost() != "" {
        s.syncedPool.RemoveHost(req.GetHost())
    }
    return &messages.OwnerChangeAck{
        Accepted: true,
        Message:  "removed host",
    }, nil
}
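Symmetrically, a node shutting down would send Closing to each peer so they can drop it via RemoveHost; a best-effort sketch under the same assumptions:

// Illustrative only: best-effort fan-out before process exit.
func notifyClosing(ctx context.Context, peers map[string]messages.ControlPlaneClient, self string) {
    for host, cli := range peers {
        if _, err := cli.Closing(ctx, &messages.ClosingNotice{Host: self}); err != nil {
            log.Printf("closing notice to %s failed: %v", host, err)
        }
    }
}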

// StartGRPCServer configures and starts the unified gRPC server on the given address.
// It registers both the CartActor and ControlPlane services.
func StartGRPCServer(addr string, pool GrainPool, syncedPool *SyncedPool) (*grpc.Server, error) {
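The diff is cut off before the body of StartGRPCServer. A minimal sketch of what such a body typically looks like, assuming protoc-generated registrars named messages.RegisterCartActorServer and messages.RegisterControlPlaneServer, a pool field on cartActorGRPCServer, and the standard net and log packages:

// Illustrative body only; registrar names and struct fields are assumptions.
func StartGRPCServer(addr string, pool GrainPool, syncedPool *SyncedPool) (*grpc.Server, error) {
    lis, err := net.Listen("tcp", addr)
    if err != nil {
        return nil, err
    }
    srv := grpc.NewServer()
    impl := &cartActorGRPCServer{pool: pool, syncedPool: syncedPool}
    messages.RegisterCartActorServer(srv, impl)    // assumed generated registrar
    messages.RegisterControlPlaneServer(srv, impl) // assumed generated registrar
    go func() {
        if err := srv.Serve(lis); err != nil {
            log.Printf("grpc server stopped: %v", err)
        }
    }()
    return srv, nil
}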