diff --git a/images/chromium-headful/Dockerfile b/images/chromium-headful/Dockerfile index f70cf776..ce6fea17 100644 --- a/images/chromium-headful/Dockerfile +++ b/images/chromium-headful/Dockerfile @@ -175,6 +175,9 @@ RUN --mount=type=cache,target=/var/cache/apt,sharing=locked,id=$CACHEIDPREFIX-ap imagemagick \ sudo \ mutter \ + # FUSE for fspipe virtual filesystem (fuse provides /bin/fusermount required by go-fuse) + fuse \ + libfuse2 \ # Python/pyenv reqs build-essential \ libssl-dev \ @@ -216,6 +219,9 @@ RUN --mount=type=cache,target=/var/cache/apt,sharing=locked,id=$CACHEIDPREFIX-ap COPY --from=ffmpeg-downloader /usr/local/bin/ffmpeg /usr/local/bin/ffmpeg COPY --from=ffmpeg-downloader /usr/local/bin/ffprobe /usr/local/bin/ffprobe +# Enable user_allow_other in fuse.conf so FUSE mounts can be accessed by other users (e.g., Chrome) +RUN echo "user_allow_other" >> /etc/fuse.conf + # runtime ENV USERNAME=root RUN --mount=type=cache,target=/var/cache/apt,sharing=locked,id=$CACHEIDPREFIX-apt-cache \ @@ -318,4 +324,9 @@ RUN esbuild /tmp/playwright-daemon.ts \ RUN useradd -m -s /bin/bash kernel +# Create fspipe download directory with proper permissions for FUSE mount +RUN mkdir -p /home/kernel/fspipe-downloads && \ + chown kernel:kernel /home/kernel/fspipe-downloads && \ + chmod 777 /home/kernel/fspipe-downloads + ENTRYPOINT [ "/wrapper.sh" ] diff --git a/images/chromium-headful/run-docker.sh b/images/chromium-headful/run-docker.sh index f99c976f..0856a2f4 100755 --- a/images/chromium-headful/run-docker.sh +++ b/images/chromium-headful/run-docker.sh @@ -58,6 +58,7 @@ RUN_ARGS=( --memory 8192m -p 9222:9222 -p 444:10001 + -p 9000:9000 -e DISPLAY_NUM=1 -e HEIGHT=1080 -e WIDTH=1920 diff --git a/server/Makefile b/server/Makefile index 83e78c96..ffbb116e 100644 --- a/server/Makefile +++ b/server/Makefile @@ -28,7 +28,7 @@ oapi-generate: $(OAPI_CODEGEN) go mod tidy build: | $(BIN_DIR) - go build -o $(BIN_DIR)/api ./cmd/api + go build -buildvcs=false -o $(BIN_DIR)/api 
./cmd/api dev: build $(RECORDING_DIR) OUTPUT_DIR=$(RECORDING_DIR) DISPLAY_NUM=$(DISPLAY_NUM) ./bin/api diff --git a/server/cmd/api/api/fspipe.go b/server/cmd/api/api/fspipe.go new file mode 100644 index 00000000..8e4044f7 --- /dev/null +++ b/server/cmd/api/api/fspipe.go @@ -0,0 +1,379 @@ +package api + +import ( + "context" + "fmt" + "os" + "sync" + + "github.com/hanwen/go-fuse/v2/fuse" + "github.com/onkernel/kernel-images/server/lib/fspipe/daemon" + "github.com/onkernel/kernel-images/server/lib/fspipe/health" + "github.com/onkernel/kernel-images/server/lib/fspipe/listener" + "github.com/onkernel/kernel-images/server/lib/fspipe/transport" + "github.com/onkernel/kernel-images/server/lib/logger" + oapi "github.com/onkernel/kernel-images/server/lib/oapi" + "github.com/onkernel/kernel-images/server/lib/policy" +) + +const ( + defaultFspipeMountPath = "/home/kernel/fspipe-downloads" + defaultFspipeHealthPort = 8090 + defaultFspipeListenerPort = 9000 + defaultFspipeOutputDir = "/tmp/fspipe-output" +) + +// fspipeState holds the state of the running fspipe daemon +type fspipeState struct { + mu sync.RWMutex + + running bool + transportMode string // "websocket" or "s3" + mountPath string + wsEndpoint string + s3Bucket string + healthPort int + listenerPort int + outputDir string + + transport transport.Transport + fuseServer *fuse.Server + healthServer *health.Server + listenerServer *listener.Server +} + +var fspipe = &fspipeState{} + +// StartFspipe starts the fspipe daemon with the given configuration +func (s *ApiService) StartFspipe(ctx context.Context, req oapi.StartFspipeRequestObject) (oapi.StartFspipeResponseObject, error) { + log := logger.FromContext(ctx) + + fspipe.mu.Lock() + defer fspipe.mu.Unlock() + + // Check if already running + if fspipe.running { + return oapi.StartFspipe409JSONResponse{ + ConflictErrorJSONResponse: oapi.ConflictErrorJSONResponse{ + Message: "fspipe daemon is already running", + }, + }, nil + } + + // Determine mount path (Chrome's 
download directory) + mountPath := defaultFspipeMountPath + if req.Body != nil && req.Body.MountPath != nil && *req.Body.MountPath != "" { + mountPath = *req.Body.MountPath + } + + // Determine health port + healthPort := defaultFspipeHealthPort + if req.Body != nil && req.Body.HealthPort != nil { + healthPort = *req.Body.HealthPort + } + + // Determine if S3 mode + hasS3 := req.Body != nil && req.Body.S3Config != nil + + // Create mountpoint if it doesn't exist (with permissions accessible to all users) + if err := os.MkdirAll(mountPath, 0777); err != nil { + log.Error("failed to create fspipe mountpoint", "path", mountPath, "error", err) + return oapi.StartFspipe500JSONResponse{ + InternalErrorJSONResponse: oapi.InternalErrorJSONResponse{ + Message: fmt.Sprintf("failed to create mountpoint: %v", err), + }, + }, nil + } + // Ensure the directory has proper permissions for Chrome to access + os.Chmod(mountPath, 0777) + + var client transport.Transport + var transportMode string + var wsEndpoint string + var s3Bucket string + var listenerServer *listener.Server + var listenerPort int + var outputDir string + + if hasS3 { + // S3/R2 mode - upload directly to cloud storage + transportMode = "s3" + s3Cfg := req.Body.S3Config + + region := "auto" + if s3Cfg.Region != nil { + region = *s3Cfg.Region + } + prefix := "" + if s3Cfg.Prefix != nil { + prefix = *s3Cfg.Prefix + } + + s3Config := transport.S3Config{ + Endpoint: s3Cfg.Endpoint, + Bucket: s3Cfg.Bucket, + AccessKeyID: s3Cfg.AccessKeyId, + SecretAccessKey: s3Cfg.SecretAccessKey, + Region: region, + Prefix: prefix, + } + s3Bucket = s3Cfg.Bucket + + var err error + client, err = transport.NewS3Client(s3Config) + if err != nil { + log.Error("failed to create S3 transport", "bucket", s3Cfg.Bucket, "error", err) + return oapi.StartFspipe500JSONResponse{ + InternalErrorJSONResponse: oapi.InternalErrorJSONResponse{ + Message: fmt.Sprintf("failed to create S3 transport: %v", err), + }, + }, nil + } + } else { + // Default 
WebSocket mode - start broadcaster that external clients connect to + transportMode = "websocket" + listenerPort = defaultFspipeListenerPort + + // External endpoint URL for clients outside the container + wsEndpoint = fmt.Sprintf("ws://0.0.0.0:%d/fspipe", listenerPort) + + // Create broadcaster - a WebSocket server that broadcasts to connected clients + broadcasterAddr := fmt.Sprintf(":%d", listenerPort) + broadcaster := transport.NewBroadcaster(broadcasterAddr, "/fspipe") + + // For now, allow operation without clients (fake ACKs) for backward compatibility + // Set to true for strict mode where downloads fail if no client is connected + broadcaster.SetRequireClient(false) + + // Enable fast mode: writes are fire-and-forget, only FileCreate waits for ACK + // This gives much better throughput for large file downloads + broadcaster.SetFastMode(true) + + client = broadcaster + } + + // Connect transport + if err := client.Connect(); err != nil { + if listenerServer != nil { + listenerServer.Stop() + } + client.Close() + log.Error("failed to connect fspipe transport", "error", err) + return oapi.StartFspipe500JSONResponse{ + InternalErrorJSONResponse: oapi.InternalErrorJSONResponse{ + Message: fmt.Sprintf("failed to connect transport: %v", err), + }, + }, nil + } + + // Mount the FUSE filesystem + fuseServer, err := daemon.Mount(mountPath, client) + if err != nil { + if listenerServer != nil { + listenerServer.Stop() + } + client.Close() + log.Error("failed to mount fspipe filesystem", "path", mountPath, "error", err) + return oapi.StartFspipe500JSONResponse{ + InternalErrorJSONResponse: oapi.InternalErrorJSONResponse{ + Message: fmt.Sprintf("failed to mount filesystem: %v", err), + }, + }, nil + } + + // Start health server + healthAddr := fmt.Sprintf(":%d", healthPort) + healthServer := health.NewServer(healthAddr) + + healthServer.RegisterCheck("transport", func() (health.Status, string) { + state := client.State() + switch state { + case 
transport.StateConnected: + return health.StatusHealthy, "connected" + case transport.StateReconnecting: + return health.StatusDegraded, "reconnecting" + default: + return health.StatusUnhealthy, state.String() + } + }) + + healthServer.RegisterStats("transport", func() map[string]interface{} { + stats := client.Stats() + result := make(map[string]interface{}) + for k, v := range stats { + result[k] = v + } + result["state"] = client.State().String() + return result + }) + + if err := healthServer.Start(); err != nil { + log.Warn("failed to start fspipe health server", "error", err) + } + + // Store state + fspipe.running = true + fspipe.transportMode = transportMode + fspipe.mountPath = mountPath + fspipe.wsEndpoint = wsEndpoint + fspipe.s3Bucket = s3Bucket + fspipe.healthPort = healthPort + fspipe.listenerPort = listenerPort + fspipe.outputDir = outputDir + fspipe.transport = client + fspipe.fuseServer = fuseServer + fspipe.healthServer = healthServer + fspipe.listenerServer = listenerServer + + log.Info("fspipe daemon started", "mode", transportMode, "mount", mountPath) + + // Set Chrome download directory via enterprise policy (more reliable than command-line flag) + policyManager := &policy.Policy{} + if err := policyManager.SetDownloadDirectory(mountPath, true); err != nil { + log.Warn("failed to set Chrome download directory policy", "error", err) + } else { + log.Info("set Chrome download directory policy", "path", mountPath) + } + + // Restart Chrome to apply policy changes + if err := s.restartChromiumAndWait(ctx, "fspipe setup"); err != nil { + log.Warn("failed to restart Chrome for fspipe setup", "error", err) + } + + // Build response + response := oapi.FspipeStartResult{ + Running: true, + TransportMode: oapi.FspipeStartResultTransportMode(transportMode), + MountPath: mountPath, + } + + if transportMode == "websocket" { + response.WsEndpoint = &wsEndpoint + } else { + response.S3Bucket = &s3Bucket + } + + healthEndpoint := 
fmt.Sprintf("http://localhost:%d", healthPort) + response.HealthEndpoint = &healthEndpoint + + return oapi.StartFspipe200JSONResponse(response), nil +} + +// StopFspipe stops the running fspipe daemon +func (s *ApiService) StopFspipe(ctx context.Context, req oapi.StopFspipeRequestObject) (oapi.StopFspipeResponseObject, error) { + log := logger.FromContext(ctx) + + fspipe.mu.Lock() + defer fspipe.mu.Unlock() + + if !fspipe.running { + return oapi.StopFspipe400JSONResponse{ + BadRequestErrorJSONResponse: oapi.BadRequestErrorJSONResponse{ + Message: "fspipe daemon is not running", + }, + }, nil + } + + // Stop health server + if fspipe.healthServer != nil { + fspipe.healthServer.Stop(ctx) + } + + // Unmount filesystem + if fspipe.fuseServer != nil { + if err := fspipe.fuseServer.Unmount(); err != nil { + log.Warn("failed to unmount fspipe filesystem", "error", err) + } + } + + // Close transport + if fspipe.transport != nil { + if err := fspipe.transport.Close(); err != nil { + log.Warn("failed to close fspipe transport", "error", err) + } + } + + // Stop listener server (if running) + if fspipe.listenerServer != nil { + if err := fspipe.listenerServer.Stop(); err != nil { + log.Warn("failed to stop fspipe listener", "error", err) + } + } + + // Clear download directory policy + policyManager := &policy.Policy{} + if err := policyManager.ClearDownloadDirectory(); err != nil { + log.Warn("failed to clear Chrome download directory policy", "error", err) + } + + // Reset state + fspipe.running = false + fspipe.transportMode = "" + fspipe.mountPath = "" + fspipe.wsEndpoint = "" + fspipe.s3Bucket = "" + fspipe.healthPort = 0 + fspipe.listenerPort = 0 + fspipe.outputDir = "" + fspipe.transport = nil + fspipe.fuseServer = nil + fspipe.healthServer = nil + fspipe.listenerServer = nil + + log.Info("fspipe daemon stopped") + return oapi.StopFspipe200Response{}, nil +} + +// GetFspipeStatus returns the current status of the fspipe daemon +func (s *ApiService) GetFspipeStatus(ctx 
context.Context, req oapi.GetFspipeStatusRequestObject) (oapi.GetFspipeStatusResponseObject, error) { + fspipe.mu.RLock() + defer fspipe.mu.RUnlock() + + if !fspipe.running { + return oapi.GetFspipeStatus200JSONResponse(oapi.FspipeStatus{ + Running: false, + }), nil + } + + status := oapi.FspipeStatus{ + Running: true, + MountPath: &fspipe.mountPath, + } + + // Set transport mode + mode := oapi.FspipeStatusTransportMode(fspipe.transportMode) + status.TransportMode = &mode + + // Set transport state + if fspipe.transport != nil { + stateStr := fspipe.transport.State().String() + var state oapi.FspipeStatusTransportState + switch stateStr { + case "connected": + state = oapi.Connected + case "reconnecting": + state = oapi.Reconnecting + default: + state = oapi.Disconnected + } + status.TransportState = &state + + // Get stats + rawStats := fspipe.transport.Stats() + stats := make(map[string]interface{}) + for k, v := range rawStats { + stats[k] = v + } + status.Stats = &stats + } + + // Set endpoint info + if fspipe.transportMode == "websocket" && fspipe.wsEndpoint != "" { + status.WsEndpoint = &fspipe.wsEndpoint + } + if fspipe.transportMode == "s3" && fspipe.s3Bucket != "" { + status.S3Bucket = &fspipe.s3Bucket + } + + return oapi.GetFspipeStatus200JSONResponse(status), nil +} diff --git a/server/go.mod b/server/go.mod index a9ce8ec7..53e45df8 100644 --- a/server/go.mod +++ b/server/go.mod @@ -4,6 +4,10 @@ go 1.25.0 require ( github.com/avast/retry-go/v5 v5.0.0 + github.com/aws/aws-sdk-go-v2 v1.24.0 + github.com/aws/aws-sdk-go-v2/config v1.26.1 + github.com/aws/aws-sdk-go-v2/credentials v1.16.12 + github.com/aws/aws-sdk-go-v2/service/s3 v1.47.5 github.com/coder/websocket v1.8.14 github.com/creack/pty v1.1.24 github.com/fsnotify/fsnotify v1.9.0 @@ -12,7 +16,11 @@ require ( github.com/glebarez/sqlite v1.11.0 github.com/go-chi/chi/v5 v5.2.1 github.com/google/uuid v1.6.0 + github.com/gorilla/websocket v1.5.3 + github.com/hanwen/go-fuse/v2 v2.5.1 + 
github.com/joho/godotenv v1.5.1 github.com/kelseyhightower/envconfig v1.4.0 + github.com/klauspost/compress v1.18.3 github.com/m1k1o/neko/server v0.0.0-20251008185748-46e2fc7d3866 github.com/nrednav/cuid2 v1.1.0 github.com/oapi-codegen/runtime v1.1.2 @@ -25,6 +33,20 @@ require ( require ( github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.9 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.9 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.9 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.9 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.9 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.9 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.18.5 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.5 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.26.5 // indirect + github.com/aws/smithy-go v1.19.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/glebarez/go-sqlite v1.21.2 // indirect @@ -33,7 +55,6 @@ require ( github.com/jinzhu/inflection v1.0.0 // indirect github.com/jinzhu/now v1.1.5 // indirect github.com/josharian/intern v1.0.0 // indirect - github.com/klauspost/compress v1.18.3 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect diff --git a/server/go.sum b/server/go.sum index 324dfabf..34f3dd2b 100644 --- a/server/go.sum +++ b/server/go.sum @@ -3,6 
+3,42 @@ github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7D github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk= github.com/avast/retry-go/v5 v5.0.0 h1:kf1Qc2UsTZ4qq8elDymqfbISvkyMuhgRxuJqX2NHP7k= github.com/avast/retry-go/v5 v5.0.0/go.mod h1://d+usmKWio1agtZfS1H/ltTqwtIfBnRq9zEwjc3eH8= +github.com/aws/aws-sdk-go-v2 v1.24.0 h1:890+mqQ+hTpNuw0gGP6/4akolQkSToDJgHfQE7AwGuk= +github.com/aws/aws-sdk-go-v2 v1.24.0/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 h1:OCs21ST2LrepDfD3lwlQiOqIGp6JiEUqG84GzTDoyJs= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4/go.mod h1:usURWEKSNNAcAZuzRn/9ZYPT8aZQkR7xcCtunK/LkJo= +github.com/aws/aws-sdk-go-v2/config v1.26.1 h1:z6DqMxclFGL3Zfo+4Q0rLnAZ6yVkzCRxhRMsiRQnD1o= +github.com/aws/aws-sdk-go-v2/config v1.26.1/go.mod h1:ZB+CuKHRbb5v5F0oJtGdhFTelmrxd4iWO1lf0rQwSAg= +github.com/aws/aws-sdk-go-v2/credentials v1.16.12 h1:v/WgB8NxprNvr5inKIiVVrXPuuTegM+K8nncFkr1usU= +github.com/aws/aws-sdk-go-v2/credentials v1.16.12/go.mod h1:X21k0FjEJe+/pauud82HYiQbEr9jRKY3kXEIQ4hXeTQ= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10 h1:w98BT5w+ao1/r5sUuiH6JkVzjowOKeOJRHERyy1vh58= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10/go.mod h1:K2WGI7vUvkIv1HoNbfBA1bvIZ+9kL3YVmWxeKuLQsiw= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.9 h1:v+HbZaCGmOwnTTVS86Fleq0vPzOd7tnJGbFhP0stNLs= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.9/go.mod h1:Xjqy+Nyj7VDLBtCMkQYOw1QYfAEZCVLrfI0ezve8wd4= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.9 h1:N94sVhRACtXyVcjXxrwK1SKFIJrA9pOJ5yu2eSHnmls= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.9/go.mod h1:hqamLz7g1/4EJP+GH5NBhcUMLjW+gKLQabgyz6/7WAU= +github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 h1:GrSw8s0Gs/5zZ0SX+gX4zQjRnRsMJDJ2sLur1gRBhEM= +github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2/go.mod 
h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.9 h1:ugD6qzjYtB7zM5PN/ZIeaAIyefPaD82G8+SJopgvUpw= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.9/go.mod h1:YD0aYBWCrPENpHolhKw2XDlTIWae2GKXT1T4o6N6hiM= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.9 h1:/90OR2XbSYfXucBMJ4U14wrjlfleq/0SB6dZDPncgmo= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.9/go.mod h1:dN/Of9/fNZet7UrQQ6kTDo/VSwKPIq94vjlU16bRARc= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.9 h1:Nf2sHxjMJR8CSImIVCONRi4g0Su3J+TSTbS7G0pUeMU= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.9/go.mod h1:idky4TER38YIjr2cADF1/ugFMKvZV7p//pVeV5LZbF0= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.9 h1:iEAeF6YC3l4FzlJPP9H3Ko1TXpdjdqWffxXjp8SY6uk= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.9/go.mod h1:kjsXoK23q9Z/tLBrckZLLyvjhZoS+AGrzqzUfEClvMM= +github.com/aws/aws-sdk-go-v2/service/s3 v1.47.5 h1:Keso8lIOS+IzI2MkPZyK6G0LYcK3My2LQ+T5bxghEAY= +github.com/aws/aws-sdk-go-v2/service/s3 v1.47.5/go.mod h1:vADO6Jn+Rq4nDtfwNjhgR84qkZwiC6FqCaXdw/kYwjA= +github.com/aws/aws-sdk-go-v2/service/sso v1.18.5 h1:ldSFWz9tEHAwHNmjx2Cvy1MjP5/L9kNoR0skc6wyOOM= +github.com/aws/aws-sdk-go-v2/service/sso v1.18.5/go.mod h1:CaFfXLYL376jgbP7VKC96uFcU8Rlavak0UlAwk1Dlhc= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.5 h1:2k9KmFawS63euAkY4/ixVNsYYwrwnd5fIvgEKkfZFNM= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.5/go.mod h1:W+nd4wWDVkSUIox9bacmkBP5NMFQeTJ/xqNabpzSR38= +github.com/aws/aws-sdk-go-v2/service/sts v1.26.5 h1:5UYvv8JUvllZsRnfrcMQ+hJ9jNICmcgKPAO1CER25Wg= +github.com/aws/aws-sdk-go-v2/service/sts v1.26.5/go.mod 
h1:XX5gh4CB7wAs4KhcF46G6C8a2i7eupU19dcAAE+EydU= +github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM= +github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= github.com/coder/websocket v1.8.14 h1:9L0p0iKiNOibykf283eHkKUHHrpG7f65OE3BhhO7v9g= github.com/coder/websocket v1.8.14/go.mod h1:NX3SzP+inril6yawo5CQXx8+fk145lPDC6pumgx0mVg= @@ -31,14 +67,22 @@ github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+Gr github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 h1:Xim43kblpZXfIBQsbuBVKCudVG457BR2GZFIz3uw3hQ= github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/hanwen/go-fuse/v2 v2.5.1 h1:OQBE8zVemSocRxA4OaFJbjJ5hlpCmIWbGr7r0M4uoQQ= +github.com/hanwen/go-fuse/v2 v2.5.1/go.mod h1:xKwi1cF7nXAOBCXujD5ie0ZKsxc8GGSA1rlMJc+8IJs= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= 
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= +github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= +github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE= @@ -50,10 +94,14 @@ github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4= +github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= +github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/nrednav/cuid2 v1.1.0 h1:Y2P9Fo1Iz7lKuwcn+fS0mbxkNvEqoNLUtm0+moHCnYc= @@ -87,8 +135,10 @@ github.com/ugorji/go/codec v1.2.11 
h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4d github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM= golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= diff --git a/server/lib/fspipe/daemon/daemon.go b/server/lib/fspipe/daemon/daemon.go new file mode 100644 index 00000000..dfe2f903 --- /dev/null +++ b/server/lib/fspipe/daemon/daemon.go @@ -0,0 +1,572 @@ +// Package daemon provides the FUSE filesystem mounting functionality for fspipe. +// This package exposes the internal daemon functionality for use by external packages. 
+package daemon + +import ( + "context" + "path/filepath" + "sync" + "syscall" + "time" + + "github.com/google/uuid" + "github.com/hanwen/go-fuse/v2/fs" + "github.com/hanwen/go-fuse/v2/fuse" + "github.com/onkernel/kernel-images/server/lib/fspipe/logging" + "github.com/onkernel/kernel-images/server/lib/fspipe/protocol" + "github.com/onkernel/kernel-images/server/lib/fspipe/transport" +) + +var oneSecond = time.Second + +// Mount mounts the fspipe FUSE filesystem at the specified path +func Mount(mountpoint string, client transport.Transport) (*fuse.Server, error) { + root := newPipeDir(client, nil, "") + + opts := &fs.Options{ + MountOptions: fuse.MountOptions{ + AllowOther: true, // Allow Chrome (running as different user) to access the mount + Debug: false, + FsName: "fspipe", + Name: "fspipe", + }, + AttrTimeout: &oneSecond, + EntryTimeout: &oneSecond, + } + + server, err := fs.Mount(mountpoint, root, opts) + if err != nil { + return nil, err + } + + logging.Info("Mounted fspipe at %s", mountpoint) + return server, nil +} + +// defaultAttr returns default attributes for nodes +func defaultAttr(mode uint32) fuse.Attr { + now := time.Now() + return fuse.Attr{ + Mode: mode, + Nlink: 1, + Owner: fuse.Owner{ + Uid: uint32(syscall.Getuid()), + Gid: uint32(syscall.Getgid()), + }, + Atime: uint64(now.Unix()), + Mtime: uint64(now.Unix()), + Ctime: uint64(now.Unix()), + } +} + +// pipeDir represents a directory in the virtual filesystem +type pipeDir struct { + fs.Inode + + client transport.Transport + parent *pipeDir + name string + + mu sync.RWMutex + children map[string]fs.InodeEmbedder +} + +var _ fs.InodeEmbedder = (*pipeDir)(nil) +var _ fs.NodeGetattrer = (*pipeDir)(nil) +var _ fs.NodeLookuper = (*pipeDir)(nil) +var _ fs.NodeCreater = (*pipeDir)(nil) +var _ fs.NodeMkdirer = (*pipeDir)(nil) +var _ fs.NodeUnlinker = (*pipeDir)(nil) +var _ fs.NodeRmdirer = (*pipeDir)(nil) +var _ fs.NodeRenamer = (*pipeDir)(nil) +var _ fs.NodeReaddirer = (*pipeDir)(nil) +var _ 
fs.NodeStatfser = (*pipeDir)(nil) + +func newPipeDir(client transport.Transport, parent *pipeDir, name string) *pipeDir { + return &pipeDir{ + client: client, + parent: parent, + name: name, + children: make(map[string]fs.InodeEmbedder), + } +} + +func (d *pipeDir) Getattr(ctx context.Context, fh fs.FileHandle, out *fuse.AttrOut) syscall.Errno { + // Use 0777 to allow Chrome (running as different user) full access + out.Attr = defaultAttr(fuse.S_IFDIR | 0777) + return 0 +} + +// Statfs returns filesystem statistics. Chrome checks this before downloading. +func (d *pipeDir) Statfs(ctx context.Context, out *fuse.StatfsOut) syscall.Errno { + // Return generous fake stats - this is a pipe filesystem, space is "unlimited" + // These values are designed to make Chrome happy when checking disk space + const blockSize = 4096 + const totalBlocks = 1024 * 1024 * 1024 // ~4TB worth of blocks + const freeBlocks = 1024 * 1024 * 512 // ~2TB free + + out.Blocks = totalBlocks + out.Bfree = freeBlocks + out.Bavail = freeBlocks + out.Files = 1000000 + out.Ffree = 999999 + out.Bsize = blockSize + out.NameLen = 255 + out.Frsize = blockSize + + return 0 +} + +func (d *pipeDir) Lookup(ctx context.Context, name string, out *fuse.EntryOut) (*fs.Inode, syscall.Errno) { + // Hold lock for entire operation to prevent TOCTOU race + d.mu.RLock() + defer d.mu.RUnlock() + + child, ok := d.children[name] + if !ok { + return nil, syscall.ENOENT + } + + inode := d.GetChild(name) + if inode != nil { + switch n := child.(type) { + case *pipeFile: + // Ensure world-writable permission for cross-user access + n.mu.RLock() + mode := n.mode + size := n.size + n.mu.RUnlock() + out.Attr = defaultAttr(fuse.S_IFREG | mode | 0666) + out.Attr.Size = uint64(size) + case *pipeDir: + out.Attr = defaultAttr(fuse.S_IFDIR | 0777) + } + return inode, 0 + } + + return nil, syscall.ENOENT +} + +func (d *pipeDir) Create(ctx context.Context, name string, flags uint32, mode uint32, out *fuse.EntryOut) (node *fs.Inode, fh 
fs.FileHandle, fuseFlags uint32, errno syscall.Errno) { + relPath := d.relPath(name) + logging.Debug("Create: %s (mode=%o)", relPath, mode) + + file := newPipeFile(d.client, d, name, mode) + + msg := protocol.FileCreate{ + FileID: file.id, + Filename: relPath, + Mode: mode, + } + + // Use SendAndReceive to get ACK from listener - ensures file was created + respType, respData, err := d.client.SendAndReceive(protocol.MsgFileCreate, &msg) + if err != nil { + logging.Debug("Create: failed to send FileCreate: %v", err) + return nil, nil, 0, syscall.EIO + } + + if respType != protocol.MsgFileCreateAck { + logging.Debug("Create: unexpected response type: 0x%02x", respType) + return nil, nil, 0, syscall.EIO + } + + var ack protocol.FileCreateAck + if err := protocol.DecodePayload(respData, &ack); err != nil { + logging.Debug("Create: failed to decode ack: %v", err) + return nil, nil, 0, syscall.EIO + } + + if !ack.Success { + logging.Debug("Create: listener error: %s", ack.Error) + return nil, nil, 0, syscall.EIO + } + + d.mu.Lock() + d.children[name] = file + d.mu.Unlock() + + stable := fs.StableAttr{Mode: fuse.S_IFREG} + inode := d.NewInode(ctx, file, stable) + + // Ensure world-writable permission for cross-user access + out.Attr = defaultAttr(fuse.S_IFREG | mode | 0666) + + handle := newPipeHandle(file) + return inode, handle, fuse.FOPEN_DIRECT_IO, 0 +} + +func (d *pipeDir) Mkdir(ctx context.Context, name string, mode uint32, out *fuse.EntryOut) (*fs.Inode, syscall.Errno) { + logging.Debug("Mkdir: %s", d.relPath(name)) + + newDir := newPipeDir(d.client, d, name) + + d.mu.Lock() + d.children[name] = newDir + d.mu.Unlock() + + stable := fs.StableAttr{Mode: fuse.S_IFDIR} + inode := d.NewInode(ctx, newDir, stable) + + // Ensure world-writable permission for cross-user access + out.Attr = defaultAttr(fuse.S_IFDIR | 0777) + return inode, 0 +} + +func (d *pipeDir) Unlink(ctx context.Context, name string) syscall.Errno { + relPath := d.relPath(name) + logging.Debug("Unlink: 
%s", relPath) + + d.mu.Lock() + child, ok := d.children[name] + if ok { + delete(d.children, name) + } + d.mu.Unlock() + + if !ok { + return syscall.ENOENT + } + + if file, isFile := child.(*pipeFile); isFile { + file.mu.Lock() + file.deleted = true + file.mu.Unlock() + } + + msg := protocol.Delete{Filename: relPath} + if err := d.client.Send(protocol.MsgDelete, &msg); err != nil { + logging.Debug("Unlink: failed to send Delete: %v", err) + } + + return 0 +} + +func (d *pipeDir) Rmdir(ctx context.Context, name string) syscall.Errno { + logging.Debug("Rmdir: %s", d.relPath(name)) + + d.mu.Lock() + child, ok := d.children[name] + if !ok { + d.mu.Unlock() + return syscall.ENOENT + } + + dir, isDir := child.(*pipeDir) + if !isDir { + d.mu.Unlock() + return syscall.ENOTDIR + } + + dir.mu.RLock() + empty := len(dir.children) == 0 + dir.mu.RUnlock() + + if !empty { + d.mu.Unlock() + return syscall.ENOTEMPTY + } + + delete(d.children, name) + d.mu.Unlock() + + return 0 +} + +func (d *pipeDir) Rename(ctx context.Context, name string, newParent fs.InodeEmbedder, newName string, flags uint32) syscall.Errno { + oldPath := d.relPath(name) + + newParentDir, ok := newParent.(*pipeDir) + if !ok { + return syscall.EINVAL + } + + newPath := newParentDir.relPath(newName) + logging.Debug("Rename: %s -> %s", oldPath, newPath) + + d.mu.Lock() + child, ok := d.children[name] + if !ok { + d.mu.Unlock() + return syscall.ENOENT + } + delete(d.children, name) + d.mu.Unlock() + + // Get the file ID if this is a pipeFile + var fileID string + switch c := child.(type) { + case *pipeFile: + c.name = newName + c.parent = newParentDir + fileID = c.id + case *pipeDir: + c.name = newName + c.parent = newParentDir + } + + newParentDir.mu.Lock() + newParentDir.children[newName] = child + newParentDir.mu.Unlock() + + msg := protocol.Rename{ + FileID: fileID, + OldName: oldPath, + NewName: newPath, + } + if err := d.client.Send(protocol.MsgRename, &msg); err != nil { + logging.Debug("Rename: failed to 
send Rename: %v", err) + } + + return 0 +} + +func (d *pipeDir) Readdir(ctx context.Context) (fs.DirStream, syscall.Errno) { + d.mu.RLock() + defer d.mu.RUnlock() + + entries := make([]fuse.DirEntry, 0, len(d.children)) + for name, child := range d.children { + var mode uint32 + switch child.(type) { + case *pipeFile: + mode = fuse.S_IFREG + case *pipeDir: + mode = fuse.S_IFDIR + } + entries = append(entries, fuse.DirEntry{ + Name: name, + Mode: mode, + }) + } + + return fs.NewListDirStream(entries), 0 +} + +func (d *pipeDir) relPath(name string) string { + if d.parent == nil { + return name + } + return filepath.Join(d.parent.relPath(d.name), name) +} + +// pipeFile represents a file in the virtual filesystem +type pipeFile struct { + fs.Inode + + client transport.Transport + parent *pipeDir + name string + id string + mode uint32 + + mu sync.RWMutex + size int64 + deleted bool +} + +var _ fs.InodeEmbedder = (*pipeFile)(nil) +var _ fs.NodeGetattrer = (*pipeFile)(nil) +var _ fs.NodeSetattrer = (*pipeFile)(nil) +var _ fs.NodeOpener = (*pipeFile)(nil) + +func newPipeFile(client transport.Transport, parent *pipeDir, name string, mode uint32) *pipeFile { + return &pipeFile{ + client: client, + parent: parent, + name: name, + id: uuid.New().String(), + mode: mode, + } +} + +func (f *pipeFile) Getattr(ctx context.Context, fh fs.FileHandle, out *fuse.AttrOut) syscall.Errno { + f.mu.RLock() + defer f.mu.RUnlock() + + // Ensure world-writable permission for cross-user access (Chrome runs as different user) + mode := f.mode | 0666 + out.Attr = defaultAttr(fuse.S_IFREG | mode) + out.Attr.Size = uint64(f.size) + return 0 +} + +func (f *pipeFile) Setattr(ctx context.Context, fh fs.FileHandle, in *fuse.SetAttrIn, out *fuse.AttrOut) syscall.Errno { + f.mu.Lock() + defer f.mu.Unlock() + + if sz, ok := in.GetSize(); ok { + logging.Debug("Setattr: truncate %s to %d", f.name, sz) + + f.size = int64(sz) + + msg := protocol.Truncate{ + FileID: f.id, + Size: f.size, + } + if err := 
f.client.Send(protocol.MsgTruncate, &msg); err != nil {
			logging.Debug("Setattr: failed to send Truncate: %v", err)
			return syscall.EIO
		}
	}

	if mode, ok := in.GetMode(); ok {
		f.mode = mode
	}

	// Ensure world-writable permission for cross-user access
	mode := f.mode | 0666
	out.Attr = defaultAttr(fuse.S_IFREG | mode)
	out.Attr.Size = uint64(f.size)
	return 0
}

// Open returns a write handle for the file. Opening an already-deleted file
// fails with ENOENT. FOPEN_DIRECT_IO bypasses the kernel page cache so every
// write reaches Write() (and therefore the remote listener) immediately.
func (f *pipeFile) Open(ctx context.Context, flags uint32) (fs.FileHandle, uint32, syscall.Errno) {
	f.mu.RLock()
	deleted := f.deleted
	f.mu.RUnlock()

	if deleted {
		return nil, 0, syscall.ENOENT
	}

	logging.Debug("Open: %s (flags=%d)", f.name, flags)

	handle := newPipeHandle(f)
	return handle, fuse.FOPEN_DIRECT_IO, 0
}

// relPath returns this file's path relative to the mount root.
func (f *pipeFile) relPath() string {
	if f.parent == nil {
		return f.name
	}
	return f.parent.relPath(f.name)
}

// pipeHandle is a file handle for write operations
type pipeHandle struct {
	file *pipeFile
}

var _ fs.FileHandle = (*pipeHandle)(nil)
var _ fs.FileWriter = (*pipeHandle)(nil)
var _ fs.FileReader = (*pipeHandle)(nil)
var _ fs.FileFlusher = (*pipeHandle)(nil)
var _ fs.FileReleaser = (*pipeHandle)(nil)
var _ fs.FileFsyncer = (*pipeHandle)(nil)
var _ fs.FileAllocater = (*pipeHandle)(nil)

func newPipeHandle(file *pipeFile) *pipeHandle {
	return &pipeHandle{file: file}
}

// Write streams data to the listener in ChunkSize pieces, waiting for a
// WriteAck per chunk, and extends the tracked file size to cover the
// acknowledged range. Returns the number of bytes the listener confirmed.
func (h *pipeHandle) Write(ctx context.Context, data []byte, off int64) (uint32, syscall.Errno) {
	h.file.mu.Lock()
	defer h.file.mu.Unlock()

	if h.file.deleted {
		return 0, syscall.ENOENT
	}

	remaining := data
	offset := off
	totalWritten := uint32(0)

	for len(remaining) > 0 {
		chunkSize := protocol.ChunkSize
		if len(remaining) < chunkSize {
			chunkSize = len(remaining)
		}

		chunk := remaining[:chunkSize]

		msg := protocol.WriteChunk{
			FileID: h.file.id,
			Offset: offset,
			Data:   chunk,
		}

		respType, respData, err := h.file.client.SendAndReceive(protocol.MsgWriteChunk, &msg)
		if err != nil {
			logging.Debug("Write: failed to send chunk: %v", err)
			return totalWritten, syscall.EIO
		}

		if respType != protocol.MsgWriteAck {
			logging.Debug("Write: unexpected response type: 0x%02x", respType)
			return totalWritten, syscall.EIO
		}

		var ack protocol.WriteAck
		if err := protocol.DecodePayload(respData, &ack); err != nil {
			logging.Debug("Write: failed to decode ack: %v", err)
			return totalWritten, syscall.EIO
		}

		if ack.Error != "" {
			logging.Debug("Write: remote error: %s", ack.Error)
			return totalWritten, syscall.EIO
		}

		// BUG FIX: advance by the byte count the listener acknowledged, not
		// by the chunk size. Previously `remaining` was sliced past the full
		// chunk before the ack was inspected, so a short write silently
		// dropped the unwritten tail. Also reject zero or out-of-range counts
		// so a misbehaving listener cannot wedge this loop forever.
		if ack.Written <= 0 || ack.Written > chunkSize {
			logging.Debug("Write: invalid ack.Written=%d (chunk=%d)", ack.Written, chunkSize)
			return totalWritten, syscall.EIO
		}
		remaining = remaining[ack.Written:]
		offset += int64(ack.Written)
		totalWritten += uint32(ack.Written)
	}

	newSize := off + int64(totalWritten)
	if newSize > h.file.size {
		h.file.size = newSize
	}

	return totalWritten, 0
}

// Read always yields no data: the pipe is write-only from the FUSE side;
// file content lives only on the listener.
func (h *pipeHandle) Read(ctx context.Context, dest []byte, off int64) (fuse.ReadResult, syscall.Errno) {
	return fuse.ReadResultData(nil), 0
}

func (h *pipeHandle) Flush(ctx context.Context) syscall.Errno {
	logging.Debug("Flush: %s", h.file.name)
	return 0
}

func (h *pipeHandle) Fsync(ctx context.Context, flags uint32) syscall.Errno {
	logging.Debug("Fsync: %s (flags=%d)", h.file.name, flags)
	// For a streaming pipe, fsync is a no-op since data is sent immediately
	// Return success to allow Chrome downloads to complete
	return 0
}

func (h *pipeHandle) Allocate(ctx context.Context, off uint64, size uint64, mode uint32) syscall.Errno {
	logging.Debug("Allocate: %s (off=%d, size=%d, mode=%d)", h.file.name, off, size, mode)
	// Pre-allocate space for the file. For a streaming pipe, we just update the size.
+ h.file.mu.Lock() + defer h.file.mu.Unlock() + + newSize := int64(off + size) + if newSize > h.file.size { + h.file.size = newSize + } + return 0 +} + +func (h *pipeHandle) Release(ctx context.Context) syscall.Errno { + logging.Debug("Release: %s", h.file.name) + + h.file.mu.RLock() + deleted := h.file.deleted + h.file.mu.RUnlock() + + if deleted { + return 0 + } + + msg := protocol.FileClose{ + FileID: h.file.id, + } + if err := h.file.client.Send(protocol.MsgFileClose, &msg); err != nil { + logging.Debug("Release: failed to send FileClose: %v", err) + } + + return 0 +} diff --git a/server/lib/fspipe/health/server.go b/server/lib/fspipe/health/server.go new file mode 100644 index 00000000..308060bc --- /dev/null +++ b/server/lib/fspipe/health/server.go @@ -0,0 +1,209 @@ +package health + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "sync" + "time" + + "github.com/onkernel/kernel-images/server/lib/fspipe/logging" +) + +// Status represents the health status +type Status string + +const ( + StatusHealthy Status = "healthy" + StatusDegraded Status = "degraded" + StatusUnhealthy Status = "unhealthy" +) + +// Check is a health check function +type Check func() (Status, string) + +// StatsProvider provides statistics +type StatsProvider func() map[string]interface{} + +// Server provides health and metrics endpoints +type Server struct { + addr string + server *http.Server + + mu sync.RWMutex + checks map[string]Check + stats map[string]StatsProvider + + startTime time.Time +} + +// NewServer creates a new health server +func NewServer(addr string) *Server { + s := &Server{ + addr: addr, + checks: make(map[string]Check), + stats: make(map[string]StatsProvider), + startTime: time.Now(), + } + + mux := http.NewServeMux() + mux.HandleFunc("/health", s.handleHealth) + mux.HandleFunc("/health/live", s.handleLiveness) + mux.HandleFunc("/health/ready", s.handleReadiness) + mux.HandleFunc("/metrics", s.handleMetrics) + mux.HandleFunc("/stats", s.handleStats) 
+ + s.server = &http.Server{ + Addr: addr, + Handler: mux, + ReadTimeout: 5 * time.Second, + WriteTimeout: 10 * time.Second, + } + + return s +} + +// RegisterCheck adds a health check +func (s *Server) RegisterCheck(name string, check Check) { + s.mu.Lock() + defer s.mu.Unlock() + s.checks[name] = check +} + +// RegisterStats adds a stats provider +func (s *Server) RegisterStats(name string, provider StatsProvider) { + s.mu.Lock() + defer s.mu.Unlock() + s.stats[name] = provider +} + +// Start begins serving +func (s *Server) Start() error { + go func() { + logging.Info("Health server listening on %s", s.addr) + if err := s.server.ListenAndServe(); err != http.ErrServerClosed { + logging.Error("Health server error: %v", err) + } + }() + return nil +} + +// Stop gracefully shuts down the server +func (s *Server) Stop(ctx context.Context) error { + return s.server.Shutdown(ctx) +} + +func (s *Server) handleHealth(w http.ResponseWriter, r *http.Request) { + s.mu.RLock() + defer s.mu.RUnlock() + + overall := StatusHealthy + results := make(map[string]interface{}) + + for name, check := range s.checks { + status, msg := check() + results[name] = map[string]interface{}{ + "status": status, + "message": msg, + } + + if status == StatusUnhealthy { + overall = StatusUnhealthy + } else if status == StatusDegraded && overall == StatusHealthy { + overall = StatusDegraded + } + } + + response := map[string]interface{}{ + "status": overall, + "checks": results, + "uptime": time.Since(s.startTime).String(), + "timestamp": time.Now().Format(time.RFC3339), + } + + w.Header().Set("Content-Type", "application/json") + if overall != StatusHealthy { + w.WriteHeader(http.StatusServiceUnavailable) + } + json.NewEncoder(w).Encode(response) +} + +func (s *Server) handleLiveness(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(map[string]string{ + "status": "alive", + }) +} + +func (s *Server) handleReadiness(w 
http.ResponseWriter, r *http.Request) { + s.mu.RLock() + defer s.mu.RUnlock() + + for name, check := range s.checks { + status, msg := check() + if status == StatusUnhealthy { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusServiceUnavailable) + json.NewEncoder(w).Encode(map[string]interface{}{ + "status": "not_ready", + "reason": name, + "message": msg, + }) + return + } + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(map[string]string{ + "status": "ready", + }) +} + +func (s *Server) handleMetrics(w http.ResponseWriter, r *http.Request) { + s.mu.RLock() + defer s.mu.RUnlock() + + // Prometheus-style metrics + w.Header().Set("Content-Type", "text/plain") + + fmt.Fprintf(w, "# HELP fspipe_uptime_seconds Uptime in seconds\n") + fmt.Fprintf(w, "# TYPE fspipe_uptime_seconds gauge\n") + fmt.Fprintf(w, "fspipe_uptime_seconds %f\n", time.Since(s.startTime).Seconds()) + + for name, provider := range s.stats { + stats := provider() + for key, value := range stats { + metricName := fmt.Sprintf("fspipe_%s_%s", name, key) + + switch v := value.(type) { + case uint64: + fmt.Fprintf(w, "%s %d\n", metricName, v) + case int64: + fmt.Fprintf(w, "%s %d\n", metricName, v) + case int: + fmt.Fprintf(w, "%s %d\n", metricName, v) + case float64: + fmt.Fprintf(w, "%s %f\n", metricName, v) + case string: + // Skip strings in prometheus format + } + } + } +} + +func (s *Server) handleStats(w http.ResponseWriter, r *http.Request) { + s.mu.RLock() + defer s.mu.RUnlock() + + allStats := make(map[string]interface{}) + allStats["uptime"] = time.Since(s.startTime).String() + allStats["timestamp"] = time.Now().Format(time.RFC3339) + + for name, provider := range s.stats { + allStats[name] = provider() + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(allStats) +} diff --git a/server/lib/fspipe/listener/listener.go b/server/lib/fspipe/listener/listener.go new file mode 100644 index 
00000000..8cb8bac4 --- /dev/null +++ b/server/lib/fspipe/listener/listener.go @@ -0,0 +1,669 @@ +// Package listener provides the fspipe listener server functionality. +// The listener receives file operations from fspipe daemons and writes files locally. +package listener + +import ( + "bufio" + "bytes" + "context" + "io" + "net" + "net/http" + "os" + "path/filepath" + "sync" + "sync/atomic" + "time" + + "github.com/gorilla/websocket" + "github.com/onkernel/kernel-images/server/lib/fspipe/logging" + "github.com/onkernel/kernel-images/server/lib/fspipe/protocol" +) + +// Server is the TCP/WebSocket server that receives file operations +type Server struct { + addr string + localDir string + listener net.Listener + httpServer *http.Server + + wsEnabled bool + wsPath string + upgrader websocket.Upgrader + + ctx context.Context + cancel context.CancelFunc + wg sync.WaitGroup + + // Shutdown management + shutdownTimeout time.Duration + + // Metrics + activeConnections atomic.Int64 + totalConnections atomic.Uint64 + totalFiles atomic.Uint64 + totalBytes atomic.Uint64 + totalErrors atomic.Uint64 +} + +// Config holds server configuration +type Config struct { + WebSocketEnabled bool + WebSocketPath string + ShutdownTimeout time.Duration +} + +// NewServer creates a new listener server (TCP mode) +func NewServer(addr string, localDir string) *Server { + return NewServerWithConfig(addr, localDir, Config{}) +} + +// NewServerWithConfig creates a new listener server with configuration +func NewServerWithConfig(addr string, localDir string, config Config) *Server { + ctx, cancel := context.WithCancel(context.Background()) + + shutdownTimeout := config.ShutdownTimeout + if shutdownTimeout == 0 { + shutdownTimeout = 10 * time.Second + } + + s := &Server{ + addr: addr, + localDir: localDir, + wsEnabled: config.WebSocketEnabled, + wsPath: config.WebSocketPath, + ctx: ctx, + cancel: cancel, + shutdownTimeout: shutdownTimeout, + upgrader: websocket.Upgrader{ + ReadBufferSize: 64 * 
1024, + WriteBufferSize: 64 * 1024, + CheckOrigin: func(r *http.Request) bool { + return true + }, + }, + } + + if s.wsPath == "" { + s.wsPath = "/fspipe" + } + + return s +} + +// Start begins listening for connections +func (s *Server) Start() error { + if s.wsEnabled { + return s.startWebSocket() + } + return s.startTCP() +} + +func (s *Server) startTCP() error { + ln, err := net.Listen("tcp", s.addr) + if err != nil { + return err + } + s.listener = ln + + logging.Info("TCP listening on %s, writing files to %s", s.addr, s.localDir) + + s.wg.Add(1) + go s.acceptLoop() + + return nil +} + +func (s *Server) startWebSocket() error { + mux := http.NewServeMux() + mux.HandleFunc(s.wsPath, s.handleWebSocket) + + s.httpServer = &http.Server{ + Addr: s.addr, + Handler: mux, + } + + ln, err := net.Listen("tcp", s.addr) + if err != nil { + return err + } + s.listener = ln + + logging.Info("WebSocket listening on %s%s, writing files to %s", s.addr, s.wsPath, s.localDir) + + s.wg.Add(1) + go func() { + defer s.wg.Done() + if err := s.httpServer.Serve(ln); err != http.ErrServerClosed { + logging.Error("HTTP server error: %v", err) + } + }() + + return nil +} + +func (s *Server) acceptLoop() { + defer s.wg.Done() + + // Exponential backoff for accept errors + backoff := 10 * time.Millisecond + maxBackoff := 5 * time.Second + + for { + conn, err := s.listener.Accept() + if err != nil { + select { + case <-s.ctx.Done(): + return + default: + logging.Error("Accept error: %v", err) + s.totalErrors.Add(1) + + // Backoff to prevent CPU spin on persistent errors + timer := time.NewTimer(backoff) + select { + case <-s.ctx.Done(): + timer.Stop() + return + case <-timer.C: + } + + backoff *= 2 + if backoff > maxBackoff { + backoff = maxBackoff + } + continue + } + } + + // Reset backoff on successful accept + backoff = 10 * time.Millisecond + + s.totalConnections.Add(1) + s.activeConnections.Add(1) + logging.Info("New TCP connection from %s", conn.RemoteAddr()) + + s.wg.Add(1) + go 
s.handleTCPConnection(conn) + } +} + +func (s *Server) handleTCPConnection(conn net.Conn) { + defer s.wg.Done() + defer s.activeConnections.Add(-1) + defer conn.Close() + + handler := newHandler(s.localDir, &s.totalFiles, &s.totalBytes, &s.totalErrors) + reader := bufio.NewReader(conn) + writer := bufio.NewWriter(conn) + + handler.handle(s.ctx, reader, writer) + + logging.Info("TCP connection from %s closed", conn.RemoteAddr()) +} + +func (s *Server) handleWebSocket(w http.ResponseWriter, r *http.Request) { + conn, err := s.upgrader.Upgrade(w, r, nil) + if err != nil { + logging.Error("WebSocket upgrade error: %v", err) + s.totalErrors.Add(1) + return + } + + s.totalConnections.Add(1) + s.activeConnections.Add(1) + logging.Info("New WebSocket connection from %s", r.RemoteAddr) + + s.wg.Add(1) + go s.handleWSConnection(conn, r.RemoteAddr) +} + +func (s *Server) handleWSConnection(conn *websocket.Conn, remoteAddr string) { + defer s.wg.Done() + defer s.activeConnections.Add(-1) + defer conn.Close() + + handler := newHandler(s.localDir, &s.totalFiles, &s.totalBytes, &s.totalErrors) + wsAdapter := newWebSocketAdapter(conn) + + handler.handle(s.ctx, wsAdapter, wsAdapter) + + logging.Info("WebSocket connection from %s closed", remoteAddr) +} + +// Stop gracefully shuts down the server +func (s *Server) Stop() error { + logging.Info("Server shutting down...") + s.cancel() + + // Shutdown HTTP server with timeout + if s.httpServer != nil { + ctx, cancel := context.WithTimeout(context.Background(), s.shutdownTimeout) + defer cancel() + s.httpServer.Shutdown(ctx) + } + + if s.listener != nil { + s.listener.Close() + } + + // Wait for connections with timeout + done := make(chan struct{}) + go func() { + s.wg.Wait() + close(done) + }() + + select { + case <-done: + logging.Info("Server stopped gracefully") + case <-time.After(s.shutdownTimeout): + logging.Warn("Server shutdown timed out after %v", s.shutdownTimeout) + } + + return nil +} + +// Addr returns the listener 
address +func (s *Server) Addr() net.Addr { + if s.listener != nil { + return s.listener.Addr() + } + return nil +} + +// LocalDir returns the local directory where files are written +func (s *Server) LocalDir() string { + return s.localDir +} + +// WSPath returns the WebSocket path +func (s *Server) WSPath() string { + return s.wsPath +} + +// Stats returns server statistics +func (s *Server) Stats() map[string]interface{} { + return map[string]interface{}{ + "active_connections": s.activeConnections.Load(), + "total_connections": s.totalConnections.Load(), + "total_files": s.totalFiles.Load(), + "total_bytes": s.totalBytes.Load(), + "total_errors": s.totalErrors.Load(), + } +} + +// flusher is an interface for types that can flush buffered data +type flusher interface { + io.Writer + Flush() error +} + +// handler processes incoming messages and manages local files +type handler struct { + localDir string + + mu sync.RWMutex + files map[string]*fileEntry + + // Shared metrics + totalFiles *atomic.Uint64 + totalBytes *atomic.Uint64 + totalErrors *atomic.Uint64 +} + +// fileEntry wraps a file with metadata for tracking +type fileEntry struct { + file *os.File + path string + createdAt time.Time + bytesW int64 +} + +func newHandler(localDir string, totalFiles, totalBytes, totalErrors *atomic.Uint64) *handler { + return &handler{ + localDir: localDir, + files: make(map[string]*fileEntry), + totalFiles: totalFiles, + totalBytes: totalBytes, + totalErrors: totalErrors, + } +} + +func (h *handler) handle(ctx context.Context, r io.Reader, w flusher) { + // Panic recovery to prevent one bad message from crashing the server + defer func() { + if r := recover(); r != nil { + logging.Error("Handler panic recovered: %v", r) + if h.totalErrors != nil { + h.totalErrors.Add(1) + } + } + h.closeAllFiles() + }() + + decoder := protocol.NewDecoder(r) + encoder := protocol.NewEncoder(w) + + for { + select { + case <-ctx.Done(): + return + default: + } + + msgType, payload, err := 
decoder.Decode() + if err != nil { + if err != io.EOF { + logging.Debug("Decode error: %v", err) + } + return + } + + if err := h.handleMessage(msgType, payload, encoder, w); err != nil { + logging.Debug("Handle message error: %v", err) + if h.totalErrors != nil { + h.totalErrors.Add(1) + } + } + } +} + +func (h *handler) handleMessage(msgType byte, payload []byte, encoder *protocol.Encoder, w flusher) error { + switch msgType { + case protocol.MsgFileCreate: + var msg protocol.FileCreate + if err := protocol.DecodePayload(payload, &msg); err != nil { + return err + } + return h.handleFileCreate(&msg, encoder, w) + + case protocol.MsgFileClose: + var msg protocol.FileClose + if err := protocol.DecodePayload(payload, &msg); err != nil { + return err + } + return h.handleFileClose(&msg) + + case protocol.MsgWriteChunk: + var msg protocol.WriteChunk + if err := protocol.DecodePayload(payload, &msg); err != nil { + return err + } + return h.handleWriteChunk(&msg, encoder, w) + + case protocol.MsgTruncate: + var msg protocol.Truncate + if err := protocol.DecodePayload(payload, &msg); err != nil { + return err + } + return h.handleTruncate(&msg) + + case protocol.MsgRename: + var msg protocol.Rename + if err := protocol.DecodePayload(payload, &msg); err != nil { + return err + } + return h.handleRename(&msg) + + case protocol.MsgDelete: + var msg protocol.Delete + if err := protocol.DecodePayload(payload, &msg); err != nil { + return err + } + return h.handleDelete(&msg) + + default: + logging.Debug("Unknown message type: 0x%02x", msgType) + return nil + } +} + +func (h *handler) handleFileCreate(msg *protocol.FileCreate, encoder *protocol.Encoder, w flusher) error { + h.mu.Lock() + defer h.mu.Unlock() + + path := filepath.Join(h.localDir, msg.Filename) + + ack := protocol.FileCreateAck{ + FileID: msg.FileID, + Success: true, + } + + dir := filepath.Dir(path) + if err := os.MkdirAll(dir, 0755); err != nil { + ack.Success = false + ack.Error = err.Error() + if encErr := 
encoder.Encode(protocol.MsgFileCreateAck, &ack); encErr != nil { + return encErr + } + return w.Flush() + } + + f, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR|os.O_TRUNC, os.FileMode(msg.Mode)) + if err != nil { + ack.Success = false + ack.Error = err.Error() + if encErr := encoder.Encode(protocol.MsgFileCreateAck, &ack); encErr != nil { + return encErr + } + return w.Flush() + } + + h.files[msg.FileID] = &fileEntry{ + file: f, + path: path, + createdAt: time.Now(), + } + + if h.totalFiles != nil { + h.totalFiles.Add(1) + } + + logging.Debug("Created file: %s (id=%s)", msg.Filename, msg.FileID) + + // Send success ACK + if err := encoder.Encode(protocol.MsgFileCreateAck, &ack); err != nil { + return err + } + return w.Flush() +} + +func (h *handler) handleFileClose(msg *protocol.FileClose) error { + h.mu.Lock() + defer h.mu.Unlock() + + entry, ok := h.files[msg.FileID] + if !ok { + logging.Debug("FileClose: unknown file ID %s", msg.FileID) + return nil + } + + // Sync before close to ensure data is written + if err := entry.file.Sync(); err != nil { + logging.Debug("Sync error for %s: %v", msg.FileID, err) + } + + if err := entry.file.Close(); err != nil { + logging.Debug("Close error for %s: %v", msg.FileID, err) + } + + delete(h.files, msg.FileID) + logging.Debug("Closed file: id=%s, bytes=%d, duration=%v", + msg.FileID, entry.bytesW, time.Since(entry.createdAt)) + return nil +} + +func (h *handler) handleWriteChunk(msg *protocol.WriteChunk, encoder *protocol.Encoder, w flusher) error { + h.mu.RLock() + entry, ok := h.files[msg.FileID] + h.mu.RUnlock() + + ack := protocol.WriteAck{ + FileID: msg.FileID, + Offset: msg.Offset, + } + + if !ok { + ack.Error = "unknown file ID" + if err := encoder.Encode(protocol.MsgWriteAck, &ack); err != nil { + return err + } + return w.Flush() + } + + n, err := entry.file.WriteAt(msg.Data, msg.Offset) + if err != nil { + ack.Error = err.Error() + } else { + // Track bytes written + h.mu.Lock() + entry.bytesW += int64(n) + 
h.mu.Unlock() + + if h.totalBytes != nil { + h.totalBytes.Add(uint64(n)) + } + } + ack.Written = n + + if err := encoder.Encode(protocol.MsgWriteAck, &ack); err != nil { + return err + } + return w.Flush() +} + +func (h *handler) handleTruncate(msg *protocol.Truncate) error { + h.mu.RLock() + entry, ok := h.files[msg.FileID] + h.mu.RUnlock() + + if !ok { + logging.Debug("Truncate: unknown file ID %s", msg.FileID) + return nil + } + + if err := entry.file.Truncate(msg.Size); err != nil { + logging.Debug("Truncate error for %s: %v", msg.FileID, err) + return err + } + + logging.Debug("Truncated file: id=%s to %d bytes", msg.FileID, msg.Size) + return nil +} + +func (h *handler) handleRename(msg *protocol.Rename) error { + oldPath := filepath.Join(h.localDir, msg.OldName) + newPath := filepath.Join(h.localDir, msg.NewName) + + dir := filepath.Dir(newPath) + if err := os.MkdirAll(dir, 0755); err != nil { + return err + } + + if err := os.Rename(oldPath, newPath); err != nil { + logging.Debug("Rename error: %v", err) + return err + } + + logging.Debug("Renamed: %s -> %s", msg.OldName, msg.NewName) + return nil +} + +func (h *handler) handleDelete(msg *protocol.Delete) error { + path := filepath.Join(h.localDir, msg.Filename) + + if err := os.Remove(path); err != nil { + logging.Debug("Delete error: %v", err) + return err + } + + logging.Debug("Deleted: %s", msg.Filename) + return nil +} + +func (h *handler) closeAllFiles() { + h.mu.Lock() + defer h.mu.Unlock() + + for id, entry := range h.files { + if err := entry.file.Sync(); err != nil { + logging.Debug("Sync error during cleanup for %s: %v", id, err) + } + if err := entry.file.Close(); err != nil { + logging.Debug("Close error during cleanup for %s: %v", id, err) + } + delete(h.files, id) + } +} + +// webSocketAdapter adapts a WebSocket connection to io.Reader/Writer interfaces +type webSocketAdapter struct { + conn *websocket.Conn + + readMu sync.Mutex + readBuf bytes.Buffer + + writeMu sync.Mutex + writeBuf 
bytes.Buffer +} + +func newWebSocketAdapter(conn *websocket.Conn) *webSocketAdapter { + return &webSocketAdapter{ + conn: conn, + } +} + +func (a *webSocketAdapter) Read(p []byte) (int, error) { + a.readMu.Lock() + defer a.readMu.Unlock() + + if a.readBuf.Len() > 0 { + return a.readBuf.Read(p) + } + + messageType, data, err := a.conn.ReadMessage() + if err != nil { + return 0, err + } + + if messageType != websocket.BinaryMessage { + return a.Read(p) + } + + a.readBuf.Write(data) + return a.readBuf.Read(p) +} + +func (a *webSocketAdapter) Write(p []byte) (int, error) { + a.writeMu.Lock() + defer a.writeMu.Unlock() + + return a.writeBuf.Write(p) +} + +func (a *webSocketAdapter) Flush() error { + a.writeMu.Lock() + defer a.writeMu.Unlock() + + if a.writeBuf.Len() == 0 { + return nil + } + + data := a.writeBuf.Bytes() + a.writeBuf.Reset() + + if err := a.conn.WriteMessage(websocket.BinaryMessage, data); err != nil { + logging.Error("WebSocket write error: %v", err) + return err + } + + return nil +} + +func (a *webSocketAdapter) Close() error { + return a.conn.Close() +} + +var _ io.Reader = (*webSocketAdapter)(nil) +var _ io.Writer = (*webSocketAdapter)(nil) diff --git a/server/lib/fspipe/logging/logging.go b/server/lib/fspipe/logging/logging.go new file mode 100644 index 00000000..ddb32099 --- /dev/null +++ b/server/lib/fspipe/logging/logging.go @@ -0,0 +1,223 @@ +package logging + +import ( + "fmt" + "io" + "os" + "runtime" + "strings" + "sync" + "time" +) + +// Level represents log severity +type Level int + +const ( + LevelDebug Level = iota + LevelInfo + LevelWarn + LevelError + LevelSilent +) + +var levelNames = map[Level]string{ + LevelDebug: "DEBUG", + LevelInfo: "INFO", + LevelWarn: "WARN", + LevelError: "ERROR", +} + +var levelColors = map[Level]string{ + LevelDebug: "\033[36m", // Cyan + LevelInfo: "\033[32m", // Green + LevelWarn: "\033[33m", // Yellow + LevelError: "\033[31m", // Red +} + +const colorReset = "\033[0m" + +// Logger provides structured 
logging with levels.
type Logger struct {
	mu       sync.Mutex
	out      io.Writer
	level    Level
	prefix   string
	useColor bool
}

var defaultLogger = &Logger{
	out:      os.Stderr,
	level:    LevelInfo,
	useColor: true,
}

// SetOutput sets the output destination
func SetOutput(w io.Writer) {
	defaultLogger.mu.Lock()
	defer defaultLogger.mu.Unlock()
	defaultLogger.out = w
}

// SetLevel sets the minimum log level
func SetLevel(level Level) {
	defaultLogger.mu.Lock()
	defer defaultLogger.mu.Unlock()
	defaultLogger.level = level
}

// SetSilent enables/disables silent mode
func SetSilent(silent bool) {
	defaultLogger.mu.Lock()
	defer defaultLogger.mu.Unlock()
	if silent {
		defaultLogger.level = LevelSilent
	} else {
		defaultLogger.level = LevelInfo
	}
}

// SetVerbose enables verbose (debug) logging
func SetVerbose(verbose bool) {
	defaultLogger.mu.Lock()
	defer defaultLogger.mu.Unlock()
	if verbose {
		defaultLogger.level = LevelDebug
	}
}

// SetColor enables/disables color output
func SetColor(useColor bool) {
	defaultLogger.mu.Lock()
	defer defaultLogger.mu.Unlock()
	defaultLogger.useColor = useColor
}

// SetPrefix sets a prefix for all log messages
func SetPrefix(prefix string) {
	defaultLogger.mu.Lock()
	defer defaultLogger.mu.Unlock()
	defaultLogger.prefix = prefix
}

// log formats and emits one record. It resolves the call site with
// runtime.Caller(2), so it MUST be invoked exactly one frame below the
// user's call (i.e. directly from the exported package-level functions).
func (l *Logger) log(level Level, format string, args ...interface{}) {
	l.mu.Lock()
	defer l.mu.Unlock()

	if level < l.level {
		return
	}

	// Get caller info
	_, file, line, ok := runtime.Caller(2)
	if ok {
		// Extract just the filename
		if idx := strings.LastIndex(file, "/"); idx >= 0 {
			file = file[idx+1:]
		}
	} else {
		file = "???"
		line = 0
	}

	// Format timestamp
	now := time.Now()
	timestamp := now.Format("15:04:05.000")

	// Format message
	msg := fmt.Sprintf(format, args...)

	// Build log line
	var buf strings.Builder

	if l.useColor {
		buf.WriteString(levelColors[level])
	}

	buf.WriteString(timestamp)
	buf.WriteString(" [")
	buf.WriteString(levelNames[level])
	buf.WriteString("] ")

	if l.prefix != "" {
		buf.WriteString(l.prefix)
		buf.WriteString(" ")
	}

	buf.WriteString(file)
	buf.WriteString(":")
	buf.WriteString(fmt.Sprintf("%d", line))
	buf.WriteString(" ")

	if l.useColor {
		buf.WriteString(colorReset)
	}

	buf.WriteString(msg)
	buf.WriteString("\n")

	l.out.Write([]byte(buf.String()))
}

// Debug logs a debug message
func Debug(format string, args ...interface{}) {
	defaultLogger.log(LevelDebug, format, args...)
}

// Info logs an info message
func Info(format string, args ...interface{}) {
	defaultLogger.log(LevelInfo, format, args...)
}

// Warn logs a warning message
func Warn(format string, args ...interface{}) {
	defaultLogger.log(LevelWarn, format, args...)
}

// Error logs an error message
func Error(format string, args ...interface{}) {
	defaultLogger.log(LevelError, format, args...)
}

// Debugf is an alias for Debug.
//
// BUG FIX: the Xxxf aliases used to forward through Debug/Info/Warn/Error,
// which added a stack frame and made runtime.Caller(2) report this file's
// forwarding line instead of the user's call site. They now call
// defaultLogger.log directly so file:line is correct for both forms.
func Debugf(format string, args ...interface{}) {
	defaultLogger.log(LevelDebug, format, args...)
}

// Infof is an alias for Info
func Infof(format string, args ...interface{}) {
	defaultLogger.log(LevelInfo, format, args...)
}

// Warnf is an alias for Warn
func Warnf(format string, args ...interface{}) {
	defaultLogger.log(LevelWarn, format, args...)
}

// Errorf is an alias for Error
func Errorf(format string, args ...interface{}) {
	defaultLogger.log(LevelError, format, args...)
+} + +// TraceOp logs an operation start and returns a function to log completion +func TraceOp(op string, details string) func(error) { + start := time.Now() + Debug("→ %s: %s", op, details) + + return func(err error) { + elapsed := time.Since(start) + if err != nil { + Error("✗ %s: %s (elapsed=%v, error=%v)", op, details, elapsed, err) + } else { + Debug("✓ %s: %s (elapsed=%v)", op, details, elapsed) + } + } +} + +// FormatBytes formats byte slice for logging (truncated if too long) +func FormatBytes(data []byte, maxLen int) string { + if len(data) <= maxLen { + return fmt.Sprintf("[%d bytes]", len(data)) + } + return fmt.Sprintf("[%d bytes, first %d: %x...]", len(data), maxLen, data[:maxLen]) +} diff --git a/server/lib/fspipe/protocol/codec.go b/server/lib/fspipe/protocol/codec.go new file mode 100644 index 00000000..ee58890a --- /dev/null +++ b/server/lib/fspipe/protocol/codec.go @@ -0,0 +1,94 @@ +package protocol + +import ( + "encoding/binary" + "encoding/json" + "fmt" + "io" +) + +// Frame format: [Length: 4 bytes (uint32 BE)] [Type: 1 byte] [Payload: N bytes JSON] + +// Encoder writes framed messages to a writer +type Encoder struct { + w io.Writer +} + +// NewEncoder creates a new encoder +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{w: w} +} + +// Encode writes a message with length-prefix framing +func (e *Encoder) Encode(msgType byte, payload interface{}) error { + // Marshal payload to JSON + data, err := json.Marshal(payload) + if err != nil { + return fmt.Errorf("marshal payload: %w", err) + } + + // Calculate total frame length (type byte + payload) + frameLen := uint32(1 + len(data)) + + // Write length prefix (4 bytes, big-endian) + if err := binary.Write(e.w, binary.BigEndian, frameLen); err != nil { + return fmt.Errorf("write length: %w", err) + } + + // Write message type (1 byte) + if _, err := e.w.Write([]byte{msgType}); err != nil { + return fmt.Errorf("write type: %w", err) + } + + // Write payload + if _, err := 
e.w.Write(data); err != nil { + return fmt.Errorf("write payload: %w", err) + } + + return nil +} + +// Decoder reads framed messages from a reader +type Decoder struct { + r io.Reader +} + +// NewDecoder creates a new decoder +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{r: r} +} + +// Decode reads a framed message and returns the type and raw JSON payload +func (d *Decoder) Decode() (byte, []byte, error) { + // Read length prefix (4 bytes) + var frameLen uint32 + if err := binary.Read(d.r, binary.BigEndian, &frameLen); err != nil { + return 0, nil, err + } + + if frameLen < 1 { + return 0, nil, fmt.Errorf("invalid frame length: %d", frameLen) + } + + // Read type byte + typeBuf := make([]byte, 1) + if _, err := io.ReadFull(d.r, typeBuf); err != nil { + return 0, nil, fmt.Errorf("read type: %w", err) + } + + // Read payload + payloadLen := frameLen - 1 + payload := make([]byte, payloadLen) + if payloadLen > 0 { + if _, err := io.ReadFull(d.r, payload); err != nil { + return 0, nil, fmt.Errorf("read payload: %w", err) + } + } + + return typeBuf[0], payload, nil +} + +// DecodePayload unmarshals JSON payload into the target struct +func DecodePayload(data []byte, v interface{}) error { + return json.Unmarshal(data, v) +} diff --git a/server/lib/fspipe/protocol/messages.go b/server/lib/fspipe/protocol/messages.go new file mode 100644 index 00000000..71fd46de --- /dev/null +++ b/server/lib/fspipe/protocol/messages.go @@ -0,0 +1,74 @@ +package protocol + +// Message types +const ( + MsgFileCreate byte = 0x01 + MsgFileCreateAck byte = 0x02 // Response to FileCreate + MsgFileClose byte = 0x03 + MsgWriteChunk byte = 0x10 + MsgWriteAck byte = 0x11 + MsgTruncate byte = 0x12 + MsgRename byte = 0x20 + MsgDelete byte = 0x21 +) + +// ChunkSize is the default chunk size for file writes (64KB) +const ChunkSize = 64 * 1024 + +// FileCreate is sent when a new file is created +type FileCreate struct { + FileID string `json:"file_id"` + Filename string `json:"filename"` 
+ Mode uint32 `json:"mode"` +} + +// FileCreateAck is the response to FileCreate +type FileCreateAck struct { + FileID string `json:"file_id"` + Success bool `json:"success"` + Error string `json:"error,omitempty"` +} + +// FileClose is sent when a file handle is closed +type FileClose struct { + FileID string `json:"file_id"` +} + +// WriteChunk is sent for each chunk of file data +type WriteChunk struct { + FileID string `json:"file_id"` + Offset int64 `json:"offset"` + Data []byte `json:"data"` +} + +// WriteAck is sent as acknowledgment for a write +type WriteAck struct { + FileID string `json:"file_id"` + Offset int64 `json:"offset"` + Written int `json:"written"` + Error string `json:"error,omitempty"` +} + +// Truncate is sent to truncate a file +type Truncate struct { + FileID string `json:"file_id"` + Size int64 `json:"size"` +} + +// Rename is sent to rename a file +type Rename struct { + FileID string `json:"file_id"` + OldName string `json:"old_name"` + NewName string `json:"new_name"` +} + +// Delete is sent to delete a file +type Delete struct { + Filename string `json:"filename"` +} + +// Message wraps any protocol message with its type +type Message struct { + Type byte + Payload interface{} +} diff --git a/server/lib/fspipe/queue/queue.go b/server/lib/fspipe/queue/queue.go new file mode 100644 index 00000000..ede9f7f0 --- /dev/null +++ b/server/lib/fspipe/queue/queue.go @@ -0,0 +1,222 @@ +package queue + +import ( + "context" + "errors" + "sync" + "sync/atomic" + "time" + + "github.com/onkernel/kernel-images/server/lib/fspipe/logging" +) + +var ( + ErrQueueFull = errors.New("queue is full") + ErrQueueClosed = errors.New("queue is closed") + ErrSendTimeout = errors.New("send timeout") + ErrAckTimeout = errors.New("acknowledgment timeout") +) + +// Message represents a queued message with tracking info +type Message struct { + ID uint64 + Type byte + Payload interface{} + Result chan error + Timestamp time.Time + Retries int +} + +// Queue is a 
bounded message queue with backpressure +type Queue struct { + messages chan *Message + pending sync.Map // msgID -> *Message (waiting for ACK) + + seqNum uint64 + maxSize int + closed atomic.Bool + + ackTimeout time.Duration + maxRetries int + + mu sync.Mutex +} + +// Config holds queue configuration +type Config struct { + MaxSize int // Maximum queue size + AckTimeout time.Duration // Timeout waiting for ACK + MaxRetries int // Maximum send retries +} + +// DefaultConfig returns sensible defaults +func DefaultConfig() Config { + return Config{ + MaxSize: 1000, + AckTimeout: 30 * time.Second, + MaxRetries: 3, + } +} + +// New creates a new message queue +func New(cfg Config) *Queue { + if cfg.MaxSize <= 0 { + cfg.MaxSize = 1000 + } + if cfg.AckTimeout <= 0 { + cfg.AckTimeout = 30 * time.Second + } + if cfg.MaxRetries <= 0 { + cfg.MaxRetries = 3 + } + + return &Queue{ + messages: make(chan *Message, cfg.MaxSize), + maxSize: cfg.MaxSize, + ackTimeout: cfg.AckTimeout, + maxRetries: cfg.MaxRetries, + } +} + +// Enqueue adds a message to the queue +// Returns immediately if queue has space, blocks with timeout otherwise +func (q *Queue) Enqueue(ctx context.Context, msgType byte, payload interface{}) (*Message, error) { + if q.closed.Load() { + return nil, ErrQueueClosed + } + + msg := &Message{ + ID: atomic.AddUint64(&q.seqNum, 1), + Type: msgType, + Payload: payload, + Result: make(chan error, 1), + Timestamp: time.Now(), + } + + select { + case q.messages <- msg: + return msg, nil + case <-ctx.Done(): + return nil, ctx.Err() + default: + // Queue full - try with timeout + select { + case q.messages <- msg: + return msg, nil + case <-ctx.Done(): + return nil, ctx.Err() + case <-time.After(5 * time.Second): + return nil, ErrQueueFull + } + } +} + +// EnqueueSync adds a message and waits for the result +func (q *Queue) EnqueueSync(ctx context.Context, msgType byte, payload interface{}) error { + msg, err := q.Enqueue(ctx, msgType, payload) + if err != nil { + return 
err + } + + // Wait for send completion with timeout + select { + case err := <-msg.Result: + return err + case <-ctx.Done(): + return ctx.Err() + case <-time.After(q.ackTimeout): + return ErrSendTimeout + } +} + +// Dequeue removes the next message from the queue +func (q *Queue) Dequeue(ctx context.Context) (*Message, error) { + select { + case msg, ok := <-q.messages: + if !ok { + return nil, ErrQueueClosed + } + return msg, nil + case <-ctx.Done(): + return nil, ctx.Err() + } +} + +// TrackPending marks a message as pending ACK +func (q *Queue) TrackPending(msg *Message) { + q.pending.Store(msg.ID, msg) +} + +// AckMessage marks a message as successfully sent +func (q *Queue) AckMessage(msgID uint64, err error) { + if val, ok := q.pending.LoadAndDelete(msgID); ok { + msg := val.(*Message) + select { + case msg.Result <- err: + default: + logging.Warn("Message %d result channel full", msgID) + } + } +} + +// GetPendingCount returns the number of pending messages +func (q *Queue) GetPendingCount() int { + count := 0 + q.pending.Range(func(_, _ interface{}) bool { + count++ + return true + }) + return count +} + +// RetryPending re-queues all pending messages for retry +func (q *Queue) RetryPending() int { + count := 0 + q.pending.Range(func(key, val interface{}) bool { + msg := val.(*Message) + msg.Retries++ + + if msg.Retries > q.maxRetries { + q.pending.Delete(key) + select { + case msg.Result <- errors.New("max retries exceeded"): + default: + } + logging.Warn("Message %d exceeded max retries", msg.ID) + } else { + // Re-queue for retry + select { + case q.messages <- msg: + count++ + default: + logging.Error("Cannot re-queue message %d: queue full", msg.ID) + } + } + q.pending.Delete(key) + return true + }) + return count +} + +// Len returns the current queue length +func (q *Queue) Len() int { + return len(q.messages) +} + +// Close closes the queue +func (q *Queue) Close() { + if q.closed.CompareAndSwap(false, true) { + close(q.messages) + + // Fail all 
pending messages + q.pending.Range(func(key, val interface{}) bool { + msg := val.(*Message) + select { + case msg.Result <- ErrQueueClosed: + default: + } + q.pending.Delete(key) + return true + }) + } +} diff --git a/server/lib/fspipe/transport/broadcaster.go b/server/lib/fspipe/transport/broadcaster.go new file mode 100644 index 00000000..64c5a50e --- /dev/null +++ b/server/lib/fspipe/transport/broadcaster.go @@ -0,0 +1,605 @@ +// Package transport provides the broadcaster - a WebSocket server that external clients connect to. +// When the FUSE daemon writes, it broadcasts to all connected clients. +package transport + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "sync" + "sync/atomic" + "time" + + "github.com/gorilla/websocket" + "github.com/onkernel/kernel-images/server/lib/fspipe/logging" + "github.com/onkernel/kernel-images/server/lib/fspipe/protocol" +) + +const ( + // Timeouts + writeTimeout = 10 * time.Second + ackTimeout = 15 * time.Second + pingInterval = 30 * time.Second + pongTimeout = 10 * time.Second + shutdownTimeout = 5 * time.Second + + // Buffer sizes + responseChSize = 100 + writeBufferSize = 256 * 1024 + readBufferSize = 64 * 1024 +) + +// clientConn wraps a WebSocket connection with health tracking +type clientConn struct { + conn *websocket.Conn + responseCh chan wsResponse + addr string + healthy atomic.Bool + lastPong atomic.Int64 + writeMu sync.Mutex // Per-connection write lock +} + +func newClientConn(conn *websocket.Conn) *clientConn { + c := &clientConn{ + conn: conn, + responseCh: make(chan wsResponse, responseChSize), + addr: conn.RemoteAddr().String(), + } + c.healthy.Store(true) + c.lastPong.Store(time.Now().UnixNano()) + return c +} + +func (c *clientConn) isHealthy() bool { + if !c.healthy.Load() { + return false + } + // Check if we've received a pong recently + lastPong := time.Unix(0, c.lastPong.Load()) + return time.Since(lastPong) < pingInterval+pongTimeout +} + +func (c *clientConn) writeWithDeadline(data 
[]byte) error { + c.writeMu.Lock() + defer c.writeMu.Unlock() + + c.conn.SetWriteDeadline(time.Now().Add(writeTimeout)) + err := c.conn.WriteMessage(websocket.BinaryMessage, data) + c.conn.SetWriteDeadline(time.Time{}) // Clear deadline + + if err != nil { + c.healthy.Store(false) + } + return err +} + +func (c *clientConn) ping() error { + c.writeMu.Lock() + defer c.writeMu.Unlock() + + c.conn.SetWriteDeadline(time.Now().Add(writeTimeout)) + err := c.conn.WriteMessage(websocket.PingMessage, []byte{}) + c.conn.SetWriteDeadline(time.Time{}) + + if err != nil { + c.healthy.Store(false) + } + return err +} + +// Broadcaster is a WebSocket server that broadcasts file ops to connected clients. +// External clients connect to receive file chunks and operations. +type Broadcaster struct { + addr string + path string + server *http.Server + + mu sync.RWMutex + clients map[*websocket.Conn]*clientConn + state ConnectionState + + // Per-file request tracking for concurrent file operations + fileMu sync.RWMutex + fileReqs map[string]*fileRequest // fileID -> pending request + + // Require at least one client for writes (fail-safe mode) + requireClient atomic.Bool + + // Fast mode: don't wait for ACKs on writes (fire-and-forget) + // Only FileCreate waits for ACK, writes are async + fastMode atomic.Bool + + // Stats + messagesSent atomic.Uint64 + messagesRecv atomic.Uint64 + bytesSent atomic.Uint64 + bytesRecv atomic.Uint64 + clientsTotal atomic.Uint64 + clientsCurrent atomic.Int64 + errors atomic.Uint64 + + ctx context.Context + cancel context.CancelFunc + + upgrader websocket.Upgrader +} + +// fileRequest tracks a pending request for a specific file +type fileRequest struct { + mu sync.Mutex + waiting bool + respCh chan wsResponse + deadline time.Time +} + +// NewBroadcaster creates a new broadcaster that listens on the given address. +// Clients connect to ws://addr/path to receive file operations. 
+func NewBroadcaster(addr, path string) *Broadcaster { + ctx, cancel := context.WithCancel(context.Background()) + b := &Broadcaster{ + addr: addr, + path: path, + clients: make(map[*websocket.Conn]*clientConn), + fileReqs: make(map[string]*fileRequest), + state: StateDisconnected, + ctx: ctx, + cancel: cancel, + upgrader: websocket.Upgrader{ + ReadBufferSize: readBufferSize, + WriteBufferSize: writeBufferSize, + CheckOrigin: func(r *http.Request) bool { return true }, + }, + } + // Default: require at least one client (fail-safe) + b.requireClient.Store(true) + return b +} + +// SetRequireClient sets whether writes should fail when no clients are connected. +// If true (default), writes fail with error when no clients. If false, fake ACKs are returned. +func (b *Broadcaster) SetRequireClient(require bool) { + b.requireClient.Store(require) +} + +// SetFastMode enables fire-and-forget mode for write operations. +// In fast mode, only FileCreate waits for ACK. Writes are sent async without waiting. +// This significantly improves throughput but trades off guaranteed delivery. +func (b *Broadcaster) SetFastMode(fast bool) { + b.fastMode.Store(fast) +} + +// Connect starts the WebSocket server. 
+func (b *Broadcaster) Connect() error { + mux := http.NewServeMux() + mux.HandleFunc(b.path, b.handleWebSocket) + + b.server = &http.Server{ + Addr: b.addr, + Handler: mux, + } + + errCh := make(chan error, 1) + + // Start server in background + go func() { + logging.Info("Broadcaster listening on %s%s", b.addr, b.path) + if err := b.server.ListenAndServe(); err != http.ErrServerClosed { + logging.Error("Broadcaster server error: %v", err) + errCh <- err + } + }() + + // Wait a bit and check for immediate errors + select { + case err := <-errCh: + return fmt.Errorf("broadcaster failed to start: %w", err) + case <-time.After(100 * time.Millisecond): + // Server started successfully + } + + b.mu.Lock() + b.state = StateConnected + b.mu.Unlock() + + // Start health monitor + go b.healthMonitor() + + return nil +} + +// healthMonitor periodically pings clients and removes dead ones +func (b *Broadcaster) healthMonitor() { + ticker := time.NewTicker(pingInterval) + defer ticker.Stop() + + for { + select { + case <-b.ctx.Done(): + return + case <-ticker.C: + b.pingClients() + b.removeDeadClients() + } + } +} + +func (b *Broadcaster) pingClients() { + b.mu.RLock() + clients := make([]*clientConn, 0, len(b.clients)) + for _, c := range b.clients { + clients = append(clients, c) + } + b.mu.RUnlock() + + for _, c := range clients { + if err := c.ping(); err != nil { + logging.Debug("Ping failed for %s: %v", c.addr, err) + } + } +} + +func (b *Broadcaster) removeDeadClients() { + b.mu.Lock() + defer b.mu.Unlock() + + for conn, c := range b.clients { + if !c.isHealthy() { + logging.Info("Removing dead client: %s", c.addr) + conn.Close() + close(c.responseCh) + delete(b.clients, conn) + b.clientsCurrent.Add(-1) + } + } +} + +func (b *Broadcaster) handleWebSocket(w http.ResponseWriter, r *http.Request) { + conn, err := b.upgrader.Upgrade(w, r, nil) + if err != nil { + logging.Error("Broadcaster upgrade error: %v", err) + return + } + + client := newClientConn(conn) + + // Set 
up pong handler + conn.SetPongHandler(func(string) error { + client.lastPong.Store(time.Now().UnixNano()) + return nil + }) + + b.mu.Lock() + b.clients[conn] = client + b.mu.Unlock() + + b.clientsTotal.Add(1) + b.clientsCurrent.Add(1) + + logging.Info("Client connected: %s (total: %d)", client.addr, b.clientsCurrent.Load()) + + // Read responses from this client + go b.readLoop(client) +} + +func (b *Broadcaster) readLoop(client *clientConn) { + defer func() { + b.mu.Lock() + delete(b.clients, client.conn) + close(client.responseCh) + b.mu.Unlock() + + b.clientsCurrent.Add(-1) + client.conn.Close() + client.healthy.Store(false) + logging.Info("Client disconnected: %s (total: %d)", client.addr, b.clientsCurrent.Load()) + }() + + for { + _, rawData, err := client.conn.ReadMessage() + if err != nil { + if !websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway) { + logging.Debug("Client read error from %s: %v", client.addr, err) + } + return + } + + b.messagesRecv.Add(1) + b.bytesRecv.Add(uint64(len(rawData))) + + if len(rawData) < 5 { + logging.Debug("Malformed message from %s: too short", client.addr) + continue + } + + msgType := rawData[4] + msgData := rawData[5:] + + // Route ACK to the appropriate file request + b.routeResponse(msgType, msgData) + } +} + +// routeResponse routes an ACK response to the waiting file request +func (b *Broadcaster) routeResponse(msgType byte, data []byte) { + // Extract file_id from response + var resp struct { + FileID string `json:"file_id"` + } + if err := json.Unmarshal(data, &resp); err != nil { + logging.Debug("Failed to parse response file_id: %v", err) + return + } + + b.fileMu.RLock() + req, ok := b.fileReqs[resp.FileID] + b.fileMu.RUnlock() + + if !ok || req == nil { + logging.Debug("No pending request for file %s", resp.FileID) + return + } + + req.mu.Lock() + if req.waiting { + select { + case req.respCh <- wsResponse{msgType: msgType, data: data}: + default: + logging.Debug("Response channel 
full for file %s", resp.FileID) + } + } + req.mu.Unlock() +} + +// getHealthyClients returns a list of healthy connected clients +func (b *Broadcaster) getHealthyClients() []*clientConn { + b.mu.RLock() + defer b.mu.RUnlock() + + clients := make([]*clientConn, 0, len(b.clients)) + for _, c := range b.clients { + if c.isHealthy() { + clients = append(clients, c) + } + } + return clients +} + +// Send broadcasts a message to all connected clients (fire-and-forget). +func (b *Broadcaster) Send(msgType byte, payload interface{}) error { + encodedData, err := b.encodeMessage(msgType, payload) + if err != nil { + return err + } + + clients := b.getHealthyClients() + + if len(clients) == 0 { + if b.requireClient.Load() { + b.errors.Add(1) + return fmt.Errorf("no healthy clients connected") + } + logging.Debug("No clients connected, message dropped") + return nil + } + + var sendErrors int + for _, c := range clients { + if err := c.writeWithDeadline(encodedData); err != nil { + logging.Debug("Broadcast write error to %s: %v", c.addr, err) + sendErrors++ + } + } + + // Fail if all sends failed + if sendErrors == len(clients) { + b.errors.Add(1) + return fmt.Errorf("failed to send to all %d clients", len(clients)) + } + + b.messagesSent.Add(1) + b.bytesSent.Add(uint64(len(encodedData))) + + return nil +} + +// SendSync sends a message and waits for flush (broadcasts to all clients). +func (b *Broadcaster) SendSync(msgType byte, payload interface{}) error { + return b.Send(msgType, payload) +} + +// SendAndReceive broadcasts a message and waits for ACK from any client. 
+func (b *Broadcaster) SendAndReceive(msgType byte, payload interface{}) (byte, []byte, error) { + // Fast mode: fire-and-forget for writes, only wait for FileCreate ACK + if b.fastMode.Load() && msgType == protocol.MsgWriteChunk { + msg := payload.(*protocol.WriteChunk) + if err := b.Send(msgType, payload); err != nil { + return 0, nil, err + } + // Return immediate fake ACK + ack := protocol.WriteAck{FileID: msg.FileID, Offset: msg.Offset, Written: len(msg.Data)} + data, _ := json.Marshal(ack) + return protocol.MsgWriteAck, data, nil + } + + // Extract file ID for routing + var fileID string + switch msg := payload.(type) { + case *protocol.FileCreate: + fileID = msg.FileID + case *protocol.WriteChunk: + fileID = msg.FileID + default: + // For other message types, use a random ID + fileID = fmt.Sprintf("_req_%d", time.Now().UnixNano()) + } + + // Create or get file request tracker + b.fileMu.Lock() + req, ok := b.fileReqs[fileID] + if !ok { + req = &fileRequest{ + respCh: make(chan wsResponse, 1), + } + b.fileReqs[fileID] = req + } + b.fileMu.Unlock() + + // Mark as waiting + req.mu.Lock() + req.waiting = true + req.deadline = time.Now().Add(ackTimeout) + // Drain any stale responses + select { + case <-req.respCh: + default: + } + req.mu.Unlock() + + // Cleanup when done + defer func() { + req.mu.Lock() + req.waiting = false + req.mu.Unlock() + }() + + // Encode and send + encodedData, err := b.encodeMessage(msgType, payload) + if err != nil { + return 0, nil, err + } + + clients := b.getHealthyClients() + + if len(clients) == 0 { + if b.requireClient.Load() { + b.errors.Add(1) + return 0, nil, fmt.Errorf("no healthy clients connected") + } + // Fallback to fake ACK if not requiring clients + return b.fakeAck(msgType, payload) + } + + // Send to all healthy clients + var sendErrors int + for _, c := range clients { + if err := c.writeWithDeadline(encodedData); err != nil { + logging.Debug("Broadcast write error to %s: %v", c.addr, err) + sendErrors++ + } + } + + 
if sendErrors == len(clients) { + b.errors.Add(1) + return 0, nil, fmt.Errorf("failed to send to all %d clients", len(clients)) + } + + b.messagesSent.Add(1) + b.bytesSent.Add(uint64(len(encodedData))) + + // Wait for ACK + select { + case resp := <-req.respCh: + return resp.msgType, resp.data, nil + case <-time.After(ackTimeout): + b.errors.Add(1) + return 0, nil, fmt.Errorf("ACK timeout after %v for file %s", ackTimeout, fileID) + case <-b.ctx.Done(): + return 0, nil, fmt.Errorf("broadcaster shutting down") + } +} + +// fakeAck returns a fake ACK when no clients are connected (only if requireClient is false) +func (b *Broadcaster) fakeAck(msgType byte, payload interface{}) (byte, []byte, error) { + switch msgType { + case protocol.MsgFileCreate: + msg := payload.(*protocol.FileCreate) + ack := protocol.FileCreateAck{FileID: msg.FileID, Success: true} + data, _ := json.Marshal(ack) + logging.Debug("Fake ACK for FileCreate %s (no clients)", msg.FileID) + return protocol.MsgFileCreateAck, data, nil + case protocol.MsgWriteChunk: + msg := payload.(*protocol.WriteChunk) + ack := protocol.WriteAck{FileID: msg.FileID, Offset: msg.Offset, Written: len(msg.Data)} + data, _ := json.Marshal(ack) + return protocol.MsgWriteAck, data, nil + default: + return 0, nil, nil + } +} + +func (b *Broadcaster) encodeMessage(msgType byte, payload interface{}) ([]byte, error) { + jsonData, err := json.Marshal(payload) + if err != nil { + return nil, fmt.Errorf("marshal payload: %w", err) + } + + totalLen := 1 + len(jsonData) + data := make([]byte, 4+totalLen) + + data[0] = byte(totalLen >> 24) + data[1] = byte(totalLen >> 16) + data[2] = byte(totalLen >> 8) + data[3] = byte(totalLen) + data[4] = msgType + copy(data[5:], jsonData) + + return data, nil +} + +// State returns the current connection state. +func (b *Broadcaster) State() ConnectionState { + b.mu.RLock() + defer b.mu.RUnlock() + return b.state +} + +// ClientCount returns the number of healthy connected clients. 
+func (b *Broadcaster) ClientCount() int { + return len(b.getHealthyClients()) +} + +// Stats returns broadcaster statistics. +func (b *Broadcaster) Stats() map[string]uint64 { + return map[string]uint64{ + "messages_sent": b.messagesSent.Load(), + "messages_recv": b.messagesRecv.Load(), + "bytes_sent": b.bytesSent.Load(), + "bytes_recv": b.bytesRecv.Load(), + "clients_total": b.clientsTotal.Load(), + "clients_current": uint64(b.clientsCurrent.Load()), + "errors": b.errors.Load(), + } +} + +// Close shuts down the broadcaster gracefully. +func (b *Broadcaster) Close() error { + b.cancel() // Signal shutdown + + b.mu.Lock() + b.state = StateDisconnected + + // Close all client connections gracefully + for conn, c := range b.clients { + // Send close message + c.writeMu.Lock() + conn.WriteControl( + websocket.CloseMessage, + websocket.FormatCloseMessage(websocket.CloseGoingAway, "server shutting down"), + time.Now().Add(time.Second), + ) + c.writeMu.Unlock() + conn.Close() + close(c.responseCh) + } + b.clients = make(map[*websocket.Conn]*clientConn) + b.mu.Unlock() + + // Shutdown HTTP server + if b.server != nil { + ctx, cancel := context.WithTimeout(context.Background(), shutdownTimeout) + defer cancel() + return b.server.Shutdown(ctx) + } + return nil +} diff --git a/server/lib/fspipe/transport/client.go b/server/lib/fspipe/transport/client.go new file mode 100644 index 00000000..82406d7f --- /dev/null +++ b/server/lib/fspipe/transport/client.go @@ -0,0 +1,623 @@ +package transport + +import ( + "bufio" + "context" + "errors" + "fmt" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/onkernel/kernel-images/server/lib/fspipe/logging" + "github.com/onkernel/kernel-images/server/lib/fspipe/protocol" + "github.com/onkernel/kernel-images/server/lib/fspipe/queue" +) + +var ( + ErrNotConnected = errors.New("not connected") + ErrSendFailed = errors.New("send failed") + ErrShuttingDown = errors.New("client is shutting down") + ErrInvalidConfig = errors.New("invalid 
configuration") +) + +// ConnectionState represents the connection status +type ConnectionState int32 + +const ( + StateDisconnected ConnectionState = iota + StateConnecting + StateConnected + StateReconnecting + StateFailed +) + +func (s ConnectionState) String() string { + switch s { + case StateDisconnected: + return "disconnected" + case StateConnecting: + return "connecting" + case StateConnected: + return "connected" + case StateReconnecting: + return "reconnecting" + case StateFailed: + return "failed" + default: + return "unknown" + } +} + +// ClientConfig holds client configuration +type ClientConfig struct { + // Connection settings + DialTimeout time.Duration + MaxRetries int // 0 = infinite retries + InitialBackoff time.Duration + MaxBackoff time.Duration + BackoffMultiplier float64 + + // Health check settings + HealthCheckInterval time.Duration + PingTimeout time.Duration + + // Queue settings + QueueSize int + AckTimeout time.Duration + + // Shutdown settings + ShutdownTimeout time.Duration +} + +// DefaultClientConfig returns production-ready defaults +func DefaultClientConfig() ClientConfig { + return ClientConfig{ + DialTimeout: 10 * time.Second, + MaxRetries: 0, // 0 = infinite retries + InitialBackoff: 500 * time.Millisecond, + MaxBackoff: 30 * time.Second, + BackoffMultiplier: 2.0, + HealthCheckInterval: 5 * time.Second, + PingTimeout: 3 * time.Second, + QueueSize: 1000, + AckTimeout: 10 * time.Second, // Reduced from 30s + ShutdownTimeout: 5 * time.Second, + } +} + +// ValidateConfig checks configuration for invalid values +func ValidateConfig(config ClientConfig) error { + if config.DialTimeout <= 0 { + return fmt.Errorf("%w: DialTimeout must be positive", ErrInvalidConfig) + } + if config.InitialBackoff <= 0 { + return fmt.Errorf("%w: InitialBackoff must be positive", ErrInvalidConfig) + } + if config.MaxBackoff < config.InitialBackoff { + return fmt.Errorf("%w: MaxBackoff must be >= InitialBackoff", ErrInvalidConfig) + } + if 
config.BackoffMultiplier < 1.0 { + return fmt.Errorf("%w: BackoffMultiplier must be >= 1.0", ErrInvalidConfig) + } + if config.QueueSize <= 0 { + return fmt.Errorf("%w: QueueSize must be positive", ErrInvalidConfig) + } + if config.AckTimeout <= 0 { + return fmt.Errorf("%w: AckTimeout must be positive", ErrInvalidConfig) + } + if config.ShutdownTimeout <= 0 { + return fmt.Errorf("%w: ShutdownTimeout must be positive", ErrInvalidConfig) + } + return nil +} + +// Client manages the connection to the remote listener +type Client struct { + addr string + config ClientConfig + + // Connection state protected by connMu + connMu sync.RWMutex + conn net.Conn + encoder *protocol.Encoder + decoder *protocol.Decoder + bufW *bufio.Writer + + state atomic.Int32 // ConnectionState + + // Message queue for non-blocking sends + sendQueue *queue.Queue + + // Background goroutine management + ctx context.Context + cancel context.CancelFunc + wg sync.WaitGroup + + // Reconnection management - single goroutine handles all reconnects + reconnectCh chan struct{} + reconnectOnce sync.Once + + // Shutdown management + shutdownMu sync.Mutex + shutdown bool + + // Metrics + messagesSent atomic.Uint64 + messagesAcked atomic.Uint64 + messagesRetried atomic.Uint64 + connectionLost atomic.Uint64 + reconnectSuccess atomic.Uint64 + healthCheckFails atomic.Uint64 +} + +// NewClient creates a new transport client with default config +func NewClient(addr string) *Client { + return NewClientWithConfig(addr, DefaultClientConfig()) +} + +// NewClientWithConfig creates a new transport client with custom config +func NewClientWithConfig(addr string, config ClientConfig) *Client { + // Apply defaults for zero values + if config.ShutdownTimeout == 0 { + config.ShutdownTimeout = 5 * time.Second + } + + ctx, cancel := context.WithCancel(context.Background()) + + c := &Client{ + addr: addr, + config: config, + ctx: ctx, + cancel: cancel, + sendQueue: queue.New(queue.Config{ + MaxSize: config.QueueSize, + 
AckTimeout: config.AckTimeout, + MaxRetries: 3, + }), + reconnectCh: make(chan struct{}, 1), // Buffered to avoid blocking + } + + c.state.Store(int32(StateDisconnected)) + return c +} + +// Connect establishes connection to the remote server +func (c *Client) Connect() error { + c.connMu.Lock() + err := c.connectLocked() + c.connMu.Unlock() + + if err != nil { + return err + } + + // Start background workers exactly once + c.reconnectOnce.Do(func() { + c.wg.Add(3) + go c.sendLoop() + go c.healthCheckLoop() + go c.reconnectLoop() + }) + + return nil +} + +// connectLocked establishes connection (must hold connMu) +func (c *Client) connectLocked() error { + // Close existing connection if any + if c.conn != nil { + c.conn.Close() + c.conn = nil + } + + c.state.Store(int32(StateConnecting)) + + backoff := c.config.InitialBackoff + attempt := 0 + + for { + select { + case <-c.ctx.Done(): + c.state.Store(int32(StateDisconnected)) + return c.ctx.Err() + default: + } + + attempt++ + conn, err := net.DialTimeout("tcp", c.addr, c.config.DialTimeout) + if err != nil { + logging.Warn("Connection attempt %d to %s failed: %v", attempt, c.addr, err) + + if c.config.MaxRetries > 0 && attempt >= c.config.MaxRetries { + c.state.Store(int32(StateFailed)) + return fmt.Errorf("failed to connect after %d retries: %w", attempt, err) + } + + // Exponential backoff with context cancellation + timer := time.NewTimer(backoff) + select { + case <-c.ctx.Done(): + timer.Stop() + c.state.Store(int32(StateDisconnected)) + return c.ctx.Err() + case <-timer.C: + } + + backoff = time.Duration(float64(backoff) * c.config.BackoffMultiplier) + if backoff > c.config.MaxBackoff { + backoff = c.config.MaxBackoff + } + continue + } + + // Configure connection for reliability + if tcpConn, ok := conn.(*net.TCPConn); ok { + tcpConn.SetKeepAlive(true) + tcpConn.SetKeepAlivePeriod(15 * time.Second) + tcpConn.SetNoDelay(true) + } + + c.conn = conn + c.bufW = bufio.NewWriterSize(conn, 64*1024) + c.encoder = 
protocol.NewEncoder(c.bufW) + c.decoder = protocol.NewDecoder(bufio.NewReaderSize(conn, 64*1024)) + + c.state.Store(int32(StateConnected)) + logging.Info("Connected to %s (attempt %d)", c.addr, attempt) + c.reconnectSuccess.Add(1) + return nil + } +} + +// reconnectLoop handles reconnection in a dedicated goroutine +// This prevents race conditions and deadlocks from concurrent reconnection attempts +func (c *Client) reconnectLoop() { + defer c.wg.Done() + + for { + select { + case <-c.ctx.Done(): + return + case <-c.reconnectCh: + // Drain any additional reconnect signals + for { + select { + case <-c.reconnectCh: + default: + goto doReconnect + } + } + + doReconnect: + currentState := ConnectionState(c.state.Load()) + if currentState == StateConnected { + continue // Already connected + } + + c.connectionLost.Add(1) + logging.Info("Starting reconnection...") + + c.connMu.Lock() + // Close existing connection + if c.conn != nil { + c.conn.Close() + c.conn = nil + } + c.state.Store(int32(StateReconnecting)) + + // Re-queue pending messages before reconnecting + count := c.sendQueue.RetryPending() + if count > 0 { + logging.Info("Re-queued %d pending messages for retry", count) + } + + err := c.connectLocked() + c.connMu.Unlock() + + if err != nil { + if errors.Is(err, context.Canceled) { + return + } + logging.Error("Reconnection failed: %v", err) + } + } + } +} + +// triggerReconnect signals the reconnect loop to reconnect +// This is safe to call from any goroutine without holding locks +func (c *Client) triggerReconnect() { + // Non-blocking send to reconnect channel + select { + case c.reconnectCh <- struct{}{}: + default: + // Already a reconnect pending + } +} + +// sendLoop processes the message queue +func (c *Client) sendLoop() { + defer c.wg.Done() + + for { + msg, err := c.sendQueue.Dequeue(c.ctx) + if err != nil { + if errors.Is(err, context.Canceled) || errors.Is(err, queue.ErrQueueClosed) { + return + } + logging.Error("Dequeue error: %v", err) + 
			continue
		}

		err = c.sendMessage(msg)
		if err != nil {
			logging.Debug("Send failed for message %d: %v", msg.ID, err)
			c.handleSendError(msg, err)
		} else {
			c.messagesSent.Add(1)
		}
	}
}

// sendMessage encodes and flushes a single queued message over the current
// connection. Messages of type MsgWriteChunk are tracked as pending until an
// ACK arrives; all other message types are completed immediately.
// NOTE(review): this holds connMu for the full network write, so it blocks
// SendAndReceive (and vice versa) for the duration — confirm this is intended.
func (c *Client) sendMessage(msg *queue.Message) error {
	c.connMu.Lock()
	defer c.connMu.Unlock()

	if c.conn == nil {
		return ErrNotConnected
	}

	// Set write deadline so a stalled peer cannot wedge the send loop.
	c.conn.SetWriteDeadline(time.Now().Add(10 * time.Second))

	if err := c.encoder.Encode(msg.Type, msg.Payload); err != nil {
		return fmt.Errorf("encode: %w", err)
	}

	if err := c.bufW.Flush(); err != nil {
		return fmt.Errorf("flush: %w", err)
	}

	// For messages expecting ACK, track them
	if msg.Type == protocol.MsgWriteChunk {
		c.sendQueue.TrackPending(msg)
	} else {
		// Non-ACK messages complete immediately (non-blocking notify in case
		// nobody is waiting on the result channel).
		select {
		case msg.Result <- nil:
		default:
		}
	}

	return nil
}

// handleSendError reacts to a failed send: it kicks the reconnect loop and
// re-queues the message up to 3 retries before failing it back to the caller.
// NOTE(review): the retry path calls Enqueue with the old type/payload, which
// appears to create a NEW queue message — the original msg.Result is then
// never signalled on a successful retry, and the incremented msg.Retries is
// not carried over. Verify against the queue package's semantics.
func (c *Client) handleSendError(msg *queue.Message, err error) {
	// Trigger reconnection (non-blocking, handled by reconnectLoop)
	c.triggerReconnect()

	// Re-queue the message with retry limit
	msg.Retries++
	if msg.Retries <= 3 {
		c.messagesRetried.Add(1)
		if _, qerr := c.sendQueue.Enqueue(c.ctx, msg.Type, msg.Payload); qerr != nil {
			// Queue full or closed, notify caller
			select {
			case msg.Result <- fmt.Errorf("requeue failed: %w", qerr):
			default:
			}
		}
	} else {
		// Max retries exceeded
		select {
		case msg.Result <- fmt.Errorf("max retries exceeded: %w", err):
		default:
		}
	}
}

// healthCheckLoop monitors connection health with actual verification.
// After maxConsecutiveFails consecutive failed probes it marks the
// connection as reconnecting and signals the reconnect loop.
func (c *Client) healthCheckLoop() {
	defer c.wg.Done()

	ticker := time.NewTicker(c.config.HealthCheckInterval)
	defer ticker.Stop()

	consecutiveFails := 0
	const maxConsecutiveFails = 3

	for {
		select {
		case <-c.ctx.Done():
			return
		case <-ticker.C:
			if ConnectionState(c.state.Load()) != StateConnected {
				consecutiveFails = 0
				continue
			}

			// Actually verify the connection is alive
			if !c.verifyConnection() {
				consecutiveFails++
				c.healthCheckFails.Add(1)
				logging.Warn("Health check failed (%d/%d)", consecutiveFails, maxConsecutiveFails)

				if consecutiveFails >= maxConsecutiveFails {
					logging.Error("Health check failed %d times, triggering reconnect", consecutiveFails)
					c.state.Store(int32(StateReconnecting))
					c.triggerReconnect()
					consecutiveFails = 0
				}
			} else {
				consecutiveFails = 0
			}
		}
	}
}

// verifyConnection checks if the connection is actually working by probing
// it with a 1ms read: a timeout means the socket is alive, any other error
// means it is dead.
func (c *Client) verifyConnection() bool {
	c.connMu.RLock()
	conn := c.conn
	c.connMu.RUnlock()

	if conn == nil {
		return false
	}

	// Set a short deadline and try to detect if connection is alive
	// We use SetReadDeadline with a very short timeout to check for errors
	conn.SetReadDeadline(time.Now().Add(1 * time.Millisecond))

	// Try to read - we expect timeout (connection alive) or error (connection dead)
	one := make([]byte, 1)
	_, err := conn.Read(one)

	// Reset deadline
	conn.SetReadDeadline(time.Time{})

	if err != nil {
		// Timeout is expected and means connection is alive
		if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
			return true
		}
		// Any other error means connection is dead
		return false
	}

	// We got data - unexpected but connection is alive
	// Note: This could mess up protocol framing, but health check
	// shouldn't receive data in normal operation
	// NOTE(review): the consumed byte is discarded here while SendAndReceive
	// decodes from the same socket via c.decoder — if the server ever sends
	// unsolicited frames this probe corrupts framing; confirm the protocol
	// never does.
	return true
}

// Send sends a message asynchronously (non-blocking): it only enqueues and
// returns; actual delivery happens in sendLoop.
func (c *Client) Send(msgType byte, payload interface{}) error {
	c.shutdownMu.Lock()
	if c.shutdown {
		c.shutdownMu.Unlock()
		return ErrShuttingDown
	}
	c.shutdownMu.Unlock()

	_, err := c.sendQueue.Enqueue(c.ctx, msgType, payload)
	return err
}

// SendSync sends a message and waits for completion.
func (c *Client) SendSync(msgType byte, payload interface{}) error {
	c.shutdownMu.Lock()
	if c.shutdown {
		c.shutdownMu.Unlock()
		return ErrShuttingDown
	}
	c.shutdownMu.Unlock()

	return c.sendQueue.EnqueueSync(c.ctx, msgType, payload)
}

// SendAndReceive sends a message and waits for a response.
// It bypasses the send queue entirely: the request is encoded and flushed
// directly under connMu, then a single response frame is decoded with
// AckTimeout as the read deadline. The whole exchange is serialized against
// sendLoop by connMu.
func (c *Client) SendAndReceive(msgType byte, payload interface{}) (byte, []byte, error) {
	c.shutdownMu.Lock()
	if c.shutdown {
		c.shutdownMu.Unlock()
		return 0, nil, ErrShuttingDown
	}
	c.shutdownMu.Unlock()

	c.connMu.Lock()
	defer c.connMu.Unlock()

	if c.conn == nil {
		return 0, nil, ErrNotConnected
	}

	// Set deadlines
	c.conn.SetWriteDeadline(time.Now().Add(10 * time.Second))
	c.conn.SetReadDeadline(time.Now().Add(c.config.AckTimeout))

	if err := c.encoder.Encode(msgType, payload); err != nil {
		return 0, nil, fmt.Errorf("send: %w", err)
	}

	if err := c.bufW.Flush(); err != nil {
		return 0, nil, fmt.Errorf("flush: %w", err)
	}

	respType, respData, err := c.decoder.Decode()
	if err != nil {
		// Connection error - trigger reconnect (in a goroutine so we never
		// block while holding connMu)
		go c.triggerReconnect()
		return 0, nil, fmt.Errorf("receive: %w", err)
	}

	c.messagesAcked.Add(1)
	return respType, respData, nil
}

// State returns the current connection state.
func (c *Client) State() ConnectionState {
	return ConnectionState(c.state.Load())
}

// Stats returns client statistics as a flat counter map (suitable for
// logging or metrics export).
func (c *Client) Stats() map[string]uint64 {
	return map[string]uint64{
		"messages_sent":      c.messagesSent.Load(),
		"messages_acked":     c.messagesAcked.Load(),
		"messages_retried":   c.messagesRetried.Load(),
		"connection_lost":    c.connectionLost.Load(),
		"reconnect_success":  c.reconnectSuccess.Load(),
		"health_check_fails": c.healthCheckFails.Load(),
		"queue_length":       uint64(c.sendQueue.Len()),
		"pending_acks":       uint64(c.sendQueue.GetPendingCount()),
	}
}

// Close closes the connection with graceful shutdown: it marks the client
// shut down (idempotent), cancels the context, closes the queue, waits up to
// ShutdownTimeout for the three workers, then closes the socket.
func (c *Client) Close() error {
	c.shutdownMu.Lock()
	if c.shutdown {
		c.shutdownMu.Unlock()
		return nil
	}
	c.shutdown = true
	c.shutdownMu.Unlock()

	logging.Info("Client shutting down...")

	// Signal goroutines to stop
	c.cancel()

	// Close queue to unblock sendLoop
	c.sendQueue.Close()

	// Wait for goroutines with timeout
	done := make(chan struct{})
	go func() {
		c.wg.Wait()
		close(done)
	}()

	select {
	case <-done:
		logging.Info("Client goroutines stopped gracefully")
	case <-time.After(c.config.ShutdownTimeout):
		logging.Warn("Client shutdown timed out after %v", c.config.ShutdownTimeout)
	}

	// Close connection
	c.connMu.Lock()
	defer c.connMu.Unlock()

	if c.conn != nil {
		err := c.conn.Close()
		c.conn = nil
		c.state.Store(int32(StateDisconnected))
		return err
	}
	c.state.Store(int32(StateDisconnected))
	return nil
}
diff --git a/server/lib/fspipe/transport/s3.go b/server/lib/fspipe/transport/s3.go
new file mode 100644
index 00000000..22cfc3cc
--- /dev/null
+++ b/server/lib/fspipe/transport/s3.go
package transport

import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"os"
	"strings"
	"sync"
	"sync/atomic"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/credentials"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
	"github.com/joho/godotenv"
	"github.com/onkernel/kernel-images/server/lib/fspipe/logging"
	"github.com/onkernel/kernel-images/server/lib/fspipe/protocol"
)

const (
	// S3 minimum part size (5MB) - except for the last part
	minPartSize = 5 * 1024 * 1024
)

// isTempFilename checks if the filename is a Chrome temp file.
// Chrome creates files like .org.chromium.Chromium.XXXXXX during downloads.
func isTempFilename(key string) bool {
	// Extract just the filename from the key (after last /)
	parts := strings.Split(key, "/")
	filename := parts[len(parts)-1]
	return strings.HasPrefix(filename, ".org.chromium.") ||
		strings.HasPrefix(filename, ".com.google.")
}

// S3Config holds S3/R2 configuration.
type S3Config struct {
	Endpoint string `json:"endpoint"`
	Bucket          string `json:"bucket"`
	AccessKeyID     string `json:"access_key_id"`
	SecretAccessKey string `json:"secret_access_key"`
	Region          string `json:"region"`
	Prefix          string `json:"prefix"` // Optional path prefix
}

// S3Client manages S3/R2 uploads. It implements the Transport interface but,
// unlike the TCP/WebSocket clients, all "sends" execute the corresponding S3
// operation synchronously in the caller's goroutine.
type S3Client struct {
	config   S3Config
	s3Client *s3.Client

	ctx    context.Context
	cancel context.CancelFunc

	// Track multipart uploads, keyed by the protocol FileID.
	mu      sync.RWMutex
	uploads map[string]*multipartUpload

	state atomic.Int32

	// Metrics
	filesCreated  atomic.Uint64
	filesUploaded atomic.Uint64
	bytesUploaded atomic.Uint64
	errors        atomic.Uint64
}

// multipartUpload tracks the lifecycle of a single file being streamed to S3,
// including the deferred-start / deferred-complete bookkeeping needed to cope
// with Chrome's create/close/rename download choreography.
type multipartUpload struct {
	key          string
	finalKey     string // Desired final key after renames (updated by handleRename)
	uploadID     string
	parts        []types.CompletedPart
	buffer       bytes.Buffer
	partNum      int32
	started      bool // Whether multipart upload has been initiated
	hasData      bool // Whether any data has been written
	completed    bool // Whether upload has been completed (waiting for potential rename)
	renaming     bool // Whether a rename is currently in progress (prevents race conditions)
	closePending bool // FileClose received but waiting for rename before completing
}

// LoadS3ConfigFromEnv loads S3 config from a .env file (default ".env"),
// falling back to the process environment when the file is absent.
// Region defaults to "auto" (the Cloudflare R2 convention).
func LoadS3ConfigFromEnv(envFile string) (S3Config, error) {
	if envFile == "" {
		envFile = ".env"
	}

	// Load .env file if it exists
	if _, err := os.Stat(envFile); err == nil {
		if err := godotenv.Load(envFile); err != nil {
			return S3Config{}, fmt.Errorf("load .env: %w", err)
		}
	}

	cfg := S3Config{
		Endpoint:        os.Getenv("S3_ENDPOINT"),
		Bucket:          os.Getenv("S3_BUCKET"),
		AccessKeyID:     os.Getenv("S3_ACCESS_KEY_ID"),
		SecretAccessKey: os.Getenv("S3_SECRET_ACCESS_KEY"),
		Region:          os.Getenv("S3_REGION"),
		Prefix:          os.Getenv("S3_PREFIX"),
	}

	if cfg.Region == "" {
		cfg.Region = "auto" // Default for R2
	}

	return cfg, cfg.Validate()
}

// ParseS3ConfigFromJSON parses S3 config from a JSON string.
// Region defaults to "auto" when omitted; the result is validated.
func ParseS3ConfigFromJSON(jsonStr string) (S3Config, error) {
	var cfg S3Config
	if err := json.Unmarshal([]byte(jsonStr), &cfg); err != nil {
		return cfg, fmt.Errorf("parse JSON: %w", err)
	}

	if cfg.Region == "" {
		cfg.Region = "auto"
	}

	return cfg, cfg.Validate()
}

// Validate checks required fields (endpoint, bucket, and both credentials).
func (c S3Config) Validate() error {
	if c.Endpoint == "" {
		return errors.New("S3_ENDPOINT is required")
	}
	if c.Bucket == "" {
		return errors.New("S3_BUCKET is required")
	}
	if c.AccessKeyID == "" {
		return errors.New("S3_ACCESS_KEY_ID is required")
	}
	if c.SecretAccessKey == "" {
		return errors.New("S3_SECRET_ACCESS_KEY is required")
	}
	return nil
}

// NewS3Client creates a new S3/R2 transport client with static credentials
// and path-style addressing (required for R2 and most S3-compatible storage).
func NewS3Client(cfg S3Config) (*S3Client, error) {
	if err := cfg.Validate(); err != nil {
		return nil, err
	}

	ctx, cancel := context.WithCancel(context.Background())

	// Create S3 client
	awsCfg, err := config.LoadDefaultConfig(ctx,
		config.WithRegion(cfg.Region),
		config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(
			cfg.AccessKeyID,
			cfg.SecretAccessKey,
			"",
		)),
	)
	if err != nil {
		cancel()
		return nil, fmt.Errorf("load AWS config: %w", err)
	}

	s3Client := s3.NewFromConfig(awsCfg, func(o *s3.Options) {
		o.BaseEndpoint = aws.String(cfg.Endpoint)
		o.UsePathStyle = true // Required for R2 and most S3-compatible storage
	})

	c := &S3Client{
		config:   cfg,
		s3Client: s3Client,
		ctx:      ctx,
		cancel:   cancel,
		uploads:  make(map[string]*multipartUpload),
	}

	c.state.Store(int32(StateConnected))
	return c, nil
}

// Connect is a no-op for S3 (already connected on creation).
func (c *S3Client) Connect() error {
	logging.Info("S3 client ready for bucket: %s", c.config.Bucket)
	return nil
}

// Send handles file operations. Despite the Transport contract's async
// wording, S3 operations run synchronously here.
func (c *S3Client) Send(msgType byte, payload interface{}) error {
	return c.sendInternal(msgType, payload)
}

// SendSync sends a message synchronously (same as Send for S3 since it's already synchronous).
func (c *S3Client) SendSync(msgType byte, payload interface{}) error {
	return c.sendInternal(msgType, payload)
}

// sendInternal dispatches protocol messages to the matching S3 handler.
// NOTE(review): the payload type assertions are unchecked — a mismatched
// payload would panic rather than return an error; confirm callers always
// pair type and payload correctly.
func (c *S3Client) sendInternal(msgType byte, payload interface{}) error {
	switch msgType {
	case protocol.MsgFileCreate:
		msg := payload.(*protocol.FileCreate)
		return c.handleFileCreate(msg)

	case protocol.MsgWriteChunk:
		msg := payload.(*protocol.WriteChunk)
		return c.handleWriteChunk(msg)

	case protocol.MsgFileClose:
		msg := payload.(*protocol.FileClose)
		return c.handleFileClose(msg)

	case protocol.MsgRename:
		msg := payload.(*protocol.Rename)
		return c.handleRename(msg)

	case protocol.MsgDelete:
		msg := payload.(*protocol.Delete)
		return c.handleDelete(msg)

	case protocol.MsgTruncate:
		// S3 doesn't support truncate, log warning
		logging.Warn("Truncate not supported for S3")
		return nil

	default:
		return fmt.Errorf("unknown message type: 0x%02x", msgType)
	}
}

// SendAndReceive sends and returns appropriate ACK for S3.
// Since there is no remote peer, it performs the operation and synthesizes
// the ACK frame a network transport would have returned.
func (c *S3Client) SendAndReceive(msgType byte, payload interface{}) (byte, []byte, error) {
	// For S3, we send and return a fake ACK
	err := c.Send(msgType, payload)

	// Return appropriate ACK based on message type
	switch msgType {
	case protocol.MsgFileCreate:
		msg := payload.(*protocol.FileCreate)
		ack := protocol.FileCreateAck{
			FileID:  msg.FileID,
			Success: err == nil,
		}
		if err != nil {
			ack.Error = err.Error()
		}
		data, _ := json.Marshal(ack)
		return protocol.MsgFileCreateAck, data, nil

	case protocol.MsgWriteChunk:
		if err != nil {
			return 0, nil, err
		}
		msg := payload.(*protocol.WriteChunk)
		ack := protocol.WriteAck{
			FileID:  msg.FileID,
			Offset:  msg.Offset,
			Written: len(msg.Data),
		}
		data, _ := json.Marshal(ack)
		return protocol.MsgWriteAck, data, nil
	}

	if err != nil {
		return 0, nil, err
	}
	return 0, nil, nil
}

// handleFileCreate registers a file for upload without starting the S3
// multipart upload yet (lazy start happens on the first write). Existing
// registrations for the same FileID are kept so a rename that already
// updated finalKey is not lost.
func (c *S3Client) handleFileCreate(msg *protocol.FileCreate) error {
	key := c.config.Prefix + msg.Filename

	c.mu.Lock()
	defer c.mu.Unlock()

	// Check if we already have an upload for this FileID
	// Don't overwrite if one exists (preserves finalKey from renames)
	if existing, ok := c.uploads[msg.FileID]; ok {
		logging.Info("S3: FileCreate for existing id=%s, keeping finalKey=%s (new filename=%s)", msg.FileID, existing.finalKey, key)
		return nil
	}

	// Don't start multipart upload yet - wait for first write
	// This handles Chrome's placeholder files that get created and immediately closed
	c.uploads[msg.FileID] = &multipartUpload{
		key:      key,
		finalKey: key, // Will be updated by handleRename if rename arrives before upload completes
		parts:    make([]types.CompletedPart, 0),
		partNum:  0,
		started:  false,
		hasData:  false,
	}

	c.filesCreated.Add(1)
	logging.Info("S3: Registered file key=%s id=%s", key, msg.FileID)
	return nil
}

// handleWriteChunk buffers incoming data for a registered file, lazily
// starting the multipart upload on the first write and flushing a part to S3
// whenever the buffer reaches minPartSize.
func (c *S3Client) handleWriteChunk(msg *protocol.WriteChunk) error {
	c.mu.Lock()
	upload, ok := c.uploads[msg.FileID]
	if !ok {
		c.mu.Unlock()
		return fmt.Errorf("unknown file ID: %s", msg.FileID)
	}

	// Start multipart upload on first write (lazy initialization)
	// Use finalKey which may have been updated by rename before writes started
	if !upload.started {
		// Use finalKey - it may have been updated by rename(s) before first write
		uploadKey := upload.finalKey
		logging.Info("S3: Starting multipart upload for id=%s with key=%s (original key was %s)", msg.FileID, uploadKey, upload.key)
		output, err := c.s3Client.CreateMultipartUpload(c.ctx, &s3.CreateMultipartUploadInput{
			Bucket: aws.String(c.config.Bucket),
			Key:    aws.String(uploadKey),
		})
		if err != nil {
			c.mu.Unlock()
			c.errors.Add(1)
			return fmt.Errorf("create multipart upload: %w", err)
		}
		// Update key to match what we actually used for the multipart upload
		upload.key = uploadKey
		upload.uploadID = *output.UploadId
		upload.started = true
		logging.Info("S3: Started multipart upload for %s", uploadKey)
	}

	// Buffer the data
	upload.buffer.Write(msg.Data)
	upload.hasData = true
	c.bytesUploaded.Add(uint64(len(msg.Data)))

	// If buffer >= 5MB, upload a part
	if upload.buffer.Len() >= minPartSize {
		if err := c.uploadPartLocked(upload); err != nil {
			c.mu.Unlock()
			return err
		}
	}

	c.mu.Unlock()
	return nil
}

// uploadPartLocked uploads the buffered bytes as the next multipart part.
// Caller must hold c.mu.
// Safe aliasing note: buffer.Bytes() returns the buffer's backing slice and
// Reset() reuses it, but UploadPart consumes the reader synchronously while
// c.mu is still held, so no writer can clobber the data mid-upload.
// NOTE(review): on UploadPart failure the buffer has already been reset, so
// that part's data is dropped and partNum stays incremented — confirm callers
// treat this as a fatal error for the file.
func (c *S3Client) uploadPartLocked(upload *multipartUpload) error {
	upload.partNum++
	data := upload.buffer.Bytes()
	upload.buffer.Reset()

	output, err := c.s3Client.UploadPart(c.ctx, &s3.UploadPartInput{
		Bucket:     aws.String(c.config.Bucket),
		Key:        aws.String(upload.key),
		UploadId:   aws.String(upload.uploadID),
		PartNumber: aws.Int32(upload.partNum),
		Body:       bytes.NewReader(data),
	})
	if err != nil {
		c.errors.Add(1)
		return fmt.Errorf("upload part %d: %w", upload.partNum, err)
	}

	upload.parts = append(upload.parts, types.CompletedPart{
		ETag:       output.ETag,
		PartNumber: aws.Int32(upload.partNum),
	})

	logging.Debug("S3: Uploaded part %d (%d bytes) for %s", upload.partNum, len(data), upload.key)
	return nil
}

// handleFileClose decides whether a FileClose should actually finalize the
// upload. Chrome's download flow closes files early and renames them later,
// so completion is deferred in several cases (see inline comments).
func (c *S3Client) handleFileClose(msg *protocol.FileClose) error {
	c.mu.Lock()
	upload, ok := c.uploads[msg.FileID]
	if !ok {
		c.mu.Unlock()
		logging.Info("S3: FileClose for unknown ID %s", msg.FileID)
		return nil
	}

	logging.Info("S3: FileClose id=%s key=%s finalKey=%s started=%v hasData=%v completed=%v closePending=%v", msg.FileID, upload.key, upload.finalKey, upload.started, upload.hasData, upload.completed, upload.closePending)

	// If already completed, this is a duplicate close - ignore
	if upload.completed {
		c.mu.Unlock()
		logging.Debug("S3: FileClose for already completed upload %s", upload.key)
		return nil
	}

	// If no data was ever written, this is likely a placeholder file from Chrome's
	// open-close-open pattern. DON'T delete from map - writes may come later!
	if !upload.hasData {
		c.mu.Unlock()
		logging.Debug("S3: FileClose with no data for %s - keeping registration for late writes", upload.key)
		return nil
	}

	// If multipart upload was never started (shouldn't happen if hasData is true, but be safe)
	if !upload.started {
		c.mu.Unlock()
		logging.Debug("S3: FileClose but multipart never started for %s", upload.key)
		return nil
	}

	// Chrome sends FileClose BEFORE all data is written when file still has temp name!
	// Don't complete the upload until we've seen a rename (temp -> .crdownload)
	// Check if file still has temp name AND no rename has happened yet
	if isTempFilename(upload.finalKey) {
		upload.closePending = true
		c.mu.Unlock()
		logging.Info("S3: FileClose with temp filename - deferring completion until rename (closePending=true)")
		return nil
	}

	// File has been renamed (or was never a temp file) - proceed with completion
	c.mu.Unlock()
	return c.completeUpload(msg.FileID, upload)
}

// completeUpload finalizes the multipart upload to S3: it flushes any
// remaining buffered bytes as the last part, calls CompleteMultipartUpload,
// and then applies a rename that may have arrived while completion was in
// flight. On failure the multipart upload is aborted to avoid orphaned parts.
func (c *S3Client) completeUpload(fileID string, upload *multipartUpload) error {
	c.mu.Lock()
	uploadKey := upload.key

	// Upload remaining data as final part
	if upload.buffer.Len() > 0 {
		if err := c.uploadPartLocked(upload); err != nil {
			c.mu.Unlock()
			// Abort the upload
			c.s3Client.AbortMultipartUpload(c.ctx, &s3.AbortMultipartUploadInput{
				Bucket:   aws.String(c.config.Bucket),
				Key:      aws.String(uploadKey),
				UploadId: aws.String(upload.uploadID),
			})
			return err
		}
	}
	c.mu.Unlock()

	// Complete the multipart upload (done outside c.mu so renames can still
	// update finalKey concurrently)
	_, err := c.s3Client.CompleteMultipartUpload(c.ctx, &s3.CompleteMultipartUploadInput{
		Bucket:   aws.String(c.config.Bucket),
		Key:      aws.String(uploadKey),
		UploadId: aws.String(upload.uploadID),
		MultipartUpload: &types.CompletedMultipartUpload{
			Parts: upload.parts,
		},
	})
	if err != nil {
		c.errors.Add(1)
		// Abort on error
		c.s3Client.AbortMultipartUpload(c.ctx, &s3.AbortMultipartUploadInput{
			Bucket:   aws.String(c.config.Bucket),
			Key:      aws.String(uploadKey),
			UploadId: aws.String(upload.uploadID),
		})
		return fmt.Errorf("complete multipart upload: %w", err)
	}

	logging.Info("S3: Completed upload for %s (%d parts)", uploadKey, len(upload.parts))

	// Check if finalKey was updated by a rename that arrived during CompleteMultipartUpload
	c.mu.Lock()
	currentFinalKey := upload.finalKey
	upload.completed = true
	upload.key = uploadKey

	if currentFinalKey != uploadKey {
		upload.renaming = true
		c.mu.Unlock()
		logging.Info("S3: Rename arrived during upload completion, applying: %s -> %s", uploadKey, currentFinalKey)
		if err := c.doS3RenameLoop(uploadKey, fileID); err != nil {
			logging.Error("S3: Failed to apply deferred rename: %v", err)
		}
	} else {
		c.mu.Unlock()
	}

	c.filesUploaded.Add(1)
	return nil
}

// handleRename records the new target key for a file and, when the upload is
// already completed in S3, performs the server-side rename (copy+delete).
// Lookup is by FileID first, then by old filename, then directly against S3
// objects for files no longer tracked in the uploads map.
func (c *S3Client) handleRename(msg *protocol.Rename) error {
	newKey := c.config.Prefix + msg.NewName
	oldKey := c.config.Prefix + msg.OldName

	logging.Info("S3: Rename called: id=%s old=%s new=%s", msg.FileID, msg.OldName, msg.NewName)

	// First check if there's an active/completed upload for this file by FileID
	c.mu.Lock()
	if msg.FileID != "" {
		if upload, ok := c.uploads[msg.FileID]; ok {
			// Always update finalKey to the desired destination
			upload.finalKey = newKey

			// If closePending was set, clear it - the next FileClose after rename will trigger completion
			// DON'T complete here because Chrome may write more data after rename!
			if upload.closePending {
				upload.closePending = false
				logging.Info("S3: Rename cleared closePending, will complete on next FileClose: %s -> %s (id=%s)", upload.key, newKey, msg.FileID)
			}

			if upload.completed {
				// Upload already completed in S3
				if upload.renaming {
					// A rename is already in progress - just update finalKey (done above);
					// the in-flight doS3RenameLoop will pick it up.
					c.mu.Unlock()
					logging.Info("S3: Rename queued (another rename in progress): -> %s (id=%s)", newKey, msg.FileID)
					return nil
				}
				// Start the rename - use upload.key (actual S3 location)
				actualKey := upload.key
				upload.renaming = true
				c.mu.Unlock()
				logging.Info("S3: Rename after completion: %s -> %s (id=%s)", actualKey, newKey, msg.FileID)
				return c.doS3RenameLoop(actualKey, msg.FileID)
			} else if upload.started {
				c.mu.Unlock()
				logging.Info("S3: Rename during upload (will apply at close): %s -> %s (id=%s)", upload.key, newKey, msg.FileID)
			} else {
				c.mu.Unlock()
				logging.Info("S3: Rename before upload start: finalKey updated to %s (id=%s)", newKey, msg.FileID)
			}
			return nil
		}
		logging.Info("S3: Rename FileID=%s not found in uploads map", msg.FileID)
	}

	// Fallback: search by old filename if FileID lookup failed
	logging.Info("S3: Rename fallback search by oldKey=%s", oldKey)
	for fileID, upload := range c.uploads {
		logging.Info("S3: Rename checking upload id=%s key=%s finalKey=%s completed=%v", fileID, upload.key, upload.finalKey, upload.completed)
		if upload.key == oldKey || upload.finalKey == oldKey {
			upload.finalKey = newKey

			if upload.completed {
				if upload.renaming {
					c.mu.Unlock()
					logging.Info("S3: Rename queued by filename (another rename in progress): -> %s (id=%s)", newKey, fileID)
					return nil
				}
				actualKey := upload.key
				upload.renaming = true
				c.mu.Unlock()
				logging.Info("S3: Rename by filename after completion: %s -> %s (id=%s)", actualKey, newKey, fileID)
				return c.doS3RenameLoop(actualKey, fileID)
			} else if upload.started {
				c.mu.Unlock()
				logging.Info("S3: Rename by filename during upload: %s -> %s (id=%s)", upload.key, newKey, fileID)
			} else {
				c.mu.Unlock()
				logging.Info("S3: Rename by filename before upload start: -> %s (id=%s)", newKey, fileID)
			}
			return nil
		}
	}
	c.mu.Unlock()

	// No active upload found - check if object exists in S3 and rename it
	logging.Info("S3: Rename - no active upload found for id=%s or key=%s, trying S3 directly", msg.FileID, oldKey)

	_, err := c.s3Client.HeadObject(c.ctx, &s3.HeadObjectInput{
		Bucket: aws.String(c.config.Bucket),
		Key:    aws.String(oldKey),
	})
	if err != nil {
		// Source doesn't exist - this is expected for Chrome's placeholder files
		// Just log and return success (the rename is a no-op)
		logging.Debug("S3: Rename skipped - source %s does not exist (placeholder file)", oldKey)
		return nil
	}

	// S3 doesn't support rename, so we copy + delete
	_, err = c.s3Client.CopyObject(c.ctx, &s3.CopyObjectInput{
		Bucket:     aws.String(c.config.Bucket),
		CopySource: aws.String(c.config.Bucket + "/" + oldKey),
		Key:        aws.String(newKey),
	})
	if err != nil {
		c.errors.Add(1)
		return fmt.Errorf("copy object: %w", err)
	}

	_, err = c.s3Client.DeleteObject(c.ctx, &s3.DeleteObjectInput{
		Bucket: aws.String(c.config.Bucket),
		Key:    aws.String(oldKey),
	})
	if err != nil {
		logging.Warn("S3: Failed to delete old key after rename: %v", err)
		// Don't return error - copy succeeded
	}

	logging.Debug("S3: Renamed %s -> %s", oldKey, newKey)
	return nil
}

// doS3RenameLoop performs S3 renames in a loop until key == finalKey.
// This handles multiple renames that arrive while a rename is in progress:
// each iteration re-reads finalKey under c.mu, so a rename queued by
// handleRename during the copy+delete is applied on the next pass.
func (c *S3Client) doS3RenameLoop(currentKey string, fileID string) error {
	for {
		// Get the target key under lock
		c.mu.Lock()
		upload, ok := c.uploads[fileID]
		if !ok {
			c.mu.Unlock()
			return nil
		}
		targetKey := upload.finalKey
		if currentKey == targetKey {
			// No more renames needed
			upload.renaming = false
			c.mu.Unlock()
			return nil
		}
		c.mu.Unlock()

		// Do the S3 copy+delete
		logging.Info("S3: Renaming %s -> %s", currentKey, targetKey)
		_, err := c.s3Client.CopyObject(c.ctx, &s3.CopyObjectInput{
			Bucket:     aws.String(c.config.Bucket),
			CopySource: aws.String(c.config.Bucket + "/" + currentKey),
			Key:        aws.String(targetKey),
		})
		if err != nil {
			c.errors.Add(1)
			logging.Error("S3: Failed to copy %s -> %s: %v", currentKey, targetKey, err)
			// Clear renaming flag on error
			c.mu.Lock()
			if upload, ok := c.uploads[fileID]; ok {
				upload.renaming = false
			}
			c.mu.Unlock()
			return fmt.Errorf("copy object: %w", err)
		}

		_, err = c.s3Client.DeleteObject(c.ctx, &s3.DeleteObjectInput{
			Bucket: aws.String(c.config.Bucket),
			Key:    aws.String(currentKey),
		})
		if err != nil {
			logging.Warn("S3: Failed to delete old key %s after rename: %v", currentKey, err)
		}

		// Update the key and check if we need another rename
		c.mu.Lock()
		if upload, ok := c.uploads[fileID]; ok {
			upload.key = targetKey
		}
		c.mu.Unlock()

		logging.Info("S3: Renamed %s -> %s", currentKey, targetKey)
		currentKey = targetKey
		// Loop will check if finalKey changed during this rename
	}
}

// doS3Rename performs a single S3 copy+delete (used by handleFileClose).
func (c *S3Client) doS3Rename(oldKey, newKey, fileID string) error {
	_, err := c.s3Client.CopyObject(c.ctx, &s3.CopyObjectInput{
		Bucket:     aws.String(c.config.Bucket),
		CopySource: aws.String(c.config.Bucket + "/" + oldKey),
		Key:        aws.String(newKey),
	})
	if err != nil {
		c.errors.Add(1)
		logging.Error("S3: Failed to copy %s -> %s: %v", oldKey, newKey, err)
		return fmt.Errorf("copy object: %w", err)
	}

	_, err = c.s3Client.DeleteObject(c.ctx, &s3.DeleteObjectInput{
		Bucket: aws.String(c.config.Bucket),
		Key:    aws.String(oldKey),
	})
	if err != nil {
		logging.Warn("S3: Failed to delete old key %s after rename: %v", oldKey, err)
	}

	// Update the upload entry's key to reflect the new name
c.mu.Lock() + if upload, ok := c.uploads[fileID]; ok { + upload.key = newKey + } + c.mu.Unlock() + + logging.Info("S3: Renamed %s -> %s", oldKey, newKey) + return nil +} + +func (c *S3Client) handleDelete(msg *protocol.Delete) error { + key := c.config.Prefix + msg.Filename + + _, err := c.s3Client.DeleteObject(c.ctx, &s3.DeleteObjectInput{ + Bucket: aws.String(c.config.Bucket), + Key: aws.String(key), + }) + if err != nil { + c.errors.Add(1) + return fmt.Errorf("delete object: %w", err) + } + + logging.Debug("S3: Deleted %s", key) + return nil +} + +// State returns current state (always connected for S3) +func (c *S3Client) State() ConnectionState { + return ConnectionState(c.state.Load()) +} + +// Stats returns client statistics +func (c *S3Client) Stats() map[string]uint64 { + return map[string]uint64{ + "files_created": c.filesCreated.Load(), + "files_uploaded": c.filesUploaded.Load(), + "bytes_uploaded": c.bytesUploaded.Load(), + "errors": c.errors.Load(), + } +} + +// Close cleans up resources +func (c *S3Client) Close() error { + c.cancel() + + // Abort any pending uploads that were actually started + c.mu.Lock() + defer c.mu.Unlock() + + for fileID, upload := range c.uploads { + if upload.started && upload.uploadID != "" { + logging.Warn("S3: Aborting incomplete upload for %s", upload.key) + c.s3Client.AbortMultipartUpload(c.ctx, &s3.AbortMultipartUploadInput{ + Bucket: aws.String(c.config.Bucket), + Key: aws.String(upload.key), + UploadId: aws.String(upload.uploadID), + }) + } + delete(c.uploads, fileID) + } + + return nil +} + +// Compile-time interface check +var _ Transport = (*S3Client)(nil) diff --git a/server/lib/fspipe/transport/transport.go b/server/lib/fspipe/transport/transport.go new file mode 100644 index 00000000..4d208090 --- /dev/null +++ b/server/lib/fspipe/transport/transport.go @@ -0,0 +1,53 @@ +package transport + +import ( + "fmt" + "net/url" +) + +// Transport defines the interface for all transport implementations +type Transport 
interface { + // Connect establishes connection to the remote server + Connect() error + + // Send sends a message asynchronously (non-blocking) + Send(msgType byte, payload interface{}) error + + // SendSync sends a message and waits for send completion (blocking) + // This ensures the message is actually sent before returning, unlike Send which queues. + // Use this for messages that must be delivered before subsequent operations. + SendSync(msgType byte, payload interface{}) error + + // SendAndReceive sends a message and waits for a response + SendAndReceive(msgType byte, payload interface{}) (byte, []byte, error) + + // State returns the current connection state + State() ConnectionState + + // Stats returns transport statistics + Stats() map[string]uint64 + + // Close closes the transport + Close() error +} + +// NewTransport creates a transport based on the URL scheme +func NewTransport(remoteURL string, config ClientConfig) (Transport, error) { + u, err := url.Parse(remoteURL) + if err != nil { + return nil, fmt.Errorf("invalid URL: %w", err) + } + + switch u.Scheme { + case "tcp": + return NewClientWithConfig(u.Host, config), nil + case "ws", "wss": + return NewWebSocketClient(remoteURL, config), nil + default: + return nil, fmt.Errorf("unsupported scheme: %s (use tcp://, ws://, or wss://)", u.Scheme) + } +} + +// Compile-time interface checks +var _ Transport = (*Client)(nil) +var _ Transport = (*WebSocketClient)(nil) diff --git a/server/lib/fspipe/transport/websocket.go b/server/lib/fspipe/transport/websocket.go new file mode 100644 index 00000000..292b1f23 --- /dev/null +++ b/server/lib/fspipe/transport/websocket.go @@ -0,0 +1,584 @@ +package transport + +import ( + "bytes" + "context" + "encoding/binary" + "encoding/json" + "errors" + "fmt" + "net/http" + "sync" + "sync/atomic" + "time" + + "github.com/gorilla/websocket" + "github.com/onkernel/kernel-images/server/lib/fspipe/logging" + "github.com/onkernel/kernel-images/server/lib/fspipe/protocol" + 
	"github.com/onkernel/kernel-images/server/lib/fspipe/queue"
)

// WebSocketClient manages WebSocket connection to the remote listener.
// readLoop is the single reader; all writers serialize on connMu.
type WebSocketClient struct {
	url    string
	config ClientConfig

	connMu sync.RWMutex
	conn   *websocket.Conn

	state atomic.Int32 // ConnectionState

	// Message queue for non-blocking sends
	sendQueue *queue.Queue

	// Response channel for SendAndReceive - protected by reqMu
	responseCh chan wsResponse
	reqMu      sync.Mutex // Serializes SendAndReceive calls to prevent response mixup

	// Background goroutine management
	ctx    context.Context
	cancel context.CancelFunc
	wg     sync.WaitGroup

	// Reconnection management - single goroutine handles all reconnects
	reconnectCh   chan struct{} // capacity 1: pending signals are coalesced
	reconnectOnce sync.Once     // guards one-time START of the worker goroutines in Connect

	// Shutdown management
	shutdownMu sync.Mutex
	shutdown   bool

	// Metrics
	messagesSent     atomic.Uint64
	messagesAcked    atomic.Uint64
	messagesRetried  atomic.Uint64
	connectionLost   atomic.Uint64
	reconnectSuccess atomic.Uint64
	healthCheckFails atomic.Uint64
}

// wsResponse is one inbound frame handed from readLoop to a waiting
// SendAndReceive call.
type wsResponse struct {
	msgType byte
	data    []byte
	err     error
}

// NewWebSocketClient creates a new WebSocket transport client.
// The client is idle until Connect is called.
func NewWebSocketClient(url string, config ClientConfig) *WebSocketClient {
	// Apply defaults for zero values
	// NOTE(review): only ShutdownTimeout is defaulted here; other zero-value
	// fields (QueueSize, AckTimeout, backoff settings) are passed through
	// as-is — confirm they are defaulted by the caller or by queue.New.
	if config.ShutdownTimeout == 0 {
		config.ShutdownTimeout = 5 * time.Second
	}

	ctx, cancel := context.WithCancel(context.Background())

	c := &WebSocketClient{
		url:    url,
		config: config,
		ctx:    ctx,
		cancel: cancel,
		sendQueue: queue.New(queue.Config{
			MaxSize:    config.QueueSize,
			AckTimeout: config.AckTimeout,
			MaxRetries: 3,
		}),
		responseCh:  make(chan wsResponse, 10),
		reconnectCh: make(chan struct{}, 1),
	}

	c.state.Store(int32(StateDisconnected))
	return c
}

// Connect establishes WebSocket connection and, on the first successful
// call, starts the background workers (send, read, ping, reconnect).
func (c *WebSocketClient) Connect() error {
	c.connMu.Lock()
	err := c.connectLocked()
	c.connMu.Unlock()

	if err != nil {
		return err
	}

	// Start background workers exactly once
	c.reconnectOnce.Do(func() {
		c.wg.Add(4)
		go c.sendLoop()
		go c.readLoop()
		go c.pingLoop()
		go c.reconnectLoop()
	})

	return nil
}

// connectLocked dials the server with exponential backoff until success,
// context cancellation, or MaxRetries is exhausted (MaxRetries <= 0 means
// retry forever). Caller must hold connMu.
func (c *WebSocketClient) connectLocked() error {
	if c.conn != nil {
		c.conn.Close()
		c.conn = nil
	}

	c.state.Store(int32(StateConnecting))

	backoff := c.config.InitialBackoff
	attempt := 0

	dialer := websocket.Dialer{
		HandshakeTimeout: c.config.DialTimeout,
	}

	for {
		// Bail out promptly if the client is shutting down.
		select {
		case <-c.ctx.Done():
			c.state.Store(int32(StateDisconnected))
			return c.ctx.Err()
		default:
		}

		attempt++

		conn, resp, err := dialer.Dial(c.url, http.Header{})
		if err != nil {
			if resp != nil {
				logging.Warn("WebSocket dial attempt %d failed: %v (status: %d)", attempt, err, resp.StatusCode)
			} else {
				logging.Warn("WebSocket dial attempt %d failed: %v", attempt, err)
			}

			if c.config.MaxRetries > 0 && attempt >= c.config.MaxRetries {
				c.state.Store(int32(StateFailed))
				return fmt.Errorf("failed to connect after %d retries: %w", attempt, err)
			}

			// Exponential backoff with context cancellation
			timer := time.NewTimer(backoff)
			select {
			case <-c.ctx.Done():
				timer.Stop()
				c.state.Store(int32(StateDisconnected))
				return c.ctx.Err()
			case <-timer.C:
			}

			backoff = time.Duration(float64(backoff) * c.config.BackoffMultiplier)
			if backoff > c.config.MaxBackoff {
				backoff = c.config.MaxBackoff
			}
			continue
		}

		c.conn = conn
		c.state.Store(int32(StateConnected))
		logging.Info("WebSocket connected to %s (attempt %d)", c.url, attempt)
		// NOTE(review): this also counts the very first successful connect
		// as a "reconnect success" — confirm that is intended for metrics.
		c.reconnectSuccess.Add(1)
		return nil
	}
}

// reconnectLoop handles reconnection in a dedicated goroutine. All failure
// paths signal reconnectCh instead of dialing themselves, so at most one
// dial sequence runs at a time.
func (c *WebSocketClient) reconnectLoop() {
	defer c.wg.Done()

	for {
		select {
		case <-c.ctx.Done():
			return
		case <-c.reconnectCh:
			// Drain any additional reconnect signals so bursts collapse
			// into a single reconnection attempt.
			for {
				select {
				case <-c.reconnectCh:
				default:
					goto doReconnect
				}
			}

		doReconnect:
			currentState := ConnectionState(c.state.Load())
			if currentState == StateConnected {
				// A racing Connect already restored the connection.
				continue
			}

			c.connectionLost.Add(1)
			logging.Info("WebSocket starting reconnection...")

			c.connMu.Lock()
			if c.conn != nil {
				c.conn.Close()
				c.conn = nil
			}
			c.state.Store(int32(StateReconnecting))

			// Re-queue unacked in-flight messages so they are resent on
			// the new connection.
			count := c.sendQueue.RetryPending()
			if count > 0 {
				logging.Info("Re-queued %d pending messages for retry", count)
			}

			err := c.connectLocked()
			c.connMu.Unlock()

			if err != nil {
				if errors.Is(err, context.Canceled) {
					return
				}
				logging.Error("WebSocket reconnection failed: %v", err)
			}
		}
	}
}
ConnectionState(c.state.Load()) + if currentState == StateConnected { + continue + } + + c.connectionLost.Add(1) + logging.Info("WebSocket starting reconnection...") + + c.connMu.Lock() + if c.conn != nil { + c.conn.Close() + c.conn = nil + } + c.state.Store(int32(StateReconnecting)) + + count := c.sendQueue.RetryPending() + if count > 0 { + logging.Info("Re-queued %d pending messages for retry", count) + } + + err := c.connectLocked() + c.connMu.Unlock() + + if err != nil { + if errors.Is(err, context.Canceled) { + return + } + logging.Error("WebSocket reconnection failed: %v", err) + } + } + } +} + +// triggerReconnect signals the reconnect loop to reconnect +func (c *WebSocketClient) triggerReconnect() { + select { + case c.reconnectCh <- struct{}{}: + default: + } +} + +// sendLoop processes the message queue +func (c *WebSocketClient) sendLoop() { + defer c.wg.Done() + + for { + msg, err := c.sendQueue.Dequeue(c.ctx) + if err != nil { + if errors.Is(err, context.Canceled) || errors.Is(err, queue.ErrQueueClosed) { + return + } + logging.Error("Dequeue error: %v", err) + continue + } + + err = c.sendMessage(msg) + if err != nil { + logging.Debug("Send failed for message %d: %v", msg.ID, err) + c.handleSendError(msg, err) + } else { + c.messagesSent.Add(1) + } + } +} + +func (c *WebSocketClient) sendMessage(msg *queue.Message) error { + c.connMu.Lock() + defer c.connMu.Unlock() + + if c.conn == nil { + return ErrNotConnected + } + + // Build frame: [Length: 4B] [Type: 1B] [Payload: NB] + payload, err := json.Marshal(msg.Payload) + if err != nil { + return fmt.Errorf("marshal: %w", err) + } + + frameLen := uint32(1 + len(payload)) + buf := new(bytes.Buffer) + binary.Write(buf, binary.BigEndian, frameLen) + buf.WriteByte(msg.Type) + buf.Write(payload) + + c.conn.SetWriteDeadline(time.Now().Add(10 * time.Second)) + if err := c.conn.WriteMessage(websocket.BinaryMessage, buf.Bytes()); err != nil { + return fmt.Errorf("write: %w", err) + } + + // For messages expecting 
ACK, track them + if msg.Type == protocol.MsgWriteChunk { + c.sendQueue.TrackPending(msg) + } else { + select { + case msg.Result <- nil: + default: + } + } + + return nil +} + +// readLoop reads messages from WebSocket +func (c *WebSocketClient) readLoop() { + defer c.wg.Done() + + for { + select { + case <-c.ctx.Done(): + return + default: + } + + c.connMu.RLock() + conn := c.conn + c.connMu.RUnlock() + + if conn == nil { + time.Sleep(100 * time.Millisecond) + continue + } + + conn.SetReadDeadline(time.Now().Add(60 * time.Second)) + messageType, data, err := conn.ReadMessage() + if err != nil { + if websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway) { + logging.Info("WebSocket closed normally") + } else if !errors.Is(err, context.Canceled) { + logging.Warn("WebSocket read error: %v", err) + c.state.Store(int32(StateReconnecting)) + c.triggerReconnect() + } + continue + } + + if messageType != websocket.BinaryMessage { + continue + } + + // Parse frame + if len(data) < 5 { + logging.Warn("Invalid frame: too short") + continue + } + + frameLen := binary.BigEndian.Uint32(data[:4]) + if int(frameLen) != len(data)-4 { + logging.Warn("Invalid frame length") + continue + } + + msgType := data[4] + payload := data[5:] + + // Handle ACK messages + if msgType == protocol.MsgWriteAck { + var ack protocol.WriteAck + if err := json.Unmarshal(payload, &ack); err == nil { + c.messagesAcked.Add(1) + } + } + + // Send to response channel for SendAndReceive + select { + case c.responseCh <- wsResponse{msgType: msgType, data: payload}: + default: + } + } +} + +// pingLoop sends periodic pings +func (c *WebSocketClient) pingLoop() { + defer c.wg.Done() + + ticker := time.NewTicker(c.config.HealthCheckInterval) + defer ticker.Stop() + + consecutiveFails := 0 + const maxConsecutiveFails = 3 + + for { + select { + case <-c.ctx.Done(): + return + case <-ticker.C: + if ConnectionState(c.state.Load()) != StateConnected { + consecutiveFails = 0 + continue + 
} + + c.connMu.RLock() + conn := c.conn + c.connMu.RUnlock() + + if conn != nil { + conn.SetWriteDeadline(time.Now().Add(c.config.PingTimeout)) + if err := conn.WriteMessage(websocket.PingMessage, nil); err != nil { + consecutiveFails++ + c.healthCheckFails.Add(1) + logging.Debug("Ping failed (%d/%d): %v", consecutiveFails, maxConsecutiveFails, err) + + if consecutiveFails >= maxConsecutiveFails { + logging.Error("Ping failed %d times, triggering reconnect", consecutiveFails) + c.state.Store(int32(StateReconnecting)) + c.triggerReconnect() + consecutiveFails = 0 + } + } else { + consecutiveFails = 0 + } + } + } + } +} + +func (c *WebSocketClient) handleSendError(msg *queue.Message, err error) { + // Trigger reconnection (non-blocking) + c.triggerReconnect() + + msg.Retries++ + if msg.Retries <= 3 { + c.messagesRetried.Add(1) + if _, qerr := c.sendQueue.Enqueue(c.ctx, msg.Type, msg.Payload); qerr != nil { + select { + case msg.Result <- fmt.Errorf("requeue failed: %w", qerr): + default: + } + } + } else { + select { + case msg.Result <- fmt.Errorf("max retries exceeded: %w", err): + default: + } + } +} + +// Send sends a message asynchronously +func (c *WebSocketClient) Send(msgType byte, payload interface{}) error { + c.shutdownMu.Lock() + if c.shutdown { + c.shutdownMu.Unlock() + return ErrShuttingDown + } + c.shutdownMu.Unlock() + + _, err := c.sendQueue.Enqueue(c.ctx, msgType, payload) + return err +} + +// SendSync sends a message and waits for send completion +func (c *WebSocketClient) SendSync(msgType byte, payload interface{}) error { + c.shutdownMu.Lock() + if c.shutdown { + c.shutdownMu.Unlock() + return ErrShuttingDown + } + c.shutdownMu.Unlock() + + return c.sendQueue.EnqueueSync(c.ctx, msgType, payload) +} + +// SendAndReceive sends a message and waits for response +func (c *WebSocketClient) SendAndReceive(msgType byte, payload interface{}) (byte, []byte, error) { + c.shutdownMu.Lock() + if c.shutdown { + c.shutdownMu.Unlock() + return 0, nil, 
ErrShuttingDown + } + c.shutdownMu.Unlock() + + // Serialize all SendAndReceive calls to prevent response mixup + c.reqMu.Lock() + defer c.reqMu.Unlock() + + // Drain any stale responses from previous timed-out requests + for { + select { + case <-c.responseCh: + logging.Debug("SendAndReceive: drained stale response") + default: + goto sendRequest + } + } + +sendRequest: + c.connMu.Lock() + + if c.conn == nil { + c.connMu.Unlock() + return 0, nil, ErrNotConnected + } + + // Build and send frame + data, err := json.Marshal(payload) + if err != nil { + c.connMu.Unlock() + return 0, nil, fmt.Errorf("marshal: %w", err) + } + + frameLen := uint32(1 + len(data)) + buf := new(bytes.Buffer) + binary.Write(buf, binary.BigEndian, frameLen) + buf.WriteByte(msgType) + buf.Write(data) + + c.conn.SetWriteDeadline(time.Now().Add(10 * time.Second)) + if err := c.conn.WriteMessage(websocket.BinaryMessage, buf.Bytes()); err != nil { + c.connMu.Unlock() + go c.triggerReconnect() + return 0, nil, fmt.Errorf("write: %w", err) + } + + c.connMu.Unlock() + + // Wait for response + select { + case resp := <-c.responseCh: + c.messagesAcked.Add(1) + return resp.msgType, resp.data, resp.err + case <-time.After(c.config.AckTimeout): + return 0, nil, errors.New("response timeout") + case <-c.ctx.Done(): + return 0, nil, c.ctx.Err() + } +} + +// State returns current connection state +func (c *WebSocketClient) State() ConnectionState { + return ConnectionState(c.state.Load()) +} + +// Stats returns client statistics +func (c *WebSocketClient) Stats() map[string]uint64 { + return map[string]uint64{ + "messages_sent": c.messagesSent.Load(), + "messages_acked": c.messagesAcked.Load(), + "messages_retried": c.messagesRetried.Load(), + "connection_lost": c.connectionLost.Load(), + "reconnect_success": c.reconnectSuccess.Load(), + "health_check_fails": c.healthCheckFails.Load(), + "queue_length": uint64(c.sendQueue.Len()), + "pending_acks": uint64(c.sendQueue.GetPendingCount()), + } +} + +// Close 
// Close closes the WebSocket connection with graceful shutdown: mark the
// client as shut down (so Send/SendSync/SendAndReceive refuse new work),
// cancel the workers, wait (bounded) for them to exit, then perform a
// best-effort close handshake and tear down the socket.
func (c *WebSocketClient) Close() error {
	c.shutdownMu.Lock()
	if c.shutdown {
		c.shutdownMu.Unlock()
		// Idempotent: a second Close is a no-op.
		return nil
	}
	c.shutdown = true
	c.shutdownMu.Unlock()

	logging.Info("WebSocket client shutting down...")

	c.cancel()
	c.sendQueue.Close()

	// Wait for goroutines with timeout
	done := make(chan struct{})
	go func() {
		c.wg.Wait()
		close(done)
	}()

	select {
	case <-done:
		logging.Info("WebSocket client goroutines stopped gracefully")
	case <-time.After(c.config.ShutdownTimeout):
		logging.Warn("WebSocket client shutdown timed out after %v", c.config.ShutdownTimeout)
	}

	c.connMu.Lock()
	defer c.connMu.Unlock()

	if c.conn != nil {
		// Best-effort close frame; its error is deliberately ignored
		// because the socket is being torn down regardless.
		c.conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""))
		err := c.conn.Close()
		c.conn = nil
		c.state.Store(int32(StateDisconnected))
		return err
	}
	c.state.Store(int32(StateDisconnected))
	return nil
}
const ( N10 PatchDisplayRequestRefreshRate = 10 @@ -259,6 +278,60 @@ type FileSystemEvent struct { // FileSystemEventType Event type. type FileSystemEventType string +// FspipeStartResult Response after starting the fspipe daemon +type FspipeStartResult struct { + // HealthEndpoint URL of the fspipe health/metrics server + HealthEndpoint *string `json:"health_endpoint,omitempty"` + + // MountPath The path where the virtual filesystem is mounted + MountPath string `json:"mount_path"` + + // Running Whether the daemon is now running + Running bool `json:"running"` + + // S3Bucket The S3 bucket being used (if transport_mode is s3) + S3Bucket *string `json:"s3_bucket,omitempty"` + + // TransportMode The transport mode being used + TransportMode FspipeStartResultTransportMode `json:"transport_mode"` + + // WsEndpoint The WebSocket endpoint being used (if transport_mode is websocket) + WsEndpoint *string `json:"ws_endpoint,omitempty"` +} + +// FspipeStartResultTransportMode The transport mode being used +type FspipeStartResultTransportMode string + +// FspipeStatus Status of the fspipe daemon +type FspipeStatus struct { + // MountPath The path where the virtual filesystem is mounted (if running) + MountPath *string `json:"mount_path,omitempty"` + + // Running Whether the daemon is currently running + Running bool `json:"running"` + + // S3Bucket The S3 bucket being used (if transport_mode is s3) + S3Bucket *string `json:"s3_bucket,omitempty"` + + // Stats Transport statistics (messages sent, bytes transferred, etc.) 
+ Stats *map[string]interface{} `json:"stats,omitempty"` + + // TransportMode The transport mode being used (if running) + TransportMode *FspipeStatusTransportMode `json:"transport_mode,omitempty"` + + // TransportState Current transport connection state (if running) + TransportState *FspipeStatusTransportState `json:"transport_state,omitempty"` + + // WsEndpoint The WebSocket endpoint being used (if transport_mode is websocket) + WsEndpoint *string `json:"ws_endpoint,omitempty"` +} + +// FspipeStatusTransportMode The transport mode being used (if running) +type FspipeStatusTransportMode string + +// FspipeStatusTransportState Current transport connection state (if running) +type FspipeStatusTransportState string + // ListFiles Array of file or directory information entries. type ListFiles = []FileInfo @@ -497,6 +570,27 @@ type RecorderInfo struct { StartedAt *time.Time `json:"started_at"` } +// S3Config S3/R2 storage configuration +type S3Config struct { + // AccessKeyId Access key ID + AccessKeyId string `json:"access_key_id"` + + // Bucket Bucket name + Bucket string `json:"bucket"` + + // Endpoint S3-compatible endpoint URL (e.g., "https://ACCOUNT_ID.r2.cloudflarestorage.com") + Endpoint string `json:"endpoint"` + + // Prefix Optional key prefix for uploaded objects + Prefix *string `json:"prefix,omitempty"` + + // Region Region (use "auto" for R2) + Region *string `json:"region,omitempty"` + + // SecretAccessKey Secret access key + SecretAccessKey string `json:"secret_access_key"` +} + // ScreenshotRegion defines model for ScreenshotRegion. 
type ScreenshotRegion struct { // Height Height of the region in pixels @@ -565,6 +659,22 @@ type StartFsWatchRequest struct { Recursive *bool `json:"recursive,omitempty"` } +// StartFspipeRequest Request to start the fspipe daemon +type StartFspipeRequest struct { + // HealthPort Port for the fspipe health/metrics HTTP server + HealthPort *int `json:"health_port,omitempty"` + + // MountPath Path where the virtual filesystem will be mounted. Defaults to /home/kernel/Downloads. + MountPath *string `json:"mount_path,omitempty"` + + // S3Config S3/R2 storage configuration + S3Config *S3Config `json:"s3_config,omitempty"` + + // WsEndpoint WebSocket endpoint for streaming files (e.g., "ws://listener:9000/fspipe" or "wss://..."). + // Mutually exclusive with s3_config. + WsEndpoint *string `json:"ws_endpoint,omitempty"` +} + // StartRecordingRequest defines model for StartRecordingRequest. type StartRecordingRequest struct { // Framerate Recording framerate in fps (overrides server default) @@ -786,6 +896,9 @@ type UploadZstdMultipartRequestBody UploadZstdMultipartBody // StartFsWatchJSONRequestBody defines body for StartFsWatch for application/json ContentType. type StartFsWatchJSONRequestBody = StartFsWatchRequest +// StartFspipeJSONRequestBody defines body for StartFspipe for application/json ContentType. +type StartFspipeJSONRequestBody = StartFspipeRequest + // ExecutePlaywrightCodeJSONRequestBody defines body for ExecutePlaywrightCode for application/json ContentType. 
type ExecutePlaywrightCodeJSONRequestBody = ExecutePlaywrightRequest @@ -1002,6 +1115,17 @@ type ClientInterface interface { // WriteFileWithBody request with any body WriteFileWithBody(ctx context.Context, params *WriteFileParams, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) + // StartFspipeWithBody request with any body + StartFspipeWithBody(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) + + StartFspipe(ctx context.Context, body StartFspipeJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error) + + // GetFspipeStatus request + GetFspipeStatus(ctx context.Context, reqEditors ...RequestEditorFn) (*http.Response, error) + + // StopFspipe request + StopFspipe(ctx context.Context, reqEditors ...RequestEditorFn) (*http.Response, error) + // LogsStream request LogsStream(ctx context.Context, params *LogsStreamParams, reqEditors ...RequestEditorFn) (*http.Response, error) @@ -1591,6 +1715,54 @@ func (c *Client) WriteFileWithBody(ctx context.Context, params *WriteFileParams, return c.Client.Do(req) } +func (c *Client) StartFspipeWithBody(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewStartFspipeRequestWithBody(c.Server, contentType, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) StartFspipe(ctx context.Context, body StartFspipeJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewStartFspipeRequest(c.Server, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) GetFspipeStatus(ctx context.Context, reqEditors 
...RequestEditorFn) (*http.Response, error) { + req, err := NewGetFspipeStatusRequest(c.Server) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) StopFspipe(ctx context.Context, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewStopFspipeRequest(c.Server) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + func (c *Client) LogsStream(ctx context.Context, params *LogsStreamParams, reqEditors ...RequestEditorFn) (*http.Response, error) { req, err := NewLogsStreamRequest(c.Server, params) if err != nil { @@ -2995,6 +3167,100 @@ func NewWriteFileRequestWithBody(server string, params *WriteFileParams, content return req, nil } +// NewStartFspipeRequest calls the generic StartFspipe builder with application/json body +func NewStartFspipeRequest(server string, body StartFspipeJSONRequestBody) (*http.Request, error) { + var bodyReader io.Reader + buf, err := json.Marshal(body) + if err != nil { + return nil, err + } + bodyReader = bytes.NewReader(buf) + return NewStartFspipeRequestWithBody(server, "application/json", bodyReader) +} + +// NewStartFspipeRequestWithBody generates requests for StartFspipe with any type of body +func NewStartFspipeRequestWithBody(server string, contentType string, body io.Reader) (*http.Request, error) { + var err error + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/fspipe/start") + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("POST", queryURL.String(), body) + if err != nil { + return nil, err + } + + req.Header.Add("Content-Type", contentType) + + return req, nil +} + +// NewGetFspipeStatusRequest generates requests for GetFspipeStatus +func NewGetFspipeStatusRequest(server string) (*http.Request, error) { + var err error + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/fspipe/status") + if operationPath[0] == '/' { + operationPath = "." + operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("GET", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + +// NewStopFspipeRequest generates requests for StopFspipe +func NewStopFspipeRequest(server string) (*http.Request, error) { + var err error + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/fspipe/stop") + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("POST", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + // NewLogsStreamRequest generates requests for LogsStream func NewLogsStreamRequest(server string, params *LogsStreamParams) (*http.Request, error) { var err error @@ -3772,6 +4038,17 @@ type ClientWithResponsesInterface interface { // WriteFileWithBodyWithResponse request with any body WriteFileWithBodyWithResponse(ctx context.Context, params *WriteFileParams, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*WriteFileResponse, error) + // StartFspipeWithBodyWithResponse request with any body + StartFspipeWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*StartFspipeResponse, error) + + StartFspipeWithResponse(ctx context.Context, body StartFspipeJSONRequestBody, reqEditors ...RequestEditorFn) (*StartFspipeResponse, error) + + // GetFspipeStatusWithResponse request + GetFspipeStatusWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*GetFspipeStatusResponse, error) + + // StopFspipeWithResponse request + StopFspipeWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*StopFspipeResponse, error) + // LogsStreamWithResponse request LogsStreamWithResponse(ctx context.Context, params *LogsStreamParams, reqEditors ...RequestEditorFn) (*LogsStreamResponse, error) @@ -4502,6 +4779,77 @@ func (r WriteFileResponse) StatusCode() int { return 0 } +type StartFspipeResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *FspipeStartResult + JSON400 *BadRequestError + JSON409 *ConflictError + JSON500 *InternalError +} + +// Status returns HTTPResponse.Status +func (r StartFspipeResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns 
HTTPResponse.StatusCode +func (r StartFspipeResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type GetFspipeStatusResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *FspipeStatus + JSON500 *InternalError +} + +// Status returns HTTPResponse.Status +func (r GetFspipeStatusResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r GetFspipeStatusResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type StopFspipeResponse struct { + Body []byte + HTTPResponse *http.Response + JSON400 *BadRequestError + JSON500 *InternalError +} + +// Status returns HTTPResponse.Status +func (r StopFspipeResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r StopFspipeResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + type LogsStreamResponse struct { Body []byte HTTPResponse *http.Response @@ -5218,6 +5566,41 @@ func (c *ClientWithResponses) WriteFileWithBodyWithResponse(ctx context.Context, return ParseWriteFileResponse(rsp) } +// StartFspipeWithBodyWithResponse request with arbitrary body returning *StartFspipeResponse +func (c *ClientWithResponses) StartFspipeWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*StartFspipeResponse, error) { + rsp, err := c.StartFspipeWithBody(ctx, contentType, body, reqEditors...) 
+ if err != nil { + return nil, err + } + return ParseStartFspipeResponse(rsp) +} + +func (c *ClientWithResponses) StartFspipeWithResponse(ctx context.Context, body StartFspipeJSONRequestBody, reqEditors ...RequestEditorFn) (*StartFspipeResponse, error) { + rsp, err := c.StartFspipe(ctx, body, reqEditors...) + if err != nil { + return nil, err + } + return ParseStartFspipeResponse(rsp) +} + +// GetFspipeStatusWithResponse request returning *GetFspipeStatusResponse +func (c *ClientWithResponses) GetFspipeStatusWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*GetFspipeStatusResponse, error) { + rsp, err := c.GetFspipeStatus(ctx, reqEditors...) + if err != nil { + return nil, err + } + return ParseGetFspipeStatusResponse(rsp) +} + +// StopFspipeWithResponse request returning *StopFspipeResponse +func (c *ClientWithResponses) StopFspipeWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*StopFspipeResponse, error) { + rsp, err := c.StopFspipe(ctx, reqEditors...) + if err != nil { + return nil, err + } + return ParseStopFspipeResponse(rsp) +} + // LogsStreamWithResponse request returning *LogsStreamResponse func (c *ClientWithResponses) LogsStreamWithResponse(ctx context.Context, params *LogsStreamParams, reqEditors ...RequestEditorFn) (*LogsStreamResponse, error) { rsp, err := c.LogsStream(ctx, params, reqEditors...) 
@@ -6497,23 +6880,136 @@ func ParseWriteFileResponse(rsp *http.Response) (*WriteFileResponse, error) { return response, nil } -// ParseLogsStreamResponse parses an HTTP response from a LogsStreamWithResponse call -func ParseLogsStreamResponse(rsp *http.Response) (*LogsStreamResponse, error) { +// ParseStartFspipeResponse parses an HTTP response from a StartFspipeWithResponse call +func ParseStartFspipeResponse(rsp *http.Response) (*StartFspipeResponse, error) { bodyBytes, err := io.ReadAll(rsp.Body) defer func() { _ = rsp.Body.Close() }() if err != nil { return nil, err } - response := &LogsStreamResponse{ + response := &StartFspipeResponse{ Body: bodyBytes, HTTPResponse: rsp, } - return response, nil -} - -// ParseExecutePlaywrightCodeResponse parses an HTTP response from a ExecutePlaywrightCodeWithResponse call + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest FspipeStartResult + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 400: + var dest BadRequestError + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON400 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 409: + var dest ConflictError + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON409 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 500: + var dest InternalError + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON500 = &dest + + } + + return response, nil +} + +// ParseGetFspipeStatusResponse parses an HTTP response from a GetFspipeStatusWithResponse call +func ParseGetFspipeStatusResponse(rsp *http.Response) (*GetFspipeStatusResponse, error) { + bodyBytes, err := 
io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &GetFspipeStatusResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest FspipeStatus + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 500: + var dest InternalError + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON500 = &dest + + } + + return response, nil +} + +// ParseStopFspipeResponse parses an HTTP response from a StopFspipeWithResponse call +func ParseStopFspipeResponse(rsp *http.Response) (*StopFspipeResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &StopFspipeResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 400: + var dest BadRequestError + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON400 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 500: + var dest InternalError + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON500 = &dest + + } + + return response, nil +} + +// ParseLogsStreamResponse parses an HTTP response from a LogsStreamWithResponse call +func ParseLogsStreamResponse(rsp *http.Response) (*LogsStreamResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &LogsStreamResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + return response, nil +} + +// ParseExecutePlaywrightCodeResponse 
parses an HTTP response from a ExecutePlaywrightCodeWithResponse call func ParseExecutePlaywrightCodeResponse(rsp *http.Response) (*ExecutePlaywrightCodeResponse, error) { bodyBytes, err := io.ReadAll(rsp.Body) defer func() { _ = rsp.Body.Close() }() @@ -7140,6 +7636,15 @@ type ServerInterface interface { // Write or create a file // (PUT /fs/write_file) WriteFile(w http.ResponseWriter, r *http.Request, params WriteFileParams) + // Start fspipe daemon to monitor Chrome downloads + // (POST /fspipe/start) + StartFspipe(w http.ResponseWriter, r *http.Request) + // Get fspipe daemon status + // (GET /fspipe/status) + GetFspipeStatus(w http.ResponseWriter, r *http.Request) + // Stop the fspipe daemon + // (POST /fspipe/stop) + StopFspipe(w http.ResponseWriter, r *http.Request) // Stream logs over SSE // (GET /logs/stream) LogsStream(w http.ResponseWriter, r *http.Request, params LogsStreamParams) @@ -7356,6 +7861,24 @@ func (_ Unimplemented) WriteFile(w http.ResponseWriter, r *http.Request, params w.WriteHeader(http.StatusNotImplemented) } +// Start fspipe daemon to monitor Chrome downloads +// (POST /fspipe/start) +func (_ Unimplemented) StartFspipe(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotImplemented) +} + +// Get fspipe daemon status +// (GET /fspipe/status) +func (_ Unimplemented) GetFspipeStatus(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotImplemented) +} + +// Stop the fspipe daemon +// (POST /fspipe/stop) +func (_ Unimplemented) StopFspipe(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotImplemented) +} + // Stream logs over SSE // (GET /logs/stream) func (_ Unimplemented) LogsStream(w http.ResponseWriter, r *http.Request, params LogsStreamParams) { @@ -7999,6 +8522,48 @@ func (siw *ServerInterfaceWrapper) WriteFile(w http.ResponseWriter, r *http.Requ handler.ServeHTTP(w, r) } +// StartFspipe operation middleware +func (siw *ServerInterfaceWrapper) StartFspipe(w http.ResponseWriter, 
r *http.Request) { + + handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + siw.Handler.StartFspipe(w, r) + })) + + for _, middleware := range siw.HandlerMiddlewares { + handler = middleware(handler) + } + + handler.ServeHTTP(w, r) +} + +// GetFspipeStatus operation middleware +func (siw *ServerInterfaceWrapper) GetFspipeStatus(w http.ResponseWriter, r *http.Request) { + + handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + siw.Handler.GetFspipeStatus(w, r) + })) + + for _, middleware := range siw.HandlerMiddlewares { + handler = middleware(handler) + } + + handler.ServeHTTP(w, r) +} + +// StopFspipe operation middleware +func (siw *ServerInterfaceWrapper) StopFspipe(w http.ResponseWriter, r *http.Request) { + + handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + siw.Handler.StopFspipe(w, r) + })) + + for _, middleware := range siw.HandlerMiddlewares { + handler = middleware(handler) + } + + handler.ServeHTTP(w, r) +} + // LogsStream operation middleware func (siw *ServerInterfaceWrapper) LogsStream(w http.ResponseWriter, r *http.Request) { @@ -8504,6 +9069,15 @@ func HandlerWithOptions(si ServerInterface, options ChiServerOptions) http.Handl r.Group(func(r chi.Router) { r.Put(options.BaseURL+"/fs/write_file", wrapper.WriteFile) }) + r.Group(func(r chi.Router) { + r.Post(options.BaseURL+"/fspipe/start", wrapper.StartFspipe) + }) + r.Group(func(r chi.Router) { + r.Get(options.BaseURL+"/fspipe/status", wrapper.GetFspipeStatus) + }) + r.Group(func(r chi.Router) { + r.Post(options.BaseURL+"/fspipe/stop", wrapper.StopFspipe) + }) r.Group(func(r chi.Router) { r.Get(options.BaseURL+"/logs/stream", wrapper.LogsStream) }) @@ -9755,6 +10329,108 @@ func (response WriteFile500JSONResponse) VisitWriteFileResponse(w http.ResponseW return json.NewEncoder(w).Encode(response) } +type StartFspipeRequestObject struct { + Body *StartFspipeJSONRequestBody +} + +type 
StartFspipeResponseObject interface { + VisitStartFspipeResponse(w http.ResponseWriter) error +} + +type StartFspipe200JSONResponse FspipeStartResult + +func (response StartFspipe200JSONResponse) VisitStartFspipeResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type StartFspipe400JSONResponse struct{ BadRequestErrorJSONResponse } + +func (response StartFspipe400JSONResponse) VisitStartFspipeResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type StartFspipe409JSONResponse struct{ ConflictErrorJSONResponse } + +func (response StartFspipe409JSONResponse) VisitStartFspipeResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(409) + + return json.NewEncoder(w).Encode(response) +} + +type StartFspipe500JSONResponse struct{ InternalErrorJSONResponse } + +func (response StartFspipe500JSONResponse) VisitStartFspipeResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type GetFspipeStatusRequestObject struct { +} + +type GetFspipeStatusResponseObject interface { + VisitGetFspipeStatusResponse(w http.ResponseWriter) error +} + +type GetFspipeStatus200JSONResponse FspipeStatus + +func (response GetFspipeStatus200JSONResponse) VisitGetFspipeStatusResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type GetFspipeStatus500JSONResponse struct{ InternalErrorJSONResponse } + +func (response GetFspipeStatus500JSONResponse) VisitGetFspipeStatusResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return 
json.NewEncoder(w).Encode(response) +} + +type StopFspipeRequestObject struct { +} + +type StopFspipeResponseObject interface { + VisitStopFspipeResponse(w http.ResponseWriter) error +} + +type StopFspipe200Response struct { +} + +func (response StopFspipe200Response) VisitStopFspipeResponse(w http.ResponseWriter) error { + w.WriteHeader(200) + return nil +} + +type StopFspipe400JSONResponse struct{ BadRequestErrorJSONResponse } + +func (response StopFspipe400JSONResponse) VisitStopFspipeResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type StopFspipe500JSONResponse struct{ InternalErrorJSONResponse } + +func (response StopFspipe500JSONResponse) VisitStopFspipeResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + type LogsStreamRequestObject struct { Params LogsStreamParams } @@ -10492,6 +11168,15 @@ type StrictServerInterface interface { // Write or create a file // (PUT /fs/write_file) WriteFile(ctx context.Context, request WriteFileRequestObject) (WriteFileResponseObject, error) + // Start fspipe daemon to monitor Chrome downloads + // (POST /fspipe/start) + StartFspipe(ctx context.Context, request StartFspipeRequestObject) (StartFspipeResponseObject, error) + // Get fspipe daemon status + // (GET /fspipe/status) + GetFspipeStatus(ctx context.Context, request GetFspipeStatusRequestObject) (GetFspipeStatusResponseObject, error) + // Stop the fspipe daemon + // (POST /fspipe/stop) + StopFspipe(ctx context.Context, request StopFspipeRequestObject) (StopFspipeResponseObject, error) // Stream logs over SSE // (GET /logs/stream) LogsStream(ctx context.Context, request LogsStreamRequestObject) (LogsStreamResponseObject, error) @@ -11395,6 +12080,85 @@ func (sh *strictHandler) WriteFile(w http.ResponseWriter, r *http.Request, param } } +// 
StartFspipe operation middleware +func (sh *strictHandler) StartFspipe(w http.ResponseWriter, r *http.Request) { + var request StartFspipeRequestObject + + var body StartFspipeJSONRequestBody + if err := json.NewDecoder(r.Body).Decode(&body); err != nil { + sh.options.RequestErrorHandlerFunc(w, r, fmt.Errorf("can't decode JSON body: %w", err)) + return + } + request.Body = &body + + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, request interface{}) (interface{}, error) { + return sh.ssi.StartFspipe(ctx, request.(StartFspipeRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "StartFspipe") + } + + response, err := handler(r.Context(), w, r, request) + + if err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } else if validResponse, ok := response.(StartFspipeResponseObject); ok { + if err := validResponse.VisitStartFspipeResponse(w); err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } + } else if response != nil { + sh.options.ResponseErrorHandlerFunc(w, r, fmt.Errorf("unexpected response type: %T", response)) + } +} + +// GetFspipeStatus operation middleware +func (sh *strictHandler) GetFspipeStatus(w http.ResponseWriter, r *http.Request) { + var request GetFspipeStatusRequestObject + + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, request interface{}) (interface{}, error) { + return sh.ssi.GetFspipeStatus(ctx, request.(GetFspipeStatusRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "GetFspipeStatus") + } + + response, err := handler(r.Context(), w, r, request) + + if err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } else if validResponse, ok := response.(GetFspipeStatusResponseObject); ok { + if err := validResponse.VisitGetFspipeStatusResponse(w); err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } + } else if response != nil { + 
sh.options.ResponseErrorHandlerFunc(w, r, fmt.Errorf("unexpected response type: %T", response)) + } +} + +// StopFspipe operation middleware +func (sh *strictHandler) StopFspipe(w http.ResponseWriter, r *http.Request) { + var request StopFspipeRequestObject + + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, request interface{}) (interface{}, error) { + return sh.ssi.StopFspipe(ctx, request.(StopFspipeRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "StopFspipe") + } + + response, err := handler(r.Context(), w, r, request) + + if err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } else if validResponse, ok := response.(StopFspipeResponseObject); ok { + if err := validResponse.VisitStopFspipeResponse(w); err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } + } else if response != nil { + sh.options.ResponseErrorHandlerFunc(w, r, fmt.Errorf("unexpected response type: %T", response)) + } +} + // LogsStream operation middleware func (sh *strictHandler) LogsStream(w http.ResponseWriter, r *http.Request, params LogsStreamParams) { var request LogsStreamRequestObject @@ -11811,133 +12575,151 @@ func (sh *strictHandler) StopRecording(w http.ResponseWriter, r *http.Request) { // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+x9eXMbN/bgV0H1TpWlHV7ykWw0fzm2nGhtxy5L2cwk9PIHdj+S+Kkb6ABoUrTL89m3", - "8IC+0bwkWVZ2qlIxRXYDD3gn3oXPQSiSVHDgWgWnnwMJKhVcAf7xI40+wJ8ZKH0mpZDmq1BwDVybjzRN", - "YxZSzQQf/rcS3HynwgUk1Hz6m4RZcBr8j2E5/tD+qoZ2tC9fvvSCCFQoWWoGCU7NhMTNGHzpBS8En8Us", - "/Fqz59OZqc+5Bslp/JWmzqcjFyCXIIl7sBf8IvQrkfHoK8Hxi9AE5wvMb+5xM9qLmIVXb0WmIMePASCK", - "mHmRxu+lSEFqZuhmRmMFvSCtfPU5mGZaWwjrE+KQxP5KtCDMbAQNNVkxvQh6AfAsCU7/CGKY6aAXSDZf", - "mH8TFkUxBL1gSsOroBfMhFxRGQUfe4FepxCcBkpLxudmC0MD+sR+3Zz+cp0CETOCzxAa4tflrJFYmT+z", - "NHDDeCdYiDiaXMFa+ZYXsRkDSczPZn3mWRJl5lWiF2AnDnoB05Dg+63R3RdUSro2f/MsmeBbbroZzWId", - 
"nJ60UJklU5BmcZolgJNLSIHq2rxudLPtc0CKu26v4p8kFEJGjFONu1UMQFKhmNuz9kjr9kj/OmSkL71A", - "wp8ZkxAZpFwHZugSEWL632CZ9oUEquElkxBqIdeHUWoiIg+hvEvt6yTKRyfmQXIkQk1jYtHVIzCYD8j3", - "z54dD8hLixnc+O+fPRsEvSCl2rB5cBr83z9G/e8/fn7Se/rlb4GHpFKqF20gnk+ViDMNFSDMg2aGEJfe", - "mGQ4+J/twRu7iTP5NvMlxKDhPdWLw/ZxyxJywCOc5vYB/wAhEtr8MOhZ1Ib9PAKuLTs70pX5JJWVkOdx", - "uqA8S0CykAhJFut0AbyJf9r/9Lz/+6j/Q//j3//mXWx7YUylMV0bNcXme65nASg5W2t6kUkJXJPIjk3s", - "c4RxkrJriJWXsSXMJKjFRFIN24d0TxPztBn450/kKKFrMgXCszgmbEa40CQCDaGm0xiOvZOuWOQjqOZs", - "+NhG+L1bK+n8K2i3SNJ5h2YrNJpVcT49E0FM1zWhP2oK/ZfmEbP6hMUxUxAKHikyBb0C4DkgRqsRyiOi", - "NJXaUW8ilkBoLJxeMtw1QLA4SwygIx9ObqL5zF7spfj8AuWdjEBCRGKmtGHLP657ZP2xqmZSyqQqlqgX", - "UmTzBVktWGyBmDM+H5C3mdLEGFeUcUI1iYEqTR6TVDCu1aAKaRPkyoYk9Prc/voY9678o7majT8qDekE", - "0T1J6mr+2Z4olxBTzZZAzJCqsWpyZBjPIINxppnRbmaw4+2Ix9EmKciJgnni7NHSFhl1GyMFQIgNC1UK", - "krhxzEIK+iNvLRDkpAbRyVYToVM3FGZ0Q+eDUnQOHjJsDJw/6B37GsJMw/uYrlfIxLvKkvpWubcMwYId", - "kZRDktBYJ03xE3pNFmPbXuDfw/9Nl9R+xAEqYw/IpTHBzJcLqggNQ1DILI9SOodHPfIIDxzX+lEPRcaj", - "qRQrBfIRWVLJjLRWgzE/u6ZJGsMpGQd0RZkm5uXBXGhx9GihdapOh0OwzwxCkTw6/geRoDPJSeVxzXQM", - "R8f/GAdj7rOJjBkrMj1RENao7bsWtb2l10g2do3MyF6WoO5x7FFYZ4Qp8t0Iqcu+E5w+GY32ojXc/B3p", - "QSHAe5KDeclwToMKytW16AFyKq8PhcRPHAkbtVvuz4yyGCLfrssC6AZ1LYAsaZyBwyREZLq29jzaxWxG", - "KF8fW2ERgfTAc6Epj6iMCMJLZlIkOEB1YS14lI5EpjcMJjKdZnrX0TIk+PZwvy1AL0CWC3L8EhH3yiyL", - "43U55FSIGChvUUc+gY9AXrEYzvlMtOURU5OIyc1QoQHNFKHlaWDggadnDjQTQ//t4d4YFZegorZuBOST", - "gT1PJ1QHp0FENfTxbc/u+Y9KZln2cDRlWpEjcybqkXEQydW17Jv/xoGxi8dBX676sm/+GwfHA98MnPrg", - "/pEqIOan3A6fmSmF9O7Ezoeq3ORpEwn7BJPpWoOHTi7YJxQs+POAjMisAgYDNdh+nsU1Ouhqk/VyOqjg", - "0G16FzldrJWG5GxZaOQmYhQ+QMIF5XMgYB4ctOTHLuRHZzMIDT/sTIeH4rKY6lCk7kclfkcRbikxvw0q", - "tvuLD2fPL8+CXvDbh3P89+XZmzP88OHsl+dvzzxmfAP5+Guv22B5w5RGvHnWaKxFs7b2jjFuGdiwNHCd", - "E2JhuG7yDRZSyWOCvxHzDtp6TmIxx7nWpeitOCjbRFaxuRpSScwLJWUsj0GXMaA0TVKPZjK63kxfQrSi", - "iqRSRFloqWgX8dZh+VWn9iHsrVjCDU6SNzlRGYt6rxPVNldfeWYCEmZSCUm0OMjVt+tIO7v6zDYf7puK", - 
"QOnJNh8bKG2ANzyUq4ZtLqpeoGS4bWAlMhnCzmM2DYp8gl5lFb4denf1wcVy9rQ4fwKOrqt3r0keDWpz", - "r7iq2eBaZtCOaUSG+UHlJtNgu7kkrrxreU91uHDurwP5qsP/9bLb71WcAR4/He3vBXvZ6f0akPMZEQnT", - "GqIeyRQoZIsFmy/MuY8uKYvNwcq+YuwJ62pE8nGi1Cmg70a9J6Pe42e9k9FHP4i4tRMWxbAdXzOCXxuQ", - "MwU2YGDMEbJaACexObQvGayMqikcn0MJuExjAITmXO/X/RLQ1zQJF1IkzMD+uXt2fJS8cI8SOtMgK+vP", - "jRdziOUqk0CYJjSiqfW1c1gRA3XtjIc0gXu5ABrNsriHsxXfxB3k2el2fNnpbizI5snj0W7Ox/cSlHoN", - "B1J2lElqgdroGHRPFXrD0BQqEvQGNtxHVRI16B717LNUAtE0Ta0WPdg3WARTkm0q7QrWJDXbQ5TZHB7C", - "YC8N55//jfMVmtHVOpmKGCfHiQbkjIYLYqYgaiGyOCJTILTyLFFZmgqp7Yn3OhJaiHjMjxQA+efJCa5l", - "nZAIZuhVE1wdD4jzkCjCeBhnEZBx8AHPzePAnI0uFmym7ccXWsb20/PYffXq2TgYjK2/0DrImLIOzxAB", - "pLESBspQJFOnspSLRdnx/q7zIxf+hbP9/ZJOcdg9NrQhrXF3vfJaCiPwz64hvDUnGDXLS9BtveZGjnCR", - "qXjdVk1Uzus+0z8+tiP9diQq51kCTf/uVqqiaiKFqPs8/cvInDfT7ge6/ol5laSSLVkMc+gQO1RNMgWe", - "M1hzSKosOZinzVA8i1F75DK+HQ63a/cccXCjUfMISdQC4rjYcqMLMu61xMOVZ6zfhLwyPFweSY5o9Uh2", - "7EZ0/hU7CeO+BWy3uYAvu8nrsy+O4nD2uZX/cMaXTAqOnujCwWlgVaALVey2vrIbJeW3nJT7+SW7Edjt", - "frTo3MqGN/I90irTFQgr1tFmwlwrFfGLNqWZ9eePtRSQ95QB10xP/M5ut1RiHkGHnX8E64qcTL976vdE", - "fPe0D9y8HhH7KJlms5nlrA5X5K6DiUx3D/alG3uvWRwfJkQv2NwoWaRey8MN6q2jTOHjNaEWXJ59eBts", - "HrfqD3GPvz5/8yboBee/XAa94Odf3293g7i5NxDxBzRFD9UmaMZS8v7yX/0pDa8g6t6GUMQekv0FVkSD", - "TJhZeSjiLOFqW1CqF0ix2jaWeWTP6BaO2rOAbtixi5SueHXD4vjdLDj9Y7P7x6O6v/Sa/mkax8Ic7SZa", - "r7drwefuaUJJqiCLRL9Y/dH7y38dNwWrtexREeXpYBjBNBqpQ136kXbuoppNxNkDTXUR5oxgxO2hKG3N", - "ZB47fJq2OPjYwusB8vy84hakUyOQKFFmtE38kPpSYd5dFMg6f+kXte73ie91m+XYp8rwPUSElZk1HiVb", - "eOuyjEV+QUyNOT6h2u8NRG+dxUaVzNxrezgEO1lNU52pPbGRZ64ofNlq2W6plGaTNPSs70xpllBzGHnx", - "/leSodc0BRkC13Re1YIcQ/Bb1OhZrj4Jm9X2akGtbrXbtc1G6QUJJF0hkxJiCQoxTxJIjI1ooS+iKR0a", - "3OtueV/iVNdc9DLj3KDPLhsivy7qRmzE+GFK5yXV1EiylWTWAdogPRutZDzNPBGYiGq6k2ERVWcZbPUe", - "FuN+3LrmG9mLBhyXWKTMcO0Vmic08C4iKRNG8AHiHh8Eu7pU3FIk0DIcto/tdHFGUrqOBTVkmkpQRkLx", - "eYFBF2YWksRsBuE6jF04Td0Um0X4pCQWswqvCQr+aMybOkituJVhBW+K2U6ioRCkdnCmyBhfHAddLGvg", - 
"92gB6wi3P+dBOtyCcJHxqyrALupf5BLsxsQ2BxSkP8g+Y5ypxW5qo0z0zN/qUhpbz99WH7a/VkXGauX3", - "iomzh5IroXUvHQhsQ3ig8q3C6RMiF6EE4Goh9AeYO7fkLfjpf7b++SLvdu4OjRuyVDs8t7+hx3afgXbM", - "iLdjPTLma9qPYWa4RXKQN8mN32NMb+gs34VevrHbUHaIB1oWiN50sGgRhpdlL0Ipdj/vNqN6saaT682O", - "8J+FZJ8Ex6R9nIvQRGRcD8h7rEAwBw38XhHMteoRDnNa+97gwS/pLARbcnT/j4E43GH+SKy4Z/os9U9+", - "k9CxHftWg8dUk9WChZjkn4I08qc+1f5MsfeQO4eTL0C/wLD0gdFFFkXAt2SR2bB3GVNwL22NibrnOsB+", - "xWJ4b06dSjHB1WHwz6XIUr+jAn9yCTqS/FQ77e2bCeaplfnu6dPj/UpjxIr7/OIGVvwJPeE5vL92wLtL", - "1tBqIRSepfK9teEvG2nBEGR0aNnKhiyuC6OxX6nfqA5vtfCmqIrC04IZfeBP9zR0ypaw3a1TELcbjxTv", - "xusdQv2diQu4Azcs35lJmoA/MP+hNOXyh4z+n6WGQJcgJYtAEWXrMN0OHFcThB+PtvmIvB6TPObp8XVU", - "7DVAUrulIiIEOo/8nvML61vvjkuUcFT98nlJwebd2bghCb3G7ET2Cc752x+7IcBUNuVyKt/+uCNGTkaj", - "etL2joH3Cy3SmxKakCGYcbbzy3mSQMSohnhNlBYpRgNFpslc0hBmWUzUItNG6Q/I5YIpkmD6CB6pGcf4", - "p5RZqiEiSxaBwM3yu0P3qV6zHGwAusPStct1CpdwrQ827G5W+GTMHi3FFaitaQsarn0HLLjGYLTGemF7", - "+l0IDMAnaaarBnlXoqcZty3uzGPMHU+xACI4DV6D5BCT84TOQZHn78+DXrAEqSwoo8HJYISKMAVOUxac", - "Bk8Go8ETl0WKGzbM82yGs5jOc60QetTCW5BzwJwZfNJGqOGaKXR2CA6qR7LUnBlJY1BPps6SUaKyFOSS", - "KSGj3phTHhGs8Mi4ZjFuW/H0S1heChErMg5ipjRwxufjALM2Y8aBMEXEFLnemEszIfNSAxSULqUM0xcM", - "rVgZFwWnNlksn+UVrt+iApT+UUTrvaroG9ye72bDk5svye6hFiTBbXWp73+Mg37/igl1ZdM5+v2IKXPs", - "7s/TbBx8PD48A8MC5Cer8jlzuLdJWGVvh8ejkcdgQ/gtviOs9ymW5pDdLID40gue2pF8Z79ixmGzlcSX", - "XvBsl/fqfRiwKUGWJFSug9PgV0uXBYgxzXi4cEgwwDuY8bWSerM0FjTqw7UGjnZdn/Konz9rcC6URwT8", - "iq8ZljCSMTHkWAxBPrGUUBku2NIwDFxr7GGgF5CQjBsRO1yIBIZXyNnDcurhOBuNnoTGXMVP0BtzBZpI", - "wy9JdQa7KsYPYEOSc+GYf0U2tPt1Viz1OY8+uD3exI5JFmuWUqmH5njXj6immziy3MruNK/yGcOaFv24", - "JxhYNEZihf/qw/trFl6J2OAUDxnmKBrTEFytUY6u/bDeULDP+7/T/qdR/4fBpP/x80nv8bNn/rPQJ5ZO", - "jBXQBvH3kiDzqlaDL2ogS20EvKCAEuqjJFO6SFFLKGczUHpgxOJx1Yc4Zdyw4DadV4Dnij981v5G8VbB", - "7mEy7sTnxy6owZICRD2PmLNcUzAHU0QCje5b4LVEUIHNCpEfUWUEkjquCsFiiU4aOrtlaLujJCKzeeK5", - "7Kvzctn95QaqdJNzsN1e5lAVZkvubSeX3EkE0b2i7YIlWWzzH3Cfa91m/NZkA0foOupGT+G9uiPstLxj", - 
"uyPnVuavlDL42jZZx9qSKTZlMdPrwoD5ZiyVn1nkkirFquIMbKA5knTe5sRmnBuTPnlkXbg5RdnODj0i", - "nJchXluzeyYkoWZaqW1tf89Mz5vdHuZsCbbKxYmMGKiCwZhf1gpNt/RY8FkBRWONOyLNVuOOQ+WGGegb", - "kRcIii3oQlmGaKKIhwbFGDRuk91FQdodYaBV8HYzye3c5GZl94uFt3m9WlKFy+VxqBRCNmMQVZhA7SLK", - "scZgcgXrLSzuioLKeTByg+zMCy4v3HQD8tr8XMYWKpUNY+6rVxiQVygaDGASFsZ0WELB4JXXe0QBjLkB", - "xl/cQKgmeY+HcM70YCYBIlBXWqQDIefDa/O/VAothtcnJ/ZDGlPGh3awCGaDhRU1zse3EFxIVXXl9GNY", - "QrleRTLlPLih2woVA6TK2d0WCyLyugdctc0dsUOzmOdQbkCEIrV8S4rMqp+qAYp0uQPhqyL82y2qLukV", - "lGHiuzJmWtHuLw5HG60XltA5DFObnVHOtP1I1LJXSgAIDnqvCH1BU51JY5qWCMr9w1vQKeK4W4jZOD5Z", - "ulh3vDaGxVAY3s7j7+Y7XTE/KpK0bshgzyJj7hiWr5WMOQulFki3YTrGSSzmGGbXLLxSttWRTfKw56IK", - "BZEpLOiSGZKma7Kkcv0PojM8MLtGZTkDD8b8N2M/TYVeVJaCA+ZrJZgFYMFIpVgyPGHqUrzhzFbAJ66u", - "TTNc6lExBlpp5QTH1pU6pTpcACYWQ+zSzZwo/C8n2N3hot93zR5/If0+Wn5kRKzbwdqK1vHwXz4JeZGH", - "0++I/SoJHodKR0de38j5zgJT2goWPVQbo821tdxFROadJzqEowuh3BFemhGaQzFjIyXr9FvSWtjlVRvA", - "urHg+gfWQiWeuIKr+70r48FT5/6Vz9r1JpMe9fWrO1znDRdDfDIvQr4Bmp+Oftj+Xr0l9C1GETqWY0hj", - "poa2veqkKGdEMsl8nrJ6C9q7cpf5G90e6hItc0PsOr8h1rUrJRRDlOX253ixPVd3wIttCnvXeGn3zD3Y", - "HVGgxC4xuhlnPd3+Xr3T+K34MRDyamOoJt7y2MUGlL2y8YNvG1uY6PYXQBTio8CRWPFY0Mhw1+QTwwyX", - "OWhfRpXOJFeEkt/P39sUnkrIyVZ4I7pUfrIo3Rq1XlwN/Lv5XzL5O0sxRCZpAhqkwirGnXtj53EwY0Hn", - "i8KCf/PenxmgOLCRvjw9r04DvWr4cVu638e9lLPb1xsdKM2u52ssUnuQsKob/BDp0iGrKkIIzQnNLbmD", - "XpWOdiBYTeXgk9LkSFNZiZcmueMF02HMWMcb6XrMNxA2+V3piIjZDKQiis05tlvkOl6TGVUaZDEh1mXy", - "aMwjqH5lPlMJWMH9iaXuQEzDBYOlgWQKujkKspHfIV/hKrNHD4Wtep/bPTyK5aJ3cEB+ZvMFSPtX0fCN", - "qITGMRToVWSaaaLpFZBY8DnIwZj3LSaUPiX/Nti2Q5CTHnE5hgaxEJGjfz8ZjfrPRiPy9sehOjYvupS0", - "+otPemRKY8pDY0qZN4eIAXL075NnlXct4uqvft/L8Zm/8mzU/1+1l1pgnvTw2+KNx6P+0+KNDoxUqGWC", - "wwRVdJQdAPJPZS2W26qgV/nNgowflK+ybF+p6Lj3RmLx0vH2/2eiUdeXXYhHI78meaqhE4t10VB0ftxV", - "JmxtrvktaNj9bMKy+2WboNDKq7TWfIBk8xPoWnPQvAtAC3sF2cRMabTTVSfdlD1KD1MmD5NSylV7SKU8", - "vsU2lfYB0gqmzyHmbfp5mzaw4WjX8S3v0HmHYefbOLphmLd0dzxAPOEKsCcjJiRuYmYJNCoO3V5e/gA0", - 
"ckfu3VgZJ8tNQjP+t8LNItSg+2Xt+Y1sCRT9ZnW35hm7J2Ix+C2PMnjrX04cCqygn1RK3jq5u115eHe5", - "Zx0ljodyfGWoPFPsASLyArSn8XcFdUOshlQLlhYYtvmu3UHb53EsVnlaLKZ3Mz63U9i07BicQnBpMBIS", - "4WSAbSw/6EgDz82DW8v7LiySjsTtQzo8V7q1OIN2t57PuUDdNz3apUZvbuO8ufwDd+HWUqMRS0VW9EMX", - "dZ5s6Zmz16rskLs2N1Z9UHS8IL/Zxoe2wINpVfo2W6lhvg7iPuaw3s1bY419ST+qVgZXSleKg7MWu/FB", - "tRrhBqUCm/jhQML+naUlWVcQ+JchclqtQGqQaIvenXNlC8Hv6xrt4osx384Y212kNY/omDdcot31R87H", - "eWvMlXtVvPchNVwvhQrZygy9+2Na8ymd1C+B7i6zLdtyxWBNBFSc5eu2lliyNO8u4mDD6qKYXeEmkX4f", - "n+mX7229c64hL3I83Im4eO728C8uMprk2iE2Vnk6TEfxSaVhxV2dATw9MXbH7YG1vrhsb/fKXzn7MwNf", - "I4eSK1duO7bWxrfPmrhMctvVtvdEbHYxVSe12SvbPkXVSWz4Od/yL67oH2z/jia9ibQkt4aTAh0PztPg", - "/A4FHjf5Hra7GjzN+3JEiTR9+Ii6wI4UZkVYJ+pxHjWRNLT5p52uJNt88ZU6s499RVw13UIarrWF1usP", - "2hYPqF6q5svnvjir9DAsz8IuPxd7r9EIV/05+Gf/4uKs/8LC1r/03jX2FiJGXauJGTHDY1NEl+571BRi", - "x7XIXR6la4k6T1Duy0MkU9zo1i67SjcrdguKNYf5zUlGv5lHdnF4vqwYX7Tl/PyKce+ikdCs6K7V2Vir", - "dgn9d0+fdoGZ2FtlvWBtbMdlmW8XjX9Dd+yB3oy8b+yDV6PoljKaM8+HLFO1YjFXw3Jj/SE6MXfNcDvk", - "cIMg7B1lGyk3FzT5vZVFwwlvc1b/NDMRx2LlzzyodSStNBFrolnweF3UZxA2y+9XY4o40DYwZrdW2Wee", - "ytr9s5UPTFxT3+DeNFpxh+NWVWYI65vWXj7NYIAmYgnSTG0ZJC0uTh66y4S63R9n+W1Dcsq0pHLdunYZ", - "Y6H2TrfyHhd3STahc8q4sidxd1M2cR3Ix1xwEouQxguh9OkPjx8/vp3Lty/t7XCu83bjwmLs4abKO5rd", - "9erFxX4ex0nr3uoXVjvcxcmu8870r1z10HVXt6/dQPdt0PeZKH/Wuit+WF4AbynCQ5yOQaxMQu7oPuhX", - "bga5s9rZ9t0jX5cO2jcmeSigvL7IXY7+LeC943q0OoLxso+tGMYLRu4WxbW7ae4Hx9VrVHyq0N6L8o3h", - "lm5A7ufyxpUvwytWr871Ivo1wzLP7efyyl0um0zCLRe17H5YOAih1YuyvqkGNe9eP8j8AiNKipu+crO1", - "m+LsXbFbac7exfXXobr6vWT/obubJyh13tW2gfhUcQGT9/hbv6bpa9PeHesxuyifCnO/PMgs5cpNSXZ5", - "3aiP2A42DT71l5E6tXup7sl+qlwT5SG+H6vXNj1Yj1up+ew9VpvpUGR6myOu3DyR6Y0euXuSRzfwLHku", - "3drqY2pcp2Vs3OZ9Wv8JoNxBAKVC1SLTDYdZebt/GYT1S1dbOVzeCHWXhdqtTv3dfZu6bny4txLte+pt", - "URR2pxKWDM+Medf/6iUCLay74rJOKZZXn1URvzF6VgStijsHyuyJAcGWSiIxqqLeKSnL++C5qEDxelcg", - "C4WeP4y17daC7aIRN2yYpE9vXE5QuYPEhh5rAq74tf/KXTbXf77x0jcxK+/ka99UNyA/ZVRSrsHmy02B", - 
"fHj14smTJz8MNkdAaqBc2HyUgyDJL1o9EBADyuPR402MzYwkY3GMN7lJMZegVI+k2MaUaLm2vk8SU1ul", - "WtnuD6Dluv98pn2XCl1k87mtFcVuqo3b2ivt0OXaMkG5iI3X+355wAWnts2VQl4ETNHcQaLEzGqPzvrB", - "/KpGWyRwA8u1qAfYpFBqF0O2k+xb/Jp3kpcFlLdWYEfjuDpsfdtaVxJ4Uu/uWvn6r2Py6t6TTSyaX0X5", - "8DpE4Q4UHRJLuTYg73i8xgKDUtalIMn5SxJSbvsGzpnSICGy7eCMBBm0sSzSTUiuXFJ0Zzj2XIS0v3nl", - "UuHutxmfFmld/eBC/l8AAAD//90+RAzopgAA", + "H4sIAAAAAAAC/+x9aXMbN7boX0H1mypLb7hZsjM3mk+OLU/0YscqSX6ZSejHC3YfkrjqBnoANCna5fnt", + "r3CA3tHcJHmZe6tSMUV2Awc4K86GT0EoklRw4FoFZ58CCSoVXAH+8RONruCfGSh9LqWQ5qtQcA1cm480", + "TWMWUs0EH/6XEtx8p8IFJNR8+pOEWXAW/K9hOf7Q/qqGdrTPnz/3gghUKFlqBgnOzITEzRh87gUvBZ/F", + "LPxSs+fTmakvuAbJafyFps6nI9cglyCJe7AX/Cr0a5Hx6AvB8avQBOcLzG/ucTPay5iFt29FpiDHjwEg", + "iph5kcaXUqQgNTN0M6Oxgl6QVr76FEwzrS2E9QlxSGJ/JVoQZjaChpqsmF4EvQB4lgRnfwQxzHTQCySb", + "L8y/CYuiGIJeMKXhbdALZkKuqIyCD71Ar1MIzgKlJeNzs4WhAX1iv25Of7NOgYgZwWcIDfHrctZIrMyf", + "WRq4YbwTLEQcTW5hrXzLi9iMgSTmZ7M+8yyJMvMq0QuwEwe9gGlI8P3W6O4LKiVdm795lkzwLTfdjGax", + "Ds6etlCZJVOQZnGaJYCTS0iB6tq8bnSz7XNAirtrr+LvJBRCRoxTjbtVDEBSoZjbs/ZI6/ZI/zhkpM+9", + "QMI/MyYhMki5C8zQJSLE9L/AMu1LCVTDKyYh1EKuD6PUREQeQnmX2tdJlI9OzIPkSISaxsSiq0dgMB+Q", + "vzx/fjwgryxmcOP/8vz5IOgFKdWGzYOz4P/9Mer/5cOn096zz38KPCSVUr1oA/FiqkScaagAYR40M4S4", + "9MYkw8H/bg/e2E2cybeZryAGDZdULw7bxy1LyAGPcJqHB/wKQiS0+WHQs6gN+0UEXFt2dqQr80kqKyEv", + "4nRBeZaAZCERkizW6QJ4E/+0//FF//dR/8f+hz//ybvY9sKYSmO6NmqKzfdczwJQcrbW9DKTErgmkR2b", + "2OcI4yRldxArL2NLmElQi4mkGrYP6Z4m5mkz8M8fyVFC12QKhGdxTNiMcKFJBBpCTacxHHsnXbHIR1DN", + "2fCxjfB7t1bS+RfQbpGk8w7NVmg0q+J8eiaCmK5rQn/UFPqvzCNm9QmLY6YgFDxSZAp6BcBzQIxWI5RH", + "RGkqtaPeRCyB0Fg4vWS4a4BgcZYYQEc+nNxH85m92Evx+QXKOxmBhIjETGnDln/c9cj6Q1XNpJRJVSxR", + "L6TI5guyWrDYAjFnfD4gbzOliTGuKOOEahIDVZqckFQwrtWgCmkT5MqGJPTuwv56gntX/tFczcYflYZ0", + "guieJHU1/3xPlEuIqWZLIGZI1Vg1OTKMZ5DBONPMaDcz2PF2xONokxTkRME8cfZoaYuMuo2RAiDEhoUq", + "BUncOGYhBf2RtxYI8rQG0dOtJkKnbijM6IbOB6XoHDxk2Bg4f9A79h2EmYbLmK5XyMS7ypL6Vrm3DMGC", + 
"HZGUQ5LQWCdN8RN6TRZj217j38P/Q5fUfsQBKmMPyI0xwcyXC6oIDUNQyCxPUjqHJz3yBA8cd/pJD0XG", + "k6kUKwXyCVlSyYy0VoMxP7+jSRrDGRkHdEWZJublwVxocfRkoXWqzoZDsM8MQpE8Of4rkaAzyUnlcc10", + "DEfHfx0HY+6ziYwZKzI9URDWqO2HFrW9pXdINnaNzMhelqDucexRWGeEKfLDCKnLvhOcnY5Ge9Eabv6O", + "9KAQ4D3JwbxkOKdBBeXqWvQAOZXXh0LiJ46Ejdot92dGWQyRb9dlAXSDuhZAljTOwGESIjJdW3se7WI2", + "I5Svj62wiEB64LnWlEdURgThJTMpEhygurAWPEpHItMbBhOZTjO962gZEnx7uN8WoBcgywU5fomIe2WW", + "xfG6HHIqRAyUt6gjn8BHIK9ZDBd8JtryiKlJxORmqNCAZorQ8jQw8MDTMweaiaH/9nBvjIpLUFFbNwLy", + "ycCepxOqg7Mgohr6+LZn9/xHJbMseziaMq3IkTkT9cg4iOTqTvbNf+PA2MXjoC9Xfdk3/42D44FvBk59", + "cP9EFRDzU26Hz8yUQnp3YudDVW7ytImEfYTJdK3BQyfX7CMKFvx5QEZkVgGDgRpsP8/iGh10tcl6OR1U", + "cOg2vYucrtdKQ3K+LDRyEzEKHyDhgvI5EDAPDlryYxfyo7MZhIYfdqbDQ3FZTHUoUvejEr+jCLeUmN8G", + "Fdv95dX5i5vzoBf8dnWB/746f3OOH67Of33x9txjxjeQj7/2ug2W1yplKVwbQ/1Q9YH+XEJn2lhZhcVv", + "9gPHJhGFxKNEFkBjvZgAj9AEbm/J+6s3xc7akewrwwS0ZKEiCn2ZfsGRcT3xo+4ml22rBUjA4ZdM6ozG", + "iEBHwEwRHKRDaWWcm48bKdiu24zExYrkr/goV51Opll4Cx1q8PqU2J/JFMzeZgoiVIBaUq5SIfUExSFT", + "RJ0ee2mu9qB/luIZJ1uLqSr0uIKpEgioAdp7iFypDSg18/wG02scg+TPbV9WMe/xVrdNZZ/ri65RxUZW", + "0JnakwvsSw1q7aD7h6RN3C634uMHINTQOjvi9dclV6Wp3oACLbMWBm4K6jUvM6WNgDhytqgRFVz3rBK1", + "YMxASoh6BHQ4qMBQUsO9WKaJlz34p5zXLGSD+6ucPBScA4YWcPHQNbt7DjlaQv4WIjliqvz1W2ZrH+O+", + "YUqj7eHR01LSteHLttZn3BqhZtuA69yYKpwvm+JbhWXtcSO9EfMO++gFicUc51qXx4dKkK1tKFX8Bg3L", + "WsyLg5Y5PQ+6DrRK0yT1YMycV830JUQrqkgqRZSF1hLaxUTv8F5Up/Yh7K1Ywj28offxCiZiCXt5BbeF", + "q0q/HxjxqYQkWhwUrtp1pJ3DVWabD4+vRKC61FQZJwKlDfCGh/LjzbYwSy9QMtw2sBKZDGHnMZuH4nyC", + "XmUVvh16d5vbr3sq/L8Bx/DLu19IntHQ5l5xW/Mj+dTWBY8M8xsNZQ/yg+1HfnHrXcsl1eHChXAO5KuO", + "GM6r7thN4cc6eTbaP5LzqjOCMyAXMyISprVR0pkyansBZMHmC1Ca0CVlMZ3GYF8xZ2IbLkPycaLUqb0f", + "Rr3TUe/kee/p6IMfRNzaCYti2I4vo8Yy6BmQMwU26G2O1MZi4yRmS2OxwcqomiJ4N5SAyzSH2FCzJfjP", + "rxLw9DQJF1IkzMD+qXt2fJS8dI+601e5/vwArgUBrjIJhGlCI5raeDGHFTFQ1/yUSBO4lwug0SyLezhb", + "8U3cQZ6dobNXnSGzgmxOT0a7BdAuJSj1CxxI2VEmqQVqY3DLPVXoDUNTqEgwotUIgVRJ1KB71LPPUmO4", + 
"0zS1WvTg+FaREJBsU2m3sCap2R6izObwEAZ7aTj//G9cvMuMrtbJVMQ4OU40IOc0XBAzBVELkcURmQKh", + "lWeJylJj6lmv7V0ktBDxmB8pAPL3p09xLeuERDDDyJDg6nhAnJdfEcbDOIuAjIMr9P2Ogx4ZB9cLNtP2", + "40stY/vpRey+ev18HAzGNuZlgzxM2aBdiADSWAkDZSiSqVNZyuVT2PH+rHO3If6Fs/35hk5x2D02tCGt", + "cXe98loKI/DP7yB8sEAONctLMPS65kaOcJGpeN1WTVTO63G/Pz60s9XsSFTOswSaMcqtVEXVRApRj9v5", + "l5G5iJzdDwxfE/MqSSVbshjm0CF2qJpkCjx+xOaQVFlyME+boXgWo/bIZXw7pcuu3eOmw41GzSMkUQuI", + "42LLjS7IuNcSD1eesX4T8tbwcHkkOaJVt+KxG9HFCOwkjPsWsN3mAr7sJq9PvlwAh7NPrRy+c75kUnCM", + "phZBOgOrAl2oYrf1A+/Ruhlo2y+21o3A7hCaRedWNrxX/IxWma5AWLGONhPmWqmIwbcpzaw/f6ylgLyn", + "DLhjeuIP2LqlEvMIBp38I9hw2mT6wzO/N/2HZ33g5vWI2EfJNJvNLGd1hNN2HUxkunuwz93Y+4XF8WFC", + "9JrNjZJF6rU83KDeOsoUPl4TasHN+dXbYPO4VZ++e/yXizdvgl5w8etN0At+fn+53ZXv5t5AxFdoih6q", + "TdCMpeTy5h/9KQ1vIerehlDEHpL9FVZEg0yYWXko4izhaltiRS+QYrVtLPPInhkaOGrPArphx65TuuLV", + "DYvjd7Pg7I/N7h+P6v7ca8ZYaRwLc7SbaL3ergVfuKcJJamCLBL9YvVHlzf/OG4KVmvZoyLKU5oxC8do", + "pA516UfahcvMaSLOHmiqizBnBCNuD0Vpaybz2OHTtMXBhxZeD5DnFxW3IJ0agUSJMqNt4ofUl8757rpA", + "1sUrv6h1v098r9tM/T5Vhu8hIqzMDvUo2cJbl2Us6vCpG3N8QrXfG4jeOouNKpm51/ZwCHay2iGBldzZ", + "rYoAC90gldJskoae9Z0rzRJqDiMvL9+TDL2mKcgQuKbzqhbkmEa2RY2e5+qTsFltrxbU6la7XdtslF6Q", + "QNIV9i8hlqAQ8ySBxNiIFvoiI6BDg3vdLZclTnUtzFwGeyz4fl3UjdiI8cOUziuqqZFkK8msA7RBejbj", + "hvE082QRRFTTnQyLqDrLYKv3sBj3w9Y138teNOC45Fhlhmuv0DyhgXcRSZn0aANb7vFBsKtLxS1FAi1T", + "Ovaxna7PSUrXsaCGTFMJykgoPi8w6FKlhCQxm0G4DmOXEqLui80ifFISi1mF1wQFfzTmTR2kVu6FYQVv", + "KGwn0VAIUjs4U2SML46DLpY18Hu0gHWE25/zADNuQbjI+G0VYJe5VuTD7cbEto4BpD9RbMY4U4vd1EZZ", + "rJC/1aU0tp6/rT5sf62KqovK79XI9O5KroTWvXQgsA3hgcq3CqdPiFyf7lhj0aCF0+HVCVFaSKMCQhwi", + "d2y2HDwYTZjcwtprXLywebi3sCYXr3xM0xXg/8lG9l1OmcfN0BUjvj7tG0uaamZs1iJG/P7qTZnBl6fy", + "vnj58t37X28mF68G8mQQxiKLZjGV4JY+CEUyDryZA6mEGbvb4E11ztIZu0MrKkuN9IKIWOQof4bqvOk6", + "DmiGobmmaDcPkqNMARnjI+MAZ7k68ac5QChBT0pU+YxA80ieNm0e2UaABQYKHPYaxOCb2EuloQTgaiH0", + "VbEDDxBN+tlGkYoKp7lzbWyoB+qIL/yGcYV9Btqx9tCO9cQcstJ+DDMj0yWvpprtX4W4x5jeAG++C718", + 
"Y7eh7JA4SUnqm46/LcLwKpbrUIrdvTLN2HOs6eRuc7jmZyHZR8GxPBLnIhTTogbkEms9zXEYv1cEs9p7", + "hMOc1r43ePDrYwvBlmqo/2sgDneYPxIr7pk+S/2T3yfBwY79oCkOVJPVgoVYTpmCNFqyPtX+TLH3kDsn", + "PVyDfonJEwfGwFkUAd+Sr2+TM8rIl3tpa+TePdcB9msWwyXIhCnFBFeHwT+XIkv97jT8yaVCS/K3mk9i", + "35x7T1XyD8+eHe9XhCxW3Be9MbDiTxivyeF93wHvLvnZq4VQeOLP99YGaW08EAPl0aEFwhvy5TGp+rX6", + "jerwQUuci/pzPNOa0Qd+s8XQKVvCdudjQdxuPFK8G693SEjpTK9xO5CyFO4d4rQJDzunladC1mOP/zH6", + "sSW/L4UsI1b+HPOfb24uy0Tzoojrh+fPT59X/ZOjk2deF08j2Te3H4cLkcDwFiSHePhKrLgxQ1XLorzc", + "mgu8YnFsyNglBNfbAvhn2TFJ63QSFoeUjdZAfpjZliXqyRA1e2+PtvbYGEOlnmdljgIxUxo4yLMfR6PR", + "0OIoj9OvlHliMBiMg+PBmL/NzObEawJ3YZwZ6rWx5GIlA1/d3+cuur1ngf9M0gT8aU9X5UE5f8jYrbPU", + "CNYlSMkiyKsbiKOZ4yr1nYy2eeC9/uj8DOTxJFdOw4Ai8oHaDCDQeV7NBb+2kcvuqG8JRzXqmRcdb96d", + "jRuS0DusX2If4YK//akbAkwUVq7q6u1PO2Lk6WhUL+vcMa3pWov0voQmZAhmnO1y/iJJIGJUQ7wmSosU", + "+UNkmswlDWGWxUQtMm2M1QG5WTBFEkzOQ4cl45hdImWWaojIkkUgcLP8waZ9+ltYzWMAesTmFjfrFG7g", + "Th98ILlfawRjrmspbkFtTQrTcOdzX8Ed6kGNHYWsb3EhML0pSTPtq4NqloKZcdtq2jzGnPMPS6SDs+AX", + "VBnkIsHiiReXF0EvWIJUFpTR4OlghAZcCpymLDgLTgejwamrM8MNG+ZZjMNZTOe5NRN6zJm3IOeAGYn4", + "pJXZcMcUupIFB9UjWRoZIdkY1JMHuWSUqCwFuWRKyKg35pRHBGvAM65ZjNtWPP0KljdCxIqMA6tnGJ+P", + "A8yJjxnHEgUxRa43Zv5MyLwYGQWlS9hFpWJoxcq4yKrtcJHP8hrXb1EBSv8kovVefbYa3J7vZiNOli/J", + "7qEWJMFtdcr0j3HQ798yoW5tsly/HzFFpzH052k2Dj4cH57fZgHyk1X5nJYZ2BTXsvvbyWjkOWgg/Bbf", + "EXYEKJbmkN0skf7cC57ZkXxWSjHjsNls7nMveL7Le/VObdi2LEsSKtfBWfDe0mUBYkwzHi4cEgzwDmZ8", + "raRe63Xsw50GjueRPuVRP3/W4FwoX0EkvmZYwkjGxJBjMQT5yFJCZbhgS8MwcKexy5leQEIybkRszRgs", + "px6Os9HoNDTHLPwEvTFXoIk0/JJUZ7CrYvwANiQ5F475F2RDu1/nxVJf8OjK7fEmdkyyWLOUSj2cCZn0", + "I6rpJo4st7I7ibZ8xrCmRT/uCaZtGCOxwn/14f1Vza9FbHCKh2MtSBrTEFw3ghxd+2G9oWBf9H+n/Y+j", + "/o+DSf/Dp6e9k+fP/Wf4jyydGCugDeLvJUHmfW8MvqiBLLX5RQUFlFAfJZnSRQJwQjmbgdIDIxaPqxGa", + "KeOGBbfpvAI8Vx7uO6VuFG8V7B4m4576ooQFNeTxh55HzFmuKZiDKSKBRl9b4LVEUIHNCpEfUWUEkjqu", + "CsFiiU4aOrtlaPsnJiKzVTi57Kvzctkf8h6qdNMxtt2A8lAVZpty2V6PuXMToq+KtmuWZLHNLsN9rvWj", + 
"9FuTDRyhy7MbPYXX9ZGw0/Lq7o6cB5m/Uijma+xqHcJLptiUxUyvCwPmm7FUfmaRS1kXq4oTu4HmSNJ5", + "mxObWUSYUs8jG3rIKcr2fusR4bwM8dqa3TMhCTXTSm27f/XM9LzZD27OlmBrCJ3IiIEqGIz5Ta0VzZYu", + "bD4roGi990ik2Wrtd6jcMAN9I/ICQbHlsijLEE0U8dCgGIPGbbK7KPd9JAy0yonvJ7ldeMes7Oti4W1e", + "DZxU4XJZciqFkM0YRBUmULuIcqzgyrMNNrC4K7ks58GII7IzL7i8cNMNyC/m5zImVqkbG3NfNdiAvEbR", + "YACTsDCmwxIKBq+83iMKYMwNMP7SMUI1yVNHwjnTg5kEiEDdapEOhJwP78z/Uim0GN49fWo/pDFlfGgH", + "i2A2WFhR43x8C8GFVFVXTj+GJZTrVSRTzoMbuq1QMUCqnN1tsSAir3vA1TI+Ejs0SyUP5QZEKFLLt6TI", + "rPqpGqBIlzsQvirSFrpF1Q29hTK94bGMmVaWxmeHo43WC0voHIapzX0rZ9p+JGrZKyUABAf9qgh9SVOd", + "SWOalgjK/cNb0CniuFuI2fwTsnQ5GvHaGBZDYXg7zxsx3+mK+VGRpHVDBruaGnPHsHytINdZKLUEEBte", + "ZpzEYo7pIZqFt8o2Q7XJSfZcVKEgMoUFXTJD0nRNllSu/0p0hgdm18o4Z+DBmP9m7Kep0IvKUnDAfK0E", + "s1csGKkUS4YnTF2KN5zZCvjEVQ1rhks9KsZAK62c4Ni6UqdUhwvAsg2IXTKvE4X/6QS7O1z0+64d/K+k", + "30fLj4yIdTtYW9E6Hv7TJyGv8zSQR2K/SmLSodLRkdc3cr6zwJS2gkUP1cZoc43vdxGReW+6DuHoQiiP", + "hJdmhOZQzNhIyTr9lrQW3gOhDWDdWHAdxmuhEk9cwXVVeCzjwdNF5Auftett6D3q6707XOct2euZ0PdA", + "87PRj9vfq18a84BRhI7lGNKYqaG9gGFSFIsjmWQ+T1n9korHcpf5r8I41CVa5jTZdX5DrGtXSiiGKMvt", + "z/Fib2XYAS/22ojHxkv7Vo2D3REFSuwSo/tx1rPt79XvInoQPwZCXm0d28RbHrvYgLLXNn7wbWMLEzT/", + "DRCF+Chw5FLWDHdNPjLMcJn7qlJsmxhFKPn94tKm8FRCTrZ/BqKr6NFZujVq3Xob+Hfzv2Lyd5ZiiEzS", + "BDRIhTXiO9+ek8fBjAWdLwrbqZj3/pkBigMb6cvTSus00KuGH7elqX7YSzm7fb3XgdLser7GIrUHCau6", + "wd8jXTpkVUUIoTmhuSV30KvS0Q4Eq6kcfFSaHGkqK/HSJHe8YDqMGet4I12P+QbCJr8rHRExm4FURLE5", + "x4bs2Od1RpUGWUyIVe88GvMIql9h5ZYE7I/xkaXuQEzDBYOlgWQKujkKspHfIV/hKrNH3wtb9T61OyQV", + "y0Xv4ID8zOYLkPavop0mUQmNYyjQq8g000TTWyCx4HOQgzHvW0wofUb+ZbBthyBPe8TlGBrEQkSO/nU6", + "GvWfj0bk7U9DdWxedClp9RdPe2RKY8pDY0qZN4eIAXL0r6fPK+9axNVf/Usvx2f+yvNR/z9qL7XAfNrD", + "b4s3Tkb9Z8UbHRipUMsEhwmq6ChzpfNPZaWr26qgV/nNgowflK9ud1+p6Lj3XmLxxvH2fzPRqOvLLsSj", + "kV+TPNXQicW6aCj66u4qE7a23/8WNOx+NmHZW7hNUGjlVRoXf4dk8zfQtdbLeY+VFvYKsomZ0minq066", + "KTtAH6ZMvk9KKVftIZXy+BbbVNrvkFYwfc5WhWD6eZs2sJ1z1/Et73/8iGHnhzi6YZi3dHd8h3jCFWDH", + 
"W0xI3MTMEmhUHLq9vHwFNHJH7t1YGSfLTUIz/rfCzSLUoPtlZ4972RIo+s3qHswz9pWIxeC3PMrgveA5", + "cSiwgn5SKdXs5O52xezj5Z51lOYeyvGVofJMse8QkdegPdcqVFA3xCpetWBpgWGb79odtH0Rx2KVp8Vi", + "ejfjczuFTcuOwSkElwYjIRFOBtiKyEFHGnhuHjxY3ndhkXQkbh/SP7/SC8sZtLsVa+YCdd/0aJcavblJ", + "/ubyD9yFB0uNRiyVXVm+c1HnyZaeOXutyg65a3Nj1QdFxwvym20raws8mFalb7OVGua7n8HHHNa7+WCs", + "sS/pR9WK9krpSnFw1mI3PqhWI9yjVGATPxxI2L+ztCTrCgL/bYicViuQGiTaonfnXNlC8Pu6Rrv4Ysy3", + "M8Z2F2nNIzrmDZdod/2R83E+GHPlXhX/jVQNj1OuQrYyQ+/rMa35lE5KuttcZls2PYzBmgioOMvXbS2x", + "ZGneFcfBhtVFMbvFTSL9Pj7TL9/beit1Q17keHgUcfHC7eG/uchokmuH2Fjl6TAdxSeVRiuPdQbw9HLZ", + "HbcH1vrisr3t+95z9s8MfI0cSq5cue3YWhvfPmviMslDV9t+JWKzi6k6qc1e2bY/qk5iw0/5ln92Rf9g", + "+3c06U2kJbk1nBToeHCeBud3KPC4yfew3dXgaY2aI0qk6fePqGvsSGFWhHWiHudRE0lDm3/a6UqyrW1f", + "q3P72BfEVdMtpOFOW2i9/qBt8YDqtcu+fO7r80qH2Ep3IMgXvgAa4ao/BX/vX1+f919a2Po33tuI30LE", + "qGs1MSNmeGw569J9j5pC7LgWucujdC1R5wnKff4eyRQ3urXLrtLNit2CYs1hfnOS0W/mkV0cnq8qxhdt", + "OT+/YNy7aCQ0K7rCdTaEq/Wj+uHZsy4w3RW9XrA2tpGzzLeLxr+nO/ZAb0belfu7V6PoljKaM8+HrKZq", + "pSyF4Zb2FWhDqXYLN9cJ0o6rCC3anL1+f31e5TO9oNrJOWVpD9nLnu9o7gqsnlGOyp5jeSMxswRspHw8", + "IDfl5cfYRw2rN2zWfKF8xtyCx5S9QkyBJlTZWnZ4ooq8k2q6jf3RVuznVhQeVs3aMY3UvTTmpVWSp/1i", + "iceY39TuFlZnY94v79g9I5e2rINUOq4d2DnNDIwbUg5adExzzXjcBD13r3OP1BoK90irn7BtXVAUCdq2", + "skPbd9lX7lH2CXxc+73aifALZ7W3L9r3BXlqfPHwBvhXSW/HNTdYHu+65UwLmTNLVDRBbIgUd2nIxjQ6", + "V9DeuCqkLWlsXxFjXq5cy0umn6j8ruyeLTItGbFxwbel6sZt2/aqcZ/752+ga1fKfwniMvPsQlf43ANl", + "c8y8g9eQKNJNakGkFoMODfUBeyTjrqrO3wLTYSUWrmJ5zH03ow82SWwj1LHbGLMFr67VUKZLqhxz1BBc", + "uJQ9Ms0vZQBvja89IxYSbWv6dmMPH/ZA9zBHs3bvVURyLOZqWFpV/vwcMXf3jHQcwhrWoL3+eaPZmp8y", + "nH1bdpvy3nvhn2Ym4lis/GmHtcseKp1vm5gTPF4XxZmEzfKrqw0pWdA2WOXdR8p95qms3T9b+cDE3ZcS", + "fLXjbHE9/tZzrCGsb/ro6jsWGqCJWII0U1sGSWO6XmEH+qG7p7VbGJ7nF7nKKdOSyjW5LN52d05xY3rj", + "ddnlFZmImjtN6JwyrqwbfirFSoHMpeqYC05iEdJ4IZQ++/Hk5MQawDjqgqr8kgktyJOUzuFJjzxx4z6x", + "MvaJG/JJee2nK3+WxaVGOh+xBA7NYKOnDd3y/KJue2e6R3K6LSjX/dIeDR/DLGzN9ZWMQw8cXSbiy3Jz", + 
"vyUNkRNuuQSs571GyC1FeIjTMYiVScgd3V7+yqWLj9Y4o32t45elg/ZltB4KKG+Gle6ZbwDvHTdP1xGM", + "9yhuxTDe3fi4KK5d+/l1cFy9odKnCu2Vk98YbukG5H4qL7P8PLxl9dYcXkT/wrDHw3anfOWazE0m4ZY7", + "MHf3FB6E0OodxN9Ud7p3v3yXyYVGlBSXKOdmazfFSby/eCvN2WuO/32orn7l8//Q3f2zkzuvwd5AfC03", + "lV+x5X6gL0t7j6zHOp1O14Wb6TssUapcQlu4szpQH7EdbBp86t9G6tSu/P1K9lPlBl4P8f1UvRH3uw23", + "lZrPXhG8mQ5Fprc54srNE5ne6JH7SvLoHp4lz33GW31MjZuKjY3bvKr4f7InHiF7okLVItMNh1lxCdCw", + "zMDyS1fbNqS8bPcxu7S0runpbtrYdd3TV+vP8pUif0VXl1TCkuGZMb/yp3qDUAvrLujSKcXy0vMq4jem", + "zrwrI9HuwqEydXJAsJ+iSIyqqLdJzPImuC4qULzelcVib9f15rBsu7Jou2jEDRsm6bN71xJWLiCzeUc1", + "AVf82n/t7vHuv9h4n7aYldedty8BH5C/ZVRSrsEmy0+BXL1+eXp6+uNgcwSkBsq1jYUfBImLox8KiAHl", + "ZHSyibGZkWQsjvH6YSnmEpTqkRR7mBMt19b3SWJqW1RUtvsKtFz3X8y07ybM62w+t40isJU63sdSuQ6t", + "vAtFri0TlIvYdBva96g3im4TtselQl4ErM/YQaLEzGqPzuYB+S34947SF8WAmxRK7c79doVdi1/za2Rk", + "AeWDVdfTOK4OW9+2VkKXJ23nsZWv/y5Gr+59uolF81v+v9f8mbw9cinXBuQdj9dYXVjKuhQkuXhFQspt", + "0+A5UxokRDarzUiQQRvL9fSMdiLD4+PYcwvi/uaVS5v4NjIliv3Fhfz/AAAA//9KpX3sB7sAAA==", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/server/lib/policy/policy.go b/server/lib/policy/policy.go index 3eda5332..6a114b00 100644 --- a/server/lib/policy/policy.go +++ b/server/lib/policy/policy.go @@ -26,6 +26,8 @@ type Policy struct { DefaultNotificationsSetting int `json:"DefaultNotificationsSetting"` ExtensionInstallForcelist []string `json:"ExtensionInstallForcelist,omitempty"` ExtensionSettings map[string]ExtensionSetting `json:"ExtensionSettings"` + DownloadDirectory string `json:"DownloadDirectory,omitempty"` + PromptForDownloadLocation *bool `json:"PromptForDownloadLocation,omitempty"` } // ExtensionSetting represents settings for a specific extension @@ -249,3 +251,40 @@ func ExtractExtensionIDFromUpdateXML(updateXMLPath string) (string, error) { return appID, nil } + +// SetDownloadDirectory sets the download directory in Chrome enterprise policy. 
+// This forces Chrome to use the specified directory for all downloads, regardless +// of user preferences. Set disablePrompt to true to prevent "Save As" dialogs. +func (p *Policy) SetDownloadDirectory(downloadDir string, disablePrompt bool) error { + p.mu.Lock() + defer p.mu.Unlock() + + policy, err := p.readPolicyUnlocked() + if err != nil { + return err + } + + policy.DownloadDirectory = downloadDir + + promptValue := !disablePrompt + policy.PromptForDownloadLocation = &promptValue + + return p.writePolicyUnlocked(policy) +} + +// ClearDownloadDirectory removes the download directory setting from policy, +// allowing Chrome to use the user's default download location. +func (p *Policy) ClearDownloadDirectory() error { + p.mu.Lock() + defer p.mu.Unlock() + + policy, err := p.readPolicyUnlocked() + if err != nil { + return err + } + + policy.DownloadDirectory = "" + policy.PromptForDownloadLocation = nil + + return p.writePolicyUnlocked(policy) +} diff --git a/server/openapi.yaml b/server/openapi.yaml index cb7cd4bd..386fe5bc 100644 --- a/server/openapi.yaml +++ b/server/openapi.yaml @@ -1091,6 +1091,74 @@ paths: $ref: "#/components/responses/BadRequestError" "500": $ref: "#/components/responses/InternalError" + + /fspipe/start: + post: + summary: Start fspipe daemon to monitor Chrome downloads + description: | + Starts the fspipe daemon which creates a virtual FUSE filesystem that streams file writes + to a remote destination (WebSocket listener or S3/R2). The daemon will mount at a directory + which is also set as Chrome's download directory. Chrome is restarted with the new download + directory configured. 
+ + Transport modes: + - WebSocket: Provide ws_endpoint (e.g., "ws://listener:9000/fspipe" or "wss://...") + - S3/R2: Provide s3_config with endpoint, bucket, access_key_id, secret_access_key, and optional region/prefix + operationId: startFspipe + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/StartFspipeRequest" + responses: + "200": + description: Fspipe daemon started successfully + content: + application/json: + schema: + $ref: "#/components/schemas/FspipeStartResult" + "400": + $ref: "#/components/responses/BadRequestError" + "409": + description: Fspipe daemon is already running + $ref: "#/components/responses/ConflictError" + "500": + $ref: "#/components/responses/InternalError" + + /fspipe/stop: + post: + summary: Stop the fspipe daemon + description: | + Stops the running fspipe daemon, unmounts the virtual filesystem, and closes the + transport connection. Chrome's download directory setting is preserved but downloads + will no longer be streamed. + operationId: stopFspipe + responses: + "200": + description: Fspipe daemon stopped successfully + "400": + description: Fspipe daemon is not running + $ref: "#/components/responses/BadRequestError" + "500": + $ref: "#/components/responses/InternalError" + + /fspipe/status: + get: + summary: Get fspipe daemon status + description: | + Returns the current status of the fspipe daemon including whether it's running, + the configured transport mode, and connection statistics. 
+ operationId: getFspipeStatus + responses: + "200": + description: Fspipe daemon status + content: + application/json: + schema: + $ref: "#/components/schemas/FspipeStatus" + "500": + $ref: "#/components/responses/InternalError" components: schemas: StartRecordingRequest: @@ -1722,6 +1790,113 @@ components: type: string description: Standard error from the execution additionalProperties: false + + S3Config: + type: object + description: S3/R2 storage configuration + required: [endpoint, bucket, access_key_id, secret_access_key] + properties: + endpoint: + type: string + description: S3-compatible endpoint URL (e.g., "https://ACCOUNT_ID.r2.cloudflarestorage.com") + bucket: + type: string + description: Bucket name + access_key_id: + type: string + description: Access key ID + secret_access_key: + type: string + description: Secret access key + region: + type: string + description: Region (use "auto" for R2) + default: "auto" + prefix: + type: string + description: Optional key prefix for uploaded objects + additionalProperties: false + + StartFspipeRequest: + type: object + description: Request to start the fspipe daemon + properties: + mount_path: + type: string + description: Path where the virtual filesystem will be mounted. Defaults to /home/kernel/fspipe-downloads. + pattern: "^/.*" + default: "/home/kernel/fspipe-downloads" + ws_endpoint: + type: string + description: | + WebSocket endpoint for streaming files (e.g., "ws://listener:9000/fspipe" or "wss://..."). + Mutually exclusive with s3_config. 
+ s3_config: + $ref: "#/components/schemas/S3Config" + health_port: + type: integer + description: Port for the fspipe health/metrics HTTP server + minimum: 1024 + maximum: 65535 + default: 8090 + additionalProperties: false + + FspipeStartResult: + type: object + description: Response after starting the fspipe daemon + required: [running, transport_mode, mount_path] + properties: + running: + type: boolean + description: Whether the daemon is now running + transport_mode: + type: string + enum: [websocket, s3] + description: The transport mode being used + mount_path: + type: string + description: The path where the virtual filesystem is mounted + ws_endpoint: + type: string + description: The WebSocket endpoint being used (if transport_mode is websocket) + s3_bucket: + type: string + description: The S3 bucket being used (if transport_mode is s3) + health_endpoint: + type: string + description: URL of the fspipe health/metrics server + additionalProperties: false + + FspipeStatus: + type: object + description: Status of the fspipe daemon + required: [running] + properties: + running: + type: boolean + description: Whether the daemon is currently running + transport_mode: + type: string + enum: [websocket, s3] + description: The transport mode being used (if running) + transport_state: + type: string + enum: [connected, reconnecting, disconnected] + description: Current transport connection state (if running) + mount_path: + type: string + description: The path where the virtual filesystem is mounted (if running) + ws_endpoint: + type: string + description: The WebSocket endpoint being used (if transport_mode is websocket) + s3_bucket: + type: string + description: The S3 bucket being used (if transport_mode is s3) + stats: + type: object + description: Transport statistics (messages sent, bytes transferred, etc.) + additionalProperties: true + additionalProperties: false responses: BadRequestError: description: Bad Request