diff --git a/main.go b/main.go
index fb5e2e8..a6d549f 100644
--- a/main.go
+++ b/main.go
@@ -5,7 +5,6 @@ import (
 	"fmt"
 	"log"
 	"os"
-	"os/exec"
 	"sort"
 	"strings"
 	"time"
@@ -230,18 +229,13 @@ func runIt(recipe playground.Recipe) error {
 		return nil
 	}
 
-	// validate that override is being applied to a service in the manifest
-	for k := range overrides {
-		if _, ok := svcManager.GetService(k); !ok {
-			return fmt.Errorf("service '%s' in override not found in manifest", k)
-		}
+	if err := svcManager.ApplyOverrides(overrides); err != nil {
+		return err
 	}
 
 	cfg := &playground.RunnerConfig{
 		Out:                  artifacts.Out,
 		Manifest:             svcManager,
-		Overrides:            overrides,
-		Interactive:          interactive,
 		BindHostPortsLocally: !bindExternal,
 		NetworkName:          networkName,
 		Labels:               labels,
@@ -249,9 +243,14 @@ func runIt(recipe playground.Recipe) error {
 		Platform:             platform,
 	}
 
+	if interactive {
+		i := playground.NewInteractiveDisplay(svcManager)
+		cfg.Callback = i.HandleUpdate
+	}
+
 	// Add callback to log service updates in debug mode
 	if logLevel == playground.LevelDebug {
-		cfg.Callback = func(serviceName, update string) {
+		cfg.Callback = func(serviceName string, update playground.TaskStatus) {
 			log.Printf("[DEBUG] [%s] %s\n", serviceName, update)
 		}
 	}
@@ -296,7 +295,7 @@ func runIt(recipe playground.Recipe) error {
 	fmt.Printf("\nWaiting for network to be ready for transactions...\n")
 	networkReadyStart := time.Now()
 
-	if err := playground.CompleteReady(ctx, dockerRunner.Instances()); err != nil {
+	if err := playground.CompleteReady(ctx, svcManager.Services); err != nil {
 		dockerRunner.Stop()
 		return fmt.Errorf("network not ready: %w", err)
 	}
@@ -318,7 +317,7 @@ func runIt(recipe playground.Recipe) error {
 	watchdogErr := make(chan error, 1)
 	if watchdog {
 		go func() {
-			if err := playground.RunWatchdog(artifacts.Out, dockerRunner.Instances()); err != nil {
+			if err := playground.RunWatchdog(artifacts.Out, svcManager.Services); err != nil {
 				watchdogErr <- fmt.Errorf("watchdog failed: %w", err)
 			}
 		}()
@@ -345,26 +344,3 @@ func runIt(recipe playground.Recipe) error {
 	}
 	return nil
 }
-
-func isExecutableValid(path string) error {
-	// First check if file exists
-	_, err := os.Stat(path)
-	if err != nil {
-		return fmt.Errorf("file does not exist or is inaccessible: %w", err)
-	}
-
-	// Try to execute with a harmless flag or in a way that won't run the main program
-	cmd := exec.Command(path, "--version")
-	// Redirect output to /dev/null
-	cmd.Stdout = nil
-	cmd.Stderr = nil
-
-	if err := cmd.Start(); err != nil {
-		return fmt.Errorf("cannot start executable: %w", err)
-	}
-
-	// Immediately kill the process since we just want to test if it starts
-	cmd.Process.Kill()
-
-	return nil
-}
diff --git a/playground/components.go b/playground/components.go
index 8606228..0557b87 100644
--- a/playground/components.go
+++ b/playground/components.go
@@ -358,13 +358,13 @@ func (o *OpGeth) Apply(manifest *Manifest) {
 		WithArtifact("/data/p2p_key.txt", o.Enode.Artifact)
 }
 
-func opGethReadyFn(ctx context.Context, instance *instance) error {
-	opGethURL := fmt.Sprintf("http://localhost:%d", instance.service.MustGetPort("http").HostPort)
+func opGethReadyFn(ctx context.Context, service *Service) error {
+	opGethURL := fmt.Sprintf("http://localhost:%d", service.MustGetPort("http").HostPort)
 	return waitForFirstBlock(ctx, opGethURL, 60*time.Second)
 }
 
-func opGethWatchdogFn(out io.Writer, instance *instance, ctx context.Context) error {
-	gethURL := fmt.Sprintf("http://localhost:%d", instance.service.MustGetPort("http").HostPort)
+func opGethWatchdogFn(out io.Writer, service *Service, ctx context.Context) error {
+	gethURL := fmt.Sprintf("http://localhost:%d", service.MustGetPort("http").HostPort)
 	return watchChainHead(out, gethURL, 2*time.Second)
 }
 
@@ -417,7 +417,7 @@ func (r *RethEL) Apply(manifest *Manifest) {
 		"--chain", "/data/genesis.json",
 		"--datadir", "/data_reth",
 		"--color", "never",
-		"--ipcpath", "/data_reth/reth.ipc",
+		"--ipcdisable",
 		"--addr", "0.0.0.0",
 		"--port", `{{Port "rpc" 30303}}`,
 		// "--disable-discovery",
@@ -441,12 +441,12 @@ func (r *RethEL) Apply(manifest *Manifest) {
 			logLevelToRethVerbosity(manifest.ctx.LogLevel),
 		).
 		WithRelease(rethELRelease).
-		WithWatchdog(func(out io.Writer, instance *instance, ctx context.Context) error {
-			rethURL := fmt.Sprintf("http://localhost:%d", instance.service.MustGetPort("http").HostPort)
+		WithWatchdog(func(out io.Writer, service *Service, ctx context.Context) error {
+			rethURL := fmt.Sprintf("http://localhost:%d", service.MustGetPort("http").HostPort)
 			return watchChainHead(out, rethURL, 12*time.Second)
 		}).
-		WithReadyFn(func(ctx context.Context, instance *instance) error {
-			elURL := fmt.Sprintf("http://localhost:%d", instance.service.MustGetPort("http").HostPort)
+		WithReadyFn(func(ctx context.Context, service *Service) error {
+			elURL := fmt.Sprintf("http://localhost:%d", service.MustGetPort("http").HostPort)
 			return waitForFirstBlock(ctx, elURL, 60*time.Second)
 		}).
 		WithArtifact("/data/genesis.json", "genesis.json").
@@ -579,8 +579,8 @@ func (m *MevBoostRelay) Apply(manifest *Manifest) {
 	}
 }
 
-func mevboostRelayWatchdogFn(out io.Writer, instance *instance, ctx context.Context) error {
-	beaconNodeURL := fmt.Sprintf("http://localhost:%d", instance.service.MustGetPort("http").HostPort)
+func mevboostRelayWatchdogFn(out io.Writer, service *Service, ctx context.Context) error {
+	beaconNodeURL := fmt.Sprintf("http://localhost:%d", service.MustGetPort("http").HostPort)
 
 	watchGroup := newWatchGroup()
 	watchGroup.watch(func() error {
@@ -633,8 +633,8 @@ func (o *OpReth) Apply(manifest *Manifest) {
 			"--addr", "0.0.0.0",
 			"--port", `{{Port "rpc" 30303}}`).
 		WithRelease(opRethRelease).
-		WithWatchdog(func(out io.Writer, instance *instance, ctx context.Context) error {
-			rethURL := fmt.Sprintf("http://localhost:%d", instance.service.MustGetPort("http").HostPort)
+		WithWatchdog(func(out io.Writer, service *Service, ctx context.Context) error {
+			rethURL := fmt.Sprintf("http://localhost:%d", service.MustGetPort("http").HostPort)
 			return watchChainHead(out, rethURL, 2*time.Second)
 		}).
 		WithArtifact("/data/jwtsecret", "jwtsecret").
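The hook signature change above is mechanical but worth spelling out: readiness and watchdog callbacks now receive the `*Service` directly instead of the removed `instance` wrapper. A minimal sketch of the new contract (the function name `genericReadyFn` is hypothetical; every other identifier appears in the diff above):

```go
// genericReadyFn is a hypothetical readyFn under the new signature: the hook
// resolves the host-mapped port straight from the *Service, with no
// instance.service indirection.
func genericReadyFn(ctx context.Context, service *Service) error {
	url := fmt.Sprintf("http://localhost:%d", service.MustGetPort("http").HostPort)
	// consider the service ready once the chain has produced its first block
	return waitForFirstBlock(ctx, url, 60*time.Second)
}
```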
diff --git a/playground/components_test.go b/playground/components_test.go
index d50933b..8c114f6 100644
--- a/playground/components_test.go
+++ b/playground/components_test.go
@@ -19,15 +19,15 @@ func TestRecipeOpstackSimple(t *testing.T) {
 	tt := newTestFramework(t)
 	defer tt.Close()
 
-	tt.test(&OpRecipe{})
+	tt.test(&OpRecipe{}, nil)
 }
 
 func TestRecipeOpstackExternalBuilder(t *testing.T) {
 	tt := newTestFramework(t)
 	defer tt.Close()
 
-	tt.test(&OpRecipe{
-		externalBuilder: "http://host.docker.internal:4444",
+	tt.test(&OpRecipe{}, []string{
+		"--external-builder", "http://host.docker.internal:4444",
 	})
 }
 
@@ -36,8 +36,8 @@ func TestRecipeOpstackEnableForkAfter(t *testing.T) {
 	defer tt.Close()
 
 	forkTime := uint64(10)
-	manifest := tt.test(&OpRecipe{
-		enableLatestFork: &forkTime,
+	manifest := tt.test(&OpRecipe{}, []string{
+		"--enable-latest-fork", "10",
 	})
 
 	elService := manifest.MustGetService("op-geth")
@@ -49,15 +49,15 @@ func TestRecipeL1Simple(t *testing.T) {
 	tt := newTestFramework(t)
 	defer tt.Close()
 
-	tt.test(&L1Recipe{})
+	tt.test(&L1Recipe{}, nil)
 }
 
 func TestRecipeL1UseNativeReth(t *testing.T) {
 	tt := newTestFramework(t)
 	defer tt.Close()
 
-	tt.test(&L1Recipe{
-		useNativeReth: true,
+	tt.test(&L1Recipe{}, []string{
+		"--use-native-reth",
 	})
 }
 
@@ -65,7 +65,7 @@ func TestComponentBuilderHub(t *testing.T) {
 	tt := newTestFramework(t)
 	defer tt.Close()
 
-	tt.test(&BuilderHub{})
+	tt.test(&BuilderHub{}, nil)
 
 	// TODO: Calling the port directly on the host machine will not work once we have multiple
 	// tests running in parallel
@@ -83,7 +83,7 @@ func newTestFramework(t *testing.T) *testFramework {
 	return &testFramework{t: t}
 }
 
-func (tt *testFramework) test(s ServiceGen) *Manifest {
+func (tt *testFramework) test(s ServiceGen, args []string) *Manifest {
 	t := tt.t
 
 	// use the name of the repo and the current timestamp to generate
@@ -104,13 +104,14 @@ func (tt *testFramework) test(s ServiceGen) *Manifest {
 	}
 
 	o := &output{
-		dst: e2eTestDir,
+		dst:     e2eTestDir,
+		homeDir: filepath.Join(e2eTestDir, "artifacts"),
 	}
 
 	if recipe, ok := s.(Recipe); ok {
 		// We have to parse the flags since they are used to set the
 		// default values for the recipe inputs
-		err := recipe.Flags().Parse([]string{})
+		err := recipe.Flags().Parse(args)
 		require.NoError(t, err)
 
 		_, err = recipe.Artifacts().OutputDir(e2eTestDir).Build()
@@ -142,7 +143,7 @@ func (tt *testFramework) test(s ServiceGen) *Manifest {
 	require.NoError(t, err)
 	require.NoError(t, dockerRunner.WaitForReady(context.Background(), 20*time.Second))
 
-	require.NoError(t, CompleteReady(context.Background(), dockerRunner.Instances()))
+	require.NoError(t, CompleteReady(context.Background(), svcManager.Services))
 
 	return svcManager
 }
diff --git a/playground/interactive.go b/playground/interactive.go
new file mode 100644
index 0000000..a6c7027
--- /dev/null
+++ b/playground/interactive.go
@@ -0,0 +1,112 @@
+package playground
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/charmbracelet/bubbles/spinner"
+	"github.com/charmbracelet/lipgloss"
+)
+
+type InteractiveDisplay struct {
+	manifest     *Manifest
+	taskUpdateCh chan struct{}
+	status       sync.Map
+}
+
+type taskUI struct {
+	tasks    map[string]string
+	spinners map[string]spinner.Model
+	style    lipgloss.Style
+}
+
+func NewInteractiveDisplay(manifest *Manifest) *InteractiveDisplay {
+	i := &InteractiveDisplay{
+		manifest:     manifest,
+		taskUpdateCh: make(chan struct{}),
+	}
+
+	go i.printStatus()
+	return i
+}
+
+func (i *InteractiveDisplay) HandleUpdate(serviceName string, status TaskStatus) {
+	i.status.Store(serviceName, status)
+
+	select {
+	case i.taskUpdateCh <- struct{}{}:
+	default:
+	}
+}
+
+func (i *InteractiveDisplay) printStatus() {
+	fmt.Print("\033[s")
+	lineOffset := 0
+
+	// Get ordered service names from manifest
+	orderedServices := make([]string, 0, len(i.manifest.Services))
+	for _, svc := range i.manifest.Services {
+		orderedServices = append(orderedServices, svc.Name)
+	}
+
+	// Initialize UI state
+	ui := taskUI{
+		tasks:    make(map[string]string),
+		spinners: make(map[string]spinner.Model),
+		style:    lipgloss.NewStyle(),
+	}
+
+	// Initialize spinners for each service
+	for _, name := range orderedServices {
+		sp := spinner.New()
+		sp.Spinner = spinner.Dot
+		ui.spinners[name] = sp
+	}
+
+	tickSpinner := func(name string) spinner.Model {
+		sp := ui.spinners[name]
+		sp.Tick()
+		ui.spinners[name] = sp
+		return sp
+	}
+
+	for {
+		select {
+		case <-i.taskUpdateCh:
+			// Clear the previous lines and move cursor up
+			if lineOffset > 0 {
+				fmt.Printf("\033[%dA", lineOffset)
+				fmt.Print("\033[J")
+			}
+
+			lineOffset = 0
+			// Use ordered services instead of ranging over map
+			for _, name := range orderedServices {
+				status, ok := i.status.Load(name)
+				if !ok {
+					status = TaskStatusPending
+				}
+
+				var statusLine string
+				switch status {
+				case TaskStatusStarted, TaskStatusHealthy:
+					sp := tickSpinner(name)
+					statusLine = ui.style.Foreground(lipgloss.Color("2")).Render(fmt.Sprintf("%s [%s] Running", sp.View(), name))
+				case TaskStatusDie:
+					statusLine = ui.style.Foreground(lipgloss.Color("1")).Render(fmt.Sprintf("✗ [%s] Failed", name))
+				case TaskStatusPulled, TaskStatusPending:
+					sp := tickSpinner(name)
+					statusLine = ui.style.Foreground(lipgloss.Color("3")).Render(fmt.Sprintf("%s [%s] Pending", sp.View(), name))
+				case TaskStatusPulling:
+					sp := tickSpinner(name)
+					statusLine = ui.style.Foreground(lipgloss.Color("3")).Render(fmt.Sprintf("%s [%s] Pulling", sp.View(), name))
+				default:
+					panic(fmt.Sprintf("BUG: status '%s' not handled for service '%s'", status, name))
+				}
+
+				fmt.Println(statusLine)
+				lineOffset++
+			}
+		}
+	}
+}
diff --git a/playground/local_runner.go b/playground/local_runner.go
index 27dc76b..f3badd3 100644
--- a/playground/local_runner.go
+++ b/playground/local_runner.go
@@ -17,8 +17,6 @@ import (
 	"text/template"
 	"time"
 
-	"github.com/charmbracelet/bubbles/spinner"
-	"github.com/charmbracelet/lipgloss"
 	"github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/api/types/events"
 	"github.com/docker/docker/api/types/filters"
@@ -49,10 +47,6 @@ type LocalRunner struct {
 	// since we reserve ports for all the services before they are used
 	reservedPorts map[int]bool
 
-	// overrides is a map of service name to the path of the executable to run
-	// on the host machine instead of a container.
-	overrides map[string]string
-
 	// handles stores the references to the processes that are running on host machine
 	// they are executed sequentially so we do not need to lock the handles
 	handles []*exec.Cmd
@@ -60,40 +54,31 @@ type LocalRunner struct {
 	// exitError signals when one of the services fails
 	exitErr chan error
 
-	// TODO: Merge instance with tasks
-	instances []*instance
-
 	// tasks tracks the status of each service
-	tasksMtx     sync.Mutex
-	tasks        map[string]*task
-	taskUpdateCh chan struct{}
+	tasksMtx sync.Mutex
+	tasks    map[string]*task
 
 	// whether to remove the network name after execution (used in testing)
 	cleanupNetwork bool
-
-	// callback is called to report service updates
-	callback func(serviceName, update string)
 }
 
 type task struct {
-	status string
+	status TaskStatus
 	ready  bool
 	logs   *os.File
 }
 
+type TaskStatus string
+
 var (
-	taskStatusPending = "pending"
-	taskStatusStarted = "started"
-	taskStatusDie     = "die"
-	taskStatusHealthy = "healthy"
+	TaskStatusPulling TaskStatus = "pulling"
+	TaskStatusPulled  TaskStatus = "pulled"
+	TaskStatusPending TaskStatus = "pending"
+	TaskStatusStarted TaskStatus = "started"
+	TaskStatusDie     TaskStatus = "die"
+	TaskStatusHealthy TaskStatus = "healthy"
 )
 
-type taskUI struct {
-	tasks    map[string]string
-	spinners map[string]spinner.Model
-	style    lipgloss.Style
-}
-
 func newDockerClient() (*client.Client, error) {
 	client, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
 	if err != nil {
@@ -105,14 +90,12 @@ func newDockerClient() (*client.Client, error) {
 type RunnerConfig struct {
 	Out                  *output
 	Manifest             *Manifest
-	Overrides            map[string]string
-	Interactive          bool
 	BindHostPortsLocally bool
 	NetworkName          string
 	Labels               map[string]string
 	LogInternally        bool
 	Platform             string
-	Callback             func(serviceName, update string)
+	Callback             func(serviceName string, update TaskStatus)
 }
 
 func NewLocalRunner(cfg *RunnerConfig) (*LocalRunner, error) {
@@ -121,78 +104,37 @@ func NewLocalRunner(cfg *RunnerConfig) (*LocalRunner, error) {
 		return nil, fmt.Errorf("failed to create docker client: %w", err)
 	}
 
-	// merge the overrides with the manifest overrides
-	if cfg.Overrides == nil {
-		cfg.Overrides = make(map[string]string)
-	}
-	maps.Copy(cfg.Overrides, cfg.Manifest.overrides)
-
-	// Create the concrete instances to run
-	instances := []*instance{}
-	for _, service := range cfg.Manifest.Services {
-		instance := &instance{
-			service: service,
-		}
-		if cfg.LogInternally {
-			log_output, err := cfg.Out.LogOutput(service.Name)
-			if err != nil {
-				return nil, fmt.Errorf("error getting log output: %w", err)
-			}
-			instance.logs = &serviceLogs{
-				logRef: log_output,
-				path:   log_output.Name(),
-			}
-		}
-		instances = append(instances, instance)
-	}
-
 	// download any local release artifacts for the services that require them
 	// TODO: it feels a bit weird to have all this logic on the new command. We should split it later on.
-	for _, instance := range instances {
-		ss := instance.service
-		if ss.Labels[useHostExecutionLabel] == "true" {
+	for _, service := range cfg.Manifest.Services {
+		if service.Labels[useHostExecutionLabel] == "true" {
 			// If the service wants to run on the host, it must implement the ReleaseService interface
 			// which provides functions to download the release artifact.
-			releaseArtifact := instance.service.release
+			releaseArtifact := service.release
 			if releaseArtifact == nil {
-				return nil, fmt.Errorf("service '%s' must implement the ReleaseService interface", ss.Name)
+				return nil, fmt.Errorf("service '%s' must implement the ReleaseService interface", service.Name)
 			}
 
 			bin, err := DownloadRelease(cfg.Out.homeDir, releaseArtifact)
 			if err != nil {
-				return nil, fmt.Errorf("failed to download release artifact for service '%s': %w", ss.Name, err)
+				return nil, fmt.Errorf("failed to download release artifact for service '%s': %w", service.Name, err)
 			}
-			cfg.Overrides[ss.Name] = bin
+			service.HostPath = bin
 		}
 	}
 
-	// Now, the override can either be one of two things (we are overloading the override map):
-	// - docker image: In that case, change the manifest and remove from override map
-	// - a path to an executable: In that case, we need to run it on the host machine
-	// and use the override map <- We only check this case, and if it is not a path, we assume
-	// it is a docker image. If it is not a docker image either, the error will be catched during the execution
-	for k, v := range cfg.Overrides {
-		if _, err := os.Stat(v); err != nil {
-			// this is a path to an executable, remove it from the overrides since we
-			// assume it s a docker image and add it to manifest
-			parts := strings.Split(v, ":")
-			if len(parts) != 2 {
-				return nil, fmt.Errorf("invalid override docker image %s, expected image:tag", v)
-			}
-
-			srv := cfg.Manifest.MustGetService(k)
-			srv.Image = parts[0]
-			srv.Tag = parts[1]
+	tasks := map[string]*task{}
+	for _, service := range cfg.Manifest.Services {
+		var logs *os.File
 
-			delete(cfg.Overrides, k)
-			continue
+		if cfg.LogInternally {
+			if logs, err = cfg.Out.LogOutput(service.Name); err != nil {
+				return nil, fmt.Errorf("error getting log output: %w", err)
+			}
 		}
-	}
 
-	tasks := map[string]*task{}
-	for _, svc := range cfg.Manifest.Services {
-		tasks[svc.Name] = &task{
-			status: taskStatusPending,
-			logs:   nil,
+		tasks[service.Name] = &task{
+			status: TaskStatusPending,
+			logs:   logs,
 		}
 	}
 
@@ -200,9 +142,8 @@ func NewLocalRunner(cfg *RunnerConfig) (*LocalRunner, error) {
 		cfg.NetworkName = defaultNetworkName
 	}
 
-	callback := cfg.Callback
-	if callback == nil {
-		callback = func(serviceName, update string) {} // noop
+	if cfg.Callback == nil {
+		cfg.Callback = func(serviceName string, update TaskStatus) {} // noop
 	}
 
 	d := &LocalRunner{
@@ -211,96 +152,14 @@ func NewLocalRunner(cfg *RunnerConfig) (*LocalRunner, error) {
 		manifest:      cfg.Manifest,
 		client:        client,
 		reservedPorts: map[int]bool{},
-		overrides:     cfg.Overrides,
 		handles:       []*exec.Cmd{},
 		tasks:         tasks,
-		taskUpdateCh:  make(chan struct{}),
 		exitErr:       make(chan error, 2),
-		instances:     instances,
-		callback:      callback,
-	}
-
-	if cfg.Interactive {
-		go d.printStatus()
-
-		select {
-		case d.taskUpdateCh <- struct{}{}:
-		default:
-		}
 	}
 
 	return d, nil
 }
 
-func (d *LocalRunner) Instances() []*instance {
-	return d.instances
-}
-
-func (d *LocalRunner) printStatus() {
-	fmt.Print("\033[s")
-	lineOffset := 0
-
-	// Get ordered service names from manifest
-	orderedServices := make([]string, 0, len(d.manifest.Services))
-	for _, svc := range d.manifest.Services {
-		orderedServices = append(orderedServices, svc.Name)
-	}
-
-	// Initialize UI state
-	ui := taskUI{
-		tasks:    make(map[string]string),
-		spinners: make(map[string]spinner.Model),
-		style:    lipgloss.NewStyle(),
-	}
-
-	// Initialize spinners for each service
-	for _, name := range orderedServices {
-		sp := spinner.New()
-		sp.Spinner = spinner.Dot
-		ui.spinners[name] = sp
-	}
-
-	for {
-		select {
-		case <-d.taskUpdateCh:
-			d.tasksMtx.Lock()
-
-			// Clear the previous lines and move cursor up
-			if lineOffset > 0 {
-				fmt.Printf("\033[%dA", lineOffset)
-				fmt.Print("\033[J")
-			}
-
-			lineOffset = 0
-			// Use ordered services instead of ranging over map
-			for _, name := range orderedServices {
-				status := d.tasks[name].status
-				var statusLine string
-
-				switch status {
-				case taskStatusStarted:
-					sp := ui.spinners[name]
-					sp.Tick()
-					ui.spinners[name] = sp
-					statusLine = ui.style.Foreground(lipgloss.Color("2")).Render(fmt.Sprintf("%s [%s] Running", sp.View(), name))
-				case taskStatusDie:
-					statusLine = ui.style.Foreground(lipgloss.Color("1")).Render(fmt.Sprintf("✗ [%s] Failed", name))
-				case taskStatusPending:
-					sp := ui.spinners[name]
-					sp.Tick()
-					ui.spinners[name] = sp
-					statusLine = ui.style.Foreground(lipgloss.Color("3")).Render(fmt.Sprintf("%s [%s] Pending", sp.View(), name))
-				}
-
-				fmt.Println(statusLine)
-				lineOffset++
-			}
-
-			d.tasksMtx.Unlock()
-		}
-	}
-}
-
 func (d *LocalRunner) AreReady() bool {
 	d.tasksMtx.Lock()
 	defer d.tasksMtx.Unlock()
@@ -312,7 +171,7 @@ func (d *LocalRunner) AreReady() bool {
 		}
 
 		// first ensure the task has started
-		if task.status != taskStatusStarted {
+		if task.status != TaskStatusStarted {
 			return false
 		}
 
@@ -344,23 +203,20 @@ func (d *LocalRunner) WaitForReady(ctx context.Context, timeout time.Duration) e
 		}
 	}
 }
 
-func (d *LocalRunner) updateTaskStatus(name, status string) {
+func (d *LocalRunner) updateTaskStatus(name string, status TaskStatus) {
 	d.tasksMtx.Lock()
 	defer d.tasksMtx.Unlock()
-	if status == taskStatusHealthy {
+	if status == TaskStatusHealthy {
 		d.tasks[name].ready = true
 	} else {
 		d.tasks[name].status = status
 	}
 
-	if status == taskStatusDie {
+	if status == TaskStatusDie {
 		d.exitErr <- fmt.Errorf("container %s failed", name)
 	}
 
-	select {
-	case d.taskUpdateCh <- struct{}{}:
-	default:
-	}
+	d.config.Callback(name, status)
 }
 
 func (d *LocalRunner) ExitErr() <-chan error {
@@ -743,8 +599,7 @@ func (d *LocalRunner) toDockerComposeService(s *Service) (map[string]interface{}
 }
 
 func (d *LocalRunner) isHostService(name string) bool {
-	_, ok := d.overrides[name]
-	return ok
+	return d.manifest.MustGetService(name).HostPath != ""
 }
 
 func (d *LocalRunner) generateDockerCompose() ([]byte, error) {
@@ -834,7 +689,7 @@ func (d *LocalRunner) runOnHost(ss *Service) error {
 		}
 	}
 
-	execPath := d.overrides[ss.Name]
+	execPath := ss.HostPath
 	cmd := exec.Command(execPath, args...)
 	logOutput, err := d.out.LogOutput(ss.Name)
@@ -898,8 +753,7 @@ func (d *LocalRunner) trackContainerStatusAndLogs() {
 
 		switch event.Action {
 		case events.ActionStart:
-			d.updateTaskStatus(name, taskStatusStarted)
-			d.callback(name, "container started")
+			d.updateTaskStatus(name, TaskStatusStarted)
 
 			if d.config.LogInternally {
 				// the container has started, we can track the logs now
@@ -910,13 +764,11 @@ func (d *LocalRunner) trackContainerStatusAndLogs() {
 				}()
 			}
 		case events.ActionDie:
-			d.updateTaskStatus(name, taskStatusDie)
-			d.callback(name, "container died")
+			d.updateTaskStatus(name, TaskStatusDie)
 			log.Info("container died", "name", name)
 
 		case events.ActionHealthStatusHealthy:
-			d.updateTaskStatus(name, taskStatusHealthy)
-			d.callback(name, "container healthy")
+			d.updateTaskStatus(name, TaskStatusHealthy)
 			log.Info("container is healthy", "name", name)
 		}
 
@@ -989,11 +841,6 @@ func CreatePrometheusServices(manifest *Manifest, out *output) error {
 	return nil
 }
 
-const (
-	pullingImageEvent = "pulling image"
-	imagePulledEvent  = "image pulled"
-)
-
 func (d *LocalRunner) ensureImage(ctx context.Context, imageName string) error {
 	// Check if image exists locally
 	_, err := d.client.ImageInspect(ctx, imageName)
@@ -1005,7 +852,7 @@ func (d *LocalRunner) ensureImage(ctx context.Context, imageName string) error {
 	}
 
 	// Image not found locally, pull it
-	d.callback(imageName, pullingImageEvent)
+	d.config.Callback(imageName, TaskStatusPulling)
 
 	reader, err := d.client.ImagePull(ctx, imageName, image.PullOptions{})
 	if err != nil {
@@ -1019,7 +866,7 @@ func (d *LocalRunner) ensureImage(ctx context.Context, imageName string) error {
 		return fmt.Errorf("failed to read image pull output %s: %w", imageName, err)
 	}
 
-	d.callback(imageName, imagePulledEvent)
+	d.config.Callback(imageName, TaskStatusPulled)
 
 	return nil
 }
@@ -1056,13 +903,6 @@ func (d *LocalRunner) Run(ctx context.Context) error {
 		return fmt.Errorf("failed to write docker-compose.yaml: %w", err)
 	}
 
-	// generate the output log file for each service so that it is available after Run is done
-	for _, instance := range d.instances {
-		if instance.logs != nil {
-			d.tasks[instance.service.Name].logs = instance.logs.logRef
-		}
-	}
-
 	// Pull all required images in parallel
 	if err := d.pullNotAvailableImages(ctx); err != nil {
 		return err
diff --git a/playground/local_runner_test.go b/playground/local_runner_test.go
index ea7b97f..8cc5448 100644
--- a/playground/local_runner_test.go
+++ b/playground/local_runner_test.go
@@ -32,7 +32,7 @@ func TestRunnerPullImages(t *testing.T) {
 	}
 
 	numEvents := 0
-	callback := func(serviceName, event string) {
+	callback := func(serviceName string, event TaskStatus) {
 		numEvents++
 	}
 
diff --git a/playground/manifest.go b/playground/manifest.go
index 49d703c..e15da14 100644
--- a/playground/manifest.go
+++ b/playground/manifest.go
@@ -40,6 +40,36 @@ type Manifest struct {
 	out *output
 }
 
+func (m *Manifest) ApplyOverrides(overrides map[string]string) error {
+	// An override value can be one of two things:
+	// - a path to an executable: run the service on the host machine instead of a container
+	// - a docker image (image:tag): update the image and tag of the service in the manifest
+	// We only check whether the value is a path on disk; if it is not, we assume it is a
+	// docker image. If it is not a valid docker image either, the error will be caught during execution.
+	for k, v := range overrides {
+		srv, ok := m.GetService(k)
+		if !ok {
+			return fmt.Errorf("service '%s' not found", k)
+		}
+
+		if _, err := os.Stat(v); err == nil {
+			srv.HostPath = v
+		} else {
+			// the value is not a path on disk, so we assume it is a
+			// docker image reference and update the manifest instead
+			parts := strings.Split(v, ":")
+			if len(parts) != 2 {
+				return fmt.Errorf("invalid override docker image %s, expected image:tag", v)
+			}
+
+			srv.Image = parts[0]
+			srv.Tag = parts[1]
+		}
+	}
+
+	return nil
+}
+
 func NewManifest(ctx *ExContext, out *output) *Manifest {
 	ctx.Output = out
 	return &Manifest{ctx: ctx, out: out, overrides: make(map[string]string)}
@@ -114,7 +144,7 @@ type ServiceGen interface {
 }
 
 type ServiceReady interface {
-	Ready(instance *instance) error
+	Ready(service *Service) error
}
 
 func (s *Manifest) AddService(srv ServiceGen) {
@@ -283,6 +313,7 @@ type Service struct {
 	Tag        string `json:"tag,omitempty"`
 	Image      string `json:"image,omitempty"`
 	Entrypoint string `json:"entrypoint,omitempty"`
+	HostPath   string `json:"host_path,omitempty"`
 
 	release *release
 
 	watchdogFn watchdogFn
 	readyFn    readyFn
 }
 
 type (
-	watchdogFn func(out io.Writer, instance *instance, ctx context.Context) error
-	readyFn    func(ctx context.Context, instance *instance) error
+	watchdogFn func(out io.Writer, service *Service, ctx context.Context) error
+	readyFn    func(ctx context.Context, service *Service) error
 )
 
-type instance struct {
-	service *Service
-	logs    *serviceLogs
-}
-
 type DependsOnCondition string
 
 const (
diff --git a/playground/watchdog.go b/playground/watchdog.go
index 1a91bd3..ab27c24 100644
--- a/playground/watchdog.go
+++ b/playground/watchdog.go
@@ -5,19 +5,19 @@ import (
 	"fmt"
 )
 
-func RunWatchdog(out *output, instances []*instance) error {
-	watchdogErr := make(chan error, len(instances))
+func RunWatchdog(out *output, services []*Service) error {
+	watchdogErr := make(chan error, len(services))
 
 	output, err := out.LogOutput("watchdog")
 	if err != nil {
 		return fmt.Errorf("failed to create log output: %w", err)
 	}
 
-	for _, s := range instances {
-		if watchdogFn := s.service.watchdogFn; watchdogFn != nil {
+	for _, s := range services {
+		if watchdogFn := s.watchdogFn; watchdogFn != nil {
 			go func() {
 				if err := watchdogFn(output, s, context.Background()); err != nil {
-					watchdogErr <- fmt.Errorf("service %s watchdog failed: %w", s.service.Name, err)
+					watchdogErr <- fmt.Errorf("service %s watchdog failed: %w", s.Name, err)
 				}
 			}()
 		}
@@ -30,9 +30,9 @@ func RunWatchdog(out *output, instances []*instance) error {
 	return nil
 }
 
-func CompleteReady(ctx context.Context, instances []*instance) error {
-	for _, s := range instances {
-		if readyFn := s.service.readyFn; readyFn != nil {
+func CompleteReady(ctx context.Context, services []*Service) error {
+	for _, s := range services {
+		if readyFn := s.readyFn; readyFn != nil {
 			if err := readyFn(ctx, s); err != nil {
 				return err
 			}
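Taken together, the caller-side wiring becomes: apply overrides on the manifest (where unknown service names now fail early), then hand the runner a single typed callback. A sketch under the assumption that `svcManager` (a `*Manifest`) and `out` (an `*output`) are already in scope; the override values are made-up examples, not defaults shipped by the repo:

```go
// Path values run the service on the host via Service.HostPath; image:tag
// values rewrite Service.Image/Service.Tag in the manifest.
if err := svcManager.ApplyOverrides(map[string]string{
	"op-geth": "/usr/local/bin/op-geth", // hypothetical host binary
	"op-reth": "example/op-reth:v1.2.3", // hypothetical image:tag
}); err != nil {
	return err
}

cfg := &RunnerConfig{
	Out:      out,
	Manifest: svcManager,
	// The Interactive flag is gone; the interactive display is now just one
	// possible consumer of the TaskStatus callback.
	Callback: NewInteractiveDisplay(svcManager).HandleUpdate,
}
```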