Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 21 additions & 3 deletions Makefile
Original file line number Diff line number Diff line change
@@ -1,7 +1,14 @@
.PHONY: build-api sqlc fmt lint
.PHONY: build-server build-worker build-testrunner sqlc fmt lint \
docker-testrunner docker-server docker-worker docker-all

build-api:
go build -o bin/api ./cmd/api
build-server:
go build -o bin/server ./cmd/server

build-worker:
go build -o bin/worker ./cmd/worker

build-testrunner:
CGO_ENABLED=1 go build -o bin/testrunner ./cmd/testrunner

sqlc:
sqlc generate
Expand All @@ -11,3 +18,14 @@ fmt:

lint:
golangci-lint run ./...

docker-testrunner:
docker build --platform linux/amd64 --build-arg SERVICE=testrunner -t plugin-tests-testrunner:dev .

docker-server:
docker build --platform linux/amd64 --build-arg SERVICE=server -t plugin-tests-server:dev .

docker-worker:
docker build --platform linux/amd64 --build-arg SERVICE=worker -t plugin-tests-worker:dev .

docker-all: docker-server docker-worker docker-testrunner
2 changes: 1 addition & 1 deletion cmd/api/main.go → cmd/server/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ func main() {
producer := queue.NewProducer(client)
defer producer.Close()

server := api.NewServer(cfg.Server.Host, cfg.Server.Port, db, producer, logger)
server := api.NewServer(cfg, db, producer, logger)

err = server.Start(ctx)
if err != nil {
Expand Down
182 changes: 182 additions & 0 deletions cmd/testrunner/main.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,182 @@
package main

import (
"context"
"os"
"time"

"github.com/sirupsen/logrus"

"github.com/vultisig/plugin-tests/internal/testrunner"
)

// logger is the process-wide logrus instance shared by every subcommand.
var logger = logrus.New()

// main dispatches to one of the testrunner subcommands (seed, smoke,
// integration) named by the first CLI argument, exiting fatally when the
// argument is missing or unrecognized.
func main() {
	logger.SetFormatter(&logrus.TextFormatter{FullTimestamp: true})

	if len(os.Args) < 2 {
		logger.Fatal("usage: testrunner <seed|smoke|integration>")
	}

	commands := map[string]func(){
		"seed":        runSeed,
		"smoke":       runSmoke,
		"integration": runIntegration,
	}

	cmd := os.Args[1]
	run, ok := commands[cmd]
	if !ok {
		logger.Fatalf("unknown command: %s", cmd)
	}
	run()
}

// runSeed loads the test fixture and plugin set, then seeds the Postgres
// database and the MinIO vault bucket with that data. All connection
// parameters come from required environment variables; any failure is fatal.
func runSeed() {
	ctx := context.Background()

	fixture, err := testrunner.LoadFixture()
	if err != nil {
		logger.WithError(err).Fatal("failed to load fixture")
	}

	seederCfg := testrunner.SeederConfig{
		DSN: requireEnv("POSTGRES_DSN"),
		S3: testrunner.S3Config{
			Endpoint:  requireEnv("MINIO_ENDPOINT"),
			Region:    "us-east-1",
			AccessKey: requireEnv("MINIO_ACCESS_KEY"),
			SecretKey: requireEnv("MINIO_SECRET_KEY"),
			Bucket:    requireEnv("MINIO_BUCKET"),
		},
		Fixture:          fixture,
		Plugins:          testrunner.GetTestPlugins(),
		EncryptionSecret: requireEnv("ENCRYPTION_SECRET"),
	}
	seeder := testrunner.NewSeeder(seederCfg, logger)

	logger.Info("seeding database")
	if err = seeder.SeedDatabase(ctx); err != nil {
		logger.WithError(err).Fatal("failed to seed database")
	}

	logger.Info("seeding vaults to MinIO")
	if err = seeder.SeedVaults(ctx); err != nil {
		logger.WithError(err).Fatal("failed to seed vaults")
	}

	logger.Info("seeding completed successfully")
}

// runSmoke runs the HTTP smoke-test suite against a running verifier and
// plugin endpoint. It waits for the verifier to report healthy, executes
// every test in the suite, logs individual failures, and exits fatally if
// any test failed.
func runSmoke() {
	fixture, err := testrunner.LoadFixture()
	if err != nil {
		logger.WithError(err).Fatal("failed to load fixture")
	}

	verifierURL := requireEnv("VERIFIER_URL")
	jwtSecret := requireEnv("JWT_SECRET")
	pluginURL := requireEnv("PLUGIN_ENDPOINT")
	plugins := testrunner.GetTestPlugins()

	jwtToken, err := testrunner.GenerateJWT(jwtSecret, fixture.Vault.PublicKey, "integration-token-1", 24)
	if err != nil {
		logger.WithError(err).Fatal("failed to generate JWT")
	}

	verifierCli := testrunner.NewTestClient(verifierURL)
	pluginCli := testrunner.NewTestClient(pluginURL)

	logger.WithFields(logrus.Fields{
		"verifier_url": verifierURL,
		"plugin_url":   pluginURL,
	}).Info("waiting for verifier health")
	if err = verifierCli.WaitForHealth(60 * time.Second); err != nil {
		logger.WithError(err).Fatal("verifier not healthy")
	}
	logger.Info("verifier is healthy")

	suite := testrunner.NewTestSuite(verifierCli, pluginCli, fixture, plugins, jwtToken, logger)
	suite.RunAll()

	if suite.Failed > 0 {
		for _, failure := range suite.Errors {
			logger.WithField("error", failure).Error("test failure")
		}
		logger.WithFields(logrus.Fields{
			"passed": suite.Passed,
			"failed": suite.Failed,
			"total":  suite.Total,
		}).Fatal("test suite failed")
	}

	logger.WithFields(logrus.Fields{
		"passed": suite.Passed,
		"total":  suite.Total,
	}).Info("all tests passed")
}

// runIntegration performs the full plugin install flow (MPC reshare)
// against a live verifier and relay, then exercises the policy CRUD API
// using the reshare result. Required settings come from environment
// variables; ENCRYPTION_SECRET alone is optional. Any failure is fatal.
func runIntegration() {
	fixture, err := testrunner.LoadFixture()
	if err != nil {
		logger.WithError(err).Fatal("failed to load fixture")
	}

	cfg := testrunner.InstallConfig{
		VerifierURL: requireEnv("VERIFIER_URL"),
		RelayURL:    requireEnv("RELAY_URL"),
		PluginID:    requireEnv("PLUGIN_ID"),
		Fixture:     fixture,
		// Optional: an empty encryption secret is allowed here, unlike
		// the other settings.
		EncryptionSecret: os.Getenv("ENCRYPTION_SECRET"),
	}

	cfg.JWTToken, err = testrunner.GenerateJWT(requireEnv("JWT_SECRET"), fixture.Vault.PublicKey, "integration-token-1", 24)
	if err != nil {
		logger.WithError(err).Fatal("failed to generate JWT")
	}

	cfg.PluginAPIKey = requireEnv("PLUGIN_API_KEY")
	cfg.TestTargetAddress = requireEnv("TEST_TARGET_ADDRESS")

	logger.WithFields(logrus.Fields{
		"verifier_url": cfg.VerifierURL,
		"relay_url":    cfg.RelayURL,
		"plugin_id":    cfg.PluginID,
	}).Info("starting plugin install (MPC reshare)")

	reshareResult, err := testrunner.RunInstall(cfg, logger)
	if err != nil {
		logger.WithError(err).Fatal("install failed")
	}
	logger.Info("reshare completed successfully")

	logger.Info("starting policy CRUD tests")
	if err = testrunner.RunPolicyCRUD(cfg, reshareResult, logger); err != nil {
		logger.WithError(err).Fatal("policy CRUD failed")
	}
	logger.Info("policy CRUD completed successfully")

	logger.Info("install completed successfully")
}

// requireEnv returns the value of the named environment variable,
// terminating the process with a fatal log entry when the variable is
// unset or empty.
func requireEnv(key string) string {
	value, ok := os.LookupEnv(key)
	if !ok || value == "" {
		logger.WithField("var", key).Fatal("required environment variable is not set")
	}
	return value
}
119 changes: 119 additions & 0 deletions cmd/worker/main.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,119 @@
package main

import (
"context"
"fmt"
"os"
"os/signal"
"syscall"

"github.com/hibiken/asynq"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"

"github.com/vultisig/plugin-tests/config"
"github.com/vultisig/plugin-tests/internal/health"
"github.com/vultisig/plugin-tests/internal/logging"
"github.com/vultisig/plugin-tests/internal/queue"
"github.com/vultisig/plugin-tests/internal/storage/postgres"
"github.com/vultisig/plugin-tests/internal/worker"
)

// main wires up and runs the asynq worker: it loads configuration,
// connects to Postgres, Redis, and Kubernetes, starts the janitor and
// optional health server, and serves integration-test tasks until a
// SIGINT/SIGTERM cancels the root context.
func main() {
	// signal.NotifyContext replaces the manual sigChan + goroutine
	// pattern: the returned context is cancelled on the first
	// SIGINT/SIGTERM, and stop() releases the signal registration.
	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
	defer stop()

	cfg, err := config.ReadWorkerConfig()
	if err != nil {
		// Logger is not constructed yet (its format comes from config),
		// so report directly to stderr.
		fmt.Fprintf(os.Stderr, "failed to read config: %v\n", err)
		os.Exit(1)
	}

	logger := logging.NewLogger(cfg.LogFormat)

	db, err := postgres.NewPostgresBackend(cfg.Database.DSN)
	if err != nil {
		logger.WithError(err).Fatal("failed to connect to database")
	}
	defer db.Close()

	redisOpt, err := queue.NewRedisConnOpt(cfg.QueueRedis)
	if err != nil {
		logger.WithError(err).Fatal("failed to create redis connection")
	}

	k8sClient, err := buildK8sClient(cfg.KubeConfig)
	if err != nil {
		logger.WithError(err).Fatal("failed to create k8s client")
	}

	// Guard against a zero/negative configured concurrency, which asynq
	// would reject; fall back to a single worker.
	concurrency := cfg.Concurrency
	if concurrency <= 0 {
		concurrency = 1
	}

	srv := asynq.NewServer(redisOpt, asynq.Config{
		Concurrency: concurrency,
		Queues: map[string]int{
			queue.QueueName: 1,
		},
		Logger: logger,
	})

	// srv.Run blocks, so shut the server down from a watcher goroutine
	// once the signal context is cancelled.
	go func() {
		<-ctx.Done()
		logger.Info("shutting down worker")
		srv.Shutdown()
	}()

	consumer := worker.NewConsumer(db, k8sClient, logger, cfg.K8sJob, cfg.ArtifactS3)

	// The janitor runs for the lifetime of the process and stops when
	// ctx is cancelled.
	janitor := worker.NewJanitor(db, k8sClient, logger, cfg.Janitor)
	go janitor.Run(ctx)

	// Health endpoint is optional; a non-positive port disables it.
	if cfg.HealthPort > 0 {
		healthServer := health.New(cfg.HealthPort)
		go func() {
			err := healthServer.Start(ctx, logger)
			if err != nil {
				logger.WithError(err).Error("health server failed")
			}
		}()
	}

	mux := asynq.NewServeMux()
	mux.HandleFunc(queue.TypeRunIntegrationTest, consumer.Handle)

	logger.WithField("concurrency", concurrency).Info("starting worker")

	err = srv.Run(mux)
	if err != nil {
		logger.WithError(err).Fatal("worker failed")
	}
}

// buildK8sClient returns a Kubernetes client, preferring in-cluster
// credentials and falling back to the supplied kubeconfig path when the
// process is not running inside a cluster.
func buildK8sClient(kubeconfig string) (kubernetes.Interface, error) {
	restCfg, inClusterErr := rest.InClusterConfig()
	if inClusterErr != nil {
		// Not running in-cluster: a kubeconfig path is mandatory.
		if kubeconfig == "" {
			return nil, fmt.Errorf("not in cluster and KUBECONFIG not set: %w", inClusterErr)
		}
		var buildErr error
		restCfg, buildErr = clientcmd.BuildConfigFromFlags("", kubeconfig)
		if buildErr != nil {
			return nil, fmt.Errorf("failed to build kubeconfig from %s: %w", kubeconfig, buildErr)
		}
	}

	client, err := kubernetes.NewForConfig(restCfg)
	if err != nil {
		return nil, fmt.Errorf("failed to create kubernetes client: %w", err)
	}
	return client, nil
}
Loading