Init minimal e2e tests (#6391)

This commit is contained in:
6543
2026-04-17 00:46:53 +02:00
committed by GitHub
parent 4390796985
commit 755fc2a14f
49 changed files with 2250 additions and 2 deletions

View File

@@ -253,6 +253,7 @@
],
"ignorePaths": [
".cspell.json",
"e2e/**",
".git/**/*",
".gitignore",
".golangci.yaml",

View File

@@ -90,6 +90,13 @@ steps:
when:
- path: *when_path
test-e2e:
depends_on:
- vendor
image: *golang_image
commands:
- make test-e2e
sqlite:
depends_on:
- vendor
@@ -136,6 +143,7 @@ steps:
- coverage.out
- server-coverage.out
- datastore-coverage.out
- e2e-coverage.out
token:
from_secret: codecov_token
when:

View File

@@ -203,8 +203,11 @@ test-ui: ui-dependencies ## Test UI code
test-lib: ## Test lib code
go test -race -cover -coverprofile coverage.out -timeout 60s -tags 'test $(TAGS)' $(shell go list ./... | grep -v '/cmd\|/agent\|/cli\|/server')
test-e2e: ## Test by running yaml config and compare expected result
go test -race -cover -coverpkg=./... -coverprofile e2e-coverage.out -timeout 60s -tags 'test $(TAGS)' ./e2e/...
.PHONY: test
test: test-agent test-server test-server-datastore test-cli test-lib ## Run all tests
test: test-agent test-server test-server-datastore test-cli test-lib test-e2e ## Run all tests
##@ Build

View File

@@ -1,3 +1,4 @@
ignore:
- '**/mocks/mock_*.go'
- '**/fixtures/*.go'
- 'e2e/**/*.go'

View File

@@ -0,0 +1,147 @@
// Copyright 2026 Woodpecker Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build test
package scenarios
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.woodpecker-ci.org/woodpecker/v3/e2e/setup"
forge_types "go.woodpecker-ci.org/woodpecker/v3/server/forge/types"
"go.woodpecker-ci.org/woodpecker/v3/server/model"
"go.woodpecker-ci.org/woodpecker/v3/server/pipeline"
)
// labelRoutingYAML is a single-workflow pipeline that requires the label
// gpu=true. Only the gpu-agent should pick it up; the plain agent must not.
var labelRoutingYAML = []byte(`
labels:
gpu: "true"
steps:
- name: gpu-step
image: dummy
commands:
- echo running on gpu agent
`)
// TestAgentLabelRouting starts two agents — one plain, one with gpu=true —
// and asserts that the pipeline with labels: gpu: "true" is always picked up
// by the gpu agent and never by the plain agent.
func TestAgentLabelRouting(t *testing.T) {
env := setup.StartServer(t.Context(), t, []*forge_types.FileMeta{
{Name: ".woodpecker.yaml", Data: labelRoutingYAML},
})
// Plain agent: wildcard repo label only — cannot satisfy gpu=true.
plainAgent := setup.StartAgent(t.Context(), t, env.GRPCAddr,
setup.WithHostname("plain-agent"),
)
// GPU agent: carries gpu=true — the only agent that can accept the task.
gpuAgent := setup.StartAgent(t.Context(), t, env.GRPCAddr,
setup.WithHostname("gpu-agent"),
setup.WithCustomLabels(map[string]string{"gpu": "true"}),
)
setup.WaitForAgentRegistered(t, env.Store, plainAgent, gpuAgent)
// Ensure both agents are actively polling before enqueuing the task.
// Without this, the plain agent (which polls with repo=* and no gpu label)
// could theoretically win if the queue tries to assign before the gpu-agent
// has connected its poll goroutines. In practice label filtering prevents a
// wrong assignment here, but waiting avoids any startup-ordering flakiness.
setup.WaitForWorkersReady(t, env.Queue, 2*setup.AgentMaxWorkflows)
created, err := pipeline.Create(t.Context(), env.Store, env.Fixtures.Repo, &model.Pipeline{
Event: model.EventPush,
Branch: "main",
Commit: "deadbeef",
Ref: "refs/heads/main",
Author: env.Fixtures.Owner.Login,
Sender: env.Fixtures.Owner.Login,
})
require.NoError(t, err, "create pipeline")
finished := setup.WaitForPipeline(t, env.Store, created.ID)
assert.Equal(t, model.StatusSuccess, finished.Status, "pipeline should succeed")
// The single workflow (name="woodpecker" from SanitizePath(".woodpecker.yaml"))
// must have been executed by the gpu agent, not the plain agent.
setup.AssertWorkflowRanOnAgent(t, env.Store, finished, "woodpecker", gpuAgent)
}
/*
// TODO: The agent assignment is currently flaky and so is the test, fix that.
// orgPipelineYAML is a plain single-step pipeline used for org-preference tests.
Var orgPipelineYAML = []byte(`
steps:
- name: build
image: dummy
commands:
- echo building
`)
// TestOrgAgentPreferredOverGlobal starts a global agent and an org-scoped agent
// for the same org as the test repo. It asserts that the org agent is always
// preferred by the queue (score 10 vs 1) and picks up the pipeline.
Func TestOrgAgentPreferredOverGlobal(t *testing.T) {
env := setup.StartServer(t.Context(), t, []*forge_types.FileMeta{
{Name: ".woodpecker.yaml", Data: orgPipelineYAML},
})
// Global agent: matches org-id=* (score 1).
globalAgent := setup.StartAgent(t.Context(), t, env.GRPCAddr,
setup.WithHostname("global-agent"),
)
// Org agent: will be patched with the repo's OrgID (score 10).
orgAgent := setup.StartAgent(t.Context(), t, env.GRPCAddr,
setup.WithHostname("org-agent"),
setup.WithOrgID(env.Fixtures.Repo.OrgID),
)
setup.WaitForAgentRegistered(t, env.Store, globalAgent, orgAgent)
// Wait until both agents have connected their poll goroutines to the queue.
// The org-agent reads its OrgID label from the DB at Poll time — if we
// create the pipeline before the org-agent is polling, the global agent
// can steal the task first (it's already blocking on Poll and wins the
// race). agentMaxWorkflows slots per agent = 8 workers total.
setup.WaitForWorkersReady(t, env.Queue, 2*setup.AgentMaxWorkflows)
created, err := pipeline.Create(t.Context(), env.Store, env.Fixtures.Repo, &model.Pipeline{
Event: model.EventPush,
Branch: "main",
Commit: "deadbeef",
Ref: "refs/heads/main",
Author: env.Fixtures.Owner.Login,
Sender: env.Fixtures.Owner.Login,
})
require.NoError(t, err, "create pipeline")
finished := setup.WaitForPipeline(t, env.Store, created.ID)
assert.Equal(t, model.StatusSuccess, finished.Status, "pipeline should succeed")
// The workflow must have been picked up by the org-scoped agent, not the
// global one — the queue scores exact org-id matches 10× higher.
setup.AssertWorkflowRanOnAgent(t, env.Store, finished, "woodpecker", orgAgent)
}.
*/

View File

@@ -0,0 +1,117 @@
// Copyright 2026 Woodpecker Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build test
package scenarios
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.woodpecker-ci.org/woodpecker/v3/e2e/setup"
forge_types "go.woodpecker-ci.org/woodpecker/v3/server/forge/types"
"go.woodpecker-ci.org/woodpecker/v3/server/model"
"go.woodpecker-ci.org/woodpecker/v3/server/pipeline"
)
// cancelPipelineYAML has one long-sleeping step followed by one that must
// be skipped when the pipeline is canceled.
var cancelPipelineYAML = []byte(`
steps:
- name: long-running
image: dummy
commands:
- echo starting long job
environment:
SLEEP: "30s"
- name: after-cancel
image: dummy
commands:
- echo this should never run
`)
// TestCancelRunningPipeline triggers a long-running pipeline, waits for it
// to enter StatusRunning, then cancels it via pipeline.Cancel and asserts:
// - pipeline ends up as StatusKilled
// - the running step exits with code 130 (dummy cancel convention = SIGINT)
// - the subsequent step is skipped
func TestCancelRunningPipeline(t *testing.T) {
env := setup.StartServer(t.Context(), t, []*forge_types.FileMeta{
{Name: ".woodpecker.yaml", Data: cancelPipelineYAML},
})
agent := setup.StartAgent(t.Context(), t, env.GRPCAddr)
setup.WaitForAgentRegistered(t, env.Store, agent)
created, err := pipeline.Create(t.Context(), env.Store, env.Fixtures.Repo, &model.Pipeline{
Event: model.EventPush,
Branch: "main",
Commit: "deadbeef",
Ref: "refs/heads/main",
Author: env.Fixtures.Owner.Login,
Sender: env.Fixtures.Owner.Login,
})
require.NoError(t, err, "create pipeline")
require.NotNil(t, created)
// Wait until the agent has picked it up and set it to running.
setup.WaitForPipelineStatus(t, env.Store, created.ID, model.StatusRunning, 10*time.Second)
// Also wait for the specific step to reach StatusRunning in the DB.
// The pipeline transitions to StatusRunning as soon as the agent starts
// the workflow, but the step itself may not yet have entered its
// sleepWithContext call in the dummy backend. If we cancel before the
// step is actually sleeping, WaitStep returns immediately with success
// before the cancel context propagates — causing "success" instead of
// "killed". Waiting here ensures the dummy sleep is genuinely in progress.
setup.WaitForStepRunning(t, env.Store, created.ID, "long-running")
// Resolve the forge instance (MockForge) via the manager.
forge, err := env.Manager.ForgeByID(env.Fixtures.Forge.ID)
require.NoError(t, err, "resolve forge")
// Fetch the latest pipeline state from the store before canceling.
running, err := env.Store.GetPipeline(created.ID)
require.NoError(t, err, "get running pipeline")
// Cancel through the normal server API path — same as the HTTP handler does.
err = pipeline.Cancel(t.Context(), forge, env.Store, env.Fixtures.Repo, env.Fixtures.Owner, running, nil)
require.NoError(t, err, "cancel pipeline")
// Wait for the pipeline to reach a terminal state.
finished := setup.WaitForPipeline(t, env.Store, created.ID)
assert.Equal(t, model.StatusKilled, finished.Status, "canceled pipeline should be killed")
t.Run("long-running step is killed", func(t *testing.T) {
// After pipeline.Cancel() the pipeline itself reaches a terminal state
// immediately, but the running step's status is written asynchronously
// by the agent's gRPC Done() call — which arrives *after* the cancel
// signal is processed. We therefore wait explicitly for the step to
// leave "running", giving the agent enough time to finish cleanup and
// report back.
step := setup.WaitForStepStatus(t, env.Store, finished, "long-running", model.StatusKilled, 30*time.Second)
assert.Equal(t, model.StatusKilled, step.State)
})
t.Run("after-cancel step is canceled", func(t *testing.T) {
// Pending steps get StatusCanceled synchronously by pipeline.Cancel()
// before any agent is involved, so this should already be set.
step := setup.WaitForStep(t, env.Store, finished, "after-cancel")
assert.Equal(t, model.StatusCanceled, step.State)
})
}

190
e2e/scenarios/fixtures.go Normal file
View File

@@ -0,0 +1,190 @@
// Copyright 2026 Woodpecker Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build test
package scenarios
import (
"embed"
"encoding/json"
"path/filepath"
"strings"
"testing"
"github.com/stretchr/testify/require"
forge_types "go.woodpecker-ci.org/woodpecker/v3/server/forge/types"
"go.woodpecker-ci.org/woodpecker/v3/server/model"
)
//go:embed fixtures/*.yaml fixtures/*.json fixtures/*/*.yaml fixtures/*/*.json
var fixtureFS embed.FS
// Scenario is the single source of truth for one integration test case.
//
// Single-workflow scenarios use a flat fixture pair:
//
// fixtures/NN_name.yaml — the pipeline YAML served by the mock forge
// fixtures/NN_name.json — assertions (Scenario fields)
//
// Multi-workflow scenarios use a subdirectory:
//
// fixtures/NN_name/workflow-a.yaml
// fixtures/NN_name/workflow-b.yaml
// fixtures/NN_name/scenario.json — assertions; Workflows field is populated from the YAMLs
type Scenario struct {
// Name is a human-readable label shown in test output.
Name string `json:"name"`
// Event is the webhook event that triggers the pipeline (default: push).
Event model.WebhookEvent `json:"event"`
// ExpectedStatus is the final pipeline status we assert on.
ExpectedStatus model.StatusValue `json:"expected_status"`
// ExpectedSteps lists per-step assertions (matched by step name).
// Steps not listed here are not checked.
ExpectedSteps []ExpectedStep `json:"expected_steps"`
// ExpectedWorkflows lists per-workflow assertions (matched by workflow name).
// Only checked when non-empty. For single-workflow pipelines, the workflow
// name is derived from the YAML filename by the step builder.
ExpectedWorkflows []ExpectedWorkflow `json:"expected_workflows"`
// Files is the set of workflow YAML files served by the mock forge.
// Single-workflow: one entry named ".woodpecker.yaml".
// Multi-workflow: one entry per file in the fixtures subdirectory,
// with paths like ".woodpecker/workflow-a.yaml".
// Populated by LoadScenarios — not present in the JSON.
Files []*forge_types.FileMeta `json:"-"`
}
// ExpectedStep describes what we expect for one named step after the pipeline finishes.
type ExpectedStep struct {
Name string `json:"name"`
Status model.StatusValue `json:"status"`
ExitCode int `json:"exit_code"`
}
// ExpectedWorkflow describes what we expect for one named workflow after the pipeline finishes.
type ExpectedWorkflow struct {
Name string `json:"name"`
Status model.StatusValue `json:"status"`
}
// LoadScenarios reads all fixture pairs and subdirectories from the embedded
// fixtures/ directory and returns them sorted by filesystem order.
//
// Flat pairs (NN_name.yaml + NN_name.json) → single-workflow scenario.
// Directories (NN_name/ with *.yaml + scenario.json) → multi-workflow scenario.
func LoadScenarios(t *testing.T) []Scenario {
t.Helper()
entries, err := fixtureFS.ReadDir("fixtures")
require.NoError(t, err, "read fixtures dir")
// Index flat YAML files by stem.
yamlByStem := make(map[string][]byte)
jsonByStem := make(map[string][]byte)
var scenarios []Scenario
for _, e := range entries {
name := e.Name()
if e.IsDir() {
// Multi-workflow scenario: load scenario.json + all *.yaml files.
s := loadMultiWorkflowScenario(t, name)
scenarios = append(scenarios, s)
continue
}
data, err := fixtureFS.ReadFile(filepath.Join("fixtures", name))
require.NoError(t, err, "read fixture %s", name)
stem := strings.TrimSuffix(strings.TrimSuffix(name, ".yaml"), ".json")
switch filepath.Ext(name) {
case ".yaml":
yamlByStem[stem] = data
case ".json":
jsonByStem[stem] = data
}
}
// Pair flat YAML + JSON files.
for stem, jsonData := range jsonByStem {
var s Scenario
require.NoError(t, json.Unmarshal(jsonData, &s), "parse %s.json", stem)
yamlData, ok := yamlByStem[stem]
require.True(t, ok, "missing %s.yaml for %s.json", stem, stem)
// Single-workflow: serve as ".woodpecker.yaml" so the config service
// calls File() and gets back the YAML directly.
s.Files = []*forge_types.FileMeta{
{Name: ".woodpecker.yaml", Data: yamlData},
}
if s.Event == "" {
s.Event = model.EventPush
}
scenarios = append(scenarios, s)
}
require.NotEmpty(t, scenarios, "no scenarios loaded")
return scenarios
}
// loadMultiWorkflowScenario reads a fixtures/dirName/ subdirectory.
// It expects a scenario.json and one or more *.yaml workflow files.
func loadMultiWorkflowScenario(t *testing.T, dirName string) Scenario {
t.Helper()
dir := filepath.Join("fixtures", dirName)
entries, err := fixtureFS.ReadDir(dir)
require.NoError(t, err, "read multi-workflow dir %s", dir)
var s Scenario
var files []*forge_types.FileMeta
for _, e := range entries {
if e.IsDir() {
continue
}
name := e.Name()
data, err := fixtureFS.ReadFile(filepath.Join(dir, name))
require.NoError(t, err, "read %s/%s", dirName, name)
switch {
case name == "scenario.json":
require.NoError(t, json.Unmarshal(data, &s), "parse %s/scenario.json", dirName)
case strings.HasSuffix(name, ".yaml"):
// Serve under .woodpecker/<filename> so Dir() returns them.
files = append(files, &forge_types.FileMeta{
Name: ".woodpecker/" + name,
Data: data,
})
}
}
require.NotEmpty(t, files, "no YAML files in multi-workflow dir %s", dirName)
require.NotEmpty(t, s.Name, "scenario.json missing 'name' in %s", dirName)
s.Files = forge_types.SortByName(files)
if s.Event == "" {
s.Event = model.EventPush
}
return s
}

View File

@@ -0,0 +1,10 @@
{
"name": "simple success",
"event": "push",
"expected_status": "success",
"expected_steps": [
{ "name": "clone", "status": "success", "exit_code": 0 },
{ "name": "build", "status": "success", "exit_code": 0 },
{ "name": "test", "status": "success", "exit_code": 0 }
]
}

View File

@@ -0,0 +1,10 @@
steps:
- name: build
image: dummy
commands:
- echo building
- name: test
image: dummy
commands:
- echo testing

View File

@@ -0,0 +1,9 @@
{
"name": "step failure stops pipeline",
"event": "push",
"expected_status": "failure",
"expected_steps": [
{ "name": "build", "status": "failure", "exit_code": 1 },
{ "name": "deploy", "status": "skipped", "exit_code": 0 }
]
}

View File

@@ -0,0 +1,14 @@
skip_clone: true
steps:
- name: build
image: dummy
commands:
- echo building
environment:
STEP_EXIT_CODE: '1'
- name: deploy
image: dummy
commands:
- echo deploying

View File

@@ -0,0 +1,9 @@
{
"name": "failure ignore continues pipeline",
"event": "push",
"expected_status": "success",
"expected_steps": [
{ "name": "lint", "status": "failure", "exit_code": 1 },
{ "name": "build", "status": "success", "exit_code": 0 }
]
}

View File

@@ -0,0 +1,15 @@
skip_clone: true
steps:
- name: lint
image: dummy
commands:
- echo linting
failure: ignore
environment:
STEP_EXIT_CODE: '1'
- name: build
image: dummy
commands:
- echo building

View File

@@ -0,0 +1,9 @@
{
"name": "on-failure step runs after failure",
"event": "push",
"expected_status": "failure",
"expected_steps": [
{ "name": "build", "status": "failure", "exit_code": 2 },
{ "name": "notify", "status": "success", "exit_code": 0 }
]
}

View File

@@ -0,0 +1,16 @@
skip_clone: true
steps:
- name: build
image: dummy
commands:
- echo building
environment:
STEP_EXIT_CODE: '2'
- name: notify
image: dummy
commands:
- echo notifying
when:
- status: [failure]

View File

@@ -0,0 +1,10 @@
{
"name": "service runs alongside steps",
"event": "push",
"expected_status": "success",
"expected_steps": [
{ "name": "clone", "status": "success", "exit_code": 0 },
{ "name": "test", "status": "success", "exit_code": 0 },
{ "name": "db", "status": "success", "exit_code": 0 }
]
}

View File

@@ -0,0 +1,11 @@
steps:
- name: test
image: dummy
commands:
- echo running tests
services:
- name: db
image: dummy
environment:
SLEEP: '100ms'

View File

@@ -0,0 +1,11 @@
{
"name": "parallel steps with depends_on",
"event": "push",
"expected_status": "success",
"expected_steps": [
{ "name": "clone", "status": "success", "exit_code": 0 },
{ "name": "test-unit", "status": "success", "exit_code": 0 },
{ "name": "test-integration", "status": "success", "exit_code": 0 },
{ "name": "deploy", "status": "success", "exit_code": 0 }
]
}

View File

@@ -0,0 +1,18 @@
steps:
- name: test-unit
image: dummy
commands:
- echo unit tests
depends_on: []
- name: test-integration
image: dummy
commands:
- echo integration tests
depends_on: []
- name: deploy
image: dummy
commands:
- echo deploying
depends_on: [test-unit, test-integration]

View File

@@ -0,0 +1,6 @@
{
"name": "OOM killed step fails pipeline",
"event": "push",
"expected_status": "failure",
"expected_steps": [{ "name": "hungry", "status": "failure", "exit_code": 137 }]
}

View File

@@ -0,0 +1,10 @@
skip_clone: true
steps:
- name: hungry
image: dummy
commands:
- echo eating memory
environment:
STEP_OOM_KILLED: 'true'
STEP_EXIT_CODE: '137'

View File

@@ -0,0 +1,10 @@
{
"name": "always-run step executes on failure",
"event": "push",
"expected_status": "failure",
"expected_steps": [
{ "name": "build", "status": "failure", "exit_code": 1 },
{ "name": "always-cleanup", "status": "success", "exit_code": 0 },
{ "name": "deploy", "status": "skipped", "exit_code": 0 }
]
}

View File

@@ -0,0 +1,21 @@
skip_clone: true
steps:
- name: build
image: dummy
commands:
- echo building
environment:
STEP_EXIT_CODE: '1'
- name: always-cleanup
image: dummy
commands:
- echo cleaning up
when:
- status: [success, failure]
- name: deploy
image: dummy
commands:
- echo deploying

View File

@@ -0,0 +1,11 @@
skip_clone: true
steps:
- name: compile
image: dummy
commands:
- echo compiling
- name: test
image: dummy
commands:
- echo testing

View File

@@ -0,0 +1,7 @@
skip_clone: true
steps:
- name: lint
image: dummy
commands:
- echo linting

View File

@@ -0,0 +1,14 @@
{
"name": "two parallel workflows both succeed",
"event": "push",
"expected_status": "success",
"expected_workflows": [
{ "name": "build", "status": "success" },
{ "name": "lint", "status": "success" }
],
"expected_steps": [
{ "name": "compile", "status": "success", "exit_code": 0 },
{ "name": "test", "status": "success", "exit_code": 0 },
{ "name": "lint", "status": "success", "exit_code": 0 }
]
}

View File

@@ -0,0 +1,9 @@
skip_clone: true
steps:
- name: bad-step
image: dummy
environment:
STEP_EXIT_CODE: '1'
commands:
- echo this will fail

View File

@@ -0,0 +1,7 @@
skip_clone: true
steps:
- name: ok-step
image: dummy
commands:
- echo this is fine

View File

@@ -0,0 +1,13 @@
{
"name": "one workflow fails pipeline is failure",
"event": "push",
"expected_status": "failure",
"expected_workflows": [
{ "name": "failing", "status": "failure" },
{ "name": "passing", "status": "success" }
],
"expected_steps": [
{ "name": "ok-step", "status": "success", "exit_code": 0 },
{ "name": "bad-step", "status": "failure", "exit_code": 1 }
]
}

View File

@@ -0,0 +1,11 @@
skip_clone: true
steps:
- name: flaky
image: dummy
environment:
STEP_EXIT_CODE: '1'
commands:
- echo flaky step
when:
- failure: ignore

View File

@@ -0,0 +1,7 @@
skip_clone: true
steps:
- name: build
image: dummy
commands:
- echo building

View File

@@ -0,0 +1,13 @@
{
"name": "two workflows one fails pipeline is failure",
"event": "push",
"expected_status": "failure",
"expected_workflows": [
{ "name": "flaky", "status": "failure" },
{ "name": "main", "status": "success" }
],
"expected_steps": [
{ "name": "build", "status": "success", "exit_code": 0 },
{ "name": "flaky", "status": "failure", "exit_code": 1 }
]
}

View File

@@ -0,0 +1,11 @@
skip_clone: true
steps:
- name: compile
image: dummy
commands:
- echo compiling
- name: unit-test
image: dummy
commands:
- echo unit testing

View File

@@ -0,0 +1,10 @@
skip_clone: true
depends_on:
- build
steps:
- name: deploy
image: dummy
commands:
- echo deploying

View File

@@ -0,0 +1,10 @@
skip_clone: true
depends_on:
- build
steps:
- name: notify
image: dummy
commands:
- echo notifying

View File

@@ -0,0 +1,16 @@
{
"name": "workflows with depends_on run in order",
"event": "push",
"expected_status": "success",
"expected_workflows": [
{ "name": "build", "status": "success" },
{ "name": "deploy", "status": "success" },
{ "name": "notify", "status": "success" }
],
"expected_steps": [
{ "name": "compile", "status": "success", "exit_code": 0 },
{ "name": "unit-test", "status": "success", "exit_code": 0 },
{ "name": "deploy", "status": "success", "exit_code": 0 },
{ "name": "notify", "status": "success", "exit_code": 0 }
]
}

View File

@@ -0,0 +1,9 @@
skip_clone: true
steps:
- name: compile
image: dummy
environment:
STEP_EXIT_CODE: '1'
commands:
- echo compile failed

View File

@@ -0,0 +1,10 @@
skip_clone: true
depends_on:
- build
steps:
- name: deploy
image: dummy
commands:
- echo this should not run

View File

@@ -0,0 +1,13 @@
{
"name": "downstream workflow skipped when dependency fails",
"event": "push",
"expected_status": "failure",
"expected_workflows": [
{ "name": "build", "status": "failure" },
{ "name": "deploy", "status": "skipped" }
],
"expected_steps": [
{ "name": "compile", "status": "failure", "exit_code": 1 },
{ "name": "deploy", "status": "killed", "exit_code": 0, "_comment": "TODO: it should be skipped not killed" }
]
}

View File

@@ -0,0 +1,92 @@
// Copyright 2026 Woodpecker Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build test
// Package scenarios contains end-to-end integration tests that run a real
// in-process Woodpecker server (with MockForge) and a real in-process agent
// (with the dummy backend). Tests trigger pipelines via server/pipeline.Create
// and assert on final DB state.
package scenarios
import (
"os"
"testing"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.woodpecker-ci.org/woodpecker/v3/e2e/setup"
forge_types "go.woodpecker-ci.org/woodpecker/v3/server/forge/types"
"go.woodpecker-ci.org/woodpecker/v3/server/model"
"go.woodpecker-ci.org/woodpecker/v3/server/pipeline"
)
// TestMain sets global log level to warn so test output isn't buried in JSON.
// Override by setting WOODPECKER_LOG_LEVEL=trace before running tests.
func TestMain(m *testing.M) {
level := zerolog.WarnLevel
if lvl := os.Getenv("WOODPECKER_LOG_LEVEL"); lvl != "" {
if l, err := zerolog.ParseLevel(lvl); err == nil {
level = l
}
}
zerolog.SetGlobalLevel(level)
log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr, NoColor: true})
os.Exit(m.Run())
}
// simpleSuccessYAML is the minimal pipeline config for the smoke test.
// "image: dummy" is handled by the dummy backend (requires -tags test).
var simpleSuccessYAML = []byte(`
steps:
- name: step-one
image: dummy
commands:
- echo hello
- name: step-two
image: dummy
commands:
- echo world
`)
// TestInfraSmoke verifies the full server+agent stack can start, accept a
// pipeline, run it through the dummy backend, and reach StatusSuccess.
// This is the "does the plumbing work at all" gate — it runs first.
func TestInfraSmoke(t *testing.T) {
env := setup.StartServer(t.Context(), t, []*forge_types.FileMeta{
{Name: ".woodpecker.yaml", Data: simpleSuccessYAML},
})
agent := setup.StartAgent(t.Context(), t, env.GRPCAddr)
setup.WaitForAgentRegistered(t, env.Store, agent)
draftPipeline := &model.Pipeline{
Event: model.EventPush,
Branch: "main",
Commit: "deadbeef",
Ref: "refs/heads/main",
Author: env.Fixtures.Owner.Login,
Sender: env.Fixtures.Owner.Login,
}
createdPipeline, err := pipeline.Create(t.Context(), env.Store, env.Fixtures.Repo, draftPipeline)
require.NoError(t, err, "create pipeline")
require.NotNil(t, createdPipeline)
t.Logf("pipeline %d created with status=%s", createdPipeline.ID, createdPipeline.Status)
finished := setup.WaitForPipeline(t, env.Store, createdPipeline.ID)
assert.Equal(t, model.StatusSuccess, finished.Status, "pipeline should succeed")
}

View File

@@ -0,0 +1,287 @@
// Copyright 2026 Woodpecker Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build test
package scenarios
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.woodpecker-ci.org/woodpecker/v3/e2e/setup"
forge_types "go.woodpecker-ci.org/woodpecker/v3/server/forge/types"
"go.woodpecker-ci.org/woodpecker/v3/server/model"
"go.woodpecker-ci.org/woodpecker/v3/server/pipeline"
)
// matrixPipelineYAML defines a 2×2 matrix (GO_VERSION × OS), yielding 4
// workflows. Each step echoes its matrix variables so we can confirm the
// dummy backend receives the interpolated values via the step environment.
var matrixPipelineYAML = []byte(`
matrix:
GO_VERSION:
- "1.24"
- "1.26"
OS:
- linux
- windows
steps:
- name: build
image: dummy
commands:
- echo "go=${GO_VERSION} os=${OS}"
`)
// matrixIncludePipelineYAML uses the matrix.include form to specify exact
// combinations, verifying the alternative matrix syntax is also handled.
var matrixIncludePipelineYAML = []byte(`
matrix:
include:
- GO_VERSION: "1.24"
OS: linux
- GO_VERSION: "1.26"
OS: linux
- GO_VERSION: "1.26"
OS: windows
steps:
- name: build
image: dummy
commands:
- echo "go=${GO_VERSION} os=${OS}"
`)
// TestMatrixPipeline verifies that a matrix YAML expands into the correct
// number of workflows, that every workflow succeeds, and that each workflow's
// Environ map carries the right variable combination.
func TestMatrixPipeline(t *testing.T) {
env := setup.StartServer(t.Context(), t, []*forge_types.FileMeta{
{Name: ".woodpecker.yaml", Data: matrixPipelineYAML},
})
agent := setup.StartAgent(t.Context(), t, env.GRPCAddr)
setup.WaitForAgentRegistered(t, env.Store, agent)
created, err := pipeline.Create(t.Context(), env.Store, env.Fixtures.Repo, &model.Pipeline{
Event: model.EventPush,
Branch: "main",
Commit: "deadbeef",
Ref: "refs/heads/main",
Author: env.Fixtures.Owner.Login,
Sender: env.Fixtures.Owner.Login,
})
require.NoError(t, err, "create matrix pipeline")
require.NotNil(t, created)
finished := setup.WaitForPipeline(t, env.Store, created.ID)
assert.Equal(t, model.StatusSuccess, finished.Status, "matrix pipeline should succeed")
workflows, err := env.Store.WorkflowGetTree(finished)
require.NoError(t, err, "get workflow tree")
// 2 GO_VERSION values × 2 OS values = 4 workflows
const wantWorkflows = 4
assert.Len(t, workflows, wantWorkflows,
"matrix should expand to %d workflows", wantWorkflows)
// Build the set of expected (GO_VERSION, OS) pairs and verify each
// workflow accounts for exactly one, with no duplicates.
type combo struct{ goVersion, os string }
expected := map[combo]bool{
{"1.24", "linux"}: true,
{"1.24", "windows"}: true,
{"1.26", "linux"}: true,
{"1.26", "windows"}: true,
}
seen := make(map[combo]bool, len(workflows))
for _, wf := range workflows {
assert.Equal(t, model.StatusSuccess, wf.State,
"workflow axis %d should succeed", wf.AxisID)
assert.NotZero(t, wf.AxisID,
"matrix workflows must have a non-zero AxisID")
goVer := wf.Environ["GO_VERSION"]
os := wf.Environ["OS"]
c := combo{goVer, os}
assert.True(t, expected[c],
"unexpected matrix combination GO_VERSION=%q OS=%q", goVer, os)
assert.False(t, seen[c],
"duplicate matrix combination GO_VERSION=%q OS=%q", goVer, os)
seen[c] = true
}
// Every expected combination must have been present.
for c := range expected {
assert.True(t, seen[c],
"missing matrix combination GO_VERSION=%q OS=%q", c.goVersion, c.os)
}
}
// TestMatrixIncludePipeline verifies the matrix.include syntax produces the
// exact explicit combinations listed (3 workflows, not a full cross product).
func TestMatrixIncludePipeline(t *testing.T) {
env := setup.StartServer(t.Context(), t, []*forge_types.FileMeta{
{Name: ".woodpecker.yaml", Data: matrixIncludePipelineYAML},
})
agent := setup.StartAgent(t.Context(), t, env.GRPCAddr)
setup.WaitForAgentRegistered(t, env.Store, agent)
created, err := pipeline.Create(t.Context(), env.Store, env.Fixtures.Repo, &model.Pipeline{
Event: model.EventPush,
Branch: "main",
Commit: "deadbeef",
Ref: "refs/heads/main",
Author: env.Fixtures.Owner.Login,
Sender: env.Fixtures.Owner.Login,
})
require.NoError(t, err, "create matrix include pipeline")
require.NotNil(t, created)
finished := setup.WaitForPipeline(t, env.Store, created.ID)
assert.Equal(t, model.StatusSuccess, finished.Status, "matrix include pipeline should succeed")
workflows, err := env.Store.WorkflowGetTree(finished)
require.NoError(t, err, "get workflow tree")
// matrix.include has 3 explicit entries — no cross product.
const wantWorkflows = 3
assert.Len(t, workflows, wantWorkflows,
"matrix include should produce exactly %d workflows", wantWorkflows)
type combo struct{ goVersion, os string }
expected := map[combo]bool{
{"1.24", "linux"}: true,
{"1.26", "linux"}: true,
{"1.26", "windows"}: true,
}
seen := make(map[combo]bool, len(workflows))
for _, wf := range workflows {
assert.Equal(t, model.StatusSuccess, wf.State,
"workflow (axis %d) should succeed", wf.AxisID)
c := combo{wf.Environ["GO_VERSION"], wf.Environ["OS"]}
assert.True(t, expected[c],
"unexpected combination GO_VERSION=%q OS=%q", c.goVersion, c.os)
assert.False(t, seen[c],
"duplicate combination GO_VERSION=%q OS=%q", c.goVersion, c.os)
seen[c] = true
}
for c := range expected {
assert.True(t, seen[c],
"missing combination GO_VERSION=%q OS=%q", c.goVersion, c.os)
}
}
// TestMatrixSingleAxis verifies a single-axis matrix (TAG: [1.7, 1.8, latest])
// — the simplest possible matrix — to ensure no edge cases in the axis
// calculation code.
func TestMatrixSingleAxis(t *testing.T) {
yaml := []byte(`
matrix:
TAG:
- "1.7"
- "1.8"
- latest
steps:
- name: build
image: dummy
commands:
- echo "tag=${TAG}"
`)
env := setup.StartServer(t.Context(), t, []*forge_types.FileMeta{
{Name: ".woodpecker.yaml", Data: yaml},
})
agent := setup.StartAgent(t.Context(), t, env.GRPCAddr)
setup.WaitForAgentRegistered(t, env.Store, agent)
created, err := pipeline.Create(t.Context(), env.Store, env.Fixtures.Repo, &model.Pipeline{
Event: model.EventPush,
Branch: "main",
Commit: "deadbeef",
Ref: "refs/heads/main",
Author: env.Fixtures.Owner.Login,
Sender: env.Fixtures.Owner.Login,
})
require.NoError(t, err, "create single-axis matrix pipeline")
require.NotNil(t, created)
finished := setup.WaitForPipeline(t, env.Store, created.ID)
assert.Equal(t, model.StatusSuccess, finished.Status, "single-axis matrix pipeline should succeed")
workflows, err := env.Store.WorkflowGetTree(finished)
require.NoError(t, err, "get workflow tree")
assert.Len(t, workflows, 3, "single-axis matrix [1.7, 1.8, latest] should produce 3 workflows")
wantTags := map[string]bool{"1.7": true, "1.8": true, "latest": true}
seenTags := make(map[string]bool, 3)
for _, wf := range workflows {
assert.Equal(t, model.StatusSuccess, wf.State,
"workflow for TAG=%q should succeed", wf.Environ["TAG"])
tag := wf.Environ["TAG"]
assert.True(t, wantTags[tag], "unexpected TAG value %q", tag)
assert.False(t, seenTags[tag], "duplicate TAG value %q", tag)
seenTags[tag] = true
}
}
// TestMatrixNoMatrix is a regression guard: a YAML without a matrix section
// must produce exactly one workflow (the existing behavior must not break).
func TestMatrixNoMatrix(t *testing.T) {
yaml := []byte(`
steps:
- name: build
image: dummy
commands:
- echo "no matrix"
`)
env := setup.StartServer(t.Context(), t, []*forge_types.FileMeta{
{Name: ".woodpecker.yaml", Data: yaml},
})
agent := setup.StartAgent(t.Context(), t, env.GRPCAddr)
setup.WaitForAgentRegistered(t, env.Store, agent)
created, err := pipeline.Create(t.Context(), env.Store, env.Fixtures.Repo, &model.Pipeline{
Event: model.EventPush,
Branch: "main",
Commit: "deadbeef",
Ref: "refs/heads/main",
Author: env.Fixtures.Owner.Login,
Sender: env.Fixtures.Owner.Login,
})
require.NoError(t, err, "create non-matrix pipeline")
require.NotNil(t, created)
finished := setup.WaitForPipeline(t, env.Store, created.ID)
assert.Equal(t, model.StatusSuccess, finished.Status)
workflows, err := env.Store.WorkflowGetTree(finished)
require.NoError(t, err, "get workflow tree")
assert.Len(t, workflows, 1, "non-matrix pipeline should produce exactly 1 workflow")
assert.Zero(t, workflows[0].AxisID,
"non-matrix workflow should have AxisID=0")
assert.Empty(t, workflows[0].Environ,
"non-matrix workflow should have no Environ variables")
}

144
e2e/scenarios/suite_test.go Normal file
View File

@@ -0,0 +1,144 @@
// Copyright 2026 Woodpecker Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build test
package scenarios
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.woodpecker-ci.org/woodpecker/v3/e2e/setup"
"go.woodpecker-ci.org/woodpecker/v3/server/model"
"go.woodpecker-ci.org/woodpecker/v3/server/pipeline"
)
// TestScenarios is the table-driven runner for all fixture-based scenarios.
// Each subtest gets its own isolated server+agent environment so they cannot
// interfere with each other.
//
// Subtests do NOT run in parallel because StartServer writes to the
// server.Config package-level global — running concurrently would race.
func TestScenarios(t *testing.T) {
for _, sc := range LoadScenarios(t) {
t.Run(sc.Name, func(t *testing.T) {
runScenario(t, sc)
})
}
}
// runScenario starts a fresh server+agent, triggers one pipeline described by
// sc, waits for it to finish, then asserts the expected DB state.
func runScenario(t *testing.T, sc Scenario) {
t.Helper()
env := setup.StartServer(t.Context(), t, sc.Files)
agent := setup.StartAgent(t.Context(), t, env.GRPCAddr)
setup.WaitForAgentRegistered(t, env.Store, agent)
created, err := pipeline.Create(t.Context(), env.Store, env.Fixtures.Repo, &model.Pipeline{
Event: sc.Event,
Branch: "main",
Commit: "deadbeef",
Ref: "refs/heads/main",
Author: env.Fixtures.Owner.Login,
Sender: env.Fixtures.Owner.Login,
})
require.NoError(t, err, "create pipeline")
require.NotNil(t, created)
finished := setup.WaitForPipeline(t, env.Store, created.ID)
assert.Equal(t, sc.ExpectedStatus, finished.Status, "pipeline final status")
if len(sc.ExpectedSteps) == 0 {
return
}
steps, err := env.Store.StepList(finished)
require.NoError(t, err, "list steps for pipeline %d", finished.ID)
require.ElementsMatch(t, expStepsToName(sc.ExpectedSteps), modelStepsToName(steps), "we got different steps reported back as we expected")
// Index steps by name for O(1) lookup.
byName := make(map[string]*model.Step, len(steps))
for _, s := range steps {
byName[s.Name] = s
}
for _, want := range sc.ExpectedSteps {
step, ok := byName[want.Name]
if !assert.Truef(t, ok, "step %q not found in pipeline %d", want.Name, finished.ID) {
continue
}
assert.Equalf(t, want.Status, step.State, "step %q status", want.Name)
assert.Equalf(t, want.ExitCode, step.ExitCode, "step %q exit code", want.Name)
}
if len(sc.ExpectedWorkflows) == 0 {
return
}
workflows, err := env.Store.WorkflowGetTree(finished)
require.NoError(t, err, "list workflows for pipeline %d", finished.ID)
require.ElementsMatch(t, expWorkflowsToName(sc.ExpectedWorkflows), modelWorkflowsToName(workflows), "we got different workflows reported back as we expected")
byWorkflowName := make(map[string]*model.Workflow, len(workflows))
for _, w := range workflows {
byWorkflowName[w.Name] = w
}
for _, want := range sc.ExpectedWorkflows {
wf, ok := byWorkflowName[want.Name]
if !assert.Truef(t, ok, "workflow %q not found in pipeline %d", want.Name, finished.ID) {
continue
}
assert.Equalf(t, want.Status, wf.State, "workflow %q status", want.Name)
}
}
func expStepsToName(in []ExpectedStep) []string {
out := make([]string, 0, len(in))
for _, s := range in {
out = append(out, s.Name)
}
return out
}
func modelStepsToName(in []*model.Step) []string {
out := make([]string, 0, len(in))
for _, s := range in {
out = append(out, s.Name)
}
return out
}
func expWorkflowsToName(in []ExpectedWorkflow) []string {
out := make([]string, 0, len(in))
for _, s := range in {
out = append(out, s.Name)
}
return out
}
func modelWorkflowsToName(in []*model.Workflow) []string {
out := make([]string, 0, len(in))
for _, s := range in {
out = append(out, s.Name)
}
return out
}

240
e2e/setup/agent.go Normal file
View File

@@ -0,0 +1,240 @@
// Copyright 2026 Woodpecker Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build test
package setup
import (
"context"
"testing"
"time"
"github.com/rs/zerolog/log"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
"go.woodpecker-ci.org/woodpecker/v3/agent"
agent_rpc "go.woodpecker-ci.org/woodpecker/v3/agent/rpc"
"go.woodpecker-ci.org/woodpecker/v3/pipeline/backend/dummy"
"go.woodpecker-ci.org/woodpecker/v3/rpc"
"go.woodpecker-ci.org/woodpecker/v3/server/model"
"go.woodpecker-ci.org/woodpecker/v3/version"
)
const (
AgentMaxWorkflows = 4
agentAuthRefreshEvery = 30 * time.Minute
)
// AgentEnv holds the running state of one in-process test agent.
// Use AgentID to assert which agent picked up a workflow.
type AgentEnv struct {
// AgentID is the server-assigned ID after registration.
// Valid only after WaitForAgentRegistered returns.
AgentID int64
// name is used for logging and as the hostname label.
name string
// requestedOrgID is applied to the DB record by WaitForAgentRegistered
// so the server's GetServerLabels returns the right org-id filter.
// model.IDNotSet (-1) means global (default).
requestOrgID int64
}
// AgentOption configures an agent before it registers with the server.
type AgentOption func(*agentConfig)
type agentConfig struct {
// hostname is sent to the server as the agent's hostname metadata and label.
hostname string
// customLabels are merged into the agent's filter labels.
// They are matched against task Labels set in pipeline YAML (labels: key: value).
customLabels map[string]string
// orgID pins the agent to a specific organization (-1 = global).
// Org agents score higher than global agents for tasks in the same org,
// so they are always preferred by the queue when available.
orgID int64
}
// WithHostname sets the agent's hostname label (default: "test-agent").
func WithHostname(name string) AgentOption {
return func(c *agentConfig) { c.hostname = name }
}
// WithCustomLabels merges extra labels into the agent's filter set.
// Use this to test label-based task routing, e.g.:
//
// setup.StartAgent(ctx, t, addr, setup.WithCustomLabels(map[string]string{"gpu": "true"}))
//
// The pipeline YAML must set a matching label:
//
// labels:
// gpu: "true"
func WithCustomLabels(labels map[string]string) AgentOption {
return func(c *agentConfig) {
for k, v := range labels {
c.customLabels[k] = v
}
}
}
// WithOrgID restricts the agent to a specific organization. Org agents score
// 10× higher than global agents (score 1) for tasks from the same org, so the
// queue always prefers them when both are available. Pass model.IDNotSet (-1)
// for a global agent (the default).
func WithOrgID(id int64) AgentOption {
return func(c *agentConfig) { c.orgID = id }
}
// StartAgent connects an in-process agent using the dummy backend to the gRPC
// server at grpcAddr and returns an *AgentEnv whose AgentID is populated once
// the agent has registered. Pass AgentOption values to configure labels, hostname,
// or org-scoping; multiple agents can be started in the same test.
func StartAgent(ctx context.Context, t *testing.T, grpcAddr string, opts ...AgentOption) *AgentEnv { //nolint:contextcheck
t.Helper()
cfg := &agentConfig{
hostname: "test-agent",
customLabels: make(map[string]string),
orgID: model.IDNotSet, // global by default
}
for _, o := range opts {
o(cfg)
}
env := &AgentEnv{name: cfg.hostname}
transport := grpc.WithTransportCredentials(insecure.NewCredentials())
keepaliveOpts := grpc.WithKeepaliveParams(keepalive.ClientParameters{
Time: defaultTimeout,
Timeout: shortTimeout,
})
authCtx, authCancel := context.WithCancelCause(context.Background())
t.Cleanup(func() { authCancel(nil) })
authConn, err := grpc.NewClient(grpcAddr, transport, keepaliveOpts)
if err != nil {
t.Fatalf("StartAgent(%s): create auth gRPC connection: %v", cfg.hostname, err)
}
t.Cleanup(func() { authConn.Close() })
authClient := agent_rpc.NewAuthGrpcClient(authConn, TestAgentToken, -1)
authInterceptor, err := agent_rpc.NewAuthInterceptor(authCtx, authClient, agentAuthRefreshEvery) //nolint:contextcheck
if err != nil {
t.Fatalf("StartAgent(%s): authenticate with server: %v", cfg.hostname, err)
}
conn, err := grpc.NewClient(
grpcAddr,
transport,
keepaliveOpts,
grpc.WithUnaryInterceptor(authInterceptor.Unary()),
grpc.WithStreamInterceptor(authInterceptor.Stream()),
)
if err != nil {
t.Fatalf("StartAgent(%s): create main gRPC connection: %v", cfg.hostname, err)
}
t.Cleanup(func() { conn.Close() })
client := agent_rpc.NewGrpcClient(ctx, conn)
grpcCtx := metadata.NewOutgoingContext(authCtx, metadata.Pairs("hostname", cfg.hostname))
backend := dummy.New()
if !backend.IsAvailable(ctx) {
t.Fatalf("StartAgent(%s): dummy backend is not available", cfg.hostname)
}
engInfo, err := backend.Load(ctx)
if err != nil {
t.Fatalf("StartAgent(%s): load dummy backend: %v", cfg.hostname, err)
}
env.AgentID, err = client.RegisterAgent(grpcCtx, rpc.AgentInfo{ //nolint:contextcheck
Version: version.String(),
Backend: backend.Name(),
Platform: engInfo.Platform,
Capacity: AgentMaxWorkflows,
CustomLabels: cfg.customLabels,
})
require.NoErrorf(t, err, "StartAgent(%s): register with server: %v", cfg.hostname, err)
// If a non-global org is requested, update the agent's OrgID in the DB so
// the server's GetServerLabels returns the right org-id filter (score 10).
if cfg.orgID != model.IDNotSet {
// The server stores agents; we patch via the store after registration.
// This is done in WaitForAgentRegistered which the caller must invoke.
// We stash the requested orgID so the wait helper can apply it.
env.requestOrgID = cfg.orgID
}
t.Cleanup(func() {
if err := client.UnregisterAgent(grpcCtx); err != nil {
log.Warn().Err(err).Str("hostname", cfg.hostname).Msg("test agent: unregister failed (expected during teardown)")
}
})
// Build the filter labels the agent advertises to the queue.
// org-id is handled server-side via GetServerLabels; we only set
// the labels the agent explicitly provides (platform, backend, repo wildcard,
// and any custom labels).
filter := rpc.Filter{
Labels: map[string]string{
"hostname": cfg.hostname,
"platform": engInfo.Platform,
"backend": backend.Name(),
"repo": "*",
},
}
for k, v := range cfg.customLabels {
filter.Labels[k] = v
}
counter := &agent.State{
Polling: AgentMaxWorkflows,
Metadata: make(map[string]agent.Info),
}
for i := range AgentMaxWorkflows {
go func(slot int) {
runner := agent.NewRunner(client, filter, cfg.hostname, counter, backend)
log.Debug().Int("slot", slot).Str("hostname", cfg.hostname).Msg("test agent: runner started")
for {
if ctx.Err() != nil {
return
}
if err := runner.Run(ctx); err != nil {
if ctx.Err() != nil {
return
}
log.Error().Err(err).Int("slot", slot).Str("hostname", cfg.hostname).Msg("test agent: runner error, retrying")
select {
case <-ctx.Done():
return
case <-time.After(500 * time.Millisecond):
}
}
}
}(i)
}
return env
}

81
e2e/setup/forge.go Normal file
View File

@@ -0,0 +1,81 @@
// Copyright 2026 Woodpecker Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build test
package setup
import (
"net/http"
"testing"
"github.com/stretchr/testify/mock"
forge_mocks "go.woodpecker-ci.org/woodpecker/v3/server/forge/mocks"
forge_types "go.woodpecker-ci.org/woodpecker/v3/server/forge/types"
"go.woodpecker-ci.org/woodpecker/v3/server/model"
)
// newMockForge builds a MockForge that serves the given files for any
// config-fetch call, no-ops status reporting, and stubs all other methods safely.
//
// Single-workflow (len(files)==1, name ".woodpecker.yaml"): File() returns the
// raw YAML bytes; Dir() is not called but is stubbed for safety.
//
// Multi-workflow (len(files)>1, names ".woodpecker/foo.yaml"): File() returns
// empty (causing the config service to fall through to Dir()); Dir() returns
// all files.
func newMockForge(t *testing.T, files []*forge_types.FileMeta) *forge_mocks.MockForge {
t.Helper()
m := forge_mocks.NewMockForge(t)
// Identity.
m.On("Name").Return("mock").Maybe()
m.On("URL").Return("https://forge.example.test").Maybe()
if len(files) == 1 {
// Single-workflow: config service calls File(".woodpecker.yaml").
m.On("File",
mock.Anything, mock.Anything, mock.Anything, mock.Anything, ".woodpecker.yaml",
).Return(files[0].Data, nil).Maybe()
m.On("Dir",
mock.Anything, mock.Anything, mock.Anything, mock.Anything, ".woodpecker",
).Return(files, nil).Maybe()
} else {
// Multi-workflow: config service calls Dir(".woodpecker").
// File() must return empty so the service falls through to Dir().
m.On("File",
mock.Anything, mock.Anything, mock.Anything, mock.Anything, ".woodpecker.yaml",
).Return([]byte(nil), nil).Maybe()
m.On("Dir",
mock.Anything, mock.Anything, mock.Anything, mock.Anything, ".woodpecker",
).Return(files, nil).Maybe()
}
// Status reporting back to forge — no-op.
m.On("Status",
mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything,
).Return(nil).Maybe()
// Netrc for clone steps.
m.On("Netrc",
mock.Anything, mock.Anything,
).Return(&model.Netrc{}, nil).Maybe()
return m
}
// compile-time import guard.
var _ *http.Request

209
e2e/setup/server.go Normal file
View File

@@ -0,0 +1,209 @@
// Copyright 2026 Woodpecker Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build test
package setup
import (
"context"
"net"
"sync"
"testing"
"github.com/prometheus/client_golang/prometheus"
"github.com/stretchr/testify/require"
"github.com/urfave/cli/v3"
"google.golang.org/grpc"
"google.golang.org/grpc/keepalive"
"go.woodpecker-ci.org/woodpecker/v3/rpc/proto"
"go.woodpecker-ci.org/woodpecker/v3/server"
"go.woodpecker-ci.org/woodpecker/v3/server/cache"
"go.woodpecker-ci.org/woodpecker/v3/server/forge"
forge_mocks "go.woodpecker-ci.org/woodpecker/v3/server/forge/mocks"
forge_types "go.woodpecker-ci.org/woodpecker/v3/server/forge/types"
"go.woodpecker-ci.org/woodpecker/v3/server/logging"
"go.woodpecker-ci.org/woodpecker/v3/server/model"
"go.woodpecker-ci.org/woodpecker/v3/server/pubsub/memory"
"go.woodpecker-ci.org/woodpecker/v3/server/queue"
server_rpc "go.woodpecker-ci.org/woodpecker/v3/server/rpc"
"go.woodpecker-ci.org/woodpecker/v3/server/scheduler"
"go.woodpecker-ci.org/woodpecker/v3/server/services"
"go.woodpecker-ci.org/woodpecker/v3/server/services/permissions"
"go.woodpecker-ci.org/woodpecker/v3/server/store"
)
const (
// TestAgentToken is the shared secret used between the in-process server
// and agent. Hard-coded for tests — not a real secret.
TestAgentToken = "test-agent-secret-for-integration-tests"
// TestJWTSecret is used for signing gRPC auth JWTs.
TestJWTSecret = "test-jwt-secret-for-integration-tests"
// TestForgeType is the forge type the mock pretends to bee.
TestForgeType = model.ForgeTypeGitea
)
var configLock = sync.Mutex{}
// ServerEnv holds all the pieces of a running test server environment.
type ServerEnv struct {
GRPCAddr string
Store store.Store
Queue queue.Queue
Fixtures *Fixtures
Forge *forge_mocks.MockForge
Manager services.Manager
}
// StartServer wires up the full in-process server stack:
// - in-memory sqlite store (fully migrated) with seeded fixtures
// - in-memory queue, pubsub, and logging
// - MockForge that serves the provided workflow files
// - gRPC server on a random TCP port
//
// files must contain at least one entry. Single-workflow scenarios pass one
// file named ".woodpecker.yaml"; multi-workflow scenarios pass multiple files
// named ".woodpecker/foo.yaml" etc. The repo's Config path is set accordingly.
//
// All resources are cleaned up via t.Cleanup.
func StartServer(ctx context.Context, t *testing.T, files []*forge_types.FileMeta) *ServerEnv {
t.Helper()
configLock.Lock()
defer configLock.Unlock()
memStore := newStore(ctx, t)
fixtures := seedFixtures(t, memStore)
mockForge := newMockForge(t, files)
mgr, err := newTestManager(memStore, mockForge)
require.NoError(t, err, "create services manager")
memQueue, err := queue.New(ctx, queue.Config{Backend: queue.TypeMemory})
require.NoError(t, err, "create queue")
// Save and restore server.Config around the test. server.Config is a
// package-level global read by server/pipeline and server/rpc. Tests run
// sequentially within a package, but we still need to clean up so the next
// subtest starts from a known-zero state rather than the previous test's values.
orig := server.Config
t.Cleanup(func() {
configLock.Lock()
defer configLock.Unlock()
server.Config = orig
})
server.Config.Services.Logs = logging.New()
server.Config.Services.Scheduler = scheduler.NewScheduler(memQueue, memory.New())
server.Config.Services.Membership = cache.NewMembershipService(memStore)
server.Config.Services.Manager = mgr
server.Config.Services.LogStore = memStore
server.Config.Server.AgentToken = TestAgentToken
server.Config.Server.Host = "http://localhost"
server.Config.Server.JWTSecret = TestJWTSecret
server.Config.Pipeline.DefaultClonePlugin = "docker.io/woodpeckerci/plugin-git:latest"
server.Config.Pipeline.TrustedClonePlugins = []string{"docker.io/woodpeckerci/plugin-git:latest"}
server.Config.Pipeline.DefaultApprovalMode = model.RequireApprovalNone
server.Config.Pipeline.DefaultTimeout = 60
server.Config.Pipeline.MaxTimeout = 60
server.Config.Permissions.Open = true
server.Config.Permissions.Admins = permissions.NewAdmins([]string{})
server.Config.Permissions.Orgs = permissions.NewOrgs([]string{})
server.Config.Permissions.OwnersAllowlist = permissions.NewOwnersAllowlist([]string{})
grpcAddr := startGRPCServer(ctx, t, memStore)
return &ServerEnv{
GRPCAddr: grpcAddr,
Store: memStore,
Queue: memQueue,
Fixtures: fixtures,
Forge: mockForge,
Manager: mgr,
}
}
// newTestManager builds a services.Manager whose SetupForge always returns
// the provided MockForge, bypassing real forge instantiation.
func newTestManager(s store.Store, mockForge *forge_mocks.MockForge) (services.Manager, error) {
cmd := &cli.Command{
Flags: []cli.Flag{
// Config fetch tuning.
&cli.DurationFlag{Name: "forge-timeout", Value: defaultTimeout},
&cli.UintFlag{Name: "forge-retry", Value: defaultRetry},
&cli.StringSliceFlag{Name: "environment"},
// Forge flags — gitea=true satisfies setupForgeService's type switch.
&cli.BoolFlag{Name: string(TestForgeType), Value: true},
&cli.StringFlag{Name: "forge-url", Value: "https://forge.example.test"},
},
}
setupForge := services.SetupForge(func(*model.Forge) (forge.Forge, error) {
return mockForge, nil
})
return services.NewManager(cmd, s, setupForge)
}
// startGRPCServer binds to a random TCP port, registers Woodpecker's gRPC
// services, and starts serving. Shutdown happens via t.Cleanup.
func startGRPCServer(ctx context.Context, t *testing.T, s store.Store) string {
t.Helper()
lis, err := net.Listen("tcp", "127.0.0.1:0")
require.NoError(t, err, "listen on random port for gRPC")
addr := lis.Addr().String()
jwtManager := server_rpc.NewJWTManager(TestJWTSecret)
authorizer := server_rpc.NewAuthorizer(jwtManager)
grpcServer := grpc.NewServer(
grpc.StreamInterceptor(authorizer.StreamInterceptor),
grpc.UnaryInterceptor(authorizer.UnaryInterceptor),
grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
MinTime: shortTimeout,
}),
)
proto.RegisterWoodpeckerServer(grpcServer, server_rpc.NewTestWoodpeckerServer(
server.Config.Services.Scheduler,
server.Config.Services.Logs,
s,
prometheus.NewRegistry(),
))
proto.RegisterWoodpeckerAuthServer(grpcServer, server_rpc.NewWoodpeckerAuthServer(
jwtManager,
TestAgentToken,
s,
))
grpcCtx, grpcCancel := context.WithCancelCause(ctx)
go func() {
<-grpcCtx.Done()
grpcServer.GracefulStop()
}()
go func() {
if err := grpcServer.Serve(lis); err != nil {
grpcCancel(err)
}
}()
t.Cleanup(func() { grpcCancel(nil) })
return addr
}

99
e2e/setup/store.go Normal file
View File

@@ -0,0 +1,99 @@
// Copyright 2026 Woodpecker Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build test
package setup
import (
"context"
"testing"
"github.com/stretchr/testify/require"
"go.woodpecker-ci.org/woodpecker/v3/server/model"
"go.woodpecker-ci.org/woodpecker/v3/server/store"
"go.woodpecker-ci.org/woodpecker/v3/server/store/datastore"
)
// Fixtures holds the pre-seeded database records shared across all tests.
type Fixtures struct {
Forge *model.Forge
Owner *model.User
Repo *model.Repo
}
// newStore creates a fully-migrated in-memory sqlite store.
func newStore(ctx context.Context, t *testing.T) store.Store {
t.Helper()
s, err := datastore.NewEngine(&store.Opts{
Driver: "sqlite3",
Config: ":memory:",
// MaxOpenConns=1 and MaxIdleConns=1 are required for in-memory sqlite:
// without them the pool drops idle connections, destroying the in-memory
// schema between calls and breaking migrations.
XORM: store.XORM{
MaxOpenConns: 1,
MaxIdleConns: 1,
},
})
require.NoError(t, err, "create in-memory store")
require.NoError(t, s.Ping(), "ping store")
require.NoError(t, s.Migrate(ctx, true), "migrate store")
t.Cleanup(func() { _ = s.Close() })
return s
}
// seedFixtures creates the minimal set of DB records every test needs:
// one Forge, one owner User, one Repo linked to both.
func seedFixtures(t *testing.T, s store.Store) *Fixtures {
t.Helper()
forge := &model.Forge{
Type: TestForgeType,
URL: "https://forge.example.test",
}
require.NoError(t, s.ForgeCreate(forge), "seed forge")
owner := &model.User{
ForgeID: forge.ID,
ForgeRemoteID: "1",
Login: "test-owner",
Email: "owner@example.test",
}
require.NoError(t, s.CreateUser(owner), "seed user")
repo := &model.Repo{
ForgeID: forge.ID,
ForgeRemoteID: "1",
UserID: owner.ID,
FullName: "test-owner/test-repo",
Owner: "test-owner",
Name: "test-repo",
Clone: "https://forge.example.test/test-owner/test-repo.git",
Branch: "main",
IsActive: true,
AllowPull: true,
}
require.NoError(t, s.CreateRepo(repo), "seed repo")
return &Fixtures{
Forge: forge,
Owner: owner,
Repo: repo,
}
}

241
e2e/setup/wait.go Normal file
View File

@@ -0,0 +1,241 @@
// Copyright 2026 Woodpecker Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build test
package setup
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.woodpecker-ci.org/woodpecker/v3/server/model"
"go.woodpecker-ci.org/woodpecker/v3/server/queue"
"go.woodpecker-ci.org/woodpecker/v3/server/store"
)
const (
defaultTimeout = 30 * time.Second
defaultRetry = 3
shortTimeout = 10 * time.Second
defaultInterval = 100 * time.Millisecond
)
// isTerminal returns true if the status is a final (non-running) state.
func isTerminal(s model.StatusValue) bool {
switch s {
case model.StatusSuccess, model.StatusFailure, model.StatusKilled,
model.StatusError, model.StatusDeclined, model.StatusCanceled:
return true
}
return false
}
// WaitForPipeline polls the store until the pipeline with the given ID reaches
// a terminal status, then returns it. Fails the test if timeout is exceeded.
func WaitForPipeline(t *testing.T, s store.Store, pipelineID int64) *model.Pipeline {
t.Helper()
return WaitForPipelineStatus(t, s, pipelineID, "", defaultTimeout)
}
// WaitForPipelineStatus polls until the pipeline reaches wantStatus (or any
// terminal status if wantStatus is empty). Fails the test on timeout.
func WaitForPipelineStatus(t *testing.T, s store.Store, pipelineID int64, wantStatus model.StatusValue, timeout time.Duration) *model.Pipeline {
t.Helper()
deadline := time.Now().Add(timeout)
for time.Now().Before(deadline) {
p, err := s.GetPipeline(pipelineID)
require.NoError(t, err, "get pipeline %d", pipelineID)
if wantStatus != "" {
if p.Status == wantStatus {
return p
}
} else if isTerminal(p.Status) {
return p
}
time.Sleep(defaultInterval)
}
p, _ := s.GetPipeline(pipelineID)
t.Fatalf("timeout waiting for pipeline %d: last status=%q (want %q)", pipelineID, p.Status, wantStatus)
return nil
}
// WaitForAgentRegistered polls until all provided agents appear in the store
// (by AgentID), then applies any deferred DB patches (e.g. OrgID).
// Pass every *AgentEnv returned by StartAgent before triggering pipelines.
func WaitForAgentRegistered(t *testing.T, s store.Store, agents ...*AgentEnv) {
t.Helper()
deadline := time.Now().Add(shortTimeout)
for time.Now().Before(deadline) {
allFound := true
for _, env := range agents {
if env.AgentID == 0 {
allFound = false
break
}
if _, err := s.AgentFind(env.AgentID); err != nil {
allFound = false
break
}
}
if allFound {
// Apply any deferred OrgID patches.
for _, env := range agents {
if env.requestOrgID == model.IDNotSet {
continue
}
agent, err := s.AgentFind(env.AgentID)
require.NoError(t, err, "find agent %d to patch OrgID", env.AgentID)
agent.OrgID = env.requestOrgID
require.NoError(t, s.AgentUpdate(agent),
"patch OrgID on agent %d", env.AgentID)
}
return
}
time.Sleep(defaultInterval)
}
t.Fatal("timeout: not all agents registered with the server")
}
// WaitForStep polls the store until a named step in the given pipeline reaches
// a terminal status. It returns the final step state. Fails the test on timeout.
func WaitForStep(t *testing.T, s store.Store, pipeline *model.Pipeline, stepName string) *model.Step {
t.Helper()
return WaitForStepStatus(t, s, pipeline, stepName, "", defaultTimeout)
}
// WaitForStepStatus polls until a named step reaches wantState (or any terminal
// state when wantState is empty). This is useful after a pipeline.Cancel() call
// where the agent sends its final step status asynchronously via gRPC Done(),
// independently of the pipeline itself reaching a terminal status.
func WaitForStepStatus(t *testing.T, s store.Store, pipeline *model.Pipeline, stepName string, wantState model.StatusValue, timeout time.Duration) *model.Step {
t.Helper()
deadline := time.Now().Add(timeout)
for time.Now().Before(deadline) {
steps, err := s.StepList(pipeline)
require.NoError(t, err, "list steps for pipeline %d", pipeline.ID)
for _, step := range steps {
if step.Name != stepName {
continue
}
if wantState != "" {
if step.State == wantState {
return step
}
} else if isTerminal(step.State) {
return step
}
}
time.Sleep(defaultInterval)
}
steps, _ := s.StepList(pipeline)
var lastState model.StatusValue
for _, step := range steps {
if step.Name == stepName {
lastState = step.State
break
}
}
if wantState != "" {
t.Fatalf("timeout waiting for step %q in pipeline %d to reach state %q: last state=%q",
stepName, pipeline.ID, wantState, lastState)
} else {
t.Fatalf("timeout waiting for step %q in pipeline %d to reach terminal state: last state=%q",
stepName, pipeline.ID, lastState)
}
return nil
}
// AssertWorkflowRanOnAgent asserts that the named workflow in the finished
// pipeline was executed by the given agent. Use this to verify label-based
// routing and org-agent preference.
func AssertWorkflowRanOnAgent(t *testing.T, s store.Store, pipeline *model.Pipeline, workflowName string, agent *AgentEnv) {
t.Helper()
workflows, err := s.WorkflowGetTree(pipeline)
require.NoError(t, err, "get workflow tree for pipeline %d", pipeline.ID)
for _, wf := range workflows {
if wf.Name == workflowName {
assert.Equalf(t, agent.AgentID, wf.AgentID,
"workflow %q should have run on agent %d (%s) but ran on agent %d",
workflowName, agent.AgentID, agent.name, wf.AgentID)
return
}
}
t.Errorf("workflow %q not found in pipeline %d", workflowName, pipeline.ID)
}
// WaitForWorkersReady polls the queue until at least minWorkers worker slots
// are active (i.e. agents have connected and are blocking on Poll). Call this
// after WaitForAgentRegistered and before pipeline.Create in tests that rely
// on specific routing: the org-id label is read from the DB at Poll time, so
// the org-agent must have started its poll loop *after* its OrgID has been
// patched — otherwise the global agent can win the race and steal the task
// before the org-agent advertises its exact org-id label.
func WaitForWorkersReady(t *testing.T, q queue.Queue, minWorkers int) {
t.Helper()
deadline := time.Now().Add(shortTimeout)
for time.Now().Before(deadline) {
info := q.Info(context.Background())
if info.Stats.Workers >= minWorkers {
return
}
time.Sleep(defaultInterval)
}
info := q.Info(context.Background())
t.Fatalf("timeout waiting for %d workers to be ready in queue: got %d", minWorkers, info.Stats.Workers)
}
// WaitForStepRunning polls the store until a named step in the pipeline with
// the given ID reaches StatusRunning. This is used before triggering a cancel
// so we know the dummy backend's sleepWithContext is genuinely blocking — if
// we cancel before the step is running, the step may finish with StatusSuccess
// before the cancel context propagates to WaitStep.
func WaitForStepRunning(t *testing.T, s store.Store, pipelineID int64, stepName string) {
t.Helper()
deadline := time.Now().Add(shortTimeout)
for time.Now().Before(deadline) {
p, err := s.GetPipeline(pipelineID)
require.NoError(t, err, "get pipeline %d", pipelineID)
steps, err := s.StepList(p)
require.NoError(t, err, "list steps for pipeline %d", pipelineID)
for _, step := range steps {
if step.Name == stepName && step.State == model.StatusRunning {
return
}
}
time.Sleep(defaultInterval)
}
t.Fatalf("timeout waiting for step %q in pipeline %d to reach StatusRunning", stepName, pipelineID)
}

View File

@@ -59,10 +59,12 @@ func (r *Runtime) Run(runnerCtx context.Context) error {
}
for _, stage := range r.spec.Stages {
stageChan := r.runStage(runnerCtx, stage.Steps)
select {
case <-r.ctx.Done():
<-stageChan
return pipeline_errors.ErrCancel
case err := <-r.runStage(runnerCtx, stage.Steps):
case err := <-stageChan:
if err != nil {
r.err.Set(err)
}

View File

@@ -57,6 +57,32 @@ func NewWoodpeckerServer(scheduler scheduler.Scheduler, logger logging.Log, stor
return &WoodpeckerServer{peer: peer}
}
// NewTestWoodpeckerServer creates a WoodpeckerServer for e2e tests.
// It is using a caller-supplied prometheus registry.
// Use this in tests to avoid "duplicate metrics collector registration" panics when the server is created multiple times.
// (promauto in NewWoodpeckerServer registers into the global default registry, which panics on duplicate names).
func NewTestWoodpeckerServer(scheduler scheduler.Scheduler, logger logging.Log, store store.Store, registry *prometheus.Registry) proto.WoodpeckerServer {
factory := promauto.With(registry)
pipelineTime := factory.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "woodpecker",
Name: "pipeline_time",
Help: "Pipeline time.",
}, []string{"repo", "branch", "status", "pipeline"})
pipelineCount := factory.NewCounterVec(prometheus.CounterOpts{
Namespace: "woodpecker",
Name: "pipeline_count",
Help: "Pipeline count.",
}, []string{"repo", "branch", "status", "pipeline"})
peer := RPC{
store: store,
scheduler: scheduler,
logger: logger,
pipelineTime: pipelineTime,
pipelineCount: pipelineCount,
}
return &WoodpeckerServer{peer: peer}
}
// Version returns the server- & grpc-version.
func (s *WoodpeckerServer) Version(_ context.Context, _ *proto.Empty) (*proto.VersionResponse, error) {
return &proto.VersionResponse{