Compare commits

nektos/v0. ... v0.243.0 (29 commits)

Commits:
1dda0aec69
49e204166d
a36b003f7a
0671d16694
881dbdb81b
1252e551b8
c614d8b96c
84b6649b8b
dca7801682
4b99ed8916
e46ede1b17
1ba076d321
0efa2d5e63
0a37a03f2e
88cce47022
7920109e89
4cacc14d22
c6b8548d35
64cae197a4
7fb84a54a8
70cc6c017b
d7e9ea75fc
b9c20dcaa4
97629ae8af
b9a9812ad9
113c3e98fb
7815eec33b
c051090583
0fa1fe0310
.gitignore (vendored, 1 change)

@@ -31,3 +31,4 @@ coverage.txt

# megalinter
report/
act
LICENSE (1 change)

@@ -1,5 +1,6 @@
MIT License

Copyright (c) 2022 The Gitea Authors
Copyright (c) 2019

Permission is hereby granted, free of charge, to any person obtaining a copy
README.md (25 changes)

@@ -1,3 +1,28 @@
## Forking rules

This is a custom fork of [nektos/act](https://github.com/nektos/act/), for the purpose of serving [act_runner](https://gitea.com/gitea/act_runner).

It can no longer be used as a command line tool, only as a library.

It's a soft fork, which means that it will track the latest release of nektos/act.

Branches:

- `main`: the default branch; contains custom changes, based on the latest release (not the latest master) of nektos/act.
- `nektos/master`: mirror of the master branch of nektos/act.

Tags:

- `nektos/vX.Y.Z`: mirror of `vX.Y.Z` of [nektos/act](https://github.com/nektos/act/).
- `vX.YZ.*`: based on `nektos/vX.Y.Z`, contains custom changes.
  - Examples:
    - `nektos/v0.2.23` -> `v0.223.*`
    - `nektos/v0.3.1` -> `v0.301.*`, not ~~`v0.31.*`~~
    - `nektos/v0.10.1` -> `v0.1001.*`, not ~~`v0.101.*`~~
    - `nektos/v0.3.100` -> not ~~`v0.3100.*`~~; this is unlikely to happen, and if it does, we can find a way to handle it.

---



# Overview [](https://github.com/nektos/act/actions) [](https://gitter.im/nektos/act?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [](https://goreportcard.com/report/github.com/nektos/act) [](https://github.com/jonico/awesome-runners)
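The tag-mapping rule in the forking rules above is mechanical: the upstream minor and patch numbers are folded into a single minor number, with the patch zero-padded to two digits. A minimal sketch of that rule (not part of this diff; the function name is invented for illustration):

```go
package main

import "fmt"

// forkTagPrefix maps an upstream tag like v0.2.23 to the fork's tag prefix
// v0.223 by folding minor*100+patch into one minor number.
// Hypothetical helper, shown only to illustrate the naming rule above.
func forkTagPrefix(major, minor, patch int) string {
    return fmt.Sprintf("v%d.%d", major, minor*100+patch)
}

func main() {
    fmt.Println(forkTagPrefix(0, 2, 23)) // v0.223
    fmt.Println(forkTagPrefix(0, 3, 1))  // v0.301
    fmt.Println(forkTagPrefix(0, 10, 1)) // v0.1001
}
```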
@@ -25,3 +25,24 @@ func Logger(ctx context.Context) logrus.FieldLogger {
func WithLogger(ctx context.Context, logger logrus.FieldLogger) context.Context {
    return context.WithValue(ctx, loggerContextKeyVal, logger)
}

type loggerHookKey string

const loggerHookKeyVal = loggerHookKey("logrus.Hook")

// LoggerHook returns the appropriate logger hook for current context
// the hook affects job logger, not global logger
func LoggerHook(ctx context.Context) logrus.Hook {
    val := ctx.Value(loggerHookKeyVal)
    if val != nil {
        if hook, ok := val.(logrus.Hook); ok {
            return hook
        }
    }
    return nil
}

// WithLoggerHook adds a value to the context for the logger hook
func WithLoggerHook(ctx context.Context, hook logrus.Hook) context.Context {
    return context.WithValue(ctx, loggerHookKeyVal, hook)
}
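A sketch of how a caller might use the hook plumbing added above: attach a logrus hook to the context before running a job, and the job logger built later (see the WithJobLogger change further down) picks it up. The hook type here is hypothetical.

```go
package main

import (
    "context"

    "github.com/nektos/act/pkg/common"
    "github.com/sirupsen/logrus"
)

// collectHook is a hypothetical hook that collects job log lines in memory.
type collectHook struct{ lines []string }

func (h *collectHook) Levels() []logrus.Level { return logrus.AllLevels }

func (h *collectHook) Fire(e *logrus.Entry) error {
    h.lines = append(h.lines, e.Message)
    return nil
}

func main() {
    hook := &collectHook{}
    ctx := common.WithLoggerHook(context.Background(), hook)

    // Later, the code that builds the job logger can retrieve it:
    if h := common.LoggerHook(ctx); h != nil {
        _ = h // e.g. logger.AddHook(h), as done in WithJobLogger
    }
}
```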
@@ -26,6 +26,9 @@ type NewContainerInput struct {
    UsernsMode string
    Platform   string
    Options    string

    // Gitea specific
    AutoRemove bool
}

// FileEntry is a file to copy to a container
@@ -434,6 +434,7 @@ func (cr *containerReference) create(capAdd []string, capDrop []string) common.E
        NetworkMode: container.NetworkMode(input.NetworkMode),
        Privileged:  input.Privileged,
        UsernsMode:  container.UsernsMode(input.UsernsMode),
        AutoRemove:  input.AutoRemove,
    }
    logger.Debugf("Common container.HostConfig ==> %+v", hostConfig)
pkg/jobparser/evaluator.go (new file)

@@ -0,0 +1,185 @@
package jobparser

import (
    "fmt"
    "regexp"
    "strings"

    "github.com/nektos/act/pkg/exprparser"
    "gopkg.in/yaml.v3"
)

// ExpressionEvaluator is copied from runner.expressionEvaluator,
// to avoid unnecessary dependencies
type ExpressionEvaluator struct {
    interpreter exprparser.Interpreter
}

func NewExpressionEvaluator(interpreter exprparser.Interpreter) *ExpressionEvaluator {
    return &ExpressionEvaluator{interpreter: interpreter}
}

func (ee ExpressionEvaluator) evaluate(in string, defaultStatusCheck exprparser.DefaultStatusCheck) (interface{}, error) {
    evaluated, err := ee.interpreter.Evaluate(in, defaultStatusCheck)

    return evaluated, err
}

func (ee ExpressionEvaluator) evaluateScalarYamlNode(node *yaml.Node) error {
    var in string
    if err := node.Decode(&in); err != nil {
        return err
    }
    if !strings.Contains(in, "${{") || !strings.Contains(in, "}}") {
        return nil
    }
    expr, _ := rewriteSubExpression(in, false)
    res, err := ee.evaluate(expr, exprparser.DefaultStatusCheckNone)
    if err != nil {
        return err
    }
    return node.Encode(res)
}

func (ee ExpressionEvaluator) evaluateMappingYamlNode(node *yaml.Node) error {
    // GitHub has this undocumented feature to merge maps, called insert directive
    insertDirective := regexp.MustCompile(`\${{\s*insert\s*}}`)
    for i := 0; i < len(node.Content)/2; {
        k := node.Content[i*2]
        v := node.Content[i*2+1]
        if err := ee.EvaluateYamlNode(v); err != nil {
            return err
        }
        var sk string
        // Merge the nested map of the insert directive
        if k.Decode(&sk) == nil && insertDirective.MatchString(sk) {
            node.Content = append(append(node.Content[:i*2], v.Content...), node.Content[(i+1)*2:]...)
            i += len(v.Content) / 2
        } else {
            if err := ee.EvaluateYamlNode(k); err != nil {
                return err
            }
            i++
        }
    }
    return nil
}

func (ee ExpressionEvaluator) evaluateSequenceYamlNode(node *yaml.Node) error {
    for i := 0; i < len(node.Content); {
        v := node.Content[i]
        // Preserve nested sequences
        wasseq := v.Kind == yaml.SequenceNode
        if err := ee.EvaluateYamlNode(v); err != nil {
            return err
        }
        // GitHub has this undocumented feature to merge sequences / arrays
        // We have a nested sequence via evaluation, merge the arrays
        if v.Kind == yaml.SequenceNode && !wasseq {
            node.Content = append(append(node.Content[:i], v.Content...), node.Content[i+1:]...)
            i += len(v.Content)
        } else {
            i++
        }
    }
    return nil
}

func (ee ExpressionEvaluator) EvaluateYamlNode(node *yaml.Node) error {
    switch node.Kind {
    case yaml.ScalarNode:
        return ee.evaluateScalarYamlNode(node)
    case yaml.MappingNode:
        return ee.evaluateMappingYamlNode(node)
    case yaml.SequenceNode:
        return ee.evaluateSequenceYamlNode(node)
    default:
        return nil
    }
}

func (ee ExpressionEvaluator) Interpolate(in string) string {
    if !strings.Contains(in, "${{") || !strings.Contains(in, "}}") {
        return in
    }

    expr, _ := rewriteSubExpression(in, true)
    evaluated, err := ee.evaluate(expr, exprparser.DefaultStatusCheckNone)
    if err != nil {
        return ""
    }

    value, ok := evaluated.(string)
    if !ok {
        panic(fmt.Sprintf("Expression %s did not evaluate to a string", expr))
    }

    return value
}

func escapeFormatString(in string) string {
    return strings.ReplaceAll(strings.ReplaceAll(in, "{", "{{"), "}", "}}")
}

func rewriteSubExpression(in string, forceFormat bool) (string, error) {
    if !strings.Contains(in, "${{") || !strings.Contains(in, "}}") {
        return in, nil
    }

    strPattern := regexp.MustCompile("(?:''|[^'])*'")
    pos := 0
    exprStart := -1
    strStart := -1
    var results []string
    formatOut := ""
    for pos < len(in) {
        if strStart > -1 {
            matches := strPattern.FindStringIndex(in[pos:])
            if matches == nil {
                panic("unclosed string.")
            }

            strStart = -1
            pos += matches[1]
        } else if exprStart > -1 {
            exprEnd := strings.Index(in[pos:], "}}")
            strStart = strings.Index(in[pos:], "'")

            if exprEnd > -1 && strStart > -1 {
                if exprEnd < strStart {
                    strStart = -1
                } else {
                    exprEnd = -1
                }
            }

            if exprEnd > -1 {
                formatOut += fmt.Sprintf("{%d}", len(results))
                results = append(results, strings.TrimSpace(in[exprStart:pos+exprEnd]))
                pos += exprEnd + 2
                exprStart = -1
            } else if strStart > -1 {
                pos += strStart + 1
            } else {
                panic("unclosed expression.")
            }
        } else {
            exprStart = strings.Index(in[pos:], "${{")
            if exprStart != -1 {
                formatOut += escapeFormatString(in[pos : pos+exprStart])
                exprStart = pos + exprStart + 3
                pos = exprStart
            } else {
                formatOut += escapeFormatString(in[pos:])
                pos = len(in)
            }
        }
    }

    if len(results) == 1 && formatOut == "{0}" && !forceFormat {
        return in, nil
    }

    out := fmt.Sprintf("format('%s', %s)", strings.ReplaceAll(formatOut, "'", "''"), strings.Join(results, ", "))
    return out, nil
}
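For context, the `${{ insert }}` directive handled in evaluateMappingYamlNode merges the entries of the expression's map value into the surrounding map. A hedged sketch of the kind of YAML this is meant to cover, kept as a Go string constant in the style of the tests below; the env values are invented:

```go
// exampleInsertEnv is a hypothetical job env using GitHub's undocumented
// insert directive: after evaluation, the keys of the expression's map value
// (here BAZ) are merged into the surrounding env map alongside FOO.
const exampleInsertEnv = `
env:
  FOO: bar
  ${{ insert }}: ${{ fromJSON('{"BAZ":"qux"}') }}
`
```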
pkg/jobparser/interpeter.go (new file)

@@ -0,0 +1,80 @@
package jobparser

import (
    "github.com/nektos/act/pkg/exprparser"
    "github.com/nektos/act/pkg/model"
    "gopkg.in/yaml.v3"
)

// NewInterpeter returns an interpeter used in the server,
// need github, needs, strategy, matrix, inputs context only,
// see https://docs.github.com/en/actions/learn-github-actions/contexts#context-availability
func NewInterpeter(
    jobID string,
    job *model.Job,
    matrix map[string]interface{},
    gitCtx *model.GithubContext,
    results map[string]*JobResult,
) exprparser.Interpreter {
    strategy := make(map[string]interface{})
    if job.Strategy != nil {
        strategy["fail-fast"] = job.Strategy.FailFast
        strategy["max-parallel"] = job.Strategy.MaxParallel
    }

    run := &model.Run{
        Workflow: &model.Workflow{
            Jobs: map[string]*model.Job{},
        },
        JobID: jobID,
    }
    for id, result := range results {
        need := yaml.Node{}
        _ = need.Encode(result.Needs)
        run.Workflow.Jobs[id] = &model.Job{
            RawNeeds: need,
            Result:   result.Result,
            Outputs:  result.Outputs,
        }
    }

    jobs := run.Workflow.Jobs
    jobNeeds := run.Job().Needs()

    using := map[string]map[string]map[string]string{}
    for _, need := range jobNeeds {
        if v, ok := jobs[need]; ok {
            using[need] = map[string]map[string]string{
                "outputs": v.Outputs,
            }
        }
    }

    ee := &exprparser.EvaluationEnvironment{
        Github:   gitCtx,
        Env:      nil, // no need
        Job:      nil, // no need
        Steps:    nil, // no need
        Runner:   nil, // no need
        Secrets:  nil, // no need
        Strategy: strategy,
        Matrix:   matrix,
        Needs:    using,
        Inputs:   nil, // not supported yet
    }

    config := exprparser.Config{
        Run:        run,
        WorkingDir: "", // WorkingDir is used for the function hashFiles, but it's not needed in the server
        Context:    "job",
    }

    return exprparser.NewInterpeter(ee, config)
}

// JobResult is the minimum requirement of job results for Interpeter
type JobResult struct {
    Needs   []string
    Result  string
    Outputs map[string]string
}
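A sketch of how the two pieces above fit together: build an interpreter for one job and one matrix entry, wrap it in an ExpressionEvaluator, and interpolate a runs-on label. The job, context and matrix values here are placeholders.

```go
package main

import (
    "fmt"

    "github.com/nektos/act/pkg/jobparser"
    "github.com/nektos/act/pkg/model"
)

func main() {
    // job would normally come from a parsed workflow, e.g. origin.GetJob("job1").
    job := &model.Job{}

    matrix := map[string]interface{}{"os": "ubuntu-22.04"}
    gitCtx := &model.GithubContext{Ref: "refs/heads/main"} // placeholder values
    results := map[string]*jobparser.JobResult{
        "job1": {}, // the job itself; no needs and no outputs yet
    }

    interpreter := jobparser.NewInterpeter("job1", job, matrix, gitCtx, results)
    ee := jobparser.NewExpressionEvaluator(interpreter)

    fmt.Println(ee.Interpolate("${{ matrix.os }}")) // ubuntu-22.04
}
```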
pkg/jobparser/jobparser.go (new file)

@@ -0,0 +1,153 @@
package jobparser

import (
    "bytes"
    "fmt"
    "sort"
    "strings"

    "gopkg.in/yaml.v3"

    "github.com/nektos/act/pkg/model"
)

func Parse(content []byte, options ...ParseOption) ([]*SingleWorkflow, error) {
    origin, err := model.ReadWorkflow(bytes.NewReader(content))
    if err != nil {
        return nil, fmt.Errorf("model.ReadWorkflow: %w", err)
    }

    workflow := &SingleWorkflow{}
    if err := yaml.Unmarshal(content, workflow); err != nil {
        return nil, fmt.Errorf("yaml.Unmarshal: %w", err)
    }

    pc := &parseContext{}
    for _, o := range options {
        o(pc)
    }
    results := map[string]*JobResult{}
    for id, job := range origin.Jobs {
        results[id] = &JobResult{
            Needs:   job.Needs(),
            Result:  pc.jobResults[id],
            Outputs: nil, // not supported yet
        }
    }

    var ret []*SingleWorkflow
    for id, job := range workflow.Jobs {
        for _, matrix := range getMatrixes(origin.GetJob(id)) {
            job := job.Clone()
            if job.Name == "" {
                job.Name = id
            }
            job.Name = nameWithMatrix(job.Name, matrix)
            job.Strategy.RawMatrix = encodeMatrix(matrix)
            evaluator := NewExpressionEvaluator(NewInterpeter(id, origin.GetJob(id), matrix, pc.gitContext, results))
            runsOn := origin.GetJob(id).RunsOn()
            for i, v := range runsOn {
                runsOn[i] = evaluator.Interpolate(v)
            }
            job.RawRunsOn = encodeRunsOn(runsOn)
            job.EraseNeeds() // there will be only one job in SingleWorkflow, it cannot have needs
            ret = append(ret, &SingleWorkflow{
                Name:     workflow.Name,
                RawOn:    workflow.RawOn,
                Env:      workflow.Env,
                Jobs:     map[string]*Job{id: job},
                Defaults: workflow.Defaults,
            })
        }
    }
    sortWorkflows(ret)
    return ret, nil
}

func WithJobResults(results map[string]string) ParseOption {
    return func(c *parseContext) {
        c.jobResults = results
    }
}

func WithGitContext(context *model.GithubContext) ParseOption {
    return func(c *parseContext) {
        c.gitContext = context
    }
}

type parseContext struct {
    jobResults map[string]string
    gitContext *model.GithubContext
}

type ParseOption func(c *parseContext)

func getMatrixes(job *model.Job) []map[string]interface{} {
    ret := job.GetMatrixes()
    sort.Slice(ret, func(i, j int) bool {
        return matrixName(ret[i]) < matrixName(ret[j])
    })
    return ret
}

func encodeMatrix(matrix map[string]interface{}) yaml.Node {
    if len(matrix) == 0 {
        return yaml.Node{}
    }
    value := map[string][]interface{}{}
    for k, v := range matrix {
        value[k] = []interface{}{v}
    }
    node := yaml.Node{}
    _ = node.Encode(value)
    return node
}

func encodeRunsOn(runsOn []string) yaml.Node {
    node := yaml.Node{}
    if len(runsOn) == 1 {
        _ = node.Encode(runsOn[0])
    } else {
        _ = node.Encode(runsOn)
    }
    return node
}

func nameWithMatrix(name string, m map[string]interface{}) string {
    if len(m) == 0 {
        return name
    }

    return name + " " + matrixName(m)
}

func matrixName(m map[string]interface{}) string {
    ks := make([]string, 0, len(m))
    for k := range m {
        ks = append(ks, k)
    }
    sort.Strings(ks)
    vs := make([]string, 0, len(m))
    for _, v := range ks {
        vs = append(vs, fmt.Sprint(m[v]))
    }

    return fmt.Sprintf("(%s)", strings.Join(vs, ", "))
}

func sortWorkflows(wfs []*SingleWorkflow) {
    sort.Slice(wfs, func(i, j int) bool {
        ki := ""
        for k := range wfs[i].Jobs {
            ki = k
            break
        }
        kj := ""
        for k := range wfs[j].Jobs {
            kj = k
            break
        }
        return ki < kj
    })
}
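A hedged sketch of calling Parse with the options defined above; the workflow content and context values are invented for illustration. Parse returns one single-job workflow per job and matrix entry, as the test file below also demonstrates.

```go
package main

import (
    "fmt"

    "github.com/nektos/act/pkg/jobparser"
    "github.com/nektos/act/pkg/model"
)

func main() {
    content := []byte(`
name: test
jobs:
  job1:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-22.04, ubuntu-20.04]
    steps:
      - run: uname -a
`)

    workflows, err := jobparser.Parse(content,
        jobparser.WithGitContext(&model.GithubContext{Ref: "refs/heads/main"}), // placeholder context
        jobparser.WithJobResults(map[string]string{}),                          // no finished jobs yet
    )
    if err != nil {
        panic(err)
    }
    for _, wf := range workflows {
        id, job := wf.Job()
        fmt.Println(id, job.Name) // one single-job workflow per matrix entry
    }
}
```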
pkg/jobparser/jobparser_test.go (new file)

@@ -0,0 +1,65 @@
package jobparser

import (
    "embed"
    "path/filepath"
    "strings"
    "testing"

    "github.com/stretchr/testify/assert"

    "github.com/stretchr/testify/require"

    "gopkg.in/yaml.v3"
)

//go:embed testdata
var f embed.FS

func TestParse(t *testing.T) {
    tests := []struct {
        name    string
        options []ParseOption
        wantErr bool
    }{
        {
            name:    "multiple_jobs",
            options: nil,
            wantErr: false,
        },
        {
            name:    "multiple_matrix",
            options: nil,
            wantErr: false,
        },
        {
            name:    "has_needs",
            options: nil,
            wantErr: false,
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            content, err := f.ReadFile(filepath.Join("testdata", tt.name+".in.yaml"))
            require.NoError(t, err)
            want, err := f.ReadFile(filepath.Join("testdata", tt.name+".out.yaml"))
            require.NoError(t, err)
            got, err := Parse(content, tt.options...)
            if tt.wantErr {
                require.Error(t, err)
            }
            require.NoError(t, err)

            builder := &strings.Builder{}
            for _, v := range got {
                if builder.Len() > 0 {
                    builder.WriteString("---\n")
                }
                encoder := yaml.NewEncoder(builder)
                encoder.SetIndent(2)
                _ = encoder.Encode(v)
            }
            assert.Equal(t, string(want), builder.String())
        })
    }
}
pkg/jobparser/model.go (new file)

@@ -0,0 +1,207 @@
package jobparser

import (
    "fmt"

    "github.com/nektos/act/pkg/model"
    "gopkg.in/yaml.v3"
)

// SingleWorkflow is a workflow with single job and single matrix
type SingleWorkflow struct {
    Name     string            `yaml:"name,omitempty"`
    RawOn    yaml.Node         `yaml:"on,omitempty"`
    Env      map[string]string `yaml:"env,omitempty"`
    Jobs     map[string]*Job   `yaml:"jobs,omitempty"`
    Defaults Defaults          `yaml:"defaults,omitempty"`
}

func (w *SingleWorkflow) Job() (string, *Job) {
    for k, v := range w.Jobs {
        return k, v
    }
    return "", nil
}

func (w *SingleWorkflow) Marshal() ([]byte, error) {
    return yaml.Marshal(w)
}

type Job struct {
    Name           string                    `yaml:"name,omitempty"`
    RawNeeds       yaml.Node                 `yaml:"needs,omitempty"`
    RawRunsOn      yaml.Node                 `yaml:"runs-on,omitempty"`
    Env            yaml.Node                 `yaml:"env,omitempty"`
    If             yaml.Node                 `yaml:"if,omitempty"`
    Steps          []*Step                   `yaml:"steps,omitempty"`
    TimeoutMinutes string                    `yaml:"timeout-minutes,omitempty"`
    Services       map[string]*ContainerSpec `yaml:"services,omitempty"`
    Strategy       Strategy                  `yaml:"strategy,omitempty"`
    RawContainer   yaml.Node                 `yaml:"container,omitempty"`
    Defaults       Defaults                  `yaml:"defaults,omitempty"`
    Outputs        map[string]string         `yaml:"outputs,omitempty"`
    Uses           string                    `yaml:"uses,omitempty"`
}

func (j *Job) Clone() *Job {
    if j == nil {
        return nil
    }
    return &Job{
        Name:           j.Name,
        RawNeeds:       j.RawNeeds,
        RawRunsOn:      j.RawRunsOn,
        Env:            j.Env,
        If:             j.If,
        Steps:          j.Steps,
        TimeoutMinutes: j.TimeoutMinutes,
        Services:       j.Services,
        Strategy:       j.Strategy,
        RawContainer:   j.RawContainer,
        Defaults:       j.Defaults,
        Outputs:        j.Outputs,
        Uses:           j.Uses,
    }
}

func (j *Job) Needs() []string {
    return (&model.Job{RawNeeds: j.RawNeeds}).Needs()
}

func (j *Job) EraseNeeds() {
    j.RawNeeds = yaml.Node{}
}

func (j *Job) RunsOn() []string {
    return (&model.Job{RawRunsOn: j.RawRunsOn}).RunsOn()
}

type Step struct {
    ID               string            `yaml:"id,omitempty"`
    If               yaml.Node         `yaml:"if,omitempty"`
    Name             string            `yaml:"name,omitempty"`
    Uses             string            `yaml:"uses,omitempty"`
    Run              string            `yaml:"run,omitempty"`
    WorkingDirectory string            `yaml:"working-directory,omitempty"`
    Shell            string            `yaml:"shell,omitempty"`
    Env              yaml.Node         `yaml:"env,omitempty"`
    With             map[string]string `yaml:"with,omitempty"`
    ContinueOnError  bool              `yaml:"continue-on-error,omitempty"`
    TimeoutMinutes   string            `yaml:"timeout-minutes,omitempty"`
}

// String gets the name of step
func (s *Step) String() string {
    return (&model.Step{
        ID:   s.ID,
        Name: s.Name,
        Uses: s.Uses,
        Run:  s.Run,
    }).String()
}

type ContainerSpec struct {
    Image       string            `yaml:"image,omitempty"`
    Env         map[string]string `yaml:"env,omitempty"`
    Ports       []string          `yaml:"ports,omitempty"`
    Volumes     []string          `yaml:"volumes,omitempty"`
    Options     string            `yaml:"options,omitempty"`
    Credentials map[string]string `yaml:"credentials,omitempty"`
}

type Strategy struct {
    FailFastString    string    `yaml:"fail-fast,omitempty"`
    MaxParallelString string    `yaml:"max-parallel,omitempty"`
    RawMatrix         yaml.Node `yaml:"matrix,omitempty"`
}

type Defaults struct {
    Run RunDefaults `yaml:"run,omitempty"`
}

type RunDefaults struct {
    Shell            string `yaml:"shell,omitempty"`
    WorkingDirectory string `yaml:"working-directory,omitempty"`
}

type Event struct {
    Name string
    Acts map[string][]string
}

func ParseRawOn(rawOn *yaml.Node) ([]*Event, error) {
    switch rawOn.Kind {
    case yaml.ScalarNode:
        var val string
        err := rawOn.Decode(&val)
        if err != nil {
            return nil, err
        }
        return []*Event{
            {Name: val},
        }, nil
    case yaml.SequenceNode:
        var val []interface{}
        err := rawOn.Decode(&val)
        if err != nil {
            return nil, err
        }
        res := make([]*Event, 0, len(val))
        for _, v := range val {
            switch t := v.(type) {
            case string:
                res = append(res, &Event{Name: t})
            default:
                return nil, fmt.Errorf("invalid type %T", t)
            }
        }
        return res, nil
    case yaml.MappingNode:
        var val map[string]interface{}
        err := rawOn.Decode(&val)
        if err != nil {
            return nil, err
        }
        res := make([]*Event, 0, len(val))
        for k, v := range val {
            switch t := v.(type) {
            case string:
                res = append(res, &Event{
                    Name: k,
                    Acts: map[string][]string{},
                })
            case []string:
                res = append(res, &Event{
                    Name: k,
                    Acts: map[string][]string{},
                })
            case map[string]interface{}:
                acts := make(map[string][]string, len(t))
                for act, branches := range t {
                    switch b := branches.(type) {
                    case string:
                        acts[act] = []string{b}
                    case []string:
                        acts[act] = b
                    case []interface{}:
                        acts[act] = make([]string, len(b))
                        for i, v := range b {
                            acts[act][i] = v.(string)
                        }
                    default:
                        return nil, fmt.Errorf("unknown on type: %#v", branches)
                    }
                }
                res = append(res, &Event{
                    Name: k,
                    Acts: acts,
                })
            default:
                return nil, fmt.Errorf("unknown on type: %#v", v)
            }
        }
        return res, nil
    default:
        return nil, fmt.Errorf("unknown on type: %v", rawOn.Kind)
    }
}
184
pkg/jobparser/model_test.go
Normal file
184
pkg/jobparser/model_test.go
Normal file
@@ -0,0 +1,184 @@
|
||||
package jobparser
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/nektos/act/pkg/model"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestParseRawOn(t *testing.T) {
|
||||
kases := []struct {
|
||||
input string
|
||||
result []*Event
|
||||
}{
|
||||
{
|
||||
input: "on: issue_comment",
|
||||
result: []*Event{
|
||||
{
|
||||
Name: "issue_comment",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: "on:\n push",
|
||||
result: []*Event{
|
||||
{
|
||||
Name: "push",
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
input: "on:\n - push\n - pull_request",
|
||||
result: []*Event{
|
||||
{
|
||||
Name: "push",
|
||||
},
|
||||
{
|
||||
Name: "pull_request",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: "on:\n push:\n branches:\n - master",
|
||||
result: []*Event{
|
||||
{
|
||||
Name: "push",
|
||||
Acts: map[string][]string{
|
||||
"branches": {
|
||||
"master",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: "on:\n branch_protection_rule:\n types: [created, deleted]",
|
||||
result: []*Event{
|
||||
{
|
||||
Name: "branch_protection_rule",
|
||||
Acts: map[string][]string{
|
||||
"types": {
|
||||
"created",
|
||||
"deleted",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: "on:\n project:\n types: [created, deleted]\n milestone:\n types: [opened, deleted]",
|
||||
result: []*Event{
|
||||
{
|
||||
Name: "project",
|
||||
Acts: map[string][]string{
|
||||
"types": {
|
||||
"created",
|
||||
"deleted",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "milestone",
|
||||
Acts: map[string][]string{
|
||||
"types": {
|
||||
"opened",
|
||||
"deleted",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: "on:\n pull_request:\n types:\n - opened\n branches:\n - 'releases/**'",
|
||||
result: []*Event{
|
||||
{
|
||||
Name: "pull_request",
|
||||
Acts: map[string][]string{
|
||||
"types": {
|
||||
"opened",
|
||||
},
|
||||
"branches": {
|
||||
"releases/**",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: "on:\n push:\n branches:\n - main\n pull_request:\n types:\n - opened\n branches:\n - '**'",
|
||||
result: []*Event{
|
||||
{
|
||||
Name: "push",
|
||||
Acts: map[string][]string{
|
||||
"branches": {
|
||||
"main",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "pull_request",
|
||||
Acts: map[string][]string{
|
||||
"types": {
|
||||
"opened",
|
||||
},
|
||||
"branches": {
|
||||
"**",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: "on:\n push:\n branches:\n - 'main'\n - 'releases/**'",
|
||||
result: []*Event{
|
||||
{
|
||||
Name: "push",
|
||||
Acts: map[string][]string{
|
||||
"branches": {
|
||||
"main",
|
||||
"releases/**",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: "on:\n push:\n tags:\n - v1.**",
|
||||
result: []*Event{
|
||||
{
|
||||
Name: "push",
|
||||
Acts: map[string][]string{
|
||||
"tags": {
|
||||
"v1.**",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: "on: [pull_request, workflow_dispatch]",
|
||||
result: []*Event{
|
||||
{
|
||||
Name: "pull_request",
|
||||
},
|
||||
{
|
||||
Name: "workflow_dispatch",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, kase := range kases {
|
||||
t.Run(kase.input, func(t *testing.T) {
|
||||
origin, err := model.ReadWorkflow(strings.NewReader(kase.input))
|
||||
assert.NoError(t, err)
|
||||
|
||||
events, err := ParseRawOn(&origin.RawOn)
|
||||
assert.NoError(t, err)
|
||||
assert.EqualValues(t, kase.result, events, fmt.Sprintf("%#v", events))
|
||||
})
|
||||
}
|
||||
}
|
16
pkg/jobparser/testdata/has_needs.in.yaml
vendored
Normal file
16
pkg/jobparser/testdata/has_needs.in.yaml
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
name: test
|
||||
jobs:
|
||||
job1:
|
||||
runs-on: linux
|
||||
steps:
|
||||
- run: uname -a
|
||||
job2:
|
||||
runs-on: linux
|
||||
steps:
|
||||
- run: uname -a
|
||||
needs: job1
|
||||
job3:
|
||||
runs-on: linux
|
||||
steps:
|
||||
- run: uname -a
|
||||
needs: [job1, job2]
|
23
pkg/jobparser/testdata/has_needs.out.yaml
vendored
Normal file
23
pkg/jobparser/testdata/has_needs.out.yaml
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
name: test
|
||||
jobs:
|
||||
job1:
|
||||
name: job1
|
||||
runs-on: linux
|
||||
steps:
|
||||
- run: uname -a
|
||||
---
|
||||
name: test
|
||||
jobs:
|
||||
job2:
|
||||
name: job2
|
||||
runs-on: linux
|
||||
steps:
|
||||
- run: uname -a
|
||||
---
|
||||
name: test
|
||||
jobs:
|
||||
job3:
|
||||
name: job3
|
||||
runs-on: linux
|
||||
steps:
|
||||
- run: uname -a
|
14
pkg/jobparser/testdata/multiple_jobs.in.yaml
vendored
Normal file
14
pkg/jobparser/testdata/multiple_jobs.in.yaml
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
name: test
|
||||
jobs:
|
||||
job1:
|
||||
runs-on: linux
|
||||
steps:
|
||||
- run: uname -a && go version
|
||||
job2:
|
||||
runs-on: linux
|
||||
steps:
|
||||
- run: uname -a && go version
|
||||
job3:
|
||||
runs-on: linux
|
||||
steps:
|
||||
- run: uname -a && go version
|
23
pkg/jobparser/testdata/multiple_jobs.out.yaml
vendored
Normal file
23
pkg/jobparser/testdata/multiple_jobs.out.yaml
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
name: test
|
||||
jobs:
|
||||
job1:
|
||||
name: job1
|
||||
runs-on: linux
|
||||
steps:
|
||||
- run: uname -a && go version
|
||||
---
|
||||
name: test
|
||||
jobs:
|
||||
job2:
|
||||
name: job2
|
||||
runs-on: linux
|
||||
steps:
|
||||
- run: uname -a && go version
|
||||
---
|
||||
name: test
|
||||
jobs:
|
||||
job3:
|
||||
name: job3
|
||||
runs-on: linux
|
||||
steps:
|
||||
- run: uname -a && go version
|
13
pkg/jobparser/testdata/multiple_matrix.in.yaml
vendored
Normal file
13
pkg/jobparser/testdata/multiple_matrix.in.yaml
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
name: test
|
||||
jobs:
|
||||
job1:
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ubuntu-22.04, ubuntu-20.04]
|
||||
version: [1.17, 1.18, 1.19]
|
||||
runs-on: ${{ matrix.os }}
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: ${{ matrix.version }}
|
||||
- run: uname -a && go version
|
101
pkg/jobparser/testdata/multiple_matrix.out.yaml
vendored
Normal file
101
pkg/jobparser/testdata/multiple_matrix.out.yaml
vendored
Normal file
@@ -0,0 +1,101 @@
|
||||
name: test
|
||||
jobs:
|
||||
job1:
|
||||
name: job1 (ubuntu-20.04, 1.17)
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: ${{ matrix.version }}
|
||||
- run: uname -a && go version
|
||||
strategy:
|
||||
matrix:
|
||||
os:
|
||||
- ubuntu-20.04
|
||||
version:
|
||||
- 1.17
|
||||
---
|
||||
name: test
|
||||
jobs:
|
||||
job1:
|
||||
name: job1 (ubuntu-20.04, 1.18)
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: ${{ matrix.version }}
|
||||
- run: uname -a && go version
|
||||
strategy:
|
||||
matrix:
|
||||
os:
|
||||
- ubuntu-20.04
|
||||
version:
|
||||
- 1.18
|
||||
---
|
||||
name: test
|
||||
jobs:
|
||||
job1:
|
||||
name: job1 (ubuntu-20.04, 1.19)
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: ${{ matrix.version }}
|
||||
- run: uname -a && go version
|
||||
strategy:
|
||||
matrix:
|
||||
os:
|
||||
- ubuntu-20.04
|
||||
version:
|
||||
- 1.19
|
||||
---
|
||||
name: test
|
||||
jobs:
|
||||
job1:
|
||||
name: job1 (ubuntu-22.04, 1.17)
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: ${{ matrix.version }}
|
||||
- run: uname -a && go version
|
||||
strategy:
|
||||
matrix:
|
||||
os:
|
||||
- ubuntu-22.04
|
||||
version:
|
||||
- 1.17
|
||||
---
|
||||
name: test
|
||||
jobs:
|
||||
job1:
|
||||
name: job1 (ubuntu-22.04, 1.18)
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: ${{ matrix.version }}
|
||||
- run: uname -a && go version
|
||||
strategy:
|
||||
matrix:
|
||||
os:
|
||||
- ubuntu-22.04
|
||||
version:
|
||||
- 1.18
|
||||
---
|
||||
name: test
|
||||
jobs:
|
||||
job1:
|
||||
name: job1 (ubuntu-22.04, 1.19)
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: ${{ matrix.version }}
|
||||
- run: uname -a && go version
|
||||
strategy:
|
||||
matrix:
|
||||
os:
|
||||
- ubuntu-22.04
|
||||
version:
|
||||
- 1.19
|
@@ -20,7 +20,7 @@ func (a *ActionRunsUsing) UnmarshalYAML(unmarshal func(interface{}) error) error
    // Force input to lowercase for case insensitive comparison
    format := ActionRunsUsing(strings.ToLower(using))
    switch format {
    case ActionRunsUsingNode16, ActionRunsUsingNode12, ActionRunsUsingDocker, ActionRunsUsingComposite:
    case ActionRunsUsingNode16, ActionRunsUsingNode12, ActionRunsUsingDocker, ActionRunsUsingComposite, ActionRunsUsingGo:
        *a = format
    default:
        return fmt.Errorf(fmt.Sprintf("The runs.using key in action.yml must be one of: %v, got %s", []string{

@@ -28,6 +28,7 @@ func (a *ActionRunsUsing) UnmarshalYAML(unmarshal func(interface{}) error) error
            ActionRunsUsingDocker,
            ActionRunsUsingNode12,
            ActionRunsUsingNode16,
            ActionRunsUsingGo,
        }, format))
    }
    return nil

@@ -42,6 +43,8 @@ const (
    ActionRunsUsingDocker = "docker"
    // ActionRunsUsingComposite for running composite
    ActionRunsUsingComposite = "composite"
    // ActionRunsUsingGo for running with go
    ActionRunsUsingGo = "go"
)

// ActionRuns are a field in Action
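The new ActionRunsUsingGo value lets an action declare a Go entrypoint that the runner builds with `go build` and then executes (see the runActionImpl and pre/post step changes further down). A hypothetical action.yml accepted by this change, embedded as a Go string in the style of the tests in this diff; the file names are invented:

```go
// exampleGoAction is a hypothetical action.yml using the added "go" runtime:
// main/pre/post name Go source files that the runner compiles and runs
// inside the job container.
const exampleGoAction = `
name: example-go-action
description: hypothetical action using the new "go" runs.using value
runs:
  using: go
  pre: pre.go
  main: main.go
  post: post.go
`
```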
@@ -164,6 +164,13 @@ func NewWorkflowPlanner(path string, noWorkflowRecurse bool) (WorkflowPlanner, e
|
||||
return wp, nil
|
||||
}
|
||||
|
||||
// CombineWorkflowPlanner combines workflows to a WorkflowPlanner
|
||||
func CombineWorkflowPlanner(workflows ...*Workflow) WorkflowPlanner {
|
||||
return &workflowPlanner{
|
||||
workflows: workflows,
|
||||
}
|
||||
}
|
||||
|
||||
type workflowPlanner struct {
|
||||
workflows []*Workflow
|
||||
}
|
||||
|
@@ -67,6 +67,30 @@ func (w *Workflow) OnEvent(event string) interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *Workflow) OnSchedule() []string {
|
||||
schedules := w.OnEvent("schedule")
|
||||
if schedules == nil {
|
||||
return []string{}
|
||||
}
|
||||
|
||||
switch val := schedules.(type) {
|
||||
case []interface{}:
|
||||
allSchedules := []string{}
|
||||
for _, v := range val {
|
||||
for k, cron := range v.(map[string]interface{}) {
|
||||
if k != "cron" {
|
||||
continue
|
||||
}
|
||||
allSchedules = append(allSchedules, cron.(string))
|
||||
}
|
||||
}
|
||||
return allSchedules
|
||||
default:
|
||||
}
|
||||
|
||||
return []string{}
|
||||
}
|
||||
|
||||
type WorkflowDispatchInput struct {
|
||||
Description string `yaml:"description"`
|
||||
Required bool `yaml:"required"`
|
||||
@@ -504,6 +528,7 @@ type ContainerSpec struct {
|
||||
|
||||
// Step is the structure of one step in a job
|
||||
type Step struct {
|
||||
Number int `yaml:"-"`
|
||||
ID string `yaml:"id"`
|
||||
If yaml.Node `yaml:"if"`
|
||||
Name string `yaml:"name"`
|
||||
@@ -550,7 +575,7 @@ func (s *Step) GetEnv() map[string]string {
|
||||
func (s *Step) ShellCommand() string {
|
||||
shellCommand := ""
|
||||
|
||||
//Reference: https://github.com/actions/runner/blob/8109c962f09d9acc473d92c595ff43afceddb347/src/Runner.Worker/Handlers/ScriptHandlerHelpers.cs#L9-L17
|
||||
// Reference: https://github.com/actions/runner/blob/8109c962f09d9acc473d92c595ff43afceddb347/src/Runner.Worker/Handlers/ScriptHandlerHelpers.cs#L9-L17
|
||||
switch s.Shell {
|
||||
case "", "bash":
|
||||
shellCommand = "bash --noprofile --norc -e -o pipefail {0}"
|
||||
|
@@ -7,6 +7,88 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestReadWorkflow_ScheduleEvent(t *testing.T) {
|
||||
yaml := `
|
||||
name: local-action-docker-url
|
||||
on:
|
||||
schedule:
|
||||
- cron: '30 5 * * 1,3'
|
||||
- cron: '30 5 * * 2,4'
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: ./actions/docker-url
|
||||
`
|
||||
|
||||
workflow, err := ReadWorkflow(strings.NewReader(yaml))
|
||||
assert.NoError(t, err, "read workflow should succeed")
|
||||
|
||||
schedules := workflow.OnEvent("schedule")
|
||||
assert.Len(t, schedules, 2)
|
||||
|
||||
newSchedules := workflow.OnSchedule()
|
||||
assert.Len(t, newSchedules, 2)
|
||||
|
||||
assert.Equal(t, "30 5 * * 1,3", newSchedules[0])
|
||||
assert.Equal(t, "30 5 * * 2,4", newSchedules[1])
|
||||
|
||||
yaml = `
|
||||
name: local-action-docker-url
|
||||
on:
|
||||
schedule:
|
||||
test: '30 5 * * 1,3'
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: ./actions/docker-url
|
||||
`
|
||||
|
||||
workflow, err = ReadWorkflow(strings.NewReader(yaml))
|
||||
assert.NoError(t, err, "read workflow should succeed")
|
||||
|
||||
newSchedules = workflow.OnSchedule()
|
||||
assert.Len(t, newSchedules, 0)
|
||||
|
||||
yaml = `
|
||||
name: local-action-docker-url
|
||||
on:
|
||||
schedule:
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: ./actions/docker-url
|
||||
`
|
||||
|
||||
workflow, err = ReadWorkflow(strings.NewReader(yaml))
|
||||
assert.NoError(t, err, "read workflow should succeed")
|
||||
|
||||
newSchedules = workflow.OnSchedule()
|
||||
assert.Len(t, newSchedules, 0)
|
||||
|
||||
yaml = `
|
||||
name: local-action-docker-url
|
||||
on: [push, tag]
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: ./actions/docker-url
|
||||
`
|
||||
|
||||
workflow, err = ReadWorkflow(strings.NewReader(yaml))
|
||||
assert.NoError(t, err, "read workflow should succeed")
|
||||
|
||||
newSchedules = workflow.OnSchedule()
|
||||
assert.Len(t, newSchedules, 0)
|
||||
}
|
||||
|
||||
func TestReadWorkflow_StringEvent(t *testing.T) {
|
||||
yaml := `
|
||||
name: local-action-docker-url
|
||||
|
@@ -171,12 +171,26 @@ func runActionImpl(step actionStep, actionDir string, remoteAction *remoteAction
|
||||
}
|
||||
|
||||
return execAsComposite(step)(ctx)
|
||||
case model.ActionRunsUsingGo:
|
||||
if err := maybeCopyToActionDir(ctx, step, actionDir, actionPath, containerActionDir); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
execFileName := fmt.Sprintf("%s.out", action.Runs.Main)
|
||||
buildArgs := []string{"go", "build", "-o", execFileName, action.Runs.Main}
|
||||
execArgs := []string{filepath.Join(containerActionDir, execFileName)}
|
||||
|
||||
return common.NewPipelineExecutor(
|
||||
rc.execJobContainer(buildArgs, *step.getEnv(), "", containerActionDir),
|
||||
rc.execJobContainer(execArgs, *step.getEnv(), "", ""),
|
||||
)(ctx)
|
||||
default:
|
||||
return fmt.Errorf(fmt.Sprintf("The runs.using key must be one of: %v, got %s", []string{
|
||||
model.ActionRunsUsingDocker,
|
||||
model.ActionRunsUsingNode12,
|
||||
model.ActionRunsUsingNode16,
|
||||
model.ActionRunsUsingComposite,
|
||||
model.ActionRunsUsingGo,
|
||||
}, action.Runs.Using))
|
||||
}
|
||||
}
|
||||
@@ -367,7 +381,7 @@ func newStepContainer(ctx context.Context, step step, image string, cmd []string
|
||||
Image: image,
|
||||
Username: rc.Config.Secrets["DOCKER_USERNAME"],
|
||||
Password: rc.Config.Secrets["DOCKER_PASSWORD"],
|
||||
Name: createContainerName(rc.jobContainerName(), stepModel.ID),
|
||||
Name: createSimpleContainerName(rc.jobContainerName(), "STEP-"+stepModel.ID),
|
||||
Env: envList,
|
||||
Mounts: mounts,
|
||||
NetworkMode: networkMode,
|
||||
@@ -378,6 +392,7 @@ func newStepContainer(ctx context.Context, step step, image string, cmd []string
|
||||
UsernsMode: rc.Config.UsernsMode,
|
||||
Platform: rc.Config.ContainerArchitecture,
|
||||
Options: rc.Config.ContainerOptions,
|
||||
AutoRemove: rc.Config.AutoRemove,
|
||||
})
|
||||
return stepContainer
|
||||
}
|
||||
@@ -452,7 +467,8 @@ func hasPreStep(step actionStep) common.Conditional {
|
||||
action := step.getActionModel()
|
||||
return action.Runs.Using == model.ActionRunsUsingComposite ||
|
||||
((action.Runs.Using == model.ActionRunsUsingNode12 ||
|
||||
action.Runs.Using == model.ActionRunsUsingNode16) &&
|
||||
action.Runs.Using == model.ActionRunsUsingNode16 ||
|
||||
action.Runs.Using == model.ActionRunsUsingGo) &&
|
||||
action.Runs.Pre != "")
|
||||
}
|
||||
}
|
||||
@@ -511,6 +527,41 @@ func runPreStep(step actionStep) common.Executor {
|
||||
}
|
||||
return fmt.Errorf("missing steps in composite action")
|
||||
|
||||
case model.ActionRunsUsingGo:
|
||||
// defaults in pre steps were missing, however provided inputs are available
|
||||
populateEnvsFromInput(ctx, step.getEnv(), action, rc)
|
||||
// todo: refactor into step
|
||||
var actionDir string
|
||||
var actionPath string
|
||||
if _, ok := step.(*stepActionRemote); ok {
|
||||
actionPath = newRemoteAction(stepModel.Uses).Path
|
||||
actionDir = fmt.Sprintf("%s/%s", rc.ActionCacheDir(), safeFilename(stepModel.Uses))
|
||||
} else {
|
||||
actionDir = filepath.Join(rc.Config.Workdir, stepModel.Uses)
|
||||
actionPath = ""
|
||||
}
|
||||
|
||||
actionLocation := ""
|
||||
if actionPath != "" {
|
||||
actionLocation = path.Join(actionDir, actionPath)
|
||||
} else {
|
||||
actionLocation = actionDir
|
||||
}
|
||||
|
||||
_, containerActionDir := getContainerActionPaths(stepModel, actionLocation, rc)
|
||||
|
||||
if err := maybeCopyToActionDir(ctx, step, actionDir, actionPath, containerActionDir); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
execFileName := fmt.Sprintf("%s.out", action.Runs.Pre)
|
||||
buildArgs := []string{"go", "build", "-o", execFileName, action.Runs.Pre}
|
||||
execArgs := []string{filepath.Join(containerActionDir, execFileName)}
|
||||
|
||||
return common.NewPipelineExecutor(
|
||||
rc.execJobContainer(buildArgs, *step.getEnv(), "", containerActionDir),
|
||||
rc.execJobContainer(execArgs, *step.getEnv(), "", ""),
|
||||
)(ctx)
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
@@ -547,7 +598,8 @@ func hasPostStep(step actionStep) common.Conditional {
|
||||
action := step.getActionModel()
|
||||
return action.Runs.Using == model.ActionRunsUsingComposite ||
|
||||
((action.Runs.Using == model.ActionRunsUsingNode12 ||
|
||||
action.Runs.Using == model.ActionRunsUsingNode16) &&
|
||||
action.Runs.Using == model.ActionRunsUsingNode16 ||
|
||||
action.Runs.Using == model.ActionRunsUsingGo) &&
|
||||
action.Runs.Post != "")
|
||||
}
|
||||
}
|
||||
@@ -603,6 +655,18 @@ func runPostStep(step actionStep) common.Executor {
|
||||
}
|
||||
return fmt.Errorf("missing steps in composite action")
|
||||
|
||||
case model.ActionRunsUsingGo:
|
||||
populateEnvsFromSavedState(step.getEnv(), step, rc)
|
||||
|
||||
execFileName := fmt.Sprintf("%s.out", action.Runs.Post)
|
||||
buildArgs := []string{"go", "build", "-o", execFileName, action.Runs.Post}
|
||||
execArgs := []string{filepath.Join(containerActionDir, execFileName)}
|
||||
|
||||
return common.NewPipelineExecutor(
|
||||
rc.execJobContainer(buildArgs, *step.getEnv(), "", containerActionDir),
|
||||
rc.execJobContainer(execArgs, *step.getEnv(), "", ""),
|
||||
)(ctx)
|
||||
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
|
@@ -135,6 +135,7 @@ func (rc *RunContext) compositeExecutor(action *model.Action) *compositeSteps {
|
||||
if step.ID == "" {
|
||||
step.ID = fmt.Sprintf("%d", i)
|
||||
}
|
||||
step.Number = i
|
||||
|
||||
// create a copy of the step, since this composite action could
|
||||
// run multiple times and we might modify the instance
|
||||
|
@@ -62,6 +62,7 @@ func newJobExecutor(info jobInfo, sf stepFactory, rc *RunContext) common.Executo
|
||||
if stepModel.ID == "" {
|
||||
stepModel.ID = fmt.Sprintf("%d", i)
|
||||
}
|
||||
stepModel.Number = i
|
||||
|
||||
step, err := sf.newStep(stepModel, rc)
|
||||
|
||||
@@ -173,7 +174,7 @@ func setJobOutputs(ctx context.Context, rc *RunContext) {
|
||||
|
||||
func useStepLogger(rc *RunContext, stepModel *model.Step, stage stepStage, executor common.Executor) common.Executor {
|
||||
return func(ctx context.Context) error {
|
||||
ctx = withStepLogger(ctx, stepModel.ID, rc.ExprEval.Interpolate(ctx, stepModel.String()), stage.String())
|
||||
ctx = withStepLogger(ctx, stepModel.Number, stepModel.ID, rc.ExprEval.Interpolate(ctx, stepModel.String()), stage.String())
|
||||
|
||||
rawLogger := common.Logger(ctx).WithField("raw_output", true)
|
||||
logWriter := common.NewLineWriter(rc.commandHandler(ctx), func(s string) bool {
|
||||
|
@@ -95,6 +95,17 @@ func WithJobLogger(ctx context.Context, jobID string, jobName string, config *Co
|
||||
logger.SetFormatter(formatter)
|
||||
}
|
||||
|
||||
{ // Adapt to Gitea
|
||||
if hook := common.LoggerHook(ctx); hook != nil {
|
||||
logger.AddHook(hook)
|
||||
}
|
||||
if config.JobLoggerLevel != nil {
|
||||
logger.SetLevel(*config.JobLoggerLevel)
|
||||
} else {
|
||||
logger.SetLevel(logrus.TraceLevel)
|
||||
}
|
||||
}
|
||||
|
||||
logger.SetFormatter(&maskedFormatter{
|
||||
Formatter: logger.Formatter,
|
||||
masker: valueMasker(config.InsecureSecrets, config.Secrets),
|
||||
@@ -131,8 +142,9 @@ func WithCompositeStepLogger(ctx context.Context, stepID string) context.Context
|
||||
}).WithContext(ctx))
|
||||
}
|
||||
|
||||
func withStepLogger(ctx context.Context, stepID string, stepName string, stageName string) context.Context {
|
||||
func withStepLogger(ctx context.Context, stepNumber int, stepID, stepName, stageName string) context.Context {
|
||||
rtn := common.Logger(ctx).WithFields(logrus.Fields{
|
||||
"stepNumber": stepNumber,
|
||||
"step": stepName,
|
||||
"stepID": []string{stepID},
|
||||
"stage": stageName,
|
||||
|
@@ -16,6 +16,7 @@ import (
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/mitchellh/go-homedir"
|
||||
"github.com/opencontainers/selinux/go-selinux"
|
||||
@@ -86,7 +87,7 @@ func (rc *RunContext) GetEnv() map[string]string {
|
||||
}
|
||||
|
||||
func (rc *RunContext) jobContainerName() string {
|
||||
return createContainerName("act", rc.String())
|
||||
return createSimpleContainerName(rc.Config.ContainerNamePrefix, "WORKFLOW-"+rc.Run.Workflow.Name, "JOB-"+rc.Name)
|
||||
}
|
||||
|
||||
// Returns the binds and mounts for the container, resolving paths as appopriate
|
||||
@@ -252,7 +253,7 @@ func (rc *RunContext) startJobContainer() common.Executor {
|
||||
|
||||
rc.JobContainer = container.NewContainer(&container.NewContainerInput{
|
||||
Cmd: nil,
|
||||
Entrypoint: []string{"tail", "-f", "/dev/null"},
|
||||
Entrypoint: []string{"/bin/sleep", fmt.Sprint(rc.Config.ContainerMaxLifetime.Round(time.Second).Seconds())},
|
||||
WorkingDir: ext.ToContainerPath(rc.Config.Workdir),
|
||||
Image: image,
|
||||
Username: username,
|
||||
@@ -260,7 +261,7 @@ func (rc *RunContext) startJobContainer() common.Executor {
|
||||
Name: name,
|
||||
Env: envList,
|
||||
Mounts: mounts,
|
||||
NetworkMode: "host",
|
||||
NetworkMode: rc.Config.ContainerNetworkMode,
|
||||
Binds: binds,
|
||||
Stdout: logWriter,
|
||||
Stderr: logWriter,
|
||||
@@ -268,6 +269,7 @@ func (rc *RunContext) startJobContainer() common.Executor {
|
||||
UsernsMode: rc.Config.UsernsMode,
|
||||
Platform: rc.Config.ContainerArchitecture,
|
||||
Options: rc.options(ctx),
|
||||
AutoRemove: rc.Config.AutoRemove,
|
||||
})
|
||||
if rc.JobContainer == nil {
|
||||
return errors.New("Failed to create job container")
|
||||
@@ -458,9 +460,19 @@ func (rc *RunContext) platformImage(ctx context.Context) string {
|
||||
common.Logger(ctx).Errorf("'runs-on' key not defined in %s", rc.String())
|
||||
}
|
||||
|
||||
for _, runnerLabel := range job.RunsOn() {
|
||||
platformName := rc.ExprEval.Interpolate(ctx, runnerLabel)
|
||||
image := rc.Config.Platforms[strings.ToLower(platformName)]
|
||||
runsOn := job.RunsOn()
|
||||
for i, v := range runsOn {
|
||||
runsOn[i] = rc.ExprEval.Interpolate(ctx, v)
|
||||
}
|
||||
|
||||
if pick := rc.Config.PlatformPicker; pick != nil {
|
||||
if image := pick(runsOn); image != "" {
|
||||
return image
|
||||
}
|
||||
}
|
||||
|
||||
for _, runnerLabel := range runsOn {
|
||||
image := rc.Config.Platforms[strings.ToLower(runnerLabel)]
|
||||
if image != "" {
|
||||
return image
|
||||
}
|
||||
@@ -520,6 +532,7 @@ func mergeMaps(maps ...map[string]string) map[string]string {
|
||||
return rtnMap
|
||||
}
|
||||
|
||||
// deprecated: use createSimpleContainerName
|
||||
func createContainerName(parts ...string) string {
|
||||
name := strings.Join(parts, "-")
|
||||
pattern := regexp.MustCompile("[^a-zA-Z0-9]")
|
||||
@@ -533,6 +546,22 @@ func createContainerName(parts ...string) string {
|
||||
return fmt.Sprintf("%s-%x", trimmedName, hash)
|
||||
}
|
||||
|
||||
func createSimpleContainerName(parts ...string) string {
|
||||
pattern := regexp.MustCompile("[^a-zA-Z0-9-]")
|
||||
name := make([]string, 0, len(parts))
|
||||
for _, v := range parts {
|
||||
v = pattern.ReplaceAllString(v, "-")
|
||||
v = strings.Trim(v, "-")
|
||||
for strings.Contains(v, "--") {
|
||||
v = strings.ReplaceAll(v, "--", "-")
|
||||
}
|
||||
if v != "" {
|
||||
name = append(name, v)
|
||||
}
|
||||
}
|
||||
return strings.Join(name, "_")
|
||||
}
|
||||
|
||||
func trimToLen(s string, l int) string {
|
||||
if l < 0 {
|
||||
l = 0
|
||||
@@ -613,6 +642,27 @@ func (rc *RunContext) getGithubContext(ctx context.Context) *model.GithubContext
|
||||
ghc.Actor = "nektos/act"
|
||||
}
|
||||
|
||||
{ // Adapt to Gitea
|
||||
if preset := rc.Config.PresetGitHubContext; preset != nil {
|
||||
ghc.Event = preset.Event
|
||||
ghc.RunID = preset.RunID
|
||||
ghc.RunNumber = preset.RunNumber
|
||||
ghc.Actor = preset.Actor
|
||||
ghc.Repository = preset.Repository
|
||||
ghc.EventName = preset.EventName
|
||||
ghc.Sha = preset.Sha
|
||||
ghc.Ref = preset.Ref
|
||||
ghc.RefName = preset.RefName
|
||||
ghc.RefType = preset.RefType
|
||||
ghc.HeadRef = preset.HeadRef
|
||||
ghc.BaseRef = preset.BaseRef
|
||||
ghc.Token = preset.Token
|
||||
ghc.RepositoryOwner = preset.RepositoryOwner
|
||||
ghc.RetentionDays = preset.RetentionDays
|
||||
return ghc
|
||||
}
|
||||
}
|
||||
|
||||
if rc.EventJSON != "" {
|
||||
err := json.Unmarshal([]byte(rc.EventJSON), &ghc.Event)
|
||||
if err != nil {
|
||||
@@ -716,6 +766,17 @@ func (rc *RunContext) withGithubEnv(ctx context.Context, github *model.GithubCon
|
||||
defaultGraphqlURL = fmt.Sprintf("https://%s/api/graphql", rc.Config.GitHubInstance)
|
||||
}
|
||||
|
||||
{ // Adapt to Gitea
|
||||
instance := rc.Config.GitHubInstance
|
||||
if !strings.HasPrefix(instance, "http://") &&
|
||||
!strings.HasPrefix(instance, "https://") {
|
||||
instance = "https://" + instance
|
||||
}
|
||||
defaultServerURL = instance
|
||||
defaultAPIURL = instance + "/api/v1" // the version of Gitea is v1
|
||||
defaultGraphqlURL = "" // Gitea doesn't support graphql
|
||||
}
|
||||
|
||||
if env["GITHUB_SERVER_URL"] == "" {
|
||||
env["GITHUB_SERVER_URL"] = defaultServerURL
|
||||
}
|
||||
|
@@ -624,3 +624,24 @@ func TestRunContextGetEnv(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_createSimpleContainerName(t *testing.T) {
|
||||
tests := []struct {
|
||||
parts []string
|
||||
want string
|
||||
}{
|
||||
{
|
||||
parts: []string{"a--a", "BB正", "c-C"},
|
||||
want: "a-a_BB_c-C",
|
||||
},
|
||||
{
|
||||
parts: []string{"a-a", "", "-"},
|
||||
want: "a-a",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(strings.Join(tt.parts, " "), func(t *testing.T) {
|
||||
assert.Equalf(t, tt.want, createSimpleContainerName(tt.parts...), "createSimpleContainerName(%v)", tt.parts)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@@ -5,6 +5,7 @@ import (
    "encoding/json"
    "fmt"
    "os"
    "time"

    log "github.com/sirupsen/logrus"

@@ -54,6 +55,15 @@ type Config struct {
    RemoteName                         string   // remote name in local git repo config
    ReplaceGheActionWithGithubCom      []string // Use actions from GitHub Enterprise instance to GitHub
    ReplaceGheActionTokenWithGithubCom string   // Token of private action repo on GitHub.

    PresetGitHubContext   *model.GithubContext         // the preset github context, overrides some fields like DefaultBranch, Env, Secrets etc.
    EventJSON             string                       // the content of JSON file to use for event.json in containers, overrides EventPath
    ContainerNamePrefix   string                       // the prefix of container name
    ContainerMaxLifetime  time.Duration                // the max lifetime of job containers
    ContainerNetworkMode  string                       // the network mode of job containers
    DefaultActionInstance string                       // the default actions web site
    PlatformPicker        func(labels []string) string // platform picker, it will take precedence over Platforms if isn't nil
    JobLoggerLevel        *log.Level                   // the level of job logger
}

type caller struct {

@@ -77,7 +87,9 @@ func New(runnerConfig *Config) (Runner, error) {

func (runner *runnerImpl) configure() (Runner, error) {
    runner.eventJSON = "{}"
    if runner.config.EventPath != "" {
    if runner.config.EventJSON != "" {
        runner.eventJSON = runner.config.EventJSON
    } else if runner.config.EventPath != "" {
        log.Debugf("Reading event.json from %s", runner.config.EventPath)
        eventJSONBytes, err := os.ReadFile(runner.config.EventPath)
        if err != nil {
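A sketch of constructing a runner with the Gitea-specific Config fields added above. The values are placeholders, and only fields visible in this diff are set.

```go
package main

import (
    "time"

    "github.com/nektos/act/pkg/runner"
    log "github.com/sirupsen/logrus"
)

func main() {
    level := log.InfoLevel
    cfg := &runner.Config{
        // Gitea-specific fields introduced in this fork (placeholder values):
        EventJSON:             `{"ref":"refs/heads/main"}`,
        ContainerNamePrefix:   "GITEA-ACTIONS-TASK-1",
        ContainerMaxLifetime:  time.Hour,
        ContainerNetworkMode:  "bridge",
        DefaultActionInstance: "github.com",
        JobLoggerLevel:        &level,
        PlatformPicker: func(labels []string) string {
            // pick an image for the given runs-on labels; "" falls back to Platforms
            return "node:16-bullseye"
        },
    }

    r, err := runner.New(cfg)
    if err != nil {
        panic(err)
    }
    _ = r // use r to execute a plan; omitted here
}
```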
@@ -30,9 +30,7 @@ type stepActionRemote struct {
|
||||
remoteAction *remoteAction
|
||||
}
|
||||
|
||||
var (
|
||||
stepActionRemoteNewCloneExecutor = git.NewGitCloneExecutor
|
||||
)
|
||||
var stepActionRemoteNewCloneExecutor = git.NewGitCloneExecutor
|
||||
|
||||
func (sar *stepActionRemote) prepareActionExecutor() common.Executor {
|
||||
return func(ctx context.Context) error {
|
||||
@@ -46,15 +44,12 @@ func (sar *stepActionRemote) prepareActionExecutor() common.Executor {
|
||||
return fmt.Errorf("Expected format {org}/{repo}[/path]@ref. Actual '%s' Input string was not in a correct format", sar.Step.Uses)
|
||||
}
|
||||
|
||||
sar.remoteAction.URL = sar.RunContext.Config.GitHubInstance
|
||||
|
||||
github := sar.getGithubContext(ctx)
|
||||
if sar.remoteAction.IsCheckout() && isLocalCheckout(github, sar.Step) && !sar.RunContext.Config.NoSkipCheckout {
|
||||
common.Logger(ctx).Debugf("Skipping local actions/checkout because workdir was already copied")
|
||||
return nil
|
||||
}
|
||||
|
||||
sar.remoteAction.URL = sar.RunContext.Config.GitHubInstance
|
||||
for _, action := range sar.RunContext.Config.ReplaceGheActionWithGithubCom {
|
||||
if strings.EqualFold(fmt.Sprintf("%s/%s", sar.remoteAction.Org, sar.remoteAction.Repo), action) {
|
||||
sar.remoteAction.URL = "github.com"
|
||||
@@ -64,10 +59,16 @@ func (sar *stepActionRemote) prepareActionExecutor() common.Executor {
|
||||
|
||||
actionDir := fmt.Sprintf("%s/%s", sar.RunContext.ActionCacheDir(), safeFilename(sar.Step.Uses))
|
||||
gitClone := stepActionRemoteNewCloneExecutor(git.NewGitCloneExecutorInput{
|
||||
URL: sar.remoteAction.CloneURL(),
|
||||
URL: sar.remoteAction.CloneURL(sar.RunContext.Config.DefaultActionInstance),
|
||||
Ref: sar.remoteAction.Ref,
|
||||
Dir: actionDir,
|
||||
Token: github.Token,
|
||||
Token: "", /*
|
||||
Shouldn't provide token when cloning actions,
|
||||
the token comes from the instance which triggered the task,
|
||||
however, it might be not the same instance which provides actions.
|
||||
For GitHub, they are the same, always github.com.
|
||||
But for Gitea, tasks triggered by a.com can clone actions from b.com.
|
||||
*/
|
||||
})
|
||||
var ntErr common.Executor
|
||||
if err := gitClone(ctx); err != nil {
|
||||
@@ -213,8 +214,15 @@ type remoteAction struct {
|
||||
Ref string
|
||||
}
|
||||
|
||||
func (ra *remoteAction) CloneURL() string {
|
||||
return fmt.Sprintf("https://%s/%s/%s", ra.URL, ra.Org, ra.Repo)
|
||||
func (ra *remoteAction) CloneURL(defaultURL string) string {
|
||||
u := ra.URL
|
||||
if u == "" {
|
||||
u = defaultURL
|
||||
}
|
||||
if !strings.HasPrefix(u, "http://") && !strings.HasPrefix(u, "https://") {
|
||||
u = "https://" + u
|
||||
}
|
||||
return fmt.Sprintf("%s/%s/%s", u, ra.Org, ra.Repo)
|
||||
}
|
||||
|
||||
func (ra *remoteAction) IsCheckout() bool {
|
||||
@@ -225,6 +233,26 @@ func (ra *remoteAction) IsCheckout() bool {
|
||||
}
|
||||
|
||||
func newRemoteAction(action string) *remoteAction {
|
||||
// support http(s)://host/owner/repo@v3
|
||||
for _, schema := range []string{"https://", "http://"} {
|
||||
if strings.HasPrefix(action, schema) {
|
||||
splits := strings.SplitN(strings.TrimPrefix(action, schema), "/", 2)
|
||||
if len(splits) != 2 {
|
||||
return nil
|
||||
}
|
||||
ret := parseAction(splits[1])
|
||||
if ret == nil {
|
||||
return nil
|
||||
}
|
||||
ret.URL = schema + splits[0]
|
||||
return ret
|
||||
}
|
||||
}
|
||||
|
||||
return parseAction(action)
|
||||
}
|
||||
|
||||
func parseAction(action string) *remoteAction {
|
||||
// GitHub's document[^] describes:
|
||||
// > We strongly recommend that you include the version of
|
||||
// > the action you are using by specifying a Git ref, SHA, or Docker tag number.
|
||||
@@ -240,7 +268,7 @@ func newRemoteAction(action string) *remoteAction {
|
||||
Repo: matches[2],
|
||||
Path: matches[4],
|
||||
Ref: matches[6],
|
||||
URL: "github.com",
|
||||
URL: "",
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -616,6 +616,100 @@ func TestStepActionRemotePost(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func Test_newRemoteAction(t *testing.T) {
|
||||
tests := []struct {
|
||||
action string
|
||||
want *remoteAction
|
||||
wantCloneURL string
|
||||
}{
|
||||
{
|
||||
action: "actions/heroku@main",
|
||||
want: &remoteAction{
|
||||
URL: "",
|
||||
Org: "actions",
|
||||
Repo: "heroku",
|
||||
Path: "",
|
||||
Ref: "main",
|
||||
},
|
||||
wantCloneURL: "https://github.com/actions/heroku",
|
||||
},
|
||||
{
|
||||
action: "actions/aws/ec2@main",
|
||||
want: &remoteAction{
|
||||
URL: "",
|
||||
Org: "actions",
|
||||
Repo: "aws",
|
||||
Path: "ec2",
|
||||
Ref: "main",
|
||||
},
|
||||
wantCloneURL: "https://github.com/actions/aws",
|
||||
},
|
||||
{
|
||||
action: "./.github/actions/my-action", // it's valid for GitHub, but act don't support it
|
||||
want: nil,
|
||||
},
|
||||
{
|
||||
action: "docker://alpine:3.8", // it's valid for GitHub, but act don't support it
|
||||
want: nil,
|
||||
},
|
||||
{
|
||||
action: "https://gitea.com/actions/heroku@main", // it's invalid for GitHub, but gitea supports it
|
||||
want: &remoteAction{
|
||||
URL: "https://gitea.com",
|
||||
Org: "actions",
|
||||
Repo: "heroku",
|
||||
Path: "",
|
||||
Ref: "main",
|
||||
},
|
||||
wantCloneURL: "https://gitea.com/actions/heroku",
|
||||
},
|
||||
{
|
||||
action: "https://gitea.com/actions/aws/ec2@main", // it's invalid for GitHub, but gitea supports it
|
||||
want: &remoteAction{
|
||||
URL: "https://gitea.com",
|
||||
Org: "actions",
|
||||
Repo: "aws",
|
||||
Path: "ec2",
|
||||
Ref: "main",
|
||||
},
|
||||
wantCloneURL: "https://gitea.com/actions/aws",
|
||||
},
|
||||
{
|
||||
action: "http://gitea.com/actions/heroku@main", // it's invalid for GitHub, but gitea supports it
|
||||
want: &remoteAction{
|
||||
URL: "http://gitea.com",
|
||||
Org: "actions",
|
||||
Repo: "heroku",
|
||||
Path: "",
|
||||
Ref: "main",
|
||||
},
|
||||
wantCloneURL: "http://gitea.com/actions/heroku",
|
||||
},
|
||||
{
|
||||
action: "http://gitea.com/actions/aws/ec2@main", // it's invalid for GitHub, but gitea supports it
|
||||
want: &remoteAction{
|
||||
URL: "http://gitea.com",
|
||||
Org: "actions",
|
||||
Repo: "aws",
|
||||
Path: "ec2",
|
||||
Ref: "main",
|
||||
},
|
||||
wantCloneURL: "http://gitea.com/actions/aws",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.action, func(t *testing.T) {
|
||||
got := newRemoteAction(tt.action)
|
||||
assert.Equalf(t, tt.want, got, "newRemoteAction(%v)", tt.action)
|
||||
cloneURL := ""
|
||||
if got != nil {
|
||||
cloneURL = got.CloneURL("github.com")
|
||||
}
|
||||
assert.Equalf(t, tt.wantCloneURL, cloneURL, "newRemoteAction(%v).CloneURL()", tt.action)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_safeFilename(t *testing.T) {
|
||||
tests := []struct {
|
||||
s string
|
||||
|
@@ -120,7 +120,7 @@ func (sd *stepDocker) newStepContainer(ctx context.Context, image string, cmd []
|
||||
Image: image,
|
||||
Username: rc.Config.Secrets["DOCKER_USERNAME"],
|
||||
Password: rc.Config.Secrets["DOCKER_PASSWORD"],
|
||||
Name: createContainerName(rc.jobContainerName(), step.ID),
|
||||
Name: createSimpleContainerName(rc.jobContainerName(), "STEP-"+step.ID),
|
||||
Env: envList,
|
||||
Mounts: mounts,
|
||||
NetworkMode: fmt.Sprintf("container:%s", rc.jobContainerName()),
|
||||
@@ -130,6 +130,7 @@ func (sd *stepDocker) newStepContainer(ctx context.Context, image string, cmd []
|
||||
Privileged: rc.Config.Privileged,
|
||||
UsernsMode: rc.Config.UsernsMode,
|
||||
Platform: rc.Config.ContainerArchitecture,
|
||||
AutoRemove: rc.Config.AutoRemove,
|
||||
})
|
||||
return stepContainer
|
||||
}
|
||||
|