Initial commit with support for GitHub actions
109  actions/log.go  Normal file
@@ -0,0 +1,109 @@
package actions

import (
    "bytes"
    "fmt"
    "io"
    "os"
    "strings"

    "github.com/sirupsen/logrus"
    "golang.org/x/crypto/ssh/terminal"
)

type actionLogFormatter struct {
}

var formatter *actionLogFormatter

func init() {
    formatter = new(actionLogFormatter)
}

const (
    //nocolor = 0
    red    = 31
    green  = 32
    yellow = 33
    blue   = 36
    gray   = 37
)

func newActionLogger(actionName string, dryrun bool) *logrus.Entry {
    logger := logrus.New()
    logger.SetFormatter(formatter)
    logger.SetLevel(logrus.GetLevel())
    rtn := logger.WithFields(logrus.Fields{"action_name": actionName, "dryrun": dryrun})
    return rtn
}

func (f *actionLogFormatter) Format(entry *logrus.Entry) ([]byte, error) {
    b := &bytes.Buffer{}

    if f.isColored(entry) {
        f.printColored(b, entry)
    } else {
        f.print(b, entry)
    }

    b.WriteByte('\n')
    return b.Bytes(), nil
}

func (f *actionLogFormatter) printColored(b *bytes.Buffer, entry *logrus.Entry) {
    var levelColor int
    switch entry.Level {
    case logrus.DebugLevel, logrus.TraceLevel:
        levelColor = gray
    case logrus.WarnLevel:
        levelColor = yellow
    case logrus.ErrorLevel, logrus.FatalLevel, logrus.PanicLevel:
        levelColor = red
    default:
        levelColor = blue
    }

    entry.Message = strings.TrimSuffix(entry.Message, "\n")
    actionName := entry.Data["action_name"]

    if entry.Data["dryrun"] == true {
        fmt.Fprintf(b, "\x1b[%dm*DRYRUN* \x1b[%dm[%s] \x1b[0m%s", green, levelColor, actionName, entry.Message)
    } else {
        fmt.Fprintf(b, "\x1b[%dm[%s] \x1b[0m%s", levelColor, actionName, entry.Message)
    }
}

func (f *actionLogFormatter) print(b *bytes.Buffer, entry *logrus.Entry) {
    entry.Message = strings.TrimSuffix(entry.Message, "\n")
    actionName := entry.Data["action_name"]

    if entry.Data["dryrun"] == true {
        fmt.Fprintf(b, "*DRYRUN* [%s] %s", actionName, entry.Message)
    } else {
        fmt.Fprintf(b, "[%s] %s", actionName, entry.Message)
    }
}

func (f *actionLogFormatter) isColored(entry *logrus.Entry) bool {

    isColored := checkIfTerminal(entry.Logger.Out)

    if force, ok := os.LookupEnv("CLICOLOR_FORCE"); ok && force != "0" {
        isColored = true
    } else if ok && force == "0" {
        isColored = false
    } else if os.Getenv("CLICOLOR") == "0" {
        isColored = false
    }

    return isColored
}

func checkIfTerminal(w io.Writer) bool {
    switch v := w.(type) {
    case *os.File:
        return terminal.IsTerminal(int(v.Fd()))
    default:
        return false
    }
}
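The snippet below is an illustrative sketch, not part of this commit: it shows how the package-internal newActionLogger is meant to be used and what actionLogFormatter renders. The action name "build", the log line, and the exampleActionLogging helper are invented.

package actions

import "github.com/sirupsen/logrus"

// Sketch only (hypothetical helper): demonstrates the output format of
// actionLogFormatter for a named action.
func exampleActionLogging() {
    logrus.SetLevel(logrus.InfoLevel)
    logger := newActionLogger("build", false)
    logger.Infof("starting container")
    // rendered as:                  [build] starting container
    // with dryrun=true it becomes:  *DRYRUN* [build] starting container (colored on a TTY)
}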
76  actions/parser.go  Normal file
@@ -0,0 +1,76 @@
package actions

import (
    "bytes"
    "io/ioutil"
    "os"
    "path/filepath"

    "github.com/hashicorp/hcl"
    "github.com/hashicorp/hcl/hcl/ast"
    log "github.com/sirupsen/logrus"
)

// ParseWorkflows will read in the set of actions from the workflow file
func ParseWorkflows(workingDir string, workflowPath string) (Workflows, error) {
    workingDir, err := filepath.Abs(workingDir)
    if err != nil {
        return nil, err
    }
    log.Debugf("Setting working dir to %s", workingDir)

    if !filepath.IsAbs(workflowPath) {
        workflowPath = filepath.Join(workingDir, workflowPath)
    }
    log.Debugf("Loading workflow config from %s", workflowPath)
    workflowReader, err := os.Open(workflowPath)
    if err != nil {
        return nil, err
    }

    buf := new(bytes.Buffer)
    buf.ReadFrom(workflowReader)

    workflows := new(workflowsFile)
    workflows.WorkingDir = workingDir
    workflows.WorkflowPath = workflowPath

    astFile, err := hcl.ParseBytes(buf.Bytes())
    if err != nil {
        return nil, err
    }
    rootNode := ast.Walk(astFile.Node, cleanWorkflowsAST)
    err = hcl.DecodeObject(workflows, rootNode)
    if err != nil {
        return nil, err
    }

    workflows.TempDir, err = ioutil.TempDir("/tmp", "act-")
    if err != nil {
        return nil, err
    }

    // TODO: add validation logic
    // - check for circular dependencies
    // - check for valid local path refs
    // - check for valid dependencies

    return workflows, nil
}

func cleanWorkflowsAST(node ast.Node) (ast.Node, bool) {
    if objectItem, ok := node.(*ast.ObjectItem); ok {
        key := objectItem.Keys[0].Token.Value()

        // handle condition where value is a string but should be a list
        switch key {
        case "resolves", "needs", "args":
            if literalType, ok := objectItem.Val.(*ast.LiteralType); ok {
                listType := new(ast.ListType)
                listType.Add(literalType)
                objectItem.Val = listType
            }
        }
    }
    return node, true
}
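For context, a sketch, not part of the commit, of the kind of HCL workflow file ParseWorkflows consumes and how the parser might be driven. The file contents, paths, and action names are invented; note that cleanWorkflowsAST lets resolves, needs, and args be written either as a single string or as a list.

package main

import (
    "fmt"
    "log"

    "github.com/nektos/act/actions"
)

// Sketch only. Assumes a .github/main.workflow in the working directory such as:
//
//   workflow "new-workflow" {
//     on = "push"
//     resolves = "build"       // bare string; normalized to a list by cleanWorkflowsAST
//   }
//
//   action "build" {
//     uses = "./action1"       // local directory containing a Dockerfile
//     args = ["echo", "hello"]
//   }
func main() {
    workflows, err := actions.ParseWorkflows(".", ".github/main.workflow")
    if err != nil {
        log.Fatal(err)
    }
    defer workflows.Close()

    fmt.Println(workflows.ListEvents())       // e.g. [push]
    fmt.Println(workflows.GraphEvent("push")) // e.g. [[build]] <nil>
}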
412  actions/runner.go  Normal file
@@ -0,0 +1,412 @@
package actions

import (
    "archive/tar"
    "bytes"
    "context"
    "fmt"
    "io"
    "math/rand"
    "net/http"
    "net/url"
    "os"
    "path/filepath"
    "regexp"
    "sort"

    "github.com/howeyc/gopass"
    "github.com/nektos/act/common"
    "github.com/nektos/act/container"
    log "github.com/sirupsen/logrus"
)

var secretCache map[string]string

func (w *workflowsFile) ListEvents() []string {
    log.Debugf("Listing all events")
    events := make([]string, 0)
    for _, w := range w.Workflow {
        events = append(events, w.On)
    }

    // sort the list alphabetically
    sort.Slice(events, func(i, j int) bool {
        return events[i] < events[j]
    })

    return events
}

func (w *workflowsFile) GraphEvent(eventName string) ([][]string, error) {
    log.Debugf("Listing actions for event '%s'", eventName)
    workflow, _, err := w.getWorkflow(eventName)
    if err != nil {
        return nil, err
    }
    return w.newExecutionGraph(workflow.Resolves...), nil
}

func (w *workflowsFile) RunAction(ctx context.Context, dryrun bool, actionName string) error {
    log.Debugf("Running action '%s'", actionName)
    return w.newActionExecutor(ctx, dryrun, "", actionName)()
}

func (w *workflowsFile) RunEvent(ctx context.Context, dryrun bool, eventName string) error {
    log.Debugf("Running event '%s'", eventName)
    workflow, _, err := w.getWorkflow(eventName)
    if err != nil {
        return err
    }

    log.Debugf("Running actions %s -> %s", eventName, workflow.Resolves)
    return w.newActionExecutor(ctx, dryrun, eventName, workflow.Resolves...)()
}

func (w *workflowsFile) getWorkflow(eventName string) (*workflowDef, string, error) {
    for wName, w := range w.Workflow {
        if w.On == eventName {
            return &w, wName, nil
        }
    }
    return nil, "", fmt.Errorf("unsupported event: %v", eventName)
}

func (w *workflowsFile) getAction(actionName string) (*actionDef, error) {
    if a, ok := w.Action[actionName]; ok {
        return &a, nil
    }
    return nil, fmt.Errorf("unsupported action: %v", actionName)
}

func (w *workflowsFile) Close() {
    os.RemoveAll(w.TempDir)
}

// newExecutionGraph returns the stages of a pipeline: stages run in series,
// and the actions within a stage can run in parallel
func (w *workflowsFile) newExecutionGraph(actionNames ...string) [][]string {
    // first, build a list of all the necessary actions to run, and their dependencies
    actionDependencies := make(map[string][]string)
    for len(actionNames) > 0 {
        newActionNames := make([]string, 0)
        for _, aName := range actionNames {
            // make sure we haven't visited this action yet
            if _, ok := actionDependencies[aName]; !ok {
                actionDependencies[aName] = w.Action[aName].Needs
                newActionNames = append(newActionNames, w.Action[aName].Needs...)
            }
        }
        actionNames = newActionNames
    }

    // next, build an execution graph
    graph := make([][]string, 0)
    for len(actionDependencies) > 0 {
        stage := make([]string, 0)
        for aName, aDeps := range actionDependencies {
            // make sure all deps are in the graph already
            if listInLists(aDeps, graph...) {
                stage = append(stage, aName)
                delete(actionDependencies, aName)
            }
        }
        if len(stage) == 0 {
            log.Fatalf("Unable to build dependency graph!")
        }
        graph = append(graph, stage)
    }

    return graph
}

// return true iff all strings in srcList exist in at least one of the searchLists
func listInLists(srcList []string, searchLists ...[]string) bool {
    for _, src := range srcList {
        found := false
        for _, searchList := range searchLists {
            for _, search := range searchList {
                if src == search {
                    found = true
                }
            }
        }
        if !found {
            return false
        }
    }
    return true
}

func (w *workflowsFile) newActionExecutor(ctx context.Context, dryrun bool, eventName string, actionNames ...string) common.Executor {
    graph := w.newExecutionGraph(actionNames...)

    pipeline := make([]common.Executor, 0)
    for _, actions := range graph {
        stage := make([]common.Executor, 0)
        for _, actionName := range actions {
            action, err := w.getAction(actionName)
            if err != nil {
                return common.NewErrorExecutor(err)
            }
            actionExecutor := action.asExecutor(ctx, dryrun, w.WorkingDir, w.TempDir, actionName, w.setupEnvironment(eventName, actionName, dryrun))
            stage = append(stage, actionExecutor)
        }
        pipeline = append(pipeline, common.NewParallelExecutor(stage...))
    }

    return common.NewPipelineExecutor(pipeline...)
}

func (action *actionDef) asExecutor(ctx context.Context, dryrun bool, workingDir string, tempDir string, actionName string, env []string) common.Executor {
    logger := newActionLogger(actionName, dryrun)
    log.Debugf("Using '%s' for action '%s'", action.Uses, actionName)

    in := container.DockerExecutorInput{
        Ctx:    ctx,
        Logger: logger,
        Dryrun: dryrun,
    }

    var image string
    executors := make([]common.Executor, 0)
    if imageRef, ok := parseImageReference(action.Uses); ok {
        executors = append(executors, container.NewDockerPullExecutor(container.NewDockerPullExecutorInput{
            DockerExecutorInput: in,
            Image:               imageRef,
        }))
        image = imageRef
    } else if contextDir, imageTag, ok := parseImageLocal(workingDir, action.Uses); ok {
        executors = append(executors, container.NewDockerBuildExecutor(container.NewDockerBuildExecutorInput{
            DockerExecutorInput: in,
            ContextDir:          contextDir,
            ImageTag:            imageTag,
        }))
        image = imageTag
    } else if cloneURL, ref, path, ok := parseImageGithub(action.Uses); ok {
        cloneDir := filepath.Join(os.TempDir(), "act", action.Uses)
        executors = append(executors, common.NewGitCloneExecutor(common.NewGitCloneExecutorInput{
            URL:    cloneURL,
            Ref:    ref,
            Dir:    cloneDir,
            Logger: logger,
            Dryrun: dryrun,
        }))

        contextDir := filepath.Join(cloneDir, path)
        imageTag := fmt.Sprintf("%s:%s", filepath.Base(cloneURL.Path), ref)

        executors = append(executors, container.NewDockerBuildExecutor(container.NewDockerBuildExecutorInput{
            DockerExecutorInput: in,
            ContextDir:          contextDir,
            ImageTag:            imageTag,
        }))
        image = imageTag
    } else {
        return common.NewErrorExecutor(fmt.Errorf("unable to determine executor type for image '%s'", action.Uses))
    }

    ghReader, err := action.createGithubTarball()
    if err != nil {
        return common.NewErrorExecutor(err)
    }
    randSuffix := randString(6)
    containerName := regexp.MustCompile("[^a-zA-Z0-9]").ReplaceAllString(actionName, "-")
    if len(containerName)+len(randSuffix)+1 > 30 {
        containerName = containerName[:(30 - (len(randSuffix) + 1))]
    }
    executors = append(executors, container.NewDockerRunExecutor(container.NewDockerRunExecutorInput{
        DockerExecutorInput: in,
        Cmd:                 action.Args,
        Image:               image,
        WorkingDir:          "/github/workspace",
        Env:                 env,
        Name:                fmt.Sprintf("%s-%s", containerName, randSuffix),
        Binds: []string{
            fmt.Sprintf("%s:%s", workingDir, "/github/workspace"),
            fmt.Sprintf("%s:%s", tempDir, "/github/home"),
            fmt.Sprintf("%s:%s", "/var/run/docker.sock", "/var/run/docker.sock"),
        },
        Content: map[string]io.Reader{"/github": ghReader},
    }))

    return common.NewPipelineExecutor(executors...)
}

const letterBytes = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"

func randString(slen int) string {
    b := make([]byte, slen)
    for i := range b {
        b[i] = letterBytes[rand.Int63()%int64(len(letterBytes))]
    }
    return string(b)
}

func (action *actionDef) createGithubTarball() (io.Reader, error) {
    var buf bytes.Buffer
    tw := tar.NewWriter(&buf)
    var files = []struct {
        Name string
        Mode int64
        Body string
    }{
        {"workflow/event.json", 0644, "{}"},
    }
    for _, file := range files {
        hdr := &tar.Header{
            Name: file.Name,
            Mode: file.Mode,
            Size: int64(len(file.Body)),
        }
        if err := tw.WriteHeader(hdr); err != nil {
            return nil, err
        }
        if _, err := tw.Write([]byte(file.Body)); err != nil {
            return nil, err
        }
    }
    if err := tw.Close(); err != nil {
        return nil, err
    }

    return &buf, nil
}

func (w *workflowsFile) setupEnvironment(eventName string, actionName string, dryrun bool) []string {
    env := make([]string, 0)
    repoPath := w.WorkingDir

    _, workflowName, _ := w.getWorkflow(eventName)

    env = append(env, "HOME=/github/home")
    env = append(env, "GITHUB_ACTOR=nektos/act")
    env = append(env, "GITHUB_EVENT_PATH=/github/workflow/event.json")
    env = append(env, "GITHUB_WORKSPACE=/github/workspace")
    env = append(env, fmt.Sprintf("GITHUB_WORKFLOW=%s", workflowName))
    env = append(env, fmt.Sprintf("GITHUB_EVENT_NAME=%s", eventName))
    env = append(env, fmt.Sprintf("GITHUB_ACTION=%s", actionName))

    _, rev, err := common.FindGitRevision(repoPath)
    if err != nil {
        log.Warningf("unable to get git revision: %v", err)
    } else {
        env = append(env, fmt.Sprintf("GITHUB_SHA=%s", rev))
    }

    repo, err := common.FindGithubRepo(repoPath)
    if err != nil {
        log.Warningf("unable to get git repo: %v", err)
    } else {
        env = append(env, fmt.Sprintf("GITHUB_REPOSITORY=%s", repo))
    }

    branch, err := common.FindGitBranch(repoPath)
    if err != nil {
        log.Warningf("unable to get git branch: %v", err)
    } else {
        env = append(env, fmt.Sprintf("GITHUB_REF=refs/heads/%s", branch))
    }

    action, err := w.getAction(actionName)
    if err == nil && !dryrun {
        action.applyEnvironmentSecrets(&env)
    }

    return env
}

func (action *actionDef) applyEnvironmentSecrets(env *[]string) {
    if action != nil {
        for envKey, envValue := range action.Env {
            *env = append(*env, fmt.Sprintf("%s=%s", envKey, envValue))
        }

        for _, secret := range action.Secrets {
            if secretVal, ok := os.LookupEnv(secret); ok {
                *env = append(*env, fmt.Sprintf("%s=%s", secret, secretVal))
            } else {
                if secretCache == nil {
                    secretCache = make(map[string]string)
                }

                if secretCache[secret] == "" {
                    fmt.Printf("Provide value for '%s': ", secret)
                    val, err := gopass.GetPasswdMasked()
                    if err != nil {
                        log.Fatal("abort")
                    }

                    secretCache[secret] = string(val)
                }
                *env = append(*env, fmt.Sprintf("%s=%s", secret, secretCache[secret]))
            }
        }
    }
}

// parseImageLocal expects a directory (relative to workingDir) containing a `Dockerfile`
func parseImageLocal(workingDir string, contextDir string) (contextDirOut string, tag string, ok bool) {
    if !filepath.IsAbs(contextDir) {
        contextDir = filepath.Join(workingDir, contextDir)
    }
    if _, err := os.Stat(filepath.Join(contextDir, "Dockerfile")); os.IsNotExist(err) {
        log.Debugf("Ignoring missing Dockerfile '%s/Dockerfile'", contextDir)
        return "", "", false
    }

    sha, _, err := common.FindGitRevision(contextDir)
    if err != nil {
        log.Warnf("Unable to determine git revision: %v", err)
        sha = "latest"
    }
    return contextDir, fmt.Sprintf("%s:%s", filepath.Base(contextDir), sha), true
}

// parseImageReference expects a `docker://` URL referencing an image in a docker registry
func parseImageReference(image string) (ref string, ok bool) {
    imageURL, err := url.Parse(image)
    if err != nil {
        log.Debugf("Unable to parse image as url: %v", err)
        return "", false
    }
    if imageURL.Scheme != "docker" {
        log.Debugf("Ignoring non-docker ref '%s'", imageURL.String())
        return "", false
    }

    return fmt.Sprintf("%s%s", imageURL.Host, imageURL.Path), true
}

// parseImageGithub expects a `{owner}/{repo}[/{path}][@{ref}]` reference to a GitHub repository
func parseImageGithub(image string) (cloneURL *url.URL, ref string, path string, ok bool) {
    re := regexp.MustCompile("^([^/@]+)/([^/@]+)(/([^@]*))?(@(.*))?$")
    matches := re.FindStringSubmatch(image)

    if matches == nil {
        return nil, "", "", false
    }

    cloneURL, err := url.Parse(fmt.Sprintf("https://github.com/%s/%s", matches[1], matches[2]))
    if err != nil {
        log.Debugf("Unable to parse as URL: %v", err)
        return nil, "", "", false
    }

    resp, err := http.Head(cloneURL.String())
    if err != nil || resp.StatusCode >= 400 {
        log.Debugf("Unable to HEAD URL %s err=%v", cloneURL.String(), err)
        return nil, "", "", false
    }

    ref = matches[6]
    if ref == "" {
        ref = "master"
    }

    path = matches[4]
    if path == "" {
        path = "."
    }

    return cloneURL, ref, path, true
}
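A sketch, not part of the commit, of the staging behavior of newExecutionGraph: actions whose dependencies are already satisfied form a stage, each stage runs in parallel, and stages run in series. The action names, needs relations, and the exampleExecutionGraph helper are invented.

package actions

import "fmt"

// Sketch only: "lint" has no dependencies, "build" and "test" need "lint",
// and "deploy" needs both "build" and "test".
func exampleExecutionGraph() {
    w := &workflowsFile{
        Action: map[string]actionDef{
            "lint":   {},
            "build":  {Needs: []string{"lint"}},
            "test":   {Needs: []string{"lint"}},
            "deploy": {Needs: []string{"build", "test"}},
        },
    }
    fmt.Println(w.newExecutionGraph("deploy"))
    // Expected shape: [[lint] [build test] [deploy]]
    // (ordering within a stage is not deterministic because of map iteration)
}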
89  actions/runner_test.go  Normal file
@@ -0,0 +1,89 @@
package actions

import (
    "fmt"
    "path/filepath"
    "testing"

    "github.com/nektos/act/common"
    log "github.com/sirupsen/logrus"
    "github.com/stretchr/testify/assert"
)

func TestParseImageReference(t *testing.T) {
    log.SetLevel(log.DebugLevel)
    tables := []struct {
        refIn  string
        refOut string
        ok     bool
    }{
        {"docker://myhost.com/foo/bar", "myhost.com/foo/bar", true},
        {"docker://ubuntu", "ubuntu", true},
        {"docker://ubuntu:18.04", "ubuntu:18.04", true},
        {"docker://cibuilds/hugo:0.53", "cibuilds/hugo:0.53", true},
        {"http://google.com:8080", "", false},
        {"./foo", "", false},
    }

    for _, table := range tables {
        refOut, ok := parseImageReference(table.refIn)
        assert.Equal(t, table.refOut, refOut)
        assert.Equal(t, table.ok, ok)
    }

}

func TestParseImageLocal(t *testing.T) {
    log.SetLevel(log.DebugLevel)
    tables := []struct {
        pathIn     string
        contextDir string
        refTag     string
        ok         bool
    }{
        {"docker://myhost.com/foo/bar", "", "", false},
        {"http://google.com:8080", "", "", false},
        {"example/action1", "/example/action1", "action1:", true},
    }

    revision, _, err := common.FindGitRevision(".")
    assert.Nil(t, err)
    basedir, err := filepath.Abs("..")
    assert.Nil(t, err)
    for _, table := range tables {
        contextDir, refTag, ok := parseImageLocal(basedir, table.pathIn)
        assert.Equal(t, table.ok, ok, "ok match for %s", table.pathIn)
        if ok {
            assert.Equal(t, fmt.Sprintf("%s%s", basedir, table.contextDir), contextDir, "context dir doesn't match for %s", table.pathIn)
            assert.Equal(t, fmt.Sprintf("%s%s", table.refTag, revision), refTag)
        }
    }

}

func TestParseImageGithub(t *testing.T) {
    log.SetLevel(log.DebugLevel)
    tables := []struct {
        image    string
        cloneURL string
        ref      string
        path     string
        ok       bool
    }{
        {"nektos/act", "https://github.com/nektos/act", "master", ".", true},
        {"nektos/act/foo", "https://github.com/nektos/act", "master", "foo", true},
        {"nektos/act@xxxxx", "https://github.com/nektos/act", "xxxxx", ".", true},
        {"nektos/act/bar/baz@zzzzz", "https://github.com/nektos/act", "zzzzz", "bar/baz", true},
        {"nektos/zzzzundefinedzzzz", "", "", "", false},
    }

    for _, table := range tables {
        cloneURL, ref, path, ok := parseImageGithub(table.image)
        assert.Equal(t, table.ok, ok, "ok match for %s", table.image)
        if ok {
            assert.Equal(t, table.cloneURL, cloneURL.String())
            assert.Equal(t, table.ref, ref)
            assert.Equal(t, table.path, path)
        }
    }

}
56  actions/types.go  Normal file
@@ -0,0 +1,56 @@
package actions

import (
    "context"
)

// Workflows provides capabilities to work with the workflow file
type Workflows interface {
    EventGrapher
    EventLister
    ActionRunner
    EventRunner
    Close()
}

// EventGrapher to list the actions
type EventGrapher interface {
    GraphEvent(eventName string) ([][]string, error)
}

// EventLister to list the events
type EventLister interface {
    ListEvents() []string
}

// ActionRunner to run an action
type ActionRunner interface {
    RunAction(ctx context.Context, dryrun bool, action string) error
}

// EventRunner to run an event
type EventRunner interface {
    RunEvent(ctx context.Context, dryrun bool, event string) error
}

type workflowDef struct {
    On       string
    Resolves []string
}

type actionDef struct {
    Needs   []string
    Uses    string
    Runs    string
    Args    []string
    Env     map[string]string
    Secrets []string
}

type workflowsFile struct {
    TempDir      string
    WorkingDir   string
    WorkflowPath string
    Workflow     map[string]workflowDef
    Action       map[string]actionDef
}
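Finally, a sketch, not part of the commit, of how the Workflows interface is intended to be consumed end to end. The paths and event name are invented, and running with dryrun set to false would also require a reachable Docker daemon.

package main

import (
    "context"
    "log"

    "github.com/nektos/act/actions"
)

// Sketch only: parse the workflow file, then dry-run every action that the
// "push" event resolves to.
func main() {
    workflows, err := actions.ParseWorkflows(".", ".github/main.workflow")
    if err != nil {
        log.Fatal(err)
    }
    defer workflows.Close()

    if err := workflows.RunEvent(context.Background(), true, "push"); err != nil {
        log.Fatal(err)
    }
}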