initial load of yaml working
@@ -5,17 +5,18 @@ import (
	"log"
	"os"

	"github.com/actions/workflow-parser/model"
	"github.com/howeyc/gopass"
)

var secretCache map[string]string

type actionEnvironmentApplier struct {
	*model.Action
	*Action
}

func newActionEnvironmentApplier(action *model.Action) environmentApplier {
type Action struct{}

func newActionEnvironmentApplier(action *Action) environmentApplier {
	return &actionEnvironmentApplier{action}
}
@@ -1,51 +0,0 @@
package actions

import (
	"context"
	"io"
)

// Runner provides capabilities to run GitHub actions
type Runner interface {
	EventGrapher
	EventLister
	EventRunner
	ActionRunner
	io.Closer
}

// EventGrapher to list the actions
type EventGrapher interface {
	GraphEvent(eventName string) ([][]string, error)
}

// EventLister to list the events
type EventLister interface {
	ListEvents() []string
}

// EventRunner to run the actions for a given event
type EventRunner interface {
	RunEvent() error
}

// ActionRunner to run a specific actions
type ActionRunner interface {
	RunActions(actionNames ...string) error
}

// RunnerConfig contains the config for a new runner
type RunnerConfig struct {
	Ctx             context.Context // context to use for the run
	Dryrun          bool            // don't start any of the containers
	WorkingDir      string          // base directory to use
	WorkflowPath    string          // path to load main.workflow file, relative to WorkingDir
	EventName       string          // name of event to run
	EventPath       string          // path to JSON file to use for event.json in containers, relative to WorkingDir
	ReuseContainers bool            // reuse containers to maintain state
	ForcePull       bool            // force pulling of the image, if already present
}

type environmentApplier interface {
	applyEnvironment(map[string]string)
}
@@ -1,64 +0,0 @@
package actions

import (
	"log"

	"github.com/actions/workflow-parser/model"
)

// return a pipeline that is run in series. pipeline is a list of steps to run in parallel
func newExecutionGraph(workflowConfig *model.Configuration, actionNames ...string) [][]string {
	// first, build a list of all the necessary actions to run, and their dependencies
	actionDependencies := make(map[string][]string)
	for len(actionNames) > 0 {
		newActionNames := make([]string, 0)
		for _, aName := range actionNames {
			// make sure we haven't visited this action yet
			if _, ok := actionDependencies[aName]; !ok {
				action := workflowConfig.GetAction(aName)
				if action != nil {
					actionDependencies[aName] = action.Needs
					newActionNames = append(newActionNames, action.Needs...)
				}
			}
		}
		actionNames = newActionNames
	}

	// next, build an execution graph
	graph := make([][]string, 0)
	for len(actionDependencies) > 0 {
		stage := make([]string, 0)
		for aName, aDeps := range actionDependencies {
			// make sure all deps are in the graph already
			if listInLists(aDeps, graph...) {
				stage = append(stage, aName)
				delete(actionDependencies, aName)
			}
		}
		if len(stage) == 0 {
			log.Fatalf("Unable to build dependency graph!")
		}
		graph = append(graph, stage)
	}

	return graph
}

// return true iff all strings in srcList exist in at least one of the searchLists
func listInLists(srcList []string, searchLists ...[]string) bool {
	for _, src := range srcList {
		found := false
		for _, searchList := range searchLists {
			for _, search := range searchList {
				if src == search {
					found = true
				}
			}
		}
		if !found {
			return false
		}
	}
	return true
}
@@ -1,160 +0,0 @@
package actions

import (
	"io/ioutil"
	"os"
	"path/filepath"
	"sort"

	"github.com/actions/workflow-parser/model"
	"github.com/actions/workflow-parser/parser"
	"github.com/nektos/act/common"
	log "github.com/sirupsen/logrus"
)

type runnerImpl struct {
	config         *RunnerConfig
	workflowConfig *model.Configuration
	tempDir        string
	eventJSON      string
}

// NewRunner Creates a new Runner
func NewRunner(runnerConfig *RunnerConfig) (Runner, error) {
	runner := &runnerImpl{
		config: runnerConfig,
	}

	init := common.NewPipelineExecutor(
		runner.setupTempDir,
		runner.setupWorkingDir,
		runner.setupWorkflows,
		runner.setupEvent,
	)

	return runner, init()
}

func (runner *runnerImpl) setupTempDir() error {
	var err error
	runner.tempDir, err = ioutil.TempDir("", "act-")
	return err
}

func (runner *runnerImpl) setupWorkingDir() error {
	var err error
	runner.config.WorkingDir, err = filepath.Abs(runner.config.WorkingDir)
	log.Debugf("Setting working dir to %s", runner.config.WorkingDir)
	return err
}

func (runner *runnerImpl) setupWorkflows() error {
	runner.config.WorkflowPath = runner.resolvePath(runner.config.WorkflowPath)
	log.Debugf("Loading workflow config from %s", runner.config.WorkflowPath)
	workflowReader, err := os.Open(runner.config.WorkflowPath)
	if err != nil {
		return err
	}
	defer workflowReader.Close()

	runner.workflowConfig, err = parser.Parse(workflowReader)
	return err
}

func (runner *runnerImpl) setupEvent() error {
	runner.eventJSON = "{}"
	if runner.config.EventPath != "" {
		runner.config.EventPath = runner.resolvePath(runner.config.EventPath)
		log.Debugf("Reading event.json from %s", runner.config.EventPath)
		eventJSONBytes, err := ioutil.ReadFile(runner.config.EventPath)
		if err != nil {
			return err
		}
		runner.eventJSON = string(eventJSONBytes)
	}
	return nil
}

func (runner *runnerImpl) resolvePath(path string) string {
	if path == "" {
		return path
	}
	if !filepath.IsAbs(path) {
		path = filepath.Join(runner.config.WorkingDir, path)
	}
	return path
}

// ListEvents gets all the events in the workflows file
func (runner *runnerImpl) ListEvents() []string {
	log.Debugf("Listing all events")
	events := make([]string, 0)
	for _, w := range runner.workflowConfig.Workflows {
		events = append(events, w.On)
	}

	// sort the list based on depth of dependencies
	sort.Slice(events, func(i, j int) bool {
		return events[i] < events[j]
	})

	return events
}

// GraphEvent builds an execution path
func (runner *runnerImpl) GraphEvent(eventName string) ([][]string, error) {
	log.Debugf("Listing actions for event '%s'", eventName)
	resolves := runner.resolveEvent(eventName)
	return newExecutionGraph(runner.workflowConfig, resolves...), nil
}

// RunAction runs a set of actions in parallel, and their dependencies
func (runner *runnerImpl) RunActions(actionNames ...string) error {
	log.Debugf("Running actions %+q", actionNames)
	graph := newExecutionGraph(runner.workflowConfig, actionNames...)

	pipeline := make([]common.Executor, 0)
	for _, actions := range graph {
		stage := make([]common.Executor, 0)
		for _, actionName := range actions {
			stage = append(stage, runner.newActionExecutor(actionName))
		}
		pipeline = append(pipeline, common.NewParallelExecutor(stage...))
	}

	executor := common.NewPipelineExecutor(pipeline...)
	return executor()
}

// RunEvent runs the actions for a single event
func (runner *runnerImpl) RunEvent() error {
	log.Debugf("Running event '%s'", runner.config.EventName)
	resolves := runner.resolveEvent(runner.config.EventName)
	log.Debugf("Running actions %s -> %s", runner.config.EventName, resolves)
	return runner.RunActions(resolves...)
}

func (runner *runnerImpl) Close() error {
	return os.RemoveAll(runner.tempDir)
}

// get list of resolves for an event
func (runner *runnerImpl) resolveEvent(eventName string) []string {
	workflows := runner.workflowConfig.GetWorkflows(eventName)
	resolves := make([]string, 0)
	for _, workflow := range workflows {
		for _, resolve := range workflow.Resolves {
			found := false
			for _, r := range resolves {
				if r == resolve {
					found = true
					break
				}
			}
			if !found {
				resolves = append(resolves, resolve)
			}
		}
	}
	return resolves
}
@@ -1,253 +0,0 @@
package actions

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"regexp"

	"github.com/actions/workflow-parser/model"
	"github.com/nektos/act/common"
	"github.com/nektos/act/container"
	log "github.com/sirupsen/logrus"
)

func (runner *runnerImpl) newActionExecutor(actionName string) common.Executor {
	action := runner.workflowConfig.GetAction(actionName)
	if action == nil {
		return common.NewErrorExecutor(fmt.Errorf("Unable to find action named '%s'", actionName))
	}

	executors := make([]common.Executor, 0)
	image, err := runner.addImageExecutor(action, &executors)
	if err != nil {
		return common.NewErrorExecutor(err)
	}

	err = runner.addRunExecutor(action, image, &executors)
	if err != nil {
		return common.NewErrorExecutor(err)
	}

	return common.NewPipelineExecutor(executors...)
}

func (runner *runnerImpl) addImageExecutor(action *model.Action, executors *[]common.Executor) (string, error) {
	var image string
	logger := newActionLogger(action.Identifier, runner.config.Dryrun)
	log.Debugf("Using '%s' for action '%s'", action.Uses, action.Identifier)

	in := container.DockerExecutorInput{
		Ctx:    runner.config.Ctx,
		Logger: logger,
		Dryrun: runner.config.Dryrun,
	}
	switch uses := action.Uses.(type) {

	case *model.UsesDockerImage:
		image = uses.Image

		pull := runner.config.ForcePull
		if !pull {
			imageExists, err := container.ImageExistsLocally(runner.config.Ctx, image)
			log.Debugf("Image exists? %v", imageExists)
			if err != nil {
				return "", fmt.Errorf("unable to determine if image already exists for image %q", image)
			}

			if !imageExists {
				pull = true
			}
		}

		if pull {
			*executors = append(*executors, container.NewDockerPullExecutor(container.NewDockerPullExecutorInput{
				DockerExecutorInput: in,
				Image:               image,
			}))
		}

	case *model.UsesPath:
		contextDir := filepath.Join(runner.config.WorkingDir, uses.String())
		sha, _, err := common.FindGitRevision(contextDir)
		if err != nil {
			log.Warnf("Unable to determine git revision: %v", err)
			sha = "latest"
		}
		image = fmt.Sprintf("%s:%s", filepath.Base(contextDir), sha)

		*executors = append(*executors, container.NewDockerBuildExecutor(container.NewDockerBuildExecutorInput{
			DockerExecutorInput: in,
			ContextDir:          contextDir,
			ImageTag:            image,
		}))

	case *model.UsesRepository:
		image = fmt.Sprintf("%s:%s", filepath.Base(uses.Repository), uses.Ref)
		cloneURL := fmt.Sprintf("https://github.com/%s", uses.Repository)

		cloneDir := filepath.Join(os.TempDir(), "act", action.Uses.String())
		*executors = append(*executors, common.NewGitCloneExecutor(common.NewGitCloneExecutorInput{
			URL:    cloneURL,
			Ref:    uses.Ref,
			Dir:    cloneDir,
			Logger: logger,
			Dryrun: runner.config.Dryrun,
		}))

		contextDir := filepath.Join(cloneDir, uses.Path)
		*executors = append(*executors, container.NewDockerBuildExecutor(container.NewDockerBuildExecutorInput{
			DockerExecutorInput: in,
			ContextDir:          contextDir,
			ImageTag:            image,
		}))

	default:
		return "", fmt.Errorf("unable to determine executor type for image '%s'", action.Uses)
	}

	return image, nil
}

func (runner *runnerImpl) addRunExecutor(action *model.Action, image string, executors *[]common.Executor) error {
	logger := newActionLogger(action.Identifier, runner.config.Dryrun)
	log.Debugf("Using '%s' for action '%s'", action.Uses, action.Identifier)

	in := container.DockerExecutorInput{
		Ctx:    runner.config.Ctx,
		Logger: logger,
		Dryrun: runner.config.Dryrun,
	}

	env := make(map[string]string)
	for _, applier := range []environmentApplier{newActionEnvironmentApplier(action), runner} {
		applier.applyEnvironment(env)
	}
	env["GITHUB_ACTION"] = action.Identifier

	ghReader, err := runner.createGithubTarball()
	if err != nil {
		return err
	}

	envList := make([]string, 0)
	for k, v := range env {
		envList = append(envList, fmt.Sprintf("%s=%s", k, v))
	}

	var cmd, entrypoint []string
	if action.Args != nil {
		cmd = action.Args.Split()
	}
	if action.Runs != nil {
		entrypoint = action.Runs.Split()
	}
	*executors = append(*executors, container.NewDockerRunExecutor(container.NewDockerRunExecutorInput{
		DockerExecutorInput: in,
		Cmd:                 cmd,
		Entrypoint:          entrypoint,
		Image:               image,
		WorkingDir:          "/github/workspace",
		Env:                 envList,
		Name:                runner.createContainerName(action.Identifier),
		Binds: []string{
			fmt.Sprintf("%s:%s", runner.config.WorkingDir, "/github/workspace"),
			fmt.Sprintf("%s:%s", runner.tempDir, "/github/home"),
			fmt.Sprintf("%s:%s", "/var/run/docker.sock", "/var/run/docker.sock"),
		},
		Content:         map[string]io.Reader{"/github": ghReader},
		ReuseContainers: runner.config.ReuseContainers,
	}))

	return nil
}

func (runner *runnerImpl) applyEnvironment(env map[string]string) {
	repoPath := runner.config.WorkingDir

	workflows := runner.workflowConfig.GetWorkflows(runner.config.EventName)
	if len(workflows) == 0 {
		return
	}
	workflowName := workflows[0].Identifier

	env["HOME"] = "/github/home"
	env["GITHUB_ACTOR"] = "nektos/act"
	env["GITHUB_EVENT_PATH"] = "/github/workflow/event.json"
	env["GITHUB_WORKSPACE"] = "/github/workspace"
	env["GITHUB_WORKFLOW"] = workflowName
	env["GITHUB_EVENT_NAME"] = runner.config.EventName

	_, rev, err := common.FindGitRevision(repoPath)
	if err != nil {
		log.Warningf("unable to get git revision: %v", err)
	} else {
		env["GITHUB_SHA"] = rev
	}

	repo, err := common.FindGithubRepo(repoPath)
	if err != nil {
		log.Warningf("unable to get git repo: %v", err)
	} else {
		env["GITHUB_REPOSITORY"] = repo
	}

	ref, err := common.FindGitRef(repoPath)
	if err != nil {
		log.Warningf("unable to get git ref: %v", err)
	} else {
		log.Infof("using github ref: %s", ref)
		env["GITHUB_REF"] = ref
	}
}

func (runner *runnerImpl) createGithubTarball() (io.Reader, error) {
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	var files = []struct {
		Name string
		Mode int64
		Body string
	}{
		{"workflow/event.json", 0644, runner.eventJSON},
	}
	for _, file := range files {
		log.Debugf("Writing entry to tarball %s len:%d", file.Name, len(runner.eventJSON))
		hdr := &tar.Header{
			Name: file.Name,
			Mode: file.Mode,
			Size: int64(len(runner.eventJSON)),
		}
		if err := tw.WriteHeader(hdr); err != nil {
			return nil, err
		}
		if _, err := tw.Write([]byte(runner.eventJSON)); err != nil {
			return nil, err
		}
	}
	if err := tw.Close(); err != nil {
		return nil, err
	}

	return &buf, nil

}

func (runner *runnerImpl) createContainerName(actionName string) string {
	containerName := regexp.MustCompile("[^a-zA-Z0-9]").ReplaceAllString(actionName, "-")

	prefix := fmt.Sprintf("%s-", trimToLen(filepath.Base(runner.config.WorkingDir), 10))
	suffix := ""
	containerName = trimToLen(containerName, 30-(len(prefix)+len(suffix)))
	return fmt.Sprintf("%s%s%s", prefix, containerName, suffix)
}

func trimToLen(s string, l int) string {
	if len(s) > l {
		return s[:l]
	}
	return s
}
@@ -1,71 +0,0 @@
package actions

import (
	"context"
	"testing"

	log "github.com/sirupsen/logrus"
	"gotest.tools/assert"
)

func TestGraphEvent(t *testing.T) {
	runnerConfig := &RunnerConfig{
		Ctx:          context.Background(),
		WorkflowPath: "multi.workflow",
		WorkingDir:   "testdata",
		EventName:    "push",
	}
	runner, err := NewRunner(runnerConfig)
	assert.NilError(t, err)

	graph, err := runner.GraphEvent("push")
	assert.NilError(t, err)
	assert.DeepEqual(t, graph, [][]string{{"build"}})

	graph, err = runner.GraphEvent("release")
	assert.NilError(t, err)
	assert.DeepEqual(t, graph, [][]string{{"deploy"}})
}

func TestRunEvent(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test")
	}

	tables := []struct {
		workflowPath string
		eventName    string
		errorMessage string
	}{
		{"basic.workflow", "push", ""},
		{"pipe.workflow", "push", ""},
		{"fail.workflow", "push", "exit with `FAILURE`: 1"},
		{"buildfail.workflow", "push", "COPY failed"},
		{"regex.workflow", "push", "exit with `NEUTRAL`: 78"},
		{"gitref.workflow", "push", ""},
		{"env.workflow", "push", ""},
		{"detect_event.workflow", "", ""},
	}
	log.SetLevel(log.DebugLevel)

	for _, table := range tables {
		table := table
		t.Run(table.workflowPath, func(t *testing.T) {
			runnerConfig := &RunnerConfig{
				Ctx:          context.Background(),
				WorkflowPath: table.workflowPath,
				WorkingDir:   "testdata",
				EventName:    table.eventName,
			}
			runner, err := NewRunner(runnerConfig)
			assert.NilError(t, err, table.workflowPath)

			err = runner.RunEvent()
			if table.errorMessage == "" {
				assert.NilError(t, err, table.workflowPath)
			} else {
				assert.ErrorContains(t, err, table.errorMessage)
			}
		})
	}
}