Compare commits

...

114 Commits

Author SHA1 Message Date
f494bd3cbe Patched options() to let container options propagate to job containers 2024-02-23 08:45:47 +01:00
techknowlogick
e6fec7324e Merge pull request 'Bump nektos/act to 0.2.59' (#90) from harryzcy/act:bump-nektos-act into main
Reviewed-on: https://gitea.com/gitea/act/pulls/90
2024-02-19 22:25:12 +00:00
Chongyi Zheng
73f32886f2 Merge branch 'nektos/master' into bump-nektos 2024-02-17 13:19:51 -05:00
GitHub Actions
b7a8145d09 chore: bump VERSION to 0.2.59 2024-02-01 22:32:06 +00:00
Milo Moisson
12c0c4277a feat: correctly use the xdg library, which has the side effect of fixing the config survey (#2195) 2024-02-01 13:57:16 -08:00
GitHub Actions
3ed38d8e8b chore: bump VERSION to 0.2.58 2024-02-01 02:12:33 +00:00
dependabot[bot]
0dbf44c657 build(deps): bump github.com/opencontainers/runc from 1.1.7 to 1.1.12 (#2187)
Bumps [github.com/opencontainers/runc](https://github.com/opencontainers/runc) from 1.1.7 to 1.1.12.
- [Release notes](https://github.com/opencontainers/runc/releases)
- [Changelog](https://github.com/opencontainers/runc/blob/v1.1.12/CHANGELOG.md)
- [Commits](https://github.com/opencontainers/runc/compare/v1.1.7...v1.1.12)

---
updated-dependencies:
- dependency-name: github.com/opencontainers/runc
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-31 23:56:50 +00:00
dependabot[bot]
6dcf9bc6e6 build(deps): bump github.com/moby/buildkit from 0.11.5 to 0.12.5 (#2188)
Bumps [github.com/moby/buildkit](https://github.com/moby/buildkit) from 0.11.5 to 0.12.5.
- [Release notes](https://github.com/moby/buildkit/releases)
- [Commits](https://github.com/moby/buildkit/compare/v0.11.5...v0.12.5)

---
updated-dependencies:
- dependency-name: github.com/moby/buildkit
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-31 23:41:20 +00:00
dependabot[bot]
df61c7fcdb build(deps): bump github.com/containerd/containerd from 1.6.19 to 1.6.26 (#2189)
Bumps [github.com/containerd/containerd](https://github.com/containerd/containerd) from 1.6.19 to 1.6.26.
- [Release notes](https://github.com/containerd/containerd/releases)
- [Changelog](https://github.com/containerd/containerd/blob/main/RELEASES.md)
- [Commits](https://github.com/containerd/containerd/compare/v1.6.19...v1.6.26)

---
updated-dependencies:
- dependency-name: github.com/containerd/containerd
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-31 23:24:57 +00:00
dependabot[bot]
36e0261150 build(deps): bump github.com/opencontainers/image-spec (#2167)
Bumps [github.com/opencontainers/image-spec](https://github.com/opencontainers/image-spec) from 1.1.0-rc5 to 1.1.0-rc.6.
- [Release notes](https://github.com/opencontainers/image-spec/releases)
- [Changelog](https://github.com/opencontainers/image-spec/blob/main/RELEASES.md)
- [Commits](https://github.com/opencontainers/image-spec/compare/v1.1.0-rc5...v1.1.0-rc6)

---
updated-dependencies:
- dependency-name: github.com/opencontainers/image-spec
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-31 23:11:39 +00:00
dependabot[bot]
46dc2ffe80 build(deps): bump github.com/moby/buildkit from 0.12.4 to 0.12.5 (#2186)
Bumps [github.com/moby/buildkit](https://github.com/moby/buildkit) from 0.12.4 to 0.12.5.
- [Release notes](https://github.com/moby/buildkit/releases)
- [Commits](https://github.com/moby/buildkit/compare/v0.12.4...v0.12.5)

---
updated-dependencies:
- dependency-name: github.com/moby/buildkit
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-31 22:58:02 +00:00
Markus Wolf
054caec791 fix: use correct path to toolcache (#1494)
The toolcache on GitHub Actions needs to be in
/opt/hostedtoolcache. This is the case for all
environment variables set by act, but it's not the
case for the volume mounted into the container.

Co-authored-by: Björn Brauer <zaubernerd@zaubernerd.de>
Co-authored-by: ChristopherHX <christopher.homberger@web.de>
Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
2024-01-30 22:43:52 +00:00
ChristopherHX
5a80a044f9 refactor: filecollector into new package (#2174)
* refactor: filecollector into new package

* Add test for symlinks

* add test fix bug of GetContainerArchive

* add test data
2024-01-30 00:46:45 +00:00
dependabot[bot]
4ca35d2192 build(deps): bump codecov/codecov-action from 3.1.4 to 3.1.5 (#2175)
Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from 3.1.4 to 3.1.5.
- [Release notes](https://github.com/codecov/codecov-action/releases)
- [Changelog](https://github.com/codecov/codecov-action/blob/main/CHANGELOG.md)
- [Commits](https://github.com/codecov/codecov-action/compare/v3.1.4...v3.1.5)

---
updated-dependencies:
- dependency-name: codecov/codecov-action
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-29 02:34:33 +00:00
Josh Soref
5e0d29d665 fix: improve warning about remote not found (#2169)
Signed-off-by: Josh Soref <2119212+jsoref@users.noreply.github.com>
Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
2024-01-28 19:21:21 +00:00
ChristopherHX
6dd67253bc fix: improve new-action-cache fetch failure error (#2172)
- include repoURL and repoRef in error
- map NoErrAlreadyUptodate to `couldn't find remote ref` for branchOrtag
  fetch request

Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
2024-01-28 17:02:15 +00:00
ChristopherHX
09d4b5d6ad fix: subpath actions via new artifact cache (#2170)
Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
2024-01-28 16:49:47 +00:00
ChristopherHX
a6ec2c129a fix: improve action not found error (#2171) 2024-01-28 16:37:19 +00:00
Eng Zer Jun
424fd5e02b refactor(cmd/root): simplify parseEnvs (#2162)
Prior to this commit, `parseEnvs` accepted two parameters:

  1. env []string
  2. envs map[string]string

`parseEnvs` then did a `nil` check for `env`. However, we never pass a
`nil` `env` to `parseEnvs` in `newRunCommand`.

This commit simplifies the `parseEnvs` function to accept just one
`env []string` parameter and return the result as a `map[string]string`
instead.

Signed-off-by: Eng Zer Jun <engzerjun@gmail.com>
2024-01-24 03:06:31 +00:00
Matthew
6a8c42ac53 Add containerd's normalized architectures to archMapper (#2168) 2024-01-24 02:44:48 +00:00
dependabot[bot]
c215e0888a build(deps): bump megalinter/megalinter from 7.7.0 to 7.8.0 (#2164)
Bumps [megalinter/megalinter](https://github.com/megalinter/megalinter) from 7.7.0 to 7.8.0.
- [Release notes](https://github.com/megalinter/megalinter/releases)
- [Changelog](https://github.com/oxsecurity/megalinter/blob/main/CHANGELOG.md)
- [Commits](https://github.com/megalinter/megalinter/compare/v7.7.0...v7.8.0)

---
updated-dependencies:
- dependency-name: megalinter/megalinter
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-22 02:22:08 +00:00
Milo Moisson
6091094e14 fix: write default config in XDG config dir to avoid cluttering the HOME directory by default (#2140)
Co-authored-by: Casey Lee <cplee@nektos.com>
Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
2024-01-21 20:16:06 +00:00
胖梁
8072a00a77 WorkflowDispatchConfig supports multiple yaml node kinds (#2123)
* WorkflowDispatchConfig supports ScalarNode and SequenceNode yaml node kinds

* Avoid using log.Fatal

* package slices is not in golang 1.20
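
For context, this presumably concerns how the workflow's `on` node is written; a hedged illustration of the scalar and sequence YAML forms alongside the usual mapping form (the input name is made up):

```yaml
---
# scalar node
on: workflow_dispatch
---
# sequence node
on: [push, workflow_dispatch]
---
# mapping node, with inputs
on:
  workflow_dispatch:
    inputs:
      version:
        description: 'Version to build'  # hypothetical input name
```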

---------

Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
2024-01-20 14:07:36 +00:00
Kristoffer
7f7d84b10f fix: match cache restore-keys in creation reverse order (#2153)
* Match cache restore-keys in creation reverse order

* Match full prefix when selecting cache
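
For reference, the `restore-keys` shape this affects, taken from the `.gitea/workflows/test.yml` added in the diff further down; after this change, when several caches match, the most recently created one matching the full prefix is selected:

```yaml
- name: cache go cache
  id: cache-go-cache
  uses: https://github.com/actions/cache@v3
  with:
    path: /go_cache
    key: go_cache-${{ github.repository }}-${{ github.ref_name }}
    restore-keys: |
      go_cache-${{ github.repository }}-
      go_cache-
```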

---------

Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
2024-01-20 12:11:50 +00:00
dependabot[bot]
15bb54f14e build(deps): bump actions/upload-artifact from 3 to 4 (#2133)
Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 3 to 4.
- [Release notes](https://github.com/actions/upload-artifact/releases)
- [Commits](https://github.com/actions/upload-artifact/compare/v3...v4)

---
updated-dependencies:
- dependency-name: actions/upload-artifact
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Casey Lee <cplee@nektos.com>
Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
2024-01-20 00:32:38 +00:00
TKaxv_7S
f055d4ae60 feat: support offline mode (#2128)
* Add: Actions Offline Mode

* Add: Actions Offline Mode

---------

Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
2024-01-20 00:20:15 +00:00
ChristopherHX
f7a846d2f5 feat: cli option to enable the new action cache (#1954)
* Enable the new action cache

* fix

* fix: CopyTarStream (Docker)

* suppress panic in test

* add a cli option for opt in

* fixups

* add package

* fix

* rc.Config nil in test???

* add feature flag

* patch

* Fix respect --action-cache-path

Co-authored-by: Björn Brauer <zaubernerd@zaubernerd.de>

* add remote reusable workflow to ActionCache

* fixup

---------

Co-authored-by: Björn Brauer <zaubernerd@zaubernerd.de>
Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
2024-01-19 23:49:35 +00:00
Casey Lee
cd40f3fe9b ci: automatic merge PRs created by a maintainer and approved by 1 other maintainer (#2156) 2024-01-19 07:04:05 -08:00
Leonardo Taccari
adbe229fcb Add support for NetBSD (#2023)
NetBSD can run the Docker CLI and then use Docker on some remote machine
via DOCKER_HOST.

(This can probably be extended to all other Unixes capable of running
just the Docker CLI.)

Co-authored-by: ChristopherHX <christopher.homberger@web.de>
2024-01-08 19:26:03 +00:00
dependabot[bot]
96d6cf8b2c build(deps): bump github.com/cloudflare/circl from 1.3.3 to 1.3.7 (#2149)
Bumps [github.com/cloudflare/circl](https://github.com/cloudflare/circl) from 1.3.3 to 1.3.7.
- [Release notes](https://github.com/cloudflare/circl/releases)
- [Commits](https://github.com/cloudflare/circl/compare/v1.3.3...v1.3.7)

---
updated-dependencies:
- dependency-name: github.com/cloudflare/circl
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-08 17:01:44 +00:00
dependabot[bot]
ef5746ba74 build(deps): bump golang.org/x/term from 0.15.0 to 0.16.0 (#2148)
Bumps [golang.org/x/term](https://github.com/golang/term) from 0.15.0 to 0.16.0.
- [Commits](https://github.com/golang/term/compare/v0.15.0...v0.16.0)

---
updated-dependencies:
- dependency-name: golang.org/x/term
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-08 02:23:22 +00:00
Jason Song
15045b4fc0 Merge pull request 'Fix panic in extractFromImageEnv' (#81) from wolfogre/act:bugfix/panic_extractFromImageEnv into main
Reviewed-on: https://gitea.com/gitea/act/pulls/81
Reviewed-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-10-31 14:48:40 +00:00
Jason Song
67918333fa fix: panic 2023-10-31 22:32:21 +08:00
Lunny Xiao
c93462e19f Merge pull request 'bump nektos to 0.2.52' (#79) from bump-nektos into main
Reviewed-on: https://gitea.com/gitea/act/pulls/79
Reviewed-by: John Olheiser <john+gitea@jolheiser.com>
2023-10-13 01:20:21 +00:00
techknowlogick
f3264cac20 Merge remote-tracking branch 'upstream/master' into bump-nektos 2023-10-11 15:28:38 -04:00
techknowlogick
4699c3b689 Merge nektos/act/v0.2.51 2023-09-24 15:09:26 -04:00
Jason Song
22d91e3ac3 Merge tag 'nektos/v0.2.49'
Conflicts:
	cmd/input.go
	go.mod
	go.sum
	pkg/exprparser/interpreter.go
	pkg/model/workflow.go
	pkg/runner/expression.go
	pkg/runner/job_executor.go
	pkg/runner/runner.go
2023-08-02 11:52:14 +08:00
sillyguodong
cdc6d4bc6a Support expression in uses (#75)
Actions can specify their download source via a URL prefix. The prefix may contain sensitive information that needs to be stored in the secrets or variables context, so we need to interpolate the expression value for `uses` first.
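
For illustration only (the variable name and host are hypothetical), interpolation makes a prefix like this possible:

```yaml
steps:
  - name: checkout
    uses: ${{ vars.ACTIONS_MIRROR }}/actions/checkout@v4
```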

Reviewed-on: https://gitea.com/gitea/act/pulls/75
Co-authored-by: sillyguodong <gedong_1994@163.com>
Co-committed-by: sillyguodong <gedong_1994@163.com>
2023-07-17 03:46:34 +00:00
Zettat123
2069b04779 Fix missed ValidVolumes for docker steps (#74)
Fixes https://gitea.com/gitea/act_runner/issues/277

Thanks @ChristopherHX for finding the cause of the bug.

Reviewed-on: https://gitea.com/gitea/act/pulls/74
Co-authored-by: Zettat123 <zettat123@gmail.com>
Co-committed-by: Zettat123 <zettat123@gmail.com>
2023-07-11 02:08:22 +00:00
sati.ac
3813f40cba use remoteAction.URL if not empty (#71)
Fixes https://github.com/go-gitea/gitea/issues/25615

Reviewed-on: https://gitea.com/gitea/act/pulls/71
Co-authored-by: sati.ac <sati.ac@noreply.gitea.com>
Co-committed-by: sati.ac <sati.ac@noreply.gitea.com>
2023-07-03 03:43:44 +00:00
Jason Song
eb19987893 Revert "Support for multiple default URLs for getting actions (#58)" (#70)
Follow https://github.com/go-gitea/gitea/pull/25581 .

Reviewed-on: https://gitea.com/gitea/act/pulls/70
2023-06-30 07:45:13 +00:00
Zettat123
545802b97b Fix the error when removing network in self-hosted mode (#69)
Fixes https://gitea.com/gitea/act_runner/issues/255

Reviewed-on: https://gitea.com/gitea/act/pulls/69
Co-authored-by: Zettat123 <zettat123@gmail.com>
Co-committed-by: Zettat123 <zettat123@gmail.com>
2023-06-28 02:27:12 +00:00
Tomasz Duda
515c2c429d fix action cloning, set correct server_url for act_runner exec (#68)
1. The newest act is not able to clone actions based on --default-actions-url.
It might be a side effect of https://gitea.com/gitea/act/pulls/67.
2. Set the correct server_url, api_url, and graphql_url for act_runner exec.

Reviewed-on: https://gitea.com/gitea/act/pulls/68
Reviewed-by: Jason Song <i@wolfogre.com>
Co-authored-by: Tomasz Duda <tomaszduda23@gmail.com>
Co-committed-by: Tomasz Duda <tomaszduda23@gmail.com>
2023-06-20 07:36:10 +00:00
Zettat123
a165e17878 Add support for glob syntax when checking volumes (#64)
Follow #60

Reviewed-on: https://gitea.com/gitea/act/pulls/64
Reviewed-by: Jason Song <i@wolfogre.com>
Co-authored-by: Zettat123 <zettat123@gmail.com>
Co-committed-by: Zettat123 <zettat123@gmail.com>
2023-06-16 05:24:01 +00:00
Zettat123
56e103b4ba Fix the missing URL when using remote reusable workflow (#67)
Reviewed-on: https://gitea.com/gitea/act/pulls/67
Reviewed-by: Jason Song <i@wolfogre.com>
Co-authored-by: Zettat123 <zettat123@gmail.com>
Co-committed-by: Zettat123 <zettat123@gmail.com>
2023-06-16 05:12:43 +00:00
Marius Zwicker
422cbdf446 Allow to override location of action cache dir (#65)
Adds an explicit config option to specify the directory
below which action contents will be cached. If left empty,
the previous location at `$XDG_CACHE_HOME/act` or
`$HOME/.cache/act` will be used.

Required to resolve gitea/act_runner#235

Co-authored-by: Marius Zwicker <marius@mlba-team.de>
Reviewed-on: https://gitea.com/gitea/act/pulls/65
Reviewed-by: Jason Song <i@wolfogre.com>
Co-authored-by: Marius Zwicker <emzeat@noreply.gitea.com>
Co-committed-by: Marius Zwicker <emzeat@noreply.gitea.com>
2023-06-16 03:41:39 +00:00
Jason Song
8c56bd3aa5 Merge tag 'nektos/v0.2.46' 2023-06-16 11:08:39 +08:00
a1012112796
a94498b482 fix local workflow for act_runner exec (#63)
By the way, this exports `ACT_SKIP_CHECKOUT` as an environment variable so users can do some special configuration for local tests.

example usage:

7a3ab0fdbc

Reviewed-on: https://gitea.com/gitea/act/pulls/63
Reviewed-by: Jason Song <i@wolfogre.com>
Co-authored-by: a1012112796 <1012112796@qq.com>
Co-committed-by: a1012112796 <1012112796@qq.com>
2023-06-13 03:46:26 +00:00
sillyguodong
fe76a035ad Follow upstream support for variables (#66)
Because the upstream [PR](https://github.com/nektos/act/pull/1833) already supports variables, this PR reverts #43 (commit de529139af) and cherry-picks commit [6ce45e3](6ce45e3f24).

Co-authored-by: Kuan Yong <wong0514@gmail.com>
Reviewed-on: https://gitea.com/gitea/act/pulls/66
Reviewed-by: Jason Song <i@wolfogre.com>
Co-authored-by: sillyguodong <gedong_1994@163.com>
Co-committed-by: sillyguodong <gedong_1994@163.com>
2023-06-12 06:54:17 +00:00
sillyguodong
6ce5c93cc8 Put the job container name into the env context (#62)
Related: https://gitea.com/gitea/act_runner/issues/189#issuecomment-740636
Refer to the [Docker docs](https://docs.docker.com/engine/reference/commandline/run/#volumes-from): the `--volumes-from` flag is used when running or creating a new container and takes the name or ID of the container from which you want to share volumes. Here's the syntax:
```
docker run --volumes-from <container_name_or_id> <image>
```
So this PR puts the job container name into the `env` context.

Co-authored-by: Jason Song <i@wolfogre.com>
Reviewed-on: https://gitea.com/gitea/act/pulls/62
Reviewed-by: Jason Song <i@wolfogre.com>
Co-authored-by: sillyguodong <gedong_1994@163.com>
Co-committed-by: sillyguodong <gedong_1994@163.com>
2023-06-06 00:21:31 +00:00
Zettat123
92b4d73376 Check volumes (#60)
This PR adds a `ValidVolumes` config. Users can use it to specify the volumes (including bind mounts) that are allowed to be mounted into containers; a workflow-side example follows the lists below.

Options related to volumes:
- [jobs.<job_id>.container.volumes](https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idcontainervolumes)
- [jobs.<job_id>.services.<service_id>.volumes](https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idservicesservice_idvolumes)

In addition, volumes specified by `options` will also be checked.

Currently, the following default volumes (see a72822b3f8/pkg/runner/run_context.go (L116-L166)) will be added to `ValidVolumes`:
- `act-toolcache`
- `<container-name>` and `<container-name>-env`
- `/var/run/docker.sock` (We need to add a new configuration to control whether the docker daemon can be mounted)
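
For reference, a minimal example of the workflow-side volume syntax that `ValidVolumes` guards (the volume name and paths are placeholders):

```yaml
jobs:
  test:
    runs-on: ubuntu-latest
    container:
      image: node:20
      volumes:
        - my_docker_volume:/volume_mount     # named volume
        - /path/on/host:/path/in/container   # bind mount
```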

Co-authored-by: Jason Song <i@wolfogre.com>
Reviewed-on: https://gitea.com/gitea/act/pulls/60
Reviewed-by: Jason Song <i@wolfogre.com>
Co-authored-by: Zettat123 <zettat123@gmail.com>
Co-committed-by: Zettat123 <zettat123@gmail.com>
2023-06-05 09:21:59 +00:00
Zettat123
183bb7af1b Support for multiple default URLs for getting actions (#58)
Partially resolve https://github.com/go-gitea/gitea/issues/24789.

`act_runner` needs to be improved to parse `gitea_default_actions_url` after this PR is merged (https://gitea.com/gitea/act_runner/pulls/200)

Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
Reviewed-on: https://gitea.com/gitea/act/pulls/58
Reviewed-by: Lunny Xiao <xiaolunwen@gmail.com>
Reviewed-by: Jason Song <i@wolfogre.com>
Co-authored-by: Zettat123 <zettat123@gmail.com>
Co-committed-by: Zettat123 <zettat123@gmail.com>
2023-06-05 09:07:17 +00:00
sillyguodong
a72822b3f8 Fix some options issues (#59)
- Fix https://gitea.com/gitea/act_runner/issues/220
ignore `--network` and `--net` in `options`.
- Fix https://gitea.com/gitea/act_runner/issues/222
add the `mergo.WithAppendSlice` option when executing `mergo.Merge()`.

Reviewed-on: https://gitea.com/gitea/act/pulls/59
Reviewed-by: Zettat123 <zettat123@noreply.gitea.com>
Reviewed-by: Lunny Xiao <xiaolunwen@gmail.com>
Co-authored-by: sillyguodong <gedong_1994@163.com>
Co-committed-by: sillyguodong <gedong_1994@163.com>
2023-05-31 10:33:39 +00:00
sillyguodong
9283cfc9b1 Fix container network issue (#56)
Follow: https://gitea.com/gitea/act_runner/pulls/184
Close https://gitea.com/gitea/act_runner/issues/177

#### changes:
- `act` creates new networks only if the value of `NeedCreateNetwork` is true, and removes these networks at the end. `NeedCreateNetwork` is passed by `act_runner`. `NeedCreateNetwork` is true only if `container.network` in the configuration file of `act_runner` is empty.
- In the `docker create` phase, specify the network to which containers will connect, because if no network is specified, the container connects to the `bridge` network that Docker creates automatically.
  - If the network is a user-defined network (the value of `container.network` is empty or `<custom-network>`; the network created by `act` is also user-defined), also specify an alias via `--network-alias`. The alias of a service is `<service-id>`, so the service container can be reached at `<service-id>:<port>` from the steps of the job (see the example below).
- No longer try to `docker network connect` the network after `docker start`.
  - On the one hand, `docker network connect` applies only to user-defined networks; trying `docker network connect host <container-name>` returns an error.
  - On the other hand, the network is already specified in the `docker create` stage, which achieves the same effect.
- No longer try to remove containers and networks before the `docker start` stage, because the names of these containers and networks won't be duplicated.
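
As a hedged sketch of that access pattern (the service image and the connectivity check are purely illustrative):

```yaml
jobs:
  test:
    runs-on: ubuntu-latest
    services:
      redis:
        image: redis
    steps:
      - run: nc -z redis 6379   # the service container is reachable as <service-id>:<port>
```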

Co-authored-by: Jason Song <i@wolfogre.com>
Reviewed-on: https://gitea.com/gitea/act/pulls/56
Reviewed-by: Jason Song <i@wolfogre.com>
Co-authored-by: sillyguodong <gedong_1994@163.com>
Co-committed-by: sillyguodong <gedong_1994@163.com>
2023-05-16 14:03:55 +08:00
Zettat123
27846050ae Force privileged to false when runner's config is false (#57)
The runner's `privileged` config can be bypassed. Currently, even if the runner's `privileged` config is false, users can still enable privileged mode by using `--privileged` in the container's options string. Therefore, if the runner's config is false, `--privileged` in the options string should be ignored.

Reviewed-on: https://gitea.com/gitea/act/pulls/57
Reviewed-by: Jason Song <i@wolfogre.com>
Co-authored-by: Zettat123 <zettat123@gmail.com>
Co-committed-by: Zettat123 <zettat123@gmail.com>
2023-05-16 11:21:18 +08:00
Zettat123
ed9b6643ca Do not set the default network to host (#55)
In [nektos/act/pull/1739](https://github.com/nektos/act/pull/1739), the container network mode defaults to `host` if the network option isn't specified in `options`.  When calling `ConnectToNetwork`, the `host` network mode may cause the error:
`Error response from daemon: container sharing network namespace with another container or host cannot be connected to any other network`
see the code: a94a01bff2/pkg/container/docker_run.go (L51-L68)

To avoid the error, this logic needs to be removed to keep the default network mode as `bridge`.

Reviewed-on: https://gitea.com/gitea/act/pulls/55
Reviewed-by: Jason Song <i@wolfogre.com>
Co-authored-by: Zettat123 <zettat123@gmail.com>
Co-committed-by: Zettat123 <zettat123@gmail.com>
2023-05-09 16:41:31 +08:00
Jason Song
a94a01bff2 Fix regression after merging upstream (#54)
Related to 229dbaf153

Reviewed-on: https://gitea.com/gitea/act/pulls/54
2023-05-04 17:54:09 +08:00
Jason Song
229dbaf153 Merge tag 'nektos/v0.2.45' 2023-05-04 17:45:53 +08:00
Zettat123
a18648ee73 Support services credentials (#51)
If a service's image is from a container registry that requires authentication, `act_runner` will need `credentials` to pull the image, see [documentation](https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idservicesservice_idcredentials).
Currently, `act_runner` incorrectly uses the `credentials` of `containers` to pull services' images, and the `credentials` of services aren't used; see the related code: 0c1f2edb99/pkg/runner/run_context.go (L228-L269)
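
For reference, the documented `credentials` syntax for a service (registry host and secret names are placeholders):

```yaml
jobs:
  build:
    runs-on: ubuntu-latest
    services:
      db:
        image: registry.example.com/postgres:16
        credentials:
          username: ${{ secrets.REGISTRY_USERNAME }}
          password: ${{ secrets.REGISTRY_PASSWORD }}
```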

Co-authored-by: Jason Song <i@wolfogre.com>
Reviewed-on: https://gitea.com/gitea/act/pulls/51
Reviewed-by: Jason Song <i@wolfogre.com>
Reviewed-by: Lunny Xiao <xiaolunwen@gmail.com>
Co-authored-by: Zettat123 <zettat123@gmail.com>
Co-committed-by: Zettat123 <zettat123@gmail.com>
2023-04-25 14:45:39 +08:00
sillyguodong
518d8c96f3 Keep the order of on when parsing workflow (#46)
Keep the order of `on` when parsing a workflow, and fix the occasional unit test failure of `actions`, like https://gitea.com/gitea/act/actions/runs/68

Co-authored-by: Jason Song <i@wolfogre.com>
Reviewed-on: https://gitea.com/gitea/act/pulls/46
Reviewed-by: Lunny Xiao <xiaolunwen@gmail.com>
Reviewed-by: Jason Song <i@wolfogre.com>
Co-authored-by: sillyguodong <gedong_1994@163.com>
Co-committed-by: sillyguodong <gedong_1994@163.com>
2023-04-24 23:16:41 +08:00
Zettat123
0c1f2edb99 Support specifying command for services (#50)
This PR adds support for overriding the default `CMD` of `services` containers.

This is a Gitea specific feature and GitHub Actions doesn't support this syntax.

Reviewed-on: https://gitea.com/gitea/act/pulls/50
Reviewed-by: Jason Song <i@wolfogre.com>
Co-authored-by: Zettat123 <zettat123@gmail.com>
Co-committed-by: Zettat123 <zettat123@gmail.com>
2023-04-23 14:55:17 +08:00
Zettat123
721857e4a0 Remove empty steps when decoding Job (#49)
Follow #48
Empty steps are invalid, so remove them when decoding `Job` from YAML.

Reviewed-on: https://gitea.com/gitea/act/pulls/49
Reviewed-by: Jason Song <i@wolfogre.com>
Co-authored-by: Zettat123 <zettat123@gmail.com>
Co-committed-by: Zettat123 <zettat123@gmail.com>
2023-04-21 20:21:15 +08:00
Zettat123
6b1010ad07 Fix potential panic caused by nil Step (#48)
```yml
jobs:
  job1:
    steps:
      - run: echo HelloWorld
      - # empty step
```

If a job contains an empty step, `Job.Steps` will have a nil element, which causes a panic when `Step.String()` is called.

See [the code of gitea](948a9ee5e8/models/actions/task.go (L300-L301))

Reviewed-on: https://gitea.com/gitea/act/pulls/48
Co-authored-by: Zettat123 <zettat123@gmail.com>
Co-committed-by: Zettat123 <zettat123@gmail.com>
2023-04-21 14:45:38 +08:00
Zettat123
e12252a43a Support interpolation for env of services (#47)
Reviewed-on: https://gitea.com/gitea/act/pulls/47
Reviewed-by: Lunny Xiao <xiaolunwen@gmail.com>
Co-authored-by: Zettat123 <zettat123@gmail.com>
Co-committed-by: Zettat123 <zettat123@gmail.com>
2023-04-20 16:24:31 +08:00
Zettat123
8609522aa4 Support services options (#45)
Reviewed-on: https://gitea.com/gitea/act/pulls/45
Reviewed-by: Lunny Xiao <xiaolunwen@gmail.com>
Co-authored-by: Zettat123 <zettat123@gmail.com>
Co-committed-by: Zettat123 <zettat123@gmail.com>
2023-04-19 21:53:57 +08:00
Zettat123
6a876c4f99 Add go build tag to docker_network.go (#44)
Fix the build failure in https://gitea.com/gitea/act_runner/actions/runs/278/jobs/0

Reviewed-on: https://gitea.com/gitea/act/pulls/44
Reviewed-by: Jason Song <i@wolfogre.com>
Co-authored-by: Zettat123 <zettat123@gmail.com>
Co-committed-by: Zettat123 <zettat123@gmail.com>
2023-04-19 16:19:38 +08:00
sillyguodong
de529139af Support configuration variables (#43)
related to: https://gitea.com/gitea/act_runner/issues/127

This PR makes `act` support expressions like `${{ vars.YOUR_CUSTOM_VARIABLES }}`.

Reviewed-on: https://gitea.com/gitea/act/pulls/43
Reviewed-by: Jason Song <i@wolfogre.com>
Co-authored-by: sillyguodong <gedong_1994@163.com>
Co-committed-by: sillyguodong <gedong_1994@163.com>
2023-04-19 15:22:56 +08:00
Zettat123
d3a56cdb69 Support services (#42)
Replace #5

Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
Co-authored-by: Jason Song <i@wolfogre.com>
Reviewed-on: https://gitea.com/gitea/act/pulls/42
Reviewed-by: Jason Song <i@wolfogre.com>
Co-authored-by: Zettat123 <zettat123@gmail.com>
Co-committed-by: Zettat123 <zettat123@gmail.com>
2023-04-19 11:23:28 +08:00
Galen Abell
9bdddf18e0 Parse secret inputs in reusable workflows (#41)
Secrets can be passed to reusable workflows, either explicitly by key or
implicitly by `inherit`:

https://docs.github.com/en/actions/using-workflows/reusing-workflows#using-inputs-and-secrets-in-a-reusable-workflow
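
For illustration (the workflow path and secret name are placeholders), the two forms look like this:

```yaml
jobs:
  call-explicit:
    uses: ./.github/workflows/reusable.yml
    secrets:
      token: ${{ secrets.MY_TOKEN }}
  call-inherit:
    uses: ./.github/workflows/reusable.yml
    secrets: inherit
```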

Reviewed-on: https://gitea.com/gitea/act/pulls/41
Reviewed-by: Jason Song <i@wolfogre.com>
Co-authored-by: Galen Abell <galen@galenabell.com>
Co-committed-by: Galen Abell <galen@galenabell.com>
2023-04-17 13:41:02 +08:00
Zettat123
ac1ba34518 Fix incorrect job result status (#40)
Fix [#24039(GitHub)](https://github.com/go-gitea/gitea/issues/24039)

At present, if a job fails in `Set up job`, the result status of the job will still be `success`. The reason is that the `pre` steps don't call `SetJobError`, so `jobError` will be nil when the `post` steps set the job result. See 5c4a96bcb7/pkg/runner/job_executor.go (L99)

Reviewed-on: https://gitea.com/gitea/act/pulls/40
Reviewed-by: Jason Song <i@wolfogre.com>
Co-authored-by: Zettat123 <zettat123@gmail.com>
Co-committed-by: Zettat123 <zettat123@gmail.com>
2023-04-14 15:42:03 +08:00
Jason Song
5c4a96bcb7 Avoid using log.Fatal in pkg/* (#39)
Follow https://github.com/nektos/act/pull/1705

Reviewed-on: https://gitea.com/gitea/act/pulls/39
Reviewed-by: Lunny Xiao <xiaolunwen@gmail.com>
Co-authored-by: Jason Song <i@wolfogre.com>
Co-committed-by: Jason Song <i@wolfogre.com>
2023-04-07 16:31:03 +08:00
Zettat123
62abf4fe11 Add token for getting reusable workflows from local private repos (#38)
Partially fixes https://gitea.com/gitea/act_runner/issues/91

If the repository is private, we need to provide the token to the caller workflows to access the called reusable workflows from the same repository.

Reviewed-on: https://gitea.com/gitea/act/pulls/38
Reviewed-by: Jason Song <i@wolfogre.com>
Co-authored-by: Zettat123 <zettat123@gmail.com>
Co-committed-by: Zettat123 <zettat123@gmail.com>
2023-04-06 14:16:20 +08:00
Zettat123
cfedc518ca Add With field to jobparser.Job (#37)
Partially Fixes [gitea/act_runner#91 comment](https://gitea.com/gitea/act_runner/issues/91#issuecomment-734544)

nektos/act has added `With` to support reusable workflows (see [code](68c72b9a51/pkg/model/workflow.go (L160)))

GitHub Actions also supports [`jobs.<job_id>.with`](https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idwith)
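
For illustration (the workflow path and input are placeholders), `with` at the job level passes inputs to a called reusable workflow:

```yaml
jobs:
  call-build:
    uses: ./.github/workflows/build.yml
    with:
      environment: staging
```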

Reviewed-on: https://gitea.com/gitea/act/pulls/37
Reviewed-by: Lunny Xiao <xiaolunwen@gmail.com>
Co-authored-by: Zettat123 <zettat123@gmail.com>
Co-committed-by: Zettat123 <zettat123@gmail.com>
2023-04-04 10:59:53 +08:00
Zettat123
5e76853b55 Support reusable workflow (#34)
Fix https://gitea.com/gitea/act_runner/issues/80
Fix https://gitea.com/gitea/act_runner/issues/85

To support reusable workflows, I made some improvements:
- read `yml` files from both `.gitea/workflows` and `.github/workflows`
- clone the repository for local reusable workflows, because the runner doesn't have the code in its local directory
- fix incorrect clone URLs like `https://https://gitea.com`

Co-authored-by: Jason Song <i@wolfogre.com>
Reviewed-on: https://gitea.com/gitea/act/pulls/34
Reviewed-by: Jason Song <i@wolfogre.com>
Co-authored-by: Zettat123 <zettat123@gmail.com>
Co-committed-by: Zettat123 <zettat123@gmail.com>
2023-03-29 13:59:22 +08:00
Jason Song
2eb4de02ee Expose SetJob to make EraseNeeds work (#35)
Related to #33

Reviewed-on: https://gitea.com/gitea/act/pulls/35
Reviewed-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-03-29 13:57:29 +08:00
Jason Song
342ad6a51a Keep the order of jobs in the workflow file when parsing (#33)
Keep the order of jobs in the workflow file when parsing, which makes it possible for Gitea to show jobs in their original order in the UI.

Reviewed-on: https://gitea.com/gitea/act/pulls/33
Reviewed-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-03-28 11:38:40 +08:00
Jason Song
568f053723 Revert "Erase needs of job in SingleWorkflow (#9)" (#32)
This reverts commit 1ba076d321.

`EraseNeeds` shouldn't be used in `jobparser.Parse`; it's for 023e61e678/models/actions/run.go (L200).

Otherwise Gitea won't be able to get the `Needs` of jobs.

Reviewed-on: https://gitea.com/gitea/act/pulls/32
Reviewed-by: Zettat123 <zettat123@noreply.gitea.io>
2023-03-27 17:46:50 +08:00
Bo-Yi.Wu
8f12a6c947 chore: update go-git dependency in go.mod (#30)
- Replace `go-git` with a forked version in `go.mod`

Signed-off-by: Bo-Yi.Wu <appleboy.tw@gmail.com>

Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
Reviewed-on: https://gitea.com/gitea/act/pulls/30
Reviewed-by: Lunny Xiao <xiaolunwen@gmail.com>
Co-authored-by: Bo-Yi.Wu <appleboy.tw@gmail.com>
Co-committed-by: Bo-Yi.Wu <appleboy.tw@gmail.com>
2023-03-26 21:27:19 +08:00
Lunny Xiao
83fb85f702 Fix bug (#31)
Reviewed-on: https://gitea.com/gitea/act/pulls/31
Reviewed-by: Jason Song <i@wolfogre.com>
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
Co-committed-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-03-26 21:01:46 +08:00
Zettat123
3daf313205 chore(yaml): Improve ParseRawOn (#28)
See [act_runner #71 comment](https://gitea.com/gitea/act_runner/issues/71#issuecomment-733806): we need to handle a `nil interface{}` in the `ParseRawOn` function.

Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
Reviewed-on: https://gitea.com/gitea/act/pulls/28
Reviewed-by: Lunny Xiao <xiaolunwen@gmail.com>
Reviewed-by: appleboy <appleboy.tw@gmail.com>
Co-authored-by: Zettat123 <zettat123@gmail.com>
Co-committed-by: Zettat123 <zettat123@gmail.com>
2023-03-25 12:13:50 +08:00
Lunny Xiao
7c5400d75b ParseRawOn support schedules (#29)
Fix gitea/act_runner#71

Reviewed-on: https://gitea.com/gitea/act/pulls/29
Reviewed-by: Jason Song <i@wolfogre.com>
Reviewed-by: Zettat123 <zettat123@noreply.gitea.io>
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
Co-committed-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-03-24 20:15:46 +08:00
Jason Song
929ea6df75 Support gitea context (#27)
With this we will be able to use context expressions like `${{ gitea.repository }}` in workflow YAML files; it's the same as `${{ github.repository }}`.

Reviewed-on: https://gitea.com/gitea/act/pulls/27
Reviewed-by: Zettat123 <zettat123@noreply.gitea.io>
2023-03-23 12:14:28 +08:00
Zettat123
f6a8a0e643 Add extra path env for running go actions (#26)
At present, the runner can't run Go actions even if the Go environment has been set up by the `setup-go` action. The reason is that `setup-go` adds the Go-related paths to [`GITHUB_PATH`](https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path), but in #22 I forgot to apply them before running Go actions. After adding the `ApplyExtraPath` function, the `setup-go` action runs properly.
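
For context, a hedged sketch of the `GITHUB_PATH` mechanism involved: a step appends a directory to `$GITHUB_PATH`, and later steps (now including Go actions) should see it on `PATH`; the directory and tool name are made up:

```yaml
steps:
  - uses: actions/setup-go@v4
    with:
      go-version: '1.20'
  - run: echo "$HOME/go/bin" >> "$GITHUB_PATH"   # appended paths apply to later steps
  - run: some-go-tool --version                  # hypothetical tool found via the extra path
```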

Reviewed-on: https://gitea.com/gitea/act/pulls/26
Reviewed-by: Jason Song <i@wolfogre.com>
Reviewed-by: Lunny Xiao <xiaolunwen@gmail.com>
Co-authored-by: Zettat123 <zettat123@gmail.com>
Co-committed-by: Zettat123 <zettat123@gmail.com>
2023-03-21 15:31:30 +08:00
a1012112796
556fd20aed make sure special logs are sent to gitea's server (#25)
example:
https://gitea.com/a1012112796/test_action/actions/runs/7

![image](/attachments/a8931f2f-096f-41fd-8f9f-0c8322ee985a)

TODO: handle them specially in the UI

Signed-off-by: a1012112796 <1012112796@qq.com>

Reviewed-on: https://gitea.com/gitea/act/pulls/25
Reviewed-by: Jason Song <i@wolfogre.com>
Reviewed-by: Lunny Xiao <xiaolunwen@gmail.com>
Co-authored-by: a1012112796 <1012112796@qq.com>
Co-committed-by: a1012112796 <1012112796@qq.com>
2023-03-17 23:01:31 +08:00
Jason Song
a8298365fe Fix incompatibility caused by tracking upstream; add actions to test it (#24)
Reviewed-on: https://gitea.com/gitea/act/pulls/24
2023-03-16 15:00:11 +08:00
Jason Song
1dda0aec69 Merge tag 'nektos/v0.2.43'
Conflicts:
	pkg/container/docker_run.go
	pkg/runner/action.go
	pkg/runner/logger.go
	pkg/runner/run_context.go
	pkg/runner/runner.go
	pkg/runner/step_action_remote_test.go
2023-03-16 11:45:29 +08:00
Jason Song
49e204166d Update forking rules (#23)
As the title.

Reviewed-on: https://gitea.com/gitea/act/pulls/23
Reviewed-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-03-16 10:46:41 +08:00
Zettat123
a36b003f7a Improve running with go (#22)
Close #21

I have tested this PR and run Go actions successfully on:
- Windows host
- Docker on Windows
- Linux host
- Docker on Linux

Before running Go actions, we need to make sure that Go has been installed on the host or in the Docker image.

Reviewed-on: https://gitea.com/gitea/act/pulls/22
Reviewed-by: Lunny Xiao <xiaolunwen@gmail.com>
Co-authored-by: Zettat123 <zettat123@gmail.com>
Co-committed-by: Zettat123 <zettat123@gmail.com>
2023-03-14 16:55:36 +08:00
Zettat123
0671d16694 Fix missing ActionRunsUsingGo (#20)
- Allow `using: "go"` when unmarshalling YAML.
- Add `ActionRunsUsingGo` to returned errors.

Co-authored-by: Zettat123 <zettat123@gmail.com>
Reviewed-on: https://gitea.com/gitea/act/pulls/20
Reviewed-by: Lunny Xiao <xiaolunwen@gmail.com>
Co-authored-by: Zettat123 <zettat123@noreply.gitea.io>
Co-committed-by: Zettat123 <zettat123@noreply.gitea.io>
2023-03-09 22:51:58 +08:00
a1012112796
881dbdb81b make log level configurable (#19)
related: https://gitea.com/gitea/act_runner/pulls/39
Reviewed-on: https://gitea.com/gitea/act/pulls/19
Reviewed-by: Jason Song <i@wolfogre.com>
Reviewed-by: Lunny Xiao <xiaolunwen@gmail.com>
Co-authored-by: a1012112796 <1012112796@qq.com>
Co-committed-by: a1012112796 <1012112796@qq.com>
2023-03-08 14:46:39 +08:00
Jason Song
1252e551b8 Replace more strings.ReplaceAll with safeFilename (#18)
Follow #16 #17

Reviewed-on: https://gitea.com/gitea/act/pulls/18
2023-02-24 14:20:34 +08:00
Jason Song
c614d8b96c Replace more strings.ReplaceAll with safeFilename (#17)
Follow #16.

Reviewed-on: https://gitea.com/gitea/act/pulls/17
2023-02-24 12:11:30 +08:00
Jason Song
84b6649b8b Safe filename (#16)
Fix #15.

Reviewed-on: https://gitea.com/gitea/act/pulls/16
Reviewed-by: Lunny Xiao <xiaolunwen@gmail.com>
Co-authored-by: Jason Song <i@wolfogre.com>
Co-committed-by: Jason Song <i@wolfogre.com>
2023-02-24 10:17:36 +08:00
Jason Song
dca7801682 Support uses http(s)://host/owner/repo as actions (#14)
Examples:

```yaml
jobs:
  my_first_job:
    steps:
      - name: My first step
        uses: https://gitea.com/actions/heroku@main
      - name: My second step
        uses: http://example.com/actions/heroku@v2.0.1
```

Reviewed-on: https://gitea.com/gitea/act/pulls/14
Reviewed-by: Lunny Xiao <xiaolunwen@gmail.com>
Co-authored-by: Jason Song <i@wolfogre.com>
Co-committed-by: Jason Song <i@wolfogre.com>
2023-02-15 16:28:33 +08:00
Lunny Xiao
4b99ed8916 Support go run on action (#12)
Reviewed-on: https://gitea.com/gitea/act/pulls/12
Reviewed-by: Jason Song <i@wolfogre.com>
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
Co-committed-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-02-15 16:10:15 +08:00
Lunny Xiao
e46ede1b17 parse raw on (#11)
Reviewed-on: https://gitea.com/gitea/act/pulls/11
Reviewed-by: Jason Song <i@wolfogre.com>
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
Co-committed-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-01-31 15:49:55 +08:00
Jason Song
1ba076d321 Erase needs of job in SingleWorkflow (#9)
Reviewed-on: https://gitea.com/gitea/act/pulls/9
Reviewed-by: Lunny Xiao <xiaolunwen@gmail.com>
Co-authored-by: Jason Song <i@wolfogre.com>
Co-committed-by: Jason Song <i@wolfogre.com>
2023-01-30 11:42:19 +08:00
appleboy
0efa2d5e63 fix(test): needs condition. (#8)
as title.

Signed-off-by: Bo-Yi.Wu <appleboy.tw@gmail.com>

Co-authored-by: Bo-Yi.Wu <appleboy.tw@gmail.com>
Reviewed-on: https://gitea.com/gitea/act/pulls/8
Reviewed-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-01-21 17:09:51 +08:00
Jason Song
0a37a03f2e Clone actions without token (#6)
We shouldn't provide a token when cloning actions: the token comes from the instance which triggered the task, which might not be the instance that provides the actions.

For GitHub, they are the same, always github.com. But for Gitea, tasks triggered by a.com can clone actions from b.com.

Reviewed-on: https://gitea.com/gitea/act/pulls/6
Reviewed-by: Lunny Xiao <xiaolunwen@gmail.com>
Co-authored-by: Jason Song <i@wolfogre.com>
Co-committed-by: Jason Song <i@wolfogre.com>
2023-01-06 13:34:38 +08:00
appleboy
88cce47022 feat(workflow): support schedule event (#4)
fix https://gitea.com/gitea/act/issues/3

Signed-off-by: Bo-Yi.Wu <appleboy.tw@gmail.com>

Co-authored-by: Bo-Yi.Wu <appleboy.tw@gmail.com>
Reviewed-on: https://gitea.com/gitea/act/pulls/4
2022-12-10 09:14:14 +08:00
Jason Song
7920109e89 Merge tag 'nektos/v0.2.34' 2022-12-05 17:08:17 +08:00
Jason Song
4cacc14d22 feat: adjust container name format (#1)
Co-authored-by: Jason Song <i@wolfogre.com>
Reviewed-on: https://gitea.com/gitea/act/pulls/1
2022-11-24 14:45:32 +08:00
Jason Song
c6b8548d35 feat: support PlatformPicker 2022-11-22 16:39:19 +08:00
Jason Song
64cae197a4 Support step number 2022-11-22 16:11:35 +08:00
Jason Song
7fb84a54a8 chore: update LICENSE 2022-11-22 15:26:02 +08:00
Jason Song
70cc6c017b docs: add naming rule for git ref 2022-11-22 15:05:12 +08:00
Lunny Xiao
d7e9ea75fc disable graphql url because gitea doesn't support that 2022-11-22 14:42:48 +08:00
Jason Song
b9c20dcaa4 feat: support more options of containers 2022-11-22 14:42:12 +08:00
Jason Song
97629ae8af fix: set logger with trace level 2022-11-22 14:41:57 +08:00
Lunny Xiao
b9a9812ad9 Fix API 2022-11-22 14:22:03 +08:00
Lunny Xiao
113c3e98fb support bot site 2022-11-22 14:17:06 +08:00
Jason Song
7815eec33b Add custom enhancements 2022-11-22 14:16:35 +08:00
Jason Song
c051090583 Add description of branches 2022-11-22 14:02:01 +08:00
fuxiaohei
0fa1fe0310 feat: add logger hook for standalone job logger 2022-11-22 14:00:13 +08:00
74 changed files with 3116 additions and 239 deletions

44
.gitea/workflows/test.yml Normal file

@@ -0,0 +1,44 @@
name: checks
on:
- push
- pull_request
env:
GOPROXY: https://goproxy.io,direct
GOPATH: /go_path
GOCACHE: /go_cache
jobs:
lint:
name: check and test
runs-on: ubuntu-latest
steps:
- name: cache go path
id: cache-go-path
uses: https://github.com/actions/cache@v3
with:
path: /go_path
key: go_path-${{ github.repository }}-${{ github.ref_name }}
restore-keys: |
go_path-${{ github.repository }}-
go_path-
- name: cache go cache
id: cache-go-cache
uses: https://github.com/actions/cache@v3
with:
path: /go_cache
key: go_cache-${{ github.repository }}-${{ github.ref_name }}
restore-keys: |
go_cache-${{ github.repository }}-
go_cache-
- uses: actions/setup-go@v3
with:
go-version: 1.20
- uses: actions/checkout@v3
- name: vet checks
run: go vet -v ./...
- name: build
run: go build -v ./...
- name: test
run: go test -v ./pkg/jobparser
# TODO test more packages


@@ -26,7 +26,7 @@ jobs:
with:
version: v1.53
only-new-issues: true
- uses: megalinter/megalinter/flavors/go@v7.7.0
- uses: megalinter/megalinter/flavors/go@v7.8.0
env:
DEFAULT_BRANCH: master
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -61,7 +61,7 @@ jobs:
- name: Run act from cli
run: go run main.go -P ubuntu-latest=node:16-buster-slim -C ./pkg/runner/testdata/ -W ./basic/push.yml
- name: Upload Codecov report
uses: codecov/codecov-action@v3.1.4
uses: codecov/codecov-action@v3.1.5
with:
files: coverage.txt
fail_ci_if_error: true # optional (default = false)
@@ -111,67 +111,67 @@ jobs:
args: release --snapshot --clean
- name: Capture x86_64 (64-bit) Linux binary
if: ${{ !env.ACT }}
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: act-linux-amd64
path: dist/act_linux_amd64_v1/act
- name: Capture i386 (32-bit) Linux binary
if: ${{ !env.ACT }}
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: act-linux-i386
path: dist/act_linux_386/act
- name: Capture arm64 (64-bit) Linux binary
if: ${{ !env.ACT }}
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: act-linux-arm64
path: dist/act_linux_arm64/act
- name: Capture armv6 (32-bit) Linux binary
if: ${{ !env.ACT }}
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: act-linux-armv6
path: dist/act_linux_arm_6/act
- name: Capture armv7 (32-bit) Linux binary
if: ${{ !env.ACT }}
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: act-linux-armv7
path: dist/act_linux_arm_7/act
- name: Capture x86_64 (64-bit) Windows binary
if: ${{ !env.ACT }}
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: act-windows-amd64
path: dist/act_windows_amd64_v1/act.exe
- name: Capture i386 (32-bit) Windows binary
if: ${{ !env.ACT }}
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: act-windows-i386
path: dist/act_windows_386/act.exe
- name: Capture arm64 (64-bit) Windows binary
if: ${{ !env.ACT }}
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: act-windows-arm64
path: dist/act_windows_arm64/act.exe
- name: Capture armv7 (32-bit) Windows binary
if: ${{ !env.ACT }}
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: act-windows-armv7
path: dist/act_windows_arm_7/act.exe
- name: Capture x86_64 (64-bit) MacOS binary
if: ${{ !env.ACT }}
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: act-macos-amd64
path: dist/act_darwin_amd64_v1/act
- name: Capture arm64 (64-bit) MacOS binary
if: ${{ !env.ACT }}
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: act-macos-arm64
path: dist/act_darwin_arm64/act

1
.gitignore vendored

@@ -31,3 +31,4 @@ coverage.txt
# megalinter
report/
act


@@ -71,6 +71,10 @@ pull_request_rules:
- and:
- 'approved-reviews-by=@nektos/act-maintainers'
- '#approved-reviews-by>=2'
- and:
- 'author=@nektos/act-maintainers'
- 'approved-reviews-by=@nektos/act-maintainers'
- '#approved-reviews-by>=1'
- -draft
- -merged
- -closed


@@ -1,5 +1,6 @@
MIT License
Copyright (c) 2022 The Gitea Authors
Copyright (c) 2019
Permission is hereby granted, free of charge, to any person obtaining a copy


@@ -1,3 +1,28 @@
## Forking rules
This is a custom fork of [nektos/act](https://github.com/nektos/act/), for the purpose of serving [act_runner](https://gitea.com/gitea/act_runner).
It cannot be used as a command line tool anymore, but only as a library.
It's a soft fork, which means that it will track the latest release of nektos/act.
Branches:
- `main`: default branch, contains custom changes, based on the latest release(not the latest of the master branch) of nektos/act.
- `nektos/master`: mirror for the master branch of nektos/act.
Tags:
- `nektos/vX.Y.Z`: mirror for `vX.Y.Z` of [nektos/act](https://github.com/nektos/act/).
- `vX.YZ.*`: based on `nektos/vX.Y.Z`, contains custom changes.
- Examples:
- `nektos/v0.2.23` -> `v0.223.*`
- `nektos/v0.3.1` -> `v0.301.*`, not ~~`v0.31.*`~~
- `nektos/v0.10.1` -> `v0.1001.*`, not ~~`v0.101.*`~~
- `nektos/v0.3.100` -> not ~~`v0.3100.*`~~, I don't think it's really going to happen, if it does, we can find a way to handle it.
---
![act-logo](https://github.com/nektos/act/wiki/img/logo-150.png)
# Overview [![push](https://github.com/nektos/act/workflows/push/badge.svg?branch=master&event=push)](https://github.com/nektos/act/actions) [![Join the chat at https://gitter.im/nektos/act](https://badges.gitter.im/nektos/act.svg)](https://gitter.im/nektos/act?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![Go Report Card](https://goreportcard.com/badge/github.com/nektos/act)](https://goreportcard.com/report/github.com/nektos/act) [![awesome-runners](https://img.shields.io/badge/listed%20on-awesome--runners-blue.svg)](https://github.com/jonico/awesome-runners)


@@ -1 +1 @@
0.2.57
0.2.59


@@ -55,8 +55,10 @@ type Input struct {
replaceGheActionTokenWithGithubCom string
matrix []string
actionCachePath string
actionOfflineMode bool
logPrefixJobID bool
networkName string
useNewActionCache bool
}
func (i *Input) resolve(path string) string {


@@ -97,7 +97,9 @@ func Execute(ctx context.Context, version string) {
rootCmd.PersistentFlags().StringVarP(&input.cacheServerAddr, "cache-server-addr", "", common.GetOutboundIP().String(), "Defines the address to which the cache server binds.")
rootCmd.PersistentFlags().Uint16VarP(&input.cacheServerPort, "cache-server-port", "", 0, "Defines the port where the artifact server listens. 0 means a randomly available port.")
rootCmd.PersistentFlags().StringVarP(&input.actionCachePath, "action-cache-path", "", filepath.Join(CacheHomeDir, "act"), "Defines the path where the actions get cached and host workspaces created.")
rootCmd.PersistentFlags().BoolVarP(&input.actionOfflineMode, "action-offline-mode", "", false, "If action contents exists, it will not be fetch and pull again. If turn on this,will turn off force pull")
rootCmd.PersistentFlags().StringVarP(&input.networkName, "network", "", "host", "Sets a docker network name. Defaults to host.")
rootCmd.PersistentFlags().BoolVarP(&input.useNewActionCache, "use-new-action-cache", "", false, "Enable using the new Action Cache for storing Actions locally")
rootCmd.SetArgs(args())
if err := rootCmd.Execute(); err != nil {
@@ -105,23 +107,22 @@ func Execute(ctx context.Context, version string) {
}
}
// Return locations where Act's config can be found in order: XDG spec, .actrc in HOME directory, .actrc in invocation directory
func configLocations() []string {
configFileName := ".actrc"
// reference: https://specifications.freedesktop.org/basedir-spec/latest/ar01s03.html
var actrcXdg string
for _, fileName := range []string{"act/actrc", configFileName} {
if foundConfig, err := xdg.SearchConfigFile(fileName); foundConfig != "" && err == nil {
actrcXdg = foundConfig
break
}
homePath := filepath.Join(UserHomeDir, configFileName)
invocationPath := filepath.Join(".", configFileName)
// Though named xdg, adrg's lib support macOS and Windows config paths as well
// It also takes cares of creating the parent folder so we don't need to bother later
specPath, err := xdg.ConfigFile("act/actrc")
if err != nil {
specPath = homePath
}
return []string{
filepath.Join(UserHomeDir, configFileName),
actrcXdg,
filepath.Join(".", configFileName),
}
// This order should be enforced since the survey part relies on it
return []string{specPath, homePath, invocationPath}
}
var commonSocketPaths = []string{
@@ -294,19 +295,17 @@ func cleanup(inputs *Input) func(*cobra.Command, []string) {
}
}
func parseEnvs(env []string, envs map[string]string) bool {
if env != nil {
for _, envVar := range env {
e := strings.SplitN(envVar, `=`, 2)
if len(e) == 2 {
envs[e[0]] = e[1]
} else {
envs[e[0]] = ""
}
func parseEnvs(env []string) map[string]string {
envs := make(map[string]string, len(env))
for _, envVar := range env {
e := strings.SplitN(envVar, `=`, 2)
if len(e) == 2 {
envs[e[0]] = e[1]
} else {
envs[e[0]] = ""
}
return true
}
return false
return envs
}
func readYamlFile(file string) (map[string]string, error) {
@@ -412,13 +411,11 @@ func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []str
}
log.Debugf("Loading environment from %s", input.Envfile())
envs := make(map[string]string)
_ = parseEnvs(input.envs, envs)
envs := parseEnvs(input.envs)
_ = readEnvs(input.Envfile(), envs)
log.Debugf("Loading action inputs from %s", input.Inputfile())
inputs := make(map[string]string)
_ = parseEnvs(input.inputs, inputs)
inputs := parseEnvs(input.inputs)
_ = readEnvs(input.Inputfile(), inputs)
log.Debugf("Loading secrets from %s", input.Secretfile())
@@ -555,6 +552,7 @@ func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []str
}
}
if !cfgFound && len(cfgLocations) > 0 {
// The first config location refers to the global config folder one
if err := defaultImageSurvey(cfgLocations[0]); err != nil {
log.Fatal(err)
}
@@ -581,11 +579,12 @@ func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []str
EventName: eventName,
EventPath: input.EventPath(),
DefaultBranch: defaultbranch,
ForcePull: input.forcePull,
ForcePull: !input.actionOfflineMode && input.forcePull,
ForceRebuild: input.forceRebuild,
ReuseContainers: input.reuseContainers,
Workdir: input.Workdir(),
ActionCacheDir: input.actionCachePath,
ActionOfflineMode: input.actionOfflineMode,
BindWorkdir: input.bindWorkdir,
LogOutput: !input.noOutput,
JSONLogger: input.jsonLogger,
@@ -617,6 +616,11 @@ func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []str
Matrix: matrixes,
ContainerNetworkMode: docker_container.NetworkMode(input.networkName),
}
if input.useNewActionCache {
config.ActionCache = &runner.GoGitActionCache{
Path: config.ActionCacheDir,
}
}
r, err := runner.New(config)
if err != nil {
return err

20
go.mod

@@ -4,7 +4,6 @@ go 1.20
require (
github.com/AlecAivazis/survey/v2 v2.3.7
github.com/Masterminds/semver v1.5.0
github.com/adrg/xdg v0.4.0
github.com/andreaskoch/go-fswatch v1.0.0
github.com/creack/pty v1.1.21
@@ -19,9 +18,9 @@ require (
github.com/julienschmidt/httprouter v1.3.0
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51
github.com/mattn/go-isatty v0.0.20
github.com/moby/buildkit v0.12.4
github.com/moby/buildkit v0.12.5
github.com/moby/patternmatcher v0.6.0
github.com/opencontainers/image-spec v1.1.0-rc5
github.com/opencontainers/image-spec v1.1.0-rc3
github.com/opencontainers/selinux v1.11.0
github.com/pkg/errors v0.9.1
github.com/rhysd/actionlint v1.6.26
@@ -32,17 +31,22 @@ require (
github.com/stretchr/testify v1.8.4
github.com/timshannon/bolthold v0.0.0-20210913165410-232392fc8a6a
go.etcd.io/bbolt v1.3.8
golang.org/x/term v0.15.0
golang.org/x/term v0.16.0
gopkg.in/yaml.v3 v3.0.1
gotest.tools/v3 v3.5.1
)
require (
github.com/Masterminds/semver v1.5.0
github.com/gobwas/glob v0.2.3
)
require (
dario.cat/mergo v1.0.0 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect
github.com/cloudflare/circl v1.3.3 // indirect
github.com/containerd/containerd v1.7.11 // indirect
github.com/cloudflare/circl v1.3.7 // indirect
github.com/containerd/containerd v1.7.2 // indirect
github.com/cyphar/filepath-securejoin v0.2.4 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/distribution/reference v0.5.0 // indirect
@@ -66,7 +70,7 @@ require (
github.com/moby/sys/sequential v0.5.0 // indirect
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/runc v1.1.7 // indirect
github.com/opencontainers/runc v1.1.12 // indirect
github.com/pjbgf/sha1cd v0.3.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
@@ -82,7 +86,7 @@ require (
golang.org/x/mod v0.12.0 // indirect
golang.org/x/net v0.19.0 // indirect
golang.org/x/sync v0.3.0 // indirect
golang.org/x/sys v0.15.0 // indirect
golang.org/x/sys v0.16.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/tools v0.13.0 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect

33
go.sum

@@ -1,6 +1,6 @@
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 h1:EKPd1INOIyr5hWOWhvpmQpY6tKjeG0hT1s3AMC/9fic=
github.com/AlecAivazis/survey/v2 v2.3.7 h1:6I/u8FvytdGsgonrYsVn2t8t4QiRnh6QSTqkkhIiSjQ=
github.com/AlecAivazis/survey/v2 v2.3.7/go.mod h1:xUTIdE4KCOIjsBAE1JYsUPoCqYdZ1reCfTwbto0Fduo=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
@@ -10,7 +10,7 @@ github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF0
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
github.com/Microsoft/hcsshim v0.11.4 h1:68vKo2VN8DE9AdN4tnkWnmdhqdbpUFM8OF3Airm7fz8=
github.com/Microsoft/hcsshim v0.10.0-rc.8 h1:YSZVvlIIDD1UxQpJp0h+dnpLUw+TrY0cx8obKsp3bek=
github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s=
github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w=
github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg=
@@ -22,10 +22,11 @@ github.com/andreaskoch/go-fswatch v1.0.0/go.mod h1:r5/iV+4jfwoY2sYqBkg8vpF04ehOv
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs=
github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
github.com/containerd/containerd v1.7.11 h1:lfGKw3eU35sjV0aG2eYZTiwFEY1pCzxdzicHP3SZILw=
github.com/containerd/containerd v1.7.11/go.mod h1:5UluHxHTX2rdvYuZ5OJTC5m/KJNs0Zs9wVoJm9zf5ZE=
github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU=
github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
github.com/containerd/containerd v1.7.2 h1:UF2gdONnxO8I6byZXDi5sXWiWvlW3D/sci7dTQimEJo=
github.com/containerd/containerd v1.7.2/go.mod h1:afcz74+K10M/+cjGHIVQrCt3RAQhUSCAjJ9iMYhhkuI=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0=
@@ -62,6 +63,8 @@ github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgF
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4=
github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4=
github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
@@ -111,8 +114,8 @@ github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1f
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/moby/buildkit v0.12.4 h1:yKZDsObXLKarXqUx7YMnaB+TKv810bBhq0XLFWbkjT0=
github.com/moby/buildkit v0.12.4/go.mod h1:XG74uz06nPWQpnxYwgCryrVidvor0+ElUxGosbZPQG4=
github.com/moby/buildkit v0.12.5 h1:RNHH1l3HDhYyZafr5EgstEu8aGNCwyfvMtrQDtjH9T0=
github.com/moby/buildkit v0.12.5/go.mod h1:YGwjA2loqyiYfZeEo8FtI7z4x5XponAaIWsWcSjWwso=
github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
@@ -123,10 +126,10 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI=
github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8=
github.com/opencontainers/runc v1.1.7 h1:y2EZDS8sNng4Ksf0GUYNhKbTShZJPJg1FiXJNH/uoCk=
github.com/opencontainers/runc v1.1.7/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50=
github.com/opencontainers/image-spec v1.1.0-rc3 h1:fzg1mXZFj8YdPeNkRXMg+zb88BFV0Ys52cJydRwBkb8=
github.com/opencontainers/image-spec v1.1.0-rc3/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8=
github.com/opencontainers/runc v1.1.12 h1:BOIssBaW1La0/qbNZHXOOa71dZfZEQOzW7dqQf3phss=
github.com/opencontainers/runc v1.1.12/go.mod h1:S+lQwSfncpBha7XTy/5lBwWgm5+y5Ma/O44Ekby9FK8=
github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=
github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4=
@@ -247,15 +250,15 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE=
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=


@@ -9,6 +9,7 @@ import (
"net/http"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"sync/atomic"
@@ -386,7 +387,12 @@ func (h *Handler) findCache(db *bolthold.Store, keys []string, version string) (
for _, prefix := range keys[1:] {
found := false
if err := db.ForEach(bolthold.Where("Key").Ge(prefix).And("Version").Eq(version).SortBy("Key"), func(v *Cache) error {
prefixPattern := fmt.Sprintf("^%s", regexp.QuoteMeta(prefix))
re, err := regexp.Compile(prefixPattern)
if err != nil {
continue
}
if err := db.ForEach(bolthold.Where("Key").RegExp(re).And("Version").Eq(version).SortBy("CreatedAt").Reverse(), func(v *Cache) error {
if !strings.HasPrefix(v.Key, prefix) {
return stop
}
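
The new lookup anchors each restore-key prefix as a quoted regular expression and sorts matches by CreatedAt in descending order, so the newest cache whose key literally starts with the prefix is returned. A minimal standalone sketch of the quoting step (the handler's own types are not used here):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// A restore-key prefix may contain regexp metacharacters such as '.',
	// so it is escaped before being anchored at the start of the key.
	prefix := "cache-v1.2-"
	re := regexp.MustCompile("^" + regexp.QuoteMeta(prefix))

	for _, key := range []string{"cache-v1.2-linux", "cache-v1x2-linux", "other-v1.2-linux"} {
		fmt.Println(key, re.MatchString(key)) // only the first key matches
	}
}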


@@ -221,10 +221,11 @@ func findGitSlug(url string, githubInstance string) (string, string, error) {
// NewGitCloneExecutorInput is the input for NewGitCloneExecutor
type NewGitCloneExecutorInput struct {
URL string
Ref string
Dir string
Token string
URL string
Ref string
Dir string
Token string
OfflineMode bool
}
// CloneIfRequired ...
@@ -302,12 +303,16 @@ func NewGitCloneExecutor(input NewGitCloneExecutorInput) common.Executor {
return err
}
isOfflineMode := input.OfflineMode
// fetch latest changes
fetchOptions, pullOptions := gitOptions(input.Token)
err = r.Fetch(&fetchOptions)
if err != nil && !errors.Is(err, git.NoErrAlreadyUpToDate) {
return err
if !isOfflineMode {
err = r.Fetch(&fetchOptions)
if err != nil && !errors.Is(err, git.NoErrAlreadyUpToDate) {
return err
}
}
var hash *plumbing.Hash
@@ -367,9 +372,10 @@ func NewGitCloneExecutor(input NewGitCloneExecutorInput) common.Executor {
return err
}
}
if err = w.Pull(&pullOptions); err != nil && err != git.NoErrAlreadyUpToDate {
logger.Debugf("Unable to pull %s: %v", refName, err)
if !isOfflineMode {
if err = w.Pull(&pullOptions); err != nil && err != git.NoErrAlreadyUpToDate {
logger.Debugf("Unable to pull %s: %v", refName, err)
}
}
logger.Debugf("Cloned %s to %s", input.URL, input.Dir)
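
With the new OfflineMode flag both the fetch and the pull are skipped, so a previously cloned action can be reused without network access. A hedged usage sketch (URL, ref and directory are placeholders; the returned common.Executor is simply called with a context):

package main

import (
	"context"

	"github.com/nektos/act/pkg/common/git"
)

func main() {
	clone := git.NewGitCloneExecutor(git.NewGitCloneExecutorInput{
		URL:         "https://gitea.com/gitea/act.git", // placeholder repository
		Ref:         "main",
		Dir:         "/tmp/act-cache/act",
		OfflineMode: true, // reuse the existing clone; skip Fetch and Pull
	})
	if err := clone(context.Background()); err != nil {
		panic(err)
	}
}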


@@ -25,3 +25,24 @@ func Logger(ctx context.Context) logrus.FieldLogger {
func WithLogger(ctx context.Context, logger logrus.FieldLogger) context.Context {
return context.WithValue(ctx, loggerContextKeyVal, logger)
}
type loggerHookKey string
const loggerHookKeyVal = loggerHookKey("logrus.Hook")
// LoggerHook returns the logger hook for the current context;
// the hook affects the job logger, not the global logger
func LoggerHook(ctx context.Context) logrus.Hook {
val := ctx.Value(loggerHookKeyVal)
if val != nil {
if hook, ok := val.(logrus.Hook); ok {
return hook
}
}
return nil
}
// WithLoggerHook adds a value to the context for the logger hook
func WithLoggerHook(ctx context.Context, hook logrus.Hook) context.Context {
return context.WithValue(ctx, loggerHookKeyVal, hook)
}
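
WithLoggerHook stores a logrus.Hook in the context so that a runner can later attach it to the per-job logger without touching the global logger. A small sketch with a hypothetical hook that records log messages (lineHook is illustrative, not part of the package):

package main

import (
	"context"
	"fmt"

	"github.com/nektos/act/pkg/common"
	"github.com/sirupsen/logrus"
)

// lineHook is a hypothetical hook that records every log message it sees.
type lineHook struct{ lines []string }

func (h *lineHook) Levels() []logrus.Level { return logrus.AllLevels }
func (h *lineHook) Fire(e *logrus.Entry) error {
	h.lines = append(h.lines, e.Message)
	return nil
}

func main() {
	hook := &lineHook{}
	ctx := common.WithLoggerHook(context.Background(), hook)

	// A job runner can pull the hook back out of the context and attach it
	// to its own logger, so job output is captured per job.
	if h := common.LoggerHook(ctx); h != nil {
		jobLogger := logrus.New()
		jobLogger.AddHook(h)
		jobLogger.Info("hello from the job logger")
	}
	fmt.Println(hook.lines)
}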


@@ -30,6 +30,11 @@ type NewContainerInput struct {
NetworkAliases []string
ExposedPorts nat.PortSet
PortBindings nat.PortMap
// Gitea specific
AutoRemove bool
ValidVolumes []string
}
// FileEntry is a file to copy to a container
@@ -42,6 +47,7 @@ type FileEntry struct {
// Container for managing docker run containers
type Container interface {
Create(capAdd []string, capDrop []string) common.Executor
ConnectToNetwork(name string) common.Executor
Copy(destPath string, files ...*FileEntry) common.Executor
CopyTarStream(ctx context.Context, destPath string, tarStream io.Reader) error
CopyDir(destPath string, srcPath string, useGitIgnore bool) common.Executor


@@ -1,4 +1,4 @@
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
package container


@@ -1,4 +1,4 @@
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
package container


@@ -1,4 +1,4 @@
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
// This file is exact copy of https://github.com/docker/cli/blob/9ac8584acfd501c3f4da0e845e3a40ed15c85041/cli/command/container/opts.go
// appended with license information.


@@ -1,4 +1,4 @@
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
package container


@@ -1,4 +1,4 @@
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
package container


@@ -1,4 +1,4 @@
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
package container


@@ -1,4 +1,4 @@
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
package container


@@ -1,4 +1,4 @@
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
package container
@@ -17,16 +17,19 @@ import (
"strings"
"github.com/Masterminds/semver"
"github.com/docker/cli/cli/compose/loader"
"github.com/docker/cli/cli/connhelper"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/network"
networktypes "github.com/docker/docker/api/types/network"
"github.com/docker/docker/client"
"github.com/docker/docker/pkg/stdcopy"
"github.com/go-git/go-billy/v5/helper/polyfill"
"github.com/go-git/go-billy/v5/osfs"
"github.com/go-git/go-git/v5/plumbing/format/gitignore"
"github.com/gobwas/glob"
"github.com/imdario/mergo"
"github.com/joho/godotenv"
"github.com/kballard/go-shellquote"
@@ -35,6 +38,7 @@ import (
"golang.org/x/term"
"github.com/nektos/act/pkg/common"
"github.com/nektos/act/pkg/filecollector"
)
// NewContainer creates a reference to a container
@@ -44,6 +48,25 @@ func NewContainer(input *NewContainerInput) ExecutionsEnvironment {
return cr
}
func (cr *containerReference) ConnectToNetwork(name string) common.Executor {
return common.
NewDebugExecutor("%sdocker network connect %s %s", logPrefix, name, cr.input.Name).
Then(
common.NewPipelineExecutor(
cr.connect(),
cr.connectToNetwork(name, cr.input.NetworkAliases),
).IfNot(common.Dryrun),
)
}
func (cr *containerReference) connectToNetwork(name string, aliases []string) common.Executor {
return func(ctx context.Context) error {
return cr.cli.NetworkConnect(ctx, name, cr.input.Name, &networktypes.EndpointSettings{
Aliases: aliases,
})
}
}
// supportsContainerImagePlatform returns true if the underlying Docker server
// API version is 1.41 or later
func supportsContainerImagePlatform(ctx context.Context, cli client.APIClient) bool {
@@ -238,8 +261,10 @@ func RunnerArch(ctx context.Context) string {
archMapper := map[string]string{
"x86_64": "X64",
"amd64": "X64",
"386": "X86",
"aarch64": "ARM64",
"arm64": "ARM64",
}
if arch, ok := archMapper[info.Architecture]; ok {
return arch
@@ -343,10 +368,20 @@ func (cr *containerReference) mergeContainerConfigs(ctx context.Context, config
return nil, nil, fmt.Errorf("Cannot parse container options: '%s': '%w'", input.Options, err)
}
if len(copts.netMode.Value()) == 0 {
if err = copts.netMode.Set(cr.input.NetworkMode); err != nil {
return nil, nil, fmt.Errorf("Cannot parse networkmode=%s. This is an internal error and should not happen: '%w'", cr.input.NetworkMode, err)
}
// If a service container's network is set to `host`, the container will not be able to
// connect to the specified network created for the job container and the service containers.
// So comment out the following code.
// if len(copts.netMode.Value()) == 0 {
// if err = copts.netMode.Set(cr.input.NetworkMode); err != nil {
// return nil, nil, fmt.Errorf("Cannot parse networkmode=%s. This is an internal error and should not happen: '%w'", cr.input.NetworkMode, err)
// }
// }
// If the `privileged` config has been disabled, `copts.privileged` needs to be forced to false,
// even if the user specifies `--privileged` in the options string.
if !hostConfig.Privileged {
copts.privileged = false
}
containerConfig, err := parse(flags, copts, runtime.GOOS)
@@ -356,7 +391,7 @@ func (cr *containerReference) mergeContainerConfigs(ctx context.Context, config
logger.Debugf("Custom container.Config from options ==> %+v", containerConfig.Config)
err = mergo.Merge(config, containerConfig.Config, mergo.WithOverride)
err = mergo.Merge(config, containerConfig.Config, mergo.WithOverride, mergo.WithAppendSlice)
if err != nil {
return nil, nil, fmt.Errorf("Cannot merge container.Config options: '%s': '%w'", input.Options, err)
}
@@ -368,12 +403,17 @@ func (cr *containerReference) mergeContainerConfigs(ctx context.Context, config
hostConfig.Mounts = append(hostConfig.Mounts, containerConfig.HostConfig.Mounts...)
binds := hostConfig.Binds
mounts := hostConfig.Mounts
networkMode := hostConfig.NetworkMode
err = mergo.Merge(hostConfig, containerConfig.HostConfig, mergo.WithOverride)
if err != nil {
return nil, nil, fmt.Errorf("Cannot merge container.HostConfig options: '%s': '%w'", input.Options, err)
}
hostConfig.Binds = binds
hostConfig.Mounts = mounts
if len(copts.netMode.Value()) > 0 {
logger.Warn("--network and --net in the options will be ignored.")
}
hostConfig.NetworkMode = networkMode
logger.Debugf("Merged container.HostConfig ==> %+v", hostConfig)
return config, hostConfig, nil
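
The merge of user-supplied container options now passes mergo.WithAppendSlice, so slice fields from the options string are appended to the defaults instead of replacing them. A small illustration of the difference (the struct is illustrative only):

package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type envConfig struct {
	Env []string
}

func main() {
	dst := envConfig{Env: []string{"CI=true"}}
	src := envConfig{Env: []string{"FOO=bar"}}

	// With WithOverride alone, dst.Env would end up as ["FOO=bar"];
	// adding WithAppendSlice keeps both entries.
	if err := mergo.Merge(&dst, src, mergo.WithOverride, mergo.WithAppendSlice); err != nil {
		panic(err)
	}
	fmt.Println(dst.Env) // [CI=true FOO=bar]
}
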
@@ -437,6 +477,7 @@ func (cr *containerReference) create(capAdd []string, capDrop []string) common.E
Privileged: input.Privileged,
UsernsMode: container.UsernsMode(input.UsernsMode),
PortBindings: input.PortBindings,
AutoRemove: input.AutoRemove,
}
logger.Debugf("Common container.HostConfig ==> %+v", hostConfig)
@@ -445,6 +486,11 @@ func (cr *containerReference) create(capAdd []string, capDrop []string) common.E
return err
}
// For Gitea
config, hostConfig = cr.sanitizeConfig(ctx, config, hostConfig)
// For Gitea
// network-scoped alias is supported only for containers in user-defined networks
var networkingConfig *network.NetworkingConfig
logger.Debugf("input.NetworkAliases ==> %v", input.NetworkAliases)
n := hostConfig.NetworkMode
@@ -671,10 +717,28 @@ func (cr *containerReference) waitForCommand(ctx context.Context, isTerminal boo
}
func (cr *containerReference) CopyTarStream(ctx context.Context, destPath string, tarStream io.Reader) error {
err := cr.cli.CopyToContainer(ctx, cr.id, destPath, tarStream, types.CopyToContainerOptions{})
// Mkdir
buf := &bytes.Buffer{}
tw := tar.NewWriter(buf)
_ = tw.WriteHeader(&tar.Header{
Name: destPath,
Mode: 777,
Typeflag: tar.TypeDir,
})
tw.Close()
err := cr.cli.CopyToContainer(ctx, cr.id, "/", buf, types.CopyToContainerOptions{})
if err != nil {
return fmt.Errorf("failed to mkdir to copy content to container: %w", err)
}
// Copy Content
err = cr.cli.CopyToContainer(ctx, cr.id, destPath, tarStream, types.CopyToContainerOptions{})
if err != nil {
return fmt.Errorf("failed to copy content to container: %w", err)
}
// If this fails, folders will have the wrong permissions in a non-root container
if cr.UID != 0 || cr.GID != 0 {
_ = cr.Exec([]string{"chown", "-R", fmt.Sprintf("%d:%d", cr.UID, cr.GID), destPath}, nil, "0", "")(ctx)
}
return nil
}
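
CopyTarStream now creates destPath first by sending a one-entry tar stream that contains only a directory header, then streams the real content and finally fixes ownership for non-root containers. A standalone sketch of building and inspecting such a single-entry tar (the path is a placeholder):

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
)

func main() {
	buf := &bytes.Buffer{}
	tw := tar.NewWriter(buf)
	// One directory entry is enough for the Docker daemon to create the path.
	_ = tw.WriteHeader(&tar.Header{
		Name:     "workspace/project/",
		Mode:     0o755,
		Typeflag: tar.TypeDir,
	})
	tw.Close()

	tr := tar.NewReader(buf)
	h, _ := tr.Next()
	fmt.Println(h.Name, h.FileInfo().IsDir()) // workspace/project/ true
}
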
@@ -715,12 +779,12 @@ func (cr *containerReference) copyDir(dstPath string, srcPath string, useGitIgno
ignorer = gitignore.NewMatcher(ps)
}
fc := &fileCollector{
Fs: &defaultFs{},
fc := &filecollector.FileCollector{
Fs: &filecollector.DefaultFs{},
Ignorer: ignorer,
SrcPath: srcPath,
SrcPrefix: srcPrefix,
Handler: &tarCollector{
Handler: &filecollector.TarCollector{
TarWriter: tw,
UID: cr.UID,
GID: cr.GID,
@@ -728,7 +792,7 @@ func (cr *containerReference) copyDir(dstPath string, srcPath string, useGitIgno
},
}
err = filepath.Walk(srcPath, fc.collectFiles(ctx, []string{}))
err = filepath.Walk(srcPath, fc.CollectFiles(ctx, []string{}))
if err != nil {
return err
}
@@ -855,3 +919,63 @@ func (cr *containerReference) wait() common.Executor {
return fmt.Errorf("exit with `FAILURE`: %v", statusCode)
}
}
// For Gitea
// sanitizeConfig removes invalid configurations from `config` and `hostConfig`
func (cr *containerReference) sanitizeConfig(ctx context.Context, config *container.Config, hostConfig *container.HostConfig) (*container.Config, *container.HostConfig) {
logger := common.Logger(ctx)
if len(cr.input.ValidVolumes) > 0 {
globs := make([]glob.Glob, 0, len(cr.input.ValidVolumes))
for _, v := range cr.input.ValidVolumes {
if g, err := glob.Compile(v); err != nil {
logger.Errorf("create glob from %s error: %v", v, err)
} else {
globs = append(globs, g)
}
}
isValid := func(v string) bool {
for _, g := range globs {
if g.Match(v) {
return true
}
}
return false
}
// sanitize binds
sanitizedBinds := make([]string, 0, len(hostConfig.Binds))
for _, bind := range hostConfig.Binds {
parsed, err := loader.ParseVolume(bind)
if err != nil {
logger.Warnf("parse volume [%s] error: %v", bind, err)
continue
}
if parsed.Source == "" {
// anonymous volume
sanitizedBinds = append(sanitizedBinds, bind)
continue
}
if isValid(parsed.Source) {
sanitizedBinds = append(sanitizedBinds, bind)
} else {
logger.Warnf("[%s] is not a valid volume, will be ignored", parsed.Source)
}
}
hostConfig.Binds = sanitizedBinds
// sanitize mounts
sanitizedMounts := make([]mount.Mount, 0, len(hostConfig.Mounts))
for _, mt := range hostConfig.Mounts {
if isValid(mt.Source) {
sanitizedMounts = append(sanitizedMounts, mt)
} else {
logger.Warnf("[%s] is not a valid volume, will be ignored", mt.Source)
}
}
hostConfig.Mounts = sanitizedMounts
} else {
hostConfig.Binds = []string{}
hostConfig.Mounts = []mount.Mount{}
}
return config, hostConfig
}
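
ValidVolumes entries are compiled with gobwas/glob, so an administrator can allow whole groups of host paths rather than listing each one. A short sketch of the matching semantics sanitizeConfig relies on (patterns and sources are examples only):

package main

import (
	"fmt"

	"github.com/gobwas/glob"
)

func main() {
	patterns := []string{"shared_volume", "/etc/conf.d/*.json", "**"}
	sources := []string{"shared_volume", "/etc/conf.d/base.json", "/secrets/keys"}

	for _, p := range patterns {
		g, err := glob.Compile(p)
		if err != nil {
			panic(err)
		}
		for _, s := range sources {
			// "**" matches everything; the other patterns only match their own source.
			fmt.Printf("pattern=%-20s source=%-24s match=%v\n", p, s, g.Match(s))
		}
	}
}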


@@ -9,8 +9,12 @@ import (
"testing"
"time"
"github.com/nektos/act/pkg/common"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/client"
"github.com/sirupsen/logrus/hooks/test"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
)
@@ -167,3 +171,76 @@ func TestDockerExecFailure(t *testing.T) {
// Type assert containerReference implements ExecutionsEnvironment
var _ ExecutionsEnvironment = &containerReference{}
func TestCheckVolumes(t *testing.T) {
testCases := []struct {
desc string
validVolumes []string
binds []string
expectedBinds []string
}{
{
desc: "match all volumes",
validVolumes: []string{"**"},
binds: []string{
"shared_volume:/shared_volume",
"/home/test/data:/test_data",
"/etc/conf.d/base.json:/config/base.json",
"sql_data:/sql_data",
"/secrets/keys:/keys",
},
expectedBinds: []string{
"shared_volume:/shared_volume",
"/home/test/data:/test_data",
"/etc/conf.d/base.json:/config/base.json",
"sql_data:/sql_data",
"/secrets/keys:/keys",
},
},
{
desc: "no volumes can be matched",
validVolumes: []string{},
binds: []string{
"shared_volume:/shared_volume",
"/home/test/data:/test_data",
"/etc/conf.d/base.json:/config/base.json",
"sql_data:/sql_data",
"/secrets/keys:/keys",
},
expectedBinds: []string{},
},
{
desc: "only allowed volumes can be matched",
validVolumes: []string{
"shared_volume",
"/home/test/data",
"/etc/conf.d/*.json",
},
binds: []string{
"shared_volume:/shared_volume",
"/home/test/data:/test_data",
"/etc/conf.d/base.json:/config/base.json",
"sql_data:/sql_data",
"/secrets/keys:/keys",
},
expectedBinds: []string{
"shared_volume:/shared_volume",
"/home/test/data:/test_data",
"/etc/conf.d/base.json:/config/base.json",
},
},
}
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
logger, _ := test.NewNullLogger()
ctx := common.WithLogger(context.Background(), logger)
cr := &containerReference{
input: &NewContainerInput{
ValidVolumes: tc.validVolumes,
},
}
_, hostConf := cr.sanitizeConfig(ctx, &container.Config{}, &container.HostConfig{Binds: tc.binds})
assert.Equal(t, tc.expectedBinds, hostConf.Binds)
})
}
}


@@ -1,4 +1,4 @@
//go:build WITHOUT_DOCKER || !(linux || darwin || windows)
//go:build WITHOUT_DOCKER || !(linux || darwin || windows || netbsd)
package container


@@ -1,4 +1,4 @@
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
package container


@@ -21,6 +21,7 @@ import (
"golang.org/x/term"
"github.com/nektos/act/pkg/common"
"github.com/nektos/act/pkg/filecollector"
"github.com/nektos/act/pkg/lookpath"
)
@@ -40,6 +41,12 @@ func (e *HostEnvironment) Create(_ []string, _ []string) common.Executor {
}
}
func (e *HostEnvironment) ConnectToNetwork(name string) common.Executor {
return func(ctx context.Context) error {
return nil
}
}
func (e *HostEnvironment) Close() common.Executor {
return func(ctx context.Context) error {
return nil
@@ -65,7 +72,7 @@ func (e *HostEnvironment) CopyTarStream(ctx context.Context, destPath string, ta
return err
}
tr := tar.NewReader(tarStream)
cp := &copyCollector{
cp := &filecollector.CopyCollector{
DstDir: destPath,
}
for {
@@ -104,16 +111,16 @@ func (e *HostEnvironment) CopyDir(destPath string, srcPath string, useGitIgnore
ignorer = gitignore.NewMatcher(ps)
}
fc := &fileCollector{
Fs: &defaultFs{},
fc := &filecollector.FileCollector{
Fs: &filecollector.DefaultFs{},
Ignorer: ignorer,
SrcPath: srcPath,
SrcPrefix: srcPrefix,
Handler: &copyCollector{
Handler: &filecollector.CopyCollector{
DstDir: destPath,
},
}
return filepath.Walk(srcPath, fc.collectFiles(ctx, []string{}))
return filepath.Walk(srcPath, fc.CollectFiles(ctx, []string{}))
}
}
@@ -126,21 +133,21 @@ func (e *HostEnvironment) GetContainerArchive(ctx context.Context, srcPath strin
if err != nil {
return nil, err
}
tc := &tarCollector{
tc := &filecollector.TarCollector{
TarWriter: tw,
}
if fi.IsDir() {
srcPrefix := filepath.Dir(srcPath)
srcPrefix := srcPath
if !strings.HasSuffix(srcPrefix, string(filepath.Separator)) {
srcPrefix += string(filepath.Separator)
}
fc := &fileCollector{
Fs: &defaultFs{},
fc := &filecollector.FileCollector{
Fs: &filecollector.DefaultFs{},
SrcPath: srcPath,
SrcPrefix: srcPrefix,
Handler: tc,
}
err = filepath.Walk(srcPath, fc.collectFiles(ctx, []string{}))
err = filepath.Walk(srcPath, fc.CollectFiles(ctx, []string{}))
if err != nil {
return nil, err
}


@@ -1,4 +1,71 @@
package container
import (
"archive/tar"
"context"
"io"
"os"
"path"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
)
// Type assert HostEnvironment implements ExecutionsEnvironment
var _ ExecutionsEnvironment = &HostEnvironment{}
func TestCopyDir(t *testing.T) {
dir, err := os.MkdirTemp("", "test-host-env-*")
assert.NoError(t, err)
defer os.RemoveAll(dir)
ctx := context.Background()
e := &HostEnvironment{
Path: filepath.Join(dir, "path"),
TmpDir: filepath.Join(dir, "tmp"),
ToolCache: filepath.Join(dir, "tool_cache"),
ActPath: filepath.Join(dir, "act_path"),
StdOut: os.Stdout,
Workdir: path.Join("testdata", "scratch"),
}
_ = os.MkdirAll(e.Path, 0700)
_ = os.MkdirAll(e.TmpDir, 0700)
_ = os.MkdirAll(e.ToolCache, 0700)
_ = os.MkdirAll(e.ActPath, 0700)
err = e.CopyDir(e.Workdir, e.Path, true)(ctx)
assert.NoError(t, err)
}
func TestGetContainerArchive(t *testing.T) {
dir, err := os.MkdirTemp("", "test-host-env-*")
assert.NoError(t, err)
defer os.RemoveAll(dir)
ctx := context.Background()
e := &HostEnvironment{
Path: filepath.Join(dir, "path"),
TmpDir: filepath.Join(dir, "tmp"),
ToolCache: filepath.Join(dir, "tool_cache"),
ActPath: filepath.Join(dir, "act_path"),
StdOut: os.Stdout,
Workdir: path.Join("testdata", "scratch"),
}
_ = os.MkdirAll(e.Path, 0700)
_ = os.MkdirAll(e.TmpDir, 0700)
_ = os.MkdirAll(e.ToolCache, 0700)
_ = os.MkdirAll(e.ActPath, 0700)
expectedContent := []byte("sdde/7sh")
err = os.WriteFile(filepath.Join(e.Path, "action.yml"), expectedContent, 0600)
assert.NoError(t, err)
archive, err := e.GetContainerArchive(ctx, e.Path)
assert.NoError(t, err)
defer archive.Close()
reader := tar.NewReader(archive)
h, err := reader.Next()
assert.NoError(t, err)
assert.Equal(t, "action.yml", h.Name)
content, err := io.ReadAll(reader)
assert.NoError(t, err)
assert.Equal(t, expectedContent, content)
_, err = reader.Next()
assert.ErrorIs(t, err, io.EOF)
}


@@ -0,0 +1 @@
testfile


@@ -155,6 +155,8 @@ func (impl *interperterImpl) evaluateVariable(variableNode *actionlint.VariableN
switch strings.ToLower(variableNode.Name) {
case "github":
return impl.env.Github, nil
case "gitea": // compatible with Gitea
return impl.env.Github, nil
case "env":
return impl.env.Env, nil
case "job":
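
With this change `gitea` is an alias for the `github` context, so expressions written against Gitea's naming resolve to the same values. A hedged sketch of evaluating both spellings (only the Repository field is populated, and Config is left at its zero value, which should be enough for plain context lookups):

package main

import (
	"fmt"

	"github.com/nektos/act/pkg/exprparser"
	"github.com/nektos/act/pkg/model"
)

func main() {
	env := &exprparser.EvaluationEnvironment{
		Github: &model.GithubContext{Repository: "gitea/act"},
	}
	ip := exprparser.NewInterpeter(env, exprparser.Config{})

	v1, _ := ip.Evaluate("github.repository", exprparser.DefaultStatusCheckNone)
	v2, _ := ip.Evaluate("gitea.repository", exprparser.DefaultStatusCheckNone)
	fmt.Println(v1, v2) // gitea/act gitea/act
}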


@@ -1,4 +1,4 @@
package container
package filecollector
import (
"archive/tar"
@@ -17,18 +17,18 @@ import (
"github.com/go-git/go-git/v5/plumbing/format/index"
)
type fileCollectorHandler interface {
type Handler interface {
WriteFile(path string, fi fs.FileInfo, linkName string, f io.Reader) error
}
type tarCollector struct {
type TarCollector struct {
TarWriter *tar.Writer
UID int
GID int
DstDir string
}
func (tc tarCollector) WriteFile(fpath string, fi fs.FileInfo, linkName string, f io.Reader) error {
func (tc TarCollector) WriteFile(fpath string, fi fs.FileInfo, linkName string, f io.Reader) error {
// create a new dir/file header
header, err := tar.FileInfoHeader(fi, linkName)
if err != nil {
@@ -59,11 +59,11 @@ func (tc tarCollector) WriteFile(fpath string, fi fs.FileInfo, linkName string,
return nil
}
type copyCollector struct {
type CopyCollector struct {
DstDir string
}
func (cc *copyCollector) WriteFile(fpath string, fi fs.FileInfo, linkName string, f io.Reader) error {
func (cc *CopyCollector) WriteFile(fpath string, fi fs.FileInfo, linkName string, f io.Reader) error {
fdestpath := filepath.Join(cc.DstDir, fpath)
if err := os.MkdirAll(filepath.Dir(fdestpath), 0o777); err != nil {
return err
@@ -82,29 +82,29 @@ func (cc *copyCollector) WriteFile(fpath string, fi fs.FileInfo, linkName string
return nil
}
type fileCollector struct {
type FileCollector struct {
Ignorer gitignore.Matcher
SrcPath string
SrcPrefix string
Fs fileCollectorFs
Handler fileCollectorHandler
Fs Fs
Handler Handler
}
type fileCollectorFs interface {
type Fs interface {
Walk(root string, fn filepath.WalkFunc) error
OpenGitIndex(path string) (*index.Index, error)
Open(path string) (io.ReadCloser, error)
Readlink(path string) (string, error)
}
type defaultFs struct {
type DefaultFs struct {
}
func (*defaultFs) Walk(root string, fn filepath.WalkFunc) error {
func (*DefaultFs) Walk(root string, fn filepath.WalkFunc) error {
return filepath.Walk(root, fn)
}
func (*defaultFs) OpenGitIndex(path string) (*index.Index, error) {
func (*DefaultFs) OpenGitIndex(path string) (*index.Index, error) {
r, err := git.PlainOpen(path)
if err != nil {
return nil, err
@@ -116,16 +116,16 @@ func (*defaultFs) OpenGitIndex(path string) (*index.Index, error) {
return i, nil
}
func (*defaultFs) Open(path string) (io.ReadCloser, error) {
func (*DefaultFs) Open(path string) (io.ReadCloser, error) {
return os.Open(path)
}
func (*defaultFs) Readlink(path string) (string, error) {
func (*DefaultFs) Readlink(path string) (string, error) {
return os.Readlink(path)
}
//nolint:gocyclo
func (fc *fileCollector) collectFiles(ctx context.Context, submodulePath []string) filepath.WalkFunc {
func (fc *FileCollector) CollectFiles(ctx context.Context, submodulePath []string) filepath.WalkFunc {
i, _ := fc.Fs.OpenGitIndex(path.Join(fc.SrcPath, path.Join(submodulePath...)))
return func(file string, fi os.FileInfo, err error) error {
if err != nil {
@@ -166,7 +166,7 @@ func (fc *fileCollector) collectFiles(ctx context.Context, submodulePath []strin
}
}
if err == nil && entry.Mode == filemode.Submodule {
err = fc.Fs.Walk(file, fc.collectFiles(ctx, split))
err = fc.Fs.Walk(file, fc.CollectFiles(ctx, split))
if err != nil {
return err
}
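
The collector types are now exported from the new pkg/filecollector package so other code can reuse them. A minimal sketch that copies a directory tree with the exported API (src and dst are placeholder paths; Ignorer is left nil, so nothing is filtered out):

package main

import (
	"context"
	"path/filepath"

	"github.com/nektos/act/pkg/filecollector"
)

func main() {
	src := "/tmp/act-src"  // placeholder source directory
	dst := "/tmp/act-copy" // placeholder destination directory

	fc := &filecollector.FileCollector{
		Fs:        &filecollector.DefaultFs{},
		SrcPath:   src,
		SrcPrefix: src + string(filepath.Separator),
		Handler:   &filecollector.CopyCollector{DstDir: dst},
	}
	// CollectFiles returns a filepath.WalkFunc, so the walk drives the copy.
	if err := filepath.Walk(src, fc.CollectFiles(context.Background(), []string{})); err != nil {
		panic(err)
	}
}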


@@ -1,4 +1,4 @@
package container
package filecollector
import (
"archive/tar"
@@ -95,16 +95,16 @@ func TestIgnoredTrackedfile(t *testing.T) {
tw := tar.NewWriter(tmpTar)
ps, _ := gitignore.ReadPatterns(worktree, []string{})
ignorer := gitignore.NewMatcher(ps)
fc := &fileCollector{
fc := &FileCollector{
Fs: &memoryFs{Filesystem: fs},
Ignorer: ignorer,
SrcPath: "mygitrepo",
SrcPrefix: "mygitrepo" + string(filepath.Separator),
Handler: &tarCollector{
Handler: &TarCollector{
TarWriter: tw,
},
}
err := fc.Fs.Walk("mygitrepo", fc.collectFiles(context.Background(), []string{}))
err := fc.Fs.Walk("mygitrepo", fc.CollectFiles(context.Background(), []string{}))
assert.NoError(t, err, "successfully collect files")
tw.Close()
_, _ = tmpTar.Seek(0, io.SeekStart)
@@ -115,3 +115,58 @@ func TestIgnoredTrackedfile(t *testing.T) {
_, err = tr.Next()
assert.ErrorIs(t, err, io.EOF, "tar must only contain one element")
}
func TestSymlinks(t *testing.T) {
fs := memfs.New()
_ = fs.MkdirAll("mygitrepo/.git", 0o777)
dotgit, _ := fs.Chroot("mygitrepo/.git")
worktree, _ := fs.Chroot("mygitrepo")
repo, _ := git.Init(filesystem.NewStorage(dotgit, cache.NewObjectLRUDefault()), worktree)
// Create a file and a symlink to it; both are expected to end up in the tar
f, err := worktree.Create(".env")
assert.NoError(t, err)
_, err = f.Write([]byte("test=val1\n"))
assert.NoError(t, err)
f.Close()
err = worktree.Symlink(".env", "test.env")
assert.NoError(t, err)
w, err := repo.Worktree()
assert.NoError(t, err)
// Add the file and the symlink to the index so the collector picks them up
_, err = w.Add(".env")
assert.NoError(t, err)
_, err = w.Add("test.env")
assert.NoError(t, err)
tmpTar, _ := fs.Create("temp.tar")
tw := tar.NewWriter(tmpTar)
ps, _ := gitignore.ReadPatterns(worktree, []string{})
ignorer := gitignore.NewMatcher(ps)
fc := &FileCollector{
Fs: &memoryFs{Filesystem: fs},
Ignorer: ignorer,
SrcPath: "mygitrepo",
SrcPrefix: "mygitrepo" + string(filepath.Separator),
Handler: &TarCollector{
TarWriter: tw,
},
}
err = fc.Fs.Walk("mygitrepo", fc.CollectFiles(context.Background(), []string{}))
assert.NoError(t, err, "successfully collect files")
tw.Close()
_, _ = tmpTar.Seek(0, io.SeekStart)
tr := tar.NewReader(tmpTar)
h, err := tr.Next()
files := map[string]tar.Header{}
for err == nil {
files[h.Name] = *h
h, err = tr.Next()
}
assert.Equal(t, ".env", files[".env"].Name)
assert.Equal(t, "test.env", files["test.env"].Name)
assert.Equal(t, ".env", files["test.env"].Linkname)
assert.ErrorIs(t, err, io.EOF, "tar must be read cleanly to EOF")
}

pkg/jobparser/evaluator.go

@@ -0,0 +1,185 @@
package jobparser
import (
"fmt"
"regexp"
"strings"
"github.com/nektos/act/pkg/exprparser"
"gopkg.in/yaml.v3"
)
// ExpressionEvaluator is copied from runner.expressionEvaluator,
// to avoid unnecessary dependencies
type ExpressionEvaluator struct {
interpreter exprparser.Interpreter
}
func NewExpressionEvaluator(interpreter exprparser.Interpreter) *ExpressionEvaluator {
return &ExpressionEvaluator{interpreter: interpreter}
}
func (ee ExpressionEvaluator) evaluate(in string, defaultStatusCheck exprparser.DefaultStatusCheck) (interface{}, error) {
evaluated, err := ee.interpreter.Evaluate(in, defaultStatusCheck)
return evaluated, err
}
func (ee ExpressionEvaluator) evaluateScalarYamlNode(node *yaml.Node) error {
var in string
if err := node.Decode(&in); err != nil {
return err
}
if !strings.Contains(in, "${{") || !strings.Contains(in, "}}") {
return nil
}
expr, _ := rewriteSubExpression(in, false)
res, err := ee.evaluate(expr, exprparser.DefaultStatusCheckNone)
if err != nil {
return err
}
return node.Encode(res)
}
func (ee ExpressionEvaluator) evaluateMappingYamlNode(node *yaml.Node) error {
// GitHub has an undocumented feature to merge maps, called the insert directive
insertDirective := regexp.MustCompile(`\${{\s*insert\s*}}`)
for i := 0; i < len(node.Content)/2; {
k := node.Content[i*2]
v := node.Content[i*2+1]
if err := ee.EvaluateYamlNode(v); err != nil {
return err
}
var sk string
// Merge the nested map of the insert directive
if k.Decode(&sk) == nil && insertDirective.MatchString(sk) {
node.Content = append(append(node.Content[:i*2], v.Content...), node.Content[(i+1)*2:]...)
i += len(v.Content) / 2
} else {
if err := ee.EvaluateYamlNode(k); err != nil {
return err
}
i++
}
}
return nil
}
func (ee ExpressionEvaluator) evaluateSequenceYamlNode(node *yaml.Node) error {
for i := 0; i < len(node.Content); {
v := node.Content[i]
// Preserve nested sequences
wasseq := v.Kind == yaml.SequenceNode
if err := ee.EvaluateYamlNode(v); err != nil {
return err
}
// GitHub has an undocumented feature to merge sequences/arrays;
// if evaluation produced a nested sequence, merge the arrays
if v.Kind == yaml.SequenceNode && !wasseq {
node.Content = append(append(node.Content[:i], v.Content...), node.Content[i+1:]...)
i += len(v.Content)
} else {
i++
}
}
return nil
}
func (ee ExpressionEvaluator) EvaluateYamlNode(node *yaml.Node) error {
switch node.Kind {
case yaml.ScalarNode:
return ee.evaluateScalarYamlNode(node)
case yaml.MappingNode:
return ee.evaluateMappingYamlNode(node)
case yaml.SequenceNode:
return ee.evaluateSequenceYamlNode(node)
default:
return nil
}
}
func (ee ExpressionEvaluator) Interpolate(in string) string {
if !strings.Contains(in, "${{") || !strings.Contains(in, "}}") {
return in
}
expr, _ := rewriteSubExpression(in, true)
evaluated, err := ee.evaluate(expr, exprparser.DefaultStatusCheckNone)
if err != nil {
return ""
}
value, ok := evaluated.(string)
if !ok {
panic(fmt.Sprintf("Expression %s did not evaluate to a string", expr))
}
return value
}
func escapeFormatString(in string) string {
return strings.ReplaceAll(strings.ReplaceAll(in, "{", "{{"), "}", "}}")
}
func rewriteSubExpression(in string, forceFormat bool) (string, error) {
if !strings.Contains(in, "${{") || !strings.Contains(in, "}}") {
return in, nil
}
strPattern := regexp.MustCompile("(?:''|[^'])*'")
pos := 0
exprStart := -1
strStart := -1
var results []string
formatOut := ""
for pos < len(in) {
if strStart > -1 {
matches := strPattern.FindStringIndex(in[pos:])
if matches == nil {
panic("unclosed string.")
}
strStart = -1
pos += matches[1]
} else if exprStart > -1 {
exprEnd := strings.Index(in[pos:], "}}")
strStart = strings.Index(in[pos:], "'")
if exprEnd > -1 && strStart > -1 {
if exprEnd < strStart {
strStart = -1
} else {
exprEnd = -1
}
}
if exprEnd > -1 {
formatOut += fmt.Sprintf("{%d}", len(results))
results = append(results, strings.TrimSpace(in[exprStart:pos+exprEnd]))
pos += exprEnd + 2
exprStart = -1
} else if strStart > -1 {
pos += strStart + 1
} else {
panic("unclosed expression.")
}
} else {
exprStart = strings.Index(in[pos:], "${{")
if exprStart != -1 {
formatOut += escapeFormatString(in[pos : pos+exprStart])
exprStart = pos + exprStart + 3
pos = exprStart
} else {
formatOut += escapeFormatString(in[pos:])
pos = len(in)
}
}
}
if len(results) == 1 && formatOut == "{0}" && !forceFormat {
return in, nil
}
out := fmt.Sprintf("format('%s', %s)", strings.ReplaceAll(formatOut, "'", "''"), strings.Join(results, ", "))
return out, nil
}


@@ -0,0 +1,81 @@
package jobparser
import (
"github.com/nektos/act/pkg/exprparser"
"github.com/nektos/act/pkg/model"
"gopkg.in/yaml.v3"
)
// NewInterpeter returns an interpreter used on the server side;
// it needs only the github, needs, strategy, matrix and inputs contexts,
// see https://docs.github.com/en/actions/learn-github-actions/contexts#context-availability
func NewInterpeter(
jobID string,
job *model.Job,
matrix map[string]interface{},
gitCtx *model.GithubContext,
results map[string]*JobResult,
) exprparser.Interpreter {
strategy := make(map[string]interface{})
if job.Strategy != nil {
strategy["fail-fast"] = job.Strategy.FailFast
strategy["max-parallel"] = job.Strategy.MaxParallel
}
run := &model.Run{
Workflow: &model.Workflow{
Jobs: map[string]*model.Job{},
},
JobID: jobID,
}
for id, result := range results {
need := yaml.Node{}
_ = need.Encode(result.Needs)
run.Workflow.Jobs[id] = &model.Job{
RawNeeds: need,
Result: result.Result,
Outputs: result.Outputs,
}
}
jobs := run.Workflow.Jobs
jobNeeds := run.Job().Needs()
using := map[string]exprparser.Needs{}
for _, need := range jobNeeds {
if v, ok := jobs[need]; ok {
using[need] = exprparser.Needs{
Outputs: v.Outputs,
Result: v.Result,
}
}
}
ee := &exprparser.EvaluationEnvironment{
Github: gitCtx,
Env: nil, // no need
Job: nil, // no need
Steps: nil, // no need
Runner: nil, // no need
Secrets: nil, // no need
Strategy: strategy,
Matrix: matrix,
Needs: using,
Inputs: nil, // not supported yet
}
config := exprparser.Config{
Run: run,
WorkingDir: "", // WorkingDir is used for the function hashFiles, but it's not needed in the server
Context: "job",
}
return exprparser.NewInterpeter(ee, config)
}
// JobResult is the minimal job result information required by the interpreter
type JobResult struct {
Needs []string
Result string
Outputs map[string]string
}

pkg/jobparser/jobparser.go

@@ -0,0 +1,150 @@
package jobparser
import (
"bytes"
"fmt"
"sort"
"strings"
"gopkg.in/yaml.v3"
"github.com/nektos/act/pkg/model"
)
func Parse(content []byte, options ...ParseOption) ([]*SingleWorkflow, error) {
origin, err := model.ReadWorkflow(bytes.NewReader(content))
if err != nil {
return nil, fmt.Errorf("model.ReadWorkflow: %w", err)
}
workflow := &SingleWorkflow{}
if err := yaml.Unmarshal(content, workflow); err != nil {
return nil, fmt.Errorf("yaml.Unmarshal: %w", err)
}
pc := &parseContext{}
for _, o := range options {
o(pc)
}
results := map[string]*JobResult{}
for id, job := range origin.Jobs {
results[id] = &JobResult{
Needs: job.Needs(),
Result: pc.jobResults[id],
Outputs: nil, // not supported yet
}
}
var ret []*SingleWorkflow
ids, jobs, err := workflow.jobs()
if err != nil {
return nil, fmt.Errorf("invalid jobs: %w", err)
}
for i, id := range ids {
job := jobs[i]
matricxes, err := getMatrixes(origin.GetJob(id))
if err != nil {
return nil, fmt.Errorf("getMatrixes: %w", err)
}
for _, matrix := range matricxes {
job := job.Clone()
if job.Name == "" {
job.Name = id
}
job.Name = nameWithMatrix(job.Name, matrix)
job.Strategy.RawMatrix = encodeMatrix(matrix)
evaluator := NewExpressionEvaluator(NewInterpeter(id, origin.GetJob(id), matrix, pc.gitContext, results))
runsOn := origin.GetJob(id).RunsOn()
for i, v := range runsOn {
runsOn[i] = evaluator.Interpolate(v)
}
job.RawRunsOn = encodeRunsOn(runsOn)
swf := &SingleWorkflow{
Name: workflow.Name,
RawOn: workflow.RawOn,
Env: workflow.Env,
Defaults: workflow.Defaults,
}
if err := swf.SetJob(id, job); err != nil {
return nil, fmt.Errorf("SetJob: %w", err)
}
ret = append(ret, swf)
}
}
return ret, nil
}
func WithJobResults(results map[string]string) ParseOption {
return func(c *parseContext) {
c.jobResults = results
}
}
func WithGitContext(context *model.GithubContext) ParseOption {
return func(c *parseContext) {
c.gitContext = context
}
}
type parseContext struct {
jobResults map[string]string
gitContext *model.GithubContext
}
type ParseOption func(c *parseContext)
func getMatrixes(job *model.Job) ([]map[string]interface{}, error) {
ret, err := job.GetMatrixes()
if err != nil {
return nil, fmt.Errorf("GetMatrixes: %w", err)
}
sort.Slice(ret, func(i, j int) bool {
return matrixName(ret[i]) < matrixName(ret[j])
})
return ret, nil
}
func encodeMatrix(matrix map[string]interface{}) yaml.Node {
if len(matrix) == 0 {
return yaml.Node{}
}
value := map[string][]interface{}{}
for k, v := range matrix {
value[k] = []interface{}{v}
}
node := yaml.Node{}
_ = node.Encode(value)
return node
}
func encodeRunsOn(runsOn []string) yaml.Node {
node := yaml.Node{}
if len(runsOn) == 1 {
_ = node.Encode(runsOn[0])
} else {
_ = node.Encode(runsOn)
}
return node
}
func nameWithMatrix(name string, m map[string]interface{}) string {
if len(m) == 0 {
return name
}
return name + " " + matrixName(m)
}
func matrixName(m map[string]interface{}) string {
ks := make([]string, 0, len(m))
for k := range m {
ks = append(ks, k)
}
sort.Strings(ks)
vs := make([]string, 0, len(m))
for _, v := range ks {
vs = append(vs, fmt.Sprint(m[v]))
}
return fmt.Sprintf("(%s)", strings.Join(vs, ", "))
}
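
Parse turns one workflow into a list of SingleWorkflow values, one per job and matrix combination, with runs-on already interpolated. A small usage sketch (the workflow content is an example; ordering follows the sorted matrix names):

package main

import (
	"fmt"

	"github.com/nektos/act/pkg/jobparser"
)

func main() {
	content := []byte(`
name: test
jobs:
  build:
    strategy:
      matrix:
        os: [ubuntu-22.04, ubuntu-20.04]
    runs-on: ${{ matrix.os }}
    steps:
      - run: uname -a
`)
	flows, err := jobparser.Parse(content)
	if err != nil {
		panic(err)
	}
	// One SingleWorkflow per matrix entry, each holding exactly one job.
	for _, wf := range flows {
		id, job := wf.Job()
		fmt.Println(id, job.Name) // build build (ubuntu-20.04), then build build (ubuntu-22.04)
	}
}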


@@ -0,0 +1,76 @@
package jobparser
import (
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v3"
)
func TestParse(t *testing.T) {
tests := []struct {
name string
options []ParseOption
wantErr bool
}{
{
name: "multiple_jobs",
options: nil,
wantErr: false,
},
{
name: "multiple_matrix",
options: nil,
wantErr: false,
},
{
name: "has_needs",
options: nil,
wantErr: false,
},
{
name: "has_with",
options: nil,
wantErr: false,
},
{
name: "has_secrets",
options: nil,
wantErr: false,
},
{
name: "empty_step",
options: nil,
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
content := ReadTestdata(t, tt.name+".in.yaml")
want := ReadTestdata(t, tt.name+".out.yaml")
got, err := Parse(content, tt.options...)
if tt.wantErr {
require.Error(t, err)
}
require.NoError(t, err)
builder := &strings.Builder{}
for _, v := range got {
if builder.Len() > 0 {
builder.WriteString("---\n")
}
encoder := yaml.NewEncoder(builder)
encoder.SetIndent(2)
require.NoError(t, encoder.Encode(v))
id, job := v.Job()
assert.NotEmpty(t, id)
assert.NotNil(t, job)
}
assert.Equal(t, string(want), builder.String())
})
}
}

pkg/jobparser/model.go

@@ -0,0 +1,333 @@
package jobparser
import (
"fmt"
"github.com/nektos/act/pkg/model"
"gopkg.in/yaml.v3"
)
// SingleWorkflow is a workflow with a single job and a single matrix combination
type SingleWorkflow struct {
Name string `yaml:"name,omitempty"`
RawOn yaml.Node `yaml:"on,omitempty"`
Env map[string]string `yaml:"env,omitempty"`
RawJobs yaml.Node `yaml:"jobs,omitempty"`
Defaults Defaults `yaml:"defaults,omitempty"`
}
func (w *SingleWorkflow) Job() (string, *Job) {
ids, jobs, _ := w.jobs()
if len(ids) >= 1 {
return ids[0], jobs[0]
}
return "", nil
}
func (w *SingleWorkflow) jobs() ([]string, []*Job, error) {
ids, jobs, err := parseMappingNode[*Job](&w.RawJobs)
if err != nil {
return nil, nil, err
}
for _, job := range jobs {
steps := make([]*Step, 0, len(job.Steps))
for _, s := range job.Steps {
if s != nil {
steps = append(steps, s)
}
}
job.Steps = steps
}
return ids, jobs, nil
}
func (w *SingleWorkflow) SetJob(id string, job *Job) error {
m := map[string]*Job{
id: job,
}
out, err := yaml.Marshal(m)
if err != nil {
return err
}
node := yaml.Node{}
if err := yaml.Unmarshal(out, &node); err != nil {
return err
}
if len(node.Content) != 1 || node.Content[0].Kind != yaml.MappingNode {
return fmt.Errorf("can not set job: %q", out)
}
w.RawJobs = *node.Content[0]
return nil
}
func (w *SingleWorkflow) Marshal() ([]byte, error) {
return yaml.Marshal(w)
}
type Job struct {
Name string `yaml:"name,omitempty"`
RawNeeds yaml.Node `yaml:"needs,omitempty"`
RawRunsOn yaml.Node `yaml:"runs-on,omitempty"`
Env yaml.Node `yaml:"env,omitempty"`
If yaml.Node `yaml:"if,omitempty"`
Steps []*Step `yaml:"steps,omitempty"`
TimeoutMinutes string `yaml:"timeout-minutes,omitempty"`
Services map[string]*ContainerSpec `yaml:"services,omitempty"`
Strategy Strategy `yaml:"strategy,omitempty"`
RawContainer yaml.Node `yaml:"container,omitempty"`
Defaults Defaults `yaml:"defaults,omitempty"`
Outputs map[string]string `yaml:"outputs,omitempty"`
Uses string `yaml:"uses,omitempty"`
With map[string]interface{} `yaml:"with,omitempty"`
RawSecrets yaml.Node `yaml:"secrets,omitempty"`
}
func (j *Job) Clone() *Job {
if j == nil {
return nil
}
return &Job{
Name: j.Name,
RawNeeds: j.RawNeeds,
RawRunsOn: j.RawRunsOn,
Env: j.Env,
If: j.If,
Steps: j.Steps,
TimeoutMinutes: j.TimeoutMinutes,
Services: j.Services,
Strategy: j.Strategy,
RawContainer: j.RawContainer,
Defaults: j.Defaults,
Outputs: j.Outputs,
Uses: j.Uses,
With: j.With,
RawSecrets: j.RawSecrets,
}
}
func (j *Job) Needs() []string {
return (&model.Job{RawNeeds: j.RawNeeds}).Needs()
}
func (j *Job) EraseNeeds() *Job {
j.RawNeeds = yaml.Node{}
return j
}
func (j *Job) RunsOn() []string {
return (&model.Job{RawRunsOn: j.RawRunsOn}).RunsOn()
}
type Step struct {
ID string `yaml:"id,omitempty"`
If yaml.Node `yaml:"if,omitempty"`
Name string `yaml:"name,omitempty"`
Uses string `yaml:"uses,omitempty"`
Run string `yaml:"run,omitempty"`
WorkingDirectory string `yaml:"working-directory,omitempty"`
Shell string `yaml:"shell,omitempty"`
Env yaml.Node `yaml:"env,omitempty"`
With map[string]string `yaml:"with,omitempty"`
ContinueOnError bool `yaml:"continue-on-error,omitempty"`
TimeoutMinutes string `yaml:"timeout-minutes,omitempty"`
}
// String gets the name of the step
func (s *Step) String() string {
if s == nil {
return ""
}
return (&model.Step{
ID: s.ID,
Name: s.Name,
Uses: s.Uses,
Run: s.Run,
}).String()
}
type ContainerSpec struct {
Image string `yaml:"image,omitempty"`
Env map[string]string `yaml:"env,omitempty"`
Ports []string `yaml:"ports,omitempty"`
Volumes []string `yaml:"volumes,omitempty"`
Options string `yaml:"options,omitempty"`
Credentials map[string]string `yaml:"credentials,omitempty"`
Cmd []string `yaml:"cmd,omitempty"`
}
type Strategy struct {
FailFastString string `yaml:"fail-fast,omitempty"`
MaxParallelString string `yaml:"max-parallel,omitempty"`
RawMatrix yaml.Node `yaml:"matrix,omitempty"`
}
type Defaults struct {
Run RunDefaults `yaml:"run,omitempty"`
}
type RunDefaults struct {
Shell string `yaml:"shell,omitempty"`
WorkingDirectory string `yaml:"working-directory,omitempty"`
}
type Event struct {
Name string
acts map[string][]string
schedules []map[string]string
}
func (evt *Event) IsSchedule() bool {
return evt.schedules != nil
}
func (evt *Event) Acts() map[string][]string {
return evt.acts
}
func (evt *Event) Schedules() []map[string]string {
return evt.schedules
}
func ParseRawOn(rawOn *yaml.Node) ([]*Event, error) {
switch rawOn.Kind {
case yaml.ScalarNode:
var val string
err := rawOn.Decode(&val)
if err != nil {
return nil, err
}
return []*Event{
{Name: val},
}, nil
case yaml.SequenceNode:
var val []interface{}
err := rawOn.Decode(&val)
if err != nil {
return nil, err
}
res := make([]*Event, 0, len(val))
for _, v := range val {
switch t := v.(type) {
case string:
res = append(res, &Event{Name: t})
default:
return nil, fmt.Errorf("invalid type %T", t)
}
}
return res, nil
case yaml.MappingNode:
events, triggers, err := parseMappingNode[interface{}](rawOn)
if err != nil {
return nil, err
}
res := make([]*Event, 0, len(events))
for i, k := range events {
v := triggers[i]
if v == nil {
res = append(res, &Event{
Name: k,
acts: map[string][]string{},
})
continue
}
switch t := v.(type) {
case string:
res = append(res, &Event{
Name: k,
acts: map[string][]string{},
})
case []string:
res = append(res, &Event{
Name: k,
acts: map[string][]string{},
})
case map[string]interface{}:
acts := make(map[string][]string, len(t))
for act, branches := range t {
switch b := branches.(type) {
case string:
acts[act] = []string{b}
case []string:
acts[act] = b
case []interface{}:
acts[act] = make([]string, len(b))
for i, v := range b {
var ok bool
if acts[act][i], ok = v.(string); !ok {
return nil, fmt.Errorf("unknown on type: %#v", branches)
}
}
default:
return nil, fmt.Errorf("unknown on type: %#v", branches)
}
}
res = append(res, &Event{
Name: k,
acts: acts,
})
case []interface{}:
if k != "schedule" {
return nil, fmt.Errorf("unknown on type: %#v", v)
}
schedules := make([]map[string]string, len(t))
for i, tt := range t {
vv, ok := tt.(map[string]interface{})
if !ok {
return nil, fmt.Errorf("unknown on type: %#v", v)
}
schedules[i] = make(map[string]string, len(vv))
for k, vvv := range vv {
var ok bool
if schedules[i][k], ok = vvv.(string); !ok {
return nil, fmt.Errorf("unknown on type: %#v", v)
}
}
}
res = append(res, &Event{
Name: k,
schedules: schedules,
})
default:
return nil, fmt.Errorf("unknown on type: %#v", v)
}
}
return res, nil
default:
return nil, fmt.Errorf("unknown on type: %v", rawOn.Kind)
}
}
// parseMappingNode parses a mapping node and preserves its order.
func parseMappingNode[T any](node *yaml.Node) ([]string, []T, error) {
if node.Kind != yaml.MappingNode {
return nil, nil, fmt.Errorf("input node is not a mapping node")
}
var scalars []string
var datas []T
expectKey := true
for _, item := range node.Content {
if expectKey {
if item.Kind != yaml.ScalarNode {
return nil, nil, fmt.Errorf("not a valid scalar node: %v", item.Value)
}
scalars = append(scalars, item.Value)
expectKey = false
} else {
var val T
if err := item.Decode(&val); err != nil {
return nil, nil, err
}
datas = append(datas, val)
expectKey = true
}
}
if len(scalars) != len(datas) {
return nil, nil, fmt.Errorf("invalid definition of on: %v", node.Value)
}
return scalars, datas, nil
}

pkg/jobparser/model_test.go

@@ -0,0 +1,306 @@
package jobparser
import (
"fmt"
"strings"
"testing"
"github.com/nektos/act/pkg/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v3"
)
func TestParseRawOn(t *testing.T) {
kases := []struct {
input string
result []*Event
}{
{
input: "on: issue_comment",
result: []*Event{
{
Name: "issue_comment",
},
},
},
{
input: "on:\n push",
result: []*Event{
{
Name: "push",
},
},
},
{
input: "on:\n - push\n - pull_request",
result: []*Event{
{
Name: "push",
},
{
Name: "pull_request",
},
},
},
{
input: "on:\n push:\n branches:\n - master",
result: []*Event{
{
Name: "push",
acts: map[string][]string{
"branches": {
"master",
},
},
},
},
},
{
input: "on:\n branch_protection_rule:\n types: [created, deleted]",
result: []*Event{
{
Name: "branch_protection_rule",
acts: map[string][]string{
"types": {
"created",
"deleted",
},
},
},
},
},
{
input: "on:\n project:\n types: [created, deleted]\n milestone:\n types: [opened, deleted]",
result: []*Event{
{
Name: "project",
acts: map[string][]string{
"types": {
"created",
"deleted",
},
},
},
{
Name: "milestone",
acts: map[string][]string{
"types": {
"opened",
"deleted",
},
},
},
},
},
{
input: "on:\n pull_request:\n types:\n - opened\n branches:\n - 'releases/**'",
result: []*Event{
{
Name: "pull_request",
acts: map[string][]string{
"types": {
"opened",
},
"branches": {
"releases/**",
},
},
},
},
},
{
input: "on:\n push:\n branches:\n - main\n pull_request:\n types:\n - opened\n branches:\n - '**'",
result: []*Event{
{
Name: "push",
acts: map[string][]string{
"branches": {
"main",
},
},
},
{
Name: "pull_request",
acts: map[string][]string{
"types": {
"opened",
},
"branches": {
"**",
},
},
},
},
},
{
input: "on:\n push:\n branches:\n - 'main'\n - 'releases/**'",
result: []*Event{
{
Name: "push",
acts: map[string][]string{
"branches": {
"main",
"releases/**",
},
},
},
},
},
{
input: "on:\n push:\n tags:\n - v1.**",
result: []*Event{
{
Name: "push",
acts: map[string][]string{
"tags": {
"v1.**",
},
},
},
},
},
{
input: "on: [pull_request, workflow_dispatch]",
result: []*Event{
{
Name: "pull_request",
},
{
Name: "workflow_dispatch",
},
},
},
{
input: "on:\n schedule:\n - cron: '20 6 * * *'",
result: []*Event{
{
Name: "schedule",
schedules: []map[string]string{
{
"cron": "20 6 * * *",
},
},
},
},
},
}
for _, kase := range kases {
t.Run(kase.input, func(t *testing.T) {
origin, err := model.ReadWorkflow(strings.NewReader(kase.input))
assert.NoError(t, err)
events, err := ParseRawOn(&origin.RawOn)
assert.NoError(t, err)
assert.EqualValues(t, kase.result, events, fmt.Sprintf("%#v", events))
})
}
}
func TestSingleWorkflow_SetJob(t *testing.T) {
t.Run("erase needs", func(t *testing.T) {
content := ReadTestdata(t, "erase_needs.in.yaml")
want := ReadTestdata(t, "erase_needs.out.yaml")
swf, err := Parse(content)
require.NoError(t, err)
builder := &strings.Builder{}
for _, v := range swf {
id, job := v.Job()
require.NoError(t, v.SetJob(id, job.EraseNeeds()))
if builder.Len() > 0 {
builder.WriteString("---\n")
}
encoder := yaml.NewEncoder(builder)
encoder.SetIndent(2)
require.NoError(t, encoder.Encode(v))
}
assert.Equal(t, string(want), builder.String())
})
}
func TestParseMappingNode(t *testing.T) {
tests := []struct {
input string
scalars []string
datas []interface{}
}{
{
input: "on:\n push:\n branches:\n - master",
scalars: []string{"push"},
datas: []interface {
}{
map[string]interface{}{
"branches": []interface{}{"master"},
},
},
},
{
input: "on:\n branch_protection_rule:\n types: [created, deleted]",
scalars: []string{"branch_protection_rule"},
datas: []interface{}{
map[string]interface{}{
"types": []interface{}{"created", "deleted"},
},
},
},
{
input: "on:\n project:\n types: [created, deleted]\n milestone:\n types: [opened, deleted]",
scalars: []string{"project", "milestone"},
datas: []interface{}{
map[string]interface{}{
"types": []interface{}{"created", "deleted"},
},
map[string]interface{}{
"types": []interface{}{"opened", "deleted"},
},
},
},
{
input: "on:\n pull_request:\n types:\n - opened\n branches:\n - 'releases/**'",
scalars: []string{"pull_request"},
datas: []interface{}{
map[string]interface{}{
"types": []interface{}{"opened"},
"branches": []interface{}{"releases/**"},
},
},
},
{
input: "on:\n push:\n branches:\n - main\n pull_request:\n types:\n - opened\n branches:\n - '**'",
scalars: []string{"push", "pull_request"},
datas: []interface{}{
map[string]interface{}{
"branches": []interface{}{"main"},
},
map[string]interface{}{
"types": []interface{}{"opened"},
"branches": []interface{}{"**"},
},
},
},
{
input: "on:\n schedule:\n - cron: '20 6 * * *'",
scalars: []string{"schedule"},
datas: []interface{}{
[]interface{}{map[string]interface{}{
"cron": "20 6 * * *",
}},
},
},
}
for _, test := range tests {
t.Run(test.input, func(t *testing.T) {
workflow, err := model.ReadWorkflow(strings.NewReader(test.input))
assert.NoError(t, err)
scalars, datas, err := parseMappingNode[interface{}](&workflow.RawOn)
assert.NoError(t, err)
assert.EqualValues(t, test.scalars, scalars, fmt.Sprintf("%#v", scalars))
assert.EqualValues(t, test.datas, datas, fmt.Sprintf("%#v", datas))
})
}
}


@@ -0,0 +1,8 @@
name: test
jobs:
job1:
name: job1
runs-on: linux
steps:
- run: echo job-1
-


@@ -0,0 +1,7 @@
name: test
jobs:
job1:
name: job1
runs-on: linux
steps:
- run: echo job-1


@@ -0,0 +1,16 @@
name: test
jobs:
job1:
runs-on: linux
steps:
- run: uname -a
job2:
runs-on: linux
steps:
- run: uname -a
needs: job1
job3:
runs-on: linux
steps:
- run: uname -a
needs: [job1, job2]


@@ -0,0 +1,23 @@
name: test
jobs:
job1:
name: job1
runs-on: linux
steps:
- run: uname -a
---
name: test
jobs:
job2:
name: job2
runs-on: linux
steps:
- run: uname -a
---
name: test
jobs:
job3:
name: job3
runs-on: linux
steps:
- run: uname -a


@@ -0,0 +1,16 @@
name: test
jobs:
job1:
runs-on: linux
steps:
- run: uname -a
job2:
runs-on: linux
steps:
- run: uname -a
needs: job1
job3:
runs-on: linux
steps:
- run: uname -a
needs: [job1, job2]


@@ -0,0 +1,25 @@
name: test
jobs:
job1:
name: job1
runs-on: linux
steps:
- run: uname -a
---
name: test
jobs:
job2:
name: job2
needs: job1
runs-on: linux
steps:
- run: uname -a
---
name: test
jobs:
job3:
name: job3
needs: [job1, job2]
runs-on: linux
steps:
- run: uname -a

View File

@@ -0,0 +1,14 @@
name: test
jobs:
job1:
name: job1
runs-on: linux
uses: .gitea/workflows/build.yml
secrets:
secret: hideme
job2:
name: job2
runs-on: linux
uses: .gitea/workflows/build.yml
secrets: inherit

View File

@@ -0,0 +1,16 @@
name: test
jobs:
job1:
name: job1
runs-on: linux
uses: .gitea/workflows/build.yml
secrets:
secret: hideme
---
name: test
jobs:
job2:
name: job2
runs-on: linux
uses: .gitea/workflows/build.yml
secrets: inherit

pkg/jobparser/testdata/has_with.in.yaml
View File

@@ -0,0 +1,15 @@
name: test
jobs:
job1:
name: job1
runs-on: linux
uses: .gitea/workflows/build.yml
with:
package: service
job2:
name: job2
runs-on: linux
uses: .gitea/workflows/build.yml
with:
package: module

View File

@@ -0,0 +1,17 @@
name: test
jobs:
job1:
name: job1
runs-on: linux
uses: .gitea/workflows/build.yml
with:
package: service
---
name: test
jobs:
job2:
name: job2
runs-on: linux
uses: .gitea/workflows/build.yml
with:
package: module

View File

@@ -0,0 +1,22 @@
name: test
jobs:
zzz:
runs-on: linux
steps:
- run: echo zzz
job1:
runs-on: linux
steps:
- run: uname -a && go version
job2:
runs-on: linux
steps:
- run: uname -a && go version
job3:
runs-on: linux
steps:
- run: uname -a && go version
aaa:
runs-on: linux
steps:
- run: uname -a && go version

View File

@@ -0,0 +1,39 @@
name: test
jobs:
zzz:
name: zzz
runs-on: linux
steps:
- run: echo zzz
---
name: test
jobs:
job1:
name: job1
runs-on: linux
steps:
- run: uname -a && go version
---
name: test
jobs:
job2:
name: job2
runs-on: linux
steps:
- run: uname -a && go version
---
name: test
jobs:
job3:
name: job3
runs-on: linux
steps:
- run: uname -a && go version
---
name: test
jobs:
aaa:
name: aaa
runs-on: linux
steps:
- run: uname -a && go version

View File

@@ -0,0 +1,13 @@
name: test
jobs:
job1:
strategy:
matrix:
os: [ubuntu-22.04, ubuntu-20.04]
version: [1.17, 1.18, 1.19]
runs-on: ${{ matrix.os }}
steps:
- uses: actions/setup-go@v3
with:
go-version: ${{ matrix.version }}
- run: uname -a && go version

View File

@@ -0,0 +1,101 @@
name: test
jobs:
job1:
name: job1 (ubuntu-20.04, 1.17)
runs-on: ubuntu-20.04
steps:
- uses: actions/setup-go@v3
with:
go-version: ${{ matrix.version }}
- run: uname -a && go version
strategy:
matrix:
os:
- ubuntu-20.04
version:
- 1.17
---
name: test
jobs:
job1:
name: job1 (ubuntu-20.04, 1.18)
runs-on: ubuntu-20.04
steps:
- uses: actions/setup-go@v3
with:
go-version: ${{ matrix.version }}
- run: uname -a && go version
strategy:
matrix:
os:
- ubuntu-20.04
version:
- 1.18
---
name: test
jobs:
job1:
name: job1 (ubuntu-20.04, 1.19)
runs-on: ubuntu-20.04
steps:
- uses: actions/setup-go@v3
with:
go-version: ${{ matrix.version }}
- run: uname -a && go version
strategy:
matrix:
os:
- ubuntu-20.04
version:
- 1.19
---
name: test
jobs:
job1:
name: job1 (ubuntu-22.04, 1.17)
runs-on: ubuntu-22.04
steps:
- uses: actions/setup-go@v3
with:
go-version: ${{ matrix.version }}
- run: uname -a && go version
strategy:
matrix:
os:
- ubuntu-22.04
version:
- 1.17
---
name: test
jobs:
job1:
name: job1 (ubuntu-22.04, 1.18)
runs-on: ubuntu-22.04
steps:
- uses: actions/setup-go@v3
with:
go-version: ${{ matrix.version }}
- run: uname -a && go version
strategy:
matrix:
os:
- ubuntu-22.04
version:
- 1.18
---
name: test
jobs:
job1:
name: job1 (ubuntu-22.04, 1.19)
runs-on: ubuntu-22.04
steps:
- uses: actions/setup-go@v3
with:
go-version: ${{ matrix.version }}
- run: uname -a && go version
strategy:
matrix:
os:
- ubuntu-22.04
version:
- 1.19
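
The pair of files above shows a two-by-three matrix being expanded into six standalone single-job workflows. The snippet below is only an illustrative sketch of the underlying idea, a cartesian product over the matrix axes; the real jobparser also rewrites the job name and pins the chosen values back into the strategy block, as the expected output above shows.

package main

import (
    "fmt"
    "sort"
)

// expand computes the cartesian product of the matrix axes, e.g.
// {os: [a, b], version: [1, 2, 3]} -> 6 combinations.
func expand(matrix map[string][]string) []map[string]string {
    keys := make([]string, 0, len(matrix))
    for k := range matrix {
        keys = append(keys, k)
    }
    sort.Strings(keys) // deterministic axis order for this example

    combos := []map[string]string{{}}
    for _, k := range keys {
        var next []map[string]string
        for _, base := range combos {
            for _, v := range matrix[k] {
                c := map[string]string{}
                for bk, bv := range base {
                    c[bk] = bv
                }
                c[k] = v
                next = append(next, c)
            }
        }
        combos = next
    }
    return combos
}

func main() {
    m := map[string][]string{
        "os":      {"ubuntu-22.04", "ubuntu-20.04"},
        "version": {"1.17", "1.18", "1.19"},
    }
    for _, c := range expand(m) {
        fmt.Printf("job1 (%s, %s)\n", c["os"], c["version"])
    }
}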

View File

@@ -0,0 +1,18 @@
package jobparser
import (
"embed"
"path/filepath"
"testing"
"github.com/stretchr/testify/require"
)
//go:embed testdata
var testdata embed.FS
func ReadTestdata(t *testing.T, name string) []byte {
content, err := testdata.ReadFile(filepath.Join("testdata", name))
require.NoError(t, err)
return content
}

View File

@@ -20,7 +20,7 @@ func (a *ActionRunsUsing) UnmarshalYAML(unmarshal func(interface{}) error) error
// Force input to lowercase for case insensitive comparison
format := ActionRunsUsing(strings.ToLower(using))
switch format {
case ActionRunsUsingNode20, ActionRunsUsingNode16, ActionRunsUsingNode12, ActionRunsUsingDocker, ActionRunsUsingComposite:
case ActionRunsUsingNode20, ActionRunsUsingNode16, ActionRunsUsingNode12, ActionRunsUsingDocker, ActionRunsUsingComposite, ActionRunsUsingGo:
*a = format
default:
return fmt.Errorf(fmt.Sprintf("The runs.using key in action.yml must be one of: %v, got %s", []string{
@@ -29,6 +29,7 @@ func (a *ActionRunsUsing) UnmarshalYAML(unmarshal func(interface{}) error) error
ActionRunsUsingNode12,
ActionRunsUsingNode16,
ActionRunsUsingNode20,
ActionRunsUsingGo,
}, format))
}
return nil
@@ -45,6 +46,8 @@ const (
ActionRunsUsingDocker = "docker"
// ActionRunsUsingComposite for running composite
ActionRunsUsingComposite = "composite"
// ActionRunsUsingGo for running with go
ActionRunsUsingGo = "go"
)
// ActionRuns are a field in Action

View File

@@ -168,7 +168,7 @@ func (ghc *GithubContext) SetRepositoryAndOwner(ctx context.Context, githubInsta
if ghc.Repository == "" {
repo, err := git.FindGithubRepo(ctx, repoPath, githubInstance, remoteName)
if err != nil {
common.Logger(ctx).Warningf("unable to get git repo: %v", err)
common.Logger(ctx).Warningf("unable to get git repo (githubInstance: %v; remoteName: %v, repoPath: %v): %v", githubInstance, remoteName, repoPath, err)
return
}
ghc.Repository = repo

View File

@@ -148,12 +148,10 @@ func NewWorkflowPlanner(path string, noWorkflowRecurse bool) (WorkflowPlanner, e
workflow.Name = wf.workflowDirEntry.Name()
}
jobNameRegex := regexp.MustCompile(`^([[:alpha:]_][[:alnum:]_\-]*)$`)
for k := range workflow.Jobs {
if ok := jobNameRegex.MatchString(k); !ok {
_ = f.Close()
return nil, fmt.Errorf("workflow is not valid. '%s': Job name '%s' is invalid. Names must start with a letter or '_' and contain only alphanumeric characters, '-', or '_'", workflow.Name, k)
}
err = validateJobName(workflow)
if err != nil {
_ = f.Close()
return nil, err
}
wp.workflows = append(wp.workflows, workflow)
@@ -164,6 +162,49 @@ func NewWorkflowPlanner(path string, noWorkflowRecurse bool) (WorkflowPlanner, e
return wp, nil
}
// CombineWorkflowPlanner combines workflows to a WorkflowPlanner
func CombineWorkflowPlanner(workflows ...*Workflow) WorkflowPlanner {
return &workflowPlanner{
workflows: workflows,
}
}
func NewSingleWorkflowPlanner(name string, f io.Reader) (WorkflowPlanner, error) {
wp := new(workflowPlanner)
log.Debugf("Reading workflow %s", name)
workflow, err := ReadWorkflow(f)
if err != nil {
if err == io.EOF {
return nil, fmt.Errorf("unable to read workflow '%s': file is empty: %w", name, err)
}
return nil, fmt.Errorf("workflow is not valid. '%s': %w", name, err)
}
workflow.File = name
if workflow.Name == "" {
workflow.Name = name
}
err = validateJobName(workflow)
if err != nil {
return nil, err
}
wp.workflows = append(wp.workflows, workflow)
return wp, nil
}
func validateJobName(workflow *Workflow) error {
jobNameRegex := regexp.MustCompile(`^([[:alpha:]_][[:alnum:]_\-]*)$`)
for k := range workflow.Jobs {
if ok := jobNameRegex.MatchString(k); !ok {
return fmt.Errorf("workflow is not valid. '%s': Job name '%s' is invalid. Names must start with a letter or '_' and contain only alphanumeric characters, '-', or '_'", workflow.Name, k)
}
}
return nil
}
type workflowPlanner struct {
workflows []*Workflow
}
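
The extracted validateJobName helper enforces the same rule the planner previously checked inline: a job identifier must start with a letter or underscore and may contain only alphanumerics, '-' or '_'. A tiny standalone check of that regular expression (copied from the hunk above):

package main

import (
    "fmt"
    "regexp"
)

func main() {
    // Same pattern as in validateJobName above.
    jobNameRegex := regexp.MustCompile(`^([[:alpha:]_][[:alnum:]_\-]*)$`)
    for _, name := range []string{"build", "_lint", "deploy-prod", "1st-job", "job.with.dots"} {
        fmt.Printf("%-15s valid=%v\n", name, jobNameRegex.MatchString(name))
    }
    // build, _lint and deploy-prod are valid; 1st-job and job.with.dots are not.
}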

View File

@@ -66,6 +66,30 @@ func (w *Workflow) OnEvent(event string) interface{} {
return nil
}
func (w *Workflow) OnSchedule() []string {
schedules := w.OnEvent("schedule")
if schedules == nil {
return []string{}
}
switch val := schedules.(type) {
case []interface{}:
allSchedules := []string{}
for _, v := range val {
for k, cron := range v.(map[string]interface{}) {
if k != "cron" {
continue
}
allSchedules = append(allSchedules, cron.(string))
}
}
return allSchedules
default:
}
return []string{}
}
type WorkflowDispatchInput struct {
Description string `yaml:"description"`
Required bool `yaml:"required"`
@@ -79,22 +103,40 @@ type WorkflowDispatch struct {
}
func (w *Workflow) WorkflowDispatchConfig() *WorkflowDispatch {
if w.RawOn.Kind != yaml.MappingNode {
switch w.RawOn.Kind {
case yaml.ScalarNode:
var val string
if !decodeNode(w.RawOn, &val) {
return nil
}
if val == "workflow_dispatch" {
return &WorkflowDispatch{}
}
case yaml.SequenceNode:
var val []string
if !decodeNode(w.RawOn, &val) {
return nil
}
for _, v := range val {
if v == "workflow_dispatch" {
return &WorkflowDispatch{}
}
}
case yaml.MappingNode:
var val map[string]yaml.Node
if !decodeNode(w.RawOn, &val) {
return nil
}
n, found := val["workflow_dispatch"]
var workflowDispatch WorkflowDispatch
if found && decodeNode(n, &workflowDispatch) {
return &workflowDispatch
}
default:
return nil
}
var val map[string]yaml.Node
if !decodeNode(w.RawOn, &val) {
return nil
}
var config WorkflowDispatch
node := val["workflow_dispatch"]
if !decodeNode(node, &config) {
return nil
}
return &config
return nil
}
type WorkflowCallInput struct {
@@ -531,10 +573,14 @@ type ContainerSpec struct {
Args string
Name string
Reuse bool
// Gitea specific
Cmd []string `yaml:"cmd"`
}
// Step is the structure of one step in a job
type Step struct {
Number int `yaml:"-"`
ID string `yaml:"id"`
If yaml.Node `yaml:"if"`
Name string `yaml:"name"`
@@ -581,7 +627,7 @@ func (s *Step) GetEnv() map[string]string {
func (s *Step) ShellCommand() string {
shellCommand := ""
//Reference: https://github.com/actions/runner/blob/8109c962f09d9acc473d92c595ff43afceddb347/src/Runner.Worker/Handlers/ScriptHandlerHelpers.cs#L9-L17
// Reference: https://github.com/actions/runner/blob/8109c962f09d9acc473d92c595ff43afceddb347/src/Runner.Worker/Handlers/ScriptHandlerHelpers.cs#L9-L17
switch s.Shell {
case "", "bash":
shellCommand = "bash --noprofile --norc -e -o pipefail {0}"

View File

@@ -7,6 +7,88 @@ import (
"github.com/stretchr/testify/assert"
)
func TestReadWorkflow_ScheduleEvent(t *testing.T) {
yaml := `
name: local-action-docker-url
on:
schedule:
- cron: '30 5 * * 1,3'
- cron: '30 5 * * 2,4'
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: ./actions/docker-url
`
workflow, err := ReadWorkflow(strings.NewReader(yaml))
assert.NoError(t, err, "read workflow should succeed")
schedules := workflow.OnEvent("schedule")
assert.Len(t, schedules, 2)
newSchedules := workflow.OnSchedule()
assert.Len(t, newSchedules, 2)
assert.Equal(t, "30 5 * * 1,3", newSchedules[0])
assert.Equal(t, "30 5 * * 2,4", newSchedules[1])
yaml = `
name: local-action-docker-url
on:
schedule:
test: '30 5 * * 1,3'
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: ./actions/docker-url
`
workflow, err = ReadWorkflow(strings.NewReader(yaml))
assert.NoError(t, err, "read workflow should succeed")
newSchedules = workflow.OnSchedule()
assert.Len(t, newSchedules, 0)
yaml = `
name: local-action-docker-url
on:
schedule:
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: ./actions/docker-url
`
workflow, err = ReadWorkflow(strings.NewReader(yaml))
assert.NoError(t, err, "read workflow should succeed")
newSchedules = workflow.OnSchedule()
assert.Len(t, newSchedules, 0)
yaml = `
name: local-action-docker-url
on: [push, tag]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: ./actions/docker-url
`
workflow, err = ReadWorkflow(strings.NewReader(yaml))
assert.NoError(t, err, "read workflow should succeed")
newSchedules = workflow.OnSchedule()
assert.Len(t, newSchedules, 0)
}
func TestReadWorkflow_StringEvent(t *testing.T) {
yaml := `
name: local-action-docker-url
@@ -417,3 +499,107 @@ func TestStep_ShellCommand(t *testing.T) {
})
}
}
func TestReadWorkflow_WorkflowDispatchConfig(t *testing.T) {
yaml := `
name: local-action-docker-url
`
workflow, err := ReadWorkflow(strings.NewReader(yaml))
assert.NoError(t, err, "read workflow should succeed")
workflowDispatch := workflow.WorkflowDispatchConfig()
assert.Nil(t, workflowDispatch)
yaml = `
name: local-action-docker-url
on: push
`
workflow, err = ReadWorkflow(strings.NewReader(yaml))
assert.NoError(t, err, "read workflow should succeed")
workflowDispatch = workflow.WorkflowDispatchConfig()
assert.Nil(t, workflowDispatch)
yaml = `
name: local-action-docker-url
on: workflow_dispatch
`
workflow, err = ReadWorkflow(strings.NewReader(yaml))
assert.NoError(t, err, "read workflow should succeed")
workflowDispatch = workflow.WorkflowDispatchConfig()
assert.NotNil(t, workflowDispatch)
assert.Nil(t, workflowDispatch.Inputs)
yaml = `
name: local-action-docker-url
on: [push, pull_request]
`
workflow, err = ReadWorkflow(strings.NewReader(yaml))
assert.NoError(t, err, "read workflow should succeed")
workflowDispatch = workflow.WorkflowDispatchConfig()
assert.Nil(t, workflowDispatch)
yaml = `
name: local-action-docker-url
on: [push, workflow_dispatch]
`
workflow, err = ReadWorkflow(strings.NewReader(yaml))
assert.NoError(t, err, "read workflow should succeed")
workflowDispatch = workflow.WorkflowDispatchConfig()
assert.NotNil(t, workflowDispatch)
assert.Nil(t, workflowDispatch.Inputs)
yaml = `
name: local-action-docker-url
on:
- push
- workflow_dispatch
`
workflow, err = ReadWorkflow(strings.NewReader(yaml))
assert.NoError(t, err, "read workflow should succeed")
workflowDispatch = workflow.WorkflowDispatchConfig()
assert.NotNil(t, workflowDispatch)
assert.Nil(t, workflowDispatch.Inputs)
yaml = `
name: local-action-docker-url
on:
push:
pull_request:
`
workflow, err = ReadWorkflow(strings.NewReader(yaml))
assert.NoError(t, err, "read workflow should succeed")
workflowDispatch = workflow.WorkflowDispatchConfig()
assert.Nil(t, workflowDispatch)
yaml = `
name: local-action-docker-url
on:
push:
pull_request:
workflow_dispatch:
inputs:
logLevel:
description: 'Log level'
required: true
default: 'warning'
type: choice
options:
- info
- warning
- debug
`
workflow, err = ReadWorkflow(strings.NewReader(yaml))
assert.NoError(t, err, "read workflow should succeed")
workflowDispatch = workflow.WorkflowDispatchConfig()
assert.NotNil(t, workflowDispatch)
assert.Equal(t, WorkflowDispatchInput{
Default: "warning",
Description: "Log level",
Options: []string{
"info",
"warning",
"debug",
},
Required: true,
Type: "choice",
}, workflowDispatch.Inputs["logLevel"])
}

View File

@@ -3,6 +3,7 @@ package runner
import (
"context"
"embed"
"errors"
"fmt"
"io"
"io/fs"
@@ -41,11 +42,24 @@ var trampoline embed.FS
func readActionImpl(ctx context.Context, step *model.Step, actionDir string, actionPath string, readFile actionYamlReader, writeFile fileWriter) (*model.Action, error) {
logger := common.Logger(ctx)
allErrors := []error{}
addError := func(fileName string, err error) {
if err != nil {
allErrors = append(allErrors, fmt.Errorf("failed to read '%s' from action '%s' with path '%s' of step %w", fileName, step.String(), actionPath, err))
} else {
// One successful read, clear error state
allErrors = nil
}
}
reader, closer, err := readFile("action.yml")
addError("action.yml", err)
if os.IsNotExist(err) {
reader, closer, err = readFile("action.yaml")
if err != nil {
if _, closer, err2 := readFile("Dockerfile"); err2 == nil {
addError("action.yaml", err)
if os.IsNotExist(err) {
_, closer, err := readFile("Dockerfile")
addError("Dockerfile", err)
if err == nil {
closer.Close()
action := &model.Action{
Name: "(Synthetic)",
@@ -90,10 +104,10 @@ func readActionImpl(ctx context.Context, step *model.Step, actionDir string, act
return action, nil
}
}
return nil, err
}
} else if err != nil {
return nil, err
}
if allErrors != nil {
return nil, errors.Join(allErrors...)
}
defer closer.Close()
@@ -110,9 +124,6 @@ func maybeCopyToActionDir(ctx context.Context, step actionStep, actionDir string
if stepModel.Type() != model.StepTypeUsesActionRemote {
return nil
}
if err := removeGitIgnore(ctx, actionDir); err != nil {
return err
}
var containerActionDirCopy string
containerActionDirCopy = strings.TrimSuffix(containerActionDir, actionPath)
@@ -121,6 +132,21 @@ func maybeCopyToActionDir(ctx context.Context, step actionStep, actionDir string
if !strings.HasSuffix(containerActionDirCopy, `/`) {
containerActionDirCopy += `/`
}
if rc.Config != nil && rc.Config.ActionCache != nil {
raction := step.(*stepActionRemote)
ta, err := rc.Config.ActionCache.GetTarArchive(ctx, raction.cacheDir, raction.resolvedSha, "")
if err != nil {
return err
}
defer ta.Close()
return rc.JobContainer.CopyTarStream(ctx, containerActionDirCopy, ta)
}
if err := removeGitIgnore(ctx, actionDir); err != nil {
return err
}
return rc.JobContainer.CopyDir(containerActionDirCopy, actionDir+"/", rc.Config.UseGitIgnore)(ctx)
}
@@ -171,6 +197,21 @@ func runActionImpl(step actionStep, actionDir string, remoteAction *remoteAction
}
return execAsComposite(step)(ctx)
case model.ActionRunsUsingGo:
if err := maybeCopyToActionDir(ctx, step, actionDir, actionPath, containerActionDir); err != nil {
return err
}
rc.ApplyExtraPath(ctx, step.getEnv())
execFileName := fmt.Sprintf("%s.out", action.Runs.Main)
buildArgs := []string{"go", "build", "-o", execFileName, action.Runs.Main}
execArgs := []string{filepath.Join(containerActionDir, execFileName)}
return common.NewPipelineExecutor(
rc.execJobContainer(buildArgs, *step.getEnv(), "", containerActionDir),
rc.execJobContainer(execArgs, *step.getEnv(), "", ""),
)(ctx)
default:
return fmt.Errorf(fmt.Sprintf("The runs.using key must be one of: %v, got %s", []string{
model.ActionRunsUsingDocker,
@@ -178,6 +219,7 @@ func runActionImpl(step actionStep, actionDir string, remoteAction *remoteAction
model.ActionRunsUsingNode16,
model.ActionRunsUsingNode20,
model.ActionRunsUsingComposite,
model.ActionRunsUsingGo,
}, action.Runs.Using))
}
}
@@ -265,6 +307,13 @@ func execAsDocker(ctx context.Context, step actionStep, actionName string, based
return err
}
defer buildContext.Close()
} else if rc.Config.ActionCache != nil {
rstep := step.(*stepActionRemote)
buildContext, err = rc.Config.ActionCache.GetTarArchive(ctx, rstep.cacheDir, rstep.resolvedSha, contextDir)
if err != nil {
return err
}
defer buildContext.Close()
}
prepImage = container.NewDockerBuildExecutor(container.NewDockerBuildExecutorInput{
ContextDir: contextDir,
@@ -366,23 +415,25 @@ func newStepContainer(ctx context.Context, step step, image string, cmd []string
networkMode = "default"
}
stepContainer := container.NewContainer(&container.NewContainerInput{
Cmd: cmd,
Entrypoint: entrypoint,
WorkingDir: rc.JobContainer.ToContainerPath(rc.Config.Workdir),
Image: image,
Username: rc.Config.Secrets["DOCKER_USERNAME"],
Password: rc.Config.Secrets["DOCKER_PASSWORD"],
Name: createContainerName(rc.jobContainerName(), stepModel.ID),
Env: envList,
Mounts: mounts,
NetworkMode: networkMode,
Binds: binds,
Stdout: logWriter,
Stderr: logWriter,
Privileged: rc.Config.Privileged,
UsernsMode: rc.Config.UsernsMode,
Platform: rc.Config.ContainerArchitecture,
Options: rc.Config.ContainerOptions,
Cmd: cmd,
Entrypoint: entrypoint,
WorkingDir: rc.JobContainer.ToContainerPath(rc.Config.Workdir),
Image: image,
Username: rc.Config.Secrets["DOCKER_USERNAME"],
Password: rc.Config.Secrets["DOCKER_PASSWORD"],
Name: createSimpleContainerName(rc.jobContainerName(), "STEP-"+stepModel.ID),
Env: envList,
Mounts: mounts,
NetworkMode: networkMode,
Binds: binds,
Stdout: logWriter,
Stderr: logWriter,
Privileged: rc.Config.Privileged,
UsernsMode: rc.Config.UsernsMode,
Platform: rc.Config.ContainerArchitecture,
Options: rc.Config.ContainerOptions,
AutoRemove: rc.Config.AutoRemove,
ValidVolumes: rc.Config.ValidVolumes,
})
return stepContainer
}
@@ -458,7 +509,8 @@ func hasPreStep(step actionStep) common.Conditional {
return action.Runs.Using == model.ActionRunsUsingComposite ||
((action.Runs.Using == model.ActionRunsUsingNode12 ||
action.Runs.Using == model.ActionRunsUsingNode16 ||
action.Runs.Using == model.ActionRunsUsingNode20) &&
action.Runs.Using == model.ActionRunsUsingNode20 ||
action.Runs.Using == model.ActionRunsUsingGo) &&
action.Runs.Pre != "")
}
}
@@ -517,6 +569,43 @@ func runPreStep(step actionStep) common.Executor {
}
return fmt.Errorf("missing steps in composite action")
case model.ActionRunsUsingGo:
// defaults in pre steps were missing, however provided inputs are available
populateEnvsFromInput(ctx, step.getEnv(), action, rc)
// todo: refactor into step
var actionDir string
var actionPath string
if _, ok := step.(*stepActionRemote); ok {
actionPath = newRemoteAction(stepModel.Uses).Path
actionDir = fmt.Sprintf("%s/%s", rc.ActionCacheDir(), safeFilename(stepModel.Uses))
} else {
actionDir = filepath.Join(rc.Config.Workdir, stepModel.Uses)
actionPath = ""
}
actionLocation := ""
if actionPath != "" {
actionLocation = path.Join(actionDir, actionPath)
} else {
actionLocation = actionDir
}
_, containerActionDir := getContainerActionPaths(stepModel, actionLocation, rc)
if err := maybeCopyToActionDir(ctx, step, actionDir, actionPath, containerActionDir); err != nil {
return err
}
rc.ApplyExtraPath(ctx, step.getEnv())
execFileName := fmt.Sprintf("%s.out", action.Runs.Pre)
buildArgs := []string{"go", "build", "-o", execFileName, action.Runs.Pre}
execArgs := []string{filepath.Join(containerActionDir, execFileName)}
return common.NewPipelineExecutor(
rc.execJobContainer(buildArgs, *step.getEnv(), "", containerActionDir),
rc.execJobContainer(execArgs, *step.getEnv(), "", ""),
)(ctx)
default:
return nil
}
@@ -554,7 +643,8 @@ func hasPostStep(step actionStep) common.Conditional {
return action.Runs.Using == model.ActionRunsUsingComposite ||
((action.Runs.Using == model.ActionRunsUsingNode12 ||
action.Runs.Using == model.ActionRunsUsingNode16 ||
action.Runs.Using == model.ActionRunsUsingNode20) &&
action.Runs.Using == model.ActionRunsUsingNode20 ||
action.Runs.Using == model.ActionRunsUsingGo) &&
action.Runs.Post != "")
}
}
@@ -610,6 +700,19 @@ func runPostStep(step actionStep) common.Executor {
}
return fmt.Errorf("missing steps in composite action")
case model.ActionRunsUsingGo:
populateEnvsFromSavedState(step.getEnv(), step, rc)
rc.ApplyExtraPath(ctx, step.getEnv())
execFileName := fmt.Sprintf("%s.out", action.Runs.Post)
buildArgs := []string{"go", "build", "-o", execFileName, action.Runs.Post}
execArgs := []string{filepath.Join(containerActionDir, execFileName)}
return common.NewPipelineExecutor(
rc.execJobContainer(buildArgs, *step.getEnv(), "", containerActionDir),
rc.execJobContainer(execArgs, *step.getEnv(), "", ""),
)(ctx)
default:
return nil
}
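
For the new ActionRunsUsingGo branches above, the runner compiles the file named by runs.main (or runs.pre / runs.post) with go build inside the job container and then executes the produced binary. The sketch below shows what such an action could look like; the action.yml shape and the INPUT_* environment convention are assumptions made for illustration, based on how other action types receive inputs, and are not defined by this diff.

// Hypothetical layout of a "go" action (illustration only):
//
//   action.yml:
//     name: hello-go
//     runs:
//       using: go
//       main: main.go
//
// main.go, compiled by the runner as `go build -o main.go.out main.go`
// and then executed from the action directory:
package main

import (
    "fmt"
    "os"
)

func main() {
    // Inputs are assumed to arrive as INPUT_<NAME> environment variables,
    // mirroring how node and composite actions receive them.
    name := os.Getenv("INPUT_NAME")
    if name == "" {
        name = "world"
    }
    fmt.Printf("Hello, %s!\n", name)
}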

View File

@@ -6,6 +6,7 @@ import (
"crypto/rand"
"encoding/hex"
"errors"
"fmt"
"io"
"io/fs"
"path"
@@ -86,6 +87,9 @@ func (c GoGitActionCache) Fetch(ctx context.Context, cacheDir, url, ref, token s
Auth: auth,
Force: true,
}); err != nil {
if tagOrSha && errors.Is(err, git.NoErrAlreadyUpToDate) {
return "", fmt.Errorf("couldn't find remote ref \"%s\"", ref)
}
return "", err
}
if tagOrSha {

View File

@@ -140,6 +140,7 @@ func (rc *RunContext) compositeExecutor(action *model.Action) *compositeSteps {
if step.ID == "" {
step.ID = fmt.Sprintf("%d", i)
}
step.Number = i
// create a copy of the step, since this composite action could
// run multiple times and we might modify the instance

View File

@@ -77,7 +77,8 @@ func (rc *RunContext) commandHandler(ctx context.Context) common.LineHandler {
logger.Infof(" \U00002753 %s", line)
}
return false
// return true to let gitea's logger handle these special outputs also
return true
}
}

View File

@@ -6,6 +6,7 @@ import (
"time"
"github.com/nektos/act/pkg/common"
"github.com/nektos/act/pkg/container"
"github.com/nektos/act/pkg/model"
)
@@ -63,6 +64,7 @@ func newJobExecutor(info jobInfo, sf stepFactory, rc *RunContext) common.Executo
if stepModel.ID == "" {
stepModel.ID = fmt.Sprintf("%d", i)
}
stepModel.Number = i
step, err := sf.newStep(stepModel, rc)
@@ -70,7 +72,19 @@ func newJobExecutor(info jobInfo, sf stepFactory, rc *RunContext) common.Executo
return common.NewErrorExecutor(err)
}
preSteps = append(preSteps, useStepLogger(rc, stepModel, stepStagePre, step.pre()))
preExec := step.pre()
preSteps = append(preSteps, useStepLogger(rc, stepModel, stepStagePre, func(ctx context.Context) error {
logger := common.Logger(ctx)
preErr := preExec(ctx)
if preErr != nil {
logger.Errorf("%v", preErr)
common.SetJobError(ctx, preErr)
} else if ctx.Err() != nil {
logger.Errorf("%v", ctx.Err())
common.SetJobError(ctx, ctx.Err())
}
return preErr
}))
stepExec := step.main()
steps = append(steps, useStepLogger(rc, stepModel, stepStageMain, func(ctx context.Context) error {
@@ -108,6 +122,18 @@ func newJobExecutor(info jobInfo, sf stepFactory, rc *RunContext) common.Executo
if err = info.stopContainer()(ctx); err != nil {
logger.Errorf("Error while stop job container: %v", err)
}
if !rc.IsHostEnv(ctx) && rc.Config.ContainerNetworkMode == "" {
// clean network in docker mode only
// if the value of `ContainerNetworkMode` is empty string,
// it means that the network to which containers are connecting is created by `act_runner`,
// so, we should remove the network at last.
networkName, _ := rc.networkName()
logger.Infof("Cleaning up network for job %s, and network name is: %s", rc.JobName, networkName)
if err := container.NewDockerNetworkRemoveExecutor(networkName)(ctx); err != nil {
logger.Errorf("Error while cleaning network: %v", err)
}
}
}
setJobResult(ctx, info, rc, jobError == nil)
setJobOutputs(ctx, rc)
@@ -179,7 +205,7 @@ func setJobOutputs(ctx context.Context, rc *RunContext) {
func useStepLogger(rc *RunContext, stepModel *model.Step, stage stepStage, executor common.Executor) common.Executor {
return func(ctx context.Context) error {
ctx = withStepLogger(ctx, stepModel.ID, rc.ExprEval.Interpolate(ctx, stepModel.String()), stage.String())
ctx = withStepLogger(ctx, stepModel.Number, stepModel.ID, rc.ExprEval.Interpolate(ctx, stepModel.String()), stage.String())
rawLogger := common.Logger(ctx).WithField("raw_output", true)
logWriter := common.NewLineWriter(rc.commandHandler(ctx), func(s string) bool {

View File

@@ -96,6 +96,17 @@ func WithJobLogger(ctx context.Context, jobID string, jobName string, config *Co
logger.SetFormatter(formatter)
}
{ // Adapt to Gitea
if hook := common.LoggerHook(ctx); hook != nil {
logger.AddHook(hook)
}
if config.JobLoggerLevel != nil {
logger.SetLevel(*config.JobLoggerLevel)
} else {
logger.SetLevel(logrus.TraceLevel)
}
}
logger.SetFormatter(&maskedFormatter{
Formatter: logger.Formatter,
masker: valueMasker(config.InsecureSecrets, config.Secrets),
@@ -132,11 +143,12 @@ func WithCompositeStepLogger(ctx context.Context, stepID string) context.Context
}).WithContext(ctx))
}
func withStepLogger(ctx context.Context, stepID string, stepName string, stageName string) context.Context {
func withStepLogger(ctx context.Context, stepNumber int, stepID, stepName, stageName string) context.Context {
rtn := common.Logger(ctx).WithFields(logrus.Fields{
"step": stepName,
"stepID": []string{stepID},
"stage": stageName,
"stepNumber": stepNumber,
"step": stepName,
"stepID": []string{stepID},
"stage": stageName,
})
return common.WithLogger(ctx, rtn)
}

View File

@@ -1,6 +1,7 @@
package runner
import (
"archive/tar"
"context"
"errors"
"fmt"
@@ -8,6 +9,7 @@ import (
"os"
"path"
"regexp"
"strings"
"sync"
"github.com/nektos/act/pkg/common"
@@ -16,15 +18,45 @@ import (
)
func newLocalReusableWorkflowExecutor(rc *RunContext) common.Executor {
return newReusableWorkflowExecutor(rc, rc.Config.Workdir, rc.Run.Job().Uses)
if !rc.Config.NoSkipCheckout {
fullPath := rc.Run.Job().Uses
fileName := path.Base(fullPath)
workflowDir := strings.TrimSuffix(fullPath, path.Join("/", fileName))
workflowDir = strings.TrimPrefix(workflowDir, "./")
return common.NewPipelineExecutor(
newReusableWorkflowExecutor(rc, workflowDir, fileName),
)
}
// ./.gitea/workflows/wf.yml -> .gitea/workflows/wf.yml
trimmedUses := strings.TrimPrefix(rc.Run.Job().Uses, "./")
// uses string format is {owner}/{repo}/.{git_platform}/workflows/{filename}@{ref}
uses := fmt.Sprintf("%s/%s@%s", rc.Config.PresetGitHubContext.Repository, trimmedUses, rc.Config.PresetGitHubContext.Sha)
remoteReusableWorkflow := newRemoteReusableWorkflowWithPlat(rc.Config.GitHubInstance, uses)
if remoteReusableWorkflow == nil {
return common.NewErrorExecutor(fmt.Errorf("expected format {owner}/{repo}/.{git_platform}/workflows/{filename}@{ref}. Actual '%s' Input string was not in a correct format", uses))
}
workflowDir := fmt.Sprintf("%s/%s", rc.ActionCacheDir(), safeFilename(uses))
// If the repository is private, we need a token to clone it
token := rc.Config.GetToken()
return common.NewPipelineExecutor(
newMutexExecutor(cloneIfRequired(rc, *remoteReusableWorkflow, workflowDir, token)),
newReusableWorkflowExecutor(rc, workflowDir, remoteReusableWorkflow.FilePath()),
)
}
func newRemoteReusableWorkflowExecutor(rc *RunContext) common.Executor {
uses := rc.Run.Job().Uses
remoteReusableWorkflow := newRemoteReusableWorkflow(uses)
remoteReusableWorkflow := newRemoteReusableWorkflowWithPlat(rc.Config.GitHubInstance, uses)
if remoteReusableWorkflow == nil {
return common.NewErrorExecutor(fmt.Errorf("expected format {owner}/{repo}/.github/workflows/{filename}@{ref}. Actual '%s' Input string was not in a correct format", uses))
return common.NewErrorExecutor(fmt.Errorf("expected format {owner}/{repo}/.{git_platform}/workflows/{filename}@{ref}. Actual '%s' Input string was not in a correct format", uses))
}
// uses with safe filename makes the target directory look something like this {owner}-{repo}-.github-workflows-{filename}@{ref}
@@ -33,12 +65,54 @@ func newRemoteReusableWorkflowExecutor(rc *RunContext) common.Executor {
filename := fmt.Sprintf("%s/%s@%s", remoteReusableWorkflow.Org, remoteReusableWorkflow.Repo, remoteReusableWorkflow.Ref)
workflowDir := fmt.Sprintf("%s/%s", rc.ActionCacheDir(), safeFilename(filename))
// FIXME: if the reusable workflow is from a private repository, we need to provide a token to access the repository.
token := ""
if rc.Config.ActionCache != nil {
return newActionCacheReusableWorkflowExecutor(rc, filename, remoteReusableWorkflow)
}
return common.NewPipelineExecutor(
newMutexExecutor(cloneIfRequired(rc, *remoteReusableWorkflow, workflowDir)),
newReusableWorkflowExecutor(rc, workflowDir, fmt.Sprintf("./.github/workflows/%s", remoteReusableWorkflow.Filename)),
newMutexExecutor(cloneIfRequired(rc, *remoteReusableWorkflow, workflowDir, token)),
newReusableWorkflowExecutor(rc, workflowDir, remoteReusableWorkflow.FilePath()),
)
}
func newActionCacheReusableWorkflowExecutor(rc *RunContext, filename string, remoteReusableWorkflow *remoteReusableWorkflow) common.Executor {
return func(ctx context.Context) error {
ghctx := rc.getGithubContext(ctx)
remoteReusableWorkflow.URL = ghctx.ServerURL
sha, err := rc.Config.ActionCache.Fetch(ctx, filename, remoteReusableWorkflow.CloneURL(), remoteReusableWorkflow.Ref, ghctx.Token)
if err != nil {
return err
}
archive, err := rc.Config.ActionCache.GetTarArchive(ctx, filename, sha, fmt.Sprintf(".github/workflows/%s", remoteReusableWorkflow.Filename))
if err != nil {
return err
}
defer archive.Close()
treader := tar.NewReader(archive)
if _, err = treader.Next(); err != nil {
return err
}
planner, err := model.NewSingleWorkflowPlanner(remoteReusableWorkflow.Filename, treader)
if err != nil {
return err
}
plan, err := planner.PlanEvent("workflow_call")
if err != nil {
return err
}
runner, err := NewReusableWorkflowRunner(rc)
if err != nil {
return err
}
return runner.NewPlanExecutor(plan)(ctx)
}
}
var (
executorLock sync.Mutex
)
@@ -52,7 +126,7 @@ func newMutexExecutor(executor common.Executor) common.Executor {
}
}
func cloneIfRequired(rc *RunContext, remoteReusableWorkflow remoteReusableWorkflow, targetDirectory string) common.Executor {
func cloneIfRequired(rc *RunContext, remoteReusableWorkflow remoteReusableWorkflow, targetDirectory, token string) common.Executor {
return common.NewConditionalExecutor(
func(ctx context.Context) bool {
_, err := os.Stat(targetDirectory)
@@ -60,12 +134,16 @@ func cloneIfRequired(rc *RunContext, remoteReusableWorkflow remoteReusableWorkfl
return notExists
},
func(ctx context.Context) error {
remoteReusableWorkflow.URL = rc.getGithubContext(ctx).ServerURL
// Do not change the remoteReusableWorkflow.URL, because:
// 1. Gitea doesn't support specifying GithubContext.ServerURL by the GITHUB_SERVER_URL env
// 2. Gitea has already full URL with rc.Config.GitHubInstance when calling newRemoteReusableWorkflowWithPlat
// remoteReusableWorkflow.URL = rc.getGithubContext(ctx).ServerURL
return git.NewGitCloneExecutor(git.NewGitCloneExecutorInput{
URL: remoteReusableWorkflow.CloneURL(),
Ref: remoteReusableWorkflow.Ref,
Dir: targetDirectory,
Token: rc.Config.Token,
URL: remoteReusableWorkflow.CloneURL(),
Ref: remoteReusableWorkflow.Ref,
Dir: targetDirectory,
Token: token,
OfflineMode: rc.Config.ActionOfflineMode,
})(ctx)
},
nil,
@@ -111,12 +189,44 @@ type remoteReusableWorkflow struct {
Repo string
Filename string
Ref string
GitPlatform string
}
func (r *remoteReusableWorkflow) CloneURL() string {
return fmt.Sprintf("%s/%s/%s", r.URL, r.Org, r.Repo)
// In Gitea, r.URL always has the protocol prefix, we don't need to add extra prefix in this case.
if strings.HasPrefix(r.URL, "http://") || strings.HasPrefix(r.URL, "https://") {
return fmt.Sprintf("%s/%s/%s", r.URL, r.Org, r.Repo)
}
return fmt.Sprintf("https://%s/%s/%s", r.URL, r.Org, r.Repo)
}
func (r *remoteReusableWorkflow) FilePath() string {
return fmt.Sprintf("./.%s/workflows/%s", r.GitPlatform, r.Filename)
}
// For Gitea
// newRemoteReusableWorkflowWithPlat create a `remoteReusableWorkflow`
// workflows from `.gitea/workflows` and `.github/workflows` are supported
func newRemoteReusableWorkflowWithPlat(url, uses string) *remoteReusableWorkflow {
// GitHub docs:
// https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_iduses
r := regexp.MustCompile(`^([^/]+)/([^/]+)/\.([^/]+)/workflows/([^@]+)@(.*)$`)
matches := r.FindStringSubmatch(uses)
if len(matches) != 6 {
return nil
}
return &remoteReusableWorkflow{
Org: matches[1],
Repo: matches[2],
GitPlatform: matches[3],
Filename: matches[4],
Ref: matches[5],
URL: url,
}
}
// deprecated: use newRemoteReusableWorkflowWithPlat
func newRemoteReusableWorkflow(uses string) *remoteReusableWorkflow {
// GitHub docs:
// https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_iduses

View File

@@ -81,11 +81,16 @@ func (rc *RunContext) GetEnv() map[string]string {
}
}
rc.Env["ACT"] = "true"
if !rc.Config.NoSkipCheckout {
rc.Env["ACT_SKIP_CHECKOUT"] = "true"
}
return rc.Env
}
func (rc *RunContext) jobContainerName() string {
return createContainerName("act", rc.String())
return createSimpleContainerName(rc.Config.ContainerNamePrefix, "WORKFLOW-"+rc.Run.Workflow.Name, "JOB-"+rc.Name)
}
// networkName return the name of the network which will be created by `act` automatically for job,
@@ -135,7 +140,7 @@ func (rc *RunContext) GetBindsAndMounts() ([]string, map[string]string) {
ext := container.LinuxContainerEnvironmentExtensions{}
mounts := map[string]string{
"act-toolcache": "/toolcache",
"act-toolcache": "/opt/hostedtoolcache",
name + "-env": ext.GetActPath(),
}
@@ -167,6 +172,14 @@ func (rc *RunContext) GetBindsAndMounts() ([]string, map[string]string) {
mounts[name] = ext.ToContainerPath(rc.Config.Workdir)
}
// For Gitea
// add some default binds and mounts to ValidVolumes
rc.Config.ValidVolumes = append(rc.Config.ValidVolumes, "act-toolcache")
rc.Config.ValidVolumes = append(rc.Config.ValidVolumes, name)
rc.Config.ValidVolumes = append(rc.Config.ValidVolumes, name+"-env")
// TODO: add a new configuration to control whether the docker daemon can be mounted
rc.Config.ValidVolumes = append(rc.Config.ValidVolumes, getDockerDaemonSocketMountPath(rc.Config.ContainerDaemonSocket))
return binds, mounts
}
@@ -261,6 +274,9 @@ func (rc *RunContext) startJobContainer() common.Executor {
logger.Infof("\U0001f680 Start image=%s", image)
name := rc.jobContainerName()
// For gitea, to support --volumes-from <container_name_or_id> in options.
// We need to set the container name to the environment variable.
rc.Env["JOB_CONTAINER_NAME"] = name
envList := make([]string, 0)
@@ -274,9 +290,13 @@ func (rc *RunContext) startJobContainer() common.Executor {
binds, mounts := rc.GetBindsAndMounts()
// specify the network to which the container will connect when `docker create` stage. (like execute command line: docker create --network <networkName> <image>)
// if using service containers, will create a new network for the containers.
// and it will be removed after at last.
networkName, createAndDeleteNetwork := rc.networkName()
networkName := string(rc.Config.ContainerNetworkMode)
var createAndDeleteNetwork bool
if networkName == "" {
// if networkName is empty string, will create a new network for the containers.
// and it will be removed after at last.
networkName, createAndDeleteNetwork = rc.networkName()
}
// add service containers
for serviceID, spec := range rc.Run.Job().Services {
@@ -289,6 +309,11 @@ func (rc *RunContext) startJobContainer() common.Executor {
for k, v := range interpolatedEnvs {
envs = append(envs, fmt.Sprintf("%s=%s", k, v))
}
interpolatedCmd := make([]string, 0, len(spec.Cmd))
for _, v := range spec.Cmd {
interpolatedCmd = append(interpolatedCmd, rc.ExprEval.Interpolate(ctx, v))
}
username, password, err = rc.handleServiceCredentials(ctx, spec.Credentials)
if err != nil {
return fmt.Errorf("failed to handle service %s credentials: %w", serviceID, err)
@@ -316,6 +341,7 @@ func (rc *RunContext) startJobContainer() common.Executor {
Image: rc.ExprEval.Interpolate(ctx, spec.Image),
Username: username,
Password: password,
Cmd: interpolatedCmd,
Env: envs,
Mounts: serviceMounts,
Binds: serviceBinds,
@@ -324,11 +350,13 @@ func (rc *RunContext) startJobContainer() common.Executor {
Privileged: rc.Config.Privileged,
UsernsMode: rc.Config.UsernsMode,
Platform: rc.Config.ContainerArchitecture,
AutoRemove: rc.Config.AutoRemove,
Options: rc.ExprEval.Interpolate(ctx, spec.Options),
NetworkMode: networkName,
NetworkAliases: []string{serviceID},
ExposedPorts: exposedPorts,
PortBindings: portBindings,
ValidVolumes: rc.Config.ValidVolumes,
})
rc.ServiceContainers = append(rc.ServiceContainers, c)
}
@@ -391,6 +419,8 @@ func (rc *RunContext) startJobContainer() common.Executor {
UsernsMode: rc.Config.UsernsMode,
Platform: rc.Config.ContainerArchitecture,
Options: rc.options(ctx),
AutoRemove: rc.Config.AutoRemove,
ValidVolumes: rc.Config.ValidVolumes,
})
if rc.JobContainer == nil {
return errors.New("Failed to create job container")
@@ -400,7 +430,7 @@ func (rc *RunContext) startJobContainer() common.Executor {
rc.pullServicesImages(rc.Config.ForcePull),
rc.JobContainer.Pull(rc.Config.ForcePull),
rc.stopJobContainer(),
container.NewDockerNetworkCreateExecutor(networkName).IfBool(createAndDeleteNetwork),
container.NewDockerNetworkCreateExecutor(networkName).IfBool(!rc.IsHostEnv(ctx) && rc.Config.ContainerNetworkMode == ""), // if the value of `ContainerNetworkMode` is empty string, then will create a new network for containers.
rc.startServiceContainers(networkName),
rc.JobContainer.Create(rc.Config.ContainerCapAdd, rc.Config.ContainerCapDrop),
rc.JobContainer.Start(false),
@@ -639,6 +669,17 @@ func (rc *RunContext) runsOnImage(ctx context.Context) string {
common.Logger(ctx).Errorf("'runs-on' key not defined in %s", rc.String())
}
runsOn := rc.Run.Job().RunsOn()
for i, v := range runsOn {
runsOn[i] = rc.ExprEval.Interpolate(ctx, v)
}
if pick := rc.Config.PlatformPicker; pick != nil {
if image := pick(runsOn); image != "" {
return image
}
}
for _, platformName := range rc.runsOnPlatformNames(ctx) {
image := rc.Config.Platforms[strings.ToLower(platformName)]
if image != "" {
@@ -676,7 +717,7 @@ func (rc *RunContext) options(ctx context.Context) string {
job := rc.Run.Job()
c := job.Container()
if c != nil {
return rc.ExprEval.Interpolate(ctx, c.Options)
return rc.Config.ContainerOptions + " " + rc.ExprEval.Interpolate(ctx, c.Options)
}
return rc.Config.ContainerOptions
@@ -725,6 +766,7 @@ func mergeMaps(maps ...map[string]string) map[string]string {
return rtnMap
}
// deprecated: use createSimpleContainerName
func createContainerName(parts ...string) string {
name := strings.Join(parts, "-")
pattern := regexp.MustCompile("[^a-zA-Z0-9]")
@@ -738,6 +780,22 @@ func createContainerName(parts ...string) string {
return fmt.Sprintf("%s-%x", trimmedName, hash)
}
func createSimpleContainerName(parts ...string) string {
pattern := regexp.MustCompile("[^a-zA-Z0-9-]")
name := make([]string, 0, len(parts))
for _, v := range parts {
v = pattern.ReplaceAllString(v, "-")
v = strings.Trim(v, "-")
for strings.Contains(v, "--") {
v = strings.ReplaceAll(v, "--", "-")
}
if v != "" {
name = append(name, v)
}
}
return strings.Join(name, "_")
}
func trimToLen(s string, l int) string {
if l < 0 {
l = 0
@@ -820,6 +878,36 @@ func (rc *RunContext) getGithubContext(ctx context.Context) *model.GithubContext
ghc.Actor = "nektos/act"
}
{ // Adapt to Gitea
if preset := rc.Config.PresetGitHubContext; preset != nil {
ghc.Event = preset.Event
ghc.RunID = preset.RunID
ghc.RunNumber = preset.RunNumber
ghc.Actor = preset.Actor
ghc.Repository = preset.Repository
ghc.EventName = preset.EventName
ghc.Sha = preset.Sha
ghc.Ref = preset.Ref
ghc.RefName = preset.RefName
ghc.RefType = preset.RefType
ghc.HeadRef = preset.HeadRef
ghc.BaseRef = preset.BaseRef
ghc.Token = preset.Token
ghc.RepositoryOwner = preset.RepositoryOwner
ghc.RetentionDays = preset.RetentionDays
instance := rc.Config.GitHubInstance
if !strings.HasPrefix(instance, "http://") &&
!strings.HasPrefix(instance, "https://") {
instance = "https://" + instance
}
ghc.ServerURL = instance
ghc.APIURL = instance + "/api/v1" // the version of Gitea is v1
ghc.GraphQLURL = "" // Gitea doesn't support graphql
return ghc
}
}
if rc.EventJSON != "" {
err := json.Unmarshal([]byte(rc.EventJSON), &ghc.Event)
if err != nil {
@@ -849,6 +937,18 @@ func (rc *RunContext) getGithubContext(ctx context.Context) *model.GithubContext
ghc.APIURL = fmt.Sprintf("https://%s/api/v3", rc.Config.GitHubInstance)
ghc.GraphQLURL = fmt.Sprintf("https://%s/api/graphql", rc.Config.GitHubInstance)
}
{ // Adapt to Gitea
instance := rc.Config.GitHubInstance
if !strings.HasPrefix(instance, "http://") &&
!strings.HasPrefix(instance, "https://") {
instance = "https://" + instance
}
ghc.ServerURL = instance
ghc.APIURL = instance + "/api/v1" // the version of Gitea is v1
ghc.GraphQLURL = "" // Gitea doesn't support graphql
}
// allow to be overridden by user
if rc.Config.Env["GITHUB_SERVER_URL"] != "" {
ghc.ServerURL = rc.Config.Env["GITHUB_SERVER_URL"]
@@ -936,6 +1036,17 @@ func (rc *RunContext) withGithubEnv(ctx context.Context, github *model.GithubCon
env["GITHUB_API_URL"] = github.APIURL
env["GITHUB_GRAPHQL_URL"] = github.GraphQLURL
{ // Adapt to Gitea
instance := rc.Config.GitHubInstance
if !strings.HasPrefix(instance, "http://") &&
!strings.HasPrefix(instance, "https://") {
instance = "https://" + instance
}
env["GITHUB_SERVER_URL"] = instance
env["GITHUB_API_URL"] = instance + "/api/v1" // the version of Gitea is v1
env["GITHUB_GRAPHQL_URL"] = "" // Gitea doesn't support graphql
}
if rc.Config.ArtifactServerPath != "" {
setActionRuntimeVars(rc, env)
}

View File

@@ -682,3 +682,24 @@ func TestRunContextGetEnv(t *testing.T) {
})
}
}
func Test_createSimpleContainerName(t *testing.T) {
tests := []struct {
parts []string
want string
}{
{
parts: []string{"a--a", "BB正", "c-C"},
want: "a-a_BB_c-C",
},
{
parts: []string{"a-a", "", "-"},
want: "a-a",
},
}
for _, tt := range tests {
t.Run(strings.Join(tt.parts, " "), func(t *testing.T) {
assert.Equalf(t, tt.want, createSimpleContainerName(tt.parts...), "createSimpleContainerName(%v)", tt.parts)
})
}
}

View File

@@ -6,11 +6,13 @@ import (
"fmt"
"os"
"runtime"
"time"
docker_container "github.com/docker/docker/api/types/container"
log "github.com/sirupsen/logrus"
"github.com/nektos/act/pkg/common"
"github.com/nektos/act/pkg/model"
log "github.com/sirupsen/logrus"
)
// Runner provides capabilities to run GitHub actions
@@ -23,6 +25,7 @@ type Config struct {
Actor string // the user that triggered the event
Workdir string // path to working directory
ActionCacheDir string // path used for caching action contents
ActionOfflineMode bool // when offline, use caching action contents
BindWorkdir bool // bind the workdir to the job container
EventName string // name of event to run
EventPath string // path to JSON file to use for event.json in containers
@@ -59,6 +62,25 @@ type Config struct {
ReplaceGheActionTokenWithGithubCom string // Token of private action repo on GitHub.
Matrix map[string]map[string]bool // Matrix config to run
ContainerNetworkMode docker_container.NetworkMode // the network mode of job containers (the value of --network)
ActionCache ActionCache // Use a custom ActionCache Implementation
PresetGitHubContext *model.GithubContext // the preset github context, overrides some fields like DefaultBranch, Env, Secrets etc.
EventJSON string // the content of JSON file to use for event.json in containers, overrides EventPath
ContainerNamePrefix string // the prefix of container name
ContainerMaxLifetime time.Duration // the max lifetime of job containers
DefaultActionInstance string // the default actions web site
PlatformPicker func(labels []string) string // platform picker, it will take precedence over Platforms if isn't nil
JobLoggerLevel *log.Level // the level of job logger
ValidVolumes []string // only volumes (and bind mounts) in this slice can be mounted on the job container or service containers
}
// GetToken: Adapt to Gitea
func (c Config) GetToken() string {
token := c.Secrets["GITHUB_TOKEN"]
if c.Secrets["GITEA_TOKEN"] != "" {
token = c.Secrets["GITEA_TOKEN"]
}
return token
}
type caller struct {
@@ -82,7 +104,9 @@ func New(runnerConfig *Config) (Runner, error) {
func (runner *runnerImpl) configure() (Runner, error) {
runner.eventJSON = "{}"
if runner.config.EventPath != "" {
if runner.config.EventJSON != "" {
runner.eventJSON = runner.config.EventJSON
} else if runner.config.EventPath != "" {
log.Debugf("Reading event.json from %s", runner.config.EventPath)
eventJSONBytes, err := os.ReadFile(runner.config.EventPath)
if err != nil {
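
The Gitea-oriented fields added to runner.Config above are plain knobs; a hypothetical embedder of this package (assumed purely for illustration, with placeholder values) might wire a few of them up like this:

package main

import (
    "time"

    log "github.com/sirupsen/logrus"

    "github.com/nektos/act/pkg/runner"
)

func main() {
    level := log.InfoLevel
    cfg := &runner.Config{
        // Pick an image from the job's runs-on labels before the
        // static Platforms map is consulted.
        PlatformPicker: func(labels []string) string {
            for _, l := range labels {
                if l == "ubuntu-latest" {
                    return "node:16-bullseye" // placeholder image
                }
            }
            return ""
        },
        JobLoggerLevel:       &level,
        ContainerNamePrefix:  "GITEA-ACTIONS-TASK-42", // placeholder prefix
        ContainerMaxLifetime: time.Hour,
        ValidVolumes:         []string{"my-cache-volume"}, // placeholder volume
    }
    _, _ = runner.New(cfg) // error handling omitted in this sketch
}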

View File

@@ -554,6 +554,43 @@ func TestRunEventSecrets(t *testing.T) {
tjfi.runTest(context.Background(), t, &Config{Secrets: secrets, Env: env})
}
func TestRunWithService(t *testing.T) {
if testing.Short() {
t.Skip("skipping integration test")
}
log.SetLevel(log.DebugLevel)
ctx := context.Background()
platforms := map[string]string{
"ubuntu-latest": "node:12.20.1-buster-slim",
}
workflowPath := "services"
eventName := "push"
workdir, err := filepath.Abs("testdata")
assert.NoError(t, err, workflowPath)
runnerConfig := &Config{
Workdir: workdir,
EventName: eventName,
Platforms: platforms,
ReuseContainers: false,
}
runner, err := New(runnerConfig)
assert.NoError(t, err, workflowPath)
planner, err := model.NewWorkflowPlanner(fmt.Sprintf("testdata/%s", workflowPath), true)
assert.NoError(t, err, workflowPath)
plan, err := planner.PlanEvent(eventName)
assert.NoError(t, err, workflowPath)
err = runner.NewPlanExecutor(plan)(ctx)
assert.NoError(t, err, workflowPath)
}
func TestRunActionInputs(t *testing.T) {
if testing.Short() {
t.Skip("skipping integration test")

View File

@@ -34,6 +34,9 @@ const (
stepStagePost
)
// Controls how many symlinks are resolved for local and remote Actions
const maxSymlinkDepth = 10
func (s stepStage) String() string {
switch s {
case stepStagePre:
@@ -307,3 +310,13 @@ func mergeIntoMapCaseInsensitive(target map[string]string, maps ...map[string]st
}
}
}
func symlinkJoin(filename, sym, parent string) (string, error) {
dir := path.Dir(filename)
dest := path.Join(dir, sym)
prefix := path.Clean(parent) + "/"
if strings.HasPrefix(dest, prefix) || prefix == "./" {
return dest, nil
}
return "", fmt.Errorf("symlink tries to access file '%s' outside of '%s'", strings.ReplaceAll(dest, "'", "''"), strings.ReplaceAll(parent, "'", "''"))
}
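
symlinkJoin confines a resolved symlink target to the action's own directory. The demo below re-declares the helper verbatim so it runs on its own, and shows an in-tree link being accepted and an escaping one being rejected.

package main

import (
    "fmt"
    "path"
    "strings"
)

// Copy of symlinkJoin from the hunk above, repeated here for a self-contained demo.
func symlinkJoin(filename, sym, parent string) (string, error) {
    dir := path.Dir(filename)
    dest := path.Join(dir, sym)
    prefix := path.Clean(parent) + "/"
    if strings.HasPrefix(dest, prefix) || prefix == "./" {
        return dest, nil
    }
    return "", fmt.Errorf("symlink tries to access file '%s' outside of '%s'",
        strings.ReplaceAll(dest, "'", "''"), strings.ReplaceAll(parent, "'", "''"))
}

func main() {
    // Link inside the action directory: allowed.
    fmt.Println(symlinkJoin("act/action.yml", "action.yaml", "act"))
    // Link climbing out of the action directory: rejected.
    fmt.Println(symlinkJoin("act/action.yml", "../../etc/passwd", "act"))
}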

View File

@@ -3,7 +3,10 @@ package runner
import (
"archive/tar"
"context"
"errors"
"fmt"
"io"
"io/fs"
"os"
"path"
"path/filepath"
@@ -42,15 +45,31 @@ func (sal *stepActionLocal) main() common.Executor {
localReader := func(ctx context.Context) actionYamlReader {
_, cpath := getContainerActionPaths(sal.Step, path.Join(actionDir, ""), sal.RunContext)
return func(filename string) (io.Reader, io.Closer, error) {
tars, err := sal.RunContext.JobContainer.GetContainerArchive(ctx, path.Join(cpath, filename))
if err != nil {
return nil, nil, os.ErrNotExist
spath := path.Join(cpath, filename)
for i := 0; i < maxSymlinkDepth; i++ {
tars, err := sal.RunContext.JobContainer.GetContainerArchive(ctx, spath)
if errors.Is(err, fs.ErrNotExist) {
return nil, nil, err
} else if err != nil {
return nil, nil, fs.ErrNotExist
}
treader := tar.NewReader(tars)
header, err := treader.Next()
if errors.Is(err, io.EOF) {
return nil, nil, os.ErrNotExist
} else if err != nil {
return nil, nil, err
}
if header.FileInfo().Mode()&os.ModeSymlink == os.ModeSymlink {
spath, err = symlinkJoin(spath, header.Linkname, cpath)
if err != nil {
return nil, nil, err
}
} else {
return treader, tars, nil
}
}
treader := tar.NewReader(tars)
if _, err := treader.Next(); err != nil {
return nil, nil, os.ErrNotExist
}
return treader, tars, nil
return nil, nil, fmt.Errorf("max depth %d of symlinks exceeded while reading %s", maxSymlinkDepth, spath)
}
}

View File

@@ -1,6 +1,7 @@
package runner
import (
"archive/tar"
"context"
"errors"
"fmt"
@@ -28,11 +29,11 @@ type stepActionRemote struct {
action *model.Action
env map[string]string
remoteAction *remoteAction
cacheDir string
resolvedSha string
}
var (
stepActionRemoteNewCloneExecutor = git.NewGitCloneExecutor
)
var stepActionRemoteNewCloneExecutor = git.NewGitCloneExecutor
func (sar *stepActionRemote) prepareActionExecutor() common.Executor {
return func(ctx context.Context) error {
@@ -41,14 +42,18 @@ func (sar *stepActionRemote) prepareActionExecutor() common.Executor {
return nil
}
// For gitea:
// Since actions can specify the download source via a url prefix.
// The prefix may contain some sensitive information that needs to be stored in secrets,
// so we need to interpolate the expression value for uses first.
sar.Step.Uses = sar.RunContext.NewExpressionEvaluator(ctx).Interpolate(ctx, sar.Step.Uses)
sar.remoteAction = newRemoteAction(sar.Step.Uses)
if sar.remoteAction == nil {
return fmt.Errorf("Expected format {org}/{repo}[/path]@ref. Actual '%s' Input string was not in a correct format", sar.Step.Uses)
}
github := sar.getGithubContext(ctx)
sar.remoteAction.URL = github.ServerURL
if sar.remoteAction.IsCheckout() && isLocalCheckout(github, sar.Step) && !sar.RunContext.Config.NoSkipCheckout {
common.Logger(ctx).Debugf("Skipping local actions/checkout because workdir was already copied")
return nil
@@ -60,13 +65,62 @@ func (sar *stepActionRemote) prepareActionExecutor() common.Executor {
github.Token = sar.RunContext.Config.ReplaceGheActionTokenWithGithubCom
}
}
if sar.RunContext.Config.ActionCache != nil {
cache := sar.RunContext.Config.ActionCache
var err error
sar.cacheDir = fmt.Sprintf("%s/%s", sar.remoteAction.Org, sar.remoteAction.Repo)
repoURL := sar.remoteAction.URL + "/" + sar.cacheDir
repoRef := sar.remoteAction.Ref
sar.resolvedSha, err = cache.Fetch(ctx, sar.cacheDir, repoURL, repoRef, github.Token)
if err != nil {
return fmt.Errorf("failed to fetch \"%s\" version \"%s\": %w", repoURL, repoRef, err)
}
remoteReader := func(ctx context.Context) actionYamlReader {
return func(filename string) (io.Reader, io.Closer, error) {
spath := path.Join(sar.remoteAction.Path, filename)
for i := 0; i < maxSymlinkDepth; i++ {
tars, err := cache.GetTarArchive(ctx, sar.cacheDir, sar.resolvedSha, spath)
if err != nil {
return nil, nil, os.ErrNotExist
}
treader := tar.NewReader(tars)
header, err := treader.Next()
if err != nil {
return nil, nil, os.ErrNotExist
}
if header.FileInfo().Mode()&os.ModeSymlink == os.ModeSymlink {
spath, err = symlinkJoin(spath, header.Linkname, ".")
if err != nil {
return nil, nil, err
}
} else {
return treader, tars, nil
}
}
return nil, nil, fmt.Errorf("max depth %d of symlinks exceeded while reading %s", maxSymlinkDepth, spath)
}
}
actionModel, err := sar.readAction(ctx, sar.Step, sar.resolvedSha, sar.remoteAction.Path, remoteReader(ctx), os.WriteFile)
sar.action = actionModel
return err
}
actionDir := fmt.Sprintf("%s/%s", sar.RunContext.ActionCacheDir(), safeFilename(sar.Step.Uses))
gitClone := stepActionRemoteNewCloneExecutor(git.NewGitCloneExecutorInput{
URL: sar.remoteAction.CloneURL(),
URL: sar.remoteAction.CloneURL(sar.RunContext.Config.DefaultActionInstance),
Ref: sar.remoteAction.Ref,
Dir: actionDir,
Token: github.Token,
Token: "", /*
Shouldn't provide token when cloning actions,
the token comes from the instance which triggered the task,
however, it might be not the same instance which provides actions.
For GitHub, they are the same, always github.com.
But for Gitea, tasks triggered by a.com can clone actions from b.com.
*/
OfflineMode: sar.RunContext.Config.ActionOfflineMode,
})
var ntErr common.Executor
if err := gitClone(ctx); err != nil {
@@ -212,8 +266,16 @@ type remoteAction struct {
Ref string
}
func (ra *remoteAction) CloneURL() string {
return fmt.Sprintf("%s/%s/%s", ra.URL, ra.Org, ra.Repo)
func (ra *remoteAction) CloneURL(u string) string {
if ra.URL == "" {
if !strings.HasPrefix(u, "http://") && !strings.HasPrefix(u, "https://") {
u = "https://" + u
}
} else {
u = ra.URL
}
return fmt.Sprintf("%s/%s/%s", u, ra.Org, ra.Repo)
}
func (ra *remoteAction) IsCheckout() bool {
@@ -224,6 +286,26 @@ func (ra *remoteAction) IsCheckout() bool {
}
func newRemoteAction(action string) *remoteAction {
// support http(s)://host/owner/repo@v3
for _, schema := range []string{"https://", "http://"} {
if strings.HasPrefix(action, schema) {
splits := strings.SplitN(strings.TrimPrefix(action, schema), "/", 2)
if len(splits) != 2 {
return nil
}
ret := parseAction(splits[1])
if ret == nil {
return nil
}
ret.URL = schema + splits[0]
return ret
}
}
return parseAction(action)
}
func parseAction(action string) *remoteAction {
// GitHub's document[^] describes:
// > We strongly recommend that you include the version of
// > the action you are using by specifying a Git ref, SHA, or Docker tag number.
@@ -239,7 +321,7 @@ func newRemoteAction(action string) *remoteAction {
Repo: matches[2],
Path: matches[4],
Ref: matches[6],
URL: "https://github.com",
URL: "",
}
}

View File

@@ -616,6 +616,100 @@ func TestStepActionRemotePost(t *testing.T) {
}
}
func Test_newRemoteAction(t *testing.T) {
tests := []struct {
action string
want *remoteAction
wantCloneURL string
}{
{
action: "actions/heroku@main",
want: &remoteAction{
URL: "",
Org: "actions",
Repo: "heroku",
Path: "",
Ref: "main",
},
wantCloneURL: "https://github.com/actions/heroku",
},
{
action: "actions/aws/ec2@main",
want: &remoteAction{
URL: "",
Org: "actions",
Repo: "aws",
Path: "ec2",
Ref: "main",
},
wantCloneURL: "https://github.com/actions/aws",
},
{
action: "./.github/actions/my-action", // it's valid for GitHub, but act don't support it
want: nil,
},
{
action: "docker://alpine:3.8", // it's valid for GitHub, but act don't support it
want: nil,
},
{
action: "https://gitea.com/actions/heroku@main", // it's invalid for GitHub, but gitea supports it
want: &remoteAction{
URL: "https://gitea.com",
Org: "actions",
Repo: "heroku",
Path: "",
Ref: "main",
},
wantCloneURL: "https://gitea.com/actions/heroku",
},
{
action: "https://gitea.com/actions/aws/ec2@main", // it's invalid for GitHub, but gitea supports it
want: &remoteAction{
URL: "https://gitea.com",
Org: "actions",
Repo: "aws",
Path: "ec2",
Ref: "main",
},
wantCloneURL: "https://gitea.com/actions/aws",
},
{
action: "http://gitea.com/actions/heroku@main", // it's invalid for GitHub, but gitea supports it
want: &remoteAction{
URL: "http://gitea.com",
Org: "actions",
Repo: "heroku",
Path: "",
Ref: "main",
},
wantCloneURL: "http://gitea.com/actions/heroku",
},
{
action: "http://gitea.com/actions/aws/ec2@main", // it's invalid for GitHub, but gitea supports it
want: &remoteAction{
URL: "http://gitea.com",
Org: "actions",
Repo: "aws",
Path: "ec2",
Ref: "main",
},
wantCloneURL: "http://gitea.com/actions/aws",
},
}
for _, tt := range tests {
t.Run(tt.action, func(t *testing.T) {
got := newRemoteAction(tt.action)
assert.Equalf(t, tt.want, got, "newRemoteAction(%v)", tt.action)
cloneURL := ""
if got != nil {
cloneURL = got.CloneURL("github.com")
}
assert.Equalf(t, tt.wantCloneURL, cloneURL, "newRemoteAction(%v).CloneURL()", tt.action)
})
}
}
func Test_safeFilename(t *testing.T) {
tests := []struct {
s string

View File

@@ -114,22 +114,24 @@ func (sd *stepDocker) newStepContainer(ctx context.Context, image string, cmd []
binds, mounts := rc.GetBindsAndMounts()
stepContainer := ContainerNewContainer(&container.NewContainerInput{
Cmd: cmd,
Entrypoint: entrypoint,
WorkingDir: rc.JobContainer.ToContainerPath(rc.Config.Workdir),
Image: image,
Username: rc.Config.Secrets["DOCKER_USERNAME"],
Password: rc.Config.Secrets["DOCKER_PASSWORD"],
Name: createContainerName(rc.jobContainerName(), step.ID),
Env: envList,
Mounts: mounts,
NetworkMode: fmt.Sprintf("container:%s", rc.jobContainerName()),
Binds: binds,
Stdout: logWriter,
Stderr: logWriter,
Privileged: rc.Config.Privileged,
UsernsMode: rc.Config.UsernsMode,
Platform: rc.Config.ContainerArchitecture,
Cmd: cmd,
Entrypoint: entrypoint,
WorkingDir: rc.JobContainer.ToContainerPath(rc.Config.Workdir),
Image: image,
Username: rc.Config.Secrets["DOCKER_USERNAME"],
Password: rc.Config.Secrets["DOCKER_PASSWORD"],
Name: createSimpleContainerName(rc.jobContainerName(), "STEP-"+step.ID),
Env: envList,
Mounts: mounts,
NetworkMode: fmt.Sprintf("container:%s", rc.jobContainerName()),
Binds: binds,
Stdout: logWriter,
Stderr: logWriter,
Privileged: rc.Config.Privileged,
UsernsMode: rc.Config.UsernsMode,
Platform: rc.Config.ContainerArchitecture,
AutoRemove: rc.Config.AutoRemove,
ValidVolumes: rc.Config.ValidVolumes,
})
return stepContainer
}