Compare commits

195 Commits (SHA1)

229dbaf153 f1df2ca5d6 d77991c95a c81a770bc5 7cbb1a910e
a18648ee73 baf3bcf48b 518d8c96f3 2ea7891787 16f35f64eb
ded31bdbc0 60bcfb5d4f 7c6237d93f b6718fdf5d 0c1f2edb99
721857e4a0 6b1010ad07 e12252a43a 8609522aa4 6a876c4f99
de529139af 3715266494 35d6e9f71e d3a56cdb69 d970056601
9884da0122 de0644499a 816a7d410a 50dcc57e4b 9bdddf18e0
6d527bf1a3 ac1ba34518 97749a27b9 d70b225e85 c0130ed030
148a545021 65a925b4ed c5ce502f9f f2bd194c7f 4ab812c1b1
5a331d2d99 3ae0a9dfb7 24b04dfa55 5c4a96bcb7 62abf4fe11
cfedc518ca 68c72b9a51 ad7a8a3559 220d6f1251 6745999c10
8518d70bdf d3dfde055a 5e76853b55 2eb4de02ee 75ffa205c4
342ad6a51a 568f053723 351ae99bc1 385d71a215 f764ecb283
d65cf869e6 775a128cd4 8f12a6c947 83fb85f702 3daf313205
7c5400d75b 929ea6df75 f6a8a0e643 9fab59954c 8c28c9fd8f
2fa0a5f769 a6c95ef2a7 5a2112a7f8 fad986af5f 35cac27f4c
636c8a34ae 556fd20aed a8298365fe 1dda0aec69 49e204166d
09de42f067 a36b003f7a 6744e68ee2 0671d16694 ac5dd8feb8
24440d9f15 f3c88b5091 881dbdb81b aeee2052de 7ee2138418
8dbd151fa7 6601d8d8e8 19abab6375 973dd7f7ef 44b510f48c
5500c928eb 04d12b0206 1252e551b8 c614d8b96c 84b6649b8b
89cb558558 53095d76f4 4b56aced15 05eaeaa528 4eba04b229
8790c9b8e6 b7a9eb9fbf 2f55276cea be4a1477a5 21ea3d0d5f
1316307313 b0a5068f6d 34ab8150bf 8048baf435 7c3c5349ab
98ad62fcef c378a7d28b 44333c758a 36dbbc1dfa f41e9127a8
460c78da07 22dc1e0e7e 281a52f0d6 e775fea265 24c16fbf25
ce168f9595 c4b64ec1c1 f91b2aa5db 72d03214c5 42b9b73d38
787388daf5 b91e4b0d55 e259dc2835 70ae63bd10 01952d05c8
3748772201 932863bef5 09e9f6a1ac a2856aed75 612e08f113
1b7212f5fd 75d19d0ff5 7ada9d3f74 78e1ceb0a8 d690a5fee9
19e6929398 b00babd0d9 82a8c1e80d 566b9d843e 7ebcc1c816
3e23b4fbb5 63ae215071 efb12b7f12 b3c06dc19d 0d0780580c
872a69548c 33cb8bca64 d8ba8cbf17 003947ea33 0988b47752
d064863f9b 93907931df 767e6a8696 b2fb9e64ac 8b4f210872
3f3b25ae84 3ac756b37a 7e8d070811 a53a1c2000 3f4a6dc462
b14398eff3 f0c6fa11be cfc1b91aa1 0d388572a8 9c7085195c
e3a722e7c6 d3ce2b0a46 4989f444f1 c044035d9e 767969502a
469d024c1f 6ab71ecebb bef9b5c3c7 0c8c082ac0 b8d7e947cf
a8e05cded6 d281230cce 7d9951200f 0f8861b9e3 8c5748a55c
57bf4d27a2 4c2524ab4d d9fe63ec24 7073eac240 1797775be3
.gitea/workflows/test.yml (new file, 44 lines)

@@ -0,0 +1,44 @@
name: checks
on:
  - push
  - pull_request

env:
  GOPROXY: https://goproxy.io,direct
  GOPATH: /go_path
  GOCACHE: /go_cache

jobs:
  lint:
    name: check and test
    runs-on: ubuntu-latest
    steps:
      - name: cache go path
        id: cache-go-path
        uses: https://github.com/actions/cache@v3
        with:
          path: /go_path
          key: go_path-${{ github.repository }}-${{ github.ref_name }}
          restore-keys: |
            go_path-${{ github.repository }}-
            go_path-
      - name: cache go cache
        id: cache-go-cache
        uses: https://github.com/actions/cache@v3
        with:
          path: /go_cache
          key: go_cache-${{ github.repository }}-${{ github.ref_name }}
          restore-keys: |
            go_cache-${{ github.repository }}-
            go_cache-
      - uses: actions/setup-go@v3
        with:
          go-version: 1.20
      - uses: actions/checkout@v3
      - name: vet checks
        run: go vet -v ./...
      - name: build
        run: go build -v ./...
      - name: test
        run: go test -v ./pkg/jobparser
        # TODO test more packages
.github/actions/choco/Dockerfile (vendored, 2 lines changed)

@@ -1,4 +1,4 @@
-FROM alpine:3.16
+FROM alpine:3.17

 ARG CHOCOVERSION=1.1.0
.github/actions/run-tests/action.yml (vendored, new file, 77 lines)

@@ -0,0 +1,77 @@
name: 'run-tests'
description: 'Runs go test and upload a step summary'
inputs:
  filter:
    description: 'The go test pattern for the tests to run'
    required: false
    default: ''
  upload-logs-name:
    description: 'Choose the name of the log artifact'
    required: false
    default: logs-${{ github.job }}-${{ strategy.job-index }}
  upload-logs:
    description: 'If true uploads logs of each tests as an artifact'
    required: false
    default: 'true'
runs:
  using: composite
  steps:
    - uses: actions/github-script@v6
      with:
        github-token: none # No reason to grant access to the GITHUB_TOKEN
        script: |
          let myOutput = '';
          var fs = require('fs');
          var uploadLogs = process.env.UPLOAD_LOGS === 'true';
          if(uploadLogs) {
            await io.mkdirP('logs');
          }
          var filename = null;
          const options = {};
          options.ignoreReturnCode = true;
          options.env = Object.assign({}, process.env);
          delete options.env.ACTIONS_RUNTIME_URL;
          delete options.env.ACTIONS_RUNTIME_TOKEN;
          delete options.env.ACTIONS_CACHE_URL;
          options.listeners = {
            stdout: (data) => {
              for(line of data.toString().split('\n')) {
                if(/^\s*(===\s[^\s]+\s|---\s[^\s]+:\s)/.test(line)) {
                  if(uploadLogs) {
                    var runprefix = "=== RUN   ";
                    if(line.startsWith(runprefix)) {
                      filename = "logs/" + line.substring(runprefix.length).replace(/[^A-Za-z0-9]/g, '-') + ".txt";
                      fs.writeFileSync(filename, line + "\n");
                    } else if(filename) {
                      fs.appendFileSync(filename, line + "\n");
                      filename = null;
                    }
                  }
                  myOutput += line + "\n";
                } else if(filename) {
                  fs.appendFileSync(filename, line + "\n");
                }
              }
            }
          };
          var args = ['test', '-v', '-cover', '-coverprofile=coverage.txt', '-covermode=atomic', '-timeout', '15m'];
          var filter = process.env.FILTER;
          if(filter) {
            args.push('-run');
            args.push(filter);
          }
          args.push('./...');
          var exitcode = await exec.exec('go', args, options);
          if(process.env.GITHUB_STEP_SUMMARY) {
            core.summary.addCodeBlock(myOutput);
            await core.summary.write();
          }
          process.exit(exitcode);
      env:
        FILTER: ${{ inputs.filter }}
        UPLOAD_LOGS: ${{ inputs.upload-logs }}
    - uses: actions/upload-artifact@v3
      if: always() && inputs.upload-logs == 'true' && !env.ACT
      with:
        name: ${{ inputs.upload-logs-name }}
        path: logs
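For context on the inline script above: it splits verbose `go test` output into per-test log files by matching the `=== RUN` / `--- PASS:` marker lines. A minimal, self-contained Go sketch of the same splitting idea (illustrative only, not part of this diff; it assumes standard `go test -v` output is piped to stdin):

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
	"strings"
)

// marker matches the lines `go test -v` emits at test boundaries,
// e.g. "=== RUN   TestFoo" and "--- PASS: TestFoo (0.00s)".
var marker = regexp.MustCompile(`^\s*(===\s\S+\s|---\s\S+:\s)`)

func main() {
	logs := map[string][]string{} // test name -> captured output lines
	var current string

	sc := bufio.NewScanner(os.Stdin) // pipe `go test -v ./...` into this program
	for sc.Scan() {
		line := sc.Text()
		if marker.MatchString(line) {
			// A "=== RUN" marker starts a new capture bucket.
			if name, ok := strings.CutPrefix(line, "=== RUN   "); ok {
				current = name
			}
		}
		if current != "" {
			logs[current] = append(logs[current], line)
		}
	}
	for name, lines := range logs {
		fmt.Printf("%s: %d lines captured\n", name, len(lines))
	}
}
```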
.github/workflows/checks.yml (vendored, 34 lines changed)

@@ -1,6 +1,10 @@
 name: checks
 on: [pull_request, workflow_dispatch]

+concurrency:
+  cancel-in-progress: true
+  group: ${{ github.workflow }}-${{ github.ref }}
+
 env:
   ACT_OWNER: ${{ github.repository_owner }}
   ACT_REPOSITORY: ${{ github.repository }}
@@ -15,14 +19,14 @@ jobs:
       - uses: actions/checkout@v3
         with:
           fetch-depth: 0
-      - uses: actions/setup-go@v3
+      - uses: actions/setup-go@v4
         with:
           go-version: ${{ env.GO_VERSION }}
           check-latest: true
-      - uses: golangci/golangci-lint-action@v3.3.1
+      - uses: golangci/golangci-lint-action@v3.4.0
         with:
           version: v1.47.2
-      - uses: megalinter/megalinter/flavors/go@v6.15.0
+      - uses: megalinter/megalinter/flavors/go@v6.22.2
        env:
          DEFAULT_BRANCH: master
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -39,7 +43,7 @@ jobs:
           fetch-depth: 2
       - name: Set up QEMU
         uses: docker/setup-qemu-action@v2
-      - uses: actions/setup-go@v3
+      - uses: actions/setup-go@v4
        with:
          go-version: ${{ env.GO_VERSION }}
          check-latest: true
@@ -50,9 +54,14 @@ jobs:
           key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
           restore-keys: |
             ${{ runner.os }}-go-
-      - run: go test -v -cover -coverprofile=coverage.txt -covermode=atomic -timeout 15m ./...
+      - name: Run Tests
+        uses: ./.github/actions/run-tests
+        with:
+          upload-logs-name: logs-linux
       - name: Run act from cli
         run: go run main.go -P ubuntu-latest=node:16-buster-slim -C ./pkg/runner/testdata/ -W ./basic/push.yml
       - name: Upload Codecov report
-        uses: codecov/codecov-action@v3.1.1
+        uses: codecov/codecov-action@v3.1.3
         with:
           files: coverage.txt
           fail_ci_if_error: true # optional (default = false)
@@ -69,19 +78,22 @@ jobs:
       - uses: actions/checkout@v3
         with:
           fetch-depth: 2
-      - uses: actions/setup-go@v3
+      - uses: actions/setup-go@v4
        with:
          go-version: ${{ env.GO_VERSION }}
          check-latest: true
-      - run: go test -v -run ^TestRunEventHostEnvironment$ ./...
-        # TODO merge coverage with test-linux
+      - name: Run Tests
+        uses: ./.github/actions/run-tests
+        with:
+          filter: '^TestRunEventHostEnvironment$'
+          upload-logs-name: logs-${{ matrix.os }}

   snapshot:
     name: snapshot
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
-      - uses: actions/setup-go@v3
+      - uses: actions/setup-go@v4
        with:
          go-version: ${{ env.GO_VERSION }}
          check-latest: true
@@ -93,7 +105,7 @@ jobs:
           restore-keys: |
             ${{ runner.os }}-go-
       - name: GoReleaser
-        uses: goreleaser/goreleaser-action@v3
+        uses: goreleaser/goreleaser-action@v4
         with:
           version: latest
           args: release --snapshot --rm-dist
.github/workflows/promote.yml (vendored, 4 lines changed)

@@ -17,8 +17,8 @@ jobs:
           fetch-depth: 0
           ref: master
           token: ${{ secrets.GORELEASER_GITHUB_TOKEN }}
-      - uses: fregante/setup-git-user@v1
-      - uses: actions/setup-go@v3
+      - uses: fregante/setup-git-user@v2
+      - uses: actions/setup-go@v4
        with:
          go-version: ${{ env.GO_VERSION }}
          check-latest: true
.github/workflows/release.yml (vendored, 30 lines changed)

@@ -15,7 +15,7 @@ jobs:
       - uses: actions/checkout@v3
         with:
           fetch-depth: 0
-      - uses: actions/setup-go@v3
+      - uses: actions/setup-go@v4
        with:
          go-version: ${{ env.GO_VERSION }}
          check-latest: true
@@ -27,7 +27,7 @@ jobs:
           restore-keys: |
             ${{ runner.os }}-go-
       - name: GoReleaser
-        uses: goreleaser/goreleaser-action@v3
+        uses: goreleaser/goreleaser-action@v4
         with:
           version: latest
           args: release --rm-dist
@@ -39,3 +39,29 @@ jobs:
           version: ${{ github.ref }}
           apiKey: ${{ secrets.CHOCO_APIKEY }}
           push: true
+      - name: GitHub CLI extension
+        uses: actions/github-script@v6
+        with:
+          github-token: ${{ secrets.GORELEASER_GITHUB_TOKEN }}
+          script: |
+            const mainRef = (await github.rest.git.getRef({
+              owner: 'nektos',
+              repo: 'gh-act',
+              ref: 'heads/main',
+            })).data;
+            console.log(mainRef);
+            github.rest.git.createRef({
+              owner: 'nektos',
+              repo: 'gh-act',
+              ref: context.ref,
+              sha: mainRef.object.sha,
+            });
+  winget:
+    needs: release
+    runs-on: windows-latest # Action can only run on Windows
+    steps:
+      - uses: vedantmgoyal2009/winget-releaser@v2
+        with:
+          identifier: nektos.act
+          installers-regex: '_Windows_\w+\.zip$'
+          token: ${{ secrets.WINGET_TOKEN }}
.github/workflows/stale.yml (vendored, 4 lines changed)

@@ -8,7 +8,7 @@ jobs:
     name: Stale
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/stale@v6
+      - uses: actions/stale@v8
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          stale-issue-message: 'Issue is stale and will be closed in 14 days unless there is new activity'
@@ -19,5 +19,5 @@ jobs:
          exempt-pr-labels: 'stale-exempt'
          remove-stale-when-updated: 'True'
          operations-per-run: 500
-         days-before-stale: 30
+         days-before-stale: 180
          days-before-close: 14
.mega-linter.yml

@@ -14,7 +14,7 @@ DISABLE_LINTERS:
   - MARKDOWN_MARKDOWN_LINK_CHECK
   - REPOSITORY_CHECKOV
   - REPOSITORY_TRIVY
-FILTER_REGEX_EXCLUDE: (.*testdata/*|install.sh|pkg/container/docker_cli.go|pkg/container/DOCKER_LICENSE)
+FILTER_REGEX_EXCLUDE: (.*testdata/*|install.sh|pkg/container/docker_cli.go|pkg/container/DOCKER_LICENSE|VERSION)
 MARKDOWN_MARKDOWNLINT_CONFIG_FILE: .markdownlint.yml
 PARALLEL: false
 PRINT_ALPACA: false
.vscode/settings.json (vendored, 1 line changed)

@@ -1,6 +1,7 @@
 {
   "go.lintTool": "golangci-lint",
   "go.lintFlags": ["--fix"],
+  "go.testTimeout": "300s",
   "[json]": {
     "editor.defaultFormatter": "esbenp.prettier-vscode"
   },
Makefile (6 lines changed)

@@ -96,7 +96,11 @@ ifneq ($(shell git status -s),)
	@echo "Unable to promote a dirty workspace"
	@exit 1
 endif
+	echo -n $(NEW_VERSION) > VERSION
+	git add VERSION
+	git commit -m "chore: bump VERSION to $(NEW_VERSION)"
	git tag -a -m "releasing v$(NEW_VERSION)" v$(NEW_VERSION)
	git push origin master
	git push origin v$(NEW_VERSION)

 .PHONY: snapshot
@@ -105,3 +109,5 @@ snapshot:
		--rm-dist \
		--single-target \
		--snapshot
+
+.PHONY: clean all
README.md (124 lines changed)

@@ -1,14 +1,25 @@
-## Naming rules:
+## Forking rules
+
+This is a custom fork of [nektos/act](https://github.com/nektos/act/), for the purpose of serving [act_runner](https://gitea.com/gitea/act_runner).
+
+It cannot be used as a command line tool anymore, but only as a library.
+
+It's a soft fork, which means that it will track the latest release of nektos/act.

 Branches:

-- `main`: default branch, contains custom changes.
-- `nektos/master`: mirror for `master` of [nektos/act](https://github.com/nektos/act/).
+- `main`: default branch, contains custom changes, based on the latest release (not the latest of the master branch) of nektos/act.
+- `nektos/master`: mirror for the master branch of nektos/act.

 Tags:

-- `vX.YZ.*`: based on `nektos/vX.Y.Z`, contains custom changes.
 - `nektos/vX.Y.Z`: mirror for `vX.Y.Z` of [nektos/act](https://github.com/nektos/act/).
+- `vX.YZ.*`: based on `nektos/vX.Y.Z`, contains custom changes.
+  - Examples:
+    - `nektos/v0.2.23` -> `v0.223.*`
+    - `nektos/v0.3.1` -> `v0.301.*`, not ~~`v0.31.*`~~
+    - `nektos/v0.10.1` -> `v0.1001.*`, not ~~`v0.101.*`~~
+    - `nektos/v0.3.100` -> not ~~`v0.3100.*`~~; I don't think that will really happen, but if it does, we can find a way to handle it.

 ---
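The tag-mapping rule above is mechanical; as an illustration (a sketch, not part of the repository), a small Go program that derives the fork's tag prefix from an upstream tag by zero-padding the patch number to two digits and appending it to the minor number, exactly as the examples show:

```go
package main

import (
	"fmt"
	"strings"
)

// forkTagPrefix maps an upstream tag like "nektos/v0.2.23" to the fork's
// tag prefix "v0.223": patch is zero-padded to two digits and concatenated
// onto the minor number.
func forkTagPrefix(upstream string) (string, error) {
	v := strings.TrimPrefix(upstream, "nektos/v")
	var major, minor, patch int
	if _, err := fmt.Sscanf(v, "%d.%d.%d", &major, &minor, &patch); err != nil {
		return "", fmt.Errorf("unexpected tag %q: %w", upstream, err)
	}
	return fmt.Sprintf("v%d.%d%02d", major, minor, patch), nil
}

func main() {
	for _, tag := range []string{"nektos/v0.2.23", "nektos/v0.3.1", "nektos/v0.10.1"} {
		p, _ := forkTagPrefix(tag)
		fmt.Printf("%s -> %s.*\n", tag, p) // v0.223.*, v0.301.*, v0.1001.*
	}
}
```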
@@ -85,6 +96,14 @@ choco install act-cli
 scoop install act
 ```

+### [Winget](https://learn.microsoft.com/en-us/windows/package-manager/) (Windows)
+
+[](https://repology.org/project/act-run-github-actions/versions)
+
+```shell
+winget install nektos.act
+```
+
 ### [AUR](https://aur.archlinux.org/packages/act/) (Linux)

 [](https://aur.archlinux.org/packages/act/)
@@ -122,6 +141,14 @@ Using the latest [Nix command](https://nixos.wiki/wiki/Nix_command), you can run
 nix run nixpkgs#act
 ```

+## Installation as GitHub CLI extension
+
+Act can be installed as a [GitHub CLI](https://cli.github.com/) extension:
+
+```sh
+gh extension install https://github.com/nektos/gh-act
+```
+
 ## Other install options

 ### Bash script
@@ -129,7 +156,7 @@ nix run nixpkgs#act
 Run this command in your terminal:

 ```shell
-curl https://raw.githubusercontent.com/nektos/act/master/install.sh | sudo bash
+curl -s https://raw.githubusercontent.com/nektos/act/master/install.sh | sudo bash
 ```

 ### Manual download
@@ -162,6 +189,9 @@ act pull_request
 # Run a specific job:
 act -j test

+# Collect artifacts to the /tmp/artifacts folder:
+act --artifact-server-path /tmp/artifacts
+
 # Run a job in a specific workflow (useful if you have duplicate job names)
 act -j lint -W .github/workflows/checks.yml

@@ -177,49 +207,6 @@ act -v

 When running `act` for the first time, it will ask you to choose the image to be used as default.
 It will save that information to `~/.actrc`; please refer to [Configuration](#configuration) for more information about `.actrc` and to [Runners](#runners) for information about used/available Docker images.

-# Flags
-
-```none
-  -a, --actor string                              user that triggered the event (default "nektos/act")
-      --replace-ghe-action-with-github-com        If you are using GitHub Enterprise Server and allow specified actions from GitHub (github.com), you can set actions on this. (e.g. --replace-ghe-action-with-github-com=github/super-linter)
-      --replace-ghe-action-token-with-github-com  If you are using replace-ghe-action-with-github-com and you want to use private actions on GitHub, you have to set personal access token
-      --artifact-server-path string               Defines the path where the artifact server stores uploads and retrieves downloads from. If not specified the artifact server will not start.
-      --artifact-server-port string               Defines the port where the artifact server listens (will only bind to localhost). (default "34567")
-  -b, --bind                                      bind working directory to container, rather than copy
-      --container-architecture string             Architecture which should be used to run containers, e.g.: linux/amd64. If not specified, will use host default architecture. Requires Docker server API Version 1.41+. Ignored on earlier Docker server platforms.
-      --container-cap-add stringArray             kernel capabilities to add to the workflow containers (e.g. --container-cap-add SYS_PTRACE)
-      --container-cap-drop stringArray            kernel capabilities to remove from the workflow containers (e.g. --container-cap-drop SYS_PTRACE)
-      --container-daemon-socket string            Path to Docker daemon socket which will be mounted to containers (default "/var/run/docker.sock")
-      --defaultbranch string                      the name of the main branch
-      --detect-event                              Use first event type from workflow as event that triggered the workflow
-  -C, --directory string                          working directory (default ".")
-  -n, --dryrun                                    dryrun mode
-      --env stringArray                           env to make available to actions with optional value (e.g. --env myenv=foo or --env myenv)
-      --env-file string                           environment file to read and use as env in the containers (default ".env")
-  -e, --eventpath string                          path to event JSON file
-      --github-instance string                    GitHub instance to use. Don't use this if you are not using GitHub Enterprise Server. (default "github.com")
-  -g, --graph                                     draw workflows
-  -h, --help                                      help for act
-      --insecure-secrets                          NOT RECOMMENDED! Doesn't hide secrets while printing logs.
-  -j, --job string                                run job
-  -l, --list                                      list workflows
-      --no-recurse                                Flag to disable running workflows from subdirectories of specified path in '--workflows'/'-W' flag
-  -P, --platform stringArray                      custom image to use per platform (e.g. -P ubuntu-18.04=nektos/act-environments-ubuntu:18.04)
-      --privileged                                use privileged mode
-  -p, --pull                                      pull docker image(s) even if already present
-  -q, --quiet                                     disable logging of output from steps
-      --rebuild                                   rebuild local action docker image(s) even if already present
-  -r, --reuse                                     don't remove container(s) on successfully completed workflow(s) to maintain state between runs
-      --rm                                        automatically remove container(s)/volume(s) after a workflow(s) failure
-  -s, --secret stringArray                        secret to make available to actions with optional value (e.g. -s mysecret=foo or -s mysecret)
-      --secret-file string                        file with list of secrets to read from (e.g. --secret-file .secrets) (default ".secrets")
-      --use-gitignore                             Controls whether paths specified in .gitignore should be copied into container (default true)
-      --userns string                             user namespace to use
-  -v, --verbose                                   verbose output
-  -w, --watch                                     watch the contents of the local repo and run when files change
-  -W, --workflows string                          path to workflow file(s) (default "./.github/workflows/")
-```
-
 ## `GITHUB_TOKEN`

 GitHub [automatically provides](https://docs.github.com/en/actions/security-guides/automatic-token-authentication#about-the-github_token-secret) a `GITHUB_TOKEN` secret when running workflows inside GitHub.

@@ -356,10 +343,41 @@ MY_ENV_VAR=MY_ENV_VAR_VALUE
 MY_2ND_ENV_VAR="my 2nd env var value"
 ```

+# Skipping jobs
+
+You cannot use the `env` context in job-level `if` conditions, but you can add a custom event property to the `github` context. You can also use this method in step-level `if` conditions.
+
+```yml
+on: push
+jobs:
+  deploy:
+    if: ${{ !github.event.act }} # skip during local actions testing
+    runs-on: ubuntu-latest
+    steps:
+      - run: exit 0
+```
+
+And use this `event.json` file with act; otherwise the job will run:
+
+```json
+{
+  "act": true
+}
+```
+
+Run act like
+
+```sh
+act -e event.json
+```
+
+_Hint: you can add / append `-e event.json` as a line into `./.actrc`_
+
 # Skipping steps

 Act adds a special environment variable `ACT` that can be used to skip a step that you
 don't want to run locally. E.g. a step that posts a Slack message or bumps a version number.
+**You cannot use this method in job-level `if` conditions; see [Skipping jobs](#skipping-jobs).**

 ```yml
 - name: Some step
@@ -391,7 +409,7 @@ act pull_request -e pull-request.json

 Act will properly provide `github.head_ref` and `github.base_ref` to the action as expected.

-## Pass Inputs to Manually Triggered Workflows
+# Pass Inputs to Manually Triggered Workflows

 Example workflow file

@@ -417,6 +435,14 @@ jobs:
           echo "Hello ${{ github.event.inputs.NAME }} and ${{ github.event.inputs.SOME_VALUE }}!"
 ```

+## via input or input-file flag
+
+- `act --input NAME=somevalue` - use `somevalue` as the value for `NAME` input.
+- `act --input-file my.input` - load input values from `my.input` file.
+  - input file format is the same as `.env` format
+
 ## via JSON

 Example JSON payload file conveniently named `payload.json`

 ```json
cmd/dir.go (new file, 27 lines)

@@ -0,0 +1,27 @@
package cmd

import (
	"os"
	"path/filepath"

	log "github.com/sirupsen/logrus"
)

var (
	UserHomeDir  string
	CacheHomeDir string
)

func init() {
	home, err := os.UserHomeDir()
	if err != nil {
		log.Fatal(err)
	}
	UserHomeDir = home

	if v := os.Getenv("XDG_CACHE_HOME"); v != "" {
		CacheHomeDir = v
	} else {
		CacheHomeDir = filepath.Join(UserHomeDir, ".cache")
	}
}
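The new file follows the XDG base-directory convention: `$XDG_CACHE_HOME` wins, with `~/.cache` as the fallback. A standalone sketch of the same lookup order for reference (a hypothetical `main` mirroring the logic above rather than importing the package):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// cacheHomeDir mirrors the lookup in cmd/dir.go: prefer $XDG_CACHE_HOME,
// otherwise fall back to ~/.cache under the user's home directory.
func cacheHomeDir() (string, error) {
	if v := os.Getenv("XDG_CACHE_HOME"); v != "" {
		return v, nil
	}
	home, err := os.UserHomeDir()
	if err != nil {
		return "", err
	}
	return filepath.Join(home, ".cache"), nil
}

func main() {
	dir, err := cacheHomeDir()
	if err != nil {
		panic(err)
	}
	// act stores its state under <cache home>/act, e.g. /home/user/.cache/act
	fmt.Println(filepath.Join(dir, "act"))
}
```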
cmd/input.go (14 lines changed)

@@ -17,12 +17,14 @@ type Input struct {
	bindWorkdir           bool
	secrets               []string
	envs                  []string
+	inputs                []string
	platforms             []string
	dryrun                bool
	forcePull             bool
	forceRebuild          bool
	noOutput              bool
	envfile               string
+	inputfile             string
	secretfile            string
	insecureSecrets       bool
	defaultBranch         string
@@ -30,6 +32,7 @@ type Input struct {
	usernsMode            string
	containerArchitecture string
	containerDaemonSocket string
+	containerOptions      string
	noWorkflowRecurse     bool
	useGitIgnore          bool
	githubInstance        string
@@ -37,12 +40,18 @@ type Input struct {
	containerCapDrop      []string
	autoRemove            bool
	artifactServerPath    string
+	artifactServerAddr    string
	artifactServerPort    string
+	noCacheServer         bool
+	cacheServerPath       string
+	cacheServerAddr       string
+	cacheServerPort       uint16
	jsonLogger            bool
	noSkipCheckout        bool
	remoteName            string
	replaceGheActionWithGithubCom      []string
	replaceGheActionTokenWithGithubCom string
+	matrix                []string
 }

 func (i *Input) resolve(path string) string {
@@ -83,3 +92,8 @@ func (i *Input) WorkflowsPath() string {
 func (i *Input) EventPath() string {
	return i.resolve(i.eventPath)
 }
+
+// Inputfile returns the path to the input file
+func (i *Input) Inputfile() string {
+	return i.resolve(i.inputfile)
+}
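Since the new `.input` file shares the `.env` syntax (see the README changes above), the same godotenv parser can read it; a minimal sketch assuming a file named `my.input` exists in the working directory:

```go
package main

import (
	"fmt"

	"github.com/joho/godotenv"
)

func main() {
	// my.input uses .env syntax, e.g.:
	//   NAME=somevalue
	//   SOME_VALUE="quoted value"
	inputs, err := godotenv.Read("my.input")
	if err != nil {
		panic(err)
	}
	for k, v := range inputs {
		fmt.Printf("%s=%s\n", k, v)
	}
}
```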
cmd/notices.go (new file, 140 lines)

@@ -0,0 +1,140 @@
package cmd

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
	"os"
	"path/filepath"
	"runtime"
	"strings"
	"time"

	log "github.com/sirupsen/logrus"
)

type Notice struct {
	Level   string `json:"level"`
	Message string `json:"message"`
}

func displayNotices(input *Input) {
	select {
	case notices := <-noticesLoaded:
		if len(notices) > 0 {
			noticeLogger := log.New()
			if input.jsonLogger {
				noticeLogger.SetFormatter(&log.JSONFormatter{})
			} else {
				noticeLogger.SetFormatter(&log.TextFormatter{
					DisableQuote:     true,
					DisableTimestamp: true,
					PadLevelText:     true,
				})
			}

			fmt.Printf("\n")
			for _, notice := range notices {
				level, err := log.ParseLevel(notice.Level)
				if err != nil {
					level = log.InfoLevel
				}
				noticeLogger.Log(level, notice.Message)
			}
		}
	case <-time.After(time.Second * 1):
		log.Debugf("Timeout waiting for notices")
	}
}

var noticesLoaded = make(chan []Notice)

func loadVersionNotices(version string) {
	go func() {
		noticesLoaded <- getVersionNotices(version)
	}()
}

const NoticeURL = "https://api.nektosact.com/notices"

func getVersionNotices(version string) []Notice {
	if os.Getenv("ACT_DISABLE_VERSION_CHECK") == "1" {
		return nil
	}

	noticeURL, err := url.Parse(NoticeURL)
	if err != nil {
		log.Error(err)
		return nil
	}
	query := noticeURL.Query()
	query.Add("os", runtime.GOOS)
	query.Add("arch", runtime.GOARCH)
	query.Add("version", version)

	noticeURL.RawQuery = query.Encode()

	client := &http.Client{}
	req, err := http.NewRequest("GET", noticeURL.String(), nil)
	if err != nil {
		log.Debug(err)
		return nil
	}

	etag := loadNoticesEtag()
	if etag != "" {
		log.Debugf("Conditional GET for notices etag=%s", etag)
		req.Header.Set("If-None-Match", etag)
	}

	resp, err := client.Do(req)
	if err != nil {
		log.Debug(err)
		return nil
	}

	newEtag := resp.Header.Get("Etag")
	if newEtag != "" {
		log.Debugf("Saving notices etag=%s", newEtag)
		saveNoticesEtag(newEtag)
	}

	defer resp.Body.Close()
	notices := []Notice{}
	if resp.StatusCode == 304 {
		log.Debug("No new notices")
		return nil
	}
	if err := json.NewDecoder(resp.Body).Decode(&notices); err != nil {
		log.Debug(err)
		return nil
	}

	return notices
}

func loadNoticesEtag() string {
	p := etagPath()
	content, err := os.ReadFile(p)
	if err != nil {
		log.Debugf("Unable to load etag from %s: %e", p, err)
	}
	return strings.TrimSuffix(string(content), "\n")
}

func saveNoticesEtag(etag string) {
	p := etagPath()
	err := os.WriteFile(p, []byte(strings.TrimSuffix(etag, "\n")), 0o600)
	if err != nil {
		log.Debugf("Unable to save etag to %s: %e", p, err)
	}
}

func etagPath() string {
	dir := filepath.Join(CacheHomeDir, "act")
	if err := os.MkdirAll(dir, 0o777); err != nil {
		log.Fatal(err)
	}
	return filepath.Join(dir, ".notices.etag")
}
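The notice fetch in `getVersionNotices` is a standard ETag-based conditional GET: send the stored validator in `If-None-Match` and treat `304 Not Modified` as "nothing new". A self-contained sketch of that pattern (hypothetical URL and ETag value, not the act endpoint):

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	req, err := http.NewRequest("GET", "https://example.com/notices", nil)
	if err != nil {
		panic(err)
	}
	// Send the ETag saved from a previous response; the server replies
	// 304 Not Modified when the content has not changed.
	req.Header.Set("If-None-Match", `"abc123"`)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	if resp.StatusCode == http.StatusNotModified {
		fmt.Println("cached copy is still fresh")
		return
	}
	// Persist the new validator for the next request.
	fmt.Println("new ETag:", resp.Header.Get("Etag"))
}
```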
cmd/root.go (341 lines changed)

@@ -12,13 +12,15 @@ import (
	"strings"

	"github.com/AlecAivazis/survey/v2"
+	"github.com/adrg/xdg"
	"github.com/andreaskoch/go-fswatch"
	"github.com/joho/godotenv"
-	"github.com/mitchellh/go-homedir"
	gitignore "github.com/sabhiram/go-gitignore"
	log "github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
+	"gopkg.in/yaml.v3"

+	"github.com/nektos/act/pkg/artifactcache"
	"github.com/nektos/act/pkg/artifacts"
	"github.com/nektos/act/pkg/common"
	"github.com/nektos/act/pkg/container"
@@ -34,7 +36,8 @@ func Execute(ctx context.Context, version string) {
		Short:             "Run GitHub actions locally by specifying the event name (e.g. `push`) or an action name directly.",
		Args:              cobra.MaximumNArgs(1),
		RunE:              newRunCommand(ctx, input),
-		PersistentPreRun:  setupLogging,
+		PersistentPreRun:  setup(input),
+		PersistentPostRun: cleanup(input),
		Version:           version,
		SilenceUsage:      true,
	}
@@ -47,11 +50,12 @@ func Execute(ctx context.Context, version string) {
	rootCmd.Flags().StringVar(&input.remoteName, "remote-name", "origin", "git remote name that will be used to retrieve url of git repo")
	rootCmd.Flags().StringArrayVarP(&input.secrets, "secret", "s", []string{}, "secret to make available to actions with optional value (e.g. -s mysecret=foo or -s mysecret)")
	rootCmd.Flags().StringArrayVarP(&input.envs, "env", "", []string{}, "env to make available to actions with optional value (e.g. --env myenv=foo or --env myenv)")
+	rootCmd.Flags().StringArrayVarP(&input.inputs, "input", "", []string{}, "action input to make available to actions (e.g. --input myinput=foo)")
	rootCmd.Flags().StringArrayVarP(&input.platforms, "platform", "P", []string{}, "custom image to use per platform (e.g. -P ubuntu-18.04=nektos/act-environments-ubuntu:18.04)")
	rootCmd.Flags().BoolVarP(&input.reuseContainers, "reuse", "r", false, "don't remove container(s) on successfully completed workflow(s) to maintain state between runs")
	rootCmd.Flags().BoolVarP(&input.bindWorkdir, "bind", "b", false, "bind working directory to container, rather than copy")
-	rootCmd.Flags().BoolVarP(&input.forcePull, "pull", "p", false, "pull docker image(s) even if already present")
-	rootCmd.Flags().BoolVarP(&input.forceRebuild, "rebuild", "", false, "rebuild local action docker image(s) even if already present")
+	rootCmd.Flags().BoolVarP(&input.forcePull, "pull", "p", true, "pull docker image(s) even if already present")
+	rootCmd.Flags().BoolVarP(&input.forceRebuild, "rebuild", "", true, "rebuild local action docker image(s) even if already present")
	rootCmd.Flags().BoolVarP(&input.autodetectEvent, "detect-event", "", false, "Use first event type from workflow as event that triggered the workflow")
	rootCmd.Flags().StringVarP(&input.eventPath, "eventpath", "e", "", "path to event JSON file")
	rootCmd.Flags().StringVar(&input.defaultBranch, "defaultbranch", "", "the name of the main branch")
@@ -63,6 +67,7 @@ func Execute(ctx context.Context, version string) {
	rootCmd.Flags().BoolVar(&input.autoRemove, "rm", false, "automatically remove container(s)/volume(s) after a workflow(s) failure")
	rootCmd.Flags().StringArrayVarP(&input.replaceGheActionWithGithubCom, "replace-ghe-action-with-github-com", "", []string{}, "If you are using GitHub Enterprise Server and allow specified actions from GitHub (github.com), you can set actions on this. (e.g. --replace-ghe-action-with-github-com=github/super-linter)")
	rootCmd.Flags().StringVar(&input.replaceGheActionTokenWithGithubCom, "replace-ghe-action-token-with-github-com", "", "If you are using replace-ghe-action-with-github-com and you want to use private actions on GitHub, you have to set personal access token")
+	rootCmd.Flags().StringArrayVarP(&input.matrix, "matrix", "", []string{}, "specify which matrix configuration to include (e.g. --matrix java:13)")
	rootCmd.PersistentFlags().StringVarP(&input.actor, "actor", "a", "nektos/act", "user that triggered the event")
	rootCmd.PersistentFlags().StringVarP(&input.workflowsPath, "workflows", "W", "./.github/workflows/", "path to workflow file(s)")
	rootCmd.PersistentFlags().BoolVarP(&input.noWorkflowRecurse, "no-recurse", "", false, "Flag to disable running workflows from subdirectories of specified path in '--workflows'/'-W' flag")
@@ -74,12 +79,19 @@ func Execute(ctx context.Context, version string) {
	rootCmd.PersistentFlags().StringVarP(&input.secretfile, "secret-file", "", ".secrets", "file with list of secrets to read from (e.g. --secret-file .secrets)")
	rootCmd.PersistentFlags().BoolVarP(&input.insecureSecrets, "insecure-secrets", "", false, "NOT RECOMMENDED! Doesn't hide secrets while printing logs.")
	rootCmd.PersistentFlags().StringVarP(&input.envfile, "env-file", "", ".env", "environment file to read and use as env in the containers")
+	rootCmd.PersistentFlags().StringVarP(&input.inputfile, "input-file", "", ".input", "input file to read and use as action input")
	rootCmd.PersistentFlags().StringVarP(&input.containerArchitecture, "container-architecture", "", "", "Architecture which should be used to run containers, e.g.: linux/amd64. If not specified, will use host default architecture. Requires Docker server API Version 1.41+. Ignored on earlier Docker server platforms.")
-	rootCmd.PersistentFlags().StringVarP(&input.containerDaemonSocket, "container-daemon-socket", "", "/var/run/docker.sock", "Path to Docker daemon socket which will be mounted to containers")
+	rootCmd.PersistentFlags().StringVarP(&input.containerDaemonSocket, "container-daemon-socket", "", "", "URI to Docker Engine socket (e.g.: unix://~/.docker/run/docker.sock or - to disable bind mounting the socket)")
+	rootCmd.PersistentFlags().StringVarP(&input.containerOptions, "container-options", "", "", "Custom docker container options for the job container without an options property in the job definition")
	rootCmd.PersistentFlags().StringVarP(&input.githubInstance, "github-instance", "", "github.com", "GitHub instance to use. Don't use this if you are not using GitHub Enterprise Server.")
	rootCmd.PersistentFlags().StringVarP(&input.artifactServerPath, "artifact-server-path", "", "", "Defines the path where the artifact server stores uploads and retrieves downloads from. If not specified the artifact server will not start.")
-	rootCmd.PersistentFlags().StringVarP(&input.artifactServerPort, "artifact-server-port", "", "34567", "Defines the port where the artifact server listens (will only bind to localhost).")
+	rootCmd.PersistentFlags().StringVarP(&input.artifactServerAddr, "artifact-server-addr", "", common.GetOutboundIP().String(), "Defines the address to which the artifact server binds.")
+	rootCmd.PersistentFlags().StringVarP(&input.artifactServerPort, "artifact-server-port", "", "34567", "Defines the port where the artifact server listens.")
	rootCmd.PersistentFlags().BoolVarP(&input.noSkipCheckout, "no-skip-checkout", "", false, "Do not skip actions/checkout")
+	rootCmd.PersistentFlags().BoolVarP(&input.noCacheServer, "no-cache-server", "", false, "Disable cache server")
+	rootCmd.PersistentFlags().StringVarP(&input.cacheServerPath, "cache-server-path", "", filepath.Join(CacheHomeDir, "actcache"), "Defines the path where the cache server stores caches.")
+	rootCmd.PersistentFlags().StringVarP(&input.cacheServerAddr, "cache-server-addr", "", common.GetOutboundIP().String(), "Defines the address to which the cache server binds.")
+	rootCmd.PersistentFlags().Uint16VarP(&input.cacheServerPort, "cache-server-port", "", 0, "Defines the port where the artifact server listens. 0 means a randomly available port.")
	rootCmd.SetArgs(args())

	if err := rootCmd.Execute(); err != nil {
@@ -88,26 +100,51 @@ func Execute(ctx context.Context, version string) {
 }

 func configLocations() []string {
-	home, err := homedir.Dir()
-	if err != nil {
-		log.Fatal(err)
-	}
+	configFileName := ".actrc"

	// reference: https://specifications.freedesktop.org/basedir-spec/latest/ar01s03.html
	var actrcXdg string
-	if xdg, ok := os.LookupEnv("XDG_CONFIG_HOME"); ok && xdg != "" {
-		actrcXdg = filepath.Join(xdg, ".actrc")
-	} else {
-		actrcXdg = filepath.Join(home, ".config", ".actrc")
+	for _, fileName := range []string{"act/actrc", configFileName} {
+		if foundConfig, err := xdg.SearchConfigFile(fileName); foundConfig != "" && err == nil {
+			actrcXdg = foundConfig
+			break
+		}
	}

	return []string{
-		filepath.Join(home, ".actrc"),
+		filepath.Join(UserHomeDir, configFileName),
		actrcXdg,
-		filepath.Join(".", ".actrc"),
+		filepath.Join(".", configFileName),
	}
 }

+var commonSocketPaths = []string{
+	"/var/run/docker.sock",
+	"/var/run/podman/podman.sock",
+	"$HOME/.colima/docker.sock",
+	"$XDG_RUNTIME_DIR/docker.sock",
+	`\\.\pipe\docker_engine`,
+	"$HOME/.docker/run/docker.sock",
+}
+
+// returns socket path or false if not found any
+func socketLocation() (string, bool) {
+	if dockerHost, exists := os.LookupEnv("DOCKER_HOST"); exists {
+		return dockerHost, true
+	}
+
+	for _, p := range commonSocketPaths {
+		if _, err := os.Lstat(os.ExpandEnv(p)); err == nil {
+			if strings.HasPrefix(p, `\\.\`) {
+				return "npipe://" + filepath.ToSlash(os.ExpandEnv(p)), true
+			}
+			return "unix://" + filepath.ToSlash(os.ExpandEnv(p)), true
+		}
+	}
+
+	return "", false
+}
+
 func args() []string {
	actrc := configLocations()

@@ -121,15 +158,6 @@ func args() []string {
 }

 func bugReport(ctx context.Context, version string) error {
-	var commonSocketPaths = []string{
-		"/var/run/docker.sock",
-		"/var/run/podman/podman.sock",
-		"$HOME/.colima/docker.sock",
-		"$XDG_RUNTIME_DIR/docker.sock",
-		`\\.\pipe\docker_engine`,
-		"$HOME/.docker/run/docker.sock",
-	}
-
	sprintf := func(key, val string) string {
		return fmt.Sprintf("%-24s%s\n", key, val)
	}
@@ -140,19 +168,20 @@ func bugReport(ctx context.Context, version string) error {
	report += sprintf("NumCPU:", fmt.Sprint(runtime.NumCPU()))

	var dockerHost string
-	if dockerHost = os.Getenv("DOCKER_HOST"); dockerHost == "" {
-		dockerHost = "DOCKER_HOST environment variable is unset/empty."
+	var exists bool
+	if dockerHost, exists = os.LookupEnv("DOCKER_HOST"); !exists {
+		dockerHost = "DOCKER_HOST environment variable is not set"
+	} else if dockerHost == "" {
+		dockerHost = "DOCKER_HOST environment variable is empty."
	}

	report += sprintf("Docker host:", dockerHost)
	report += fmt.Sprintln("Sockets found:")
	for _, p := range commonSocketPaths {
-		if strings.HasPrefix(p, `$`) {
-			v := strings.Split(p, `/`)[0]
-			p = strings.Replace(p, v, os.Getenv(strings.TrimPrefix(v, `$`)), 1)
-		}
-		if _, err := os.Stat(p); err != nil {
+		if _, err := os.Lstat(os.ExpandEnv(p)); err != nil {
			continue
+		} else if _, err := os.Stat(os.ExpandEnv(p)); err != nil {
+			report += fmt.Sprintf("\t%s(broken)\n", p)
		} else {
			report += fmt.Sprintf("\t%s\n", p)
		}
@@ -241,16 +270,57 @@ func readArgsFile(file string, split bool) []string {
	return args
 }

-func setupLogging(cmd *cobra.Command, _ []string) {
+func setup(inputs *Input) func(*cobra.Command, []string) {
+	return func(cmd *cobra.Command, _ []string) {
		verbose, _ := cmd.Flags().GetBool("verbose")
		if verbose {
			log.SetLevel(log.DebugLevel)
		}
+		loadVersionNotices(cmd.Version)
+	}
+}
+
+func cleanup(inputs *Input) func(*cobra.Command, []string) {
+	return func(cmd *cobra.Command, _ []string) {
+		displayNotices(inputs)
+	}
+}
+
+func parseEnvs(env []string, envs map[string]string) bool {
+	if env != nil {
+		for _, envVar := range env {
+			e := strings.SplitN(envVar, `=`, 2)
+			if len(e) == 2 {
+				envs[e[0]] = e[1]
+			} else {
+				envs[e[0]] = ""
+			}
+		}
+		return true
+	}
+	return false
+}
+
+func readYamlFile(file string) (map[string]string, error) {
+	content, err := os.ReadFile(file)
+	if err != nil {
+		return nil, err
+	}
+	ret := map[string]string{}
+	if err = yaml.Unmarshal(content, &ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
 func readEnvs(path string, envs map[string]string) bool {
	if _, err := os.Stat(path); err == nil {
-		env, err := godotenv.Read(path)
+		var env map[string]string
+		if ext := filepath.Ext(path); ext == ".yml" || ext == ".yaml" {
+			env, err = readYamlFile(path)
+		} else {
+			env, err = godotenv.Read(path)
+		}
		if err != nil {
			log.Fatalf("Error loading from %s: %v", path, err)
		}
@@ -262,6 +332,36 @@ func readEnvs(path string, envs map[string]string) bool {
	return false
 }

+func parseMatrix(matrix []string) map[string]map[string]bool {
+	// each matrix entry should be of the form - string:string
+	r := regexp.MustCompile(":")
+	matrixes := make(map[string]map[string]bool)
+	for _, m := range matrix {
+		matrix := r.Split(m, 2)
+		if len(matrix) < 2 {
+			log.Fatalf("Invalid matrix format. Failed to parse %s", m)
+		} else {
+			if _, ok := matrixes[matrix[0]]; !ok {
+				matrixes[matrix[0]] = make(map[string]bool)
+			}
+			matrixes[matrix[0]][matrix[1]] = true
+		}
+	}
+	return matrixes
+}
+
+func isDockerHostURI(daemonPath string) bool {
+	if protoIndex := strings.Index(daemonPath, "://"); protoIndex != -1 {
+		scheme := daemonPath[:protoIndex]
+		if strings.IndexFunc(scheme, func(r rune) bool {
+			return (r < 'a' || r > 'z') && (r < 'A' || r > 'Z')
+		}) == -1 {
+			return true
+		}
+	}
+	return false
+}
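To make the two new helpers concrete, an illustrative snippet showing their behavior on sample inputs (a sketch that assumes `parseMatrix` and `isDockerHostURI` are in scope in package cmd; expected results noted in comments):

```go
// parseMatrix turns repeated --matrix key:value flags into a set per key:
//   --matrix java:13 --matrix java:17 --matrix os:ubuntu-latest
m := parseMatrix([]string{"java:13", "java:17", "os:ubuntu-latest"})
fmt.Println(m) // map[java:map[13:true 17:true] os:map[ubuntu-latest:true]]

// isDockerHostURI accepts only scheme://... values; bare paths and the
// special "-" value are rejected, so they fall through to socket detection.
fmt.Println(isDockerHostURI("unix:///var/run/docker.sock"))    // true
fmt.Println(isDockerHostURI("npipe:////./pipe/docker_engine")) // true
fmt.Println(isDockerHostURI("/var/run/docker.sock"))           // false
fmt.Println(isDockerHostURI("-"))                              // false
```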
+//nolint:gocyclo
 func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []string) error {
	return func(cmd *cobra.Command, args []string) error {
@@ -273,33 +373,54 @@ func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []string) error {
			return bugReport(ctx, cmd.Version)
		}

+		// Prefer DOCKER_HOST, don't override it
+		socketPath, hasDockerHost := os.LookupEnv("DOCKER_HOST")
+		if !hasDockerHost {
+			// a - in containerDaemonSocket means don't mount, preserve this value
+			// otherwise if input.containerDaemonSocket is a filepath don't use it as socketPath
+			skipMount := input.containerDaemonSocket == "-" || !isDockerHostURI(input.containerDaemonSocket)
+			if input.containerDaemonSocket != "" && !skipMount {
+				socketPath = input.containerDaemonSocket
+			} else {
+				socket, found := socketLocation()
+				if !found {
+					log.Errorln("daemon Docker Engine socket not found and containerDaemonSocket option was not set")
+				} else {
+					socketPath = socket
+				}
+				if !skipMount {
+					input.containerDaemonSocket = socketPath
+				}
+			}
+			os.Setenv("DOCKER_HOST", socketPath)
+		}
+
		if runtime.GOOS == "darwin" && runtime.GOARCH == "arm64" && input.containerArchitecture == "" {
			l := log.New()
			l.SetFormatter(&log.TextFormatter{
				DisableQuote:     true,
				DisableTimestamp: true,
			})
-			l.Warnf(" \U000026A0 You are using Apple M1 chip and you have not specified container architecture, you might encounter issues while running act. If so, try running it with '--container-architecture linux/amd64'. \U000026A0 \n")
+			l.Warnf(" \U000026A0 You are using Apple M-series chip and you have not specified container architecture, you might encounter issues while running act. If so, try running it with '--container-architecture linux/amd64'. \U000026A0 \n")
		}

		log.Debugf("Loading environment from %s", input.Envfile())
		envs := make(map[string]string)
-		if input.envs != nil {
-			for _, envVar := range input.envs {
-				e := strings.SplitN(envVar, `=`, 2)
-				if len(e) == 2 {
-					envs[e[0]] = e[1]
-				} else {
-					envs[e[0]] = ""
-				}
-			}
-		}
+		_ = parseEnvs(input.envs, envs)
		_ = readEnvs(input.Envfile(), envs)

+		log.Debugf("Loading action inputs from %s", input.Inputfile())
+		inputs := make(map[string]string)
+		_ = parseEnvs(input.inputs, inputs)
+		_ = readEnvs(input.Inputfile(), inputs)
+
		log.Debugf("Loading secrets from %s", input.Secretfile())
		secrets := newSecrets(input.secrets)
		_ = readEnvs(input.Secretfile(), secrets)

+		matrixes := parseMatrix(input.matrix)
+		log.Debugf("Evaluated matrix inclusions: %v", matrixes)
+
		planner, err := model.NewWorkflowPlanner(input.WorkflowsPath(), input.noWorkflowRecurse)
		if err != nil {
			return err
@@ -329,7 +450,7 @@ func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []string) error {
		var filterPlan *model.Plan

		// Determine the event name to be filtered
-		var filterEventName string = ""
+		var filterEventName string

		if len(args) > 0 {
			log.Debugf("Using first passed in arguments event for filtering: %s", args[0])
@@ -341,23 +462,35 @@ func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []string) error {
			filterEventName = events[0]
		}

+		var plannerErr error
		if jobID != "" {
			log.Debugf("Preparing plan with a job: %s", jobID)
-			filterPlan = planner.PlanJob(jobID)
+			filterPlan, plannerErr = planner.PlanJob(jobID)
		} else if filterEventName != "" {
			log.Debugf("Preparing plan for a event: %s", filterEventName)
-			filterPlan = planner.PlanEvent(filterEventName)
+			filterPlan, plannerErr = planner.PlanEvent(filterEventName)
		} else {
			log.Debugf("Preparing plan with all jobs")
-			filterPlan = planner.PlanAll()
+			filterPlan, plannerErr = planner.PlanAll()
		}
+		if filterPlan == nil && plannerErr != nil {
+			return plannerErr
+		}

		if list {
-			return printList(filterPlan)
+			err = printList(filterPlan)
+			if err != nil {
+				return err
+			}
+			return plannerErr
		}

		if graph {
-			return drawGraph(filterPlan)
+			err = drawGraph(filterPlan)
+			if err != nil {
+				return err
+			}
+			return plannerErr
		}

		// plan with triggered jobs
@@ -385,10 +518,13 @@ func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []string) error {
		// build the plan for this run
		if jobID != "" {
			log.Debugf("Planning job: %s", jobID)
-			plan = planner.PlanJob(jobID)
+			plan, plannerErr = planner.PlanJob(jobID)
		} else {
			log.Debugf("Planning jobs for event: %s", eventName)
-			plan = planner.PlanEvent(eventName)
+			plan, plannerErr = planner.PlanEvent(eventName)
		}
+		if plan == nil && plannerErr != nil {
+			return plannerErr
+		}

		// check to see if the main branch was defined
@@ -414,6 +550,19 @@ func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []string) error {
				input.platforms = readArgsFile(cfgLocations[0], true)
			}
		}
+		deprecationWarning := "--%s is deprecated and will be removed soon, please switch to cli: `--container-options \"%[2]s\"` or `.actrc`: `--container-options %[2]s`."
+		if input.privileged {
+			log.Warnf(deprecationWarning, "privileged", "--privileged")
+		}
+		if len(input.usernsMode) > 0 {
+			log.Warnf(deprecationWarning, "userns", fmt.Sprintf("--userns=%s", input.usernsMode))
+		}
+		if len(input.containerCapAdd) > 0 {
+			log.Warnf(deprecationWarning, "container-cap-add", fmt.Sprintf("--cap-add=%s", input.containerCapAdd))
+		}
+		if len(input.containerCapDrop) > 0 {
+			log.Warnf(deprecationWarning, "container-cap-drop", fmt.Sprintf("--cap-drop=%s", input.containerCapDrop))
+		}

		// run the plan
		config := &runner.Config{
@@ -430,6 +579,7 @@ func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []string) error {
			JSONLogger:            input.jsonLogger,
			Env:                   envs,
			Secrets:               secrets,
+			Inputs:                inputs,
			Token:                 secrets["GITHUB_TOKEN"],
			InsecureSecrets:       input.insecureSecrets,
			Platforms:             input.newPlatforms(),
@@ -437,37 +587,60 @@ func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []string) error {
			UsernsMode:            input.usernsMode,
			ContainerArchitecture: input.containerArchitecture,
			ContainerDaemonSocket: input.containerDaemonSocket,
+			ContainerOptions:      input.containerOptions,
			UseGitIgnore:          input.useGitIgnore,
			GitHubInstance:        input.githubInstance,
			ContainerCapAdd:       input.containerCapAdd,
			ContainerCapDrop:      input.containerCapDrop,
			AutoRemove:            input.autoRemove,
			ArtifactServerPath:    input.artifactServerPath,
+			ArtifactServerAddr:    input.artifactServerAddr,
			ArtifactServerPort:    input.artifactServerPort,
			NoSkipCheckout:        input.noSkipCheckout,
			RemoteName:            input.remoteName,
			ReplaceGheActionWithGithubCom:      input.replaceGheActionWithGithubCom,
			ReplaceGheActionTokenWithGithubCom: input.replaceGheActionTokenWithGithubCom,
+			Matrix:                             matrixes,
		}
		r, err := runner.New(config)
		if err != nil {
			return err
		}

-		cancel := artifacts.Serve(ctx, input.artifactServerPath, input.artifactServerPort)
+		cancel := artifacts.Serve(ctx, input.artifactServerPath, input.artifactServerAddr, input.artifactServerPort)

+		const cacheURLKey = "ACTIONS_CACHE_URL"
+		var cacheHandler *artifactcache.Handler
+		if !input.noCacheServer && envs[cacheURLKey] == "" {
+			var err error
+			cacheHandler, err = artifactcache.StartHandler(input.cacheServerPath, input.cacheServerAddr, input.cacheServerPort, common.Logger(ctx))
+			if err != nil {
+				return err
+			}
+			envs[cacheURLKey] = cacheHandler.ExternalURL() + "/"
+		}
+
		ctx = common.WithDryrun(ctx, input.dryrun)
		if watch, err := cmd.Flags().GetBool("watch"); err != nil {
			return err
		} else if watch {
-			return watchAndRun(ctx, r.NewPlanExecutor(plan))
+			err = watchAndRun(ctx, r.NewPlanExecutor(plan))
+			if err != nil {
+				return err
+			}
+			return plannerErr
		}

		executor := r.NewPlanExecutor(plan).Finally(func(ctx context.Context) error {
			cancel()
+			_ = cacheHandler.Close()
			return nil
		})
-		return executor(ctx)
+		err = executor(ctx)
+		if err != nil {
+			return err
+		}
+		return plannerErr
	}
 }
@@ -492,7 +665,7 @@ func defaultImageSurvey(actrc string) error {
	case "Medium":
		option = "-P ubuntu-latest=catthehacker/ubuntu:act-latest\n-P ubuntu-22.04=catthehacker/ubuntu:act-22.04\n-P ubuntu-20.04=catthehacker/ubuntu:act-20.04\n-P ubuntu-18.04=catthehacker/ubuntu:act-18.04\n"
	case "Micro":
-		option = "-P ubuntu-latest=node:16-buster-slim\n-P -P ubuntu-22.04=node:16-bullseye-slim\n ubuntu-20.04=node:16-buster-slim\n-P ubuntu-18.04=node:16-buster-slim\n"
+		option = "-P ubuntu-latest=node:16-buster-slim\n-P ubuntu-22.04=node:16-bullseye-slim\n-P ubuntu-20.04=node:16-buster-slim\n-P ubuntu-18.04=node:16-buster-slim\n"
	}

	f, err := os.Create(actrc)
@@ -515,45 +688,47 @@ func defaultImageSurvey(actrc string) error {
 }

 func watchAndRun(ctx context.Context, fn common.Executor) error {
-	recurse := true
-	checkIntervalInSeconds := 2
	dir, err := os.Getwd()
	if err != nil {
		return err
	}

-	var ignore *gitignore.GitIgnore
-	if _, err := os.Stat(filepath.Join(dir, ".gitignore")); !os.IsNotExist(err) {
-		ignore, _ = gitignore.CompileIgnoreFile(filepath.Join(dir, ".gitignore"))
-	} else {
-		ignore = &gitignore.GitIgnore{}
+	ignoreFile := filepath.Join(dir, ".gitignore")
+	ignore := &gitignore.GitIgnore{}
+	if info, err := os.Stat(ignoreFile); err == nil && !info.IsDir() {
+		ignore, err = gitignore.CompileIgnoreFile(ignoreFile)
+		if err != nil {
+			return fmt.Errorf("compile %q: %w", ignoreFile, err)
+		}
	}

	folderWatcher := fswatch.NewFolderWatcher(
		dir,
-		recurse,
+		true,
		ignore.MatchesPath,
-		checkIntervalInSeconds,
+		2, // 2 seconds
	)

	folderWatcher.Start()
+	defer folderWatcher.Stop()

-	go func() {
-		for folderWatcher.IsRunning() {
-			if err = fn(ctx); err != nil {
-				break
-			}
-			log.Debugf("Watching %s for changes", dir)
-			for changes := range folderWatcher.ChangeDetails() {
-				log.Debugf("%s", changes.String())
-				if err = fn(ctx); err != nil {
-					break
-				}
-				log.Debugf("Watching %s for changes", dir)
-			}
-		}
-	}()
-	<-ctx.Done()
-	folderWatcher.Stop()
+	// run once before watching
+	if err := fn(ctx); err != nil {
+		return err
+	}
+
+	for folderWatcher.IsRunning() {
+		log.Debugf("Watching %s for changes", dir)
+		select {
+		case <-ctx.Done():
+			return nil
+		case changes := <-folderWatcher.ChangeDetails():
+			log.Debugf("%s", changes.String())
+			if err := fn(ctx); err != nil {
+				return err
+			}
+		}
+	}

	return nil
 }
88
go.mod
@@ -5,81 +5,81 @@ go 1.18
require (
    github.com/AlecAivazis/survey/v2 v2.3.6
    github.com/Masterminds/semver v1.5.0
    github.com/adrg/xdg v0.4.0
    github.com/andreaskoch/go-fswatch v1.0.0
    github.com/creack/pty v1.1.18
    github.com/docker/cli v20.10.21+incompatible
    github.com/docker/cli v23.0.4+incompatible
    github.com/docker/distribution v2.8.1+incompatible
    github.com/docker/docker v20.10.21+incompatible
    github.com/docker/docker v23.0.4+incompatible
    github.com/docker/go-connections v0.4.0
    github.com/go-git/go-billy/v5 v5.3.1
    github.com/go-git/go-git/v5 v5.4.2
    github.com/go-ini/ini v1.67.0
    github.com/imdario/mergo v0.3.13
    github.com/joho/godotenv v1.4.0
    github.com/go-git/go-billy/v5 v5.4.1
    github.com/go-git/go-git/v5 v5.6.2-0.20230411180853-ce62f3e9ff86
    github.com/imdario/mergo v0.3.15
    github.com/joho/godotenv v1.5.1
    github.com/julienschmidt/httprouter v1.3.0
    github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51
    github.com/mattn/go-isatty v0.0.16
    github.com/mitchellh/go-homedir v1.1.0
    github.com/moby/buildkit v0.10.6
    github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799
    github.com/opencontainers/selinux v1.10.2
    github.com/mattn/go-isatty v0.0.18
    github.com/moby/buildkit v0.11.6
    github.com/moby/patternmatcher v0.5.0
    github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b
    github.com/opencontainers/selinux v1.11.0
    github.com/pkg/errors v0.9.1
    github.com/rhysd/actionlint v1.6.22
    github.com/rhysd/actionlint v1.6.24
    github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06
    github.com/sirupsen/logrus v1.9.0
    github.com/spf13/cobra v1.6.1
    github.com/spf13/cobra v1.7.0
    github.com/spf13/pflag v1.0.5
    github.com/stretchr/testify v1.8.1
    golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
    github.com/stretchr/testify v1.8.2
    github.com/timshannon/bolthold v0.0.0-20210913165410-232392fc8a6a
    go.etcd.io/bbolt v1.3.7
    golang.org/x/term v0.7.0
    gopkg.in/yaml.v3 v3.0.1
    gotest.tools/v3 v3.4.0
)

require (
    github.com/Microsoft/go-winio v0.5.2 // indirect
    github.com/Microsoft/hcsshim v0.9.3 // indirect
    github.com/ProtonMail/go-crypto v0.0.0-20220404123522-616f957b79ad // indirect
    github.com/acomagu/bufpipe v1.0.3 // indirect
    github.com/containerd/cgroups v1.0.3 // indirect
    github.com/containerd/containerd v1.6.6 // indirect
    github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 // indirect
    github.com/acomagu/bufpipe v1.0.4 // indirect
    github.com/cloudflare/circl v1.1.0 // indirect
    github.com/containerd/containerd v1.6.20 // indirect
    github.com/davecgh/go-spew v1.1.1 // indirect
    github.com/docker/docker-credential-helpers v0.6.4 // indirect
    github.com/docker/go-units v0.4.0 // indirect
    github.com/emirpasic/gods v1.12.0 // indirect
    github.com/fatih/color v1.13.0 // indirect
    github.com/docker/docker-credential-helpers v0.7.0 // indirect
    github.com/docker/go-units v0.5.0 // indirect
    github.com/emirpasic/gods v1.18.1 // indirect
    github.com/fatih/color v1.15.0 // indirect
    github.com/go-git/gcfg v1.5.0 // indirect
    github.com/gogo/protobuf v1.3.2 // indirect
    github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
    github.com/google/go-cmp v0.5.7 // indirect
    github.com/google/go-cmp v0.5.9 // indirect
    github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
    github.com/inconshreveable/mousetrap v1.0.1 // indirect
    github.com/inconshreveable/mousetrap v1.1.0 // indirect
    github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
    github.com/kevinburke/ssh_config v1.2.0 // indirect
    github.com/klauspost/compress v1.15.12 // indirect
    github.com/mattn/go-colorable v0.1.13 // indirect
    github.com/mattn/go-runewidth v0.0.13 // indirect
    github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
    github.com/mattn/go-runewidth v0.0.14 // indirect
    github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect
    github.com/mitchellh/mapstructure v1.1.2 // indirect
    github.com/moby/sys/mount v0.3.1 // indirect
    github.com/moby/sys/mountinfo v0.6.0 // indirect
    github.com/moby/sys/sequential v0.5.0 // indirect
    github.com/moby/term v0.0.0-20200312100748-672ec06f55cd // indirect
    github.com/opencontainers/go-digest v1.0.0 // indirect
    github.com/opencontainers/runc v1.1.2 // indirect
    github.com/opencontainers/runc v1.1.5 // indirect
    github.com/pjbgf/sha1cd v0.3.0 // indirect
    github.com/pmezard/go-difflib v1.0.0 // indirect
    github.com/rivo/uniseg v0.3.4 // indirect
    github.com/rivo/uniseg v0.4.4 // indirect
    github.com/robfig/cron v1.2.0 // indirect
    github.com/sergi/go-diff v1.2.0 // indirect
    github.com/skeema/knownhosts v1.1.0 // indirect
    github.com/stretchr/objx v0.5.0 // indirect
    github.com/xanzy/ssh-agent v0.3.1 // indirect
    github.com/xanzy/ssh-agent v0.3.3 // indirect
    github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
    github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
    github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f // indirect
    go.opencensus.io v0.23.0 // indirect
    golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29 // indirect
    golang.org/x/net v0.0.0-20220906165146-f3363e06e74c // indirect
    golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde // indirect
    golang.org/x/sys v0.0.0-20220818161305-2296e01440c6 // indirect
    golang.org/x/text v0.3.7 // indirect
    github.com/xeipuuv/gojsonschema v1.2.0 // indirect
    golang.org/x/crypto v0.6.0 // indirect
    golang.org/x/net v0.7.0 // indirect
    golang.org/x/sync v0.1.0 // indirect
    golang.org/x/sys v0.7.0 // indirect
    golang.org/x/text v0.7.0 // indirect
    gopkg.in/warnings.v0 v0.1.2 // indirect
    gopkg.in/yaml.v2 v2.4.0 // indirect
)

replace github.com/go-git/go-git/v5 => github.com/ZauberNerd/go-git/v5 v5.4.3-0.20220315170230-29ec1bc1e5db

4
main.go
@@ -2,6 +2,7 @@ package main

import (
    "context"
    _ "embed"
    "os"
    "os/signal"
    "syscall"
@@ -9,7 +10,8 @@ import (
    "github.com/nektos/act/cmd"
)

var version = "v0.2.27-dev" // Manually bump after tagging next release
//go:embed VERSION
var version string

func main() {
    ctx := context.Background()

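The main.go hunk replaces a hand-bumped constant with an embedded file. A standalone sketch of the go:embed mechanism (standard since Go 1.16; a VERSION file must exist next to the source at build time, as in the diff):

package main

import (
    _ "embed" // blank import is required when embedding into a plain variable
    "fmt"
)

//go:embed VERSION
var version string // the file's bytes are compiled into the binary

func main() {
    fmt.Println("act version:", version)
}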
8
pkg/artifactcache/doc.go
Normal file
@@ -0,0 +1,8 @@
// Package artifactcache provides a cache handler for the runner.
//
// Inspired by https://github.com/sp-ricard-valverde/github-act-cache-server
//
// TODO: Authorization
// TODO: Restrictions for accessing a cache, see https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#restrictions-for-accessing-a-cache
// TODO: Force deleting cache entries, see https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#force-deleting-cache-entries
package artifactcache
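To make doc.go's description concrete, a hedged sketch of starting the handler and pointing a workflow's cache client at it. The ACTIONS_CACHE_URL wiring is an assumption about how a caller uses the package (it is the variable the actions/cache client reads), not code from this diff:

package main

import (
    "log"
    "os"

    "github.com/nektos/act/pkg/artifactcache"
)

func main() {
    // dir == "" falls back to ~/.cache/actcache; port 0 picks a free port.
    handler, err := artifactcache.StartHandler("", "", 0, nil)
    if err != nil {
        log.Fatal(err)
    }
    defer handler.Close()

    // Assumption: the actions/cache client discovers its server through
    // ACTIONS_CACHE_URL, which expects a trailing slash.
    os.Setenv("ACTIONS_CACHE_URL", handler.ExternalURL()+"/")
}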
488
pkg/artifactcache/handler.go
Normal file
@@ -0,0 +1,488 @@
package artifactcache

import (
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "net"
    "net/http"
    "os"
    "path/filepath"
    "strconv"
    "strings"
    "sync/atomic"
    "time"

    "github.com/julienschmidt/httprouter"
    "github.com/sirupsen/logrus"
    "github.com/timshannon/bolthold"
    "go.etcd.io/bbolt"

    "github.com/nektos/act/pkg/common"
)

const (
    urlBase = "/_apis/artifactcache"
)

type Handler struct {
    db       *bolthold.Store
    storage  *Storage
    router   *httprouter.Router
    listener net.Listener
    server   *http.Server
    logger   logrus.FieldLogger

    gcing int32 // TODO: use atomic.Bool when we can use Go 1.19
    gcAt  time.Time

    outboundIP string
}

func StartHandler(dir, outboundIP string, port uint16, logger logrus.FieldLogger) (*Handler, error) {
    h := &Handler{}

    if logger == nil {
        discard := logrus.New()
        discard.Out = io.Discard
        logger = discard
    }
    logger = logger.WithField("module", "artifactcache")
    h.logger = logger

    if dir == "" {
        home, err := os.UserHomeDir()
        if err != nil {
            return nil, err
        }
        dir = filepath.Join(home, ".cache", "actcache")
    }
    if err := os.MkdirAll(dir, 0o755); err != nil {
        return nil, err
    }

    db, err := bolthold.Open(filepath.Join(dir, "bolt.db"), 0o644, &bolthold.Options{
        Encoder: json.Marshal,
        Decoder: json.Unmarshal,
        Options: &bbolt.Options{
            Timeout:      5 * time.Second,
            NoGrowSync:   bbolt.DefaultOptions.NoGrowSync,
            FreelistType: bbolt.DefaultOptions.FreelistType,
        },
    })
    if err != nil {
        return nil, err
    }
    h.db = db

    storage, err := NewStorage(filepath.Join(dir, "cache"))
    if err != nil {
        return nil, err
    }
    h.storage = storage

    if outboundIP != "" {
        h.outboundIP = outboundIP
    } else if ip := common.GetOutboundIP(); ip == nil {
        return nil, fmt.Errorf("unable to determine outbound IP address")
    } else {
        h.outboundIP = ip.String()
    }

    router := httprouter.New()
    router.GET(urlBase+"/cache", h.middleware(h.find))
    router.POST(urlBase+"/caches", h.middleware(h.reserve))
    router.PATCH(urlBase+"/caches/:id", h.middleware(h.upload))
    router.POST(urlBase+"/caches/:id", h.middleware(h.commit))
    router.GET(urlBase+"/artifacts/:id", h.middleware(h.get))
    router.POST(urlBase+"/clean", h.middleware(h.clean))

    h.router = router

    h.gcCache()

    listener, err := net.Listen("tcp", fmt.Sprintf(":%d", port)) // listen on all interfaces
    if err != nil {
        return nil, err
    }
    server := &http.Server{
        ReadHeaderTimeout: 2 * time.Second,
        Handler:           router,
    }
    go func() {
        if err := server.Serve(listener); err != nil && !errors.Is(err, net.ErrClosed) {
            logger.Errorf("http serve: %v", err)
        }
    }()
    h.listener = listener
    h.server = server

    return h, nil
}

func (h *Handler) ExternalURL() string {
    // TODO: make the external url configurable if necessary
    return fmt.Sprintf("http://%s:%d",
        h.outboundIP,
        h.listener.Addr().(*net.TCPAddr).Port)
}

func (h *Handler) Close() error {
    if h == nil {
        return nil
    }
    var retErr error
    if h.server != nil {
        err := h.server.Close()
        if err != nil {
            retErr = err
        }
        h.server = nil
    }
    if h.listener != nil {
        err := h.listener.Close()
        if errors.Is(err, net.ErrClosed) {
            err = nil
        }
        if err != nil {
            retErr = err
        }
        h.listener = nil
    }
    if h.db != nil {
        err := h.db.Close()
        if err != nil {
            retErr = err
        }
        h.db = nil
    }
    return retErr
}

// GET /_apis/artifactcache/cache
func (h *Handler) find(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
    keys := strings.Split(r.URL.Query().Get("keys"), ",")
    // cache keys are case insensitive
    for i, key := range keys {
        keys[i] = strings.ToLower(key)
    }
    version := r.URL.Query().Get("version")

    cache, err := h.findCache(keys, version)
    if err != nil {
        h.responseJSON(w, r, 500, err)
        return
    }
    if cache == nil {
        h.responseJSON(w, r, 204)
        return
    }

    if ok, err := h.storage.Exist(cache.ID); err != nil {
        h.responseJSON(w, r, 500, err)
        return
    } else if !ok {
        _ = h.db.Delete(cache.ID, cache)
        h.responseJSON(w, r, 204)
        return
    }
    h.responseJSON(w, r, 200, map[string]any{
        "result":          "hit",
        "archiveLocation": fmt.Sprintf("%s%s/artifacts/%d", h.ExternalURL(), urlBase, cache.ID),
        "cacheKey":        cache.Key,
    })
}

// POST /_apis/artifactcache/caches
func (h *Handler) reserve(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
    api := &Request{}
    if err := json.NewDecoder(r.Body).Decode(api); err != nil {
        h.responseJSON(w, r, 400, err)
        return
    }
    // cache keys are case insensitive
    api.Key = strings.ToLower(api.Key)

    cache := api.ToCache()
    cache.FillKeyVersionHash()
    if err := h.db.FindOne(cache, bolthold.Where("KeyVersionHash").Eq(cache.KeyVersionHash)); err != nil {
        if !errors.Is(err, bolthold.ErrNotFound) {
            h.responseJSON(w, r, 500, err)
            return
        }
    } else {
        h.responseJSON(w, r, 400, fmt.Errorf("already exist"))
        return
    }

    now := time.Now().Unix()
    cache.CreatedAt = now
    cache.UsedAt = now
    if err := h.db.Insert(bolthold.NextSequence(), cache); err != nil {
        h.responseJSON(w, r, 500, err)
        return
    }
    // write back id to db
    if err := h.db.Update(cache.ID, cache); err != nil {
        h.responseJSON(w, r, 500, err)
        return
    }
    h.responseJSON(w, r, 200, map[string]any{
        "cacheId": cache.ID,
    })
}

// PATCH /_apis/artifactcache/caches/:id
func (h *Handler) upload(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
    id, err := strconv.ParseInt(params.ByName("id"), 10, 64)
    if err != nil {
        h.responseJSON(w, r, 400, err)
        return
    }

    cache := &Cache{}
    if err := h.db.Get(id, cache); err != nil {
        if errors.Is(err, bolthold.ErrNotFound) {
            h.responseJSON(w, r, 400, fmt.Errorf("cache %d: not reserved", id))
            return
        }
        h.responseJSON(w, r, 500, err)
        return
    }

    if cache.Complete {
        h.responseJSON(w, r, 400, fmt.Errorf("cache %v %q: already complete", cache.ID, cache.Key))
        return
    }
    start, _, err := parseContentRange(r.Header.Get("Content-Range"))
    if err != nil {
        h.responseJSON(w, r, 400, err)
        return
    }
    if err := h.storage.Write(cache.ID, start, r.Body); err != nil {
        h.responseJSON(w, r, 500, err)
    }
    h.useCache(id)
    h.responseJSON(w, r, 200)
}

// POST /_apis/artifactcache/caches/:id
func (h *Handler) commit(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
    id, err := strconv.ParseInt(params.ByName("id"), 10, 64)
    if err != nil {
        h.responseJSON(w, r, 400, err)
        return
    }

    cache := &Cache{}
    if err := h.db.Get(id, cache); err != nil {
        if errors.Is(err, bolthold.ErrNotFound) {
            h.responseJSON(w, r, 400, fmt.Errorf("cache %d: not reserved", id))
            return
        }
        h.responseJSON(w, r, 500, err)
        return
    }

    if cache.Complete {
        h.responseJSON(w, r, 400, fmt.Errorf("cache %v %q: already complete", cache.ID, cache.Key))
        return
    }

    if err := h.storage.Commit(cache.ID, cache.Size); err != nil {
        h.responseJSON(w, r, 500, err)
        return
    }

    cache.Complete = true
    if err := h.db.Update(cache.ID, cache); err != nil {
        h.responseJSON(w, r, 500, err)
        return
    }

    h.responseJSON(w, r, 200)
}

// GET /_apis/artifactcache/artifacts/:id
func (h *Handler) get(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
    id, err := strconv.ParseInt(params.ByName("id"), 10, 64)
    if err != nil {
        h.responseJSON(w, r, 400, err)
        return
    }
    h.useCache(id)
    h.storage.Serve(w, r, uint64(id))
}

// POST /_apis/artifactcache/clean
func (h *Handler) clean(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
    // TODO: don't support force deleting cache entries
    // see: https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#force-deleting-cache-entries

    h.responseJSON(w, r, 200)
}

func (h *Handler) middleware(handler httprouter.Handle) httprouter.Handle {
    return func(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
        h.logger.Debugf("%s %s", r.Method, r.RequestURI)
        handler(w, r, params)
        go h.gcCache()
    }
}

// if not found, return (nil, nil) instead of an error.
func (h *Handler) findCache(keys []string, version string) (*Cache, error) {
    if len(keys) == 0 {
        return nil, nil
    }
    key := keys[0] // the first key is for exact match.

    cache := &Cache{
        Key:     key,
        Version: version,
    }
    cache.FillKeyVersionHash()

    if err := h.db.FindOne(cache, bolthold.Where("KeyVersionHash").Eq(cache.KeyVersionHash)); err != nil {
        if !errors.Is(err, bolthold.ErrNotFound) {
            return nil, err
        }
    } else if cache.Complete {
        return cache, nil
    }
    stop := fmt.Errorf("stop")

    for _, prefix := range keys[1:] {
        found := false
        if err := h.db.ForEach(bolthold.Where("Key").Ge(prefix).And("Version").Eq(version).SortBy("Key"), func(v *Cache) error {
            if !strings.HasPrefix(v.Key, prefix) {
                return stop
            }
            if v.Complete {
                cache = v
                found = true
                return stop
            }
            return nil
        }); err != nil {
            if !errors.Is(err, stop) {
                return nil, err
            }
        }
        if found {
            return cache, nil
        }
    }
    return nil, nil
}

func (h *Handler) useCache(id int64) {
    cache := &Cache{}
    if err := h.db.Get(id, cache); err != nil {
        return
    }
    cache.UsedAt = time.Now().Unix()
    _ = h.db.Update(cache.ID, cache)
}

func (h *Handler) gcCache() {
    if atomic.LoadInt32(&h.gcing) != 0 {
        return
    }
    if !atomic.CompareAndSwapInt32(&h.gcing, 0, 1) {
        return
    }
    defer atomic.StoreInt32(&h.gcing, 0)

    if time.Since(h.gcAt) < time.Hour {
        h.logger.Debugf("skip gc: %v", h.gcAt.String())
        return
    }
    h.gcAt = time.Now()
    h.logger.Debugf("gc: %v", h.gcAt.String())

    const (
        keepUsed   = 30 * 24 * time.Hour
        keepUnused = 7 * 24 * time.Hour
        keepTemp   = 5 * time.Minute
    )

    var caches []*Cache
    if err := h.db.Find(&caches, bolthold.Where("UsedAt").Lt(time.Now().Add(-keepTemp).Unix())); err != nil {
        h.logger.Warnf("find caches: %v", err)
    } else {
        for _, cache := range caches {
            if cache.Complete {
                continue
            }
            h.storage.Remove(cache.ID)
            if err := h.db.Delete(cache.ID, cache); err != nil {
                h.logger.Warnf("delete cache: %v", err)
                continue
            }
            h.logger.Infof("deleted cache: %+v", cache)
        }
    }

    caches = caches[:0]
    if err := h.db.Find(&caches, bolthold.Where("UsedAt").Lt(time.Now().Add(-keepUnused).Unix())); err != nil {
        h.logger.Warnf("find caches: %v", err)
    } else {
        for _, cache := range caches {
            h.storage.Remove(cache.ID)
            if err := h.db.Delete(cache.ID, cache); err != nil {
                h.logger.Warnf("delete cache: %v", err)
                continue
            }
            h.logger.Infof("deleted cache: %+v", cache)
        }
    }

    caches = caches[:0]
    if err := h.db.Find(&caches, bolthold.Where("CreatedAt").Lt(time.Now().Add(-keepUsed).Unix())); err != nil {
        h.logger.Warnf("find caches: %v", err)
    } else {
        for _, cache := range caches {
            h.storage.Remove(cache.ID)
            if err := h.db.Delete(cache.ID, cache); err != nil {
                h.logger.Warnf("delete cache: %v", err)
                continue
            }
            h.logger.Infof("deleted cache: %+v", cache)
        }
    }
}

func (h *Handler) responseJSON(w http.ResponseWriter, r *http.Request, code int, v ...any) {
    w.Header().Set("Content-Type", "application/json; charset=utf-8")
    var data []byte
    if len(v) == 0 || v[0] == nil {
        data, _ = json.Marshal(struct{}{})
    } else if err, ok := v[0].(error); ok {
        h.logger.Errorf("%v %v: %v", r.Method, r.RequestURI, err)
        data, _ = json.Marshal(map[string]any{
            "error": err.Error(),
        })
    } else {
        data, _ = json.Marshal(v[0])
    }
    w.WriteHeader(code)
    _, _ = w.Write(data)
}

func parseContentRange(s string) (int64, int64, error) {
    // support the format like "bytes 11-22/*" only
    s, _, _ = strings.Cut(strings.TrimPrefix(s, "bytes "), "/")
    s1, s2, _ := strings.Cut(s, "-")

    start, err := strconv.ParseInt(s1, 10, 64)
    if err != nil {
        return 0, 0, fmt.Errorf("parse %q: %w", s, err)
    }
    stop, err := strconv.ParseInt(s2, 10, 64)
    if err != nil {
        return 0, 0, fmt.Errorf("parse %q: %w", s, err)
    }
    return start, stop, nil
}
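The Content-Range parsing at the end of handler.go is deliberately narrow. A quick standalone check of the one accepted shape; the function body is copied from above so the snippet runs on its own:

package main

import (
    "fmt"
    "strconv"
    "strings"
)

func parseContentRange(s string) (int64, int64, error) {
    // support the format like "bytes 11-22/*" only
    s, _, _ = strings.Cut(strings.TrimPrefix(s, "bytes "), "/")
    s1, s2, _ := strings.Cut(s, "-")

    start, err := strconv.ParseInt(s1, 10, 64)
    if err != nil {
        return 0, 0, fmt.Errorf("parse %q: %w", s, err)
    }
    stop, err := strconv.ParseInt(s2, 10, 64)
    if err != nil {
        return 0, 0, fmt.Errorf("parse %q: %w", s, err)
    }
    return start, stop, nil
}

func main() {
    fmt.Println(parseContentRange("bytes 0-99/*"))  // 0 99 <nil>
    fmt.Println(parseContentRange("bytes xx-99/*")) // 0 0 parse "xx-99": ... (rejected, as the tests expect)
}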
469
pkg/artifactcache/handler_test.go
Normal file
@@ -0,0 +1,469 @@
package artifactcache

import (
    "bytes"
    "crypto/rand"
    "encoding/json"
    "fmt"
    "io"
    "net/http"
    "path/filepath"
    "strings"
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "go.etcd.io/bbolt"
)

func TestHandler(t *testing.T) {
    dir := filepath.Join(t.TempDir(), "artifactcache")
    handler, err := StartHandler(dir, "", 0, nil)
    require.NoError(t, err)

    base := fmt.Sprintf("%s%s", handler.ExternalURL(), urlBase)

    defer func() {
t.Run("inpect db", func(t *testing.T) {
|
||||
            require.NoError(t, handler.db.Bolt().View(func(tx *bbolt.Tx) error {
                return tx.Bucket([]byte("Cache")).ForEach(func(k, v []byte) error {
                    t.Logf("%s: %s", k, v)
                    return nil
                })
            }))
        })
        t.Run("close", func(t *testing.T) {
            require.NoError(t, handler.Close())
            assert.Nil(t, handler.server)
            assert.Nil(t, handler.listener)
            assert.Nil(t, handler.db)
            _, err := http.Post(fmt.Sprintf("%s/caches/%d", base, 1), "", nil)
            assert.Error(t, err)
        })
    }()

    t.Run("get not exist", func(t *testing.T) {
        key := strings.ToLower(t.Name())
        version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
        resp, err := http.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, key, version))
        require.NoError(t, err)
        require.Equal(t, 204, resp.StatusCode)
    })

    t.Run("reserve and upload", func(t *testing.T) {
        key := strings.ToLower(t.Name())
        version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
        content := make([]byte, 100)
        _, err := rand.Read(content)
        require.NoError(t, err)
        uploadCacheNormally(t, base, key, version, content)
    })

    t.Run("clean", func(t *testing.T) {
        resp, err := http.Post(fmt.Sprintf("%s/clean", base), "", nil)
        require.NoError(t, err)
        assert.Equal(t, 200, resp.StatusCode)
    })

    t.Run("reserve with bad request", func(t *testing.T) {
        body := []byte(`invalid json`)
        require.NoError(t, err)
        resp, err := http.Post(fmt.Sprintf("%s/caches", base), "application/json", bytes.NewReader(body))
        require.NoError(t, err)
        assert.Equal(t, 400, resp.StatusCode)
    })

    t.Run("duplicate reserve", func(t *testing.T) {
        key := strings.ToLower(t.Name())
        version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
        {
            body, err := json.Marshal(&Request{
                Key:     key,
                Version: version,
                Size:    100,
            })
            require.NoError(t, err)
            resp, err := http.Post(fmt.Sprintf("%s/caches", base), "application/json", bytes.NewReader(body))
            require.NoError(t, err)
            assert.Equal(t, 200, resp.StatusCode)

            got := struct {
                CacheID uint64 `json:"cacheId"`
            }{}
            require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
        }
        {
            body, err := json.Marshal(&Request{
                Key:     key,
                Version: version,
                Size:    100,
            })
            require.NoError(t, err)
            resp, err := http.Post(fmt.Sprintf("%s/caches", base), "application/json", bytes.NewReader(body))
            require.NoError(t, err)
            assert.Equal(t, 400, resp.StatusCode)
        }
    })

    t.Run("upload with bad id", func(t *testing.T) {
        req, err := http.NewRequest(http.MethodPatch,
            fmt.Sprintf("%s/caches/invalid_id", base), bytes.NewReader(nil))
        require.NoError(t, err)
        req.Header.Set("Content-Type", "application/octet-stream")
        req.Header.Set("Content-Range", "bytes 0-99/*")
        resp, err := http.DefaultClient.Do(req)
        require.NoError(t, err)
        assert.Equal(t, 400, resp.StatusCode)
    })

    t.Run("upload without reserve", func(t *testing.T) {
        req, err := http.NewRequest(http.MethodPatch,
            fmt.Sprintf("%s/caches/%d", base, 1000), bytes.NewReader(nil))
        require.NoError(t, err)
        req.Header.Set("Content-Type", "application/octet-stream")
        req.Header.Set("Content-Range", "bytes 0-99/*")
        resp, err := http.DefaultClient.Do(req)
        require.NoError(t, err)
        assert.Equal(t, 400, resp.StatusCode)
    })

    t.Run("upload with complete", func(t *testing.T) {
        key := strings.ToLower(t.Name())
        version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
        var id uint64
        content := make([]byte, 100)
        _, err := rand.Read(content)
        require.NoError(t, err)
        {
            body, err := json.Marshal(&Request{
                Key:     key,
                Version: version,
                Size:    100,
            })
            require.NoError(t, err)
            resp, err := http.Post(fmt.Sprintf("%s/caches", base), "application/json", bytes.NewReader(body))
            require.NoError(t, err)
            assert.Equal(t, 200, resp.StatusCode)

            got := struct {
                CacheID uint64 `json:"cacheId"`
            }{}
            require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
            id = got.CacheID
        }
        {
            req, err := http.NewRequest(http.MethodPatch,
                fmt.Sprintf("%s/caches/%d", base, id), bytes.NewReader(content))
            require.NoError(t, err)
            req.Header.Set("Content-Type", "application/octet-stream")
            req.Header.Set("Content-Range", "bytes 0-99/*")
            resp, err := http.DefaultClient.Do(req)
            require.NoError(t, err)
            assert.Equal(t, 200, resp.StatusCode)
        }
        {
            resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil)
            require.NoError(t, err)
            assert.Equal(t, 200, resp.StatusCode)
        }
        {
            req, err := http.NewRequest(http.MethodPatch,
                fmt.Sprintf("%s/caches/%d", base, id), bytes.NewReader(content))
            require.NoError(t, err)
            req.Header.Set("Content-Type", "application/octet-stream")
            req.Header.Set("Content-Range", "bytes 0-99/*")
            resp, err := http.DefaultClient.Do(req)
            require.NoError(t, err)
            assert.Equal(t, 400, resp.StatusCode)
        }
    })

    t.Run("upload with invalid range", func(t *testing.T) {
        key := strings.ToLower(t.Name())
        version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
        var id uint64
        content := make([]byte, 100)
        _, err := rand.Read(content)
        require.NoError(t, err)
        {
            body, err := json.Marshal(&Request{
                Key:     key,
                Version: version,
                Size:    100,
            })
            require.NoError(t, err)
            resp, err := http.Post(fmt.Sprintf("%s/caches", base), "application/json", bytes.NewReader(body))
            require.NoError(t, err)
            assert.Equal(t, 200, resp.StatusCode)

            got := struct {
                CacheID uint64 `json:"cacheId"`
            }{}
            require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
            id = got.CacheID
        }
        {
            req, err := http.NewRequest(http.MethodPatch,
                fmt.Sprintf("%s/caches/%d", base, id), bytes.NewReader(content))
            require.NoError(t, err)
            req.Header.Set("Content-Type", "application/octet-stream")
            req.Header.Set("Content-Range", "bytes xx-99/*")
            resp, err := http.DefaultClient.Do(req)
            require.NoError(t, err)
            assert.Equal(t, 400, resp.StatusCode)
        }
    })

    t.Run("commit with bad id", func(t *testing.T) {
        {
            resp, err := http.Post(fmt.Sprintf("%s/caches/invalid_id", base), "", nil)
            require.NoError(t, err)
            assert.Equal(t, 400, resp.StatusCode)
        }
    })

    t.Run("commit with not exist id", func(t *testing.T) {
        {
            resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, 100), "", nil)
            require.NoError(t, err)
            assert.Equal(t, 400, resp.StatusCode)
        }
    })

    t.Run("duplicate commit", func(t *testing.T) {
        key := strings.ToLower(t.Name())
        version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
        var id uint64
        content := make([]byte, 100)
        _, err := rand.Read(content)
        require.NoError(t, err)
        {
            body, err := json.Marshal(&Request{
                Key:     key,
                Version: version,
                Size:    100,
            })
            require.NoError(t, err)
            resp, err := http.Post(fmt.Sprintf("%s/caches", base), "application/json", bytes.NewReader(body))
            require.NoError(t, err)
            assert.Equal(t, 200, resp.StatusCode)

            got := struct {
                CacheID uint64 `json:"cacheId"`
            }{}
            require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
            id = got.CacheID
        }
        {
            req, err := http.NewRequest(http.MethodPatch,
                fmt.Sprintf("%s/caches/%d", base, id), bytes.NewReader(content))
            require.NoError(t, err)
            req.Header.Set("Content-Type", "application/octet-stream")
            req.Header.Set("Content-Range", "bytes 0-99/*")
            resp, err := http.DefaultClient.Do(req)
            require.NoError(t, err)
            assert.Equal(t, 200, resp.StatusCode)
        }
        {
            resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil)
            require.NoError(t, err)
            assert.Equal(t, 200, resp.StatusCode)
        }
        {
            resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil)
            require.NoError(t, err)
            assert.Equal(t, 400, resp.StatusCode)
        }
    })

    t.Run("commit early", func(t *testing.T) {
        key := strings.ToLower(t.Name())
        version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
        var id uint64
        content := make([]byte, 100)
        _, err := rand.Read(content)
        require.NoError(t, err)
        {
            body, err := json.Marshal(&Request{
                Key:     key,
                Version: version,
                Size:    100,
            })
            require.NoError(t, err)
            resp, err := http.Post(fmt.Sprintf("%s/caches", base), "application/json", bytes.NewReader(body))
            require.NoError(t, err)
            assert.Equal(t, 200, resp.StatusCode)

            got := struct {
                CacheID uint64 `json:"cacheId"`
            }{}
            require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
            id = got.CacheID
        }
        {
            req, err := http.NewRequest(http.MethodPatch,
                fmt.Sprintf("%s/caches/%d", base, id), bytes.NewReader(content[:50]))
            require.NoError(t, err)
            req.Header.Set("Content-Type", "application/octet-stream")
            req.Header.Set("Content-Range", "bytes 0-59/*")
            resp, err := http.DefaultClient.Do(req)
            require.NoError(t, err)
            assert.Equal(t, 200, resp.StatusCode)
        }
        {
            resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil)
            require.NoError(t, err)
            assert.Equal(t, 500, resp.StatusCode)
        }
    })

    t.Run("get with bad id", func(t *testing.T) {
        resp, err := http.Get(fmt.Sprintf("%s/artifacts/invalid_id", base))
        require.NoError(t, err)
        require.Equal(t, 400, resp.StatusCode)
    })

    t.Run("get with not exist id", func(t *testing.T) {
        resp, err := http.Get(fmt.Sprintf("%s/artifacts/%d", base, 100))
        require.NoError(t, err)
        require.Equal(t, 404, resp.StatusCode)
    })

t.Run("get with multiple keys", func(t *testing.T) {
|
||||
version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
|
||||
key := strings.ToLower(t.Name())
|
||||
keys := [3]string{
|
||||
key + "_a",
|
||||
key + "_a_b",
|
||||
key + "_a_b_c",
|
||||
}
|
||||
contents := [3][]byte{
|
||||
make([]byte, 100),
|
||||
make([]byte, 200),
|
||||
make([]byte, 300),
|
||||
}
|
||||
for i := range contents {
|
||||
_, err := rand.Read(contents[i])
|
||||
require.NoError(t, err)
|
||||
uploadCacheNormally(t, base, keys[i], version, contents[i])
|
||||
}
|
||||
|
||||
reqKeys := strings.Join([]string{
|
||||
key + "_a_b_x",
|
||||
key + "_a_b",
|
||||
key + "_a",
|
||||
}, ",")
|
||||
var archiveLocation string
|
||||
{
|
||||
resp, err := http.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, reqKeys, version))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 200, resp.StatusCode)
|
||||
got := struct {
|
||||
Result string `json:"result"`
|
||||
ArchiveLocation string `json:"archiveLocation"`
|
||||
CacheKey string `json:"cacheKey"`
|
||||
}{}
|
||||
require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
|
||||
assert.Equal(t, "hit", got.Result)
|
||||
assert.Equal(t, keys[1], got.CacheKey)
|
||||
archiveLocation = got.ArchiveLocation
|
||||
}
|
||||
{
|
||||
resp, err := http.Get(archiveLocation) //nolint:gosec
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 200, resp.StatusCode)
|
||||
got, err := io.ReadAll(resp.Body)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, contents[1], got)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("case insensitive", func(t *testing.T) {
|
||||
version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
|
||||
key := strings.ToLower(t.Name())
|
||||
content := make([]byte, 100)
|
||||
_, err := rand.Read(content)
|
||||
require.NoError(t, err)
|
||||
uploadCacheNormally(t, base, key+"_ABC", version, content)
|
||||
|
||||
{
|
||||
reqKey := key + "_aBc"
|
||||
resp, err := http.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, reqKey, version))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 200, resp.StatusCode)
|
||||
got := struct {
|
||||
Result string `json:"result"`
|
||||
ArchiveLocation string `json:"archiveLocation"`
|
||||
CacheKey string `json:"cacheKey"`
|
||||
}{}
|
||||
require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
|
||||
assert.Equal(t, "hit", got.Result)
|
||||
assert.Equal(t, key+"_abc", got.CacheKey)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func uploadCacheNormally(t *testing.T, base, key, version string, content []byte) {
|
||||
var id uint64
|
||||
{
|
||||
body, err := json.Marshal(&Request{
|
||||
Key: key,
|
||||
Version: version,
|
||||
Size: int64(len(content)),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
resp, err := http.Post(fmt.Sprintf("%s/caches", base), "application/json", bytes.NewReader(body))
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 200, resp.StatusCode)
|
||||
|
||||
got := struct {
|
||||
CacheID uint64 `json:"cacheId"`
|
||||
}{}
|
||||
require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
|
||||
id = got.CacheID
|
||||
}
|
||||
{
|
||||
req, err := http.NewRequest(http.MethodPatch,
|
||||
fmt.Sprintf("%s/caches/%d", base, id), bytes.NewReader(content))
|
||||
require.NoError(t, err)
|
||||
req.Header.Set("Content-Type", "application/octet-stream")
|
||||
req.Header.Set("Content-Range", "bytes 0-99/*")
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 200, resp.StatusCode)
|
||||
}
|
||||
{
|
||||
resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 200, resp.StatusCode)
|
||||
}
|
||||
var archiveLocation string
|
||||
{
|
||||
resp, err := http.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, key, version))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 200, resp.StatusCode)
|
||||
got := struct {
|
||||
Result string `json:"result"`
|
||||
ArchiveLocation string `json:"archiveLocation"`
|
||||
CacheKey string `json:"cacheKey"`
|
||||
}{}
|
||||
require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
|
||||
assert.Equal(t, "hit", got.Result)
|
||||
assert.Equal(t, strings.ToLower(key), got.CacheKey)
|
||||
archiveLocation = got.ArchiveLocation
|
||||
}
|
||||
{
|
||||
resp, err := http.Get(archiveLocation) //nolint:gosec
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 200, resp.StatusCode)
|
||||
got, err := io.ReadAll(resp.Body)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, content, got)
|
||||
}
|
||||
}
|
38
pkg/artifactcache/model.go
Normal file
@@ -0,0 +1,38 @@
package artifactcache

import (
    "crypto/sha256"
    "fmt"
)

type Request struct {
    Key     string `json:"key" `
    Version string `json:"version"`
    Size    int64  `json:"cacheSize"`
}

func (c *Request) ToCache() *Cache {
    if c == nil {
        return nil
    }
    return &Cache{
        Key:     c.Key,
        Version: c.Version,
        Size:    c.Size,
    }
}

type Cache struct {
    ID             uint64 `json:"id" boltholdKey:"ID"`
    Key            string `json:"key" boltholdIndex:"Key"`
    Version        string `json:"version" boltholdIndex:"Version"`
    KeyVersionHash string `json:"keyVersionHash" boltholdUnique:"KeyVersionHash"`
    Size           int64  `json:"cacheSize"`
    Complete       bool   `json:"complete"`
    UsedAt         int64  `json:"usedAt" boltholdIndex:"UsedAt"`
    CreatedAt      int64  `json:"createdAt" boltholdIndex:"CreatedAt"`
}

func (c *Cache) FillKeyVersionHash() {
    c.KeyVersionHash = fmt.Sprintf("%x", sha256.Sum256([]byte(fmt.Sprintf("%s:%s", c.Key, c.Version))))
}
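A quick illustration of FillKeyVersionHash: the boltholdUnique index is just the hex-encoded SHA-256 of "key:version". The key and version below are made-up examples:

package main

import (
    "crypto/sha256"
    "fmt"
)

func main() {
    key, version := "linux-primes", "some-version-string"
    // Same expression as Cache.FillKeyVersionHash above.
    hash := fmt.Sprintf("%x", sha256.Sum256([]byte(fmt.Sprintf("%s:%s", key, version))))
    fmt.Println(hash) // 64 hex characters, the same shape as the version strings in the tests
}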
126
pkg/artifactcache/storage.go
Normal file
@@ -0,0 +1,126 @@
package artifactcache

import (
    "fmt"
    "io"
    "net/http"
    "os"
    "path/filepath"
)

type Storage struct {
    rootDir string
}

func NewStorage(rootDir string) (*Storage, error) {
    if err := os.MkdirAll(rootDir, 0o755); err != nil {
        return nil, err
    }
    return &Storage{
        rootDir: rootDir,
    }, nil
}

func (s *Storage) Exist(id uint64) (bool, error) {
    name := s.filename(id)
    if _, err := os.Stat(name); os.IsNotExist(err) {
        return false, nil
    } else if err != nil {
        return false, err
    }
    return true, nil
}

func (s *Storage) Write(id uint64, offset int64, reader io.Reader) error {
    name := s.tempName(id, offset)
    if err := os.MkdirAll(filepath.Dir(name), 0o755); err != nil {
        return err
    }
    file, err := os.Create(name)
    if err != nil {
        return err
    }
    defer file.Close()

    _, err = io.Copy(file, reader)
    return err
}

func (s *Storage) Commit(id uint64, size int64) error {
    defer func() {
        _ = os.RemoveAll(s.tempDir(id))
    }()

    name := s.filename(id)
    tempNames, err := s.tempNames(id)
    if err != nil {
        return err
    }

    if err := os.MkdirAll(filepath.Dir(name), 0o755); err != nil {
        return err
    }
    file, err := os.Create(name)
    if err != nil {
        return err
    }
    defer file.Close()

    var written int64
    for _, v := range tempNames {
        f, err := os.Open(v)
        if err != nil {
            return err
        }
        n, err := io.Copy(file, f)
        _ = f.Close()
        if err != nil {
            return err
        }
        written += n
    }

    if written != size {
        _ = file.Close()
        _ = os.Remove(name)
        return fmt.Errorf("broken file: %v != %v", written, size)
    }
    return nil
}

func (s *Storage) Serve(w http.ResponseWriter, r *http.Request, id uint64) {
    name := s.filename(id)
    http.ServeFile(w, r, name)
}

func (s *Storage) Remove(id uint64) {
    _ = os.Remove(s.filename(id))
    _ = os.RemoveAll(s.tempDir(id))
}

func (s *Storage) filename(id uint64) string {
    return filepath.Join(s.rootDir, fmt.Sprintf("%02x", id%0xff), fmt.Sprint(id))
}

func (s *Storage) tempDir(id uint64) string {
    return filepath.Join(s.rootDir, "tmp", fmt.Sprint(id))
}

func (s *Storage) tempName(id uint64, offset int64) string {
    return filepath.Join(s.tempDir(id), fmt.Sprintf("%016x", offset))
}

func (s *Storage) tempNames(id uint64) ([]string, error) {
    dir := s.tempDir(id)
    files, err := os.ReadDir(dir)
    if err != nil {
        return nil, err
    }
    var names []string
    for _, v := range files {
        if !v.IsDir() {
            names = append(names, filepath.Join(dir, v.Name()))
        }
    }
    return names, nil
}
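Tying storage.go together, a sketch of the chunked-upload flow it implements: each Write lands in a temp file named by byte offset (the %016x padding keeps os.ReadDir's lexical order equal to offset order), and Commit concatenates the chunks while enforcing the declared size. The directory and id below are examples:

package main

import (
    "fmt"
    "log"
    "strings"

    "github.com/nektos/act/pkg/artifactcache"
)

func main() {
    s, err := artifactcache.NewStorage("/tmp/actcache-demo")
    if err != nil {
        log.Fatal(err)
    }
    // Two chunks at offsets 0 and 6, then a commit declaring 11 bytes total.
    if err := s.Write(1, 0, strings.NewReader("hello ")); err != nil {
        log.Fatal(err)
    }
    if err := s.Write(1, 6, strings.NewReader("world")); err != nil {
        log.Fatal(err)
    }
    if err := s.Commit(1, 11); err != nil {
        log.Fatal(err) // a size mismatch removes the file: "broken file: ..."
    }
    fmt.Println("cache entry 1 committed")
}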
30
pkg/artifactcache/testdata/example/example.yaml
vendored
Normal file
@@ -0,0 +1,30 @@
# Copied from https://github.com/actions/cache#example-cache-workflow
name: Caching Primes

on: push

jobs:
  build:
    runs-on: ubuntu-latest

    steps:
      - run: env

      - uses: actions/checkout@v3

      - name: Cache Primes
        id: cache-primes
        uses: actions/cache@v3
        with:
          path: prime-numbers
          key: ${{ runner.os }}-primes-${{ github.run_id }}
          restore-keys: |
            ${{ runner.os }}-primes
            ${{ runner.os }}

      - name: Generate Prime Numbers
        if: steps.cache-primes.outputs.cache-hit != 'true'
        run: cat /proc/sys/kernel/random/uuid > prime-numbers

      - name: Use Prime Numbers
        run: cat prime-numbers
@@ -9,12 +9,12 @@ import (
    "io/fs"
    "net/http"
    "os"
    "path"
    "path/filepath"
    "strings"
    "time"

    "github.com/julienschmidt/httprouter"

    "github.com/nektos/act/pkg/common"
)

@@ -46,28 +46,34 @@ type ResponseMessage struct {
    Message string `json:"message"`
}

type MkdirFS interface {
    fs.FS
    MkdirAll(path string, perm fs.FileMode) error
    Open(name string) (fs.File, error)
    OpenAtEnd(name string) (fs.File, error)
type WritableFile interface {
    io.WriteCloser
}

type MkdirFsImpl struct {
    dir string
    fs.FS
type WriteFS interface {
    OpenWritable(name string) (WritableFile, error)
    OpenAppendable(name string) (WritableFile, error)
}

func (fsys MkdirFsImpl) MkdirAll(path string, perm fs.FileMode) error {
    return os.MkdirAll(fsys.dir+"/"+path, perm)
type readWriteFSImpl struct {
}

func (fsys MkdirFsImpl) Open(name string) (fs.File, error) {
    return os.OpenFile(fsys.dir+"/"+name, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0644)
func (fwfs readWriteFSImpl) Open(name string) (fs.File, error) {
    return os.Open(name)
}

func (fsys MkdirFsImpl) OpenAtEnd(name string) (fs.File, error) {
    file, err := os.OpenFile(fsys.dir+"/"+name, os.O_CREATE|os.O_RDWR, 0644)
func (fwfs readWriteFSImpl) OpenWritable(name string) (WritableFile, error) {
    if err := os.MkdirAll(filepath.Dir(name), os.ModePerm); err != nil {
        return nil, err
    }
    return os.OpenFile(name, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o644)
}

func (fwfs readWriteFSImpl) OpenAppendable(name string) (WritableFile, error) {
    if err := os.MkdirAll(filepath.Dir(name), os.ModePerm); err != nil {
        return nil, err
    }
    file, err := os.OpenFile(name, os.O_CREATE|os.O_RDWR, 0o644)

    if err != nil {
        return nil, err
@@ -77,13 +83,16 @@ func (fsys MkdirFsImpl) OpenAtEnd(name string) (fs.File, error) {
    if err != nil {
        return nil, err
    }

    return file, nil
}

var gzipExtension = ".gz__"

func uploads(router *httprouter.Router, fsys MkdirFS) {
func safeResolve(baseDir string, relPath string) string {
    return filepath.Join(baseDir, filepath.Clean(filepath.Join(string(os.PathSeparator), relPath)))
}

func uploads(router *httprouter.Router, baseDir string, fsys WriteFS) {
    router.POST("/_apis/pipelines/workflows/:runId/artifacts", func(w http.ResponseWriter, req *http.Request, params httprouter.Params) {
        runID := params.ByName("runId")

@@ -108,19 +117,15 @@ func uploads(router *httprouter.Router, fsys MkdirFS) {
            itemPath += gzipExtension
        }

        filePath := fmt.Sprintf("%s/%s", runID, itemPath)
        safeRunPath := safeResolve(baseDir, runID)
        safePath := safeResolve(safeRunPath, itemPath)

        err := fsys.MkdirAll(path.Dir(filePath), os.ModePerm)
        if err != nil {
            panic(err)
        }

        file, err := func() (fs.File, error) {
        file, err := func() (WritableFile, error) {
            contentRange := req.Header.Get("Content-Range")
            if contentRange != "" && !strings.HasPrefix(contentRange, "bytes 0-") {
                return fsys.OpenAtEnd(filePath)
                return fsys.OpenAppendable(safePath)
            }
            return fsys.Open(filePath)
            return fsys.OpenWritable(safePath)
        }()

        if err != nil {
@@ -170,11 +175,13 @@ func uploads(router *httprouter.Router, fsys MkdirFS) {
    })
}

func downloads(router *httprouter.Router, fsys fs.FS) {
func downloads(router *httprouter.Router, baseDir string, fsys fs.FS) {
    router.GET("/_apis/pipelines/workflows/:runId/artifacts", func(w http.ResponseWriter, req *http.Request, params httprouter.Params) {
        runID := params.ByName("runId")

        entries, err := fs.ReadDir(fsys, runID)
        safePath := safeResolve(baseDir, runID)

        entries, err := fs.ReadDir(fsys, safePath)
        if err != nil {
            panic(err)
        }
@@ -204,12 +211,12 @@ func downloads(router *httprouter.Router, fsys fs.FS) {
    router.GET("/download/:container", func(w http.ResponseWriter, req *http.Request, params httprouter.Params) {
        container := params.ByName("container")
        itemPath := req.URL.Query().Get("itemPath")
        dirPath := fmt.Sprintf("%s/%s", container, itemPath)
        safePath := safeResolve(baseDir, filepath.Join(container, itemPath))

        var files []ContainerItem
        err := fs.WalkDir(fsys, dirPath, func(path string, entry fs.DirEntry, err error) error {
        err := fs.WalkDir(fsys, safePath, func(path string, entry fs.DirEntry, err error) error {
            if !entry.IsDir() {
                rel, err := filepath.Rel(dirPath, path)
                rel, err := filepath.Rel(safePath, path)
                if err != nil {
                    panic(err)
                }
@@ -218,7 +225,7 @@ func downloads(router *httprouter.Router, fsys fs.FS) {
                rel = strings.TrimSuffix(rel, gzipExtension)

                files = append(files, ContainerItem{
                    Path: fmt.Sprintf("%s/%s", itemPath, rel),
                    Path: filepath.Join(itemPath, rel),
                    ItemType: "file",
                    ContentLocation: fmt.Sprintf("http://%s/artifact/%s/%s/%s", req.Host, container, itemPath, rel),
                })
@@ -245,10 +252,12 @@ func downloads(router *httprouter.Router, fsys fs.FS) {
    router.GET("/artifact/*path", func(w http.ResponseWriter, req *http.Request, params httprouter.Params) {
        path := params.ByName("path")[1:]

        file, err := fsys.Open(path)
        safePath := safeResolve(baseDir, path)

        file, err := fsys.Open(safePath)
        if err != nil {
            // try gzip file
            file, err = fsys.Open(path + gzipExtension)
            file, err = fsys.Open(safePath + gzipExtension)
            if err != nil {
                panic(err)
            }
@@ -262,7 +271,7 @@ func downloads(router *httprouter.Router, fsys fs.FS) {
    })
}

func Serve(ctx context.Context, artifactPath string, port string) context.CancelFunc {
func Serve(ctx context.Context, artifactPath string, addr string, port string) context.CancelFunc {
    serverContext, cancel := context.WithCancel(ctx)
    logger := common.Logger(serverContext)

@@ -273,20 +282,19 @@ func Serve(ctx context.Context, artifactPath string, port string) context.Cancel
    router := httprouter.New()

    logger.Debugf("Artifacts base path '%s'", artifactPath)
    fs := os.DirFS(artifactPath)
    uploads(router, MkdirFsImpl{artifactPath, fs})
    downloads(router, fs)
    ip := common.GetOutboundIP().String()
    fsys := readWriteFSImpl{}
    uploads(router, artifactPath, fsys)
    downloads(router, artifactPath, fsys)

    server := &http.Server{
        Addr: fmt.Sprintf("%s:%s", ip, port),
        Addr: fmt.Sprintf("%s:%s", addr, port),
        ReadHeaderTimeout: 2 * time.Second,
        Handler: router,
    }

    // run server
    go func() {
        logger.Infof("Start server on http://%s:%s", ip, port)
        logger.Infof("Start server on http://%s:%s", addr, port)
        if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
            logger.Fatal(err)
        }

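The GHSL-2023-004 hardening above all funnels through safeResolve. A standalone demonstration of how it neutralizes path traversal on Linux-style paths (the directories are examples):

package main

import (
    "fmt"
    "os"
    "path/filepath"
)

// Same one-liner as safeResolve above: rooting the cleaned relative path at
// the path separator strips any "../" segments before joining to baseDir.
func safeResolve(baseDir string, relPath string) string {
    return filepath.Join(baseDir, filepath.Clean(filepath.Join(string(os.PathSeparator), relPath)))
}

func main() {
    fmt.Println(safeResolve("/srv/artifacts", "1/some/file"))
    // /srv/artifacts/1/some/file
    fmt.Println(safeResolve("/srv/artifacts", "../../etc/passwd"))
    // /srv/artifacts/etc/passwd -- the escape attempt is contained
}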
@@ -4,7 +4,6 @@ import (
    "context"
    "encoding/json"
    "fmt"
    "io/fs"
    "net/http"
    "net/http/httptest"
    "os"
@@ -21,44 +20,43 @@ import (
    "github.com/stretchr/testify/assert"
)

type MapFsImpl struct {
    fstest.MapFS
type writableMapFile struct {
    fstest.MapFile
}

func (fsys MapFsImpl) MkdirAll(path string, perm fs.FileMode) error {
    // mocked no-op
    return nil
}

type WritableFile struct {
    fs.File
    fsys fstest.MapFS
    path string
}

func (file WritableFile) Write(data []byte) (int, error) {
    file.fsys[file.path].Data = data
func (f *writableMapFile) Write(data []byte) (int, error) {
    f.Data = data
    return len(data), nil
}

func (fsys MapFsImpl) Open(path string) (fs.File, error) {
    var file = fstest.MapFile{
        Data: []byte("content2"),
    }
    fsys.MapFS[path] = &file

    result, err := fsys.MapFS.Open(path)
    return WritableFile{result, fsys.MapFS, path}, err
func (f *writableMapFile) Close() error {
    return nil
}

func (fsys MapFsImpl) OpenAtEnd(path string) (fs.File, error) {
    var file = fstest.MapFile{
        Data: []byte("content2"),
    }
    fsys.MapFS[path] = &file
type writeMapFS struct {
    fstest.MapFS
}

    result, err := fsys.MapFS.Open(path)
    return WritableFile{result, fsys.MapFS, path}, err
func (fsys writeMapFS) OpenWritable(name string) (WritableFile, error) {
    var file = &writableMapFile{
        MapFile: fstest.MapFile{
            Data: []byte("content2"),
        },
    }
    fsys.MapFS[name] = &file.MapFile

    return file, nil
}

func (fsys writeMapFS) OpenAppendable(name string) (WritableFile, error) {
    var file = &writableMapFile{
        MapFile: fstest.MapFile{
            Data: []byte("content2"),
        },
    }
    fsys.MapFS[name] = &file.MapFile

    return file, nil
}

func TestNewArtifactUploadPrepare(t *testing.T) {
@@ -67,7 +65,7 @@ func TestNewArtifactUploadPrepare(t *testing.T) {
    var memfs = fstest.MapFS(map[string]*fstest.MapFile{})

    router := httprouter.New()
    uploads(router, MapFsImpl{memfs})
    uploads(router, "artifact/server/path", writeMapFS{memfs})

    req, _ := http.NewRequest("POST", "http://localhost/_apis/pipelines/workflows/1/artifacts", nil)
    rr := httptest.NewRecorder()
@@ -93,7 +91,7 @@ func TestArtifactUploadBlob(t *testing.T) {
    var memfs = fstest.MapFS(map[string]*fstest.MapFile{})

    router := httprouter.New()
    uploads(router, MapFsImpl{memfs})
    uploads(router, "artifact/server/path", writeMapFS{memfs})

    req, _ := http.NewRequest("PUT", "http://localhost/upload/1?itemPath=some/file", strings.NewReader("content"))
    rr := httptest.NewRecorder()
@@ -111,7 +109,7 @@ func TestArtifactUploadBlob(t *testing.T) {
    }

    assert.Equal("success", response.Message)
    assert.Equal("content", string(memfs["1/some/file"].Data))
    assert.Equal("content", string(memfs["artifact/server/path/1/some/file"].Data))
}

func TestFinalizeArtifactUpload(t *testing.T) {
@@ -120,7 +118,7 @@ func TestFinalizeArtifactUpload(t *testing.T) {
    var memfs = fstest.MapFS(map[string]*fstest.MapFile{})

    router := httprouter.New()
    uploads(router, MapFsImpl{memfs})
    uploads(router, "artifact/server/path", writeMapFS{memfs})

    req, _ := http.NewRequest("PATCH", "http://localhost/_apis/pipelines/workflows/1/artifacts", nil)
    rr := httptest.NewRecorder()
@@ -144,13 +142,13 @@ func TestListArtifacts(t *testing.T) {
    assert := assert.New(t)

    var memfs = fstest.MapFS(map[string]*fstest.MapFile{
        "1/file.txt": {
        "artifact/server/path/1/file.txt": {
            Data: []byte(""),
        },
    })

    router := httprouter.New()
    downloads(router, memfs)
    downloads(router, "artifact/server/path", memfs)

    req, _ := http.NewRequest("GET", "http://localhost/_apis/pipelines/workflows/1/artifacts", nil)
    rr := httptest.NewRecorder()
@@ -176,13 +174,13 @@ func TestListArtifactContainer(t *testing.T) {
    assert := assert.New(t)

    var memfs = fstest.MapFS(map[string]*fstest.MapFile{
        "1/some/file": {
        "artifact/server/path/1/some/file": {
            Data: []byte(""),
        },
    })

    router := httprouter.New()
    downloads(router, memfs)
    downloads(router, "artifact/server/path", memfs)

    req, _ := http.NewRequest("GET", "http://localhost/download/1?itemPath=some/file", nil)
    rr := httptest.NewRecorder()
@@ -200,7 +198,7 @@ func TestListArtifactContainer(t *testing.T) {
    }

    assert.Equal(1, len(response.Value))
    assert.Equal("some/file/.", response.Value[0].Path)
    assert.Equal("some/file", response.Value[0].Path)
    assert.Equal("file", response.Value[0].ItemType)
    assert.Equal("http://localhost/artifact/1/some/file/.", response.Value[0].ContentLocation)
}
@@ -209,13 +207,13 @@ func TestDownloadArtifactFile(t *testing.T) {
    assert := assert.New(t)

    var memfs = fstest.MapFS(map[string]*fstest.MapFile{
        "1/some/file": {
        "artifact/server/path/1/some/file": {
            Data: []byte("content"),
        },
    })

    router := httprouter.New()
    downloads(router, memfs)
    downloads(router, "artifact/server/path", memfs)

    req, _ := http.NewRequest("GET", "http://localhost/artifact/1/some/file", nil)
    rr := httptest.NewRecorder()
@@ -240,7 +238,8 @@ type TestJobFileInfo struct {
    containerArchitecture string
}

var aritfactsPath = path.Join(os.TempDir(), "test-artifacts")
var artifactsPath = path.Join(os.TempDir(), "test-artifacts")
var artifactsAddr = "127.0.0.1"
var artifactsPort = "12345"

func TestArtifactFlow(t *testing.T) {
@@ -250,7 +249,7 @@ func TestArtifactFlow(t *testing.T) {

    ctx := context.Background()

    cancel := Serve(ctx, aritfactsPath, artifactsPort)
    cancel := Serve(ctx, artifactsPath, artifactsAddr, artifactsPort)
    defer cancel()

    platforms := map[string]string{
@@ -259,6 +258,7 @@ func TestArtifactFlow(t *testing.T) {

    tables := []TestJobFileInfo{
        {"testdata", "upload-and-download", "push", "", platforms, ""},
        {"testdata", "GHSL-2023-004", "push", "", platforms, ""},
    }
    log.SetLevel(log.DebugLevel)

@@ -271,7 +271,7 @@ func runTestJobFile(ctx context.Context, t *testing.T, tjfi TestJobFileInfo) {
    t.Run(tjfi.workflowPath, func(t *testing.T) {
        fmt.Printf("::group::%s\n", tjfi.workflowPath)

        if err := os.RemoveAll(aritfactsPath); err != nil {
        if err := os.RemoveAll(artifactsPath); err != nil {
            panic(err)
        }

@@ -286,7 +286,8 @@ func runTestJobFile(ctx context.Context, t *testing.T, tjfi TestJobFileInfo) {
            ReuseContainers: false,
            ContainerArchitecture: tjfi.containerArchitecture,
            GitHubInstance: "github.com",
            ArtifactServerPath: aritfactsPath,
            ArtifactServerPath: artifactsPath,
            ArtifactServerAddr: artifactsAddr,
            ArtifactServerPort: artifactsPort,
        }

@@ -296,15 +297,96 @@ func runTestJobFile(ctx context.Context, t *testing.T, tjfi TestJobFileInfo) {
        planner, err := model.NewWorkflowPlanner(fullWorkflowPath, true)
        assert.Nil(t, err, fullWorkflowPath)

        plan := planner.PlanEvent(tjfi.eventName)

        plan, err := planner.PlanEvent(tjfi.eventName)
        if err == nil {
            err = runner.NewPlanExecutor(plan)(ctx)
            if tjfi.errorMessage == "" {
                assert.Nil(t, err, fullWorkflowPath)
            } else {
                assert.Error(t, err, tjfi.errorMessage)
            }
        } else {
            assert.Nil(t, plan)
        }

        fmt.Println("::endgroup::")
    })
}

func TestMkdirFsImplSafeResolve(t *testing.T) {
    assert := assert.New(t)

    baseDir := "/foo/bar"

    tests := map[string]struct {
        input string
        want  string
    }{
        "simple": {input: "baz", want: "/foo/bar/baz"},
        "nested": {input: "baz/blue", want: "/foo/bar/baz/blue"},
|
||||
"dots in middle": {input: "baz/../../blue", want: "/foo/bar/blue"},
|
||||
"leading dots": {input: "../../parent", want: "/foo/bar/parent"},
|
||||
"root path": {input: "/root", want: "/foo/bar/root"},
|
||||
"root": {input: "/", want: "/foo/bar"},
|
||||
"empty": {input: "", want: "/foo/bar"},
|
||||
}
|
||||
|
||||
for name, tc := range tests {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
assert.Equal(tc.want, safeResolve(baseDir, tc.input))
|
||||
})
|
||||
}
|
||||
}
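The table above pins down safeResolve's contract: every input, including absolute paths and leading `..` segments, must land under baseDir. A minimal sketch with that behavior, as an illustration of the idea rather than the PR's actual implementation:

```go
package main

import (
	"fmt"
	"path"
)

// safeResolveSketch mirrors the contract encoded by the test table: the
// untrusted input is rooted at "/" and cleaned first, which discards any
// leading ".." segments, so the result can never escape baseDir.
func safeResolveSketch(baseDir, relPath string) string {
	return path.Join(baseDir, path.Clean(path.Join("/", relPath)))
}

func main() {
	fmt.Println(safeResolveSketch("/foo/bar", "../../parent"))   // /foo/bar/parent
	fmt.Println(safeResolveSketch("/foo/bar", "baz/../../blue")) // /foo/bar/blue
	fmt.Println(safeResolveSketch("/foo/bar", "/root"))          // /foo/bar/root
	fmt.Println(safeResolveSketch("/foo/bar", ""))               // /foo/bar
}
```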
func TestDownloadArtifactFileUnsafePath(t *testing.T) {
	assert := assert.New(t)

	var memfs = fstest.MapFS(map[string]*fstest.MapFile{
		"artifact/server/path/some/file": {
			Data: []byte("content"),
		},
	})

	router := httprouter.New()
	downloads(router, "artifact/server/path", memfs)

	req, _ := http.NewRequest("GET", "http://localhost/artifact/2/../../some/file", nil)
	rr := httptest.NewRecorder()

	router.ServeHTTP(rr, req)

	if status := rr.Code; status != http.StatusOK {
		assert.FailNow(fmt.Sprintf("Wrong status: %d", status))
	}

	data := rr.Body.Bytes()

	assert.Equal("content", string(data))
}

func TestArtifactUploadBlobUnsafePath(t *testing.T) {
	assert := assert.New(t)

	var memfs = fstest.MapFS(map[string]*fstest.MapFile{})

	router := httprouter.New()
	uploads(router, "artifact/server/path", writeMapFS{memfs})

	req, _ := http.NewRequest("PUT", "http://localhost/upload/1?itemPath=../../some/file", strings.NewReader("content"))
	rr := httptest.NewRecorder()

	router.ServeHTTP(rr, req)

	if status := rr.Code; status != http.StatusOK {
		assert.Fail("Wrong status")
	}

	response := ResponseMessage{}
	err := json.Unmarshal(rr.Body.Bytes(), &response)
	if err != nil {
		panic(err)
	}

	assert.Equal("success", response.Message)
	assert.Equal("content", string(memfs["artifact/server/path/1/some/file"].Data))
}
pkg/artifacts/testdata/GHSL-2023-004/artifacts.yml (new file, 43 lines, vendored)
@@ -0,0 +1,43 @@
name: "GHSL-2023-0004"
on: push

jobs:
  test-artifacts:
    runs-on: ubuntu-latest
    steps:
      - run: echo "hello world" > test.txt
      - name: curl upload
        uses: wei/curl@v1
        with:
          args: -s --fail ${ACTIONS_RUNTIME_URL}upload/1?itemPath=../../my-artifact/secret.txt --upload-file test.txt
      - uses: actions/download-artifact@v2
        with:
          name: my-artifact
          path: test-artifacts
      - name: 'Verify Artifact #1'
        run: |
          file="test-artifacts/secret.txt"
          if [ ! -f $file ] ; then
            echo "Expected file does not exist"
            exit 1
          fi
          if [ "$(cat $file)" != "hello world" ] ; then
            echo "File contents of downloaded artifact are incorrect"
            exit 1
          fi
      - name: Verify download should work by clean extra dots
        uses: wei/curl@v1
        with:
          args: --path-as-is -s -o out.txt --fail ${ACTIONS_RUNTIME_URL}artifact/1/../../../1/my-artifact/secret.txt
      - name: 'Verify download content'
        run: |
          file="out.txt"
          if [ ! -f $file ] ; then
            echo "Expected file does not exist"
            exit 1
          fi
          if [ "$(cat $file)" != "hello world" ] ; then
            echo "File contents of downloaded artifact are incorrect"
            exit 1
          fi
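The two download steps in this workflow differ only in client-side normalization: curl's --path-as-is sends the dotted path verbatim, so the server itself must collapse the dot segments before resolving anything on disk. A small sketch of the normalization the server is expected to apply (illustrative only):

```go
package main

import (
	"fmt"
	"net/url"
	"path"
)

func main() {
	// Sent verbatim by `curl --path-as-is`; a safe server cleans the path
	// before touching the filesystem, which is what the workflow verifies.
	u, _ := url.Parse("http://localhost/artifact/1/../../../1/my-artifact/secret.txt")
	fmt.Println(path.Clean(u.Path)) // /1/my-artifact/secret.txt
}
```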
@@ -7,20 +7,19 @@ import (
	"io"
	"os"
	"path"
	"path/filepath"
	"regexp"
	"strings"
	"sync"

-	"github.com/nektos/act/pkg/common"

	"github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/config"
	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/storer"
	"github.com/go-git/go-git/v5/plumbing/transport/http"
-	"github.com/go-ini/ini"
	"github.com/mattn/go-isatty"
	log "github.com/sirupsen/logrus"

+	"github.com/nektos/act/pkg/common"
)

var (
@@ -55,41 +54,40 @@ func (e *Error) Commit() string {
// FindGitRevision get the current git revision
func FindGitRevision(ctx context.Context, file string) (shortSha string, sha string, err error) {
	logger := common.Logger(ctx)
-	gitDir, err := findGitDirectory(file)
+
+	gitDir, err := git.PlainOpenWithOptions(
+		file,
+		&git.PlainOpenOptions{
+			DetectDotGit:          true,
+			EnableDotGitCommonDir: true,
+		},
+	)
+
	if err != nil {
+		logger.WithError(err).Error("path", file, "not located inside a git repository")
		return "", "", err
	}

+	head, err := gitDir.Reference(plumbing.HEAD, true)
+	if err != nil {
+		return "", "", err
+	}
+
-	bts, err := os.ReadFile(filepath.Join(gitDir, "HEAD"))
-	if err != nil {
-		return "", "", err
+	if head.Hash().IsZero() {
+		return "", "", fmt.Errorf("HEAD sha1 could not be resolved")
	}

-	var ref = strings.TrimSpace(strings.TrimPrefix(string(bts), "ref:"))
-	var refBuf []byte
-	if strings.HasPrefix(ref, "refs/") {
-		// load commitid ref
-		refBuf, err = os.ReadFile(filepath.Join(gitDir, ref))
-		if err != nil {
-			return "", "", err
-		}
-	} else {
-		refBuf = []byte(ref)
-	}
+	hash := head.Hash().String()

-	logger.Debugf("Found revision: %s", refBuf)
-	return string(refBuf[:7]), strings.TrimSpace(string(refBuf)), nil
+	logger.Debugf("Found revision: %s", hash)
+	return hash[:7], strings.TrimSpace(hash), nil
}
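The rewritten FindGitRevision leans entirely on go-git instead of reading .git files by hand. A self-contained sketch of the same resolution flow, runnable against any checkout (the path "." is a placeholder):

```go
package main

import (
	"fmt"
	"log"

	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing"
)

func main() {
	// DetectDotGit walks parent directories to locate .git, which is what
	// made the hand-rolled findGitDirectory helper removable.
	repo, err := git.PlainOpenWithOptions(".", &git.PlainOpenOptions{
		DetectDotGit:          true,
		EnableDotGitCommonDir: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Passing true resolves symbolic refs until a hash reference is reached.
	head, err := repo.Reference(plumbing.HEAD, true)
	if err != nil {
		log.Fatal(err)
	}

	sha := head.Hash().String()
	fmt.Printf("short=%s full=%s\n", sha[:7], sha)
}
```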
// FindGitRef get the current git ref
func FindGitRef(ctx context.Context, file string) (string, error) {
	logger := common.Logger(ctx)
-	gitDir, err := findGitDirectory(file)
-	if err != nil {
-		return "", err
-	}
-	logger.Debugf("Loading revision from git directory '%s'", gitDir)

+	logger.Debugf("Loading revision from git directory")
	_, ref, err := FindGitRevision(ctx, file)
	if err != nil {
		return "", err
@@ -100,16 +98,36 @@ func FindGitRef(ctx context.Context, file string) (string, error) {
	// Prefer the git library to iterate over the references and find a matching tag or branch.
	var refTag = ""
	var refBranch = ""
-	r, err := git.PlainOpen(filepath.Join(gitDir, ".."))
-	if err == nil {
-		iter, err := r.References()
-		if err == nil {
-			for {
-				r, err := iter.Next()
-				if r == nil || err != nil {
-					break
+	repo, err := git.PlainOpenWithOptions(
+		file,
+		&git.PlainOpenOptions{
+			DetectDotGit:          true,
+			EnableDotGitCommonDir: true,
+		},
+	)
+
+	if err != nil {
+		return "", err
	}
-				// logger.Debugf("Reference: name=%s sha=%s", r.Name().String(), r.Hash().String())

+	iter, err := repo.References()
+	if err != nil {
+		return "", err
+	}
+
+	// find the reference that matches the revision's hash
+	err = iter.ForEach(func(r *plumbing.Reference) error {
+		/* tags and branches will have the same hash
+		 * when a user checks out a tag, it is not mentioned explicitly
+		 * in the go-git package, we must identify the revision
+		 * then check if any tag matches that revision,
+		 * if so then we checked out a tag
+		 * else we look for branches and if it matches,
+		 * it means we checked out a branch
+		 *
+		 * If a branch matches first we must continue and check all tags (all references)
+		 * in case we match with a tag later in the iteration
+		 */
		if r.Hash().String() == ref {
			if r.Name().IsTag() {
				refTag = r.Name().String()
@@ -118,10 +136,20 @@ func FindGitRef(ctx context.Context, file string) (string, error) {
				refBranch = r.Name().String()
			}
		}
-		iter.Close()
-	}

+		// we found what we were looking for
+		if refTag != "" && refBranch != "" {
+			return storer.ErrStop
+		}
+
+		return nil
+	})
+
+	if err != nil {
+		return "", err
+	}
+
	// order matters here, see above comment.
	if refTag != "" {
		return refTag, nil
	}
@@ -129,39 +157,7 @@ func FindGitRef(ctx context.Context, file string) (string, error) {
	return refBranch, nil
	}

-	// If the above doesn't work, fall back to the old way
-
-	// try tags first
-	tag, err := findGitPrettyRef(ctx, ref, gitDir, "refs/tags")
-	if err != nil || tag != "" {
-		return tag, err
-	}
-	// and then branches
-	return findGitPrettyRef(ctx, ref, gitDir, "refs/heads")
-}
-
-func findGitPrettyRef(ctx context.Context, head, root, sub string) (string, error) {
-	var name string
-	var err = filepath.Walk(filepath.Join(root, sub), func(path string, info os.FileInfo, err error) error {
-		if err != nil {
-			return err
-		}
-		if name != "" || info.IsDir() {
-			return nil
-		}
-		var bts []byte
-		if bts, err = os.ReadFile(path); err != nil {
-			return err
-		}
-		var pointsTo = strings.TrimSpace(string(bts))
-		if head == pointsTo {
-			// On Windows paths are separated with backslash character so they should be replaced to provide proper git refs format
-			name = strings.TrimPrefix(strings.ReplaceAll(strings.Replace(path, root, "", 1), `\`, `/`), "/")
-			common.Logger(ctx).Debugf("HEAD matches %s", name)
-		}
-		return nil
-	})
-	return name, err
+	return "", fmt.Errorf("failed to identify reference (tag/branch) for the checked-out revision '%s'", ref)
}

// FindGithubRepo get the repo
@@ -179,26 +175,27 @@ func FindGithubRepo(ctx context.Context, file, githubInstance, remoteName string
}

func findGitRemoteURL(ctx context.Context, file, remoteName string) (string, error) {
-	gitDir, err := findGitDirectory(file)
+	repo, err := git.PlainOpenWithOptions(
+		file,
+		&git.PlainOpenOptions{
+			DetectDotGit:          true,
+			EnableDotGitCommonDir: true,
+		},
+	)
	if err != nil {
		return "", err
	}
-	common.Logger(ctx).Debugf("Loading slug from git directory '%s'", gitDir)

-	gitconfig, err := ini.InsensitiveLoad(fmt.Sprintf("%s/config", gitDir))
+	remote, err := repo.Remote(remoteName)
	if err != nil {
		return "", err
	}
-	remote, err := gitconfig.GetSection(fmt.Sprintf(`remote "%s"`, remoteName))
-	if err != nil {
-		return "", err
+
+	if len(remote.Config().URLs) < 1 {
+		return "", fmt.Errorf("remote '%s' exists but has no URL", remoteName)
	}
-	urlKey, err := remote.GetKey("url")
-	if err != nil {
-		return "", err
-	}
-	url := urlKey.String()
-	return url, nil
+
+	return remote.Config().URLs[0], nil
}
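The same PlainOpenWithOptions pattern carries the remote lookup, replacing the go-ini parsing of .git/config. A minimal caller sketch of the go-git API findGitRemoteURL now uses (repository path and remote name are placeholders):

```go
package main

import (
	"fmt"
	"log"

	git "github.com/go-git/go-git/v5"
)

func main() {
	repo, err := git.PlainOpenWithOptions(".", &git.PlainOpenOptions{
		DetectDotGit: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	remote, err := repo.Remote("origin")
	if err != nil {
		log.Fatal(err) // e.g. git.ErrRemoteNotFound when the remote is missing
	}

	// A remote can carry several URLs; the refactored code takes the first.
	fmt.Println(remote.Config().URLs[0])
}
```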
func findGitSlug(url string, githubInstance string) (string, string, error) {
@@ -222,35 +219,6 @@ func findGitSlug(url string, githubInstance string) (string, string, error) {
	return "", url, nil
}

-func findGitDirectory(fromFile string) (string, error) {
-	absPath, err := filepath.Abs(fromFile)
-	if err != nil {
-		return "", err
-	}
-
-	fi, err := os.Stat(absPath)
-	if err != nil {
-		return "", err
-	}
-
-	var dir string
-	if fi.Mode().IsDir() {
-		dir = absPath
-	} else {
-		dir = filepath.Dir(absPath)
-	}
-
-	gitPath := filepath.Join(dir, ".git")
-	fi, err = os.Stat(gitPath)
-	if err == nil && fi.Mode().IsDir() {
-		return gitPath, nil
-	} else if dir == "/" || dir == "C:\\" || dir == "c:\\" {
-		return "", &Error{err: ErrNoRepo}
-	}
-
-	return findGitDirectory(filepath.Dir(dir))
-}
-
// NewGitCloneExecutorInput the input for the NewGitCloneExecutor
type NewGitCloneExecutorInput struct {
	URL string
@@ -292,7 +260,7 @@ func CloneIfRequired(ctx context.Context, refName plumbing.ReferenceName, input
		return nil, err
	}

-	if err = os.Chmod(input.Dir, 0755); err != nil {
+	if err = os.Chmod(input.Dir, 0o755); err != nil {
		return nil, err
	}
}
@@ -82,12 +82,19 @@ func TestFindGitRemoteURL(t *testing.T) {
	assert.NoError(err)

	remoteURL := "https://git-codecommit.us-east-1.amazonaws.com/v1/repos/my-repo-name"
-	err = gitCmd("config", "-f", fmt.Sprintf("%s/.git/config", basedir), "--add", "remote.origin.url", remoteURL)
+	err = gitCmd("-C", basedir, "remote", "add", "origin", remoteURL)
	assert.NoError(err)

	u, err := findGitRemoteURL(context.Background(), basedir, "origin")
	assert.NoError(err)
	assert.Equal(remoteURL, u)
+
+	remoteURL = "git@github.com/AwesomeOwner/MyAwesomeRepo.git"
+	err = gitCmd("-C", basedir, "remote", "add", "upstream", remoteURL)
+	assert.NoError(err)
+	u, err = findGitRemoteURL(context.Background(), basedir, "upstream")
+	assert.NoError(err)
+	assert.Equal(remoteURL, u)
}

func TestGitFindRef(t *testing.T) {
@@ -160,7 +167,7 @@ func TestGitFindRef(t *testing.T) {
	name := name
	t.Run(name, func(t *testing.T) {
		dir := filepath.Join(basedir, name)
-		require.NoError(t, os.MkdirAll(dir, 0755))
+		require.NoError(t, os.MkdirAll(dir, 0o755))
		require.NoError(t, gitCmd("-C", dir, "init", "--initial-branch=master"))
		require.NoError(t, cleanGitHooks(dir))
		tt.Prepare(t, dir)
@@ -2,20 +2,74 @@ package common

import (
	"net"
-
-	log "github.com/sirupsen/logrus"
+	"sort"
+	"strings"
)

-// https://stackoverflow.com/a/37382208
-// Get preferred outbound ip of this machine
+// GetOutboundIP returns an outbound IP address of this machine.
+// It tries to access the internet and returns the local IP address of the connection.
+// If the machine cannot access the internet, it returns a preferred IP address from network interfaces.
+// It returns nil if no IP address is found.
func GetOutboundIP() net.IP {
+	// See https://stackoverflow.com/a/37382208
	conn, err := net.Dial("udp", "8.8.8.8:80")
-	if err != nil {
-		log.Fatal(err)
-	}
+	if err == nil {
+		defer conn.Close()
+		return conn.LocalAddr().(*net.UDPAddr).IP
+	}

-	localAddr := conn.LocalAddr().(*net.UDPAddr)
+	// So the machine cannot access the internet. Pick an IP address from network interfaces.
+	if ifs, err := net.Interfaces(); err == nil {
+		type IP struct {
+			net.IP
+			net.Interface
+		}
+		var ips []IP
+		for _, i := range ifs {
+			if addrs, err := i.Addrs(); err == nil {
+				for _, addr := range addrs {
+					var ip net.IP
+					switch v := addr.(type) {
+					case *net.IPNet:
+						ip = v.IP
+					case *net.IPAddr:
+						ip = v.IP
+					}
+					if ip.IsGlobalUnicast() {
+						ips = append(ips, IP{ip, i})
+					}
+				}
+			}
+		}
+		if len(ips) > 1 {
+			sort.Slice(ips, func(i, j int) bool {
+				ifi := ips[i].Interface
+				ifj := ips[j].Interface

-	return localAddr.IP
+				// ethernet is preferred
+				if vi, vj := strings.HasPrefix(ifi.Name, "e"), strings.HasPrefix(ifj.Name, "e"); vi != vj {
+					return vi
+				}
+
+				ipi := ips[i].IP
+				ipj := ips[j].IP
+
+				// IPv4 is preferred
+				if vi, vj := ipi.To4() != nil, ipj.To4() != nil; vi != vj {
+					return vi
+				}
+
+				// en0 is preferred to en1
+				if ifi.Name != ifj.Name {
+					return ifi.Name < ifj.Name
+				}
+
+				// fallback
+				return ipi.String() < ipj.String()
+			})
+			return ips[0].IP
+		}
+	}
+
+	return nil
}
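To see the comparator's preference order in isolation (ethernet-style names first, then IPv4, then name and address order), here is a trimmed, self-contained sketch with hypothetical interfaces; the IsGlobalUnicast filtering from the real function is omitted for brevity:

```go
package main

import (
	"fmt"
	"net"
	"sort"
	"strings"
)

type candidate struct {
	ip     net.IP
	ifName string
}

// less mirrors the sort used by GetOutboundIP's fallback path above.
func less(a, b candidate) bool {
	// ethernet is preferred
	if vi, vj := strings.HasPrefix(a.ifName, "e"), strings.HasPrefix(b.ifName, "e"); vi != vj {
		return vi
	}
	// IPv4 is preferred
	if vi, vj := a.ip.To4() != nil, b.ip.To4() != nil; vi != vj {
		return vi
	}
	// en0 is preferred to en1
	if a.ifName != b.ifName {
		return a.ifName < b.ifName
	}
	return a.ip.String() < b.ip.String()
}

func main() {
	ips := []candidate{
		{net.ParseIP("fe80::1"), "en0"},
		{net.ParseIP("192.168.1.5"), "wlan0"},
		{net.ParseIP("10.0.0.2"), "en1"},
	}
	sort.Slice(ips, func(i, j int) bool { return less(ips[i], ips[j]) })
	fmt.Println(ips[0].ifName, ips[0].ip) // en1 10.0.0.2: ethernet + IPv4 wins
}
```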
pkg/container/container_types.go (new file, 76 lines)
@@ -0,0 +1,76 @@
package container

import (
	"context"
	"io"

	"github.com/nektos/act/pkg/common"
)

// NewContainerInput the input for the New function
type NewContainerInput struct {
	Image       string
	Username    string
	Password    string
	Entrypoint  []string
	Cmd         []string
	WorkingDir  string
	Env         []string
	Binds       []string
	Mounts      map[string]string
	Name        string
	Stdout      io.Writer
	Stderr      io.Writer
	NetworkMode string
	Privileged  bool
	UsernsMode  string
	Platform    string
	Options     string

	// Gitea specific
	AutoRemove bool

	NetworkAliases []string
}

// FileEntry is a file to copy to a container
type FileEntry struct {
	Name string
	Mode int64
	Body string
}

// Container for managing docker run containers
type Container interface {
	Create(capAdd []string, capDrop []string) common.Executor
	ConnectToNetwork(name string) common.Executor
	Copy(destPath string, files ...*FileEntry) common.Executor
	CopyDir(destPath string, srcPath string, useGitIgnore bool) common.Executor
	GetContainerArchive(ctx context.Context, srcPath string) (io.ReadCloser, error)
	Pull(forcePull bool) common.Executor
	Start(attach bool) common.Executor
	Exec(command []string, env map[string]string, user, workdir string) common.Executor
	UpdateFromEnv(srcPath string, env *map[string]string) common.Executor
	UpdateFromImageEnv(env *map[string]string) common.Executor
	Remove() common.Executor
	Close() common.Executor
	ReplaceLogWriter(io.Writer, io.Writer) (io.Writer, io.Writer)
}

// NewDockerBuildExecutorInput the input for the NewDockerBuildExecutor function
type NewDockerBuildExecutorInput struct {
	ContextDir string
	Dockerfile string
	Container  Container
	ImageTag   string
	Platform   string
}

// NewDockerPullExecutorInput the input for the NewDockerPullExecutor function
type NewDockerPullExecutorInput struct {
	Image     string
	ForcePull bool
	Platform  string
	Username  string
	Password  string
}
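These relocated types are consumed by the executor constructors that stayed behind in their original files; a minimal caller sketch (the image name is arbitrary):

```go
package main

import (
	"context"
	"log"

	"github.com/nektos/act/pkg/container"
)

func main() {
	// Build the pull executor from its input struct, then run it with a
	// context; every executor in this package is just func(ctx) error.
	pull := container.NewDockerPullExecutor(container.NewDockerPullExecutorInput{
		Image:     "node:16-buster-slim",
		ForcePull: false,
		Platform:  "linux/amd64",
	})
	if err := pull(context.Background()); err != nil {
		log.Fatal(err)
	}
}
```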
@@ -1,3 +1,5 @@
+//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
+
package container

import (
@@ -36,3 +38,24 @@ func LoadDockerAuthConfig(ctx context.Context, image string) (types.AuthConfig,

	return types.AuthConfig(authConfig), nil
}
+
+func LoadDockerAuthConfigs(ctx context.Context) map[string]types.AuthConfig {
+	logger := common.Logger(ctx)
+	config, err := config.Load(config.Dir())
+	if err != nil {
+		logger.Warnf("Could not load docker config: %v", err)
+		return nil
+	}
+
+	if !config.ContainsAuth() {
+		config.CredentialsStore = credentials.DetectDefaultStore(config.CredentialsStore)
+	}
+
+	creds, _ := config.GetAllCredentials()
+	authConfigs := make(map[string]types.AuthConfig, len(creds))
+	for k, v := range creds {
+		authConfigs[k] = types.AuthConfig(v)
+	}
+
+	return authConfigs
+}
@@ -1,3 +1,5 @@
+//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
+
package container

import (
@@ -8,22 +10,14 @@ import (

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/pkg/fileutils"

	// github.com/docker/docker/builder/dockerignore is deprecated
	"github.com/moby/buildkit/frontend/dockerfile/dockerignore"
+	"github.com/moby/patternmatcher"

	"github.com/nektos/act/pkg/common"
)

-// NewDockerBuildExecutorInput the input for the NewDockerBuildExecutor function
-type NewDockerBuildExecutorInput struct {
-	ContextDir string
-	Container  Container
-	ImageTag   string
-	Platform   string
-}
-
// NewDockerBuildExecutor function to create a run executor for the container
func NewDockerBuildExecutor(input NewDockerBuildExecutorInput) common.Executor {
	return func(ctx context.Context) error {
@@ -50,12 +44,14 @@ func NewDockerBuildExecutor(input NewDockerBuildExecutorInput) common.Executor {
		Tags:        tags,
		Remove:      true,
		Platform:    input.Platform,
+		AuthConfigs: LoadDockerAuthConfigs(ctx),
+		Dockerfile:  input.Dockerfile,
	}
	var buildContext io.ReadCloser
	if input.Container != nil {
		buildContext, err = input.Container.GetContainerArchive(ctx, input.ContextDir+"/.")
	} else {
-		buildContext, err = createBuildContext(ctx, input.ContextDir, "Dockerfile")
+		buildContext, err = createBuildContext(ctx, input.ContextDir, input.Dockerfile)
	}
	if err != nil {
		return err
@@ -101,8 +97,8 @@ func createBuildContext(ctx context.Context, contextDir string, relDockerfile st
	// parses the Dockerfile. Ignore errors here, as they will have been
	// caught by validateContextDirectory above.
	var includes = []string{"."}
-	keepThem1, _ := fileutils.Matches(".dockerignore", excludes)
-	keepThem2, _ := fileutils.Matches(relDockerfile, excludes)
+	keepThem1, _ := patternmatcher.Matches(".dockerignore", excludes)
+	keepThem2, _ := patternmatcher.Matches(relDockerfile, excludes)
	if keepThem1 || keepThem2 {
		includes = append(includes, ".dockerignore", relDockerfile)
	}

@@ -1,3 +1,5 @@
+//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
+
// This file is exact copy of https://github.com/docker/cli/blob/9ac8584acfd501c3f4da0e845e3a40ed15c85041/cli/command/container/opts.go
// appended with license information.
//
@@ -663,8 +663,8 @@ func TestRunFlagsParseShmSize(t *testing.T) {

func TestParseRestartPolicy(t *testing.T) {
	invalids := map[string]string{
-		"always:2:3":         "invalid restart policy format",
-		"on-failure:invalid": "maximum retry count must be an integer",
+		"always:2:3":         "invalid restart policy format: maximum retry count must be an integer",
+		"on-failure:invalid": "invalid restart policy format: maximum retry count must be an integer",
	}
	valids := map[string]container.RestartPolicy{
		"": {},
@@ -1,3 +1,5 @@
+//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
+
package container

import (
@@ -5,7 +7,7 @@ import (
	"fmt"

	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

// ImageExistsLocally returns a boolean indicating if an image with the
@@ -17,33 +19,15 @@ func ImageExistsLocally(ctx context.Context, imageName string, platform string)
	}
	defer cli.Close()

-	filters := filters.NewArgs()
-	filters.Add("reference", imageName)
-
-	imageListOptions := types.ImageListOptions{
-		Filters: filters,
-	}
-
-	images, err := cli.ImageList(ctx, imageListOptions)
-	if err != nil {
-		return false, err
-	}
-
-	if len(images) > 0 {
-		if platform == "any" || platform == "" {
-			return true, nil
-		}
-		for _, v := range images {
-			inspectImage, _, err := cli.ImageInspectWithRaw(ctx, v.ID)
-			if err != nil {
-				return false, err
-			}
-
-			if fmt.Sprintf("%s/%s", inspectImage.Os, inspectImage.Architecture) == platform {
-				return true, nil
-			}
-		}
+	inspectImage, _, err := cli.ImageInspectWithRaw(ctx, imageName)
+	if client.IsErrNotFound(err) {
+		return false, nil
+	} else if err != nil {
+		return false, err
	}

+	if platform == "" || platform == "any" || fmt.Sprintf("%s/%s", inspectImage.Os, inspectImage.Architecture) == platform {
+		return true, nil
+	}
+
	return false, nil
@@ -52,38 +36,25 @@ func ImageExistsLocally(ctx context.Context, imageName string, platform string)
// RemoveImage removes image from local store, the function is used to run different
// container image architectures
func RemoveImage(ctx context.Context, imageName string, force bool, pruneChildren bool) (bool, error) {
-	if exists, err := ImageExistsLocally(ctx, imageName, "any"); !exists {
-		return false, err
-	}
-
	cli, err := GetDockerClient(ctx)
	if err != nil {
		return false, err
	}
	defer cli.Close()

-	filters := filters.NewArgs()
-	filters.Add("reference", imageName)
-
-	imageListOptions := types.ImageListOptions{
-		Filters: filters,
-	}
-
-	images, err := cli.ImageList(ctx, imageListOptions)
-	if err != nil {
+	inspectImage, _, err := cli.ImageInspectWithRaw(ctx, imageName)
+	if client.IsErrNotFound(err) {
+		return false, nil
+	} else if err != nil {
		return false, err
	}

-	if len(images) > 0 {
-		for _, v := range images {
-			if _, err = cli.ImageRemove(ctx, v.ID, types.ImageRemoveOptions{
+	if _, err = cli.ImageRemove(ctx, inspectImage.ID, types.ImageRemoveOptions{
		Force:         force,
		PruneChildren: pruneChildren,
	}); err != nil {
		return false, err
	}
-		}
-		return true, nil
-	}

-	return false, nil
+	return true, nil
}
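A minimal caller sketch for the two refactored functions; note that "any" (or an empty string) skips the os/arch comparison entirely:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/nektos/act/pkg/container"
)

func main() {
	ctx := context.Background()

	// ImageInspectWithRaw answers by name directly, so a missing image is
	// reported as (false, nil) rather than an error.
	exists, err := container.ImageExistsLocally(ctx, "alpine:latest", "any")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("exists:", exists)

	if exists {
		removed, err := container.RemoveImage(ctx, "alpine:latest", false, false)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("removed:", removed)
	}
}
```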
@@ -1,3 +1,5 @@
+//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
+
package container

import (
pkg/container/docker_network.go (new file, 40 lines)
@@ -0,0 +1,40 @@
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))

package container

import (
	"context"

	"github.com/docker/docker/api/types"
	"github.com/nektos/act/pkg/common"
)

func NewDockerNetworkCreateExecutor(name string) common.Executor {
	return func(ctx context.Context) error {
		cli, err := GetDockerClient(ctx)
		if err != nil {
			return err
		}

		_, err = cli.NetworkCreate(ctx, name, types.NetworkCreate{
			Driver: "bridge",
			Scope:  "local",
		})
		if err != nil {
			return err
		}

		return nil
	}
}

func NewDockerNetworkRemoveExecutor(name string) common.Executor {
	return func(ctx context.Context) error {
		cli, err := GetDockerClient(ctx)
		if err != nil {
			return err
		}

		return cli.NetworkRemove(ctx, name)
	}
}
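A sketch of how these two executors might compose with the pipeline helpers in pkg/common; this assumes the Finally combinator on common.Executor, and the network name is arbitrary:

```go
package main

import (
	"context"
	"log"

	"github.com/nektos/act/pkg/common"
	"github.com/nektos/act/pkg/container"
)

func main() {
	// Create the network up front and remove it again via Finally, so the
	// cleanup runs even when the work in between fails.
	pipeline := common.NewPipelineExecutor(
		container.NewDockerNetworkCreateExecutor("act-example-network"),
		// ... executors that start containers attached to the network ...
	).Finally(container.NewDockerNetworkRemoveExecutor("act-example-network"))

	if err := pipeline(context.Background()); err != nil {
		log.Fatal(err)
	}
}
```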
@@ -1,3 +1,5 @@
+//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
+
package container

import (
@@ -12,15 +14,6 @@ import (
	"github.com/nektos/act/pkg/common"
)

-// NewDockerPullExecutorInput the input for the NewDockerPullExecutor function
-type NewDockerPullExecutorInput struct {
-	Image     string
-	ForcePull bool
-	Platform  string
-	Username  string
-	Password  string
-}
-
// NewDockerPullExecutor function to create a run executor for the container
func NewDockerPullExecutor(input NewDockerPullExecutorInput) common.Executor {
	return func(ctx context.Context) error {
@@ -1,8 +1,9 @@
+//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
+
package container

import (
	"archive/tar"
	"bufio"
	"bytes"
	"context"
	"errors"
@@ -15,6 +16,8 @@ import (
	"strconv"
	"strings"

+	networktypes "github.com/docker/docker/api/types/network"
+
	"github.com/go-git/go-billy/v5/helper/polyfill"
	"github.com/go-git/go-billy/v5/osfs"
	"github.com/go-git/go-git/v5/plumbing/format/gitignore"
@@ -38,53 +41,6 @@ import (
	"github.com/nektos/act/pkg/common"
)

-// NewContainerInput the input for the New function
-type NewContainerInput struct {
-	Image       string
-	Username    string
-	Password    string
-	Entrypoint  []string
-	Cmd         []string
-	WorkingDir  string
-	Env         []string
-	Binds       []string
-	Mounts      map[string]string
-	Name        string
-	Stdout      io.Writer
-	Stderr      io.Writer
-	NetworkMode string
-	Privileged  bool
-	UsernsMode  string
-	Platform    string
-	Options     string
-
-	AutoRemove bool
-}
-
-// FileEntry is a file to copy to a container
-type FileEntry struct {
-	Name string
-	Mode int64
-	Body string
-}
-
-// Container for managing docker run containers
-type Container interface {
-	Create(capAdd []string, capDrop []string) common.Executor
-	Copy(destPath string, files ...*FileEntry) common.Executor
-	CopyDir(destPath string, srcPath string, useGitIgnore bool) common.Executor
-	GetContainerArchive(ctx context.Context, srcPath string) (io.ReadCloser, error)
-	Pull(forcePull bool) common.Executor
-	Start(attach bool) common.Executor
-	Exec(command []string, env map[string]string, user, workdir string) common.Executor
-	UpdateFromEnv(srcPath string, env *map[string]string) common.Executor
-	UpdateFromImageEnv(env *map[string]string) common.Executor
-	UpdateFromPath(env *map[string]string) common.Executor
-	Remove() common.Executor
-	Close() common.Executor
-	ReplaceLogWriter(io.Writer, io.Writer) (io.Writer, io.Writer)
-}
-
// NewContainer creates a reference to a container
func NewContainer(input *NewContainerInput) ExecutionsEnvironment {
	cr := new(containerReference)
@@ -92,6 +48,25 @@ func NewContainer(input *NewContainerInput) ExecutionsEnvironment {
	return cr
}

+func (cr *containerReference) ConnectToNetwork(name string) common.Executor {
+	return common.
+		NewDebugExecutor("%sdocker network connect %s %s", logPrefix, name, cr.input.Name).
+		Then(
+			common.NewPipelineExecutor(
+				cr.connect(),
+				cr.connectToNetwork(name, cr.input.NetworkAliases),
+			).IfNot(common.Dryrun),
+		)
+}
+
+func (cr *containerReference) connectToNetwork(name string, aliases []string) common.Executor {
+	return func(ctx context.Context) error {
+		return cr.cli.NetworkConnect(ctx, name, cr.input.Name, &networktypes.EndpointSettings{
+			Aliases: aliases,
+		})
+	}
+}
+
// supportsContainerImagePlatform returns true if the underlying Docker server
// API version is 1.41 and beyond
func supportsContainerImagePlatform(ctx context.Context, cli client.APIClient) bool {
@@ -190,17 +165,13 @@ func (cr *containerReference) GetContainerArchive(ctx context.Context, srcPath s
}

func (cr *containerReference) UpdateFromEnv(srcPath string, env *map[string]string) common.Executor {
-	return cr.extractEnv(srcPath, env).IfNot(common.Dryrun)
+	return parseEnvFile(cr, srcPath, env).IfNot(common.Dryrun)
}

func (cr *containerReference) UpdateFromImageEnv(env *map[string]string) common.Executor {
	return cr.extractFromImageEnv(env).IfNot(common.Dryrun)
}

-func (cr *containerReference) UpdateFromPath(env *map[string]string) common.Executor {
-	return cr.extractPath(env).IfNot(common.Dryrun)
-}
-
func (cr *containerReference) Exec(command []string, env map[string]string, user, workdir string) common.Executor {
	return common.NewPipelineExecutor(
		common.NewInfoExecutor("%sdocker exec cmd=[%s] user=%s workdir=%s", logPrefix, strings.Join(command, " "), user, workdir),
@@ -239,9 +210,6 @@ type containerReference struct {
}

func GetDockerClient(ctx context.Context) (cli client.APIClient, err error) {
-	// TODO: this should maybe need to be a global option, not hidden in here?
-	//       though i'm not sure how that works out when there's another Executor :D
-	//       I really would like something that works on OSX native for eg
	dockerHost := os.Getenv("DOCKER_HOST")

	if strings.HasPrefix(dockerHost, "ssh://") {
@@ -398,6 +366,12 @@ func (cr *containerReference) mergeContainerConfigs(ctx context.Context, config
		return nil, nil, fmt.Errorf("Cannot parse container options: '%s': '%w'", input.Options, err)
	}

+	if len(copts.netMode.Value()) == 0 {
+		if err = copts.netMode.Set("host"); err != nil {
+			return nil, nil, fmt.Errorf("Cannot parse networkmode=host. This is an internal error and should not happen: '%w'", err)
+		}
+	}
+
	containerConfig, err := parse(flags, copts, "")
	if err != nil {
		return nil, nil, fmt.Errorf("Cannot process container options: '%s': '%w'", input.Options, err)
@@ -413,10 +387,16 @@ func (cr *containerReference) mergeContainerConfigs(ctx context.Context, config

	logger.Debugf("Custom container.HostConfig from options ==> %+v", containerConfig.HostConfig)

-	hostConfig.Binds = append(hostConfig.Binds, containerConfig.HostConfig.Binds...)
-	hostConfig.Mounts = append(hostConfig.Mounts, containerConfig.HostConfig.Mounts...)
+	binds := hostConfig.Binds
+	mounts := hostConfig.Mounts
	err = mergo.Merge(hostConfig, containerConfig.HostConfig, mergo.WithOverride)
	if err != nil {
		return nil, nil, fmt.Errorf("Cannot merge container.HostConfig options: '%s': '%w'", input.Options, err)
	}
+	hostConfig.Binds = binds
+	hostConfig.Mounts = mounts
	logger.Debugf("Merged container.HostConfig ==> %+v", hostConfig)

	return config, hostConfig, nil
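The save-and-restore of Binds and Mounts around the merge is needed because mergo.Merge with WithOverride replaces slice fields wholesale rather than appending to them. A standalone sketch of that behavior, assuming the same imdario/mergo library the file imports:

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type HostConfig struct {
	Binds  []string
	Memory int64
}

func main() {
	dst := HostConfig{Binds: []string{"/host:/container"}}
	src := HostConfig{Binds: []string{"/custom:/custom"}, Memory: 1 << 30}

	// WithOverride makes non-zero fields in src win, so dst.Binds is
	// replaced outright, not appended to; hence the manual restore above.
	if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
		panic(err)
	}
	fmt.Println(dst.Binds, dst.Memory) // [/custom:/custom] 1073741824
}
```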
@@ -500,59 +480,6 @@ func (cr *containerReference) create(capAdd []string, capDrop []string) common.E
	}
}

-var singleLineEnvPattern, multiLineEnvPattern *regexp.Regexp
-
-func (cr *containerReference) extractEnv(srcPath string, env *map[string]string) common.Executor {
-	if singleLineEnvPattern == nil {
-		// Single line pattern matches:
-		// SOME_VAR=data=moredata
-		// SOME_VAR=datamoredata
-		singleLineEnvPattern = regexp.MustCompile(`^([^=]*)\=(.*)$`)
-		multiLineEnvPattern = regexp.MustCompile(`^([^<]+)<<([\w-]+)$`)
-	}
-
-	localEnv := *env
-	return func(ctx context.Context) error {
-		envTar, _, err := cr.cli.CopyFromContainer(ctx, cr.id, srcPath)
-		if err != nil {
-			return nil
-		}
-		defer envTar.Close()
-
-		reader := tar.NewReader(envTar)
-		_, err = reader.Next()
-		if err != nil && err != io.EOF {
-			return fmt.Errorf("failed to read tar archive: %w", err)
-		}
-		s := bufio.NewScanner(reader)
-		multiLineEnvKey := ""
-		multiLineEnvDelimiter := ""
-		multiLineEnvContent := ""
-		for s.Scan() {
-			line := s.Text()
-			if singleLineEnv := singleLineEnvPattern.FindStringSubmatch(line); singleLineEnv != nil {
-				localEnv[singleLineEnv[1]] = singleLineEnv[2]
-			}
-			if line == multiLineEnvDelimiter {
-				localEnv[multiLineEnvKey] = multiLineEnvContent
-				multiLineEnvKey, multiLineEnvDelimiter, multiLineEnvContent = "", "", ""
-			}
-			if multiLineEnvKey != "" && multiLineEnvDelimiter != "" {
-				if multiLineEnvContent != "" {
-					multiLineEnvContent += "\n"
-				}
-				multiLineEnvContent += line
-			}
-			if multiLineEnvStart := multiLineEnvPattern.FindStringSubmatch(line); multiLineEnvStart != nil {
-				multiLineEnvKey = multiLineEnvStart[1]
-				multiLineEnvDelimiter = multiLineEnvStart[2]
-			}
-		}
-		env = &localEnv
-		return nil
-	}
-}
-
func (cr *containerReference) extractFromImageEnv(env *map[string]string) common.Executor {
	envMap := *env
	return func(ctx context.Context) error {
@@ -585,31 +512,6 @@ func (cr *containerReference) extractFromImageEnv(env *map[string]string) common
	}
}

-func (cr *containerReference) extractPath(env *map[string]string) common.Executor {
-	localEnv := *env
-	return func(ctx context.Context) error {
-		pathTar, _, err := cr.cli.CopyFromContainer(ctx, cr.id, localEnv["GITHUB_PATH"])
-		if err != nil {
-			return fmt.Errorf("failed to copy from container: %w", err)
-		}
-		defer pathTar.Close()
-
-		reader := tar.NewReader(pathTar)
-		_, err = reader.Next()
-		if err != nil && err != io.EOF {
-			return fmt.Errorf("failed to read tar archive: %w", err)
-		}
-		s := bufio.NewScanner(reader)
-		for s.Scan() {
-			line := s.Text()
-			localEnv["PATH"] = fmt.Sprintf("%s:%s", line, localEnv["PATH"])
-		}
-
-		env = &localEnv
-		return nil
-	}
-}
-
func (cr *containerReference) exec(cmd []string, env map[string]string, user, workdir string) common.Executor {
	return func(ctx context.Context) error {
		logger := common.Logger(ctx)
@@ -706,7 +608,7 @@ func (cr *containerReference) tryReadID(opt string, cbk func(id int)) common.Exe
	}
	exp := regexp.MustCompile(`\d+\n`)
	found := exp.FindString(sid)
-	id, err := strconv.ParseInt(found[:len(found)-1], 10, 32)
+	id, err := strconv.ParseInt(strings.TrimSpace(found), 10, 32)
	if err != nil {
		return nil
	}
pkg/container/docker_stub.go (new file, 69 lines)
@@ -0,0 +1,69 @@
//go:build WITHOUT_DOCKER || !(linux || darwin || windows)

package container

import (
	"context"
	"runtime"

	"github.com/docker/docker/api/types"
	"github.com/nektos/act/pkg/common"
	"github.com/pkg/errors"
)

// ImageExistsLocally returns a boolean indicating if an image with the
// requested name, tag and architecture exists in the local docker image store
func ImageExistsLocally(ctx context.Context, imageName string, platform string) (bool, error) {
	return false, errors.New("Unsupported Operation")
}

// RemoveImage removes image from local store, the function is used to run different
// container image architectures
func RemoveImage(ctx context.Context, imageName string, force bool, pruneChildren bool) (bool, error) {
	return false, errors.New("Unsupported Operation")
}

// NewDockerBuildExecutor function to create a run executor for the container
func NewDockerBuildExecutor(input NewDockerBuildExecutorInput) common.Executor {
	return func(ctx context.Context) error {
		return errors.New("Unsupported Operation")
	}
}

// NewDockerPullExecutor function to create a run executor for the container
func NewDockerPullExecutor(input NewDockerPullExecutorInput) common.Executor {
	return func(ctx context.Context) error {
		return errors.New("Unsupported Operation")
	}
}

// NewContainer creates a reference to a container
func NewContainer(input *NewContainerInput) ExecutionsEnvironment {
	return nil
}

func RunnerArch(ctx context.Context) string {
	return runtime.GOOS
}

func GetHostInfo(ctx context.Context) (info types.Info, err error) {
	return types.Info{}, nil
}

func NewDockerVolumeRemoveExecutor(volume string, force bool) common.Executor {
	return func(ctx context.Context) error {
		return nil
	}
}

func NewDockerNetworkCreateExecutor(name string) common.Executor {
	return func(ctx context.Context) error {
		return nil
	}
}

func NewDockerNetworkRemoveExecutor(name string) common.Executor {
	return func(ctx context.Context) error {
		return nil
	}
}
@@ -1,3 +1,5 @@
+//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
+
package container

import (
@@ -10,4 +10,6 @@ type ExecutionsEnvironment interface {
	DefaultPathVariable() string
	JoinPathVariable(...string) string
	GetRunnerContext(ctx context.Context) map[string]interface{}
+	// On windows PATH and Path are the same key
+	IsEnvironmentCaseInsensitive() bool
}
@@ -65,7 +65,7 @@ type copyCollector struct {

func (cc *copyCollector) WriteFile(fpath string, fi fs.FileInfo, linkName string, f io.Reader) error {
	fdestpath := filepath.Join(cc.DstDir, fpath)
-	if err := os.MkdirAll(filepath.Dir(fdestpath), 0777); err != nil {
+	if err := os.MkdirAll(filepath.Dir(fdestpath), 0o777); err != nil {
		return err
	}
	if f == nil {
@@ -76,7 +76,7 @@ func (mfs *memoryFs) Readlink(path string) (string, error) {

func TestIgnoredTrackedfile(t *testing.T) {
	fs := memfs.New()
-	_ = fs.MkdirAll("mygitrepo/.git", 0777)
+	_ = fs.MkdirAll("mygitrepo/.git", 0o777)
	dotgit, _ := fs.Chroot("mygitrepo/.git")
	worktree, _ := fs.Chroot("mygitrepo")
	repo, _ := git.Init(filesystem.NewStorage(dotgit, cache.NewObjectLRUDefault()), worktree)
@@ -2,9 +2,9 @@ package container

import (
	"archive/tar"
	"bufio"
-	"bytes"
	"context"
+	"errors"
	"fmt"
	"io"
	"io/fs"
@@ -15,14 +15,13 @@ import (
	"strings"
	"time"

-	"errors"
-
	"github.com/go-git/go-billy/v5/helper/polyfill"
	"github.com/go-git/go-billy/v5/osfs"
	"github.com/go-git/go-git/v5/plumbing/format/gitignore"
+	"golang.org/x/term"

	"github.com/nektos/act/pkg/common"
	"github.com/nektos/act/pkg/lookpath"
-	"golang.org/x/term"
)

type HostEnvironment struct {
@@ -41,6 +40,12 @@ func (e *HostEnvironment) Create(capAdd []string, capDrop []string) common.Execu
	}
}

+func (e *HostEnvironment) ConnectToNetwork(name string) common.Executor {
+	return func(ctx context.Context) error {
+		return nil
+	}
+}
+
func (e *HostEnvironment) Close() common.Executor {
	return func(ctx context.Context) error {
		return nil
@@ -50,7 +55,7 @@ func (e *HostEnvironment) Close() common.Executor {
func (e *HostEnvironment) Copy(destPath string, files ...*FileEntry) common.Executor {
	return func(ctx context.Context) error {
		for _, f := range files {
-			if err := os.MkdirAll(filepath.Dir(filepath.Join(destPath, f.Name)), 0777); err != nil {
+			if err := os.MkdirAll(filepath.Dir(filepath.Join(destPath, f.Name)), 0o777); err != nil {
				return err
			}
			if err := os.WriteFile(filepath.Join(destPath, f.Name), []byte(f.Body), fs.FileMode(f.Mode)); err != nil {
@@ -341,77 +346,7 @@ func (e *HostEnvironment) Exec(command []string /*cmdline string, */, env map[st
}

func (e *HostEnvironment) UpdateFromEnv(srcPath string, env *map[string]string) common.Executor {
-	localEnv := *env
-	return func(ctx context.Context) error {
-		envTar, err := e.GetContainerArchive(ctx, srcPath)
-		if err != nil {
-			return nil
-		}
-		defer envTar.Close()
-		reader := tar.NewReader(envTar)
-		_, err = reader.Next()
-		if err != nil && err != io.EOF {
-			return err
-		}
-		s := bufio.NewScanner(reader)
-		for s.Scan() {
-			line := s.Text()
-			singleLineEnv := strings.Index(line, "=")
-			multiLineEnv := strings.Index(line, "<<")
-			if singleLineEnv != -1 && (multiLineEnv == -1 || singleLineEnv < multiLineEnv) {
-				localEnv[line[:singleLineEnv]] = line[singleLineEnv+1:]
-			} else if multiLineEnv != -1 {
-				multiLineEnvContent := ""
-				multiLineEnvDelimiter := line[multiLineEnv+2:]
-				delimiterFound := false
-				for s.Scan() {
-					content := s.Text()
-					if content == multiLineEnvDelimiter {
-						delimiterFound = true
-						break
-					}
-					if multiLineEnvContent != "" {
-						multiLineEnvContent += "\n"
-					}
-					multiLineEnvContent += content
-				}
-				if !delimiterFound {
-					return fmt.Errorf("invalid format delimiter '%v' not found before end of file", multiLineEnvDelimiter)
-				}
-				localEnv[line[:multiLineEnv]] = multiLineEnvContent
-			} else {
-				return fmt.Errorf("invalid format '%v', expected a line with '=' or '<<'", line)
-			}
-		}
-		env = &localEnv
-		return nil
-	}
-}
-
-func (e *HostEnvironment) UpdateFromPath(env *map[string]string) common.Executor {
-	localEnv := *env
-	return func(ctx context.Context) error {
-		pathTar, err := e.GetContainerArchive(ctx, localEnv["GITHUB_PATH"])
-		if err != nil {
-			return err
-		}
-		defer pathTar.Close()
-
-		reader := tar.NewReader(pathTar)
-		_, err = reader.Next()
-		if err != nil && err != io.EOF {
-			return err
-		}
-		s := bufio.NewScanner(reader)
-		for s.Scan() {
-			line := s.Text()
-			pathSep := string(filepath.ListSeparator)
-			localEnv[e.GetPathVariableName()] = fmt.Sprintf("%s%s%s", line, pathSep, localEnv[e.GetPathVariableName()])
-		}
-
-		env = &localEnv
-		return nil
-	}
+	return parseEnvFile(e, srcPath, env)
}

func (e *HostEnvironment) Remove() common.Executor {
@@ -454,10 +389,32 @@ func (*HostEnvironment) JoinPathVariable(paths ...string) string {
	return strings.Join(paths, string(filepath.ListSeparator))
}

+func goArchToActionArch(arch string) string {
+	archMapper := map[string]string{
+		"x86_64":  "X64",
+		"386":     "x86",
+		"aarch64": "arm64",
+	}
+	if arch, ok := archMapper[arch]; ok {
+		return arch
+	}
+	return arch
+}
+
+func goOsToActionOs(os string) string {
+	osMapper := map[string]string{
+		"darwin": "macOS",
+	}
+	if os, ok := osMapper[os]; ok {
+		return os
+	}
+	return os
+}
+
func (e *HostEnvironment) GetRunnerContext(ctx context.Context) map[string]interface{} {
	return map[string]interface{}{
-		"os":         runtime.GOOS,
-		"arch":       runtime.GOARCH,
+		"os":         goOsToActionOs(runtime.GOOS),
+		"arch":       goArchToActionArch(runtime.GOARCH),
		"temp":       e.TmpDir,
		"tool_cache": e.ToolCache,
	}
@@ -468,3 +425,7 @@ func (e *HostEnvironment) ReplaceLogWriter(stdout io.Writer, stderr io.Writer) (
	e.StdOut = stdout
	return org, org
}
+
+func (*HostEnvironment) IsEnvironmentCaseInsensitive() bool {
+	return runtime.GOOS == "windows"
+}
@@ -71,3 +71,7 @@ func (*LinuxContainerEnvironmentExtensions) GetRunnerContext(ctx context.Context
		"tool_cache": "/opt/hostedtoolcache",
	}
}
+
+func (*LinuxContainerEnvironmentExtensions) IsEnvironmentCaseInsensitive() bool {
+	return false
+}
pkg/container/parse_env_file.go (new file, 60 lines)
@@ -0,0 +1,60 @@
package container

import (
	"archive/tar"
	"bufio"
	"context"
	"fmt"
	"io"
	"strings"

	"github.com/nektos/act/pkg/common"
)

func parseEnvFile(e Container, srcPath string, env *map[string]string) common.Executor {
	localEnv := *env
	return func(ctx context.Context) error {
		envTar, err := e.GetContainerArchive(ctx, srcPath)
		if err != nil {
			return nil
		}
		defer envTar.Close()
		reader := tar.NewReader(envTar)
		_, err = reader.Next()
		if err != nil && err != io.EOF {
			return err
		}
		s := bufio.NewScanner(reader)
		for s.Scan() {
			line := s.Text()
			singleLineEnv := strings.Index(line, "=")
			multiLineEnv := strings.Index(line, "<<")
			if singleLineEnv != -1 && (multiLineEnv == -1 || singleLineEnv < multiLineEnv) {
				localEnv[line[:singleLineEnv]] = line[singleLineEnv+1:]
			} else if multiLineEnv != -1 {
				multiLineEnvContent := ""
				multiLineEnvDelimiter := line[multiLineEnv+2:]
				delimiterFound := false
				for s.Scan() {
					content := s.Text()
					if content == multiLineEnvDelimiter {
						delimiterFound = true
						break
					}
					if multiLineEnvContent != "" {
						multiLineEnvContent += "\n"
					}
					multiLineEnvContent += content
				}
				if !delimiterFound {
					return fmt.Errorf("invalid format delimiter '%v' not found before end of file", multiLineEnvDelimiter)
				}
				localEnv[line[:multiLineEnv]] = multiLineEnvContent
			} else {
				return fmt.Errorf("invalid format '%v', expected a line with '=' or '<<'", line)
			}
		}
		env = &localEnv
		return nil
	}
}
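parseEnvFile consumes the GITHUB_ENV file format: KEY=value lines plus heredoc-style multiline values. A standalone run of the same parsing rules on an in-memory sample shows what it extracts (this demo inlines the scanner loop; the real function reads the file out of a container archive):

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	sample := "SINGLE=one=two\nMULTI<<EOF\nfirst\nsecond\nEOF\n"
	env := map[string]string{}

	s := bufio.NewScanner(strings.NewReader(sample))
	for s.Scan() {
		line := s.Text()
		eq := strings.Index(line, "=")
		heredoc := strings.Index(line, "<<")
		switch {
		case eq != -1 && (heredoc == -1 || eq < heredoc):
			env[line[:eq]] = line[eq+1:] // the value may itself contain '='
		case heredoc != -1:
			// collect lines until the heredoc delimiter is reached
			delim, content := line[heredoc+2:], []string{}
			for s.Scan() && s.Text() != delim {
				content = append(content, s.Text())
			}
			env[line[:heredoc]] = strings.Join(content, "\n")
		}
	}
	fmt.Printf("%q\n", env) // map["MULTI":"first\nsecond" "SINGLE":"one=two"]
}
```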
@@ -14,6 +14,7 @@ import (
	"strings"

	"github.com/go-git/go-git/v5/plumbing/format/gitignore"

	"github.com/nektos/act/pkg/model"
	"github.com/rhysd/actionlint"
)
@@ -202,6 +203,9 @@ func (impl *interperterImpl) hashFiles(paths ...reflect.Value) (string, error) {

	var files []string
	if err := filepath.Walk(impl.config.WorkingDir, func(path string, fi fs.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
		sansPrefix := strings.TrimPrefix(path, impl.config.WorkingDir+string(filepath.Separator))
		parts := strings.Split(sansPrefix, string(filepath.Separator))
		if fi.IsDir() || !matcher.Match(parts, fi.IsDir()) {
@@ -15,13 +15,21 @@ type EvaluationEnvironment struct {
	Github   *model.GithubContext
	Env      map[string]string
	Job      *model.JobContext
+	Jobs     *map[string]*model.WorkflowCallResult
	Steps    map[string]*model.StepResult
	Runner   map[string]interface{}
	Secrets  map[string]string
	Strategy map[string]interface{}
	Matrix   map[string]interface{}
-	Needs    map[string]map[string]map[string]string
+	Needs    map[string]Needs
	Inputs   map[string]interface{}
+
+	Vars map[string]string
}

+type Needs struct {
+	Outputs map[string]string `json:"outputs"`
+	Result  string            `json:"result"`
+}
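With outputs and result carried side by side, the needs context now serializes the way workflow expressions expect to traverse it. A small illustration with hypothetical job data:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type Needs struct {
	Outputs map[string]string `json:"outputs"`
	Result  string            `json:"result"`
}

func main() {
	needs := map[string]Needs{
		"build": {
			Outputs: map[string]string{"artifact": "dist.tgz"},
			Result:  "success",
		},
	}
	// This is the shape expressions like needs.build.outputs.artifact and
	// needs.build.result walk through.
	b, _ := json.MarshalIndent(needs, "", "  ")
	fmt.Println(string(b))
}
```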
|
||||
|
||||
type Config struct {
|
||||
@@ -146,10 +154,17 @@ func (impl *interperterImpl) evaluateVariable(variableNode *actionlint.VariableN
|
||||
switch strings.ToLower(variableNode.Name) {
|
||||
case "github":
|
||||
return impl.env.Github, nil
|
||||
case "gitea": // compatible with Gitea
|
||||
return impl.env.Github, nil
|
||||
case "env":
|
||||
return impl.env.Env, nil
|
||||
case "job":
|
||||
return impl.env.Job, nil
|
||||
case "jobs":
|
||||
if impl.env.Jobs == nil {
|
||||
return nil, fmt.Errorf("Unavailable context: jobs")
|
||||
}
|
||||
return impl.env.Jobs, nil
|
||||
case "steps":
|
||||
return impl.env.Steps, nil
|
||||
case "runner":
|
||||
@@ -168,6 +183,8 @@ func (impl *interperterImpl) evaluateVariable(variableNode *actionlint.VariableN
|
||||
return math.Inf(1), nil
|
||||
case "nan":
|
||||
return math.NaN(), nil
|
||||
case "vars":
|
||||
return impl.env.Vars, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("Unavailable context: %s", variableNode.Name)
|
||||
}
|
||||
@@ -361,8 +378,16 @@ func (impl *interperterImpl) compareValues(leftValue reflect.Value, rightValue r
|
||||
|
||||
return impl.compareNumber(leftValue.Float(), rightValue.Float(), kind)
|
||||
|
||||
case reflect.Invalid:
|
||||
if rightValue.Kind() == reflect.Invalid {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// not possible situation - params are converted to the same type in code above
|
||||
return nil, fmt.Errorf("Compare params of Invalid type: left: %+v, right: %+v", leftValue.Kind(), rightValue.Kind())
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("TODO: evaluateCompare not implemented! left: %+v, right: %+v", leftValue.Kind(), rightValue.Kind())
|
||||
return nil, fmt.Errorf("Compare not implemented for types: left: %+v, right: %+v", leftValue.Kind(), rightValue.Kind())
|
||||
}
|
||||
}
|
||||
|
||||
|
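
The new reflect.Invalid branch above makes comparisons with absent context properties total: two missing values compare equal, and anything else surfaces as an error instead of a fatal log, which is what the property-comparison test cases below exercise. A minimal standalone sketch of that rule (this compareValues is a simplified stand-in for the interpreter's method, not its real signature):

package main

import (
	"fmt"
	"reflect"
)

func compareValues(left, right reflect.Value) (bool, error) {
	switch left.Kind() {
	case reflect.Invalid: // a missing property, e.g. github.event.commits[0].author.username1
		if right.Kind() == reflect.Invalid {
			return true, nil // null == null
		}
		// should be unreachable: operands are coerced to a common type first
		return false, fmt.Errorf("compare params of Invalid type: left: %+v, right: %+v", left.Kind(), right.Kind())
	case reflect.String:
		return left.String() == right.String(), nil
	default:
		// mirrors the new error for e.g. "secrets != env" (map vs. map)
		return false, fmt.Errorf("compare not implemented for types: left: %+v, right: %+v", left.Kind(), right.Kind())
	}
}

func main() {
	var missing interface{}
	eq, err := compareValues(reflect.ValueOf(missing), reflect.ValueOf(missing))
	fmt.Println(eq, err) // true <nil>

	_, err = compareValues(reflect.ValueOf(map[string]string{}), reflect.ValueOf(map[string]string{}))
	fmt.Println(err) // compare not implemented for types: left: map, right: map
}
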
@@ -69,6 +69,11 @@ func TestOperators(t *testing.T) {
{`true || false`, true, "or", ""},
{`fromJSON('{}') && true`, true, "and-boolean-object", ""},
{`fromJSON('{}') || false`, make(map[string]interface{}), "or-boolean-object", ""},
{"github.event.commits[0].author.username != github.event.commits[1].author.username", true, "property-comparison1", ""},
{"github.event.commits[0].author.username1 != github.event.commits[1].author.username", true, "property-comparison2", ""},
{"github.event.commits[0].author.username != github.event.commits[1].author.username1", true, "property-comparison3", ""},
{"github.event.commits[0].author.username1 != github.event.commits[1].author.username2", true, "property-comparison4", ""},
{"secrets != env", nil, "property-comparison5", "Compare not implemented for types: left: map, right: map"},
}

env := &EvaluationEnvironment{
@@ -555,6 +560,7 @@ func TestContexts(t *testing.T) {
{"strategy.fail-fast", true, "strategy-context"},
{"matrix.os", "Linux", "matrix-context"},
{"needs.job-id.outputs.output-name", "value", "needs-context"},
{"needs.job-id.result", "success", "needs-context"},
{"inputs.name", "value", "inputs-context"},
}

@@ -593,11 +599,12 @@ func TestContexts(t *testing.T) {
Matrix: map[string]interface{}{
"os": "Linux",
},
Needs: map[string]map[string]map[string]string{
Needs: map[string]Needs{
"job-id": {
"outputs": {
Outputs: map[string]string{
"output-name": "value",
},
Result: "success",
},
},
Inputs: map[string]interface{}{

@@ -41,11 +41,12 @@ func NewInterpeter(
jobs := run.Workflow.Jobs
jobNeeds := run.Job().Needs()

using := map[string]map[string]map[string]string{}
using := map[string]exprparser.Needs{}
for _, need := range jobNeeds {
if v, ok := jobs[need]; ok {
using[need] = map[string]map[string]string{
"outputs": v.Outputs,
using[need] = exprparser.Needs{
Outputs: v.Outputs,
Result: v.Result,
}
}
}
@@ -61,6 +62,8 @@ func NewInterpeter(
Matrix: matrix,
Needs: using,
Inputs: nil, // not supported yet

Vars: nil,
}

config := exprparser.Config{

@@ -36,8 +36,17 @@ func Parse(content []byte, options ...ParseOption) ([]*SingleWorkflow, error) {
}

var ret []*SingleWorkflow
for id, job := range workflow.Jobs {
for _, matrix := range getMatrixes(origin.GetJob(id)) {
ids, jobs, err := workflow.jobs()
if err != nil {
return nil, fmt.Errorf("invalid jobs: %w", err)
}
for i, id := range ids {
job := jobs[i]
matricxes, err := getMatrixes(origin.GetJob(id))
if err != nil {
return nil, fmt.Errorf("getMatrixes: %w", err)
}
for _, matrix := range matricxes {
job := job.Clone()
if job.Name == "" {
job.Name = id
@@ -50,17 +59,18 @@ func Parse(content []byte, options ...ParseOption) ([]*SingleWorkflow, error) {
runsOn[i] = evaluator.Interpolate(v)
}
job.RawRunsOn = encodeRunsOn(runsOn)
job.EraseNeeds() // there will be only one job in SingleWorkflow, it cannot have needs
ret = append(ret, &SingleWorkflow{
swf := &SingleWorkflow{
Name: workflow.Name,
RawOn: workflow.RawOn,
Env: workflow.Env,
Jobs: map[string]*Job{id: job},
Defaults: workflow.Defaults,
})
}
if err := swf.SetJob(id, job); err != nil {
return nil, fmt.Errorf("SetJob: %w", err)
}
ret = append(ret, swf)
}
}
sortWorkflows(ret)
return ret, nil
}

@@ -83,12 +93,15 @@ type parseContext struct {

type ParseOption func(c *parseContext)

func getMatrixes(job *model.Job) []map[string]interface{} {
ret := job.GetMatrixes()
func getMatrixes(job *model.Job) ([]map[string]interface{}, error) {
ret, err := job.GetMatrixes()
if err != nil {
return nil, fmt.Errorf("GetMatrixes: %w", err)
}
sort.Slice(ret, func(i, j int) bool {
return matrixName(ret[i]) < matrixName(ret[j])
})
return ret
return ret, nil
}

func encodeMatrix(matrix map[string]interface{}) yaml.Node {
@@ -135,19 +148,3 @@ func matrixName(m map[string]interface{}) string {

return fmt.Sprintf("(%s)", strings.Join(vs, ", "))
}

func sortWorkflows(wfs []*SingleWorkflow) {
sort.Slice(wfs, func(i, j int) bool {
ki := ""
for k := range wfs[i].Jobs {
ki = k
break
}
kj := ""
for k := range wfs[j].Jobs {
kj = k
break
}
return ki < kj
})
}

@@ -1,8 +1,6 @@
package jobparser

import (
"embed"
"path/filepath"
"strings"
"testing"

@@ -13,9 +11,6 @@ import (
"gopkg.in/yaml.v3"
)

//go:embed testdata
var f embed.FS

func TestParse(t *testing.T) {
tests := []struct {
name string
@@ -37,13 +32,26 @@ func TestParse(t *testing.T) {
options: nil,
wantErr: false,
},
{
name: "has_with",
options: nil,
wantErr: false,
},
{
name: "has_secrets",
options: nil,
wantErr: false,
},
{
name: "empty_step",
options: nil,
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
content, err := f.ReadFile(filepath.Join("testdata", tt.name+".in.yaml"))
require.NoError(t, err)
want, err := f.ReadFile(filepath.Join("testdata", tt.name+".out.yaml"))
require.NoError(t, err)
content := ReadTestdata(t, tt.name+".in.yaml")
want := ReadTestdata(t, tt.name+".out.yaml")
got, err := Parse(content, tt.options...)
if tt.wantErr {
require.Error(t, err)
@@ -57,7 +65,10 @@ func TestParse(t *testing.T) {
}
encoder := yaml.NewEncoder(builder)
encoder.SetIndent(2)
_ = encoder.Encode(v)
require.NoError(t, encoder.Encode(v))
id, job := v.Job()
assert.NotEmpty(t, id)
assert.NotNil(t, job)
}
assert.Equal(t, string(want), builder.String())
})

@@ -12,17 +12,56 @@ type SingleWorkflow struct {
Name string `yaml:"name,omitempty"`
RawOn yaml.Node `yaml:"on,omitempty"`
Env map[string]string `yaml:"env,omitempty"`
Jobs map[string]*Job `yaml:"jobs,omitempty"`
RawJobs yaml.Node `yaml:"jobs,omitempty"`
Defaults Defaults `yaml:"defaults,omitempty"`
}

func (w *SingleWorkflow) Job() (string, *Job) {
for k, v := range w.Jobs {
return k, v
ids, jobs, _ := w.jobs()
if len(ids) >= 1 {
return ids[0], jobs[0]
}
return "", nil
}

func (w *SingleWorkflow) jobs() ([]string, []*Job, error) {
ids, jobs, err := parseMappingNode[*Job](&w.RawJobs)
if err != nil {
return nil, nil, err
}

for _, job := range jobs {
steps := make([]*Step, 0, len(job.Steps))
for _, s := range job.Steps {
if s != nil {
steps = append(steps, s)
}
}
job.Steps = steps
}

return ids, jobs, nil
}

func (w *SingleWorkflow) SetJob(id string, job *Job) error {
m := map[string]*Job{
id: job,
}
out, err := yaml.Marshal(m)
if err != nil {
return err
}
node := yaml.Node{}
if err := yaml.Unmarshal(out, &node); err != nil {
return err
}
if len(node.Content) != 1 || node.Content[0].Kind != yaml.MappingNode {
return fmt.Errorf("can not set job: %q", out)
}
w.RawJobs = *node.Content[0]
return nil
}

func (w *SingleWorkflow) Marshal() ([]byte, error) {
return yaml.Marshal(w)
}
@@ -41,6 +80,8 @@ type Job struct {
Defaults Defaults `yaml:"defaults,omitempty"`
Outputs map[string]string `yaml:"outputs,omitempty"`
Uses string `yaml:"uses,omitempty"`
With map[string]interface{} `yaml:"with,omitempty"`
RawSecrets yaml.Node `yaml:"secrets,omitempty"`
}

func (j *Job) Clone() *Job {
@@ -61,6 +102,8 @@ func (j *Job) Clone() *Job {
Defaults: j.Defaults,
Outputs: j.Outputs,
Uses: j.Uses,
With: j.With,
RawSecrets: j.RawSecrets,
}
}

@@ -68,8 +111,9 @@ func (j *Job) Needs() []string {
return (&model.Job{RawNeeds: j.RawNeeds}).Needs()
}

func (j *Job) EraseNeeds() {
func (j *Job) EraseNeeds() *Job {
j.RawNeeds = yaml.Node{}
return j
}

func (j *Job) RunsOn() []string {
@@ -92,6 +136,9 @@ type Step struct {

// String gets the name of step
func (s *Step) String() string {
if s == nil {
return ""
}
return (&model.Step{
ID: s.ID,
Name: s.Name,
@@ -107,6 +154,7 @@ type ContainerSpec struct {
Volumes []string `yaml:"volumes,omitempty"`
Options string `yaml:"options,omitempty"`
Credentials map[string]string `yaml:"credentials,omitempty"`
Cmd []string `yaml:"cmd,omitempty"`
}

type Strategy struct {
@@ -126,7 +174,20 @@ type RunDefaults struct {

type Event struct {
Name string
Acts map[string][]string
acts map[string][]string
schedules []map[string]string
}

func (evt *Event) IsSchedule() bool {
return evt.schedules != nil
}

func (evt *Event) Acts() map[string][]string {
return evt.acts
}

func (evt *Event) Schedules() []map[string]string {
return evt.schedules
}

func ParseRawOn(rawOn *yaml.Node) ([]*Event, error) {
@@ -157,23 +218,30 @@ func ParseRawOn(rawOn *yaml.Node) ([]*Event, error) {
}
return res, nil
case yaml.MappingNode:
var val map[string]interface{}
err := rawOn.Decode(&val)
events, triggers, err := parseMappingNode[interface{}](rawOn)
if err != nil {
return nil, err
}
res := make([]*Event, 0, len(val))
for k, v := range val {
res := make([]*Event, 0, len(events))
for i, k := range events {
v := triggers[i]
if v == nil {
res = append(res, &Event{
Name: k,
acts: map[string][]string{},
})
continue
}
switch t := v.(type) {
case string:
res = append(res, &Event{
Name: k,
Acts: map[string][]string{},
acts: map[string][]string{},
})
case []string:
res = append(res, &Event{
Name: k,
Acts: map[string][]string{},
acts: map[string][]string{},
})
case map[string]interface{}:
acts := make(map[string][]string, len(t))
@@ -186,7 +254,10 @@ func ParseRawOn(rawOn *yaml.Node) ([]*Event, error) {
case []interface{}:
acts[act] = make([]string, len(b))
for i, v := range b {
acts[act][i] = v.(string)
var ok bool
if acts[act][i], ok = v.(string); !ok {
return nil, fmt.Errorf("unknown on type: %#v", branches)
}
}
default:
return nil, fmt.Errorf("unknown on type: %#v", branches)
@@ -194,7 +265,29 @@ func ParseRawOn(rawOn *yaml.Node) ([]*Event, error) {
}
res = append(res, &Event{
Name: k,
Acts: acts,
acts: acts,
})
case []interface{}:
if k != "schedule" {
return nil, fmt.Errorf("unknown on type: %#v", v)
}
schedules := make([]map[string]string, len(t))
for i, tt := range t {
vv, ok := tt.(map[string]interface{})
if !ok {
return nil, fmt.Errorf("unknown on type: %#v", v)
}
schedules[i] = make(map[string]string, len(vv))
for k, vvv := range vv {
var ok bool
if schedules[i][k], ok = vvv.(string); !ok {
return nil, fmt.Errorf("unknown on type: %#v", v)
}
}
}
res = append(res, &Event{
Name: k,
schedules: schedules,
})
default:
return nil, fmt.Errorf("unknown on type: %#v", v)
@@ -205,3 +298,36 @@ func ParseRawOn(rawOn *yaml.Node) ([]*Event, error) {
return nil, fmt.Errorf("unknown on type: %v", rawOn.Kind)
}
}

// parseMappingNode parse a mapping node and preserve order.
func parseMappingNode[T any](node *yaml.Node) ([]string, []T, error) {
if node.Kind != yaml.MappingNode {
return nil, nil, fmt.Errorf("input node is not a mapping node")
}

var scalars []string
var datas []T
expectKey := true
for _, item := range node.Content {
if expectKey {
if item.Kind != yaml.ScalarNode {
return nil, nil, fmt.Errorf("not a valid scalar node: %v", item.Value)
}
scalars = append(scalars, item.Value)
expectKey = false
} else {
var val T
if err := item.Decode(&val); err != nil {
return nil, nil, err
}
datas = append(datas, val)
expectKey = true
}
}

if len(scalars) != len(datas) {
return nil, nil, fmt.Errorf("invalid definition of on: %v", node.Value)
}

return scalars, datas, nil
}
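
Because go-yaml stores a mapping's keys and values as alternating entries in node.Content, parseMappingNode can report jobs and triggers in declaration order, which decoding into a map[string]T would destroy. A small standalone sketch of the same traversal against the yaml.v3 API (the sample document is made up):

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	src := "push:\n  branches: [main]\npull_request:\n  types: [opened]\n"

	var doc yaml.Node
	if err := yaml.Unmarshal([]byte(src), &doc); err != nil {
		panic(err)
	}
	mapping := doc.Content[0] // the document node wraps the top-level mapping

	// Content holds key, value, key, value, ... in source order.
	for i := 0; i+1 < len(mapping.Content); i += 2 {
		key := mapping.Content[i].Value
		var val interface{}
		if err := mapping.Content[i+1].Decode(&val); err != nil {
			panic(err)
		}
		fmt.Printf("%s -> %v\n", key, val) // push first, pull_request second, always
	}
}
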
@@ -6,7 +6,10 @@ import (
"testing"

"github.com/nektos/act/pkg/model"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v3"
)

func TestParseRawOn(t *testing.T) {
@@ -47,7 +50,7 @@ func TestParseRawOn(t *testing.T) {
result: []*Event{
{
Name: "push",
Acts: map[string][]string{
acts: map[string][]string{
"branches": {
"master",
},
@@ -60,7 +63,7 @@ func TestParseRawOn(t *testing.T) {
result: []*Event{
{
Name: "branch_protection_rule",
Acts: map[string][]string{
acts: map[string][]string{
"types": {
"created",
"deleted",
@@ -74,7 +77,7 @@ func TestParseRawOn(t *testing.T) {
result: []*Event{
{
Name: "project",
Acts: map[string][]string{
acts: map[string][]string{
"types": {
"created",
"deleted",
@@ -83,7 +86,7 @@ func TestParseRawOn(t *testing.T) {
},
{
Name: "milestone",
Acts: map[string][]string{
acts: map[string][]string{
"types": {
"opened",
"deleted",
@@ -97,7 +100,7 @@ func TestParseRawOn(t *testing.T) {
result: []*Event{
{
Name: "pull_request",
Acts: map[string][]string{
acts: map[string][]string{
"types": {
"opened",
},
@@ -113,7 +116,7 @@ func TestParseRawOn(t *testing.T) {
result: []*Event{
{
Name: "push",
Acts: map[string][]string{
acts: map[string][]string{
"branches": {
"main",
},
@@ -121,7 +124,7 @@ func TestParseRawOn(t *testing.T) {
},
{
Name: "pull_request",
Acts: map[string][]string{
acts: map[string][]string{
"types": {
"opened",
},
@@ -137,7 +140,7 @@ func TestParseRawOn(t *testing.T) {
result: []*Event{
{
Name: "push",
Acts: map[string][]string{
acts: map[string][]string{
"branches": {
"main",
"releases/**",
@@ -151,7 +154,7 @@ func TestParseRawOn(t *testing.T) {
result: []*Event{
{
Name: "push",
Acts: map[string][]string{
acts: map[string][]string{
"tags": {
"v1.**",
},
@@ -170,6 +173,19 @@ func TestParseRawOn(t *testing.T) {
},
},
},
{
input: "on:\n schedule:\n - cron: '20 6 * * *'",
result: []*Event{
{
Name: "schedule",
schedules: []map[string]string{
{
"cron": "20 6 * * *",
},
},
},
},
},
}
for _, kase := range kases {
t.Run(kase.input, func(t *testing.T) {
@@ -182,3 +198,109 @@ func TestParseRawOn(t *testing.T) {
})
}
}

func TestSingleWorkflow_SetJob(t *testing.T) {
t.Run("erase needs", func(t *testing.T) {
content := ReadTestdata(t, "erase_needs.in.yaml")
want := ReadTestdata(t, "erase_needs.out.yaml")
swf, err := Parse(content)
require.NoError(t, err)
builder := &strings.Builder{}
for _, v := range swf {
id, job := v.Job()
require.NoError(t, v.SetJob(id, job.EraseNeeds()))

if builder.Len() > 0 {
builder.WriteString("---\n")
}
encoder := yaml.NewEncoder(builder)
encoder.SetIndent(2)
require.NoError(t, encoder.Encode(v))
}
assert.Equal(t, string(want), builder.String())
})
}

func TestParseMappingNode(t *testing.T) {
tests := []struct {
input string
scalars []string
datas []interface{}
}{
{
input: "on:\n push:\n branches:\n - master",
scalars: []string{"push"},
datas: []interface{}{
map[string]interface{}{
"branches": []interface{}{"master"},
},
},
},
{
input: "on:\n branch_protection_rule:\n types: [created, deleted]",
scalars: []string{"branch_protection_rule"},
datas: []interface{}{
map[string]interface{}{
"types": []interface{}{"created", "deleted"},
},
},
},
{
input: "on:\n project:\n types: [created, deleted]\n milestone:\n types: [opened, deleted]",
scalars: []string{"project", "milestone"},
datas: []interface{}{
map[string]interface{}{
"types": []interface{}{"created", "deleted"},
},
map[string]interface{}{
"types": []interface{}{"opened", "deleted"},
},
},
},
{
input: "on:\n pull_request:\n types:\n - opened\n branches:\n - 'releases/**'",
scalars: []string{"pull_request"},
datas: []interface{}{
map[string]interface{}{
"types": []interface{}{"opened"},
"branches": []interface{}{"releases/**"},
},
},
},
{
input: "on:\n push:\n branches:\n - main\n pull_request:\n types:\n - opened\n branches:\n - '**'",
scalars: []string{"push", "pull_request"},
datas: []interface{}{
map[string]interface{}{
"branches": []interface{}{"main"},
},
map[string]interface{}{
"types": []interface{}{"opened"},
"branches": []interface{}{"**"},
},
},
},
{
input: "on:\n schedule:\n - cron: '20 6 * * *'",
scalars: []string{"schedule"},
datas: []interface{}{
[]interface{}{map[string]interface{}{
"cron": "20 6 * * *",
}},
},
},
}

for _, test := range tests {
t.Run(test.input, func(t *testing.T) {
workflow, err := model.ReadWorkflow(strings.NewReader(test.input))
assert.NoError(t, err)

scalars, datas, err := parseMappingNode[interface{}](&workflow.RawOn)
assert.NoError(t, err)
assert.EqualValues(t, test.scalars, scalars, fmt.Sprintf("%#v", scalars))
assert.EqualValues(t, test.datas, datas, fmt.Sprintf("%#v", datas))
})
}
}

pkg/jobparser/testdata/empty_step.in.yaml (vendored, new file, 8 lines)
@@ -0,0 +1,8 @@
name: test
jobs:
  job1:
    name: job1
    runs-on: linux
    steps:
      - run: echo job-1
      -

pkg/jobparser/testdata/empty_step.out.yaml (vendored, new file, 7 lines)
@@ -0,0 +1,7 @@
name: test
jobs:
  job1:
    name: job1
    runs-on: linux
    steps:
      - run: echo job-1

pkg/jobparser/testdata/erase_needs.in.yaml (vendored, new file, 16 lines)
@@ -0,0 +1,16 @@
name: test
jobs:
  job1:
    runs-on: linux
    steps:
      - run: uname -a
  job2:
    runs-on: linux
    steps:
      - run: uname -a
    needs: job1
  job3:
    runs-on: linux
    steps:
      - run: uname -a
    needs: [job1, job2]

pkg/jobparser/testdata/erase_needs.out.yaml (vendored, new file, 23 lines)
@@ -0,0 +1,23 @@
name: test
jobs:
  job1:
    name: job1
    runs-on: linux
    steps:
      - run: uname -a
---
name: test
jobs:
  job2:
    name: job2
    runs-on: linux
    steps:
      - run: uname -a
---
name: test
jobs:
  job3:
    name: job3
    runs-on: linux
    steps:
      - run: uname -a

pkg/jobparser/testdata/has_needs.out.yaml (vendored, 2 additions)
@@ -10,6 +10,7 @@ name: test
jobs:
  job2:
    name: job2
    needs: job1
    runs-on: linux
    steps:
      - run: uname -a
@@ -18,6 +19,7 @@ name: test
jobs:
  job3:
    name: job3
    needs: [job1, job2]
    runs-on: linux
    steps:
      - run: uname -a

pkg/jobparser/testdata/has_secrets.in.yaml (vendored, new file, 14 lines)
@@ -0,0 +1,14 @@
name: test
jobs:
  job1:
    name: job1
    runs-on: linux
    uses: .gitea/workflows/build.yml
    secrets:
      secret: hideme

  job2:
    name: job2
    runs-on: linux
    uses: .gitea/workflows/build.yml
    secrets: inherit

pkg/jobparser/testdata/has_secrets.out.yaml (vendored, new file, 16 lines)
@@ -0,0 +1,16 @@
name: test
jobs:
  job1:
    name: job1
    runs-on: linux
    uses: .gitea/workflows/build.yml
    secrets:
      secret: hideme
---
name: test
jobs:
  job2:
    name: job2
    runs-on: linux
    uses: .gitea/workflows/build.yml
    secrets: inherit

pkg/jobparser/testdata/has_with.in.yaml (vendored, new file, 15 lines)
@@ -0,0 +1,15 @@
name: test
jobs:
  job1:
    name: job1
    runs-on: linux
    uses: .gitea/workflows/build.yml
    with:
      package: service

  job2:
    name: job2
    runs-on: linux
    uses: .gitea/workflows/build.yml
    with:
      package: module

pkg/jobparser/testdata/has_with.out.yaml (vendored, new file, 17 lines)
@@ -0,0 +1,17 @@
name: test
jobs:
  job1:
    name: job1
    runs-on: linux
    uses: .gitea/workflows/build.yml
    with:
      package: service
---
name: test
jobs:
  job2:
    name: job2
    runs-on: linux
    uses: .gitea/workflows/build.yml
    with:
      package: module

pkg/jobparser/testdata/multiple_jobs.in.yaml (vendored, 8 changes)
@@ -1,5 +1,9 @@
name: test
jobs:
  zzz:
    runs-on: linux
    steps:
      - run: echo zzz
  job1:
    runs-on: linux
    steps:
@@ -12,3 +16,7 @@ jobs:
    runs-on: linux
    steps:
      - run: uname -a && go version
  aaa:
    runs-on: linux
    steps:
      - run: uname -a && go version

pkg/jobparser/testdata/multiple_jobs.out.yaml (vendored, 16 changes)
@@ -1,4 +1,12 @@
name: test
jobs:
  zzz:
    name: zzz
    runs-on: linux
    steps:
      - run: echo zzz
---
name: test
jobs:
  job1:
    name: job1
@@ -21,3 +29,11 @@ jobs:
    runs-on: linux
    steps:
      - run: uname -a && go version
---
name: test
jobs:
  aaa:
    name: aaa
    runs-on: linux
    steps:
      - run: uname -a && go version

pkg/jobparser/testdata_test.go (new file, 18 lines)
@@ -0,0 +1,18 @@
package jobparser

import (
"embed"
"path/filepath"
"testing"

"github.com/stretchr/testify/require"
)

//go:embed testdata
var testdata embed.FS

func ReadTestdata(t *testing.T, name string) []byte {
content, err := testdata.ReadFile(filepath.Join("testdata", name))
require.NoError(t, err)
return content
}
@@ -20,7 +20,7 @@ func (a *ActionRunsUsing) UnmarshalYAML(unmarshal func(interface{}) error) error
// Force input to lowercase for case insensitive comparison
format := ActionRunsUsing(strings.ToLower(using))
switch format {
case ActionRunsUsingNode16, ActionRunsUsingNode12, ActionRunsUsingDocker, ActionRunsUsingComposite:
case ActionRunsUsingNode16, ActionRunsUsingNode12, ActionRunsUsingDocker, ActionRunsUsingComposite, ActionRunsUsingGo:
*a = format
default:
return fmt.Errorf(fmt.Sprintf("The runs.using key in action.yml must be one of: %v, got %s", []string{
@@ -28,6 +28,7 @@ func (a *ActionRunsUsing) UnmarshalYAML(unmarshal func(interface{}) error) error
ActionRunsUsingDocker,
ActionRunsUsingNode12,
ActionRunsUsingNode16,
ActionRunsUsingGo,
}, format))
}
return nil

@@ -3,6 +3,7 @@ package model
import (
"context"
"fmt"
"strings"

"github.com/nektos/act/pkg/common"
"github.com/nektos/act/pkg/common/git"
@@ -35,6 +36,9 @@ type GithubContext struct {
RetentionDays string `json:"retention_days"`
RunnerPerflog string `json:"runner_perflog"`
RunnerTrackingID string `json:"runner_tracking_id"`
ServerURL string `json:"server_url"`
APIURL string `json:"api_url"`
GraphQLURL string `json:"graphql_url"`
}

func asString(v interface{}) string {
@@ -89,26 +93,22 @@ func withDefaultBranch(ctx context.Context, b string, event map[string]interface
var findGitRef = git.FindGitRef
var findGitRevision = git.FindGitRevision

func (ghc *GithubContext) SetRefAndSha(ctx context.Context, defaultBranch string, repoPath string) {
func (ghc *GithubContext) SetRef(ctx context.Context, defaultBranch string, repoPath string) {
logger := common.Logger(ctx)

// https://docs.github.com/en/actions/learn-github-actions/events-that-trigger-workflows
// https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads
switch ghc.EventName {
case "pull_request_target":
ghc.Ref = fmt.Sprintf("refs/heads/%s", ghc.BaseRef)
ghc.Sha = asString(nestedMapLookup(ghc.Event, "pull_request", "base", "sha"))
case "pull_request", "pull_request_review", "pull_request_review_comment":
ghc.Ref = fmt.Sprintf("refs/pull/%.0f/merge", ghc.Event["number"])
case "deployment", "deployment_status":
ghc.Ref = asString(nestedMapLookup(ghc.Event, "deployment", "ref"))
ghc.Sha = asString(nestedMapLookup(ghc.Event, "deployment", "sha"))
case "release":
ghc.Ref = asString(nestedMapLookup(ghc.Event, "release", "tag_name"))
ghc.Ref = fmt.Sprintf("refs/tags/%s", asString(nestedMapLookup(ghc.Event, "release", "tag_name")))
case "push", "create", "workflow_dispatch":
ghc.Ref = asString(ghc.Event["ref"])
if deleted, ok := ghc.Event["deleted"].(bool); ok && !deleted {
ghc.Sha = asString(ghc.Event["after"])
}
default:
defaultBranch := asString(nestedMapLookup(ghc.Event, "repository", "default_branch"))
if defaultBranch != "" {
@@ -136,6 +136,23 @@ func (ghc *GithubContext) SetRefAndSha(ctx context.Context, defaultBranch string
ghc.Ref = fmt.Sprintf("refs/heads/%s", asString(nestedMapLookup(ghc.Event, "repository", "default_branch")))
}
}
}

func (ghc *GithubContext) SetSha(ctx context.Context, repoPath string) {
logger := common.Logger(ctx)

// https://docs.github.com/en/actions/learn-github-actions/events-that-trigger-workflows
// https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads
switch ghc.EventName {
case "pull_request_target":
ghc.Sha = asString(nestedMapLookup(ghc.Event, "pull_request", "base", "sha"))
case "deployment", "deployment_status":
ghc.Sha = asString(nestedMapLookup(ghc.Event, "deployment", "sha"))
case "push", "create", "workflow_dispatch":
if deleted, ok := ghc.Event["deleted"].(bool); ok && !deleted {
ghc.Sha = asString(ghc.Event["after"])
}
}

if ghc.Sha == "" {
_, sha, err := findGitRevision(ctx, repoPath)
@@ -146,3 +163,51 @@ func (ghc *GithubContext) SetRefAndSha(ctx context.Context, defaultBranch string
}
}
}

func (ghc *GithubContext) SetRepositoryAndOwner(ctx context.Context, githubInstance string, remoteName string, repoPath string) {
if ghc.Repository == "" {
repo, err := git.FindGithubRepo(ctx, repoPath, githubInstance, remoteName)
if err != nil {
common.Logger(ctx).Warningf("unable to get git repo: %v", err)
return
}
ghc.Repository = repo
}
ghc.RepositoryOwner = strings.Split(ghc.Repository, "/")[0]
}

func (ghc *GithubContext) SetRefTypeAndName() {
var refType, refName string

// https://docs.github.com/en/actions/learn-github-actions/environment-variables
if strings.HasPrefix(ghc.Ref, "refs/tags/") {
refType = "tag"
refName = ghc.Ref[len("refs/tags/"):]
} else if strings.HasPrefix(ghc.Ref, "refs/heads/") {
refType = "branch"
refName = ghc.Ref[len("refs/heads/"):]
} else if strings.HasPrefix(ghc.Ref, "refs/pull/") {
refType = ""
refName = ghc.Ref[len("refs/pull/"):]
}

if ghc.RefType == "" {
ghc.RefType = refType
}

if ghc.RefName == "" {
ghc.RefName = refName
}
}

func (ghc *GithubContext) SetBaseAndHeadRef() {
if ghc.EventName == "pull_request" || ghc.EventName == "pull_request_target" {
if ghc.BaseRef == "" {
ghc.BaseRef = asString(nestedMapLookup(ghc.Event, "pull_request", "base", "ref"))
}

if ghc.HeadRef == "" {
ghc.HeadRef = asString(nestedMapLookup(ghc.Event, "pull_request", "head", "ref"))
}
}
}
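
Splitting SetRefAndSha into SetRef and SetSha lets callers resolve the two independently, with SetRefTypeAndName deriving ref_type and ref_name afterwards. A hedged sketch of the resulting call sequence, mirroring the updated tests below (the wrapper function, event payload, and paths are made up for illustration):

import (
	"context"

	"github.com/nektos/act/pkg/model"
)

func fillGithubContext(ctx context.Context, repoPath string) *model.GithubContext {
	ghc := &model.GithubContext{
		EventName: "push",
		Event: map[string]interface{}{
			"ref":     "refs/heads/main",
			"after":   "abc123",
			"deleted": false,
		},
	}
	ghc.SetRef(ctx, "main", repoPath) // ref from the event, else git metadata
	ghc.SetSha(ctx, repoPath)         // sha from the event, else the local revision
	ghc.SetRefTypeAndName()           // derives ref_type "branch" and ref_name "main"
	return ghc
}
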
@@ -9,7 +9,7 @@ import (
"github.com/stretchr/testify/assert"
)

func TestSetRefAndSha(t *testing.T) {
func TestSetRef(t *testing.T) {
log.SetLevel(log.DebugLevel)

oldFindGitRef := findGitRef
@@ -29,19 +29,13 @@ func TestSetRefAndSha(t *testing.T) {
eventName string
event map[string]interface{}
ref string
sha string
refName string
}{
{
eventName: "pull_request_target",
event: map[string]interface{}{
"pull_request": map[string]interface{}{
"base": map[string]interface{}{
"sha": "pr-base-sha",
},
},
},
event: map[string]interface{}{},
ref: "refs/heads/master",
sha: "pr-base-sha",
refName: "master",
},
{
eventName: "pull_request",
@@ -49,18 +43,17 @@ func TestSetRefAndSha(t *testing.T) {
"number": 1234.,
},
ref: "refs/pull/1234/merge",
sha: "1234fakesha",
refName: "1234/merge",
},
{
eventName: "deployment",
event: map[string]interface{}{
"deployment": map[string]interface{}{
"ref": "refs/heads/somebranch",
"sha": "deployment-sha",
},
},
ref: "refs/heads/somebranch",
sha: "deployment-sha",
refName: "somebranch",
},
{
eventName: "release",
@@ -69,18 +62,16 @@ func TestSetRefAndSha(t *testing.T) {
"tag_name": "v1.0.0",
},
},
ref: "v1.0.0",
sha: "1234fakesha",
ref: "refs/tags/v1.0.0",
refName: "v1.0.0",
},
{
eventName: "push",
event: map[string]interface{}{
"ref": "refs/heads/somebranch",
"after": "push-sha",
"deleted": false,
},
ref: "refs/heads/somebranch",
sha: "push-sha",
refName: "somebranch",
},
{
eventName: "unknown",
@@ -90,13 +81,13 @@ func TestSetRefAndSha(t *testing.T) {
},
},
ref: "refs/heads/main",
sha: "1234fakesha",
refName: "main",
},
{
eventName: "no-event",
event: map[string]interface{}{},
ref: "refs/heads/master",
sha: "1234fakesha",
refName: "master",
},
}

@@ -108,10 +99,11 @@ func TestSetRefAndSha(t *testing.T) {
Event: table.event,
}

ghc.SetRefAndSha(context.Background(), "main", "/some/dir")
ghc.SetRef(context.Background(), "main", "/some/dir")
ghc.SetRefTypeAndName()

assert.Equal(t, table.ref, ghc.Ref)
assert.Equal(t, table.sha, ghc.Sha)
assert.Equal(t, table.refName, ghc.RefName)
})
}

@@ -125,9 +117,96 @@ func TestSetRefAndSha(t *testing.T) {
Event: map[string]interface{}{},
}

ghc.SetRefAndSha(context.Background(), "", "/some/dir")
ghc.SetRef(context.Background(), "", "/some/dir")

assert.Equal(t, "refs/heads/master", ghc.Ref)
assert.Equal(t, "1234fakesha", ghc.Sha)
})
}

func TestSetSha(t *testing.T) {
log.SetLevel(log.DebugLevel)

oldFindGitRef := findGitRef
oldFindGitRevision := findGitRevision
defer func() { findGitRef = oldFindGitRef }()
defer func() { findGitRevision = oldFindGitRevision }()

findGitRef = func(ctx context.Context, file string) (string, error) {
return "refs/heads/master", nil
}

findGitRevision = func(ctx context.Context, file string) (string, string, error) {
return "", "1234fakesha", nil
}

tables := []struct {
eventName string
event map[string]interface{}
sha string
}{
{
eventName: "pull_request_target",
event: map[string]interface{}{
"pull_request": map[string]interface{}{
"base": map[string]interface{}{
"sha": "pr-base-sha",
},
},
},
sha: "pr-base-sha",
},
{
eventName: "pull_request",
event: map[string]interface{}{
"number": 1234.,
},
sha: "1234fakesha",
},
{
eventName: "deployment",
event: map[string]interface{}{
"deployment": map[string]interface{}{
"sha": "deployment-sha",
},
},
sha: "deployment-sha",
},
{
eventName: "release",
event: map[string]interface{}{},
sha: "1234fakesha",
},
{
eventName: "push",
event: map[string]interface{}{
"after": "push-sha",
"deleted": false,
},
sha: "push-sha",
},
{
eventName: "unknown",
event: map[string]interface{}{},
sha: "1234fakesha",
},
{
eventName: "no-event",
event: map[string]interface{}{},
sha: "1234fakesha",
},
}

for _, table := range tables {
t.Run(table.eventName, func(t *testing.T) {
ghc := &GithubContext{
EventName: table.eventName,
BaseRef: "master",
Event: table.event,
}

ghc.SetSha(context.Background(), "/some/dir")

assert.Equal(t, table.sha, ghc.Sha)
})
}
}

@@ -15,9 +15,9 @@ import (

// WorkflowPlanner contains methods for creating plans
type WorkflowPlanner interface {
PlanEvent(eventName string) *Plan
PlanJob(jobName string) *Plan
PlanAll() *Plan
PlanEvent(eventName string) (*Plan, error)
PlanJob(jobName string) (*Plan, error)
PlanAll() (*Plan, error)
GetEvents() []string
}

@@ -176,47 +176,76 @@ type workflowPlanner struct {
}

// PlanEvent builds a new list of runs to execute in parallel for an event name
func (wp *workflowPlanner) PlanEvent(eventName string) *Plan {
func (wp *workflowPlanner) PlanEvent(eventName string) (*Plan, error) {
plan := new(Plan)
if len(wp.workflows) == 0 {
log.Debugf("no events found for workflow: %s", eventName)
log.Debug("no workflows found by planner")
return plan, nil
}
var lastErr error

for _, w := range wp.workflows {
for _, e := range w.On() {
events := w.On()
if len(events) == 0 {
log.Debugf("no events found for workflow: %s", w.File)
continue
}

for _, e := range events {
if e == eventName {
plan.mergeStages(createStages(w, w.GetJobIDs()...))
stages, err := createStages(w, w.GetJobIDs()...)
if err != nil {
log.Warn(err)
lastErr = err
} else {
plan.mergeStages(stages)
}
}
}
return plan
}
return plan, lastErr
}

// PlanJob builds a new run to execute in parallel for a job name
func (wp *workflowPlanner) PlanJob(jobName string) *Plan {
func (wp *workflowPlanner) PlanJob(jobName string) (*Plan, error) {
plan := new(Plan)
if len(wp.workflows) == 0 {
log.Debugf("no jobs found for workflow: %s", jobName)
}
var lastErr error

for _, w := range wp.workflows {
plan.mergeStages(createStages(w, jobName))
stages, err := createStages(w, jobName)
if err != nil {
log.Warn(err)
lastErr = err
} else {
plan.mergeStages(stages)
}
return plan
}
return plan, lastErr
}

// PlanAll builds a new run to execute in parallel all
func (wp *workflowPlanner) PlanAll() *Plan {
func (wp *workflowPlanner) PlanAll() (*Plan, error) {
plan := new(Plan)
if len(wp.workflows) == 0 {
log.Debugf("no jobs found for loaded workflows")
log.Debug("no workflows found by planner")
return plan, nil
}
var lastErr error

for _, w := range wp.workflows {
plan.mergeStages(createStages(w, w.GetJobIDs()...))
stages, err := createStages(w, w.GetJobIDs()...)
if err != nil {
log.Warn(err)
lastErr = err
} else {
plan.mergeStages(stages)
}
}

return plan
return plan, lastErr
}

// GetEvents gets all the events in the workflows file
@@ -289,7 +318,7 @@ func (p *Plan) mergeStages(stages []*Stage) {
p.Stages = newStages
}

func createStages(w *Workflow, jobIDs ...string) []*Stage {
func createStages(w *Workflow, jobIDs ...string) ([]*Stage, error) {
// first, build a list of all the necessary jobs to run, and their dependencies
jobDependencies := make(map[string][]string)
for len(jobIDs) > 0 {
@@ -306,6 +335,8 @@ func createStages(w *Workflow, jobIDs ...string) []*Stage {
jobIDs = newJobIDs
}

var err error

// next, build an execution graph
stages := make([]*Stage, 0)
for len(jobDependencies) > 0 {
@@ -321,12 +352,16 @@ func createStages(w *Workflow, jobIDs ...string) []*Stage {
}
}
if len(stage.Runs) == 0 {
log.Fatalf("Unable to build dependency graph!")
return nil, fmt.Errorf("unable to build dependency graph for %s (%s)", w.Name, w.File)
}
stages = append(stages, stage)
}

return stages
if len(stages) == 0 && err != nil {
return nil, err
}

return stages, nil
}

// return true iff all strings in srcList exist in at least one of the stages

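
Every planner method now returns (*Plan, error): a workflow whose dependency graph cannot be built is logged, remembered in lastErr, and skipped, so a single broken workflow no longer log.Fatal-s the whole run. A hedged sketch of the resulting call pattern (the wrapper function is illustrative, not part of the package):

import (
	log "github.com/sirupsen/logrus"

	"github.com/nektos/act/pkg/model"
)

func planPush(planner model.WorkflowPlanner) (*model.Plan, error) {
	// PlanEvent still returns a usable (possibly empty) plan alongside
	// the last planning error instead of terminating the process.
	plan, err := planner.PlanEvent("push")
	if err != nil {
		log.Warnf("some workflows could not be planned: %v", err)
	}
	return plan, err
}
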
@@ -42,5 +42,4 @@ type StepResult struct {
Outputs map[string]string `json:"outputs"`
Conclusion stepStatus `json:"conclusion"`
Outcome stepStatus `json:"outcome"`
State map[string]string
}

@@ -58,9 +58,8 @@ func (w *Workflow) On() []string {
func (w *Workflow) OnEvent(event string) interface{} {
if w.RawOn.Kind == yaml.MappingNode {
var val map[string]interface{}
err := w.RawOn.Decode(&val)
if err != nil {
log.Fatal(err)
if !decodeNode(w.RawOn, &val) {
return nil
}
return val[event]
}
@@ -109,16 +108,55 @@ func (w *Workflow) WorkflowDispatchConfig() *WorkflowDispatch {
}

var val map[string]yaml.Node
err := w.RawOn.Decode(&val)
if err != nil {
log.Fatal(err)
if !decodeNode(w.RawOn, &val) {
return nil
}

var config WorkflowDispatch
node := val["workflow_dispatch"]
err = node.Decode(&config)
if err != nil {
log.Fatal(err)
if !decodeNode(node, &config) {
return nil
}

return &config
}

type WorkflowCallInput struct {
Description string `yaml:"description"`
Required bool `yaml:"required"`
Default string `yaml:"default"`
Type string `yaml:"type"`
}

type WorkflowCallOutput struct {
Description string `yaml:"description"`
Value string `yaml:"value"`
}

type WorkflowCall struct {
Inputs map[string]WorkflowCallInput `yaml:"inputs"`
Outputs map[string]WorkflowCallOutput `yaml:"outputs"`
}

type WorkflowCallResult struct {
Outputs map[string]string
}

func (w *Workflow) WorkflowCallConfig() *WorkflowCall {
if w.RawOn.Kind != yaml.MappingNode {
// The callers expect for "on: workflow_call" and "on: [ workflow_call ]" a non nil return value
return &WorkflowCall{}
}

var val map[string]yaml.Node
if !decodeNode(w.RawOn, &val) {
return &WorkflowCall{}
}

var config WorkflowCall
node := val["workflow_call"]
if !decodeNode(node, &config) {
return &WorkflowCall{}
}

return &config
@@ -139,6 +177,8 @@ type Job struct {
Defaults Defaults `yaml:"defaults"`
Outputs map[string]string `yaml:"outputs"`
Uses string `yaml:"uses"`
With map[string]interface{} `yaml:"with"`
RawSecrets yaml.Node `yaml:"secrets"`
Result string
}

@@ -193,21 +233,45 @@ func (s Strategy) GetFailFast() bool {
return failFast
}

func (j *Job) InheritSecrets() bool {
if j.RawSecrets.Kind != yaml.ScalarNode {
return false
}

var val string
if !decodeNode(j.RawSecrets, &val) {
return false
}

return val == "inherit"
}

func (j *Job) Secrets() map[string]string {
if j.RawSecrets.Kind != yaml.MappingNode {
return nil
}

var val map[string]string
if !decodeNode(j.RawSecrets, &val) {
return nil
}

return val
}

// Container details for the job
func (j *Job) Container() *ContainerSpec {
var val *ContainerSpec
switch j.RawContainer.Kind {
case yaml.ScalarNode:
val = new(ContainerSpec)
err := j.RawContainer.Decode(&val.Image)
if err != nil {
log.Fatal(err)
if !decodeNode(j.RawContainer, &val.Image) {
return nil
}
case yaml.MappingNode:
val = new(ContainerSpec)
err := j.RawContainer.Decode(val)
if err != nil {
log.Fatal(err)
if !decodeNode(j.RawContainer, val) {
return nil
}
}
return val
@@ -218,16 +282,14 @@ func (j *Job) Needs() []string {
switch j.RawNeeds.Kind {
case yaml.ScalarNode:
var val string
err := j.RawNeeds.Decode(&val)
if err != nil {
log.Fatal(err)
if !decodeNode(j.RawNeeds, &val) {
return nil
}
return []string{val}
case yaml.SequenceNode:
var val []string
err := j.RawNeeds.Decode(&val)
if err != nil {
log.Fatal(err)
if !decodeNode(j.RawNeeds, &val) {
return nil
}
return val
}
@@ -239,16 +301,14 @@ func (j *Job) RunsOn() []string {
switch j.RawRunsOn.Kind {
case yaml.ScalarNode:
var val string
err := j.RawRunsOn.Decode(&val)
if err != nil {
log.Fatal(err)
if !decodeNode(j.RawRunsOn, &val) {
return nil
}
return []string{val}
case yaml.SequenceNode:
var val []string
err := j.RawRunsOn.Decode(&val)
if err != nil {
log.Fatal(err)
if !decodeNode(j.RawRunsOn, &val) {
return nil
}
return val
}
@@ -258,8 +318,8 @@ func (j *Job) RunsOn() []string {
func environment(yml yaml.Node) map[string]string {
env := make(map[string]string)
if yml.Kind == yaml.MappingNode {
if err := yml.Decode(&env); err != nil {
log.Fatal(err)
if !decodeNode(yml, &env) {
return nil
}
}
return env
@@ -274,8 +334,8 @@ func (j *Job) Environment() map[string]string {
func (j *Job) Matrix() map[string][]interface{} {
if j.Strategy.RawMatrix.Kind == yaml.MappingNode {
var val map[string][]interface{}
if err := j.Strategy.RawMatrix.Decode(&val); err != nil {
log.Fatal(err)
if !decodeNode(j.Strategy.RawMatrix, &val) {
return nil
}
return val
}
@@ -286,7 +346,7 @@ func (j *Job) Matrix() map[string][]interface{} {
// It skips includes and hard fails excludes for non-existing keys
//
//nolint:gocyclo
func (j *Job) GetMatrixes() []map[string]interface{} {
func (j *Job) GetMatrixes() ([]map[string]interface{}, error) {
matrixes := make([]map[string]interface{}, 0)
if j.Strategy != nil {
j.Strategy.FailFast = j.Strategy.GetFailFast()
@@ -337,7 +397,7 @@ func (j *Job) GetMatrixes() []map[string]interface{} {
excludes = append(excludes, e)
} else {
// We fail completely here because that's what GitHub does for non-existing matrix keys, fail on exclude, silent skip on include
log.Fatalf("The workflow is not valid. Matrix exclude key '%s' does not match any key within the matrix", k)
return nil, fmt.Errorf("the workflow is not valid. Matrix exclude key %q does not match any key within the matrix", k)
}
}
}
@@ -382,7 +442,7 @@ func (j *Job) GetMatrixes() []map[string]interface{} {
} else {
matrixes = append(matrixes, make(map[string]interface{}))
}
return matrixes
return matrixes, nil
}

func commonKeysMatch(a map[string]interface{}, b map[string]interface{}) bool {
@@ -434,8 +494,12 @@ func (j JobType) String() string {
func (j *Job) Type() JobType {
if strings.HasPrefix(j.Uses, "./.github/workflows") && (strings.HasSuffix(j.Uses, ".yml") || strings.HasSuffix(j.Uses, ".yaml")) {
return JobTypeReusableWorkflowLocal
} else if strings.HasPrefix(j.Uses, "./.gitea/workflows") && (strings.HasSuffix(j.Uses, ".yml") || strings.HasSuffix(j.Uses, ".yaml")) {
return JobTypeReusableWorkflowLocal
} else if !strings.HasPrefix(j.Uses, "./") && strings.Contains(j.Uses, ".github/workflows") && (strings.Contains(j.Uses, ".yml@") || strings.Contains(j.Uses, ".yaml@")) {
return JobTypeReusableWorkflowRemote
} else if !strings.HasPrefix(j.Uses, "./") && strings.Contains(j.Uses, ".gitea/workflows") && (strings.Contains(j.Uses, ".yml@") || strings.Contains(j.Uses, ".yaml@")) {
return JobTypeReusableWorkflowRemote
}
return JobTypeDefault
}
@@ -452,6 +516,9 @@ type ContainerSpec struct {
Args string
Name string
Reuse bool

// Gitea specific
Cmd []string `yaml:"cmd"`
}

// Step is the structure of one step in a job
@@ -483,16 +550,8 @@ func (s *Step) String() string {
}

// Environments returns string-based key=value map for a step
// Note: all keys are uppercase
func (s *Step) Environment() map[string]string {
env := environment(s.Env)

for k, v := range env {
delete(env, k)
env[strings.ToUpper(k)] = v
}

return env
return environment(s.Env)
}

// GetEnv gets the env for a step
@@ -631,3 +690,17 @@ func (w *Workflow) GetJobIDs() []string {
}
return ids
}

var OnDecodeNodeError = func(node yaml.Node, out interface{}, err error) {
log.Fatalf("Failed to decode node %v into %T: %v", node, out, err)
}

func decodeNode(node yaml.Node, out interface{}) bool {
if err := node.Decode(out); err != nil {
if OnDecodeNodeError != nil {
OnDecodeNodeError(node, out, err)
}
return false
}
return true
}

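
decodeNode funnels every YAML decode through one hook: the default OnDecodeNodeError keeps the old log.Fatalf behaviour, but because the variable is exported an embedder can replace it and turn decode failures into ordinary errors (the decoders above then simply return nil). A minimal sketch of such an override (the error-collecting slice is an assumption for illustration, not part of the package):

package main

import (
	"fmt"

	"github.com/nektos/act/pkg/model"
	"gopkg.in/yaml.v3"
)

func main() {
	var decodeErrs []error // illustrative collector, not part of act
	model.OnDecodeNodeError = func(node yaml.Node, out interface{}, err error) {
		decodeErrs = append(decodeErrs, fmt.Errorf("decode %q into %T: %w", node.Value, out, err))
	}
	// ... parse workflows here; malformed nodes now yield nil values
	// plus collected errors instead of killing the process.
	_ = decodeErrs
}
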
@@ -323,7 +323,8 @@ func TestReadWorkflow_Strategy(t *testing.T) {
|
||||
w, err := NewWorkflowPlanner("testdata/strategy/push.yml", true)
|
||||
assert.NoError(t, err)
|
||||
|
||||
p := w.PlanJob("strategy-only-max-parallel")
|
||||
p, err := w.PlanJob("strategy-only-max-parallel")
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Equal(t, len(p.Stages), 1)
|
||||
assert.Equal(t, len(p.Stages[0].Runs), 1)
|
||||
@@ -331,25 +332,33 @@ func TestReadWorkflow_Strategy(t *testing.T) {
|
||||
wf := p.Stages[0].Runs[0].Workflow
|
||||
|
||||
job := wf.Jobs["strategy-only-max-parallel"]
|
||||
assert.Equal(t, job.GetMatrixes(), []map[string]interface{}{{}})
|
||||
matrixes, err := job.GetMatrixes()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, matrixes, []map[string]interface{}{{}})
|
||||
assert.Equal(t, job.Matrix(), map[string][]interface{}(nil))
|
||||
assert.Equal(t, job.Strategy.MaxParallel, 2)
|
||||
assert.Equal(t, job.Strategy.FailFast, true)
|
||||
|
||||
job = wf.Jobs["strategy-only-fail-fast"]
|
||||
assert.Equal(t, job.GetMatrixes(), []map[string]interface{}{{}})
|
||||
matrixes, err = job.GetMatrixes()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, matrixes, []map[string]interface{}{{}})
|
||||
assert.Equal(t, job.Matrix(), map[string][]interface{}(nil))
|
||||
assert.Equal(t, job.Strategy.MaxParallel, 4)
|
||||
assert.Equal(t, job.Strategy.FailFast, false)
|
||||
|
||||
job = wf.Jobs["strategy-no-matrix"]
|
||||
assert.Equal(t, job.GetMatrixes(), []map[string]interface{}{{}})
|
||||
matrixes, err = job.GetMatrixes()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, matrixes, []map[string]interface{}{{}})
|
||||
assert.Equal(t, job.Matrix(), map[string][]interface{}(nil))
|
||||
assert.Equal(t, job.Strategy.MaxParallel, 2)
|
||||
assert.Equal(t, job.Strategy.FailFast, false)
|
||||
|
||||
job = wf.Jobs["strategy-all"]
|
||||
assert.Equal(t, job.GetMatrixes(),
|
||||
matrixes, err = job.GetMatrixes()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, matrixes,
|
||||
[]map[string]interface{}{
|
||||
{"datacenter": "site-c", "node-version": "14.x", "site": "staging"},
|
||||
{"datacenter": "site-c", "node-version": "16.x", "site": "staging"},
|
||||
|
@@ -14,6 +14,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/kballard/go-shellquote"
|
||||
|
||||
"github.com/nektos/act/pkg/common"
|
||||
"github.com/nektos/act/pkg/container"
|
||||
"github.com/nektos/act/pkg/model"
|
||||
@@ -29,10 +30,9 @@ type actionStep interface {
|
||||
|
||||
type readAction func(ctx context.Context, step *model.Step, actionDir string, actionPath string, readFile actionYamlReader, writeFile fileWriter) (*model.Action, error)
|
||||
|
||||
type (
|
||||
actionYamlReader func(filename string) (io.Reader, io.Closer, error)
|
||||
fileWriter func(filename string, data []byte, perm fs.FileMode) error
|
||||
)
|
||||
type actionYamlReader func(filename string) (io.Reader, io.Closer, error)
|
||||
|
||||
type fileWriter func(filename string, data []byte, perm fs.FileMode) error
|
||||
|
||||
type runAction func(step actionStep, actionDir string, remoteAction *remoteAction) common.Executor
|
||||
|
||||
@@ -156,6 +156,8 @@ func runActionImpl(step actionStep, actionDir string, remoteAction *remoteAction
|
||||
containerArgs := []string{"node", path.Join(containerActionDir, action.Runs.Main)}
|
||||
logger.Debugf("executing remote job container: %s", containerArgs)
|
||||
|
||||
rc.ApplyExtraPath(ctx, step.getEnv())
|
||||
|
||||
return rc.execJobContainer(containerArgs, *step.getEnv(), "", "")(ctx)
|
||||
case model.ActionRunsUsingDocker:
|
||||
location := actionLocation
|
||||
@@ -173,15 +175,24 @@ func runActionImpl(step actionStep, actionDir string, remoteAction *remoteAction
|
||||
if err := maybeCopyToActionDir(ctx, step, actionDir, actionPath, containerActionDir); err != nil {
|
||||
return err
|
||||
}
|
||||
containerArgs := []string{"go", "run", path.Join(containerActionDir, action.Runs.Main)}
|
||||
logger.Debugf("executing remote job container: %s", containerArgs)
|
||||
return rc.execJobContainer(containerArgs, *step.getEnv(), "", "")(ctx)
|
||||
|
||||
rc.ApplyExtraPath(ctx, step.getEnv())
|
||||
|
||||
execFileName := fmt.Sprintf("%s.out", action.Runs.Main)
|
||||
buildArgs := []string{"go", "build", "-o", execFileName, action.Runs.Main}
|
||||
execArgs := []string{filepath.Join(containerActionDir, execFileName)}
|
||||
|
||||
return common.NewPipelineExecutor(
|
||||
rc.execJobContainer(buildArgs, *step.getEnv(), "", containerActionDir),
|
||||
rc.execJobContainer(execArgs, *step.getEnv(), "", ""),
|
||||
)(ctx)
|
||||
default:
|
||||
return fmt.Errorf(fmt.Sprintf("The runs.using key must be one of: %v, got %s", []string{
|
||||
model.ActionRunsUsingDocker,
|
||||
model.ActionRunsUsingNode12,
|
||||
model.ActionRunsUsingNode16,
|
||||
model.ActionRunsUsingComposite,
|
||||
model.ActionRunsUsingGo,
|
||||
}, action.Runs.Using))
|
||||
}
|
||||
}
|
||||
@@ -228,14 +239,17 @@ func execAsDocker(ctx context.Context, step actionStep, actionName string, based
|
||||
|
||||
var prepImage common.Executor
|
||||
var image string
|
||||
forcePull := false
|
||||
if strings.HasPrefix(action.Runs.Image, "docker://") {
|
||||
image = strings.TrimPrefix(action.Runs.Image, "docker://")
|
||||
// Apply forcePull only for prebuild docker images
|
||||
forcePull = rc.Config.ForcePull
|
||||
} else {
|
||||
// "-dockeraction" enshures that "./", "./test " won't get converted to "act-:latest", "act-test-:latest" which are invalid docker image names
|
||||
image = fmt.Sprintf("%s-dockeraction:%s", regexp.MustCompile("[^a-zA-Z0-9]").ReplaceAllString(actionName, "-"), "latest")
|
||||
image = fmt.Sprintf("act-%s", strings.TrimLeft(image, "-"))
|
||||
image = strings.ToLower(image)
|
||||
contextDir := filepath.Join(basedir, action.Runs.Main)
|
||||
contextDir, fileName := filepath.Split(filepath.Join(basedir, action.Runs.Image))
|
||||
|
||||
anyArchExists, err := container.ImageExistsLocally(ctx, image, "any")
|
||||
if err != nil {
|
||||
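As an aside on the image-name sanitisation a few lines above: the steps are easier to verify in isolation. A minimal standalone copy (the helper name `localActionImage` is hypothetical):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// localActionImage (hypothetical name) repeats the sanitisation above:
// the "-dockeraction" suffix guarantees a non-empty tail, so "./" can no
// longer collapse into an invalid tag like "act-:latest".
func localActionImage(actionName string) string {
	image := fmt.Sprintf("%s-dockeraction:%s",
		regexp.MustCompile("[^a-zA-Z0-9]").ReplaceAllString(actionName, "-"), "latest")
	image = fmt.Sprintf("act-%s", strings.TrimLeft(image, "-"))
	return strings.ToLower(image)
}

func main() {
	fmt.Println(localActionImage("./"))     // act-dockeraction:latest
	fmt.Println(localActionImage("./test")) // act-test-dockeraction:latest
}
```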
@@ -265,6 +279,7 @@ func execAsDocker(ctx context.Context, step actionStep, actionName string, based
|
||||
}
|
||||
prepImage = container.NewDockerBuildExecutor(container.NewDockerBuildExecutorInput{
|
||||
ContextDir: contextDir,
|
||||
Dockerfile: fileName,
|
||||
ImageTag: image,
|
||||
Container: actionContainer,
|
||||
Platform: rc.Config.ContainerArchitecture,
|
||||
@@ -296,7 +311,7 @@ func execAsDocker(ctx context.Context, step actionStep, actionName string, based
|
||||
stepContainer := newStepContainer(ctx, step, image, cmd, entrypoint)
|
||||
return common.NewPipelineExecutor(
|
||||
prepImage,
|
||||
stepContainer.Pull(rc.Config.ForcePull),
|
||||
stepContainer.Pull(forcePull),
|
||||
stepContainer.Remove().IfBool(!rc.Config.ReuseContainers),
|
||||
stepContainer.Create(rc.Config.ContainerCapAdd, rc.Config.ContainerCapDrop),
|
||||
stepContainer.Start(true),
|
||||
@@ -320,13 +335,13 @@ func evalDockerArgs(ctx context.Context, step step, action *model.Action, cmd *[
|
||||
inputs[k] = eval.Interpolate(ctx, v)
|
||||
}
|
||||
}
|
||||
mergeIntoMap(step.getEnv(), inputs)
|
||||
mergeIntoMap(step, step.getEnv(), inputs)
|
||||
|
||||
stepEE := rc.NewStepExpressionEvaluator(ctx, step)
|
||||
for i, v := range *cmd {
|
||||
(*cmd)[i] = stepEE.Interpolate(ctx, v)
|
||||
}
|
||||
mergeIntoMap(step.getEnv(), action.Runs.Env)
|
||||
mergeIntoMap(step, step.getEnv(), action.Runs.Env)
|
||||
|
||||
ee := rc.NewStepExpressionEvaluator(ctx, step)
|
||||
for k, v := range *step.getEnv() {
|
||||
@@ -357,7 +372,10 @@ func newStepContainer(ctx context.Context, step step, image string, cmd []string
|
||||
envList = append(envList, fmt.Sprintf("%s=%s", "RUNNER_TEMP", "/tmp"))
|
||||
|
||||
binds, mounts := rc.GetBindsAndMounts()
|
||||
|
||||
networkMode := fmt.Sprintf("container:%s", rc.jobContainerName())
|
||||
if rc.IsHostEnv(ctx) {
|
||||
networkMode = "default"
|
||||
}
|
||||
stepContainer := container.NewContainer(&container.NewContainerInput{
|
||||
Cmd: cmd,
|
||||
Entrypoint: entrypoint,
|
||||
@@ -368,22 +386,23 @@ func newStepContainer(ctx context.Context, step step, image string, cmd []string
|
||||
Name: createSimpleContainerName(rc.jobContainerName(), "STEP-"+stepModel.ID),
|
||||
Env: envList,
|
||||
Mounts: mounts,
|
||||
NetworkMode: fmt.Sprintf("container:%s", rc.jobContainerName()),
|
||||
NetworkMode: networkMode,
|
||||
Binds: binds,
|
||||
Stdout: logWriter,
|
||||
Stderr: logWriter,
|
||||
Privileged: rc.Config.Privileged,
|
||||
UsernsMode: rc.Config.UsernsMode,
|
||||
Platform: rc.Config.ContainerArchitecture,
|
||||
Options: rc.Config.ContainerOptions,
|
||||
AutoRemove: rc.Config.AutoRemove,
|
||||
})
|
||||
return stepContainer
|
||||
}
|
||||
|
||||
func populateEnvsFromSavedState(env *map[string]string, step actionStep, rc *RunContext) {
|
||||
stepResult := rc.StepResults[step.getStepModel().ID]
|
||||
if stepResult != nil {
|
||||
for name, value := range stepResult.State {
|
||||
state, ok := rc.IntraActionState[step.getStepModel().ID]
|
||||
if ok {
|
||||
for name, value := range state {
|
||||
envName := fmt.Sprintf("STATE_%s", name)
|
||||
(*env)[envName] = value
|
||||
}
|
||||
@@ -450,7 +469,8 @@ func hasPreStep(step actionStep) common.Conditional {
|
||||
action := step.getActionModel()
|
||||
return action.Runs.Using == model.ActionRunsUsingComposite ||
|
||||
((action.Runs.Using == model.ActionRunsUsingNode12 ||
|
||||
action.Runs.Using == model.ActionRunsUsingNode16) &&
|
||||
action.Runs.Using == model.ActionRunsUsingNode16 ||
|
||||
action.Runs.Using == model.ActionRunsUsingGo) &&
|
||||
action.Runs.Pre != "")
|
||||
}
|
||||
}
|
||||
@@ -473,7 +493,7 @@ func runPreStep(step actionStep) common.Executor {
|
||||
var actionPath string
|
||||
if _, ok := step.(*stepActionRemote); ok {
|
||||
actionPath = newRemoteAction(stepModel.Uses).Path
|
||||
actionDir = fmt.Sprintf("%s/%s", rc.ActionCacheDir(), strings.ReplaceAll(stepModel.Uses, "/", "-"))
|
||||
actionDir = fmt.Sprintf("%s/%s", rc.ActionCacheDir(), safeFilename(stepModel.Uses))
|
||||
} else {
|
||||
actionDir = filepath.Join(rc.Config.Workdir, stepModel.Uses)
|
||||
actionPath = ""
|
||||
@@ -495,6 +515,8 @@ func runPreStep(step actionStep) common.Executor {
|
||||
containerArgs := []string{"node", path.Join(containerActionDir, action.Runs.Pre)}
|
||||
logger.Debugf("executing remote job container: %s", containerArgs)
|
||||
|
||||
rc.ApplyExtraPath(ctx, step.getEnv())
|
||||
|
||||
return rc.execJobContainer(containerArgs, *step.getEnv(), "", "")(ctx)
|
||||
|
||||
case model.ActionRunsUsingComposite:
|
||||
@@ -502,8 +524,48 @@ func runPreStep(step actionStep) common.Executor {
|
||||
step.getCompositeRunContext(ctx)
|
||||
}
|
||||
|
||||
return step.getCompositeSteps().pre(ctx)
|
||||
if steps := step.getCompositeSteps(); steps != nil && steps.pre != nil {
|
||||
return steps.pre(ctx)
|
||||
}
|
||||
return fmt.Errorf("missing steps in composite action")
|
||||
|
||||
case model.ActionRunsUsingGo:
|
||||
// defaults in pre steps were missing; however, provided inputs are available
|
||||
populateEnvsFromInput(ctx, step.getEnv(), action, rc)
|
||||
// todo: refactor into step
|
||||
var actionDir string
|
||||
var actionPath string
|
||||
if _, ok := step.(*stepActionRemote); ok {
|
||||
actionPath = newRemoteAction(stepModel.Uses).Path
|
||||
actionDir = fmt.Sprintf("%s/%s", rc.ActionCacheDir(), safeFilename(stepModel.Uses))
|
||||
} else {
|
||||
actionDir = filepath.Join(rc.Config.Workdir, stepModel.Uses)
|
||||
actionPath = ""
|
||||
}
|
||||
|
||||
actionLocation := ""
|
||||
if actionPath != "" {
|
||||
actionLocation = path.Join(actionDir, actionPath)
|
||||
} else {
|
||||
actionLocation = actionDir
|
||||
}
|
||||
|
||||
_, containerActionDir := getContainerActionPaths(stepModel, actionLocation, rc)
|
||||
|
||||
if err := maybeCopyToActionDir(ctx, step, actionDir, actionPath, containerActionDir); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rc.ApplyExtraPath(ctx, step.getEnv())
|
||||
|
||||
execFileName := fmt.Sprintf("%s.out", action.Runs.Pre)
|
||||
buildArgs := []string{"go", "build", "-o", execFileName, action.Runs.Pre}
|
||||
execArgs := []string{filepath.Join(containerActionDir, execFileName)}
|
||||
|
||||
return common.NewPipelineExecutor(
|
||||
rc.execJobContainer(buildArgs, *step.getEnv(), "", containerActionDir),
|
||||
rc.execJobContainer(execArgs, *step.getEnv(), "", ""),
|
||||
)(ctx)
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
@@ -540,7 +602,8 @@ func hasPostStep(step actionStep) common.Conditional {
|
||||
action := step.getActionModel()
|
||||
return action.Runs.Using == model.ActionRunsUsingComposite ||
|
||||
((action.Runs.Using == model.ActionRunsUsingNode12 ||
|
||||
action.Runs.Using == model.ActionRunsUsingNode16) &&
|
||||
action.Runs.Using == model.ActionRunsUsingNode16 ||
|
||||
action.Runs.Using == model.ActionRunsUsingGo) &&
|
||||
action.Runs.Post != "")
|
||||
}
|
||||
}
|
||||
@@ -559,7 +622,7 @@ func runPostStep(step actionStep) common.Executor {
|
||||
var actionPath string
|
||||
if _, ok := step.(*stepActionRemote); ok {
|
||||
actionPath = newRemoteAction(stepModel.Uses).Path
|
||||
actionDir = fmt.Sprintf("%s/%s", rc.ActionCacheDir(), strings.ReplaceAll(stepModel.Uses, "/", "-"))
|
||||
actionDir = fmt.Sprintf("%s/%s", rc.ActionCacheDir(), safeFilename(stepModel.Uses))
|
||||
} else {
|
||||
actionDir = filepath.Join(rc.Config.Workdir, stepModel.Uses)
|
||||
actionPath = ""
|
||||
@@ -582,6 +645,8 @@ func runPostStep(step actionStep) common.Executor {
|
||||
containerArgs := []string{"node", path.Join(containerActionDir, action.Runs.Post)}
|
||||
logger.Debugf("executing remote job container: %s", containerArgs)
|
||||
|
||||
rc.ApplyExtraPath(ctx, step.getEnv())
|
||||
|
||||
return rc.execJobContainer(containerArgs, *step.getEnv(), "", "")(ctx)
|
||||
|
||||
case model.ActionRunsUsingComposite:
|
||||
@@ -589,7 +654,23 @@ func runPostStep(step actionStep) common.Executor {
|
||||
return err
|
||||
}
|
||||
|
||||
return step.getCompositeSteps().post(ctx)
|
||||
if steps := step.getCompositeSteps(); steps != nil && steps.post != nil {
|
||||
return steps.post(ctx)
|
||||
}
|
||||
return fmt.Errorf("missing steps in composite action")
|
||||
|
||||
case model.ActionRunsUsingGo:
|
||||
populateEnvsFromSavedState(step.getEnv(), step, rc)
|
||||
rc.ApplyExtraPath(ctx, step.getEnv())
|
||||
|
||||
execFileName := fmt.Sprintf("%s.out", action.Runs.Post)
|
||||
buildArgs := []string{"go", "build", "-o", execFileName, action.Runs.Post}
|
||||
execArgs := []string{filepath.Join(containerActionDir, execFileName)}
|
||||
|
||||
return common.NewPipelineExecutor(
|
||||
rc.execJobContainer(buildArgs, *step.getEnv(), "", containerActionDir),
|
||||
rc.execJobContainer(execArgs, *step.getEnv(), "", ""),
|
||||
)(ctx)
|
||||
|
||||
default:
|
||||
return nil
|
||||
|
@@ -66,6 +66,7 @@ func newCompositeRunContext(ctx context.Context, parent *RunContext, step action
|
||||
JobContainer: parent.JobContainer,
|
||||
ActionPath: actionPath,
|
||||
Env: env,
|
||||
GlobalEnv: parent.GlobalEnv,
|
||||
Masks: parent.Masks,
|
||||
ExtraPath: parent.ExtraPath,
|
||||
Parent: parent,
|
||||
@@ -85,6 +86,10 @@ func execAsComposite(step actionStep) common.Executor {
|
||||
|
||||
steps := step.getCompositeSteps()
|
||||
|
||||
if steps == nil || steps.main == nil {
|
||||
return fmt.Errorf("missing steps in composite action")
|
||||
}
|
||||
|
||||
ctx = WithCompositeLogger(ctx, &compositeRC.Masks)
|
||||
|
||||
err := steps.main(ctx)
|
||||
@@ -99,6 +104,16 @@ func execAsComposite(step actionStep) common.Executor {
|
||||
|
||||
rc.Masks = append(rc.Masks, compositeRC.Masks...)
|
||||
rc.ExtraPath = compositeRC.ExtraPath
|
||||
// compositeRC.Env is dirty: it contains INPUT_ vars and the merged step env, so only rely on compositeRC.GlobalEnv
|
||||
mergeIntoMap := mergeIntoMapCaseSensitive
|
||||
if rc.JobContainer.IsEnvironmentCaseInsensitive() {
|
||||
mergeIntoMap = mergeIntoMapCaseInsensitive
|
||||
}
|
||||
if rc.GlobalEnv == nil {
|
||||
rc.GlobalEnv = map[string]string{}
|
||||
}
|
||||
mergeIntoMap(rc.GlobalEnv, compositeRC.GlobalEnv)
|
||||
mergeIntoMap(rc.Env, compositeRC.GlobalEnv)
|
||||
|
||||
return err
|
||||
}
|
||||
|
@@ -201,10 +201,11 @@ func TestActionRunner(t *testing.T) {
|
||||
},
|
||||
CurrentStep: "post-step",
|
||||
StepResults: map[string]*model.StepResult{
|
||||
"step": {
|
||||
State: map[string]string{
|
||||
"name": "state value",
|
||||
"step": {},
|
||||
},
|
||||
IntraActionState: map[string]map[string]string{
|
||||
"step": {
|
||||
"name": "state value",
|
||||
},
|
||||
},
|
||||
},
|
||||
|
65
pkg/runner/command.go
Executable file → Normal file
@@ -16,22 +16,27 @@ func init() {
|
||||
commandPatternADO = regexp.MustCompile("^##\\[([^ ]+)( (.+))?]([^\r\n]*)[\r\n]+$")
|
||||
}
|
||||
|
||||
func (rc *RunContext) commandHandler(ctx context.Context) common.LineHandler {
|
||||
logger := common.Logger(ctx)
|
||||
resumeCommand := ""
|
||||
return func(line string) bool {
|
||||
var command string
|
||||
var kvPairs map[string]string
|
||||
var arg string
|
||||
func tryParseRawActionCommand(line string) (command string, kvPairs map[string]string, arg string, ok bool) {
|
||||
if m := commandPatternGA.FindStringSubmatch(line); m != nil {
|
||||
command = m[1]
|
||||
kvPairs = parseKeyValuePairs(m[3], ",")
|
||||
arg = m[4]
|
||||
ok = true
|
||||
} else if m := commandPatternADO.FindStringSubmatch(line); m != nil {
|
||||
command = m[1]
|
||||
kvPairs = parseKeyValuePairs(m[3], ";")
|
||||
arg = m[4]
ok = true
}
|
||||
return
|
||||
}
|
||||
|
||||
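A quick demonstration of the two command dialects the new tryParseRawActionCommand accepts. The ADO pattern is taken from the init block above; the GA pattern is not shown in this hunk, so the one below is an assumption based on the documented `::command key=value::arg` syntax:

```go
package main

import (
	"fmt"
	"regexp"
)

var (
	// Assumed GitHub-Actions-style pattern (not shown in this hunk).
	ga = regexp.MustCompile("^::([^ ]+)( (.+))?::([^\r\n]*)[\r\n]+$")
	// Azure-DevOps-style pattern, copied from the init block above.
	ado = regexp.MustCompile("^##\\[([^ ]+)( (.+))?]([^\r\n]*)[\r\n]+$")
)

func main() {
	for _, line := range []string{
		"::set-output name=result::ok\n",
		"##[set-output name=result]ok\n",
	} {
		for _, p := range []*regexp.Regexp{ga, ado} {
			if m := p.FindStringSubmatch(line); m != nil {
				// Both dialects yield command=set-output kv="name=result" arg=ok
				fmt.Printf("command=%s kv=%q arg=%s\n", m[1], m[3], m[4])
			}
		}
	}
}
```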
func (rc *RunContext) commandHandler(ctx context.Context) common.LineHandler {
|
||||
logger := common.Logger(ctx)
|
||||
resumeCommand := ""
|
||||
return func(line string) bool {
|
||||
command, kvPairs, arg, ok := tryParseRawActionCommand(line)
|
||||
if !ok {
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -66,20 +71,35 @@ func (rc *RunContext) commandHandler(ctx context.Context) common.LineHandler {
|
||||
case "save-state":
|
||||
logger.Infof(" \U0001f4be %s", line)
|
||||
rc.saveState(ctx, kvPairs, arg)
|
||||
case "add-matcher":
|
||||
logger.Infof(" \U00002753 add-matcher %s", arg)
|
||||
default:
|
||||
logger.Infof(" \U00002753 %s", line)
|
||||
}
|
||||
|
||||
return false
|
||||
// return true to let Gitea's logger also handle these special outputs
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
func (rc *RunContext) setEnv(ctx context.Context, kvPairs map[string]string, arg string) {
|
||||
common.Logger(ctx).Infof(" \U00002699 ::set-env:: %s=%s", kvPairs["name"], arg)
|
||||
name := kvPairs["name"]
|
||||
common.Logger(ctx).Infof(" \U00002699 ::set-env:: %s=%s", name, arg)
|
||||
if rc.Env == nil {
|
||||
rc.Env = make(map[string]string)
|
||||
}
|
||||
rc.Env[kvPairs["name"]] = arg
|
||||
if rc.GlobalEnv == nil {
|
||||
rc.GlobalEnv = map[string]string{}
|
||||
}
|
||||
newenv := map[string]string{
|
||||
name: arg,
|
||||
}
|
||||
mergeIntoMap := mergeIntoMapCaseSensitive
|
||||
if rc.JobContainer != nil && rc.JobContainer.IsEnvironmentCaseInsensitive() {
|
||||
mergeIntoMap = mergeIntoMapCaseInsensitive
|
||||
}
|
||||
mergeIntoMap(rc.Env, newenv)
|
||||
mergeIntoMap(rc.GlobalEnv, newenv)
|
||||
}
|
||||
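setEnv now routes through mergeIntoMapCaseSensitive/mergeIntoMapCaseInsensitive, which are referenced but not shown in this diff. A minimal sketch of what the case-insensitive variant plausibly does (assumed semantics): reuse an existing key that differs only by case instead of adding a duplicate:

```go
package main

import (
	"fmt"
	"strings"
)

// mergeCaseInsensitive is a sketch, not act's implementation: when the
// target already holds a key that differs only by case (e.g. "Path" vs
// "PATH" on Windows), the existing spelling is reused.
func mergeCaseInsensitive(target map[string]string, maps ...map[string]string) {
	for _, m := range maps {
		for k, v := range m {
			key := k
			for existing := range target {
				if strings.EqualFold(existing, k) {
					key = existing
					break
				}
			}
			target[key] = v
		}
	}
}

func main() {
	env := map[string]string{"Path": `C:\Windows`}
	mergeCaseInsensitive(env, map[string]string{"PATH": `C:\tools`})
	fmt.Println(env) // map[Path:C:\tools] – no duplicate PATH key
}
```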
func (rc *RunContext) setOutput(ctx context.Context, kvPairs map[string]string, arg string) {
|
||||
logger := common.Logger(ctx)
|
||||
@@ -101,7 +121,13 @@ func (rc *RunContext) setOutput(ctx context.Context, kvPairs map[string]string,
|
||||
}
|
||||
func (rc *RunContext) addPath(ctx context.Context, arg string) {
|
||||
common.Logger(ctx).Infof(" \U00002699 ::add-path:: %s", arg)
|
||||
rc.ExtraPath = append(rc.ExtraPath, arg)
|
||||
extraPath := []string{arg}
|
||||
for _, v := range rc.ExtraPath {
|
||||
if v != arg {
|
||||
extraPath = append(extraPath, v)
|
||||
}
|
||||
}
|
||||
rc.ExtraPath = extraPath
|
||||
}
|
||||
|
||||
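The reworked addPath above no longer appends blindly: the new directory is moved to the front and any earlier occurrence is dropped, which is also what the updated tests below assert. The same logic as a standalone sketch:

```go
package main

import "fmt"

// prependUnique puts p at the front of paths and drops any earlier
// occurrence, mirroring the add-path behaviour above: the most recently
// added directory wins PATH precedence without duplicating entries.
func prependUnique(paths []string, p string) []string {
	out := []string{p}
	for _, v := range paths {
		if v != p {
			out = append(out, v)
		}
	}
	return out
}

func main() {
	paths := []string{"/zoo"}
	paths = prependUnique(paths, "/boo")
	paths = prependUnique(paths, "/zoo") // moves /zoo back to the front
	fmt.Println(paths)                   // [/zoo /boo]
}
```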
func parseKeyValuePairs(kvPairs string, separator string) map[string]string {
|
||||
@@ -147,13 +173,16 @@ func unescapeKvPairs(kvPairs map[string]string) map[string]string {
|
||||
}
|
||||
|
||||
func (rc *RunContext) saveState(ctx context.Context, kvPairs map[string]string, arg string) {
|
||||
if rc.CurrentStep != "" {
|
||||
stepResult := rc.StepResults[rc.CurrentStep]
|
||||
if stepResult != nil {
|
||||
if stepResult.State == nil {
|
||||
stepResult.State = map[string]string{}
|
||||
stepID := rc.CurrentStep
|
||||
if stepID != "" {
|
||||
if rc.IntraActionState == nil {
|
||||
rc.IntraActionState = map[string]map[string]string{}
|
||||
}
|
||||
stepResult.State[kvPairs["name"]] = arg
|
||||
state, ok := rc.IntraActionState[stepID]
|
||||
if !ok {
|
||||
state = map[string]string{}
|
||||
rc.IntraActionState[stepID] = state
|
||||
}
|
||||
state[kvPairs["name"]] = arg
|
||||
}
|
||||
}
|
||||
|
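saveState now writes into rc.IntraActionState, a lazily-initialised two-level map keyed by step ID, instead of the step's public result. A self-contained sketch of that pattern:

```go
package main

import "fmt"

// runState mirrors the lazily-initialised two-level state map used by
// saveState above: the outer key is the step ID, the inner map holds the
// names saved via ::save-state::.
type runState struct {
	intraActionState map[string]map[string]string
}

func (r *runState) save(stepID, name, value string) {
	if r.intraActionState == nil {
		r.intraActionState = map[string]map[string]string{}
	}
	state, ok := r.intraActionState[stepID]
	if !ok {
		state = map[string]string{}
		r.intraActionState[stepID] = state
	}
	state[name] = value
}

func main() {
	var r runState
	r.save("step", "state-name", "state-value")
	fmt.Println(r.intraActionState["step"]["state-name"]) // state-value
}
```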
@@ -64,7 +64,7 @@ func TestAddpath(t *testing.T) {
|
||||
a.Equal("/zoo", rc.ExtraPath[0])
|
||||
|
||||
handler("::add-path::/boo\n")
|
||||
a.Equal("/boo", rc.ExtraPath[1])
|
||||
a.Equal("/boo", rc.ExtraPath[0])
|
||||
}
|
||||
|
||||
func TestStopCommands(t *testing.T) {
|
||||
@@ -102,7 +102,7 @@ func TestAddpathADO(t *testing.T) {
|
||||
a.Equal("/zoo", rc.ExtraPath[0])
|
||||
|
||||
handler("##[add-path]/boo\n")
|
||||
a.Equal("/boo", rc.ExtraPath[1])
|
||||
a.Equal("/boo", rc.ExtraPath[0])
|
||||
}
|
||||
|
||||
func TestAddmask(t *testing.T) {
|
||||
@@ -177,11 +177,7 @@ func TestAddmaskUsemask(t *testing.T) {
|
||||
func TestSaveState(t *testing.T) {
|
||||
rc := &RunContext{
|
||||
CurrentStep: "step",
|
||||
StepResults: map[string]*model.StepResult{
|
||||
"step": {
|
||||
State: map[string]string{},
|
||||
},
|
||||
},
|
||||
StepResults: map[string]*model.StepResult{},
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
@@ -189,5 +185,5 @@ func TestSaveState(t *testing.T) {
|
||||
handler := rc.commandHandler(ctx)
|
||||
handler("::save-state name=state-name::state-value\n")
|
||||
|
||||
assert.Equal(t, "state-value", rc.StepResults["step"].State["state-name"])
|
||||
assert.Equal(t, "state-value", rc.IntraActionState["step"]["state-name"])
|
||||
}
|
||||
|
@@ -2,6 +2,7 @@ package runner
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
|
||||
"github.com/nektos/act/pkg/common"
|
||||
"github.com/nektos/act/pkg/container"
|
||||
@@ -49,11 +50,6 @@ func (cm *containerMock) UpdateFromImageEnv(env *map[string]string) common.Execu
|
||||
return args.Get(0).(func(context.Context) error)
|
||||
}
|
||||
|
||||
func (cm *containerMock) UpdateFromPath(env *map[string]string) common.Executor {
|
||||
args := cm.Called(env)
|
||||
return args.Get(0).(func(context.Context) error)
|
||||
}
|
||||
|
||||
func (cm *containerMock) Copy(destPath string, files ...*container.FileEntry) common.Executor {
|
||||
args := cm.Called(destPath, files)
|
||||
return args.Get(0).(func(context.Context) error)
|
||||
@@ -63,7 +59,17 @@ func (cm *containerMock) CopyDir(destPath string, srcPath string, useGitIgnore b
|
||||
args := cm.Called(destPath, srcPath, useGitIgnore)
|
||||
return args.Get(0).(func(context.Context) error)
|
||||
}
|
||||
|
||||
func (cm *containerMock) Exec(command []string, env map[string]string, user, workdir string) common.Executor {
|
||||
args := cm.Called(command, env, user, workdir)
|
||||
return args.Get(0).(func(context.Context) error)
|
||||
}
|
||||
|
||||
func (cm *containerMock) GetContainerArchive(ctx context.Context, srcPath string) (io.ReadCloser, error) {
|
||||
args := cm.Called(ctx, srcPath)
|
||||
err, hasErr := args.Get(1).(error)
|
||||
if !hasErr {
|
||||
err = nil
|
||||
}
|
||||
return args.Get(0).(io.ReadCloser), err
|
||||
}
|
||||
|
@@ -21,8 +21,14 @@ type ExpressionEvaluator interface {
|
||||
|
||||
// NewExpressionEvaluator creates a new evaluator
|
||||
func (rc *RunContext) NewExpressionEvaluator(ctx context.Context) ExpressionEvaluator {
|
||||
return rc.NewExpressionEvaluatorWithEnv(ctx, rc.GetEnv())
|
||||
}
|
||||
|
||||
func (rc *RunContext) NewExpressionEvaluatorWithEnv(ctx context.Context, env map[string]string) ExpressionEvaluator {
|
||||
var workflowCallResult map[string]*model.WorkflowCallResult
|
||||
|
||||
// todo: cleanup EvaluationEnvironment creation
|
||||
using := make(map[string]map[string]map[string]string)
|
||||
using := make(map[string]exprparser.Needs)
|
||||
strategy := make(map[string]interface{})
|
||||
if rc.Run != nil {
|
||||
job := rc.Run.Job()
|
||||
@@ -35,8 +41,26 @@ func (rc *RunContext) NewExpressionEvaluator(ctx context.Context) ExpressionEval
|
||||
jobNeeds := rc.Run.Job().Needs()
|
||||
|
||||
for _, needs := range jobNeeds {
|
||||
using[needs] = map[string]map[string]string{
|
||||
"outputs": jobs[needs].Outputs,
|
||||
using[needs] = exprparser.Needs{
|
||||
Outputs: jobs[needs].Outputs,
|
||||
Result: jobs[needs].Result,
|
||||
}
|
||||
}
|
||||
|
||||
// only set up the jobs context in case of workflow_call
// and an existing expression evaluator (this means the jobs are
// at least ready to run)
|
||||
if rc.caller != nil && rc.ExprEval != nil {
|
||||
workflowCallResult = map[string]*model.WorkflowCallResult{}
|
||||
|
||||
for jobName, job := range jobs {
|
||||
result := model.WorkflowCallResult{
|
||||
Outputs: map[string]string{},
|
||||
}
|
||||
for k, v := range job.Outputs {
|
||||
result.Outputs[k] = v
|
||||
}
|
||||
workflowCallResult[jobName] = &result
|
||||
}
|
||||
}
|
||||
}
|
||||
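The needs context switches from nested maps to exprparser.Needs. The struct is not shown in this diff; the shape below is inferred from the composite literal above:

```go
package main

import "fmt"

// Needs is the assumed shape of exprparser.Needs: the needs context now
// exposes a job's result alongside its outputs, so expressions such as
// needs.build.result == 'success' can be evaluated.
type Needs struct {
	Outputs map[string]string `json:"outputs"`
	Result  string            `json:"result"`
}

func main() {
	needs := map[string]Needs{
		"build": {Outputs: map[string]string{"artifact": "dist.tar.gz"}, Result: "success"},
	}
	fmt.Println(needs["build"].Result, needs["build"].Outputs["artifact"])
}
```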
@@ -46,16 +70,19 @@ func (rc *RunContext) NewExpressionEvaluator(ctx context.Context) ExpressionEval
|
||||
|
||||
ee := &exprparser.EvaluationEnvironment{
|
||||
Github: ghc,
|
||||
Env: rc.GetEnv(),
|
||||
Env: env,
|
||||
Job: rc.getJobContext(),
|
||||
Jobs: &workflowCallResult,
|
||||
// todo: should be unavailable
|
||||
// but required to interpolate/evaluate the step outputs on the job
|
||||
Steps: rc.getStepsContext(),
|
||||
Secrets: rc.Config.Secrets,
|
||||
Secrets: getWorkflowSecrets(ctx, rc),
|
||||
Strategy: strategy,
|
||||
Matrix: rc.Matrix,
|
||||
Needs: using,
|
||||
Inputs: inputs,
|
||||
|
||||
Vars: rc.getVarsContext(),
|
||||
}
|
||||
if rc.JobContainer != nil {
|
||||
ee.Runner = rc.JobContainer.GetRunnerContext(ctx)
|
||||
@@ -82,10 +109,11 @@ func (rc *RunContext) NewStepExpressionEvaluator(ctx context.Context, step step)
|
||||
jobs := rc.Run.Workflow.Jobs
|
||||
jobNeeds := rc.Run.Job().Needs()
|
||||
|
||||
using := make(map[string]map[string]map[string]string)
|
||||
using := make(map[string]exprparser.Needs)
|
||||
for _, needs := range jobNeeds {
|
||||
using[needs] = map[string]map[string]string{
|
||||
"outputs": jobs[needs].Outputs,
|
||||
using[needs] = exprparser.Needs{
|
||||
Outputs: jobs[needs].Outputs,
|
||||
Result: jobs[needs].Result,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -97,13 +125,15 @@ func (rc *RunContext) NewStepExpressionEvaluator(ctx context.Context, step step)
|
||||
Env: *step.getEnv(),
|
||||
Job: rc.getJobContext(),
|
||||
Steps: rc.getStepsContext(),
|
||||
Secrets: rc.Config.Secrets,
|
||||
Secrets: getWorkflowSecrets(ctx, rc),
|
||||
Strategy: strategy,
|
||||
Matrix: rc.Matrix,
|
||||
Needs: using,
|
||||
// todo: should be unavailable
|
||||
// but required to interpolate/evaluate the inputs in actions/composite
|
||||
Inputs: inputs,
|
||||
|
||||
Vars: rc.getVarsContext(),
|
||||
}
|
||||
if rc.JobContainer != nil {
|
||||
ee.Runner = rc.JobContainer.GetRunnerContext(ctx)
|
||||
@@ -311,6 +341,8 @@ func rewriteSubExpression(ctx context.Context, in string, forceFormat bool) (str
|
||||
func getEvaluatorInputs(ctx context.Context, rc *RunContext, step step, ghc *model.GithubContext) map[string]interface{} {
|
||||
inputs := map[string]interface{}{}
|
||||
|
||||
setupWorkflowInputs(ctx, &inputs, rc)
|
||||
|
||||
var env map[string]string
|
||||
if step != nil {
|
||||
env = *step.getEnv()
|
||||
@@ -343,3 +375,54 @@ func getEvaluatorInputs(ctx context.Context, rc *RunContext, step step, ghc *mod
|
||||
|
||||
return inputs
|
||||
}
|
||||
|
||||
func setupWorkflowInputs(ctx context.Context, inputs *map[string]interface{}, rc *RunContext) {
|
||||
if rc.caller != nil {
|
||||
config := rc.Run.Workflow.WorkflowCallConfig()
|
||||
|
||||
for name, input := range config.Inputs {
|
||||
value := rc.caller.runContext.Run.Job().With[name]
|
||||
if value != nil {
|
||||
if str, ok := value.(string); ok {
|
||||
// evaluate using the calling RunContext (outside)
|
||||
value = rc.caller.runContext.ExprEval.Interpolate(ctx, str)
|
||||
}
|
||||
}
|
||||
|
||||
if value == nil && config != nil && config.Inputs != nil {
|
||||
value = input.Default
|
||||
if rc.ExprEval != nil {
|
||||
if str, ok := value.(string); ok {
|
||||
// evaluate using the called RunContext (inside)
|
||||
value = rc.ExprEval.Interpolate(ctx, str)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
(*inputs)[name] = value
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func getWorkflowSecrets(ctx context.Context, rc *RunContext) map[string]string {
|
||||
if rc.caller != nil {
|
||||
job := rc.caller.runContext.Run.Job()
|
||||
secrets := job.Secrets()
|
||||
|
||||
if secrets == nil && job.InheritSecrets() {
|
||||
secrets = rc.caller.runContext.Config.Secrets
|
||||
}
|
||||
|
||||
if secrets == nil {
|
||||
secrets = map[string]string{}
|
||||
}
|
||||
|
||||
for k, v := range secrets {
|
||||
secrets[k] = rc.caller.runContext.ExprEval.Interpolate(ctx, v)
|
||||
}
|
||||
|
||||
return secrets
|
||||
}
|
||||
|
||||
return rc.Config.Secrets
|
||||
}
|
||||
|
@@ -70,7 +70,19 @@ func newJobExecutor(info jobInfo, sf stepFactory, rc *RunContext) common.Executo
|
||||
return common.NewErrorExecutor(err)
|
||||
}
|
||||
|
||||
preSteps = append(preSteps, useStepLogger(rc, stepModel, stepStagePre, step.pre()))
|
||||
preExec := step.pre()
|
||||
preSteps = append(preSteps, useStepLogger(rc, stepModel, stepStagePre, func(ctx context.Context) error {
|
||||
logger := common.Logger(ctx)
|
||||
preErr := preExec(ctx)
|
||||
if preErr != nil {
|
||||
logger.Errorf("%v", preErr)
|
||||
common.SetJobError(ctx, preErr)
|
||||
} else if ctx.Err() != nil {
|
||||
logger.Errorf("%v", ctx.Err())
|
||||
common.SetJobError(ctx, ctx.Err())
|
||||
}
|
||||
return preErr
|
||||
}))
|
||||
|
||||
stepExec := step.main()
|
||||
steps = append(steps, useStepLogger(rc, stepModel, stepStageMain, func(ctx context.Context) error {
|
||||
@@ -96,21 +108,32 @@ func newJobExecutor(info jobInfo, sf stepFactory, rc *RunContext) common.Executo
|
||||
}
|
||||
|
||||
postExecutor = postExecutor.Finally(func(ctx context.Context) error {
|
||||
logger := common.Logger(ctx)
|
||||
jobError := common.JobError(ctx)
|
||||
if jobError != nil {
|
||||
info.result("failure")
|
||||
logger.WithField("jobResult", "failure").Infof("\U0001F3C1 Job failed")
|
||||
} else {
|
||||
err := info.stopContainer()(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
info.result("success")
|
||||
logger.WithField("jobResult", "success").Infof("\U0001F3C1 Job succeeded")
|
||||
var err error
|
||||
if rc.Config.AutoRemove || jobError == nil {
|
||||
// always allow 1 min for stopping and removing the runner, even if we were cancelled
|
||||
ctx, cancel := context.WithTimeout(common.WithLogger(context.Background(), common.Logger(ctx)), time.Minute)
|
||||
defer cancel()
|
||||
|
||||
logger := common.Logger(ctx)
|
||||
logger.Infof("Cleaning up services for job %s", rc.JobName)
|
||||
if err := rc.stopServiceContainers()(ctx); err != nil {
|
||||
logger.Errorf("Error while cleaning services: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
logger.Infof("Cleaning up container for job %s", rc.JobName)
|
||||
err = info.stopContainer()(ctx)
|
||||
|
||||
logger.Infof("Cleaning up network for job %s", rc.JobName)
|
||||
networkName := fmt.Sprintf("%s-network", rc.jobContainerName())
|
||||
if err := rc.removeNetwork(networkName)(ctx); err != nil {
|
||||
logger.Errorf("Error while cleaning network: %v", err)
|
||||
}
|
||||
}
|
||||
setJobResult(ctx, info, rc, jobError == nil)
|
||||
setJobOutputs(ctx, rc)
|
||||
|
||||
return err
|
||||
})
|
||||
|
||||
pipeline := make([]common.Executor, 0)
|
||||
@@ -123,7 +146,7 @@ func newJobExecutor(info jobInfo, sf stepFactory, rc *RunContext) common.Executo
|
||||
if ctx.Err() == context.Canceled {
|
||||
// in case of an aborted run, we still should execute the
|
||||
// post steps to allow cleanup.
|
||||
ctx, cancel = context.WithTimeout(WithJobLogger(context.Background(), rc.Run.JobID, rc.String(), rc.Config, &rc.Masks, rc.Matrix), 5*time.Minute)
|
||||
ctx, cancel = context.WithTimeout(common.WithLogger(context.Background(), common.Logger(ctx)), 5*time.Minute)
|
||||
defer cancel()
|
||||
}
|
||||
return postExecutor(ctx)
|
||||
@@ -132,6 +155,49 @@ func newJobExecutor(info jobInfo, sf stepFactory, rc *RunContext) common.Executo
|
||||
Finally(info.closeContainer()))
|
||||
}
|
||||
|
||||
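The Finally block above derives its cleanup context from context.Background() rather than the (possibly cancelled) job context, so teardown keeps a one-minute budget even after an abort. A minimal sketch of that pattern:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// cleanupAfterCancel derives the cleanup context from Background, not from
// jobCtx, so the teardown keeps its one-minute budget even after the job
// itself was cancelled.
func cleanupAfterCancel(jobCtx context.Context, cleanup func(context.Context) error) error {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	return cleanup(ctx)
}

func main() {
	jobCtx, cancel := context.WithCancel(context.Background())
	cancel() // simulate an aborted run
	_ = cleanupAfterCancel(jobCtx, func(ctx context.Context) error {
		fmt.Println("cleanup still runs:", ctx.Err() == nil) // true
		return nil
	})
}
```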
func setJobResult(ctx context.Context, info jobInfo, rc *RunContext, success bool) {
|
||||
logger := common.Logger(ctx)
|
||||
|
||||
jobResult := "success"
|
||||
// we have only one result for a whole matrix build, so we need
|
||||
// to keep an existing result state if we run a matrix
|
||||
if len(info.matrix()) > 0 && rc.Run.Job().Result != "" {
|
||||
jobResult = rc.Run.Job().Result
|
||||
}
|
||||
|
||||
if !success {
|
||||
jobResult = "failure"
|
||||
}
|
||||
|
||||
info.result(jobResult)
|
||||
if rc.caller != nil {
|
||||
// set reusable workflow job result
|
||||
rc.caller.runContext.result(jobResult)
|
||||
}
|
||||
|
||||
jobResultMessage := "succeeded"
|
||||
if jobResult != "success" {
|
||||
jobResultMessage = "failed"
|
||||
}
|
||||
|
||||
logger.WithField("jobResult", jobResult).Infof("\U0001F3C1 Job %s", jobResultMessage)
|
||||
}
|
||||
|
||||
func setJobOutputs(ctx context.Context, rc *RunContext) {
|
||||
if rc.caller != nil {
|
||||
// map outputs for reusable workflows
|
||||
callerOutputs := make(map[string]string)
|
||||
|
||||
ee := rc.NewExpressionEvaluator(ctx)
|
||||
|
||||
for k, v := range rc.Run.Workflow.WorkflowCallConfig().Outputs {
|
||||
callerOutputs[k] = ee.Interpolate(ctx, ee.Interpolate(ctx, v.Value))
|
||||
}
|
||||
|
||||
rc.caller.runContext.Run.Job().Outputs = callerOutputs
|
||||
}
|
||||
}
|
||||
|
||||
func useStepLogger(rc *RunContext, stepModel *model.Step, stage stepStage, executor common.Executor) common.Executor {
|
||||
return func(ctx context.Context) error {
|
||||
ctx = withStepLogger(ctx, stepModel.Number, stepModel.ID, rc.ExprEval.Interpolate(ctx, stepModel.String()), stage.String())
|
||||
|
@@ -15,15 +15,15 @@ import (
|
||||
|
||||
func TestJobExecutor(t *testing.T) {
|
||||
tables := []TestJobFileInfo{
|
||||
{workdir, "uses-and-run-in-one-step", "push", "Invalid run/uses syntax for job:test step:Test", platforms},
|
||||
{workdir, "uses-github-empty", "push", "Expected format {org}/{repo}[/path]@ref", platforms},
|
||||
{workdir, "uses-github-noref", "push", "Expected format {org}/{repo}[/path]@ref", platforms},
|
||||
{workdir, "uses-github-root", "push", "", platforms},
|
||||
{workdir, "uses-github-path", "push", "", platforms},
|
||||
{workdir, "uses-docker-url", "push", "", platforms},
|
||||
{workdir, "uses-github-full-sha", "push", "", platforms},
|
||||
{workdir, "uses-github-short-sha", "push", "Unable to resolve action `actions/hello-world-docker-action@b136eb8`, the provided ref `b136eb8` is the shortened version of a commit SHA, which is not supported. Please use the full commit SHA `b136eb8894c5cb1dd5807da824be97ccdf9b5423` instead", platforms},
|
||||
{workdir, "job-nil-step", "push", "invalid Step 0: missing run or uses key", platforms},
|
||||
{workdir, "uses-and-run-in-one-step", "push", "Invalid run/uses syntax for job:test step:Test", platforms, secrets},
|
||||
{workdir, "uses-github-empty", "push", "Expected format {org}/{repo}[/path]@ref", platforms, secrets},
|
||||
{workdir, "uses-github-noref", "push", "Expected format {org}/{repo}[/path]@ref", platforms, secrets},
|
||||
{workdir, "uses-github-root", "push", "", platforms, secrets},
|
||||
{workdir, "uses-github-path", "push", "", platforms, secrets},
|
||||
{workdir, "uses-docker-url", "push", "", platforms, secrets},
|
||||
{workdir, "uses-github-full-sha", "push", "", platforms, secrets},
|
||||
{workdir, "uses-github-short-sha", "push", "Unable to resolve action `actions/hello-world-docker-action@b136eb8`, the provided ref `b136eb8` is the shortened version of a commit SHA, which is not supported. Please use the full commit SHA `b136eb8894c5cb1dd5807da824be97ccdf9b5423` instead", platforms, secrets},
|
||||
{workdir, "job-nil-step", "push", "invalid Step 0: missing run or uses key", platforms, secrets},
|
||||
}
|
||||
// These tests are sufficient to check syntax only.
|
||||
ctx := common.WithDryrun(context.Background(), true)
|
||||
|
@@ -57,34 +57,59 @@ func WithMasks(ctx context.Context, masks *[]string) context.Context {
|
||||
return context.WithValue(ctx, masksContextKeyVal, masks)
|
||||
}
|
||||
|
||||
type JobLoggerFactory interface {
|
||||
WithJobLogger() *logrus.Logger
|
||||
}
|
||||
|
||||
type jobLoggerFactoryContextKey string
|
||||
|
||||
var jobLoggerFactoryContextKeyVal = (jobLoggerFactoryContextKey)("jobloggerkey")
|
||||
|
||||
func WithJobLoggerFactory(ctx context.Context, factory JobLoggerFactory) context.Context {
|
||||
return context.WithValue(ctx, jobLoggerFactoryContextKeyVal, factory)
|
||||
}
|
||||
|
||||
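JobLoggerFactory lets an embedder (e.g. Gitea) supply its own logger construction instead of act's default stdout logger. A hypothetical factory implementation, wired in via the WithJobLoggerFactory call above:

```go
package main

import (
	"context"
	"io"

	"github.com/sirupsen/logrus"
)

// silentFactory is a hypothetical embedder-side factory: the host decides
// where job logs go and at what level, act just consumes the logger.
type silentFactory struct{}

func (silentFactory) WithJobLogger() *logrus.Logger {
	l := logrus.New()
	l.SetOutput(io.Discard) // route job logs wherever the host wants
	l.SetLevel(logrus.TraceLevel)
	return l
}

func main() {
	ctx := context.Background()
	_ = ctx // in embedding code: ctx = runner.WithJobLoggerFactory(ctx, silentFactory{})
}
```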
// WithJobLogger attaches a new logger to context that is aware of steps
|
||||
func WithJobLogger(ctx context.Context, jobID string, jobName string, config *Config, masks *[]string, matrix map[string]interface{}) context.Context {
|
||||
mux.Lock()
|
||||
defer mux.Unlock()
|
||||
|
||||
var formatter logrus.Formatter
|
||||
if config.JSONLogger {
|
||||
formatter = &jobLogJSONFormatter{
|
||||
formatter: &logrus.JSONFormatter{},
|
||||
masker: valueMasker(config.InsecureSecrets, config.Secrets),
|
||||
}
|
||||
} else {
|
||||
formatter = &jobLogFormatter{
|
||||
color: colors[nextColor%len(colors)],
|
||||
masker: valueMasker(config.InsecureSecrets, config.Secrets),
|
||||
}
|
||||
}
|
||||
|
||||
nextColor++
|
||||
ctx = WithMasks(ctx, masks)
|
||||
|
||||
logger := logrus.New()
|
||||
var logger *logrus.Logger
|
||||
if jobLoggerFactory, ok := ctx.Value(jobLoggerFactoryContextKeyVal).(JobLoggerFactory); ok && jobLoggerFactory != nil {
|
||||
logger = jobLoggerFactory.WithJobLogger()
|
||||
} else {
|
||||
var formatter logrus.Formatter
|
||||
if config.JSONLogger {
|
||||
formatter = &logrus.JSONFormatter{}
|
||||
} else {
|
||||
mux.Lock()
|
||||
defer mux.Unlock()
|
||||
nextColor++
|
||||
formatter = &jobLogFormatter{
|
||||
color: colors[nextColor%len(colors)],
|
||||
}
|
||||
}
|
||||
|
||||
logger = logrus.New()
|
||||
logger.SetOutput(os.Stdout)
|
||||
logger.SetLevel(logrus.GetLevel())
|
||||
logger.SetFormatter(formatter)
|
||||
}
|
||||
|
||||
{ // Adapt to Gitea
|
||||
if hook := common.LoggerHook(ctx); hook != nil {
|
||||
logger.AddHook(hook)
|
||||
}
|
||||
logger.SetFormatter(formatter)
|
||||
logger.SetOutput(os.Stdout)
|
||||
logger.SetLevel(logrus.TraceLevel) // to be aware of steps
|
||||
if config.JobLoggerLevel != nil {
|
||||
logger.SetLevel(*config.JobLoggerLevel)
|
||||
} else {
|
||||
logger.SetLevel(logrus.TraceLevel)
|
||||
}
|
||||
}
|
||||
|
||||
logger.SetFormatter(&maskedFormatter{
|
||||
Formatter: logger.Formatter,
|
||||
masker: valueMasker(config.InsecureSecrets, config.Secrets),
|
||||
})
|
||||
rtn := logger.WithFields(logrus.Fields{
|
||||
"job": jobName,
|
||||
"jobID": jobID,
|
||||
@@ -153,16 +178,22 @@ func valueMasker(insecureSecrets bool, secrets map[string]string) entryProcessor
|
||||
}
|
||||
}
|
||||
|
||||
type maskedFormatter struct {
|
||||
logrus.Formatter
|
||||
masker entryProcessor
|
||||
}
|
||||
|
||||
func (f *maskedFormatter) Format(entry *logrus.Entry) ([]byte, error) {
|
||||
return f.Formatter.Format(f.masker(entry))
|
||||
}
|
||||
|
||||
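maskedFormatter is a small decorator: it rewrites each entry before delegating to the real formatter, so masking works for both the text and JSON paths. A self-contained sketch of the idea (message-only; the real valueMasker also processes fields):

```go
package main

import (
	"os"
	"strings"

	"github.com/sirupsen/logrus"
)

// masked wraps any logrus.Formatter and masks secrets in the message
// before delegating. Note this sketch mutates the entry in place; a more
// careful implementation would work on a copy.
type masked struct {
	logrus.Formatter
	secrets []string
}

func (f *masked) Format(e *logrus.Entry) ([]byte, error) {
	for _, s := range f.secrets {
		e.Message = strings.ReplaceAll(e.Message, s, "***")
	}
	return f.Formatter.Format(e)
}

func main() {
	log := logrus.New()
	log.SetOutput(os.Stdout)
	log.SetFormatter(&masked{Formatter: &logrus.TextFormatter{}, secrets: []string{"hunter2"}})
	log.Info("token is hunter2") // msg="token is ***"
}
```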
type jobLogFormatter struct {
|
||||
color int
|
||||
masker entryProcessor
|
||||
}
|
||||
|
||||
func (f *jobLogFormatter) Format(entry *logrus.Entry) ([]byte, error) {
|
||||
b := &bytes.Buffer{}
|
||||
|
||||
entry = f.masker(entry)
|
||||
|
||||
if f.isColored(entry) {
|
||||
f.printColored(b, entry)
|
||||
} else {
|
||||
@@ -229,12 +260,3 @@ func checkIfTerminal(w io.Writer) bool {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
type jobLogJSONFormatter struct {
|
||||
masker entryProcessor
|
||||
formatter *logrus.JSONFormatter
|
||||
}
|
||||
|
||||
func (f *jobLogJSONFormatter) Format(entry *logrus.Entry) ([]byte, error) {
|
||||
return f.formatter.Format(f.masker(entry))
|
||||
}
|
||||
|
180
pkg/runner/reusable_workflow.go
Normal file
@@ -0,0 +1,180 @@
|
||||
package runner
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/nektos/act/pkg/common"
|
||||
"github.com/nektos/act/pkg/common/git"
|
||||
"github.com/nektos/act/pkg/model"
|
||||
)
|
||||
|
||||
func newLocalReusableWorkflowExecutor(rc *RunContext) common.Executor {
|
||||
// ./.gitea/workflows/wf.yml -> .gitea/workflows/wf.yml
|
||||
trimmedUses := strings.TrimPrefix(rc.Run.Job().Uses, "./")
|
||||
// uses string format is {owner}/{repo}/.{git_platform}/workflows/{filename}@{ref}
|
||||
uses := fmt.Sprintf("%s/%s@%s", rc.Config.PresetGitHubContext.Repository, trimmedUses, rc.Config.PresetGitHubContext.Sha)
|
||||
|
||||
remoteReusableWorkflow := newRemoteReusableWorkflowWithPlat(uses)
|
||||
if remoteReusableWorkflow == nil {
|
||||
return common.NewErrorExecutor(fmt.Errorf("expected format {owner}/{repo}/.{git_platform}/workflows/{filename}@{ref}. Actual '%s' Input string was not in a correct format", uses))
|
||||
}
|
||||
remoteReusableWorkflow.URL = rc.Config.GitHubInstance
|
||||
|
||||
workflowDir := fmt.Sprintf("%s/%s", rc.ActionCacheDir(), safeFilename(uses))
|
||||
|
||||
// If the repository is private, we need a token to clone it
|
||||
token := rc.Config.GetToken()
|
||||
|
||||
return common.NewPipelineExecutor(
|
||||
newMutexExecutor(cloneIfRequired(rc, *remoteReusableWorkflow, workflowDir, token)),
|
||||
newReusableWorkflowExecutor(rc, workflowDir, remoteReusableWorkflow.FilePath()),
|
||||
)
|
||||
}
|
||||
|
||||
func newRemoteReusableWorkflowExecutor(rc *RunContext) common.Executor {
|
||||
uses := rc.Run.Job().Uses
|
||||
|
||||
remoteReusableWorkflow := newRemoteReusableWorkflowWithPlat(uses)
|
||||
if remoteReusableWorkflow == nil {
|
||||
return common.NewErrorExecutor(fmt.Errorf("expected format {owner}/{repo}/.{git_platform}/workflows/{filename}@{ref}. Actual '%s' Input string was not in a correct format", uses))
|
||||
}
|
||||
remoteReusableWorkflow.URL = rc.Config.GitHubInstance
|
||||
|
||||
workflowDir := fmt.Sprintf("%s/%s", rc.ActionCacheDir(), safeFilename(uses))
|
||||
|
||||
// FIXME: if the reusable workflow is from a private repository, we need to provide a token to access the repository.
|
||||
token := ""
|
||||
|
||||
return common.NewPipelineExecutor(
|
||||
newMutexExecutor(cloneIfRequired(rc, *remoteReusableWorkflow, workflowDir, token)),
|
||||
newReusableWorkflowExecutor(rc, workflowDir, remoteReusableWorkflow.FilePath()),
|
||||
)
|
||||
}
|
||||
|
||||
var (
|
||||
executorLock sync.Mutex
|
||||
)
|
||||
|
||||
func newMutexExecutor(executor common.Executor) common.Executor {
|
||||
return func(ctx context.Context) error {
|
||||
executorLock.Lock()
|
||||
defer executorLock.Unlock()
|
||||
|
||||
return executor(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
func cloneIfRequired(rc *RunContext, remoteReusableWorkflow remoteReusableWorkflow, targetDirectory, token string) common.Executor {
|
||||
return common.NewConditionalExecutor(
|
||||
func(ctx context.Context) bool {
|
||||
_, err := os.Stat(targetDirectory)
|
||||
notExists := errors.Is(err, fs.ErrNotExist)
|
||||
return notExists
|
||||
},
|
||||
git.NewGitCloneExecutor(git.NewGitCloneExecutorInput{
|
||||
URL: remoteReusableWorkflow.CloneURL(),
|
||||
Ref: remoteReusableWorkflow.Ref,
|
||||
Dir: targetDirectory,
|
||||
Token: token,
|
||||
}),
|
||||
nil,
|
||||
)
|
||||
}
|
||||
|
||||
func newReusableWorkflowExecutor(rc *RunContext, directory string, workflow string) common.Executor {
|
||||
return func(ctx context.Context) error {
|
||||
planner, err := model.NewWorkflowPlanner(path.Join(directory, workflow), true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
plan, err := planner.PlanEvent("workflow_call")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
runner, err := NewReusableWorkflowRunner(rc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return runner.NewPlanExecutor(plan)(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
func NewReusableWorkflowRunner(rc *RunContext) (Runner, error) {
|
||||
runner := &runnerImpl{
|
||||
config: rc.Config,
|
||||
eventJSON: rc.EventJSON,
|
||||
caller: &caller{
|
||||
runContext: rc,
|
||||
},
|
||||
}
|
||||
|
||||
return runner.configure()
|
||||
}
|
||||
|
||||
type remoteReusableWorkflow struct {
|
||||
URL string
|
||||
Org string
|
||||
Repo string
|
||||
Filename string
|
||||
Ref string
|
||||
|
||||
GitPlatform string
|
||||
}
|
||||
|
||||
func (r *remoteReusableWorkflow) CloneURL() string {
|
||||
// In Gitea, r.URL always has the protocol prefix, so we don't need to add an extra prefix in this case.
|
||||
if strings.HasPrefix(r.URL, "http://") || strings.HasPrefix(r.URL, "https://") {
|
||||
return fmt.Sprintf("%s/%s/%s", r.URL, r.Org, r.Repo)
|
||||
}
|
||||
return fmt.Sprintf("https://%s/%s/%s", r.URL, r.Org, r.Repo)
|
||||
}
|
||||
|
||||
func (r *remoteReusableWorkflow) FilePath() string {
|
||||
return fmt.Sprintf("./.%s/workflows/%s", r.GitPlatform, r.Filename)
|
||||
}
|
||||
|
||||
func newRemoteReusableWorkflowWithPlat(uses string) *remoteReusableWorkflow {
|
||||
// GitHub docs:
|
||||
// https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_iduses
|
||||
r := regexp.MustCompile(`^([^/]+)/([^/]+)/\.([^/]+)/workflows/([^@]+)@(.*)$`)
|
||||
matches := r.FindStringSubmatch(uses)
|
||||
if len(matches) != 6 {
|
||||
return nil
|
||||
}
|
||||
return &remoteReusableWorkflow{
|
||||
Org: matches[1],
|
||||
Repo: matches[2],
|
||||
GitPlatform: matches[3],
|
||||
Filename: matches[4],
|
||||
Ref: matches[5],
|
||||
}
|
||||
}
|
||||
|
||||
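A quick check of the regex above against a Gitea-style uses string:

```go
package main

import (
	"fmt"
	"regexp"
)

// Demonstrates the uses-string format accepted by
// newRemoteReusableWorkflowWithPlat, including the ".gitea" platform dir.
func main() {
	r := regexp.MustCompile(`^([^/]+)/([^/]+)/\.([^/]+)/workflows/([^@]+)@(.*)$`)
	m := r.FindStringSubmatch("owner/repo/.gitea/workflows/ci.yml@main")
	fmt.Println("org:", m[1], "repo:", m[2], "platform:", m[3], "file:", m[4], "ref:", m[5])
	// org: owner repo: repo platform: gitea file: ci.yml ref: main
	// FilePath() would then resolve to ./.gitea/workflows/ci.yml
}
```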
// deprecated: use newRemoteReusableWorkflowWithPlat
|
||||
func newRemoteReusableWorkflow(uses string) *remoteReusableWorkflow {
|
||||
// GitHub docs:
|
||||
// https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_iduses
|
||||
r := regexp.MustCompile(`^([^/]+)/([^/]+)/.github/workflows/([^@]+)@(.*)$`)
|
||||
matches := r.FindStringSubmatch(uses)
|
||||
if len(matches) != 5 {
|
||||
return nil
|
||||
}
|
||||
return &remoteReusableWorkflow{
|
||||
Org: matches[1],
|
||||
Repo: matches[2],
|
||||
Filename: matches[3],
|
||||
Ref: matches[4],
|
||||
URL: "github.com",
|
||||
}
|
||||
}
|
@@ -1,12 +1,16 @@
|
||||
package runner
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bufio"
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
@@ -14,12 +18,10 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/mitchellh/go-homedir"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/opencontainers/selinux/go-selinux"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/nektos/act/pkg/common"
|
||||
"github.com/nektos/act/pkg/common/git"
|
||||
"github.com/nektos/act/pkg/container"
|
||||
"github.com/nektos/act/pkg/exprparser"
|
||||
"github.com/nektos/act/pkg/model"
|
||||
@@ -33,17 +35,21 @@ type RunContext struct {
|
||||
Run *model.Run
|
||||
EventJSON string
|
||||
Env map[string]string
|
||||
GlobalEnv map[string]string // to pass env changes of GITHUB_ENV and set-env correctly, due to dirty Env field
|
||||
ExtraPath []string
|
||||
CurrentStep string
|
||||
StepResults map[string]*model.StepResult
|
||||
IntraActionState map[string]map[string]string
|
||||
ExprEval ExpressionEvaluator
|
||||
JobContainer container.ExecutionsEnvironment
|
||||
ServiceContainers []container.ExecutionsEnvironment
|
||||
OutputMappings map[MappableOutput]MappableOutput
|
||||
JobName string
|
||||
ActionPath string
|
||||
Parent *RunContext
|
||||
Masks []string
|
||||
cleanUpJobContainer common.Executor
|
||||
caller *caller // job calling this RunContext (reusable workflows)
|
||||
}
|
||||
|
||||
func (rc *RunContext) AddMask(mask string) {
|
||||
@@ -56,7 +62,13 @@ type MappableOutput struct {
|
||||
}
|
||||
|
||||
func (rc *RunContext) String() string {
|
||||
return fmt.Sprintf("%s/%s", rc.Run.Workflow.Name, rc.Name)
|
||||
name := fmt.Sprintf("%s/%s", rc.Run.Workflow.Name, rc.Name)
|
||||
if rc.caller != nil {
|
||||
// prefix the reusable workflow with the caller job
|
||||
// this is required to create unique container names
|
||||
name = fmt.Sprintf("%s/%s", rc.caller.runContext.Run.JobID, name)
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
// GetEnv returns the env for the context
|
||||
@@ -78,6 +90,24 @@ func (rc *RunContext) jobContainerName() string {
|
||||
return createSimpleContainerName(rc.Config.ContainerNamePrefix, "WORKFLOW-"+rc.Run.Workflow.Name, "JOB-"+rc.Name)
|
||||
}
|
||||
|
||||
func getDockerDaemonSocketMountPath(daemonPath string) string {
|
||||
if protoIndex := strings.Index(daemonPath, "://"); protoIndex != -1 {
|
||||
scheme := daemonPath[:protoIndex]
|
||||
if strings.EqualFold(scheme, "npipe") {
|
||||
// Linux container mount on Windows: use the default socket path of the VM / WSL2
|
||||
return "/var/run/docker.sock"
|
||||
} else if strings.EqualFold(scheme, "unix") {
|
||||
return daemonPath[protoIndex+3:]
|
||||
} else if strings.IndexFunc(scheme, func(r rune) bool {
|
||||
return (r < 'a' || r > 'z') && (r < 'A' || r > 'Z')
|
||||
}) == -1 {
|
||||
// unknown protocol, use the default
|
||||
return "/var/run/docker.sock"
|
||||
}
|
||||
}
|
||||
return daemonPath
|
||||
}
|
||||
|
||||
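The mapping implemented by getDockerDaemonSocketMountPath, reproduced as a runnable table (slightly simplified: the original only falls back for unknown schemes when the scheme is purely alphabetic):

```go
package main

import (
	"fmt"
	"strings"
)

// socketMountPath shows the scheme handling above: npipe (Docker Desktop
// on Windows) and unknown schemes fall back to the default in-VM socket,
// unix:// URLs are unwrapped, and bare paths pass through unchanged.
func socketMountPath(daemonPath string) string {
	if i := strings.Index(daemonPath, "://"); i != -1 {
		scheme := daemonPath[:i]
		switch {
		case strings.EqualFold(scheme, "npipe"):
			return "/var/run/docker.sock"
		case strings.EqualFold(scheme, "unix"):
			return daemonPath[i+3:]
		}
		return "/var/run/docker.sock" // unknown scheme: use the default
	}
	return daemonPath
}

func main() {
	for _, p := range []string{
		"npipe:////./pipe/docker_engine",
		"unix:///var/run/docker.sock",
		"/var/run/docker.sock",
	} {
		fmt.Printf("%-34s -> %s\n", p, socketMountPath(p))
	}
}
```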
// Returns the binds and mounts for the container, resolving paths as appropriate
|
||||
func (rc *RunContext) GetBindsAndMounts() ([]string, map[string]string) {
|
||||
name := rc.jobContainerName()
|
||||
@@ -86,8 +116,10 @@ func (rc *RunContext) GetBindsAndMounts() ([]string, map[string]string) {
|
||||
rc.Config.ContainerDaemonSocket = "/var/run/docker.sock"
|
||||
}
|
||||
|
||||
binds := []string{
|
||||
fmt.Sprintf("%s:%s", rc.Config.ContainerDaemonSocket, "/var/run/docker.sock"),
|
||||
binds := []string{}
|
||||
if rc.Config.ContainerDaemonSocket != "-" {
|
||||
daemonPath := getDockerDaemonSocketMountPath(rc.Config.ContainerDaemonSocket)
|
||||
binds = append(binds, fmt.Sprintf("%s:%s", daemonPath, "/var/run/docker.sock"))
|
||||
}
|
||||
|
||||
ext := container.LinuxContainerEnvironmentExtensions{}
|
||||
@@ -145,15 +177,15 @@ func (rc *RunContext) startHostEnvironment() common.Executor {
|
||||
_, _ = rand.Read(randBytes)
|
||||
miscpath := filepath.Join(cacheDir, hex.EncodeToString(randBytes))
|
||||
actPath := filepath.Join(miscpath, "act")
|
||||
if err := os.MkdirAll(actPath, 0777); err != nil {
|
||||
if err := os.MkdirAll(actPath, 0o777); err != nil {
|
||||
return err
|
||||
}
|
||||
path := filepath.Join(miscpath, "hostexecutor")
|
||||
if err := os.MkdirAll(path, 0777); err != nil {
|
||||
if err := os.MkdirAll(path, 0o777); err != nil {
|
||||
return err
|
||||
}
|
||||
runnerTmp := filepath.Join(miscpath, "tmp")
|
||||
if err := os.MkdirAll(runnerTmp, 0777); err != nil {
|
||||
if err := os.MkdirAll(runnerTmp, 0o777); err != nil {
|
||||
return err
|
||||
}
|
||||
toolCache := filepath.Join(cacheDir, "tool_cache")
|
||||
@@ -169,29 +201,28 @@ func (rc *RunContext) startHostEnvironment() common.Executor {
|
||||
StdOut: logWriter,
|
||||
}
|
||||
rc.cleanUpJobContainer = rc.JobContainer.Remove()
|
||||
rc.Env["RUNNER_TOOL_CACHE"] = toolCache
|
||||
rc.Env["RUNNER_OS"] = runtime.GOOS
|
||||
rc.Env["RUNNER_ARCH"] = runtime.GOARCH
|
||||
rc.Env["RUNNER_TEMP"] = runnerTmp
|
||||
for k, v := range rc.JobContainer.GetRunnerContext(ctx) {
|
||||
if v, ok := v.(string); ok {
|
||||
rc.Env[fmt.Sprintf("RUNNER_%s", strings.ToUpper(k))] = v
|
||||
}
|
||||
}
|
||||
for _, env := range os.Environ() {
|
||||
i := strings.Index(env, "=")
|
||||
if i > 0 {
|
||||
rc.Env[env[0:i]] = env[i+1:]
|
||||
if k, v, ok := strings.Cut(env, "="); ok {
|
||||
// don't override
|
||||
if _, ok := rc.Env[k]; !ok {
|
||||
rc.Env[k] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return common.NewPipelineExecutor(
|
||||
rc.JobContainer.Copy(rc.JobContainer.GetActPath()+"/", &container.FileEntry{
|
||||
Name: "workflow/event.json",
|
||||
Mode: 0644,
|
||||
Mode: 0o644,
|
||||
Body: rc.EventJSON,
|
||||
}, &container.FileEntry{
|
||||
Name: "workflow/envs.txt",
|
||||
Mode: 0666,
|
||||
Body: "",
|
||||
}, &container.FileEntry{
|
||||
Name: "workflow/paths.txt",
|
||||
Mode: 0666,
|
||||
Mode: 0o666,
|
||||
Body: "",
|
||||
}),
|
||||
)(ctx)
|
||||
@@ -226,10 +257,59 @@ func (rc *RunContext) startJobContainer() common.Executor {
|
||||
envList = append(envList, fmt.Sprintf("%s=%s", "RUNNER_OS", "Linux"))
|
||||
envList = append(envList, fmt.Sprintf("%s=%s", "RUNNER_ARCH", container.RunnerArch(ctx)))
|
||||
envList = append(envList, fmt.Sprintf("%s=%s", "RUNNER_TEMP", "/tmp"))
|
||||
envList = append(envList, fmt.Sprintf("%s=%s", "LANG", "C.UTF-8")) // Use same locale as GitHub Actions
|
||||
|
||||
ext := container.LinuxContainerEnvironmentExtensions{}
|
||||
binds, mounts := rc.GetBindsAndMounts()
|
||||
|
||||
// add service containers
|
||||
for name, spec := range rc.Run.Job().Services {
|
||||
// interpolate env
|
||||
interpolatedEnvs := make(map[string]string, len(spec.Env))
|
||||
for k, v := range spec.Env {
|
||||
interpolatedEnvs[k] = rc.ExprEval.Interpolate(ctx, v)
|
||||
}
|
||||
envs := make([]string, 0, len(interpolatedEnvs))
|
||||
for k, v := range interpolatedEnvs {
|
||||
envs = append(envs, fmt.Sprintf("%s=%s", k, v))
|
||||
}
|
||||
// interpolate cmd
|
||||
interpolatedCmd := make([]string, 0, len(spec.Cmd))
|
||||
for _, v := range spec.Cmd {
|
||||
interpolatedCmd = append(interpolatedCmd, rc.ExprEval.Interpolate(ctx, v))
|
||||
}
|
||||
username, password, err := rc.handleServiceCredentials(ctx, spec.Credentials)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to handle service %s credentials: %w", name, err)
|
||||
}
|
||||
serviceContainerName := createSimpleContainerName(rc.jobContainerName(), name)
|
||||
c := container.NewContainer(&container.NewContainerInput{
|
||||
Name: serviceContainerName,
|
||||
WorkingDir: ext.ToContainerPath(rc.Config.Workdir),
|
||||
Image: spec.Image,
|
||||
Username: username,
|
||||
Password: password,
|
||||
Cmd: interpolatedCmd,
|
||||
Env: envs,
|
||||
Mounts: map[string]string{
|
||||
// TODO merge volumes
|
||||
name: ext.ToContainerPath(rc.Config.Workdir),
|
||||
"act-toolcache": "/toolcache",
|
||||
"act-actions": "/actions",
|
||||
},
|
||||
Binds: binds,
|
||||
Stdout: logWriter,
|
||||
Stderr: logWriter,
|
||||
Privileged: rc.Config.Privileged,
|
||||
UsernsMode: rc.Config.UsernsMode,
|
||||
Platform: rc.Config.ContainerArchitecture,
|
||||
AutoRemove: rc.Config.AutoRemove,
|
||||
Options: spec.Options,
|
||||
NetworkAliases: []string{name},
|
||||
})
|
||||
rc.ServiceContainers = append(rc.ServiceContainers, c)
|
||||
}
|
||||
|
||||
rc.cleanUpJobContainer = func(ctx context.Context) error {
|
||||
if rc.JobContainer != nil && !rc.Config.ReuseContainers {
|
||||
return rc.JobContainer.Remove().
|
||||
@@ -263,36 +343,109 @@ func (rc *RunContext) startJobContainer() common.Executor {
|
||||
return errors.New("Failed to create job container")
|
||||
}
|
||||
|
||||
networkName := fmt.Sprintf("%s-network", rc.jobContainerName())
|
||||
return common.NewPipelineExecutor(
|
||||
rc.pullServicesImages(rc.Config.ForcePull),
|
||||
rc.JobContainer.Pull(rc.Config.ForcePull),
|
||||
rc.stopServiceContainers(),
|
||||
rc.stopJobContainer(),
|
||||
func(ctx context.Context) error {
|
||||
err := rc.removeNetwork(networkName)(ctx)
|
||||
if errdefs.IsNotFound(err) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
},
|
||||
rc.createNetwork(networkName),
|
||||
rc.startServiceContainers(networkName),
|
||||
rc.JobContainer.Create(rc.Config.ContainerCapAdd, rc.Config.ContainerCapDrop),
|
||||
rc.JobContainer.Start(false),
|
||||
rc.JobContainer.UpdateFromImageEnv(&rc.Env),
|
||||
rc.JobContainer.UpdateFromEnv("/etc/environment", &rc.Env),
|
||||
rc.JobContainer.ConnectToNetwork(networkName),
|
||||
rc.JobContainer.Copy(rc.JobContainer.GetActPath()+"/", &container.FileEntry{
|
||||
Name: "workflow/event.json",
|
||||
Mode: 0644,
|
||||
Mode: 0o644,
|
||||
Body: rc.EventJSON,
|
||||
}, &container.FileEntry{
|
||||
Name: "workflow/envs.txt",
|
||||
Mode: 0666,
|
||||
Body: "",
|
||||
}, &container.FileEntry{
|
||||
Name: "workflow/paths.txt",
|
||||
Mode: 0666,
|
||||
Mode: 0o666,
|
||||
Body: "",
|
||||
}),
|
||||
)(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
func (rc *RunContext) createNetwork(name string) common.Executor {
|
||||
return func(ctx context.Context) error {
|
||||
return container.NewDockerNetworkCreateExecutor(name)(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
func (rc *RunContext) removeNetwork(name string) common.Executor {
|
||||
return func(ctx context.Context) error {
|
||||
return container.NewDockerNetworkRemoveExecutor(name)(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
func (rc *RunContext) execJobContainer(cmd []string, env map[string]string, user, workdir string) common.Executor {
|
||||
return func(ctx context.Context) error {
|
||||
return rc.JobContainer.Exec(cmd, env, user, workdir)(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
func (rc *RunContext) ApplyExtraPath(ctx context.Context, env *map[string]string) {
|
||||
if rc.ExtraPath != nil && len(rc.ExtraPath) > 0 {
|
||||
path := rc.JobContainer.GetPathVariableName()
|
||||
if rc.JobContainer.IsEnvironmentCaseInsensitive() {
|
||||
// On Windows systems, Path and PATH could both be in the map
|
||||
for k := range *env {
|
||||
if strings.EqualFold(path, k) {
|
||||
path = k
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if (*env)[path] == "" {
|
||||
cenv := map[string]string{}
|
||||
var cpath string
|
||||
if err := rc.JobContainer.UpdateFromImageEnv(&cenv)(ctx); err == nil {
|
||||
if p, ok := cenv[path]; ok {
|
||||
cpath = p
|
||||
}
|
||||
}
|
||||
if len(cpath) == 0 {
|
||||
cpath = rc.JobContainer.DefaultPathVariable()
|
||||
}
|
||||
(*env)[path] = cpath
|
||||
}
|
||||
(*env)[path] = rc.JobContainer.JoinPathVariable(append(rc.ExtraPath, (*env)[path])...)
|
||||
}
|
||||
}
|
||||
|
||||
func (rc *RunContext) UpdateExtraPath(ctx context.Context, githubEnvPath string) error {
|
||||
if common.Dryrun(ctx) {
|
||||
return nil
|
||||
}
|
||||
pathTar, err := rc.JobContainer.GetContainerArchive(ctx, githubEnvPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer pathTar.Close()
|
||||
|
||||
reader := tar.NewReader(pathTar)
|
||||
_, err = reader.Next()
|
||||
if err != nil && err != io.EOF {
|
||||
return err
|
||||
}
|
||||
s := bufio.NewScanner(reader)
|
||||
for s.Scan() {
|
||||
line := s.Text()
|
||||
if len(line) > 0 {
|
||||
rc.addPath(ctx, line)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// stopJobContainer removes the job container (if it exists) and its volume (if it exists) if !rc.Config.ReuseContainers
|
||||
func (rc *RunContext) stopJobContainer() common.Executor {
|
||||
return func(ctx context.Context) error {
|
||||
@@ -303,6 +456,41 @@ func (rc *RunContext) stopJobContainer() common.Executor {
|
||||
}
|
||||
}
|
||||
|
||||
func (rc *RunContext) pullServicesImages(forcePull bool) common.Executor {
|
||||
return func(ctx context.Context) error {
|
||||
execs := []common.Executor{}
|
||||
for _, c := range rc.ServiceContainers {
|
||||
execs = append(execs, c.Pull(forcePull))
|
||||
}
|
||||
return common.NewParallelExecutor(len(execs), execs...)(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
func (rc *RunContext) startServiceContainers(networkName string) common.Executor {
|
||||
return func(ctx context.Context) error {
|
||||
execs := []common.Executor{}
|
||||
for _, c := range rc.ServiceContainers {
|
||||
execs = append(execs, common.NewPipelineExecutor(
|
||||
c.Pull(false),
|
||||
c.Create(rc.Config.ContainerCapAdd, rc.Config.ContainerCapDrop),
|
||||
c.Start(false),
|
||||
c.ConnectToNetwork(networkName),
|
||||
))
|
||||
}
|
||||
return common.NewParallelExecutor(len(execs), execs...)(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
func (rc *RunContext) stopServiceContainers() common.Executor {
|
||||
return func(ctx context.Context) error {
|
||||
execs := []common.Executor{}
|
||||
for _, c := range rc.ServiceContainers {
|
||||
execs = append(execs, c.Remove())
|
||||
}
|
||||
return common.NewParallelExecutor(len(execs), execs...)(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
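pullServicesImages, startServiceContainers and stopServiceContainers all fan out over the service list via common.NewParallelExecutor. A minimal sketch of such a fan-out (act's real executor additionally honours a parallelism limit):

```go
package main

import (
	"context"
	"fmt"
	"sync"
)

type executor func(ctx context.Context) error

// parallel runs every executor concurrently and surfaces the first error.
func parallel(execs ...executor) executor {
	return func(ctx context.Context) error {
		var wg sync.WaitGroup
		errs := make(chan error, len(execs))
		for _, e := range execs {
			wg.Add(1)
			go func(e executor) {
				defer wg.Done()
				errs <- e(ctx)
			}(e)
		}
		wg.Wait()
		close(errs)
		for err := range errs {
			if err != nil {
				return err
			}
		}
		return nil
	}
}

func main() {
	err := parallel(
		func(context.Context) error { fmt.Println("pull service a"); return nil },
		func(context.Context) error { fmt.Println("pull service b"); return nil },
	)(context.Background())
	fmt.Println("done:", err)
}
```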
// Prepare the mounts and binds for the worker
|
||||
|
||||
// ActionCacheDir is for rc
|
||||
@@ -310,10 +498,11 @@ func (rc *RunContext) ActionCacheDir() string {
|
||||
var xdgCache string
|
||||
var ok bool
|
||||
if xdgCache, ok = os.LookupEnv("XDG_CACHE_HOME"); !ok || xdgCache == "" {
|
||||
if home, err := homedir.Dir(); err == nil {
|
||||
if home, err := os.UserHomeDir(); err == nil {
|
||||
xdgCache = filepath.Join(home, ".cache")
|
||||
} else if xdgCache, err = filepath.Abs("."); err != nil {
|
||||
log.Fatal(err)
|
||||
// It's almost impossible to get here, so the temp dir is a good fallback
|
||||
xdgCache = os.TempDir()
|
||||
}
|
||||
}
|
||||
return filepath.Join(xdgCache, "act")
|
||||
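ActionCacheDir's fallback chain after this change, runnable standalone; note the old code could log.Fatal here, while the new code degrades to the temp dir instead:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// cacheDir mirrors the fallback chain above: XDG_CACHE_HOME, then the
// user's home .cache, then the temp dir as a last resort.
func cacheDir() string {
	if xdg, ok := os.LookupEnv("XDG_CACHE_HOME"); ok && xdg != "" {
		return filepath.Join(xdg, "act")
	}
	if home, err := os.UserHomeDir(); err == nil {
		return filepath.Join(home, ".cache", "act")
	}
	return filepath.Join(os.TempDir(), "act")
}

func main() {
	fmt.Println(cacheDir())
}
```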
@@ -335,14 +524,18 @@ func (rc *RunContext) interpolateOutputs() common.Executor {
|
||||
|
||||
func (rc *RunContext) startContainer() common.Executor {
|
||||
return func(ctx context.Context) error {
|
||||
image := rc.platformImage(ctx)
|
||||
if strings.EqualFold(image, "-self-hosted") {
|
||||
if rc.IsHostEnv(ctx) {
|
||||
return rc.startHostEnvironment()(ctx)
|
||||
}
|
||||
return rc.startJobContainer()(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
func (rc *RunContext) IsHostEnv(ctx context.Context) bool {
|
||||
image := rc.platformImage(ctx)
|
||||
return strings.EqualFold(image, "-self-hosted")
|
||||
}
|
||||
|
||||
func (rc *RunContext) stopContainer() common.Executor {
|
||||
return rc.stopJobContainer()
|
||||
}
|
||||
@@ -370,16 +563,25 @@ func (rc *RunContext) steps() []*model.Step {
|
||||
|
||||
// Executor returns a pipeline executor for all the steps in the job
|
||||
func (rc *RunContext) Executor() common.Executor {
|
||||
var executor common.Executor
|
||||
|
||||
switch rc.Run.Job().Type() {
|
||||
case model.JobTypeDefault:
|
||||
executor = newJobExecutor(rc, &stepFactoryImpl{}, rc)
|
||||
case model.JobTypeReusableWorkflowLocal:
|
||||
executor = newLocalReusableWorkflowExecutor(rc)
|
||||
case model.JobTypeReusableWorkflowRemote:
|
||||
executor = newRemoteReusableWorkflowExecutor(rc)
|
||||
}
|
||||
|
||||
return func(ctx context.Context) error {
|
||||
isEnabled, err := rc.isEnabled(ctx)
|
||||
res, err := rc.isEnabled(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if isEnabled {
|
||||
return newJobExecutor(rc, &stepFactoryImpl{}, rc)(ctx)
|
||||
if res {
|
||||
return executor(ctx)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
@@ -421,7 +623,7 @@ func (rc *RunContext) options(ctx context.Context) string {
|
||||
job := rc.Run.Job()
|
||||
c := job.Container()
|
||||
if c == nil {
|
||||
return ""
|
||||
return rc.Config.ContainerOptions
|
||||
}
|
||||
|
||||
return c.Options
|
||||
@@ -439,6 +641,10 @@ func (rc *RunContext) isEnabled(ctx context.Context) (bool, error) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if job.Type() != model.JobTypeDefault {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
img := rc.platformImage(ctx)
|
||||
if img == "" {
|
||||
if job.RunsOn() == nil {
|
||||
@@ -466,26 +672,16 @@ func mergeMaps(maps ...map[string]string) map[string]string {
|
||||
|
||||
// deprecated: use createSimpleContainerName
|
||||
func createContainerName(parts ...string) string {
|
||||
name := make([]string, 0)
|
||||
name := strings.Join(parts, "-")
|
||||
pattern := regexp.MustCompile("[^a-zA-Z0-9]")
|
||||
partLen := (30 / len(parts)) - 1
|
||||
for i, part := range parts {
|
||||
if i == len(parts)-1 {
|
||||
name = append(name, pattern.ReplaceAllString(part, "-"))
|
||||
} else {
|
||||
// If any part has a '-<number>' on the end it is likely part of a matrix job.
|
||||
// Let's preserve the number to prevent clashes in container names.
|
||||
re := regexp.MustCompile("-[0-9]+$")
|
||||
num := re.FindStringSubmatch(part)
|
||||
if len(num) > 0 {
|
||||
name = append(name, trimToLen(pattern.ReplaceAllString(part, "-"), partLen-len(num[0])))
|
||||
name = append(name, num[0])
|
||||
} else {
|
||||
name = append(name, trimToLen(pattern.ReplaceAllString(part, "-"), partLen))
|
||||
}
|
||||
}
|
||||
}
|
||||
return strings.ReplaceAll(strings.Trim(strings.Join(name, "-"), "-"), "--", "-")
|
||||
name = pattern.ReplaceAllString(name, "-")
|
||||
name = strings.ReplaceAll(name, "--", "-")
|
||||
hash := sha256.Sum256([]byte(name))
|
||||
|
||||
// SHA256 is 64 hex characters. So trim name to 63 characters to make room for the hash and separator
|
||||
trimmedName := strings.Trim(trimToLen(name, 63), "-")
|
||||
|
||||
return fmt.Sprintf("%s-%x", trimmedName, hash)
|
||||
}
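The rewritten createContainerName drops the old per-part truncation in favour of sanitize-then-hash: the SHA-256 suffix keeps long names unique even after the readable prefix is cut to 63 characters. A standalone sketch of the new scheme; trimToLen is act's unexported helper, re-implemented here as a plain prefix cut:

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"regexp"
	"strings"
)

var nonAlnum = regexp.MustCompile("[^a-zA-Z0-9]")

// trimToLen is a stand-in for act's unexported helper of the same name.
func trimToLen(s string, n int) string {
	if n < 0 {
		n = 0
	}
	if len(s) > n {
		return s[:n]
	}
	return s
}

// containerName reproduces the hashed scheme from the new createContainerName:
// sanitize the joined parts, then append a SHA-256 suffix so distinct long
// names can no longer collide after truncation.
func containerName(parts ...string) string {
	name := strings.Join(parts, "-")
	name = nonAlnum.ReplaceAllString(name, "-")
	name = strings.ReplaceAll(name, "--", "-")
	hash := sha256.Sum256([]byte(name))
	// SHA-256 renders as 64 hex characters; trim the readable prefix to 63
	// characters to leave room for the hash and the separator.
	trimmed := strings.Trim(trimToLen(name, 63), "-")
	return fmt.Sprintf("%s-%x", trimmed, hash)
}

func main() {
	fmt.Println(containerName("WORKFLOW", "job-1", "(matrix: node 16)"))
}
```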

func createSimpleContainerName(parts ...string) string {
@@ -531,6 +727,10 @@ func (rc *RunContext) getStepsContext() map[string]*model.StepResult {
	return rc.StepResults
}

func (rc *RunContext) getVarsContext() map[string]string {
	return rc.Config.Vars
}

func (rc *RunContext) getGithubContext(ctx context.Context) *model.GithubContext {
	logger := common.Logger(ctx)
	ghc := &model.GithubContext{
@@ -542,11 +742,20 @@ func (rc *RunContext) getGithubContext(ctx context.Context) *model.GithubContext
		EventName: rc.Config.EventName,
		Action: rc.CurrentStep,
		Token: rc.Config.Token,
		Job: rc.Run.JobID,
		ActionPath: rc.ActionPath,
		RepositoryOwner: rc.Config.Env["GITHUB_REPOSITORY_OWNER"],
		RetentionDays: rc.Config.Env["GITHUB_RETENTION_DAYS"],
		RunnerPerflog: rc.Config.Env["RUNNER_PERFLOG"],
		RunnerTrackingID: rc.Config.Env["RUNNER_TRACKING_ID"],
		Repository: rc.Config.Env["GITHUB_REPOSITORY"],
		Ref: rc.Config.Env["GITHUB_REF"],
		Sha: rc.Config.Env["SHA_REF"],
		RefName: rc.Config.Env["GITHUB_REF_NAME"],
		RefType: rc.Config.Env["GITHUB_REF_TYPE"],
		BaseRef: rc.Config.Env["GITHUB_BASE_REF"],
		HeadRef: rc.Config.Env["GITHUB_HEAD_REF"],
		Workspace: rc.Config.Env["GITHUB_WORKSPACE"],
	}
	if rc.JobContainer != nil {
		ghc.EventPath = rc.JobContainer.GetActPath() + "/workflow/event.json"
@@ -575,6 +784,7 @@ func (rc *RunContext) getGithubContext(ctx context.Context) *model.GithubContext
		ghc.Actor = "nektos/act"
	}

	{ // Adapt to Gitea
		if preset := rc.Config.PresetGitHubContext; preset != nil {
			ghc.Event = preset.Event
			ghc.RunID = preset.RunID
@@ -593,39 +803,46 @@ func (rc *RunContext) getGithubContext(ctx context.Context) *model.GithubContext
			ghc.RetentionDays = preset.RetentionDays
			return ghc
		}

	repoPath := rc.Config.Workdir
	repo, err := git.FindGithubRepo(ctx, repoPath, rc.Config.GitHubInstance, rc.Config.RemoteName)
	if err != nil {
		logger.Warningf("unable to get git repo: %v", err)
	} else {
		ghc.Repository = repo
		if ghc.RepositoryOwner == "" {
			ghc.RepositoryOwner = strings.Split(repo, "/")[0]
		}
	}

	if rc.EventJSON != "" {
		err = json.Unmarshal([]byte(rc.EventJSON), &ghc.Event)
		err := json.Unmarshal([]byte(rc.EventJSON), &ghc.Event)
		if err != nil {
			logger.Errorf("Unable to Unmarshal event '%s': %v", rc.EventJSON, err)
		}
	}

	if ghc.EventName == "pull_request" || ghc.EventName == "pull_request_target" {
		ghc.BaseRef = asString(nestedMapLookup(ghc.Event, "pull_request", "base", "ref"))
		ghc.HeadRef = asString(nestedMapLookup(ghc.Event, "pull_request", "head", "ref"))
		ghc.SetBaseAndHeadRef()
	repoPath := rc.Config.Workdir
	ghc.SetRepositoryAndOwner(ctx, rc.Config.GitHubInstance, rc.Config.RemoteName, repoPath)
	if ghc.Ref == "" {
		ghc.SetRef(ctx, rc.Config.DefaultBranch, repoPath)
	}
	if ghc.Sha == "" {
		ghc.SetSha(ctx, repoPath)
	}

	ghc.SetRefAndSha(ctx, rc.Config.DefaultBranch, repoPath)
	ghc.SetRefTypeAndName()

	// https://docs.github.com/en/actions/learn-github-actions/environment-variables
	if strings.HasPrefix(ghc.Ref, "refs/tags/") {
		ghc.RefType = "tag"
		ghc.RefName = ghc.Ref[len("refs/tags/"):]
	} else if strings.HasPrefix(ghc.Ref, "refs/heads/") {
		ghc.RefType = "branch"
		ghc.RefName = ghc.Ref[len("refs/heads/"):]
	// defaults
	ghc.ServerURL = "https://github.com"
	ghc.APIURL = "https://api.github.com"
	ghc.GraphQLURL = "https://api.github.com/graphql"
	// per GHES
	if rc.Config.GitHubInstance != "github.com" {
		ghc.ServerURL = fmt.Sprintf("https://%s", rc.Config.GitHubInstance)
		ghc.APIURL = fmt.Sprintf("https://%s/api/v3", rc.Config.GitHubInstance)
		ghc.GraphQLURL = fmt.Sprintf("https://%s/api/graphql", rc.Config.GitHubInstance)
	}
	// allow to be overridden by user
	if rc.Config.Env["GITHUB_SERVER_URL"] != "" {
		ghc.ServerURL = rc.Config.Env["GITHUB_SERVER_URL"]
	}
	if rc.Config.Env["GITHUB_API_URL"] != "" {
		ghc.APIURL = rc.Config.Env["GITHUB_API_URL"]
	}
	if rc.Config.Env["GITHUB_GRAPHQL_URL"] != "" {
		ghc.GraphQLURL = rc.Config.Env["GITHUB_GRAPHQL_URL"]
	}

	return ghc
@@ -657,15 +874,6 @@ func isLocalCheckout(ghc *model.GithubContext, step *model.Step) bool {
	return true
}

func asString(v interface{}) string {
	if v == nil {
		return ""
	} else if s, ok := v.(string); ok {
		return s
	}
	return ""
}

func nestedMapLookup(m map[string]interface{}, ks ...string) (rval interface{}) {
	var ok bool

@@ -685,8 +893,6 @@ func nestedMapLookup(m map[string]interface{}, ks ...string) (rval interface{})

func (rc *RunContext) withGithubEnv(ctx context.Context, github *model.GithubContext, env map[string]string) map[string]string {
	env["CI"] = "true"
	env["GITHUB_ENV"] = rc.JobContainer.GetActPath() + "/workflow/envs.txt"
	env["GITHUB_PATH"] = rc.JobContainer.GetActPath() + "/workflow/paths.txt"
	env["GITHUB_WORKFLOW"] = github.Workflow
	env["GITHUB_RUN_ID"] = github.RunID
	env["GITHUB_RUN_NUMBER"] = github.RunNumber
@@ -705,27 +911,26 @@ func (rc *RunContext) withGithubEnv(ctx context.Context, github *model.GithubCon
	env["GITHUB_REF_NAME"] = github.RefName
	env["GITHUB_REF_TYPE"] = github.RefType
	env["GITHUB_TOKEN"] = github.Token
	env["GITHUB_SERVER_URL"] = "https://github.com"
	env["GITHUB_API_URL"] = "https://api.github.com"
	env["GITHUB_GRAPHQL_URL"] = "https://api.github.com/graphql"
	env["GITHUB_BASE_REF"] = github.BaseRef
	env["GITHUB_HEAD_REF"] = github.HeadRef
	env["GITHUB_JOB"] = rc.JobName
	env["GITHUB_JOB"] = github.Job
	env["GITHUB_REPOSITORY_OWNER"] = github.RepositoryOwner
	env["GITHUB_RETENTION_DAYS"] = github.RetentionDays
	env["RUNNER_PERFLOG"] = github.RunnerPerflog
	env["RUNNER_TRACKING_ID"] = github.RunnerTrackingID
	if rc.Config.GitHubInstance != "github.com" {
		hasProtocol := strings.HasPrefix(rc.Config.GitHubInstance, "http://") || strings.HasPrefix(rc.Config.GitHubInstance, "https://")
		if hasProtocol {
			env["GITHUB_SERVER_URL"] = rc.Config.GitHubInstance
			env["GITHUB_API_URL"] = fmt.Sprintf("%s/api/v1", rc.Config.GitHubInstance)
			env["GITHUB_GRAPHQL_URL"] = "" // disable graphql url because Gitea doesn't support that
		} else {
			env["GITHUB_SERVER_URL"] = fmt.Sprintf("https://%s", rc.Config.GitHubInstance)
			env["GITHUB_API_URL"] = fmt.Sprintf("https://%s/api/v1", rc.Config.GitHubInstance)
			env["GITHUB_GRAPHQL_URL"] = "" // disable graphql url because Gitea doesn't support that
	env["GITHUB_BASE_REF"] = github.BaseRef
	env["GITHUB_HEAD_REF"] = github.HeadRef
	env["GITHUB_SERVER_URL"] = github.ServerURL
	env["GITHUB_API_URL"] = github.APIURL
	env["GITHUB_GRAPHQL_URL"] = github.GraphQLURL

	{ // Adapt to Gitea
		instance := rc.Config.GitHubInstance
		if !strings.HasPrefix(instance, "http://") &&
			!strings.HasPrefix(instance, "https://") {
			instance = "https://" + instance
		}
		env["GITHUB_SERVER_URL"] = instance
		env["GITHUB_API_URL"] = instance + "/api/v1" // the version of Gitea is v1
		env["GITHUB_GRAPHQL_URL"] = "" // Gitea doesn't support graphql
	}
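The "Adapt to Gitea" block above normalizes the configured instance to an https URL when no scheme is given, points GITHUB_API_URL at Gitea's /api/v1, and clears GITHUB_GRAPHQL_URL because Gitea has no GraphQL endpoint. The same rule as a pure function:

```go
package main

import (
	"fmt"
	"strings"
)

// giteaURLs mirrors the "Adapt to Gitea" block above: default to https
// when the configured instance has no scheme, and derive the v1 API URL.
func giteaURLs(instance string) (serverURL, apiURL, graphqlURL string) {
	if !strings.HasPrefix(instance, "http://") &&
		!strings.HasPrefix(instance, "https://") {
		instance = "https://" + instance
	}
	// Gitea serves its REST API under /api/v1 and has no GraphQL endpoint.
	return instance, instance + "/api/v1", ""
}

func main() {
	s, a, g := giteaURLs("gitea.example.com")
	fmt.Printf("server=%s api=%s graphql=%q\n", s, a, g)
	// server=https://gitea.example.com api=https://gitea.example.com/api/v1 graphql=""
}
```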

	if rc.Config.ArtifactServerPath != "" {
@@ -754,7 +959,7 @@ func (rc *RunContext) withGithubEnv(ctx context.Context, github *model.GithubCon
func setActionRuntimeVars(rc *RunContext, env map[string]string) {
	actionsRuntimeURL := os.Getenv("ACTIONS_RUNTIME_URL")
	if actionsRuntimeURL == "" {
		actionsRuntimeURL = fmt.Sprintf("http://%s:%s/", common.GetOutboundIP().String(), rc.Config.ArtifactServerPort)
		actionsRuntimeURL = fmt.Sprintf("http://%s:%s/", rc.Config.ArtifactServerAddr, rc.Config.ArtifactServerPort)
	}
	env["ACTIONS_RUNTIME_URL"] = actionsRuntimeURL

@@ -797,3 +1002,26 @@ func (rc *RunContext) handleCredentials(ctx context.Context) (username, password

	return username, password, err
}

func (rc *RunContext) handleServiceCredentials(ctx context.Context, creds map[string]string) (username, password string, err error) {
	if creds == nil {
		return
	}
	if len(creds) != 2 {
		err = fmt.Errorf("invalid property count for key 'credentials:'")
		return
	}

	ee := rc.NewExpressionEvaluator(ctx)
	if username = ee.Interpolate(ctx, creds["username"]); username == "" {
		err = fmt.Errorf("failed to interpolate credentials.username")
		return
	}

	if password = ee.Interpolate(ctx, creds["password"]); password == "" {
		err = fmt.Errorf("failed to interpolate credentials.password")
		return
	}

	return
}

@@ -144,6 +144,7 @@ func TestRunContext_EvalBool(t *testing.T) {
	// Check github context
	{in: "github.actor == 'nektos/act'", out: true},
	{in: "github.actor == 'unknown'", out: false},
	{in: "github.job == 'job1'", out: true},
	// The special ACT flag
	{in: "${{ env.ACT }}", out: true},
	{in: "${{ !env.ACT }}", out: false},
@@ -364,6 +365,7 @@ func TestGetGitHubContext(t *testing.T) {
		StepResults: map[string]*model.StepResult{},
		OutputMappings: map[MappableOutput]MappableOutput{},
	}
	rc.Run.JobID = "job1"

	ghc := rc.getGithubContext(context.Background())

@@ -392,6 +394,7 @@ func TestGetGitHubContext(t *testing.T) {
	assert.Equal(t, ghc.RepositoryOwner, owner)
	assert.Equal(t, ghc.RunnerPerflog, "/dev/null")
	assert.Equal(t, ghc.Token, rc.Config.Secrets["GITHUB_TOKEN"])
	assert.Equal(t, ghc.Job, "job1")
}

func TestGetGithubContextRef(t *testing.T) {
@@ -410,7 +413,7 @@ func TestGetGithubContextRef(t *testing.T) {
	{event: "pull_request_target", json: `{"pull_request":{"base":{"ref": "main"}}}`, ref: "refs/heads/main"},
	{event: "deployment", json: `{"deployment": {"ref": "tag-name"}}`, ref: "tag-name"},
	{event: "deployment_status", json: `{"deployment": {"ref": "tag-name"}}`, ref: "tag-name"},
	{event: "release", json: `{"release": {"tag_name": "tag-name"}}`, ref: "tag-name"},
	{event: "release", json: `{"release": {"tag_name": "tag-name"}}`, ref: "refs/tags/tag-name"},
}

for _, data := range table {

@@ -2,9 +2,9 @@ package runner

import (
	"context"
	"encoding/json"
	"fmt"
	"os"
	"time"

	log "github.com/sirupsen/logrus"

@@ -32,6 +32,7 @@ type Config struct {
	LogOutput bool // log the output from docker run
	JSONLogger bool // use json or text logger
	Env map[string]string // env for containers
	Inputs map[string]string // manually passed action inputs
	Secrets map[string]string // list of secrets
	Token string // GitHub token
	InsecureSecrets bool // switch hiding output when printing to terminal
@@ -40,17 +41,20 @@ type Config struct {
	UsernsMode string // user namespace to use
	ContainerArchitecture string // Desired OS/architecture platform for running containers
	ContainerDaemonSocket string // Path to Docker daemon socket
	ContainerOptions string // Options for the job container
	UseGitIgnore bool // controls if paths in .gitignore should not be copied into container, default true
	GitHubInstance string // GitHub instance to use, default "github.com"
	ContainerCapAdd []string // list of kernel capabilities to add to the containers
	ContainerCapDrop []string // list of kernel capabilities to remove from the containers
	AutoRemove bool // controls if the container is automatically removed upon workflow completion
	ArtifactServerPath string // the path where the artifact server stores uploads
	ArtifactServerAddr string // the address the artifact server binds to
	ArtifactServerPort string // the port the artifact server binds to
	NoSkipCheckout bool // do not skip actions/checkout
	RemoteName string // remote name in local git repo config
	ReplaceGheActionWithGithubCom []string // Use actions from GitHub Enterprise instance to GitHub
	ReplaceGheActionTokenWithGithubCom string // Token of private action repo on GitHub.
	Matrix map[string]map[string]bool // Matrix config to run

	PresetGitHubContext *model.GithubContext // the preset github context, overrides some fields like DefaultBranch, Env, Secrets etc.
	EventJSON string // the content of JSON file to use for event.json in containers, overrides EventPath
@@ -59,11 +63,27 @@ type Config struct {
	ContainerNetworkMode string // the network mode of job containers
	DefaultActionInstance string // the default actions web site
	PlatformPicker func(labels []string) string // platform picker, it will take precedence over Platforms if isn't nil
	JobLoggerLevel *log.Level // the level of job logger
	Vars map[string]string // the list of variables set at the repository, environment, or organization levels.
}

// GetToken: Adapt to Gitea
func (c Config) GetToken() string {
	token := c.Secrets["GITHUB_TOKEN"]
	if c.Secrets["GITEA_TOKEN"] != "" {
		token = c.Secrets["GITEA_TOKEN"]
	}
	return token
}
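GetToken's precedence is simple: GITEA_TOKEN wins when present, otherwise GITHUB_TOKEN is used. A tiny mirror of that rule:

```go
package main

import "fmt"

// tokenFor mirrors Config.GetToken above: prefer GITEA_TOKEN when it is
// set, otherwise fall back to GITHUB_TOKEN.
func tokenFor(secrets map[string]string) string {
	if t := secrets["GITEA_TOKEN"]; t != "" {
		return t
	}
	return secrets["GITHUB_TOKEN"]
}

func main() {
	fmt.Println(tokenFor(map[string]string{"GITHUB_TOKEN": "gh", "GITEA_TOKEN": "gt"})) // gt
	fmt.Println(tokenFor(map[string]string{"GITHUB_TOKEN": "gh"}))                      // gh
}
```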

type caller struct {
	runContext *RunContext
}

type runnerImpl struct {
	config *Config
	eventJSON string
	caller *caller // the job calling this runner (caller of a reusable workflow)
}

// New Creates a new Runner
@@ -72,47 +92,61 @@ func New(runnerConfig *Config) (Runner, error) {
		config: runnerConfig,
	}

	return runner.configure()
}

func (runner *runnerImpl) configure() (Runner, error) {
	runner.eventJSON = "{}"
	if runnerConfig.EventJSON != "" {
		runner.eventJSON = runnerConfig.EventJSON
	} else if runnerConfig.EventPath != "" {
	if runner.config.EventJSON != "" {
		runner.eventJSON = runner.config.EventJSON
	} else if runner.config.EventPath != "" {
		log.Debugf("Reading event.json from %s", runner.config.EventPath)
		eventJSONBytes, err := os.ReadFile(runner.config.EventPath)
		if err != nil {
			return nil, err
		}
		runner.eventJSON = string(eventJSONBytes)
	} else if len(runner.config.Inputs) != 0 {
		eventMap := map[string]map[string]string{
			"inputs": runner.config.Inputs,
		}
		eventJSON, err := json.Marshal(eventMap)
		if err != nil {
			return nil, err
		}
		runner.eventJSON = string(eventJSON)
	}
	return runner, nil
}

// NewPlanExecutor ...
//
//nolint:gocyclo
func (runner *runnerImpl) NewPlanExecutor(plan *model.Plan) common.Executor {
	maxJobNameLen := 0

	stagePipeline := make([]common.Executor, 0)
	for i := range plan.Stages {
		s := i
		stage := plan.Stages[i]
		stagePipeline = append(stagePipeline, func(ctx context.Context) error {
			pipeline := make([]common.Executor, 0)
			for r, run := range stage.Runs {
			for _, run := range stage.Runs {
				stageExecutor := make([]common.Executor, 0)
				job := run.Job()

				if job.Uses != "" {
					return fmt.Errorf("reusable workflows are currently not supported (see https://github.com/nektos/act/issues/826 for updates)")
				}

				if job.Strategy != nil {
					strategyRc := runner.newRunContext(ctx, run, nil)
					if err := strategyRc.NewExpressionEvaluator(ctx).EvaluateYamlNode(ctx, &job.Strategy.RawMatrix); err != nil {
						log.Errorf("Error while evaluating matrix: %v", err)
					}
				}
				matrixes := job.GetMatrixes()

				var matrixes []map[string]interface{}
				if m, err := job.GetMatrixes(); err != nil {
					log.Errorf("Error while get job's matrix: %v", err)
				} else {
					matrixes = selectMatrixes(m, runner.config.Matrix)
				}
				log.Debugf("Final matrix after applying user inclusions '%v'", matrixes)

				maxParallel := 4
				if job.Strategy != nil {
					maxParallel = job.Strategy.MaxParallel
@@ -133,29 +167,8 @@ func (runner *runnerImpl) NewPlanExecutor(plan *model.Plan) common.Executor {
					maxJobNameLen = len(rc.String())
				}
				stageExecutor = append(stageExecutor, func(ctx context.Context) error {
					logger := common.Logger(ctx)
					jobName := fmt.Sprintf("%-*s", maxJobNameLen, rc.String())
					return rc.Executor().Finally(func(ctx context.Context) error {
						isLastRunningContainer := func(currentStage int, currentRun int) bool {
							return currentStage == len(plan.Stages)-1 && currentRun == len(stage.Runs)-1
						}

						if runner.config.AutoRemove && isLastRunningContainer(s, r) {
							var cancel context.CancelFunc
							if ctx.Err() == context.Canceled {
								ctx, cancel = context.WithTimeout(context.Background(), 5*time.Minute)
								defer cancel()
							}

							log.Infof("Cleaning up container for job %s", rc.JobName)

							if err := rc.stopJobContainer()(ctx); err != nil {
								logger.Errorf("Error while cleaning container: %v", err)
							}
						}

						return nil
					})(common.WithJobErrorContainer(WithJobLogger(ctx, rc.Run.JobID, jobName, rc.Config, &rc.Masks, matrix)))
					return rc.Executor()(common.WithJobErrorContainer(WithJobLogger(ctx, rc.Run.JobID, jobName, rc.Config, &rc.Masks, matrix)))
				})
			}
			pipeline = append(pipeline, common.NewParallelExecutor(maxParallel, stageExecutor...))
@@ -188,6 +201,25 @@ func handleFailure(plan *model.Plan) common.Executor {
	}
}

func selectMatrixes(originalMatrixes []map[string]interface{}, targetMatrixValues map[string]map[string]bool) []map[string]interface{} {
	matrixes := make([]map[string]interface{}, 0)
	for _, original := range originalMatrixes {
		flag := true
		for key, val := range original {
			if allowedVals, ok := targetMatrixValues[key]; ok {
				valToString := fmt.Sprintf("%v", val)
				if _, ok := allowedVals[valToString]; !ok {
					flag = false
				}
			}
		}
		if flag {
			matrixes = append(matrixes, original)
		}
	}
	return matrixes
}
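selectMatrixes is self-contained, so it can be exercised directly. Copied from the hunk above with a small driver showing how a user-supplied matrix filter (the shape built by TestRunMatrixWithUserDefinedInclusions further down) prunes the expanded matrix:

```go
package main

import "fmt"

// selectMatrixes is copied from the hunk above: keep only the matrix rows
// whose values are all allowed by the user-supplied filter.
func selectMatrixes(originalMatrixes []map[string]interface{}, targetMatrixValues map[string]map[string]bool) []map[string]interface{} {
	matrixes := make([]map[string]interface{}, 0)
	for _, original := range originalMatrixes {
		flag := true
		for key, val := range original {
			if allowedVals, ok := targetMatrixValues[key]; ok {
				if _, ok := allowedVals[fmt.Sprintf("%v", val)]; !ok {
					flag = false
				}
			}
		}
		if flag {
			matrixes = append(matrixes, original)
		}
	}
	return matrixes
}

func main() {
	all := []map[string]interface{}{
		{"node": 8, "os": "ubuntu-18.04"},
		{"node": 16, "os": "ubuntu-18.04"},
	}
	// Keys without an entry in the filter are unconstrained.
	filter := map[string]map[string]bool{"node": {"8": true}}
	fmt.Println(selectMatrixes(all, filter)) // [map[node:8 os:ubuntu-18.04]]
}
```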

func (runner *runnerImpl) newRunContext(ctx context.Context, run *model.Run, matrix map[string]interface{}) *RunContext {
	rc := &RunContext{
		Config: runner.config,
@@ -195,8 +227,10 @@ func (runner *runnerImpl) newRunContext(ctx context.Context, run *model.Run, mat
		EventJSON: runner.eventJSON,
		StepResults: make(map[string]*model.StepResult),
		Matrix: matrix,
		caller: runner.caller,
	}
	rc.ExprEval = rc.NewExpressionEvaluator(ctx)
	rc.Name = rc.ExprEval.Interpolate(ctx, run.String())

	return rc
}

@@ -1,8 +1,10 @@
package runner

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"runtime"
@@ -22,6 +24,7 @@ var (
	platforms map[string]string
	logLevel = log.DebugLevel
	workdir = "testdata"
	secrets map[string]string
)

func init() {
@@ -42,14 +45,103 @@ func init() {
	if wd, err := filepath.Abs(workdir); err == nil {
		workdir = wd
	}

	secrets = map[string]string{}
}

func TestNoWorkflowsFoundByPlanner(t *testing.T) {
	planner, err := model.NewWorkflowPlanner("res", true)
	assert.NoError(t, err)

	out := log.StandardLogger().Out
	var buf bytes.Buffer
	log.SetOutput(&buf)
	log.SetLevel(log.DebugLevel)
	plan, err := planner.PlanEvent("pull_request")
	assert.NotNil(t, plan)
	assert.NoError(t, err)
	assert.Contains(t, buf.String(), "no workflows found by planner")
	buf.Reset()
	plan, err = planner.PlanAll()
	assert.NotNil(t, plan)
	assert.NoError(t, err)
	assert.Contains(t, buf.String(), "no workflows found by planner")
	log.SetOutput(out)
}

func TestGraphMissingEvent(t *testing.T) {
	planner, err := model.NewWorkflowPlanner("testdata/issue-1595/no-event.yml", true)
	assert.NoError(t, err)

	out := log.StandardLogger().Out
	var buf bytes.Buffer
	log.SetOutput(&buf)
	log.SetLevel(log.DebugLevel)

	plan, err := planner.PlanEvent("push")
	assert.NoError(t, err)
	assert.NotNil(t, plan)
	assert.Equal(t, 0, len(plan.Stages))

	assert.Contains(t, buf.String(), "no events found for workflow: no-event.yml")
	log.SetOutput(out)
}

func TestGraphMissingFirst(t *testing.T) {
	planner, err := model.NewWorkflowPlanner("testdata/issue-1595/no-first.yml", true)
	assert.NoError(t, err)

	plan, err := planner.PlanEvent("push")
	assert.EqualError(t, err, "unable to build dependency graph for no first (no-first.yml)")
	assert.NotNil(t, plan)
	assert.Equal(t, 0, len(plan.Stages))
}

func TestGraphWithMissing(t *testing.T) {
	planner, err := model.NewWorkflowPlanner("testdata/issue-1595/missing.yml", true)
	assert.NoError(t, err)

	out := log.StandardLogger().Out
	var buf bytes.Buffer
	log.SetOutput(&buf)
	log.SetLevel(log.DebugLevel)

	plan, err := planner.PlanEvent("push")
	assert.NotNil(t, plan)
	assert.Equal(t, 0, len(plan.Stages))
	assert.EqualError(t, err, "unable to build dependency graph for missing (missing.yml)")
	assert.Contains(t, buf.String(), "unable to build dependency graph for missing (missing.yml)")
	log.SetOutput(out)
}

func TestGraphWithSomeMissing(t *testing.T) {
	log.SetLevel(log.DebugLevel)

	planner, err := model.NewWorkflowPlanner("testdata/issue-1595/", true)
	assert.NoError(t, err)

	out := log.StandardLogger().Out
	var buf bytes.Buffer
	log.SetOutput(&buf)
	log.SetLevel(log.DebugLevel)

	plan, err := planner.PlanAll()
	assert.Error(t, err, "unable to build dependency graph for no first (no-first.yml)")
	assert.NotNil(t, plan)
	assert.Equal(t, 1, len(plan.Stages))
	assert.Contains(t, buf.String(), "unable to build dependency graph for missing (missing.yml)")
	assert.Contains(t, buf.String(), "unable to build dependency graph for no first (no-first.yml)")
	log.SetOutput(out)
}

func TestGraphEvent(t *testing.T) {
	planner, err := model.NewWorkflowPlanner("testdata/basic", true)
	assert.Nil(t, err)
	assert.NoError(t, err)

	plan := planner.PlanEvent("push")
	assert.Nil(t, err)
	plan, err := planner.PlanEvent("push")
	assert.NoError(t, err)
	assert.NotNil(t, plan)
	assert.NotNil(t, plan.Stages)
	assert.Equal(t, len(plan.Stages), 3, "stages")
	assert.Equal(t, len(plan.Stages[0].Runs), 1, "stage0.runs")
	assert.Equal(t, len(plan.Stages[1].Runs), 1, "stage1.runs")
@@ -58,8 +150,10 @@ func TestGraphEvent(t *testing.T) {
	assert.Equal(t, plan.Stages[1].Runs[0].JobID, "build", "jobid")
	assert.Equal(t, plan.Stages[2].Runs[0].JobID, "test", "jobid")

	plan = planner.PlanEvent("release")
	assert.Equal(t, len(plan.Stages), 0, "stages")
	plan, err = planner.PlanEvent("release")
	assert.NoError(t, err)
	assert.NotNil(t, plan)
	assert.Equal(t, 0, len(plan.Stages))
}

type TestJobFileInfo struct {
@@ -68,6 +162,7 @@ type TestJobFileInfo struct {
	eventName string
	errorMessage string
	platforms map[string]string
	secrets map[string]string
}

func (j *TestJobFileInfo) runTest(ctx context.Context, t *testing.T, cfg *Config) {
@@ -88,8 +183,10 @@ func (j *TestJobFileInfo) runTest(ctx context.Context, t *testing.T, cfg *Config
		ReuseContainers: false,
		Env: cfg.Env,
		Secrets: cfg.Secrets,
		Inputs: cfg.Inputs,
		GitHubInstance: "github.com",
		ContainerArchitecture: cfg.ContainerArchitecture,
		Matrix: cfg.Matrix,
	}

	runner, err := New(runnerConfig)
@@ -98,14 +195,16 @@ func (j *TestJobFileInfo) runTest(ctx context.Context, t *testing.T, cfg *Config
	planner, err := model.NewWorkflowPlanner(fullWorkflowPath, true)
	assert.Nil(t, err, fullWorkflowPath)

	plan := planner.PlanEvent(j.eventName)

	plan, err := planner.PlanEvent(j.eventName)
	assert.True(t, (err == nil) != (plan == nil), "PlanEvent should return either a plan or an error")
	if err == nil && plan != nil {
		err = runner.NewPlanExecutor(plan)(ctx)
		if j.errorMessage == "" {
			assert.Nil(t, err, fullWorkflowPath)
		} else {
			assert.Error(t, err, j.errorMessage)
		}
	}

	fmt.Println("::endgroup::")
}
@@ -119,81 +218,95 @@ func TestRunEvent(t *testing.T) {

	tables := []TestJobFileInfo{
		// Shells
		{workdir, "shells/defaults", "push", "", platforms},
		{workdir, "shells/defaults", "push", "", platforms, secrets},
		// TODO: figure out why it fails
		// {workdir, "shells/custom", "push", "", map[string]string{"ubuntu-latest": "catthehacker/ubuntu:pwsh-latest"}, }, // custom image with pwsh
		{workdir, "shells/pwsh", "push", "", map[string]string{"ubuntu-latest": "catthehacker/ubuntu:pwsh-latest"}}, // custom image with pwsh
		{workdir, "shells/bash", "push", "", platforms},
		{workdir, "shells/python", "push", "", map[string]string{"ubuntu-latest": "node:16-buster"}}, // slim doesn't have python
		{workdir, "shells/sh", "push", "", platforms},
		{workdir, "shells/pwsh", "push", "", map[string]string{"ubuntu-latest": "catthehacker/ubuntu:pwsh-latest"}, secrets}, // custom image with pwsh
		{workdir, "shells/bash", "push", "", platforms, secrets},
		{workdir, "shells/python", "push", "", map[string]string{"ubuntu-latest": "node:16-buster"}, secrets}, // slim doesn't have python
		{workdir, "shells/sh", "push", "", platforms, secrets},

		// Local action
		{workdir, "local-action-docker-url", "push", "", platforms},
		{workdir, "local-action-dockerfile", "push", "", platforms},
		{workdir, "local-action-via-composite-dockerfile", "push", "", platforms},
		{workdir, "local-action-js", "push", "", platforms},
		{workdir, "local-action-docker-url", "push", "", platforms, secrets},
		{workdir, "local-action-dockerfile", "push", "", platforms, secrets},
		{workdir, "local-action-via-composite-dockerfile", "push", "", platforms, secrets},
		{workdir, "local-action-js", "push", "", platforms, secrets},

		// Uses
		{workdir, "uses-composite", "push", "", platforms},
		{workdir, "uses-composite-with-error", "push", "Job 'failing-composite-action' failed", platforms},
		{workdir, "uses-nested-composite", "push", "", platforms},
		{workdir, "remote-action-composite-js-pre-with-defaults", "push", "", platforms},
		{workdir, "uses-workflow", "push", "reusable workflows are currently not supported (see https://github.com/nektos/act/issues/826 for updates)", platforms},
		{workdir, "uses-docker-url", "push", "", platforms},
		{workdir, "act-composite-env-test", "push", "", platforms},
		{workdir, "uses-composite", "push", "", platforms, secrets},
		{workdir, "uses-composite-with-error", "push", "Job 'failing-composite-action' failed", platforms, secrets},
		{workdir, "uses-nested-composite", "push", "", platforms, secrets},
		{workdir, "remote-action-composite-js-pre-with-defaults", "push", "", platforms, secrets},
		{workdir, "uses-workflow", "push", "", platforms, map[string]string{"secret": "keep_it_private"}},
		{workdir, "uses-workflow", "pull_request", "", platforms, map[string]string{"secret": "keep_it_private"}},
		{workdir, "uses-docker-url", "push", "", platforms, secrets},
		{workdir, "act-composite-env-test", "push", "", platforms, secrets},

		// Eval
		{workdir, "evalmatrix", "push", "", platforms},
		{workdir, "evalmatrixneeds", "push", "", platforms},
		{workdir, "evalmatrixneeds2", "push", "", platforms},
		{workdir, "evalmatrix-merge-map", "push", "", platforms},
		{workdir, "evalmatrix-merge-array", "push", "", platforms},
		{workdir, "issue-1195", "push", "", platforms},
		{workdir, "evalmatrix", "push", "", platforms, secrets},
		{workdir, "evalmatrixneeds", "push", "", platforms, secrets},
		{workdir, "evalmatrixneeds2", "push", "", platforms, secrets},
		{workdir, "evalmatrix-merge-map", "push", "", platforms, secrets},
		{workdir, "evalmatrix-merge-array", "push", "", platforms, secrets},
		{workdir, "issue-1195", "push", "", platforms, secrets},

		{workdir, "basic", "push", "", platforms},
		{workdir, "fail", "push", "exit with `FAILURE`: 1", platforms},
		{workdir, "runs-on", "push", "", platforms},
		{workdir, "checkout", "push", "", platforms},
		{workdir, "job-container", "push", "", platforms},
		{workdir, "job-container-non-root", "push", "", platforms},
		{workdir, "job-container-invalid-credentials", "push", "failed to handle credentials: failed to interpolate container.credentials.password", platforms},
		{workdir, "container-hostname", "push", "", platforms},
		{workdir, "remote-action-docker", "push", "", platforms},
		{workdir, "remote-action-js", "push", "", platforms},
		{workdir, "remote-action-js", "push", "", map[string]string{"ubuntu-latest": "catthehacker/ubuntu:runner-latest"}}, // Test if this works with non root container
		{workdir, "matrix", "push", "", platforms},
		{workdir, "matrix-include-exclude", "push", "", platforms},
		{workdir, "commands", "push", "", platforms},
		{workdir, "workdir", "push", "", platforms},
		{workdir, "defaults-run", "push", "", platforms},
		{workdir, "composite-fail-with-output", "push", "", platforms},
		{workdir, "issue-597", "push", "", platforms},
		{workdir, "issue-598", "push", "", platforms},
		{workdir, "if-env-act", "push", "", platforms},
		{workdir, "env-and-path", "push", "", platforms},
		{workdir, "non-existent-action", "push", "Job 'nopanic' failed", platforms},
		{workdir, "outputs", "push", "", platforms},
		{workdir, "networking", "push", "", platforms},
		{workdir, "steps-context/conclusion", "push", "", platforms},
		{workdir, "steps-context/outcome", "push", "", platforms},
		{workdir, "job-status-check", "push", "job 'fail' failed", platforms},
		{workdir, "if-expressions", "push", "Job 'mytest' failed", platforms},
		{workdir, "actions-environment-and-context-tests", "push", "", platforms},
		{workdir, "uses-action-with-pre-and-post-step", "push", "", platforms},
		{workdir, "evalenv", "push", "", platforms},
		{workdir, "ensure-post-steps", "push", "Job 'second-post-step-should-fail' failed", platforms},
		{workdir, "workflow_dispatch", "workflow_dispatch", "", platforms},
		{workdir, "workflow_dispatch_no_inputs_mapping", "workflow_dispatch", "", platforms},
		{workdir, "workflow_dispatch-scalar", "workflow_dispatch", "", platforms},
		{workdir, "workflow_dispatch-scalar-composite-action", "workflow_dispatch", "", platforms},
		{"../model/testdata", "strategy", "push", "", platforms}, // TODO: move all testdata into pkg so we can validate it with planner and runner
		// {"testdata", "issue-228", "push", "", platforms, }, // TODO [igni]: Remove this once everything passes
		{"../model/testdata", "container-volumes", "push", "", platforms},
		{workdir, "basic", "push", "", platforms, secrets},
		{workdir, "fail", "push", "exit with `FAILURE`: 1", platforms, secrets},
		{workdir, "runs-on", "push", "", platforms, secrets},
		{workdir, "checkout", "push", "", platforms, secrets},
		{workdir, "job-container", "push", "", platforms, secrets},
		{workdir, "job-container-non-root", "push", "", platforms, secrets},
		{workdir, "job-container-invalid-credentials", "push", "failed to handle credentials: failed to interpolate container.credentials.password", platforms, secrets},
		{workdir, "container-hostname", "push", "", platforms, secrets},
		{workdir, "remote-action-docker", "push", "", platforms, secrets},
		{workdir, "remote-action-js", "push", "", platforms, secrets},
		{workdir, "remote-action-js-node-user", "push", "", platforms, secrets}, // Test if this works with non root container
		{workdir, "matrix", "push", "", platforms, secrets},
		{workdir, "matrix-include-exclude", "push", "", platforms, secrets},
		{workdir, "matrix-exitcode", "push", "Job 'test' failed", platforms, secrets},
		{workdir, "commands", "push", "", platforms, secrets},
		{workdir, "workdir", "push", "", platforms, secrets},
		{workdir, "defaults-run", "push", "", platforms, secrets},
		{workdir, "composite-fail-with-output", "push", "", platforms, secrets},
		{workdir, "issue-597", "push", "", platforms, secrets},
		{workdir, "issue-598", "push", "", platforms, secrets},
		{workdir, "if-env-act", "push", "", platforms, secrets},
		{workdir, "env-and-path", "push", "", platforms, secrets},
		{workdir, "environment-files", "push", "", platforms, secrets},
		{workdir, "GITHUB_STATE", "push", "", platforms, secrets},
		{workdir, "environment-files-parser-bug", "push", "", platforms, secrets},
		{workdir, "non-existent-action", "push", "Job 'nopanic' failed", platforms, secrets},
		{workdir, "outputs", "push", "", platforms, secrets},
		{workdir, "networking", "push", "", platforms, secrets},
		{workdir, "steps-context/conclusion", "push", "", platforms, secrets},
		{workdir, "steps-context/outcome", "push", "", platforms, secrets},
		{workdir, "job-status-check", "push", "job 'fail' failed", platforms, secrets},
		{workdir, "if-expressions", "push", "Job 'mytest' failed", platforms, secrets},
		{workdir, "actions-environment-and-context-tests", "push", "", platforms, secrets},
		{workdir, "uses-action-with-pre-and-post-step", "push", "", platforms, secrets},
		{workdir, "evalenv", "push", "", platforms, secrets},
		{workdir, "docker-action-custom-path", "push", "", platforms, secrets},
		{workdir, "GITHUB_ENV-use-in-env-ctx", "push", "", platforms, secrets},
		{workdir, "ensure-post-steps", "push", "Job 'second-post-step-should-fail' failed", platforms, secrets},
		{workdir, "workflow_dispatch", "workflow_dispatch", "", platforms, secrets},
		{workdir, "workflow_dispatch_no_inputs_mapping", "workflow_dispatch", "", platforms, secrets},
		{workdir, "workflow_dispatch-scalar", "workflow_dispatch", "", platforms, secrets},
		{workdir, "workflow_dispatch-scalar-composite-action", "workflow_dispatch", "", platforms, secrets},
		{workdir, "job-needs-context-contains-result", "push", "", platforms, secrets},
		{"../model/testdata", "strategy", "push", "", platforms, secrets}, // TODO: move all testdata into pkg so we can validate it with planner and runner
		{"../model/testdata", "container-volumes", "push", "", platforms, secrets},
		{workdir, "path-handling", "push", "", platforms, secrets},
		{workdir, "do-not-leak-step-env-in-composite", "push", "", platforms, secrets},
		{workdir, "set-env-step-env-override", "push", "", platforms, secrets},
		{workdir, "set-env-new-env-file-per-step", "push", "", platforms, secrets},
		{workdir, "no-panic-on-invalid-composite-action", "push", "jobs failed due to invalid action", platforms, secrets},
	}

	for _, table := range tables {
		t.Run(table.workflowPath, func(t *testing.T) {
			config := &Config{}
			config := &Config{
				Secrets: table.secrets,
			}

			eventFile := filepath.Join(workdir, table.workflowPath, "event.json")
			if _, err := os.Stat(eventFile); err == nil {
@@ -221,51 +334,51 @@ func TestRunEventHostEnvironment(t *testing.T) {

	tables = append(tables, []TestJobFileInfo{
		// Shells
		{workdir, "shells/defaults", "push", "", platforms},
		{workdir, "shells/pwsh", "push", "", platforms},
		{workdir, "shells/bash", "push", "", platforms},
		{workdir, "shells/python", "push", "", platforms},
		{workdir, "shells/sh", "push", "", platforms},
		{workdir, "shells/defaults", "push", "", platforms, secrets},
		{workdir, "shells/pwsh", "push", "", platforms, secrets},
		{workdir, "shells/bash", "push", "", platforms, secrets},
		{workdir, "shells/python", "push", "", platforms, secrets},
		{workdir, "shells/sh", "push", "", platforms, secrets},

		// Local action
		{workdir, "local-action-js", "push", "", platforms},
		{workdir, "local-action-js", "push", "", platforms, secrets},

		// Uses
		{workdir, "uses-composite", "push", "", platforms},
		{workdir, "uses-composite-with-error", "push", "Job 'failing-composite-action' failed", platforms},
		{workdir, "uses-nested-composite", "push", "", platforms},
		{workdir, "act-composite-env-test", "push", "", platforms},
		{workdir, "uses-composite", "push", "", platforms, secrets},
		{workdir, "uses-composite-with-error", "push", "Job 'failing-composite-action' failed", platforms, secrets},
		{workdir, "uses-nested-composite", "push", "", platforms, secrets},
		{workdir, "act-composite-env-test", "push", "", platforms, secrets},

		// Eval
		{workdir, "evalmatrix", "push", "", platforms},
		{workdir, "evalmatrixneeds", "push", "", platforms},
		{workdir, "evalmatrixneeds2", "push", "", platforms},
		{workdir, "evalmatrix-merge-map", "push", "", platforms},
		{workdir, "evalmatrix-merge-array", "push", "", platforms},
		{workdir, "issue-1195", "push", "", platforms},
		{workdir, "evalmatrix", "push", "", platforms, secrets},
		{workdir, "evalmatrixneeds", "push", "", platforms, secrets},
		{workdir, "evalmatrixneeds2", "push", "", platforms, secrets},
		{workdir, "evalmatrix-merge-map", "push", "", platforms, secrets},
		{workdir, "evalmatrix-merge-array", "push", "", platforms, secrets},
		{workdir, "issue-1195", "push", "", platforms, secrets},

		{workdir, "fail", "push", "exit with `FAILURE`: 1", platforms},
		{workdir, "runs-on", "push", "", platforms},
		{workdir, "checkout", "push", "", platforms},
		{workdir, "remote-action-js", "push", "", platforms},
		{workdir, "matrix", "push", "", platforms},
		{workdir, "matrix-include-exclude", "push", "", platforms},
		{workdir, "commands", "push", "", platforms},
		{workdir, "defaults-run", "push", "", platforms},
		{workdir, "composite-fail-with-output", "push", "", platforms},
		{workdir, "issue-597", "push", "", platforms},
		{workdir, "issue-598", "push", "", platforms},
		{workdir, "if-env-act", "push", "", platforms},
		{workdir, "env-and-path", "push", "", platforms},
		{workdir, "non-existent-action", "push", "Job 'nopanic' failed", platforms},
		{workdir, "outputs", "push", "", platforms},
		{workdir, "steps-context/conclusion", "push", "", platforms},
		{workdir, "steps-context/outcome", "push", "", platforms},
		{workdir, "job-status-check", "push", "job 'fail' failed", platforms},
		{workdir, "if-expressions", "push", "Job 'mytest' failed", platforms},
		{workdir, "uses-action-with-pre-and-post-step", "push", "", platforms},
		{workdir, "evalenv", "push", "", platforms},
		{workdir, "ensure-post-steps", "push", "Job 'second-post-step-should-fail' failed", platforms},
		{workdir, "fail", "push", "exit with `FAILURE`: 1", platforms, secrets},
		{workdir, "runs-on", "push", "", platforms, secrets},
		{workdir, "checkout", "push", "", platforms, secrets},
		{workdir, "remote-action-js", "push", "", platforms, secrets},
		{workdir, "matrix", "push", "", platforms, secrets},
		{workdir, "matrix-include-exclude", "push", "", platforms, secrets},
		{workdir, "commands", "push", "", platforms, secrets},
		{workdir, "defaults-run", "push", "", platforms, secrets},
		{workdir, "composite-fail-with-output", "push", "", platforms, secrets},
		{workdir, "issue-597", "push", "", platforms, secrets},
		{workdir, "issue-598", "push", "", platforms, secrets},
		{workdir, "if-env-act", "push", "", platforms, secrets},
		{workdir, "env-and-path", "push", "", platforms, secrets},
		{workdir, "non-existent-action", "push", "Job 'nopanic' failed", platforms, secrets},
		{workdir, "outputs", "push", "", platforms, secrets},
		{workdir, "steps-context/conclusion", "push", "", platforms, secrets},
		{workdir, "steps-context/outcome", "push", "", platforms, secrets},
		{workdir, "job-status-check", "push", "job 'fail' failed", platforms, secrets},
		{workdir, "if-expressions", "push", "Job 'mytest' failed", platforms, secrets},
		{workdir, "uses-action-with-pre-and-post-step", "push", "", platforms, secrets},
		{workdir, "evalenv", "push", "", platforms, secrets},
		{workdir, "ensure-post-steps", "push", "Job 'second-post-step-should-fail' failed", platforms, secrets},
	}...)
}
if runtime.GOOS == "windows" {
@@ -274,16 +387,22 @@ func TestRunEventHostEnvironment(t *testing.T) {
}

	tables = append(tables, []TestJobFileInfo{
		{workdir, "windows-prepend-path", "push", "", platforms},
		{workdir, "windows-add-env", "push", "", platforms},
		{workdir, "windows-prepend-path", "push", "", platforms, secrets},
		{workdir, "windows-add-env", "push", "", platforms, secrets},
	}...)
} else {
	platforms := map[string]string{
		"self-hosted": "-self-hosted",
		"ubuntu-latest": "-self-hosted",
	}

	tables = append(tables, []TestJobFileInfo{
		{workdir, "nix-prepend-path", "push", "", platforms},
		{workdir, "nix-prepend-path", "push", "", platforms, secrets},
		{workdir, "inputs-via-env-context", "push", "", platforms, secrets},
		{workdir, "do-not-leak-step-env-in-composite", "push", "", platforms, secrets},
		{workdir, "set-env-step-env-override", "push", "", platforms, secrets},
		{workdir, "set-env-new-env-file-per-step", "push", "", platforms, secrets},
		{workdir, "no-panic-on-invalid-composite-action", "push", "jobs failed due to invalid action", platforms, secrets},
	}...)
}

@@ -303,17 +422,17 @@ func TestDryrunEvent(t *testing.T) {

	tables := []TestJobFileInfo{
		// Shells
		{workdir, "shells/defaults", "push", "", platforms},
		{workdir, "shells/pwsh", "push", "", map[string]string{"ubuntu-latest": "catthehacker/ubuntu:pwsh-latest"}}, // custom image with pwsh
		{workdir, "shells/bash", "push", "", platforms},
		{workdir, "shells/python", "push", "", map[string]string{"ubuntu-latest": "node:16-buster"}}, // slim doesn't have python
		{workdir, "shells/sh", "push", "", platforms},
		{workdir, "shells/defaults", "push", "", platforms, secrets},
		{workdir, "shells/pwsh", "push", "", map[string]string{"ubuntu-latest": "catthehacker/ubuntu:pwsh-latest"}, secrets}, // custom image with pwsh
		{workdir, "shells/bash", "push", "", platforms, secrets},
		{workdir, "shells/python", "push", "", map[string]string{"ubuntu-latest": "node:16-buster"}, secrets}, // slim doesn't have python
		{workdir, "shells/sh", "push", "", platforms, secrets},

		// Local action
		{workdir, "local-action-docker-url", "push", "", platforms},
		{workdir, "local-action-dockerfile", "push", "", platforms},
		{workdir, "local-action-via-composite-dockerfile", "push", "", platforms},
		{workdir, "local-action-js", "push", "", platforms},
		{workdir, "local-action-docker-url", "push", "", platforms, secrets},
		{workdir, "local-action-dockerfile", "push", "", platforms, secrets},
		{workdir, "local-action-via-composite-dockerfile", "push", "", platforms, secrets},
		{workdir, "local-action-js", "push", "", platforms, secrets},
	}

	for _, table := range tables {
@@ -323,6 +442,30 @@ func TestDryrunEvent(t *testing.T) {
	}
}

func TestDockerActionForcePullForceRebuild(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test")
	}

	ctx := context.Background()

	config := &Config{
		ForcePull: true,
		ForceRebuild: true,
	}

	tables := []TestJobFileInfo{
		{workdir, "local-action-dockerfile", "push", "", platforms, secrets},
		{workdir, "local-action-via-composite-dockerfile", "push", "", platforms, secrets},
	}

	for _, table := range tables {
		t.Run(table.workflowPath, func(t *testing.T) {
			table.runTest(ctx, t, config)
		})
	}
}

func TestRunDifferentArchitecture(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test")
@@ -339,6 +482,17 @@ func TestRunDifferentArchitecture(t *testing.T) {
	tjfi.runTest(context.Background(), t, &Config{ContainerArchitecture: "linux/arm64"})
}

type maskJobLoggerFactory struct {
	Output bytes.Buffer
}

func (f *maskJobLoggerFactory) WithJobLogger() *log.Logger {
	logger := log.New()
	logger.SetOutput(io.MultiWriter(&f.Output, os.Stdout))
	logger.SetLevel(log.DebugLevel)
	return logger
}

func TestMaskValues(t *testing.T) {
	assertNoSecret := func(text string, secret string) {
		index := strings.Index(text, "composite secret")
@@ -362,9 +516,9 @@ func TestMaskValues(t *testing.T) {
		platforms: platforms,
	}

	output := captureOutput(t, func() {
		tjfi.runTest(context.Background(), t, &Config{})
	})
	logger := &maskJobLoggerFactory{}
	tjfi.runTest(WithJobLoggerFactory(common.WithLogger(context.Background(), logger.WithJobLogger()), logger), t, &Config{})
	output := logger.Output.String()

	assertNoSecret(output, "secret value")
	assertNoSecret(output, "YWJjCg==")
@@ -392,6 +546,64 @@ func TestRunEventSecrets(t *testing.T) {
	tjfi.runTest(context.Background(), t, &Config{Secrets: secrets, Env: env})
}

func TestRunWithService(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test")
	}

	log.SetLevel(log.DebugLevel)
	ctx := context.Background()

	platforms := map[string]string{
		"ubuntu-latest": "node:12.20.1-buster-slim",
	}

	workflowPath := "services"
	eventName := "push"

	workdir, err := filepath.Abs("testdata")
	assert.NoError(t, err, workflowPath)

	runnerConfig := &Config{
		Workdir: workdir,
		EventName: eventName,
		Platforms: platforms,
		ReuseContainers: false,
	}
	runner, err := New(runnerConfig)
	assert.NoError(t, err, workflowPath)

	planner, err := model.NewWorkflowPlanner(fmt.Sprintf("testdata/%s", workflowPath), true)
	assert.NoError(t, err, workflowPath)

	plan, err := planner.PlanEvent(eventName)
	assert.NoError(t, err, workflowPath)

	err = runner.NewPlanExecutor(plan)(ctx)
	assert.NoError(t, err, workflowPath)
}

func TestRunActionInputs(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test")
	}
	workflowPath := "input-from-cli"

	tjfi := TestJobFileInfo{
		workdir: workdir,
		workflowPath: workflowPath,
		eventName: "workflow_dispatch",
		errorMessage: "",
		platforms: platforms,
	}

	inputs := map[string]string{
		"SOME_INPUT": "input",
	}

	tjfi.runTest(context.Background(), t, &Config{Inputs: inputs})
}

func TestRunEventPullRequest(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test")
@@ -409,3 +621,30 @@ func TestRunEventPullRequest(t *testing.T) {

	tjfi.runTest(context.Background(), t, &Config{EventPath: filepath.Join(workdir, workflowPath, "event.json")})
}

func TestRunMatrixWithUserDefinedInclusions(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test")
	}
	workflowPath := "matrix-with-user-inclusions"

	tjfi := TestJobFileInfo{
		workdir: workdir,
		workflowPath: workflowPath,
		eventName: "push",
		errorMessage: "",
		platforms: platforms,
	}

	matrix := map[string]map[string]bool{
		"node": {
			"8": true,
			"8.x": true,
		},
		"os": {
			"ubuntu-18.04": true,
		},
	}

	tjfi.runTest(context.Background(), t, &Config{Matrix: matrix})
}

@@ -44,16 +44,16 @@ func (s stepStage) String() string {
	return "Unknown"
}

func (s stepStage) getStepName(stepModel *model.Step) string {
	switch s {
	case stepStagePre:
		return fmt.Sprintf("pre-%s", stepModel.ID)
	case stepStageMain:
		return stepModel.ID
	case stepStagePost:
		return fmt.Sprintf("post-%s", stepModel.ID)
func processRunnerEnvFileCommand(ctx context.Context, fileName string, rc *RunContext, setter func(context.Context, map[string]string, string)) error {
	env := map[string]string{}
	err := rc.JobContainer.UpdateFromEnv(path.Join(rc.JobContainer.GetActPath(), fileName), &env)(ctx)
	if err != nil {
		return err
	}
	return "unknown"
	for k, v := range env {
		setter(ctx, map[string]string{"name": k}, v)
	}
	return nil
}
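processRunnerEnvFileCommand delegates the actual parsing to the container's UpdateFromEnv, which is not shown in this diff. To illustrate the file format involved, here is a hedged sketch of the simple name=value form of GitHub Actions environment files (the real format additionally supports multiline values via a name<<DELIMITER heredoc):

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

// parseEnvFile is an illustrative parser for the simple `name=value` form
// of runner environment files (GITHUB_ENV, GITHUB_OUTPUT, GITHUB_STATE).
// The container's UpdateFromEnv is assumed to produce a map of this shape.
func parseEnvFile(content string) map[string]string {
	env := map[string]string{}
	s := bufio.NewScanner(strings.NewReader(content))
	for s.Scan() {
		line := s.Text()
		if line == "" {
			continue
		}
		if k, v, ok := strings.Cut(line, "="); ok {
			env[k] = v
		}
	}
	return env
}

func main() {
	env := parseEnvFile("FOO=bar\nBAZ=qux\n")
	for k, v := range env {
		// Each pair would be applied via the setter, e.g. rc.setEnv.
		fmt.Printf("%s=%s\n", k, v)
	}
}
```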

func runStepExecutor(step step, stage stepStage, executor common.Executor) common.Executor {
@@ -63,13 +63,16 @@ func runStepExecutor(step step, stage stepStage, executor common.Executor) commo
		stepModel := step.getStepModel()

		ifExpression := step.getIfExpression(ctx, stage)
		rc.CurrentStep = stage.getStepName(stepModel)
		rc.CurrentStep = stepModel.ID

		rc.StepResults[rc.CurrentStep] = &model.StepResult{
		stepResult := &model.StepResult{
			Outcome: model.StepStatusSuccess,
			Conclusion: model.StepStatusSuccess,
			Outputs: make(map[string]string),
		}
		if stage == stepStageMain {
			rc.StepResults[rc.CurrentStep] = stepResult
		}

		err := setupEnv(ctx, step)
		if err != nil {
@@ -78,15 +81,15 @@

		runStep, err := isStepEnabled(ctx, ifExpression, step, stage)
		if err != nil {
			rc.StepResults[rc.CurrentStep].Conclusion = model.StepStatusFailure
			rc.StepResults[rc.CurrentStep].Outcome = model.StepStatusFailure
			stepResult.Conclusion = model.StepStatusFailure
			stepResult.Outcome = model.StepStatusFailure
			return err
		}

		if !runStep {
			rc.StepResults[rc.CurrentStep].Conclusion = model.StepStatusSkipped
			rc.StepResults[rc.CurrentStep].Outcome = model.StepStatusSkipped
			logger.WithField("stepResult", rc.StepResults[rc.CurrentStep].Outcome).Debugf("Skipping step '%s' due to '%s'", stepModel, ifExpression)
			stepResult.Conclusion = model.StepStatusSkipped
			stepResult.Outcome = model.StepStatusSkipped
			logger.WithField("stepResult", stepResult.Outcome).Debugf("Skipping step '%s' due to '%s'", stepModel, ifExpression)
			return nil
		}

@@ -98,58 +101,79 @@

		// Prepare and clean Runner File Commands
		actPath := rc.JobContainer.GetActPath()

		outputFileCommand := path.Join("workflow", "outputcmd.txt")
		stateFileCommand := path.Join("workflow", "statecmd.txt")
		(*step.getEnv())["GITHUB_OUTPUT"] = path.Join(actPath, outputFileCommand)

		stateFileCommand := path.Join("workflow", "statecmd.txt")
		(*step.getEnv())["GITHUB_STATE"] = path.Join(actPath, stateFileCommand)

		pathFileCommand := path.Join("workflow", "pathcmd.txt")
		(*step.getEnv())["GITHUB_PATH"] = path.Join(actPath, pathFileCommand)

		envFileCommand := path.Join("workflow", "envs.txt")
		(*step.getEnv())["GITHUB_ENV"] = path.Join(actPath, envFileCommand)

		summaryFileCommand := path.Join("workflow", "SUMMARY.md")
		(*step.getEnv())["GITHUB_STEP_SUMMARY"] = path.Join(actPath, summaryFileCommand)

		_ = rc.JobContainer.Copy(actPath, &container.FileEntry{
			Name: outputFileCommand,
			Mode: 0666,
			Mode: 0o666,
		}, &container.FileEntry{
			Name: stateFileCommand,
			Mode: 0o666,
		}, &container.FileEntry{
			Name: pathFileCommand,
			Mode: 0o666,
		}, &container.FileEntry{
			Name: envFileCommand,
			Mode: 0666,
		}, &container.FileEntry{
			Name: summaryFileCommand,
			Mode: 0o666,
		})(ctx)
|
||||
|
||||
err = executor(ctx)
|
||||
|
||||
if err == nil {
|
||||
logger.WithField("stepResult", rc.StepResults[rc.CurrentStep].Outcome).Infof(" \u2705 Success - %s %s", stage, stepString)
|
||||
logger.WithField("stepResult", stepResult.Outcome).Infof(" \u2705 Success - %s %s", stage, stepString)
|
||||
} else {
|
||||
rc.StepResults[rc.CurrentStep].Outcome = model.StepStatusFailure
|
||||
stepResult.Outcome = model.StepStatusFailure
|
||||
|
||||
continueOnError, parseErr := isContinueOnError(ctx, stepModel.RawContinueOnError, step, stage)
|
||||
if parseErr != nil {
|
||||
rc.StepResults[rc.CurrentStep].Conclusion = model.StepStatusFailure
|
||||
stepResult.Conclusion = model.StepStatusFailure
|
||||
return parseErr
|
||||
}
|
||||
|
||||
if continueOnError {
|
||||
logger.Infof("Failed but continue next step")
|
||||
err = nil
|
||||
rc.StepResults[rc.CurrentStep].Conclusion = model.StepStatusSuccess
|
||||
stepResult.Conclusion = model.StepStatusSuccess
|
||||
} else {
|
||||
rc.StepResults[rc.CurrentStep].Conclusion = model.StepStatusFailure
|
||||
stepResult.Conclusion = model.StepStatusFailure
|
||||
}
|
||||
|
||||
logger.WithField("stepResult", rc.StepResults[rc.CurrentStep].Outcome).Errorf(" \u274C Failure - %s %s", stage, stepString)
|
||||
logger.WithField("stepResult", stepResult.Outcome).Errorf(" \u274C Failure - %s %s", stage, stepString)
|
||||
}
|
||||
// Process Runner File Commands
|
||||
orgerr := err
|
||||
state := map[string]string{}
|
||||
err = rc.JobContainer.UpdateFromEnv(path.Join(actPath, stateFileCommand), &state)(ctx)
|
||||
err = processRunnerEnvFileCommand(ctx, envFileCommand, rc, rc.setEnv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for k, v := range state {
|
||||
rc.saveState(ctx, map[string]string{"name": k}, v)
|
||||
}
|
||||
output := map[string]string{}
|
||||
err = rc.JobContainer.UpdateFromEnv(path.Join(actPath, outputFileCommand), &output)(ctx)
|
||||
err = processRunnerEnvFileCommand(ctx, stateFileCommand, rc, rc.saveState)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for k, v := range output {
|
||||
rc.setOutput(ctx, map[string]string{"name": k}, v)
|
||||
err = processRunnerEnvFileCommand(ctx, outputFileCommand, rc, rc.setOutput)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = rc.UpdateExtraPath(ctx, path.Join(actPath, pathFileCommand))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if orgerr != nil {
|
||||
return orgerr
|
||||
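The switch from rc.StepResults[rc.CurrentStep] to a local stepResult keeps two status fields per step: Outcome records what actually happened, while Conclusion is what the rest of the job observes, and continue-on-error is exactly where the two diverge. A condensed, hypothetical restatement of the branches above (conclusionFor is not part of act):

package main

import "fmt"

// conclusionFor mirrors the branches above: a failed step keeps
// Outcome=failure, but continue-on-error rewrites its Conclusion.
func conclusionFor(failed, continueOnError bool) (outcome, conclusion string) {
	outcome, conclusion = "success", "success"
	if failed {
		outcome = "failure" // what the step actually did
		conclusion = "failure"
		if continueOnError {
			conclusion = "success" // what the rest of the job observes
		}
	}
	return outcome, conclusion
}

func main() {
	o, c := conclusionFor(true, true)
	fmt.Println(o, c) // failure success
}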
@@ -162,25 +186,23 @@ func setupEnv(ctx context.Context, step step) error {
 	rc := step.getRunContext()

 	mergeEnv(ctx, step)
-	err := rc.JobContainer.UpdateFromImageEnv(step.getEnv())(ctx)
-	if err != nil {
-		return err
-	}
-	err = rc.JobContainer.UpdateFromEnv((*step.getEnv())["GITHUB_ENV"], step.getEnv())(ctx)
-	if err != nil {
-		return err
-	}
-	err = rc.JobContainer.UpdateFromPath(step.getEnv())(ctx)
-	if err != nil {
-		return err
-	}
 	// merge step env last, since it should not be overwritten
-	mergeIntoMap(step.getEnv(), step.getStepModel().GetEnv())
+	mergeIntoMap(step, step.getEnv(), step.getStepModel().GetEnv())

 	exprEval := rc.NewExpressionEvaluator(ctx)
 	for k, v := range *step.getEnv() {
-		(*step.getEnv())[k] = exprEval.Interpolate(ctx, v)
+		if !strings.HasPrefix(k, "INPUT_") {
+			(*step.getEnv())[k] = exprEval.Interpolate(ctx, v)
+		}
 	}
+	// after we have an evaluated step context, update the expressions evaluator with a new env context
+	// you can use step level env in the with property of a uses construct
+	exprEval = rc.NewExpressionEvaluatorWithEnv(ctx, *step.getEnv())
+	for k, v := range *step.getEnv() {
+		if strings.HasPrefix(k, "INPUT_") {
+			(*step.getEnv())[k] = exprEval.Interpolate(ctx, v)
+		}
+	}

 	common.Logger(ctx).Debugf("setupEnv => %v", *step.getEnv())
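The two guarded loops implement a two-pass evaluation: ordinary env keys are interpolated first, then the evaluator is rebuilt with the completed step env so that INPUT_* keys, which carry the `with:` values, can reference step-level env. A self-contained toy showing the effect (toyInterpolate merely stands in for act's expression evaluator and only handles the `${{ env.KEY }}` pattern):

package main

import (
	"fmt"
	"strings"
)

// toyInterpolate expands ${{ env.KEY }} against a given env map;
// it is purely illustrative.
func toyInterpolate(v string, env map[string]string) string {
	for k, val := range env {
		v = strings.ReplaceAll(v, "${{ env."+k+" }}", val)
	}
	return v
}

func main() {
	env := map[string]string{
		"STEP_LEVEL": "hello",                 // from the step's env: block
		"INPUT_MSG":  "${{ env.STEP_LEVEL }}", // from a with: entry
	}
	// pass 1 would interpolate the non-INPUT_ keys (nothing to expand here);
	// pass 2 evaluates INPUT_* against the now-complete step env.
	env["INPUT_MSG"] = toyInterpolate(env["INPUT_MSG"], env)
	fmt.Println(env["INPUT_MSG"]) // hello
}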
@@ -194,17 +216,9 @@ func mergeEnv(ctx context.Context, step step) {

 	c := job.Container()
 	if c != nil {
-		mergeIntoMap(env, rc.GetEnv(), c.Env)
+		mergeIntoMap(step, env, rc.GetEnv(), c.Env)
 	} else {
-		mergeIntoMap(env, rc.GetEnv())
-	}
-
-	path := rc.JobContainer.GetPathVariableName()
-	if (*env)[path] == "" {
-		(*env)[path] = rc.JobContainer.DefaultPathVariable()
-	}
-	if rc.ExtraPath != nil && len(rc.ExtraPath) > 0 {
-		(*env)[path] = rc.JobContainer.JoinPathVariable(append(rc.ExtraPath, (*env)[path])...)
+		mergeIntoMap(step, env, rc.GetEnv())
 	}

 	rc.withGithubEnv(ctx, step.getGithubContext(ctx), *env)
@@ -244,10 +258,38 @@ func isContinueOnError(ctx context.Context, expr string, step step, stage stepStage) (bool, error) {
 	return continueOnError, nil
 }

-func mergeIntoMap(target *map[string]string, maps ...map[string]string) {
+func mergeIntoMap(step step, target *map[string]string, maps ...map[string]string) {
+	if rc := step.getRunContext(); rc != nil && rc.JobContainer != nil && rc.JobContainer.IsEnvironmentCaseInsensitive() {
+		mergeIntoMapCaseInsensitive(*target, maps...)
+	} else {
+		mergeIntoMapCaseSensitive(*target, maps...)
+	}
+}
+
+func mergeIntoMapCaseSensitive(target map[string]string, maps ...map[string]string) {
 	for _, m := range maps {
 		for k, v := range m {
-			(*target)[k] = v
+			target[k] = v
 		}
 	}
 }
+
+func mergeIntoMapCaseInsensitive(target map[string]string, maps ...map[string]string) {
+	foldKeys := make(map[string]string, len(target))
+	for k := range target {
+		foldKeys[strings.ToLower(k)] = k
+	}
+	toKey := func(s string) string {
+		foldKey := strings.ToLower(s)
+		if k, ok := foldKeys[foldKey]; ok {
+			return k
+		}
+		foldKeys[strings.ToLower(foldKey)] = s
+		return s
+	}
+	for _, m := range maps {
+		for k, v := range m {
+			target[toKey(k)] = v
+		}
+	}
+}
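A usage sketch for the merge pair above: on a job container whose environment is case-insensitive, as on Windows, a later "Path" must overwrite an existing "PATH" rather than create a second key, and the first-seen casing wins (the main function is illustrative and assumes the functions above are in scope in the same package):

package main

import "fmt"

func main() {
	target := map[string]string{"PATH": `C:\Windows\system32`}
	// "Path" folds onto the existing "PATH" key instead of adding a new one
	mergeIntoMapCaseInsensitive(target, map[string]string{"Path": `C:\tools`})
	fmt.Println(target) // map[PATH:C:\tools], one key, original casing kept
}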
@@ -1,7 +1,9 @@
 package runner

 import (
+	"bytes"
 	"context"
+	"io"
 	"path/filepath"
 	"strings"
 	"testing"
@@ -67,7 +69,7 @@ func TestStepActionLocalTest(t *testing.T) {
 	salm.On("readAction", sal.Step, filepath.Clean("/tmp/path/to/action"), "", mock.Anything, mock.Anything).
 		Return(&model.Action{}, nil)

-	cm.On("UpdateFromImageEnv", mock.AnythingOfType("*map[string]string")).Return(func(ctx context.Context) error {
+	cm.On("Copy", "/var/run/act", mock.AnythingOfType("[]*container.FileEntry")).Return(func(ctx context.Context) error {
 		return nil
 	})
@@ -75,14 +77,6 @@ func TestStepActionLocalTest(t *testing.T) {
 		return nil
 	})

-	cm.On("UpdateFromPath", mock.AnythingOfType("*map[string]string")).Return(func(ctx context.Context) error {
-		return nil
-	})
-
-	cm.On("Copy", "/var/run/act", mock.AnythingOfType("[]*container.FileEntry")).Return(func(ctx context.Context) error {
-		return nil
-	})
-
 	cm.On("UpdateFromEnv", "/var/run/act/workflow/statecmd.txt", mock.AnythingOfType("*map[string]string")).Return(func(ctx context.Context) error {
 		return nil
 	})
@@ -91,6 +85,8 @@ func TestStepActionLocalTest(t *testing.T) {
 		return nil
 	})

+	cm.On("GetContainerArchive", ctx, "/var/run/act/workflow/pathcmd.txt").Return(io.NopCloser(&bytes.Buffer{}), nil)
+
 	salm.On("runAction", sal, filepath.Clean("/tmp/path/to/action"), (*remoteAction)(nil)).Return(func(ctx context.Context) error {
 		return nil
 	})
@@ -111,7 +107,6 @@ func TestStepActionLocalPost(t *testing.T) {
 		stepModel              *model.Step
 		actionModel            *model.Action
 		initialStepResults     map[string]*model.StepResult
-		expectedPostStepResult *model.StepResult
 		err                    error
 		mocks                  struct {
 			env  bool
@@ -138,11 +133,6 @@ func TestStepActionLocalPost(t *testing.T) {
 					Outputs:    map[string]string{},
 				},
 			},
-			expectedPostStepResult: &model.StepResult{
-				Conclusion: model.StepStatusSuccess,
-				Outcome:    model.StepStatusSuccess,
-				Outputs:    map[string]string{},
-			},
 			mocks: struct {
 				env  bool
 				exec bool
@@ -171,11 +161,6 @@ func TestStepActionLocalPost(t *testing.T) {
 					Outputs:    map[string]string{},
 				},
 			},
-			expectedPostStepResult: &model.StepResult{
-				Conclusion: model.StepStatusSuccess,
-				Outcome:    model.StepStatusSuccess,
-				Outputs:    map[string]string{},
-			},
 			mocks: struct {
 				env  bool
 				exec bool
@@ -204,16 +189,11 @@ func TestStepActionLocalPost(t *testing.T) {
 					Outputs:    map[string]string{},
 				},
 			},
-			expectedPostStepResult: &model.StepResult{
-				Conclusion: model.StepStatusSkipped,
-				Outcome:    model.StepStatusSkipped,
-				Outputs:    map[string]string{},
-			},
 			mocks: struct {
 				env  bool
 				exec bool
 			}{
-				env:  true,
+				env:  false,
 				exec: false,
 			},
 		},
@@ -238,7 +218,6 @@ func TestStepActionLocalPost(t *testing.T) {
 					Outputs:    map[string]string{},
 				},
 			},
-			expectedPostStepResult: nil,
 			mocks: struct {
 				env  bool
 				exec bool
@@ -277,11 +256,6 @@ func TestStepActionLocalPost(t *testing.T) {
 			}
 			sal.RunContext.ExprEval = sal.RunContext.NewExpressionEvaluator(ctx)

-			if tt.mocks.env {
-				cm.On("UpdateFromImageEnv", &sal.env).Return(func(ctx context.Context) error { return nil })
-				cm.On("UpdateFromEnv", "/var/run/act/workflow/envs.txt", &sal.env).Return(func(ctx context.Context) error { return nil })
-				cm.On("UpdateFromPath", &sal.env).Return(func(ctx context.Context) error { return nil })
-			}
 			if tt.mocks.exec {
 				suffixMatcher := func(suffix string) interface{} {
 					return mock.MatchedBy(func(array []string) bool {
@@ -294,6 +268,10 @@ func TestStepActionLocalPost(t *testing.T) {
 					return nil
 				})

+				cm.On("UpdateFromEnv", "/var/run/act/workflow/envs.txt", mock.AnythingOfType("*map[string]string")).Return(func(ctx context.Context) error {
+					return nil
+				})
+
 				cm.On("UpdateFromEnv", "/var/run/act/workflow/statecmd.txt", mock.AnythingOfType("*map[string]string")).Return(func(ctx context.Context) error {
 					return nil
 				})
@@ -301,12 +279,14 @@ func TestStepActionLocalPost(t *testing.T) {
 				cm.On("UpdateFromEnv", "/var/run/act/workflow/outputcmd.txt", mock.AnythingOfType("*map[string]string")).Return(func(ctx context.Context) error {
 					return nil
 				})
+
+				cm.On("GetContainerArchive", ctx, "/var/run/act/workflow/pathcmd.txt").Return(io.NopCloser(&bytes.Buffer{}), nil)
 			}

 			err := sal.post()(ctx)

 			assert.Equal(t, tt.err, err)
-			assert.Equal(t, tt.expectedPostStepResult, sal.RunContext.StepResults["post-step"])
+			assert.Equal(t, sal.RunContext.StepResults["post-step"], (*model.StepResult)(nil))
 			cm.AssertExpectations(t)
 		})
 	}
@@ -11,11 +11,11 @@ import (
 	"regexp"
 	"strings"

-	gogit "github.com/go-git/go-git/v5"
-
 	"github.com/nektos/act/pkg/common"
 	"github.com/nektos/act/pkg/common/git"
 	"github.com/nektos/act/pkg/model"
+
+	gogit "github.com/go-git/go-git/v5"
 )

 type stepActionRemote struct {
@@ -57,7 +57,7 @@ func (sar *stepActionRemote) prepareActionExecutor() common.Executor {
 			}
 		}

-		actionDir := fmt.Sprintf("%s/%s", sar.RunContext.ActionCacheDir(), strings.ReplaceAll(sar.Step.Uses, "/", "-"))
+		actionDir := fmt.Sprintf("%s/%s", sar.RunContext.ActionCacheDir(), safeFilename(sar.Step.Uses))
 		gitClone := stepActionRemoteNewCloneExecutor(git.NewGitCloneExecutorInput{
 			URL: sar.remoteAction.CloneURL(sar.RunContext.Config.DefaultActionInstance),
 			Ref: sar.remoteAction.Ref,
@@ -123,7 +123,7 @@ func (sar *stepActionRemote) main() common.Executor {
 				return sar.RunContext.JobContainer.CopyDir(copyToPath, sar.RunContext.Config.Workdir+string(filepath.Separator)+".", sar.RunContext.Config.UseGitIgnore)(ctx)
 			}

-			actionDir := fmt.Sprintf("%s/%s", sar.RunContext.ActionCacheDir(), strings.ReplaceAll(sar.Step.Uses, "/", "-"))
+			actionDir := fmt.Sprintf("%s/%s", sar.RunContext.ActionCacheDir(), safeFilename(sar.Step.Uses))

 			return sar.runAction(sar, actionDir, sar.remoteAction)(ctx)
 		}),
@@ -182,7 +182,7 @@ func (sar *stepActionRemote) getActionModel() *model.Action {

 func (sar *stepActionRemote) getCompositeRunContext(ctx context.Context) *RunContext {
 	if sar.compositeRunContext == nil {
-		actionDir := fmt.Sprintf("%s/%s", sar.RunContext.ActionCacheDir(), strings.ReplaceAll(sar.Step.Uses, "/", "-"))
+		actionDir := fmt.Sprintf("%s/%s", sar.RunContext.ActionCacheDir(), safeFilename(sar.Step.Uses))
 		actionLocation := path.Join(actionDir, sar.remoteAction.Path)
 		_, containerActionDir := getContainerActionPaths(sar.getStepModel(), actionLocation, sar.RunContext)

@@ -197,6 +197,7 @@ func (sar *stepActionRemote) getCompositeRunContext(ctx context.Context) *RunContext {
 		// was already created during the pre stage)
 		env := evaluateCompositeInputAndEnv(ctx, sar.RunContext, sar)
 		sar.compositeRunContext.Env = env
+		sar.compositeRunContext.ExtraPath = sar.RunContext.ExtraPath
 	}
 	return sar.compositeRunContext
 }
@@ -270,3 +271,17 @@ func parseAction(action string) *remoteAction {
 		URL: "",
 	}
 }
+
+func safeFilename(s string) string {
+	return strings.NewReplacer(
+		`<`, "-",
+		`>`, "-",
+		`:`, "-",
+		`"`, "-",
+		`/`, "-",
+		`\`, "-",
+		`|`, "-",
+		`?`, "-",
+		`*`, "-",
+	).Replace(s)
+}
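safeFilename replaces the characters that are invalid in Windows file names, so the action cache directory derived from a `uses:` reference can no longer contain path separators or other reserved characters. For example (illustrative, assuming the function above is in scope in the same package):

package main

import "fmt"

func main() {
	// "/" is among the replaced characters, so nested uses: references
	// flatten into a single directory name.
	fmt.Println(safeFilename("actions/checkout@v3")) // actions-checkout@v3
	fmt.Println(safeFilename(`C:\evil|name?`))       // C--evil-name-
}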
@@ -1,18 +1,20 @@
 package runner

 import (
+	"bytes"
 	"context"
 	"errors"
+	"io"
 	"strings"
 	"testing"

-	"github.com/nektos/act/pkg/common"
-	"github.com/nektos/act/pkg/common/git"
-	"github.com/nektos/act/pkg/model"
-
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/mock"
 	"gopkg.in/yaml.v3"
+
+	"github.com/nektos/act/pkg/common"
+	"github.com/nektos/act/pkg/common/git"
+	"github.com/nektos/act/pkg/model"
 )

 type stepActionRemoteMocks struct {
@@ -163,11 +165,6 @@ func TestStepActionRemote(t *testing.T) {
 			})
 		}

-		if tt.mocks.env {
-			cm.On("UpdateFromImageEnv", &sar.env).Return(func(ctx context.Context) error { return nil })
-			cm.On("UpdateFromEnv", "/var/run/act/workflow/envs.txt", &sar.env).Return(func(ctx context.Context) error { return nil })
-			cm.On("UpdateFromPath", &sar.env).Return(func(ctx context.Context) error { return nil })
-		}
 		if tt.mocks.read {
 			sarm.On("readAction", sar.Step, suffixMatcher("act/remote-action@v1"), "", mock.Anything, mock.Anything).Return(&model.Action{}, nil)
 		}
@@ -178,6 +175,10 @@ func TestStepActionRemote(t *testing.T) {
 				return nil
 			})

+			cm.On("UpdateFromEnv", "/var/run/act/workflow/envs.txt", mock.AnythingOfType("*map[string]string")).Return(func(ctx context.Context) error {
+				return nil
+			})
+
 			cm.On("UpdateFromEnv", "/var/run/act/workflow/statecmd.txt", mock.AnythingOfType("*map[string]string")).Return(func(ctx context.Context) error {
 				return nil
 			})
@@ -185,6 +186,8 @@ func TestStepActionRemote(t *testing.T) {
 			cm.On("UpdateFromEnv", "/var/run/act/workflow/outputcmd.txt", mock.AnythingOfType("*map[string]string")).Return(func(ctx context.Context) error {
 				return nil
 			})
+
+			cm.On("GetContainerArchive", ctx, "/var/run/act/workflow/pathcmd.txt").Return(io.NopCloser(&bytes.Buffer{}), nil)
 		}

 		err := sar.pre()(ctx)
@@ -416,8 +419,8 @@ func TestStepActionRemotePost(t *testing.T) {
 		stepModel              *model.Step
 		actionModel            *model.Action
 		initialStepResults     map[string]*model.StepResult
+		IntraActionState       map[string]map[string]string
 		expectedEnv            map[string]string
-		expectedPostStepResult *model.StepResult
 		err                    error
 		mocks                  struct {
 			env  bool
@@ -442,19 +445,16 @@ func TestStepActionRemotePost(t *testing.T) {
 					Conclusion: model.StepStatusSuccess,
 					Outcome:    model.StepStatusSuccess,
 					Outputs:    map[string]string{},
-					State: map[string]string{
-						"key": "value",
-					},
+				},
+			},
+			IntraActionState: map[string]map[string]string{
+				"step": {
+					"key": "value",
 				},
 			},
 			expectedEnv: map[string]string{
 				"STATE_key": "value",
 			},
-			expectedPostStepResult: &model.StepResult{
-				Conclusion: model.StepStatusSuccess,
-				Outcome:    model.StepStatusSuccess,
-				Outputs:    map[string]string{},
-			},
 			mocks: struct {
 				env  bool
 				exec bool
@@ -483,11 +483,6 @@ func TestStepActionRemotePost(t *testing.T) {
 					Outputs:    map[string]string{},
 				},
 			},
-			expectedPostStepResult: &model.StepResult{
-				Conclusion: model.StepStatusSuccess,
-				Outcome:    model.StepStatusSuccess,
-				Outputs:    map[string]string{},
-			},
 			mocks: struct {
 				env  bool
 				exec bool
@@ -516,11 +511,6 @@ func TestStepActionRemotePost(t *testing.T) {
 					Outputs:    map[string]string{},
 				},
 			},
-			expectedPostStepResult: &model.StepResult{
-				Conclusion: model.StepStatusSkipped,
-				Outcome:    model.StepStatusSkipped,
-				Outputs:    map[string]string{},
-			},
 			mocks: struct {
 				env  bool
 				exec bool
@@ -550,7 +540,6 @@ func TestStepActionRemotePost(t *testing.T) {
 					Outputs:    map[string]string{},
 				},
 			},
-			expectedPostStepResult: nil,
 			mocks: struct {
 				env  bool
 				exec bool
@@ -583,17 +572,13 @@ func TestStepActionRemotePost(t *testing.T) {
 				},
 			},
 			StepResults:      tt.initialStepResults,
+			IntraActionState: tt.IntraActionState,
 		},
 		Step:   tt.stepModel,
 		action: tt.actionModel,
 	}
 	sar.RunContext.ExprEval = sar.RunContext.NewExpressionEvaluator(ctx)

-	if tt.mocks.env {
-		cm.On("UpdateFromImageEnv", &sar.env).Return(func(ctx context.Context) error { return nil })
-		cm.On("UpdateFromEnv", "/var/run/act/workflow/envs.txt", &sar.env).Return(func(ctx context.Context) error { return nil })
-		cm.On("UpdateFromPath", &sar.env).Return(func(ctx context.Context) error { return nil })
-	}
 	if tt.mocks.exec {
 		cm.On("Exec", []string{"node", "/var/run/act/actions/remote-action@v1/post.js"}, sar.env, "", "").Return(func(ctx context.Context) error { return tt.err })

@@ -601,6 +586,10 @@ func TestStepActionRemotePost(t *testing.T) {
 			return nil
 		})

+		cm.On("UpdateFromEnv", "/var/run/act/workflow/envs.txt", mock.AnythingOfType("*map[string]string")).Return(func(ctx context.Context) error {
+			return nil
+		})
+
 		cm.On("UpdateFromEnv", "/var/run/act/workflow/statecmd.txt", mock.AnythingOfType("*map[string]string")).Return(func(ctx context.Context) error {
 			return nil
 		})
@@ -608,6 +597,8 @@ func TestStepActionRemotePost(t *testing.T) {
 		cm.On("UpdateFromEnv", "/var/run/act/workflow/outputcmd.txt", mock.AnythingOfType("*map[string]string")).Return(func(ctx context.Context) error {
 			return nil
 		})
+
+		cm.On("GetContainerArchive", ctx, "/var/run/act/workflow/pathcmd.txt").Return(io.NopCloser(&bytes.Buffer{}), nil)
 	}

 	err := sar.post()(ctx)
@@ -618,7 +609,8 @@ func TestStepActionRemotePost(t *testing.T) {
 				assert.Equal(t, value, sar.env[key])
 			}
 		}
-		assert.Equal(t, tt.expectedPostStepResult, sar.RunContext.StepResults["post-step"])
+		// Ensure that StepResults is nil in this test
+		assert.Equal(t, sar.RunContext.StepResults["post-step"], (*model.StepResult)(nil))
 		cm.AssertExpectations(t)
 	})
 }
@@ -717,3 +709,24 @@ func Test_newRemoteAction(t *testing.T) {
 		})
 	}
 }
+
+func Test_safeFilename(t *testing.T) {
+	tests := []struct {
+		s    string
+		want string
+	}{
+		{
+			s:    "https://test.com/test/",
+			want: "https---test.com-test-",
+		},
+		{
+			s:    `<>:"/\|?*`,
+			want: "---------",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.s, func(t *testing.T) {
+			assert.Equalf(t, tt.want, safeFilename(tt.s), "safeFilename(%v)", tt.s)
+		})
+	}
+}
@@ -1,7 +1,9 @@
 package runner

 import (
+	"bytes"
 	"context"
+	"io"
 	"testing"

 	"github.com/nektos/act/pkg/container"
@@ -55,18 +57,6 @@ func TestStepDockerMain(t *testing.T) {
 	}
 	sd.RunContext.ExprEval = sd.RunContext.NewExpressionEvaluator(ctx)

-	cm.On("UpdateFromImageEnv", mock.AnythingOfType("*map[string]string")).Return(func(ctx context.Context) error {
-		return nil
-	})
-
-	cm.On("UpdateFromEnv", "/var/run/act/workflow/envs.txt", mock.AnythingOfType("*map[string]string")).Return(func(ctx context.Context) error {
-		return nil
-	})
-
-	cm.On("UpdateFromPath", mock.AnythingOfType("*map[string]string")).Return(func(ctx context.Context) error {
-		return nil
-	})
-
 	cm.On("Pull", false).Return(func(ctx context.Context) error {
 		return nil
 	})
@@ -91,6 +81,10 @@ func TestStepDockerMain(t *testing.T) {
 		return nil
 	})

+	cm.On("UpdateFromEnv", "/var/run/act/workflow/envs.txt", mock.AnythingOfType("*map[string]string")).Return(func(ctx context.Context) error {
+		return nil
+	})
+
 	cm.On("UpdateFromEnv", "/var/run/act/workflow/statecmd.txt", mock.AnythingOfType("*map[string]string")).Return(func(ctx context.Context) error {
 		return nil
 	})
@@ -99,6 +93,8 @@ func TestStepDockerMain(t *testing.T) {
 		return nil
 	})

+	cm.On("GetContainerArchive", ctx, "/var/run/act/workflow/pathcmd.txt").Return(io.NopCloser(&bytes.Buffer{}), nil)
+
 	err := sd.main()(ctx)
 	assert.Nil(t, err)
@@ -6,6 +6,7 @@ import (
 	"strings"

 	"github.com/kballard/go-shellquote"
+
 	"github.com/nektos/act/pkg/common"
 	"github.com/nektos/act/pkg/container"
 	"github.com/nektos/act/pkg/model"
@@ -16,6 +17,7 @@ type stepRun struct {
 	RunContext *RunContext
 	cmd        []string
 	env        map[string]string
+	WorkingDirectory string
 }

 func (sr *stepRun) pre() common.Executor {
@@ -26,11 +28,11 @@ func (sr *stepRun) pre() common.Executor {
 func (sr *stepRun) main() common.Executor {
 	sr.env = map[string]string{}

 	return runStepExecutor(sr, stepStageMain, common.NewPipelineExecutor(
 		sr.setupShellCommandExecutor(),
 		func(ctx context.Context) error {
-			return sr.getRunContext().JobContainer.Exec(sr.cmd, sr.env, "", sr.Step.WorkingDirectory)(ctx)
+			sr.getRunContext().ApplyExtraPath(ctx, &sr.env)
+			return sr.getRunContext().JobContainer.Exec(sr.cmd, sr.env, "", sr.WorkingDirectory)(ctx)
 		},
 	))
 }
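The inserted ApplyExtraPath call applies the PATH entries accumulated in rc.ExtraPath at exec time rather than baking them into the merged env, replacing the logic removed from mergeEnv further up. Assuming it simply prepends the extra entries to the container's PATH variable, the effect is roughly (applyExtraPath below is an illustrative stand-in, not act's method):

package main

import (
	"fmt"
	"strings"
)

// applyExtraPath prepends the entries accumulated via GITHUB_PATH to the
// existing PATH value; purely a sketch of the assumed behavior.
func applyExtraPath(extra []string, env map[string]string, sep string) {
	if len(extra) == 0 {
		return
	}
	env["PATH"] = strings.Join(append(append([]string{}, extra...), env["PATH"]), sep)
}

func main() {
	env := map[string]string{"PATH": "/usr/bin:/bin"}
	applyExtraPath([]string{"/opt/tool/bin"}, env, ":")
	fmt.Println(env["PATH"]) // /opt/tool/bin:/usr/bin:/bin
}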
@@ -71,7 +73,7 @@ func (sr *stepRun) setupShellCommandExecutor() common.Executor {
 		rc := sr.getRunContext()
 		return rc.JobContainer.Copy(rc.JobContainer.GetActPath(), &container.FileEntry{
 			Name: scriptName,
-			Mode: 0755,
+			Mode: 0o755,
 			Body: script,
 		})(ctx)
 	}
@@ -165,16 +167,20 @@ func (sr *stepRun) setupShell(ctx context.Context) {
 func (sr *stepRun) setupWorkingDirectory(ctx context.Context) {
 	rc := sr.RunContext
 	step := sr.Step
+	workingdirectory := ""

 	if step.WorkingDirectory == "" {
-		step.WorkingDirectory = rc.Run.Job().Defaults.Run.WorkingDirectory
+		workingdirectory = rc.Run.Job().Defaults.Run.WorkingDirectory
+	} else {
+		workingdirectory = step.WorkingDirectory
 	}

 	// jobs can receive context values, so we interpolate
-	step.WorkingDirectory = rc.NewExpressionEvaluator(ctx).Interpolate(ctx, step.WorkingDirectory)
+	workingdirectory = rc.NewExpressionEvaluator(ctx).Interpolate(ctx, workingdirectory)

 	// but top level keys in workflow file like `defaults` or `env` can't
-	if step.WorkingDirectory == "" {
-		step.WorkingDirectory = rc.Run.Workflow.Defaults.Run.WorkingDirectory
+	if workingdirectory == "" {
+		workingdirectory = rc.Run.Workflow.Defaults.Run.WorkingDirectory
 	}
+	sr.WorkingDirectory = workingdirectory
 }
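setupWorkingDirectory now resolves into the step's own WorkingDirectory field instead of mutating the shared step model. Its fallback chain, restated as a hypothetical helper (resolveWorkingDirectory is not act API):

package main

import "fmt"

// resolveWorkingDirectory condenses the chain above: the step's own value
// wins, then the job's run defaults; both may contain expressions, while
// the workflow-level default is used verbatim.
func resolveWorkingDirectory(stepWD, jobDefault, workflowDefault string, interpolate func(string) string) string {
	wd := stepWD
	if wd == "" {
		wd = jobDefault
	}
	wd = interpolate(wd) // jobs can receive context values
	if wd == "" {
		wd = workflowDefault // top level `defaults` can't use expressions
	}
	return wd
}

func main() {
	identity := func(s string) string { return s }
	fmt.Println(resolveWorkingDirectory("", "", "/repo", identity)) // /repo
}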
@@ -1,20 +1,23 @@
 package runner

 import (
+	"bytes"
 	"context"
+	"io"
 	"testing"

+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+
 	"github.com/nektos/act/pkg/container"
 	"github.com/nektos/act/pkg/model"
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/mock"
 )

 func TestStepRun(t *testing.T) {
 	cm := &containerMock{}
 	fileEntry := &container.FileEntry{
 		Name: "workflow/1.sh",
-		Mode: 0755,
+		Mode: 0o755,
 		Body: "\ncmd\n",
 	}
@@ -53,7 +56,7 @@ func TestStepRun(t *testing.T) {
 		return nil
 	})

-	cm.On("UpdateFromImageEnv", mock.AnythingOfType("*map[string]string")).Return(func(ctx context.Context) error {
+	cm.On("Copy", "/var/run/act", mock.AnythingOfType("[]*container.FileEntry")).Return(func(ctx context.Context) error {
 		return nil
 	})
@@ -61,14 +64,6 @@ func TestStepRun(t *testing.T) {
 		return nil
 	})

-	cm.On("UpdateFromPath", mock.AnythingOfType("*map[string]string")).Return(func(ctx context.Context) error {
-		return nil
-	})
-
-	cm.On("Copy", "/var/run/act", mock.AnythingOfType("[]*container.FileEntry")).Return(func(ctx context.Context) error {
-		return nil
-	})
-
 	cm.On("UpdateFromEnv", "/var/run/act/workflow/statecmd.txt", mock.AnythingOfType("*map[string]string")).Return(func(ctx context.Context) error {
 		return nil
 	})
@@ -79,6 +74,8 @@ func TestStepRun(t *testing.T) {

 	ctx := context.Background()

+	cm.On("GetContainerArchive", ctx, "/var/run/act/workflow/pathcmd.txt").Return(io.NopCloser(&bytes.Buffer{}), nil)
+
 	err := sr.main()(ctx)
 	assert.Nil(t, err)
@@ -63,7 +63,9 @@ func TestMergeIntoMap(t *testing.T) {
 	for _, tt := range table {
 		t.Run(tt.name, func(t *testing.T) {
-			mergeIntoMap(&tt.target, tt.maps...)
+			mergeIntoMapCaseSensitive(tt.target, tt.maps...)
+			assert.Equal(t, tt.expected, tt.target)
+			mergeIntoMapCaseInsensitive(tt.target, tt.maps...)
 			assert.Equal(t, tt.expected, tt.target)
 		})
 	}
@@ -134,7 +136,6 @@ func TestSetupEnv(t *testing.T) {
 		Env: map[string]string{
 			"RC_KEY": "rcvalue",
 		},
-		ExtraPath:    []string{"/path/to/extra/file"},
 		JobContainer: cm,
 	}
 	step := &model.Step{
@@ -142,19 +143,13 @@ func TestSetupEnv(t *testing.T) {
 			"STEP_WITH": "with-value",
 		},
 	}
-	env := map[string]string{
-		"PATH": "",
-	}
+	env := map[string]string{}

 	sm.On("getRunContext").Return(rc)
 	sm.On("getGithubContext").Return(rc)
 	sm.On("getStepModel").Return(step)
 	sm.On("getEnv").Return(&env)

-	cm.On("UpdateFromImageEnv", &env).Return(func(ctx context.Context) error { return nil })
-	cm.On("UpdateFromEnv", "/var/run/act/workflow/envs.txt", &env).Return(func(ctx context.Context) error { return nil })
-	cm.On("UpdateFromPath", &env).Return(func(ctx context.Context) error { return nil })
-
 	err := setupEnv(context.Background(), sm)
 	assert.Nil(t, err)
@@ -178,13 +173,11 @@ func TestSetupEnv(t *testing.T) {
 		"GITHUB_ACTION_REPOSITORY": "",
 		"GITHUB_API_URL":           "https:///api/v3",
 		"GITHUB_BASE_REF":          "",
-		"GITHUB_ENV":               "/var/run/act/workflow/envs.txt",
 		"GITHUB_EVENT_NAME":        "",
 		"GITHUB_EVENT_PATH":        "/var/run/act/workflow/event.json",
 		"GITHUB_GRAPHQL_URL":       "https:///api/graphql",
 		"GITHUB_HEAD_REF":          "",
-		"GITHUB_JOB":               "",
-		"GITHUB_PATH":              "/var/run/act/workflow/paths.txt",
+		"GITHUB_JOB":               "1",
 		"GITHUB_RETENTION_DAYS":    "0",
 		"GITHUB_RUN_ID":            "runId",
 		"GITHUB_RUN_NUMBER":        "1",
@@ -192,7 +185,6 @@ func TestSetupEnv(t *testing.T) {
 		"GITHUB_TOKEN":             "",
 		"GITHUB_WORKFLOW":          "",
 		"INPUT_STEP_WITH":          "with-value",
-		"PATH":                     "/path/to/extra/file:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
 		"RC_KEY":                   "rcvalue",
 		"RUNNER_PERFLOG":           "/dev/null",
 		"RUNNER_TRACKING_ID":       "",
pkg/runner/testdata/.github/workflows/local-reusable-workflow-no-inputs-array.yml (vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
+name: reusable
+
+on:
+  - workflow_call
+
+jobs:
+  reusable_workflow_job:
+    runs-on: ubuntu-latest
+    steps:
+      - run: echo Test
Some files were not shown because too many files have changed in this diff.