Compare commits

404 commits:

```text
e6fec7324e 73f32886f2 b7a8145d09 12c0c4277a 3ed38d8e8b 0dbf44c657 6dcf9bc6e6 df61c7fcdb
36e0261150 46dc2ffe80 054caec791 5a80a044f9 4ca35d2192 5e0d29d665 6dd67253bc 09d4b5d6ad
a6ec2c129a 424fd5e02b 6a8c42ac53 c215e0888a 6091094e14 8072a00a77 7f7d84b10f 15bb54f14e
f055d4ae60 f7a846d2f5 cd40f3fe9b adbe229fcb 96d6cf8b2c ef5746ba74 4fae81efe4 238a495579
74dcce467d 0c60f9749a 6b0ef97c52 0806c8b109 0dfb06748e 603b44b585 00fbfa754c 1b10028447
9cecf94039 29f4123b5c 85c3b3b541 2b47c99bb7 3c405a0d94 899a1f206e 95ff5bf299 bd10c9a801
3d0cb3d82b 7693697f4c 4dcb9b7a13 55477899e7 04011b6b78 c8f847d82d 74b0fe8ba9 18b4714e38
610358e1c3 1c16fd1967 55b09a04cd 5a79256ee4 1bb2ee7098 15045b4fc0 67918333fa 84a4025bc8
fb4f29fd6d 3e5c62977f 83bfbcdafd 3d65b0f73f 854e3e9ec5 db71c41d17 db6e477e25 ceeb6c160c
ace4cd47c7 c93462e19f 99067a9c1e f3264cac20 e7e158cd7e 3c730d7924 976df8bae5 7c7d80ebdd
2f479ba024 5718555f7a 44ea01c209 2be4def7be 3d47885894 4699c3b689 c241ecda31 b637d79ec3
83af8f8767 60060a7c9c d134079807 1891bef433 2911b2172c 935e37c25b 19764bcb06 c84a3ef6d0
bd9032de0a 1d32507b52 80b0955303 0c12273eba 323bee9ab5 7286b43b0e f64c267dac 7ba9f30f37
2a0a0a1a62 f55ae1a0bc 9f06ca75e4 a00fd960a5 8a9e4f9f38 a42f3cf1cd 83140951bf 6468dd7fc8
f0ca0abc40 73d5f78294 0b4c67a4aa 3939f48e6d 74b74e847b c8127155bc 22d91e3ac3 d5d8548546
8bf10cf876 8e6c7c11fe d720ff09a2 17bf4fc5af 67f4baa618 83b0a5b1f2 4810f69367 cdc6d4bc6a
e343ea9d5f 94bc8b319c 808cf5a2c3 8c7c0f53c1 2069b04779 724ec918c9 79f93beef2 70956f2929
ef79bb284d 3a0a6425a8 4c8da8558d f40d0b873d 15618d1187 e597046195 3813f40cba 310cb79e81
eb19987893 545802b97b e60018a6d9 70e1e37280 3d1d8a9aca f9dcb58db2 a0307d3b7c 515c2c429d
4fc176f556 df44dffd30 b2191ae204 ef608854d0 a165e17878 56e103b4ba 422cbdf446 8c56bd3aa5
74c27db4dd 24348ff1ee a94498b482 fe76a035ad b92d95f899 3555d65d08 e912ab32c2 80f6de51d5
6ce45e3f24 3ac2b726f2 6ce5c93cc8 92b4d73376 183bb7af1b c70a6743f6 04b5c739d2 e48c07988e
0b05ee8e0b e150310da7 34db86138b b0d0cec71f 26f1b1f4b6 9c1f1f8d84 3cfc2cf9c3 a72822b3f8
b0996e0577 481999f59d 11dd2ac745 16c574cd26 06054d2931 1371215aab 9283cfc9b1 27846050ae
1eacf23dcb c00810bdf4 39abbce4e6 b5fa24537f 046f5aa715 ed9b6643ca 65ef31f102 a94a01bff2
229dbaf153 f84a566ded 8913375af8 ca9b783491 568124ca69 aa21277380 4721abfa6d 3eb6e83ea4
15eb1fa92a 1f9bbe12dd f1df2ca5d6 d77991c95a c81a770bc5 7cbb1a910e a18648ee73 baf3bcf48b
518d8c96f3 2ea7891787 16f35f64eb ded31bdbc0 60bcfb5d4f 7c6237d93f b6718fdf5d 0c1f2edb99
721857e4a0 6b1010ad07 e12252a43a 8609522aa4 6a876c4f99 de529139af 3715266494 35d6e9f71e
d3a56cdb69 d970056601 9884da0122 de0644499a 816a7d410a 50dcc57e4b 9bdddf18e0 6d527bf1a3
ac1ba34518 97749a27b9 d70b225e85 c0130ed030 148a545021 65a925b4ed c5ce502f9f f2bd194c7f
4ab812c1b1 5a331d2d99 3ae0a9dfb7 24b04dfa55 5c4a96bcb7 62abf4fe11 cfedc518ca 68c72b9a51
ad7a8a3559 220d6f1251 6745999c10 8518d70bdf d3dfde055a 5e76853b55 2eb4de02ee 75ffa205c4
342ad6a51a 568f053723 351ae99bc1 385d71a215 f764ecb283 d65cf869e6 775a128cd4 8f12a6c947
83fb85f702 3daf313205 7c5400d75b 929ea6df75 f6a8a0e643 9fab59954c 8c28c9fd8f 2fa0a5f769
a6c95ef2a7 5a2112a7f8 fad986af5f 35cac27f4c 636c8a34ae 556fd20aed a8298365fe 1dda0aec69
49e204166d 09de42f067 a36b003f7a 6744e68ee2 0671d16694 ac5dd8feb8 24440d9f15 f3c88b5091
881dbdb81b aeee2052de 7ee2138418 8dbd151fa7 6601d8d8e8 19abab6375 973dd7f7ef 44b510f48c
5500c928eb 04d12b0206 1252e551b8 c614d8b96c 84b6649b8b 89cb558558 53095d76f4 4b56aced15
05eaeaa528 4eba04b229 8790c9b8e6 b7a9eb9fbf 2f55276cea be4a1477a5 21ea3d0d5f 1316307313
b0a5068f6d 34ab8150bf 8048baf435 7c3c5349ab 98ad62fcef c378a7d28b 44333c758a 36dbbc1dfa
f41e9127a8 460c78da07 22dc1e0e7e 281a52f0d6 e775fea265 24c16fbf25 ce168f9595 c4b64ec1c1
f91b2aa5db 72d03214c5 42b9b73d38 787388daf5 b91e4b0d55 e259dc2835 70ae63bd10 01952d05c8
3748772201 932863bef5 09e9f6a1ac a2856aed75 612e08f113 1b7212f5fd 75d19d0ff5 7ada9d3f74
78e1ceb0a8 d690a5fee9 19e6929398 b00babd0d9 82a8c1e80d 566b9d843e 7ebcc1c816 3e23b4fbb5
63ae215071 efb12b7f12 b3c06dc19d 0d0780580c 872a69548c 33cb8bca64 d8ba8cbf17 003947ea33
0988b47752 d064863f9b 93907931df 767e6a8696 b2fb9e64ac 8b4f210872 3f3b25ae84 3ac756b37a
7e8d070811 a53a1c2000 3f4a6dc462 b14398eff3 f0c6fa11be cfc1b91aa1 0d388572a8 9c7085195c
e3a722e7c6 d3ce2b0a46 4989f444f1 c044035d9e 767969502a 469d024c1f 6ab71ecebb bef9b5c3c7
0c8c082ac0 b8d7e947cf a8e05cded6 d281230cce 7d9951200f 0f8861b9e3 8c5748a55c 57bf4d27a2
4c2524ab4d d9fe63ec24 7073eac240 1797775be3
```
**`.gitea/workflows/test.yml`** — new file (44 lines)

```yaml
name: checks
on:
  - push
  - pull_request

env:
  GOPROXY: https://goproxy.io,direct
  GOPATH: /go_path
  GOCACHE: /go_cache

jobs:
  lint:
    name: check and test
    runs-on: ubuntu-latest
    steps:
      - name: cache go path
        id: cache-go-path
        uses: https://github.com/actions/cache@v3
        with:
          path: /go_path
          key: go_path-${{ github.repository }}-${{ github.ref_name }}
          restore-keys: |
            go_path-${{ github.repository }}-
            go_path-
      - name: cache go cache
        id: cache-go-cache
        uses: https://github.com/actions/cache@v3
        with:
          path: /go_cache
          key: go_cache-${{ github.repository }}-${{ github.ref_name }}
          restore-keys: |
            go_cache-${{ github.repository }}-
            go_cache-
      - uses: actions/setup-go@v3
        with:
          go-version: 1.20
      - uses: actions/checkout@v3
      - name: vet checks
        run: go vet -v ./...
      - name: build
        run: go build -v ./...
      - name: test
        run: go test -v ./pkg/jobparser
        # TODO test more packages
```
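Note that the cache steps above reference the action by full URL (`https://github.com/actions/cache@v3`) while `setup-go` and `checkout` use the short form; on Gitea Actions the short form is resolved against the runner's configured default actions instance, so the full URL pins the source host explicitly. A minimal sketch of the two reference styles (the job name and cache key below are illustrative, not from this change set):

```yaml
# Sketch: short vs. fully-qualified action references in a Gitea Actions workflow.
jobs:
  example: # hypothetical job name
    runs-on: ubuntu-latest
    steps:
      # Short form: resolved against the runner's default actions instance.
      - uses: actions/checkout@v3
      # Fully-qualified form: always fetched from the named host,
      # as in .gitea/workflows/test.yml above.
      - uses: https://github.com/actions/cache@v3
        with:
          path: /go_path
          key: example-key # hypothetical cache key
```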
**`.github/actions/choco/Dockerfile`** (vendored) — 2 changed lines

```diff
@@ -1,4 +1,4 @@
-FROM alpine:3.16
+FROM alpine:3.17

 ARG CHOCOVERSION=1.1.0

```
**`.github/actions/run-tests/action.yml`** (vendored) — new file (77 lines)

```yaml
name: 'run-tests'
description: 'Runs go test and upload a step summary'
inputs:
  filter:
    description: 'The go test pattern for the tests to run'
    required: false
    default: ''
  upload-logs-name:
    description: 'Choose the name of the log artifact'
    required: false
    default: logs-${{ github.job }}-${{ strategy.job-index }}
  upload-logs:
    description: 'If true uploads logs of each tests as an artifact'
    required: false
    default: 'true'
runs:
  using: composite
  steps:
    - uses: actions/github-script@v6
      with:
        github-token: none # No reason to grant access to the GITHUB_TOKEN
        script: |
          let myOutput = '';
          var fs = require('fs');
          var uploadLogs = process.env.UPLOAD_LOGS === 'true';
          if(uploadLogs) {
            await io.mkdirP('logs');
          }
          var filename = null;
          const options = {};
          options.ignoreReturnCode = true;
          options.env = Object.assign({}, process.env);
          delete options.env.ACTIONS_RUNTIME_URL;
          delete options.env.ACTIONS_RUNTIME_TOKEN;
          delete options.env.ACTIONS_CACHE_URL;
          options.listeners = {
            stdout: (data) => {
              for(line of data.toString().split('\n')) {
                if(/^\s*(===\s[^\s]+\s|---\s[^\s]+:\s)/.test(line)) {
                  if(uploadLogs) {
                    var runprefix = "=== RUN ";
                    if(line.startsWith(runprefix)) {
                      filename = "logs/" + line.substring(runprefix.length).replace(/[^A-Za-z0-9]/g, '-') + ".txt";
                      fs.writeFileSync(filename, line + "\n");
                    } else if(filename) {
                      fs.appendFileSync(filename, line + "\n");
                      filename = null;
                    }
                  }
                  myOutput += line + "\n";
                } else if(filename) {
                  fs.appendFileSync(filename, line + "\n");
                }
              }
            }
          };
          var args = ['test', '-v', '-cover', '-coverprofile=coverage.txt', '-covermode=atomic', '-timeout', '20m'];
          var filter = process.env.FILTER;
          if(filter) {
            args.push('-run');
            args.push(filter);
          }
          args.push('./...');
          var exitcode = await exec.exec('go', args, options);
          if(process.env.GITHUB_STEP_SUMMARY) {
            core.summary.addCodeBlock(myOutput);
            await core.summary.write();
          }
          process.exit(exitcode);
      env:
        FILTER: ${{ inputs.filter }}
        UPLOAD_LOGS: ${{ inputs.upload-logs }}
    - uses: actions/upload-artifact@v3
      if: always() && inputs.upload-logs == 'true' && !env.ACT
      with:
        name: ${{ inputs.upload-logs-name }}
        path: logs
```
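This composite action is consumed elsewhere in the same change set: `checks.yml` below invokes it in place of a bare `go test` step. A minimal caller looks like the following sketch (the job name is illustrative; `checks.yml` passes `filter: '^TestRunEventHostEnvironment$'` and per-OS log names):

```yaml
# Sketch: invoking the run-tests composite action from a workflow job.
jobs:
  test: # hypothetical job name
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Run Tests
        uses: ./.github/actions/run-tests   # path-local composite action
        with:
          filter: '^TestRunEventHostEnvironment$'  # optional `go test -run` pattern
          upload-logs-name: logs-linux             # artifact name for per-test logs
```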
**`.github/workflows/checks.yml`** (vendored) — 80 changed lines

```diff
@@ -1,10 +1,13 @@
 name: checks
 on: [pull_request, workflow_dispatch]

+concurrency:
+  cancel-in-progress: true
+  group: ${{ github.workflow }}-${{ github.ref }}
+
 env:
   ACT_OWNER: ${{ github.repository_owner }}
   ACT_REPOSITORY: ${{ github.repository }}
-  GO_VERSION: 1.18
   CGO_ENABLED: 0

 jobs:
@@ -12,17 +15,18 @@ jobs:
     name: lint
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           fetch-depth: 0
-      - uses: actions/setup-go@v3
+      - uses: actions/setup-go@v5
         with:
-          go-version: ${{ env.GO_VERSION }}
+          go-version-file: go.mod
           check-latest: true
-      - uses: golangci/golangci-lint-action@v3.3.1
+      - uses: golangci/golangci-lint-action@v3.7.0
         with:
-          version: v1.47.2
-      - uses: megalinter/megalinter/flavors/go@v6.15.0
+          version: v1.53
+          only-new-issues: true
+      - uses: megalinter/megalinter/flavors/go@v7.8.0
         env:
           DEFAULT_BRANCH: master
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -34,14 +38,14 @@ jobs:
     name: test-linux
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           fetch-depth: 2
       - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
-      - uses: actions/setup-go@v3
+        uses: docker/setup-qemu-action@v3
+      - uses: actions/setup-go@v5
         with:
-          go-version: ${{ env.GO_VERSION }}
+          go-version-file: go.mod
           check-latest: true
       - uses: actions/cache@v3
         if: ${{ !env.ACT }}
@@ -50,9 +54,14 @@ jobs:
           key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
           restore-keys: |
             ${{ runner.os }}-go-
-      - run: go test -v -cover -coverprofile=coverage.txt -covermode=atomic -timeout 15m ./...
+      - name: Run Tests
+        uses: ./.github/actions/run-tests
+        with:
+          upload-logs-name: logs-linux
+      - name: Run act from cli
+        run: go run main.go -P ubuntu-latest=node:16-buster-slim -C ./pkg/runner/testdata/ -W ./basic/push.yml
       - name: Upload Codecov report
-        uses: codecov/codecov-action@v3.1.1
+        uses: codecov/codecov-action@v3.1.5
         with:
           files: coverage.txt
           fail_ci_if_error: true # optional (default = false)
@@ -66,24 +75,27 @@ jobs:
     name: test-${{matrix.os}}
     runs-on: ${{matrix.os}}
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           fetch-depth: 2
-      - uses: actions/setup-go@v3
+      - uses: actions/setup-go@v5
         with:
-          go-version: ${{ env.GO_VERSION }}
+          go-version-file: go.mod
           check-latest: true
-      - run: go test -v -run ^TestRunEventHostEnvironment$ ./...
-        # TODO merge coverage with test-linux
+      - name: Run Tests
+        uses: ./.github/actions/run-tests
+        with:
+          filter: '^TestRunEventHostEnvironment$'
+          upload-logs-name: logs-${{ matrix.os }}

   snapshot:
     name: snapshot
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-go@v3
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
         with:
-          go-version: ${{ env.GO_VERSION }}
+          go-version-file: go.mod
           check-latest: true
       - uses: actions/cache@v3
         if: ${{ !env.ACT }}
@@ -93,73 +105,73 @@ jobs:
           restore-keys: |
             ${{ runner.os }}-go-
       - name: GoReleaser
-        uses: goreleaser/goreleaser-action@v3
+        uses: goreleaser/goreleaser-action@v5
         with:
           version: latest
-          args: release --snapshot --rm-dist
+          args: release --snapshot --clean
       - name: Capture x86_64 (64-bit) Linux binary
         if: ${{ !env.ACT }}
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: act-linux-amd64
           path: dist/act_linux_amd64_v1/act
       - name: Capture i386 (32-bit) Linux binary
         if: ${{ !env.ACT }}
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: act-linux-i386
           path: dist/act_linux_386/act
       - name: Capture arm64 (64-bit) Linux binary
         if: ${{ !env.ACT }}
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: act-linux-arm64
           path: dist/act_linux_arm64/act
       - name: Capture armv6 (32-bit) Linux binary
         if: ${{ !env.ACT }}
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: act-linux-armv6
           path: dist/act_linux_arm_6/act
       - name: Capture armv7 (32-bit) Linux binary
         if: ${{ !env.ACT }}
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: act-linux-armv7
           path: dist/act_linux_arm_7/act
       - name: Capture x86_64 (64-bit) Windows binary
         if: ${{ !env.ACT }}
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: act-windows-amd64
           path: dist/act_windows_amd64_v1/act.exe
       - name: Capture i386 (32-bit) Windows binary
         if: ${{ !env.ACT }}
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: act-windows-i386
           path: dist/act_windows_386/act.exe
       - name: Capture arm64 (64-bit) Windows binary
         if: ${{ !env.ACT }}
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: act-windows-arm64
           path: dist/act_windows_arm64/act.exe
       - name: Capture armv7 (32-bit) Windows binary
         if: ${{ !env.ACT }}
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: act-windows-armv7
           path: dist/act_windows_arm_7/act.exe
       - name: Capture x86_64 (64-bit) MacOS binary
         if: ${{ !env.ACT }}
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: act-macos-amd64
           path: dist/act_darwin_amd64_v1/act
       - name: Capture arm64 (64-bit) MacOS binary
         if: ${{ !env.ACT }}
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: act-macos-arm64
           path: dist/act_darwin_arm64/act
```
**`.github/workflows/promote.yml`** (vendored) — 11 changed lines

```diff
@@ -4,23 +4,20 @@ on:
     - cron: '0 2 1 * *'
   workflow_dispatch: {}

-env:
-  GO_VERSION: 1.18
-
 jobs:
   release:
     name: promote
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           fetch-depth: 0
           ref: master
           token: ${{ secrets.GORELEASER_GITHUB_TOKEN }}
-      - uses: fregante/setup-git-user@v1
-      - uses: actions/setup-go@v3
+      - uses: fregante/setup-git-user@v2
+      - uses: actions/setup-go@v5
         with:
-          go-version: ${{ env.GO_VERSION }}
+          go-version-file: go.mod
           check-latest: true
       - uses: actions/cache@v3
         if: ${{ !env.ACT }}
```
**`.github/workflows/release.yml`** (vendored) — 36 changed lines

```diff
@@ -4,20 +4,17 @@ on:
   tags:
     - v*

-env:
-  GO_VERSION: 1.18
-
 jobs:
   release:
     name: release
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           fetch-depth: 0
-      - uses: actions/setup-go@v3
+      - uses: actions/setup-go@v5
         with:
-          go-version: ${{ env.GO_VERSION }}
+          go-version-file: go.mod
           check-latest: true
       - uses: actions/cache@v3
         if: ${{ !env.ACT }}
@@ -27,15 +24,38 @@ jobs:
           restore-keys: |
             ${{ runner.os }}-go-
       - name: GoReleaser
-        uses: goreleaser/goreleaser-action@v3
+        uses: goreleaser/goreleaser-action@v5
         with:
           version: latest
-          args: release --rm-dist
+          args: release --clean
         env:
           GITHUB_TOKEN: ${{ secrets.GORELEASER_GITHUB_TOKEN }}
+      - name: Winget
+        uses: vedantmgoyal2009/winget-releaser@v2
+        with:
+          identifier: nektos.act
+          installers-regex: '_Windows_\w+\.zip$'
+          token: ${{ secrets.WINGET_TOKEN }}
       - name: Chocolatey
         uses: ./.github/actions/choco
         with:
           version: ${{ github.ref }}
           apiKey: ${{ secrets.CHOCO_APIKEY }}
           push: true
+      - name: GitHub CLI extension
+        uses: actions/github-script@v7
+        with:
+          github-token: ${{ secrets.GORELEASER_GITHUB_TOKEN }}
+          script: |
+            const mainRef = (await github.rest.git.getRef({
+              owner: 'nektos',
+              repo: 'gh-act',
+              ref: 'heads/main',
+            })).data;
+            console.log(mainRef);
+            github.rest.git.createRef({
+              owner: 'nektos',
+              repo: 'gh-act',
+              ref: context.ref,
+              sha: mainRef.object.sha,
+            });
```
**`.github/workflows/stale.yml`** (vendored) — 4 changed lines

```diff
@@ -8,7 +8,7 @@ jobs:
     name: Stale
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/stale@v6
+      - uses: actions/stale@v9
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          stale-issue-message: 'Issue is stale and will be closed in 14 days unless there is new activity'
@@ -19,5 +19,5 @@
          exempt-pr-labels: 'stale-exempt'
          remove-stale-when-updated: 'True'
          operations-per-run: 500
-          days-before-stale: 30
+          days-before-stale: 180
          days-before-close: 14
```
**`.golangci.yml`**

```diff
@@ -19,17 +19,15 @@ linters-settings:
     - pkg: 'github.com/stretchr/testify/assert'
       alias: assert
   depguard:
-    list-type: blacklist
-    include-go-root: true
-    packages:
-      - github.com/pkg/errors
-      - gotest.tools/v3/assert
-      - log
-    packages-with-error-message:
-      - github.com/pkg/errors: 'Please use "errors" package from standard library'
-      - gotest.tools/v3: 'Please keep tests unified using only github.com/stretchr/testify'
-      - log: 'Please keep logging unified using only github.com/sirupsen/logrus'
+    rules:
+      main:
+        deny:
+          - pkg: github.com/pkg/errors
+            desc: Please use "errors" package from standard library
+          - pkg: gotest.tools/v3
+            desc: Please keep tests unified using only github.com/stretchr/testify
+          - pkg: log
+            desc: Please keep logging unified using only github.com/sirupsen/logrus

 linters:
   enable:
     - megacheck
```
**`.goreleaser.yml`**

```diff
@@ -22,13 +22,13 @@ builds:
 checksum:
   name_template: 'checksums.txt'
 archives:
-  - name_template: '{{ .ProjectName }}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}{{ if .Mips }}_{{ .Mips }}{{ end }}'
-    replacements:
-      darwin: Darwin
-      linux: Linux
-      windows: Windows
-      386: i386
-      amd64: x86_64
+  - name_template: >-
+      {{ .ProjectName }}_
+      {{- title .Os }}_
+      {{- if eq .Arch "amd64" }}x86_64
+      {{- else if eq .Arch "386" }}i386
+      {{- else }}{{ .Arch }}{{ end }}
+      {{- if .Arm }}v{{ .Arm }}{{ end }}
     format_overrides:
       - goos: windows
         format: zip
```
**`.mega-linter.yml`**

```diff
@@ -14,7 +14,7 @@ DISABLE_LINTERS:
   - MARKDOWN_MARKDOWN_LINK_CHECK
   - REPOSITORY_CHECKOV
   - REPOSITORY_TRIVY
-FILTER_REGEX_EXCLUDE: (.*testdata/*|install.sh|pkg/container/docker_cli.go|pkg/container/DOCKER_LICENSE)
+FILTER_REGEX_EXCLUDE: (.*testdata/*|install.sh|pkg/container/docker_cli.go|pkg/container/DOCKER_LICENSE|VERSION)
 MARKDOWN_MARKDOWNLINT_CONFIG_FILE: .markdownlint.yml
 PARALLEL: false
 PRINT_ALPACA: false
```
**`.mergify.yml`**

```diff
@@ -71,6 +71,10 @@ pull_request_rules:
       - and:
           - 'approved-reviews-by=@nektos/act-maintainers'
           - '#approved-reviews-by>=2'
+      - and:
+          - 'author=@nektos/act-maintainers'
+          - 'approved-reviews-by=@nektos/act-maintainers'
+          - '#approved-reviews-by>=1'
       - -draft
       - -merged
       - -closed
```
**`.vscode/settings.json`** (vendored) — 1 changed line

```diff
@@ -1,6 +1,7 @@
 {
   "go.lintTool": "golangci-lint",
+  "go.lintFlags": ["--fix"],
   "go.testTimeout": "300s",
   "[json]": {
     "editor.defaultFormatter": "esbenp.prettier-vscode"
   },
```
**`CONTRIBUTING.md`**

```diff
@@ -35,7 +35,7 @@ New issues can be created with in our [GitHub repo](https://github.com/nektos/act)
 ### <a id="pr"></a>Pull Requests

-Pull requests should target the `master` branch. Please also reference the issue from the description of the pull request using [special keyword syntax](https://help.github.com/articles/closing-issues-via-commit-messages/) to auto close the issue when the PR is merged. For example, include the phrase `fixes #14` in the PR description to have issue #14 auto close.
+Pull requests should target the `master` branch. Please also reference the issue from the description of the pull request using [special keyword syntax](https://help.github.com/articles/closing-issues-via-commit-messages/) to auto close the issue when the PR is merged. For example, include the phrase `fixes #14` in the PR description to have issue #14 auto close. Please send documentation updates for the [act user guide](https://nektosact.com) to [nektos/act-docs](https://github.com/nektos/act-docs).

 ### <a id="style"></a> Styleguide

```
**`Makefile`** — 8 changed lines

```diff
@@ -96,12 +96,18 @@ ifneq ($(shell git status -s),)
 	@echo "Unable to promote a dirty workspace"
 	@exit 1
 endif
+	echo -n $(NEW_VERSION) > VERSION
+	git add VERSION
+	git commit -m "chore: bump VERSION to $(NEW_VERSION)"
 	git tag -a -m "releasing v$(NEW_VERSION)" v$(NEW_VERSION)
 	git push origin master
 	git push origin v$(NEW_VERSION)

 .PHONY: snapshot
 snapshot:
 	goreleaser build \
-		--rm-dist \
+		--clean \
 		--single-target \
 		--snapshot

 .PHONY: clean all
```
**`README.md`** — 153 changed lines

````diff
@@ -1,14 +1,25 @@
-## Naming rules:
+## Forking rules

 This is a custom fork of [nektos/act](https://github.com/nektos/act/), for the purpose of serving [act_runner](https://gitea.com/gitea/act_runner).

 It cannot be used as command line tool anymore, but only as a library.

+It's a soft fork, which means that it will tracking the latest release of nektos/act.
+
 Branches:

-- `main`: default branch, contains custom changes.
-- `nektos/master`: mirror for `master` of [nektos/act](https://github.com/nektos/act/).
+- `main`: default branch, contains custom changes, based on the latest release(not the latest of the master branch) of nektos/act.
+- `nektos/master`: mirror for the master branch of nektos/act.

 Tags:

-- `vX.YZ.*`: based on `nektos/vX.Y.Z`, contains custom changes.
 - `nektos/vX.Y.Z`: mirror for `vX.Y.Z` of [nektos/act](https://github.com/nektos/act/).
+- `vX.YZ.*`: based on `nektos/vX.Y.Z`, contains custom changes.
+- Examples:
+  - `nektos/v0.2.23` -> `v0.223.*`
+  - `nektos/v0.3.1` -> `v0.301.*`, not ~~`v0.31.*`~~
+  - `nektos/v0.10.1` -> `v0.1001.*`, not ~~`v0.101.*`~~
+  - `nektos/v0.3.100` -> not ~~`v0.3100.*`~~, I don't think it's really going to happen, if it does, we can find a way to handle it.

 ---

@@ -31,6 +42,10 @@


+# Act User Guide
+
+Please look at the [act user guide](https://nektosact.com) for more documentation.
+
 # Installation

 ## Necessary prerequisites for running `act`
@@ -85,6 +100,14 @@ choco install act-cli
 scoop install act
 ```

+### [Winget](https://learn.microsoft.com/en-us/windows/package-manager/) (Windows)
+
+[](https://repology.org/project/act-run-github-actions/versions)
+
+```shell
+winget install nektos.act
+```
+
 ### [AUR](https://aur.archlinux.org/packages/act/) (Linux)

 [](https://aur.archlinux.org/packages/act/)
@@ -122,6 +145,14 @@ Using the latest [Nix command](https://nixos.wiki/wiki/Nix_command), you can run
 nix run nixpkgs#act
 ```

+## Installation as GitHub CLI extension
+
+Act can be installed as a [GitHub CLI](https://cli.github.com/) extension:
+
+```sh
+gh extension install https://github.com/nektos/gh-act
+```
+
 ## Other install options

 ### Bash script
@@ -129,7 +160,7 @@ nix run nixpkgs#act
 Run this command in your terminal:

 ```shell
-curl https://raw.githubusercontent.com/nektos/act/master/install.sh | sudo bash
+curl -s https://raw.githubusercontent.com/nektos/act/master/install.sh | sudo bash
 ```

 ### Manual download
@@ -162,6 +193,9 @@ act pull_request
 # Run a specific job:
 act -j test

+# Collect artifacts to the /tmp/artifacts folder:
+act --artifact-server-path /tmp/artifacts
+
 # Run a job in a specific workflow (useful if you have duplicate job names)
 act -j lint -W .github/workflows/checks.yml

@@ -177,49 +211,6 @@ act -v
 When running `act` for the first time, it will ask you to choose image to be used as default.
 It will save that information to `~/.actrc`, please refer to [Configuration](#configuration) for more information about `.actrc` and to [Runners](#runners) for information about used/available Docker images.

-# Flags
-
-```none
-  -a, --actor string  user that triggered the event (default "nektos/act")
-      --replace-ghe-action-with-github-com  If you are using GitHub Enterprise Server and allow specified actions from GitHub (github.com), you can set actions on this. (e.g. --replace-ghe-action-with-github-com=github/super-linter)
-      --replace-ghe-action-token-with-github-com  If you are using replace-ghe-action-with-github-com and you want to use private actions on GitHub, you have to set personal access token
-      --artifact-server-path string  Defines the path where the artifact server stores uploads and retrieves downloads from. If not specified the artifact server will not start.
-      --artifact-server-port string  Defines the port where the artifact server listens (will only bind to localhost). (default "34567")
-  -b, --bind  bind working directory to container, rather than copy
-      --container-architecture string  Architecture which should be used to run containers, e.g.: linux/amd64. If not specified, will use host default architecture. Requires Docker server API Version 1.41+. Ignored on earlier Docker server platforms.
-      --container-cap-add stringArray  kernel capabilities to add to the workflow containers (e.g. --container-cap-add SYS_PTRACE)
-      --container-cap-drop stringArray  kernel capabilities to remove from the workflow containers (e.g. --container-cap-drop SYS_PTRACE)
-      --container-daemon-socket string  Path to Docker daemon socket which will be mounted to containers (default "/var/run/docker.sock")
-      --defaultbranch string  the name of the main branch
-      --detect-event  Use first event type from workflow as event that triggered the workflow
-  -C, --directory string  working directory (default ".")
-  -n, --dryrun  dryrun mode
-      --env stringArray  env to make available to actions with optional value (e.g. --env myenv=foo or --env myenv)
-      --env-file string  environment file to read and use as env in the containers (default ".env")
-  -e, --eventpath string  path to event JSON file
-      --github-instance string  GitHub instance to use. Don't use this if you are not using GitHub Enterprise Server. (default "github.com")
-  -g, --graph  draw workflows
-  -h, --help  help for act
-      --insecure-secrets  NOT RECOMMENDED! Doesn't hide secrets while printing logs.
-  -j, --job string  run job
-  -l, --list  list workflows
-      --no-recurse  Flag to disable running workflows from subdirectories of specified path in '--workflows'/'-W' flag
-  -P, --platform stringArray  custom image to use per platform (e.g. -P ubuntu-18.04=nektos/act-environments-ubuntu:18.04)
-      --privileged  use privileged mode
-  -p, --pull  pull docker image(s) even if already present
-  -q, --quiet  disable logging of output from steps
-      --rebuild  rebuild local action docker image(s) even if already present
-  -r, --reuse  don't remove container(s) on successfully completed workflow(s) to maintain state between runs
-      --rm  automatically remove container(s)/volume(s) after a workflow(s) failure
-  -s, --secret stringArray  secret to make available to actions with optional value (e.g. -s mysecret=foo or -s mysecret)
-      --secret-file string  file with list of secrets to read from (e.g. --secret-file .secrets) (default ".secrets")
-      --use-gitignore  Controls whether paths specified in .gitignore should be copied into container (default true)
-      --userns string  user namespace to use
-  -v, --verbose  verbose output
-  -w, --watch  watch the contents of the local repo and run when files change
-  -W, --workflows string  path to workflow file(s) (default "./.github/workflows/")
-```
-
 ## `GITHUB_TOKEN`

 GitHub [automatically provides](https://docs.github.com/en/actions/security-guides/automatic-token-authentication#about-the-github_token-secret) a `GITHUB_TOKEN` secret when running workflows inside GitHub.
@@ -230,6 +221,12 @@
 act -s GITHUB_TOKEN=[insert token or leave blank and omit equals for secure input]
 ```

+If [GitHub CLI](https://cli.github.com/) is installed, the [`gh auth token`](https://cli.github.com/manual/gh_auth_token) command can be used to automatically pass the token to act
+
+```bash
+act -s GITHUB_TOKEN="$(gh auth token)"
+```
+
 **WARNING**: `GITHUB_TOKEN` will be logged in shell history if not inserted through secure input or (depending on your shell config) the command is prefixed with a whitespace.

 # Known Issues
@@ -304,6 +301,15 @@ If you need an environment that works just like the corresponding GitHub runner

 - [`catthehacker/ubuntu:full-*`](https://github.com/catthehacker/docker_images/pkgs/container/ubuntu) - built from Packer template provided by GitHub, see [catthehacker/virtual-environments-fork](https://github.com/catthehacker/virtual-environments-fork) or [catthehacker/docker_images](https://github.com/catthehacker/docker_images) for more information

+## Using local runner images
+
+The `--pull` flag is set to true by default due to a breaking on older default docker images. This would pull the docker image everytime act is executed.
+
+Set `--pull` to false if a local docker image is needed
+
+```sh
+act --pull=false
+```
+
 ## Use an alternative runner image

 To use a different image for the runner, use the `-P` option.
@@ -334,6 +340,14 @@
 - `act --secret-file my.secrets` - load secrets values from `my.secrets` file.
   - secrets file format is the same as `.env` format

+# Vars
+
+To run `act` with repository variables that are acessible inside the workflow via ${{ vars.VARIABLE }}, you can enter them interactively or load them from a file. The following options are available for providing github repository variables:
+
+- `act --var VARIABLE=somevalue` - use `somevalue` as the value for `VARIABLE`.
+- `act --var-file my.variables` - load variables values from `my.variables` file.
+  - variables file format is the same as `.env` format
+
 # Configuration

 You can provide default configuration flags to `act` by either creating a `./.actrc` or a `~/.actrc` file. Any flags in the files will be applied before any flags provided directly on the command line. For example, a file like below will always use the `nektos/act-environments-ubuntu:18.04` image for the `ubuntu-latest` runner:
@@ -356,10 +370,41 @@ MY_ENV_VAR=MY_ENV_VAR_VALUE
 MY_2ND_ENV_VAR="my 2nd env var value"
 ```

+# Skipping jobs
+
+You cannot use the `env` context in job level if conditions, but you can add a custom event property to the `github` context. You can use this method also on step level if conditions.
+
+```yml
+on: push
+jobs:
+  deploy:
+    if: ${{ !github.event.act }} # skip during local actions testing
+    runs-on: ubuntu-latest
+    steps:
+    - run: exit 0
+```
+
+And use this `event.json` file with act otherwise the Job will run:
+
+```json
+{
+    "act": true
+}
+```
+
+Run act like
+
+```sh
+act -e event.json
+```
+
+_Hint: you can add / append `-e event.json` as a line into `./.actrc`_
+
 # Skipping steps

 Act adds a special environment variable `ACT` that can be used to skip a step that you
 don't want to run locally. E.g. a step that posts a Slack message or bumps a version number.
+**You cannot use this method in job level if conditions, see [Skipping jobs](#skipping-jobs)**

 ```yml
 - name: Some step
@@ -391,7 +436,7 @@ act pull_request -e pull-request.json

 Act will properly provide `github.head_ref` and `github.base_ref` to the action as expected.

-## Pass Inputs to Manually Triggered Workflows
+# Pass Inputs to Manually Triggered Workflows

 Example workflow file

@@ -417,6 +462,14 @@ jobs:
         echo "Hello ${{ github.event.inputs.NAME }} and ${{ github.event.inputs.SOME_VALUE }}!"
 ```

+## via input or input-file flag
+
+- `act --input NAME=somevalue` - use `somevalue` as the value for `NAME` input.
+- `act --input-file my.input` - load input values from `my.input` file.
+  - input file format is the same as `.env` format
+
+## via JSON
+
 Example JSON payload file conveniently named `payload.json`

 ```json
@@ -453,7 +506,7 @@

 ## Manually building from source

-- Install Go tools 1.18+ - (<https://golang.org/doc/install>)
+- Install Go tools 1.20+ - (<https://golang.org/doc/install>)
 - Clone this repo `git clone git@github.com:nektos/act.git`
 - Run unit tests with `make test`
 - Build and install: `make install`
````
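The new Vars section of the README pairs with the `--var`/`--var-file` flags added in `cmd/root.go` below. A small end-to-end sketch under those flags (the variable name and workflow are illustrative):

```yaml
# Sketch: a workflow step reading a repository variable supplied to act
# via `act --var GREETING=hello` or a `.vars` file in .env format.
on: push
jobs:
  show-var: # hypothetical job name
    runs-on: ubuntu-latest
    steps:
      - run: echo "${{ vars.GREETING }}"  # GREETING comes from --var/--var-file
```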
**`cmd/dir.go`** — new file (27 lines)

```go
package cmd

import (
    "os"
    "path/filepath"

    log "github.com/sirupsen/logrus"
)

var (
    UserHomeDir  string
    CacheHomeDir string
)

func init() {
    home, err := os.UserHomeDir()
    if err != nil {
        log.Fatal(err)
    }
    UserHomeDir = home

    if v := os.Getenv("XDG_CACHE_HOME"); v != "" {
        CacheHomeDir = v
    } else {
        CacheHomeDir = filepath.Join(UserHomeDir, ".cache")
    }
}
```
**`cmd/input.go`** — 25 changed lines

```diff
@@ -16,20 +16,25 @@ type Input struct {
 	reuseContainers                    bool
 	bindWorkdir                        bool
 	secrets                            []string
+	vars                               []string
 	envs                               []string
+	inputs                             []string
 	platforms                          []string
 	dryrun                             bool
 	forcePull                          bool
 	forceRebuild                       bool
 	noOutput                           bool
 	envfile                            string
+	inputfile                          string
 	secretfile                         string
+	varfile                            string
 	insecureSecrets                    bool
 	defaultBranch                      string
 	privileged                         bool
 	usernsMode                         string
 	containerArchitecture              string
 	containerDaemonSocket              string
+	containerOptions                   string
 	noWorkflowRecurse                  bool
 	useGitIgnore                       bool
 	githubInstance                     string
@@ -37,12 +42,23 @@ type Input struct {
 	containerCapDrop                   []string
 	autoRemove                         bool
 	artifactServerPath                 string
+	artifactServerAddr                 string
 	artifactServerPort                 string
+	noCacheServer                      bool
+	cacheServerPath                    string
+	cacheServerAddr                    string
+	cacheServerPort                    uint16
 	jsonLogger                         bool
 	noSkipCheckout                     bool
 	remoteName                         string
 	replaceGheActionWithGithubCom      []string
 	replaceGheActionTokenWithGithubCom string
 	matrix                             []string
+	actionCachePath                    string
+	actionOfflineMode                  bool
+	logPrefixJobID                     bool
+	networkName                        string
+	useNewActionCache                  bool
 }

 func (i *Input) resolve(path string) string {
@@ -69,6 +85,10 @@ func (i *Input) Secretfile() string {
 	return i.resolve(i.secretfile)
 }

+func (i *Input) Varfile() string {
+	return i.resolve(i.varfile)
+}
+
 // Workdir returns path to workdir
 func (i *Input) Workdir() string {
 	return i.resolve(".")
@@ -83,3 +103,8 @@ func (i *Input) WorkflowsPath() string {
 func (i *Input) EventPath() string {
 	return i.resolve(i.eventPath)
 }
+
+// Inputfile returns the path to the input file
+func (i *Input) Inputfile() string {
+	return i.resolve(i.inputfile)
+}
```
**`cmd/notices.go`** — new file (140 lines)

```go
package cmd

import (
    "encoding/json"
    "fmt"
    "net/http"
    "net/url"
    "os"
    "path/filepath"
    "runtime"
    "strings"
    "time"

    log "github.com/sirupsen/logrus"
)

type Notice struct {
    Level   string `json:"level"`
    Message string `json:"message"`
}

func displayNotices(input *Input) {
    select {
    case notices := <-noticesLoaded:
        if len(notices) > 0 {
            noticeLogger := log.New()
            if input.jsonLogger {
                noticeLogger.SetFormatter(&log.JSONFormatter{})
            } else {
                noticeLogger.SetFormatter(&log.TextFormatter{
                    DisableQuote:     true,
                    DisableTimestamp: true,
                    PadLevelText:     true,
                })
            }

            fmt.Printf("\n")
            for _, notice := range notices {
                level, err := log.ParseLevel(notice.Level)
                if err != nil {
                    level = log.InfoLevel
                }
                noticeLogger.Log(level, notice.Message)
            }
        }
    case <-time.After(time.Second * 1):
        log.Debugf("Timeout waiting for notices")
    }
}

var noticesLoaded = make(chan []Notice)

func loadVersionNotices(version string) {
    go func() {
        noticesLoaded <- getVersionNotices(version)
    }()
}

const NoticeURL = "https://api.nektosact.com/notices"

func getVersionNotices(version string) []Notice {
    if os.Getenv("ACT_DISABLE_VERSION_CHECK") == "1" {
        return nil
    }

    noticeURL, err := url.Parse(NoticeURL)
    if err != nil {
        log.Error(err)
        return nil
    }
    query := noticeURL.Query()
    query.Add("os", runtime.GOOS)
    query.Add("arch", runtime.GOARCH)
    query.Add("version", version)

    noticeURL.RawQuery = query.Encode()

    client := &http.Client{}
    req, err := http.NewRequest("GET", noticeURL.String(), nil)
    if err != nil {
        log.Debug(err)
        return nil
    }

    etag := loadNoticesEtag()
    if etag != "" {
        log.Debugf("Conditional GET for notices etag=%s", etag)
        req.Header.Set("If-None-Match", etag)
    }

    resp, err := client.Do(req)
    if err != nil {
        log.Debug(err)
        return nil
    }

    newEtag := resp.Header.Get("Etag")
    if newEtag != "" {
        log.Debugf("Saving notices etag=%s", newEtag)
        saveNoticesEtag(newEtag)
    }

    defer resp.Body.Close()
    notices := []Notice{}
    if resp.StatusCode == 304 {
        log.Debug("No new notices")
        return nil
    }
    if err := json.NewDecoder(resp.Body).Decode(&notices); err != nil {
        log.Debug(err)
        return nil
    }

    return notices
}

func loadNoticesEtag() string {
    p := etagPath()
    content, err := os.ReadFile(p)
    if err != nil {
        log.Debugf("Unable to load etag from %s: %e", p, err)
    }
    return strings.TrimSuffix(string(content), "\n")
}

func saveNoticesEtag(etag string) {
    p := etagPath()
    err := os.WriteFile(p, []byte(strings.TrimSuffix(etag, "\n")), 0o600)
    if err != nil {
        log.Debugf("Unable to save etag to %s: %e", p, err)
    }
}

func etagPath() string {
    dir := filepath.Join(CacheHomeDir, "act")
    if err := os.MkdirAll(dir, 0o777); err != nil {
        log.Fatal(err)
    }
    return filepath.Join(dir, ".notices.etag")
}
```
**`cmd/platforms.go`**

```diff
@@ -15,7 +15,7 @@ func (i *Input) newPlatforms() map[string]string {
 	for _, p := range i.platforms {
 		pParts := strings.Split(p, "=")
 		if len(pParts) == 2 {
-			platforms[pParts[0]] = pParts[1]
+			platforms[strings.ToLower(pParts[0])] = pParts[1]
 		}
 	}
 	return platforms
```
390
cmd/root.go
390
cmd/root.go
@@ -12,13 +12,16 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/AlecAivazis/survey/v2"
|
||||
"github.com/adrg/xdg"
|
||||
"github.com/andreaskoch/go-fswatch"
|
||||
docker_container "github.com/docker/docker/api/types/container"
|
||||
"github.com/joho/godotenv"
|
||||
"github.com/mitchellh/go-homedir"
|
||||
gitignore "github.com/sabhiram/go-gitignore"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"gopkg.in/yaml.v3"
|
||||
|
||||
"github.com/nektos/act/pkg/artifactcache"
|
||||
"github.com/nektos/act/pkg/artifacts"
|
||||
"github.com/nektos/act/pkg/common"
|
||||
"github.com/nektos/act/pkg/container"
|
||||
@@ -30,13 +33,14 @@ import (
|
||||
func Execute(ctx context.Context, version string) {
|
||||
input := new(Input)
|
||||
var rootCmd = &cobra.Command{
|
||||
Use: "act [event name to run] [flags]\n\nIf no event name passed, will default to \"on: push\"\nIf actions handles only one event it will be used as default instead of \"on: push\"",
|
||||
Short: "Run GitHub actions locally by specifying the event name (e.g. `push`) or an action name directly.",
|
||||
Args: cobra.MaximumNArgs(1),
|
||||
RunE: newRunCommand(ctx, input),
|
||||
PersistentPreRun: setupLogging,
|
||||
Version: version,
|
||||
SilenceUsage: true,
|
||||
Use: "act [event name to run] [flags]\n\nIf no event name passed, will default to \"on: push\"\nIf actions handles only one event it will be used as default instead of \"on: push\"",
|
||||
Short: "Run GitHub actions locally by specifying the event name (e.g. `push`) or an action name directly.",
|
||||
Args: cobra.MaximumNArgs(1),
|
||||
RunE: newRunCommand(ctx, input),
|
||||
PersistentPreRun: setup(input),
|
||||
PersistentPostRun: cleanup(input),
|
||||
Version: version,
|
||||
SilenceUsage: true,
|
||||
}
|
||||
rootCmd.Flags().BoolP("watch", "w", false, "watch the contents of the local repo and run when files change")
|
||||
rootCmd.Flags().BoolP("list", "l", false, "list workflows")
|
||||
@@ -46,12 +50,14 @@ func Execute(ctx context.Context, version string) {
|
||||
|
||||
rootCmd.Flags().StringVar(&input.remoteName, "remote-name", "origin", "git remote name that will be used to retrieve url of git repo")
|
||||
rootCmd.Flags().StringArrayVarP(&input.secrets, "secret", "s", []string{}, "secret to make available to actions with optional value (e.g. -s mysecret=foo or -s mysecret)")
|
||||
rootCmd.Flags().StringArrayVar(&input.vars, "var", []string{}, "variable to make available to actions with optional value (e.g. --var myvar=foo or --var myvar)")
|
||||
rootCmd.Flags().StringArrayVarP(&input.envs, "env", "", []string{}, "env to make available to actions with optional value (e.g. --env myenv=foo or --env myenv)")
|
||||
rootCmd.Flags().StringArrayVarP(&input.inputs, "input", "", []string{}, "action input to make available to actions (e.g. --input myinput=foo)")
|
||||
rootCmd.Flags().StringArrayVarP(&input.platforms, "platform", "P", []string{}, "custom image to use per platform (e.g. -P ubuntu-18.04=nektos/act-environments-ubuntu:18.04)")
|
||||
rootCmd.Flags().BoolVarP(&input.reuseContainers, "reuse", "r", false, "don't remove container(s) on successfully completed workflow(s) to maintain state between runs")
|
||||
rootCmd.Flags().BoolVarP(&input.bindWorkdir, "bind", "b", false, "bind working directory to container, rather than copy")
|
||||
rootCmd.Flags().BoolVarP(&input.forcePull, "pull", "p", false, "pull docker image(s) even if already present")
|
||||
rootCmd.Flags().BoolVarP(&input.forceRebuild, "rebuild", "", false, "rebuild local action docker image(s) even if already present")
|
||||
rootCmd.Flags().BoolVarP(&input.forcePull, "pull", "p", true, "pull docker image(s) even if already present")
|
||||
rootCmd.Flags().BoolVarP(&input.forceRebuild, "rebuild", "", true, "rebuild local action docker image(s) even if already present")
|
||||
rootCmd.Flags().BoolVarP(&input.autodetectEvent, "detect-event", "", false, "Use first event type from workflow as event that triggered the workflow")
|
||||
rootCmd.Flags().StringVarP(&input.eventPath, "eventpath", "e", "", "path to event JSON file")
|
||||
rootCmd.Flags().StringVar(&input.defaultBranch, "defaultbranch", "", "the name of the main branch")
|
||||
@@ -63,23 +69,37 @@ func Execute(ctx context.Context, version string) {
|
||||
rootCmd.Flags().BoolVar(&input.autoRemove, "rm", false, "automatically remove container(s)/volume(s) after a workflow(s) failure")
|
||||
rootCmd.Flags().StringArrayVarP(&input.replaceGheActionWithGithubCom, "replace-ghe-action-with-github-com", "", []string{}, "If you are using GitHub Enterprise Server and allow specified actions from GitHub (github.com), you can set actions on this. (e.g. --replace-ghe-action-with-github-com =github/super-linter)")
|
||||
rootCmd.Flags().StringVar(&input.replaceGheActionTokenWithGithubCom, "replace-ghe-action-token-with-github-com", "", "If you are using replace-ghe-action-with-github-com and you want to use private actions on GitHub, you have to set personal access token")
|
||||
rootCmd.Flags().StringArrayVarP(&input.matrix, "matrix", "", []string{}, "specify which matrix configuration to include (e.g. --matrix java:13")
|
||||
rootCmd.PersistentFlags().StringVarP(&input.actor, "actor", "a", "nektos/act", "user that triggered the event")
|
||||
rootCmd.PersistentFlags().StringVarP(&input.workflowsPath, "workflows", "W", "./.github/workflows/", "path to workflow file(s)")
|
||||
rootCmd.PersistentFlags().BoolVarP(&input.noWorkflowRecurse, "no-recurse", "", false, "Flag to disable running workflows from subdirectories of specified path in '--workflows'/'-W' flag")
|
||||
rootCmd.PersistentFlags().StringVarP(&input.workdir, "directory", "C", ".", "working directory")
|
||||
rootCmd.PersistentFlags().BoolP("verbose", "v", false, "verbose output")
|
||||
rootCmd.PersistentFlags().BoolVar(&input.jsonLogger, "json", false, "Output logs in json format")
|
||||
rootCmd.PersistentFlags().BoolVar(&input.logPrefixJobID, "log-prefix-job-id", false, "Output the job id within non-json logs instead of the entire name")
|
||||
rootCmd.PersistentFlags().BoolVarP(&input.noOutput, "quiet", "q", false, "disable logging of output from steps")
|
||||
rootCmd.PersistentFlags().BoolVarP(&input.dryrun, "dryrun", "n", false, "dryrun mode")
|
||||
rootCmd.PersistentFlags().StringVarP(&input.secretfile, "secret-file", "", ".secrets", "file with list of secrets to read from (e.g. --secret-file .secrets)")
|
||||
rootCmd.PersistentFlags().StringVarP(&input.varfile, "var-file", "", ".vars", "file with list of vars to read from (e.g. --var-file .vars)")
|
||||
rootCmd.PersistentFlags().BoolVarP(&input.insecureSecrets, "insecure-secrets", "", false, "NOT RECOMMENDED! Doesn't hide secrets while printing logs.")
|
||||
rootCmd.PersistentFlags().StringVarP(&input.envfile, "env-file", "", ".env", "environment file to read and use as env in the containers")
|
||||
rootCmd.PersistentFlags().StringVarP(&input.inputfile, "input-file", "", ".input", "input file to read and use as action input")
|
||||
rootCmd.PersistentFlags().StringVarP(&input.containerArchitecture, "container-architecture", "", "", "Architecture which should be used to run containers, e.g.: linux/amd64. If not specified, will use host default architecture. Requires Docker server API Version 1.41+. Ignored on earlier Docker server platforms.")
|
||||
rootCmd.PersistentFlags().StringVarP(&input.containerDaemonSocket, "container-daemon-socket", "", "/var/run/docker.sock", "Path to Docker daemon socket which will be mounted to containers")
|
||||
rootCmd.PersistentFlags().StringVarP(&input.containerDaemonSocket, "container-daemon-socket", "", "", "URI to Docker Engine socket (e.g.: unix://~/.docker/run/docker.sock or - to disable bind mounting the socket)")
|
||||
rootCmd.PersistentFlags().StringVarP(&input.containerOptions, "container-options", "", "", "Custom docker container options for the job container without an options property in the job definition")
|
||||
rootCmd.PersistentFlags().StringVarP(&input.githubInstance, "github-instance", "", "github.com", "GitHub instance to use. Don't use this if you are not using GitHub Enterprise Server.")
|
||||
rootCmd.PersistentFlags().StringVarP(&input.artifactServerPath, "artifact-server-path", "", "", "Defines the path where the artifact server stores uploads and retrieves downloads from. If not specified the artifact server will not start.")
|
||||
rootCmd.PersistentFlags().StringVarP(&input.artifactServerPort, "artifact-server-port", "", "34567", "Defines the port where the artifact server listens (will only bind to localhost).")
|
||||
rootCmd.PersistentFlags().StringVarP(&input.artifactServerAddr, "artifact-server-addr", "", common.GetOutboundIP().String(), "Defines the address to which the artifact server binds.")
|
||||
rootCmd.PersistentFlags().StringVarP(&input.artifactServerPort, "artifact-server-port", "", "34567", "Defines the port where the artifact server listens.")
|
||||
rootCmd.PersistentFlags().BoolVarP(&input.noSkipCheckout, "no-skip-checkout", "", false, "Do not skip actions/checkout")
|
||||
rootCmd.PersistentFlags().BoolVarP(&input.noCacheServer, "no-cache-server", "", false, "Disable cache server")
|
||||
rootCmd.PersistentFlags().StringVarP(&input.cacheServerPath, "cache-server-path", "", filepath.Join(CacheHomeDir, "actcache"), "Defines the path where the cache server stores caches.")
|
||||
rootCmd.PersistentFlags().StringVarP(&input.cacheServerAddr, "cache-server-addr", "", common.GetOutboundIP().String(), "Defines the address to which the cache server binds.")
|
||||
rootCmd.PersistentFlags().Uint16VarP(&input.cacheServerPort, "cache-server-port", "", 0, "Defines the port where the artifact server listens. 0 means a randomly available port.")
|
||||
rootCmd.PersistentFlags().StringVarP(&input.actionCachePath, "action-cache-path", "", filepath.Join(CacheHomeDir, "act"), "Defines the path where the actions get cached and host workspaces created.")
|
||||
rootCmd.PersistentFlags().BoolVarP(&input.actionOfflineMode, "action-offline-mode", "", false, "If action contents exists, it will not be fetch and pull again. If turn on this,will turn off force pull")
|
||||
rootCmd.PersistentFlags().StringVarP(&input.networkName, "network", "", "host", "Sets a docker network name. Defaults to host.")
|
||||
rootCmd.PersistentFlags().BoolVarP(&input.useNewActionCache, "use-new-action-cache", "", false, "Enable using the new Action Cache for storing Actions locally")
|
||||
rootCmd.SetArgs(args())
|
||||
|
||||
if err := rootCmd.Execute(); err != nil {
|
||||
@@ -87,25 +107,50 @@ func Execute(ctx context.Context, version string) {
}
}

// Return locations where Act's config can be found in order: XDG spec, .actrc in HOME directory, .actrc in invocation directory
func configLocations() []string {
home, err := homedir.Dir()
configFileName := ".actrc"

homePath := filepath.Join(UserHomeDir, configFileName)
invocationPath := filepath.Join(".", configFileName)

// Though named xdg, adrg's lib supports macOS and Windows config paths as well
// It also takes care of creating the parent folder, so we don't need to bother later
specPath, err := xdg.ConfigFile("act/actrc")
if err != nil {
log.Fatal(err)
specPath = homePath
}

// reference: https://specifications.freedesktop.org/basedir-spec/latest/ar01s03.html
var actrcXdg string
if xdg, ok := os.LookupEnv("XDG_CONFIG_HOME"); ok && xdg != "" {
actrcXdg = filepath.Join(xdg, ".actrc")
} else {
actrcXdg = filepath.Join(home, ".config", ".actrc")
// This order should be enforced since the survey part relies on it
return []string{specPath, homePath, invocationPath}
}

var commonSocketPaths = []string{
"/var/run/docker.sock",
"/run/podman/podman.sock",
"$HOME/.colima/docker.sock",
"$XDG_RUNTIME_DIR/docker.sock",
"$XDG_RUNTIME_DIR/podman/podman.sock",
`\\.\pipe\docker_engine`,
"$HOME/.docker/run/docker.sock",
}

// returns the socket path, or false if none was found
func socketLocation() (string, bool) {
if dockerHost, exists := os.LookupEnv("DOCKER_HOST"); exists {
return dockerHost, true
}

return []string{
filepath.Join(home, ".actrc"),
actrcXdg,
filepath.Join(".", ".actrc"),
for _, p := range commonSocketPaths {
if _, err := os.Lstat(os.ExpandEnv(p)); err == nil {
if strings.HasPrefix(p, `\\.\`) {
return "npipe://" + filepath.ToSlash(os.ExpandEnv(p)), true
}
return "unix://" + filepath.ToSlash(os.ExpandEnv(p)), true
}
}

return "", false
}
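The lookup order matters here: an explicit DOCKER_HOST always wins, and the well-known socket paths are only probed as a fallback. A minimal sketch of how a caller sees this behaviour (the environment value is illustrative):

// Hypothetical illustration: DOCKER_HOST short-circuits the probing loop.
os.Setenv("DOCKER_HOST", "unix:///var/run/docker.sock")
if socket, ok := socketLocation(); ok {
	fmt.Println(socket) // prints unix:///var/run/docker.sock; commonSocketPaths is never consulted
}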
|
||||
func args() []string {
|
||||
@@ -121,15 +166,6 @@ func args() []string {
|
||||
}
|
||||
|
||||
func bugReport(ctx context.Context, version string) error {
|
||||
var commonSocketPaths = []string{
|
||||
"/var/run/docker.sock",
|
||||
"/var/run/podman/podman.sock",
|
||||
"$HOME/.colima/docker.sock",
|
||||
"$XDG_RUNTIME_DIR/docker.sock",
|
||||
`\\.\pipe\docker_engine`,
|
||||
"$HOME/.docker/run/docker.sock",
|
||||
}
|
||||
|
||||
sprintf := func(key, val string) string {
|
||||
return fmt.Sprintf("%-24s%s\n", key, val)
|
||||
}
|
||||
@@ -140,19 +176,20 @@ func bugReport(ctx context.Context, version string) error {
|
||||
report += sprintf("NumCPU:", fmt.Sprint(runtime.NumCPU()))
|
||||
|
||||
var dockerHost string
|
||||
if dockerHost = os.Getenv("DOCKER_HOST"); dockerHost == "" {
|
||||
dockerHost = "DOCKER_HOST environment variable is unset/empty."
|
||||
var exists bool
|
||||
if dockerHost, exists = os.LookupEnv("DOCKER_HOST"); !exists {
|
||||
dockerHost = "DOCKER_HOST environment variable is not set"
|
||||
} else if dockerHost == "" {
|
||||
dockerHost = "DOCKER_HOST environment variable is empty."
|
||||
}
|
||||
|
||||
report += sprintf("Docker host:", dockerHost)
|
||||
report += fmt.Sprintln("Sockets found:")
|
||||
for _, p := range commonSocketPaths {
|
||||
if strings.HasPrefix(p, `$`) {
|
||||
v := strings.Split(p, `/`)[0]
|
||||
p = strings.Replace(p, v, os.Getenv(strings.TrimPrefix(v, `$`)), 1)
|
||||
}
|
||||
if _, err := os.Stat(p); err != nil {
|
||||
if _, err := os.Lstat(os.ExpandEnv(p)); err != nil {
|
||||
continue
|
||||
} else if _, err := os.Stat(os.ExpandEnv(p)); err != nil {
|
||||
report += fmt.Sprintf("\t%s(broken)\n", p)
|
||||
} else {
|
||||
report += fmt.Sprintf("\t%s\n", p)
|
||||
}
|
||||
@@ -231,7 +268,8 @@ func readArgsFile(file string, split bool) []string {
|
||||
}()
|
||||
scanner := bufio.NewScanner(f)
|
||||
for scanner.Scan() {
|
||||
arg := strings.TrimSpace(scanner.Text())
|
||||
arg := os.ExpandEnv(strings.TrimSpace(scanner.Text()))
|
||||
|
||||
if strings.HasPrefix(arg, "-") && split {
|
||||
args = append(args, regexp.MustCompile(`\s`).Split(arg, 2)...)
|
||||
} else if !split {
|
||||
@@ -241,16 +279,55 @@ func readArgsFile(file string, split bool) []string {
|
||||
return args
|
||||
}
|
||||
|
||||
func setupLogging(cmd *cobra.Command, _ []string) {
|
||||
verbose, _ := cmd.Flags().GetBool("verbose")
|
||||
if verbose {
|
||||
log.SetLevel(log.DebugLevel)
|
||||
func setup(_ *Input) func(*cobra.Command, []string) {
|
||||
return func(cmd *cobra.Command, _ []string) {
|
||||
verbose, _ := cmd.Flags().GetBool("verbose")
|
||||
if verbose {
|
||||
log.SetLevel(log.DebugLevel)
|
||||
}
|
||||
loadVersionNotices(cmd.Version)
|
||||
}
|
||||
}
|
||||
|
||||
func cleanup(inputs *Input) func(*cobra.Command, []string) {
|
||||
return func(cmd *cobra.Command, _ []string) {
|
||||
displayNotices(inputs)
|
||||
}
|
||||
}
|
||||
|
||||
func parseEnvs(env []string) map[string]string {
|
||||
envs := make(map[string]string, len(env))
|
||||
for _, envVar := range env {
|
||||
e := strings.SplitN(envVar, `=`, 2)
|
||||
if len(e) == 2 {
|
||||
envs[e[0]] = e[1]
|
||||
} else {
|
||||
envs[e[0]] = ""
|
||||
}
|
||||
}
|
||||
return envs
|
||||
}
|
||||
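parseEnvs splits each entry on the first "=" only, so values may themselves contain "=", and entries without "=" map to the empty string. A quick illustration:

envs := parseEnvs([]string{"FOO=bar", "CONN=a=b", "EMPTY"})
// envs == map[string]string{"FOO": "bar", "CONN": "a=b", "EMPTY": ""}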
|
||||
func readYamlFile(file string) (map[string]string, error) {
|
||||
content, err := os.ReadFile(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ret := map[string]string{}
|
||||
if err = yaml.Unmarshal(content, &ret); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func readEnvs(path string, envs map[string]string) bool {
|
||||
if _, err := os.Stat(path); err == nil {
|
||||
env, err := godotenv.Read(path)
|
||||
var env map[string]string
|
||||
if ext := filepath.Ext(path); ext == ".yml" || ext == ".yaml" {
|
||||
env, err = readYamlFile(path)
|
||||
} else {
|
||||
env, err = godotenv.Read(path)
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatalf("Error loading from %s: %v", path, err)
|
||||
}
|
||||
@@ -262,6 +339,35 @@ func readEnvs(path string, envs map[string]string) bool {
|
||||
return false
|
||||
}
|
||||
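readEnvs now dispatches on the file extension: .yml/.yaml files go through readYamlFile, everything else is parsed as dotenv via godotenv.Read. A sketch of the two accepted shapes (file names and contents are illustrative):

// my-env.yml contains "FOO: bar"  -> YAML branch
// .env contains "FOO=bar"         -> dotenv branch
envs := map[string]string{}
_ = readEnvs("my-env.yml", envs) // returns true only if the file exists
_ = readEnvs(".env", envs)       // either way, envs["FOO"] == "bar" afterwards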
|
||||
func parseMatrix(matrix []string) map[string]map[string]bool {
|
||||
// each matrix entry should be of the form - string:string
|
||||
r := regexp.MustCompile(":")
|
||||
matrixes := make(map[string]map[string]bool)
|
||||
for _, m := range matrix {
|
||||
matrix := r.Split(m, 2)
|
||||
if len(matrix) < 2 {
|
||||
log.Fatalf("Invalid matrix format. Failed to parse %s", m)
|
||||
}
|
||||
if _, ok := matrixes[matrix[0]]; !ok {
|
||||
matrixes[matrix[0]] = make(map[string]bool)
|
||||
}
|
||||
matrixes[matrix[0]][matrix[1]] = true
|
||||
}
|
||||
return matrixes
|
||||
}
|
||||
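Each --matrix value is split on the first ":" into a dimension and a value, and repeated values for one dimension accumulate into a set. For example:

m := parseMatrix([]string{"os:ubuntu-latest", "os:macos-13", "node:18"})
// m == map[string]map[string]bool{
//   "os":   {"ubuntu-latest": true, "macos-13": true},
//   "node": {"18": true},
// }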
|
||||
func isDockerHostURI(daemonPath string) bool {
|
||||
if protoIndex := strings.Index(daemonPath, "://"); protoIndex != -1 {
|
||||
scheme := daemonPath[:protoIndex]
|
||||
if strings.IndexFunc(scheme, func(r rune) bool {
|
||||
return (r < 'a' || r > 'z') && (r < 'A' || r > 'Z')
|
||||
}) == -1 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
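The check accepts any scheme made purely of ASCII letters followed by "://", which is how daemon URIs are told apart from plain file paths and the "-" sentinel:

isDockerHostURI("unix:///var/run/docker.sock")    // true
isDockerHostURI("npipe:////./pipe/docker_engine") // true
isDockerHostURI("/var/run/docker.sock")           // false: no scheme
isDockerHostURI("-")                              // false: means "don't mount the socket"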
|
||||
//nolint:gocyclo
|
||||
func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []string) error {
|
||||
return func(cmd *cobra.Command, args []string) error {
|
||||
@@ -273,33 +379,56 @@ func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []str
|
||||
return bugReport(ctx, cmd.Version)
|
||||
}
|
||||
|
||||
// Prefer DOCKER_HOST, don't override it
|
||||
socketPath, hasDockerHost := os.LookupEnv("DOCKER_HOST")
|
||||
if !hasDockerHost {
|
||||
// a "-" in containerDaemonSocket means don't mount; preserve this value
// otherwise, if input.containerDaemonSocket is a plain file path, don't use it as socketPath
|
||||
skipMount := input.containerDaemonSocket == "-" || !isDockerHostURI(input.containerDaemonSocket)
|
||||
if input.containerDaemonSocket != "" && !skipMount {
|
||||
socketPath = input.containerDaemonSocket
|
||||
} else {
|
||||
socket, found := socketLocation()
|
||||
if !found {
|
||||
log.Errorln("daemon Docker Engine socket not found and containerDaemonSocket option was not set")
|
||||
} else {
|
||||
socketPath = socket
|
||||
}
|
||||
if !skipMount {
|
||||
input.containerDaemonSocket = socketPath
|
||||
}
|
||||
}
|
||||
os.Setenv("DOCKER_HOST", socketPath)
|
||||
}
|
||||
|
||||
if runtime.GOOS == "darwin" && runtime.GOARCH == "arm64" && input.containerArchitecture == "" {
|
||||
l := log.New()
|
||||
l.SetFormatter(&log.TextFormatter{
|
||||
DisableQuote: true,
|
||||
DisableTimestamp: true,
|
||||
})
|
||||
l.Warnf(" \U000026A0 You are using Apple M1 chip and you have not specified container architecture, you might encounter issues while running act. If so, try running it with '--container-architecture linux/amd64'. \U000026A0 \n")
|
||||
l.Warnf(" \U000026A0 You are using Apple M-series chip and you have not specified container architecture, you might encounter issues while running act. If so, try running it with '--container-architecture linux/amd64'. \U000026A0 \n")
|
||||
}
|
||||
|
||||
log.Debugf("Loading environment from %s", input.Envfile())
|
||||
envs := make(map[string]string)
|
||||
if input.envs != nil {
|
||||
for _, envVar := range input.envs {
|
||||
e := strings.SplitN(envVar, `=`, 2)
|
||||
if len(e) == 2 {
|
||||
envs[e[0]] = e[1]
|
||||
} else {
|
||||
envs[e[0]] = ""
|
||||
}
|
||||
}
|
||||
}
|
||||
envs := parseEnvs(input.envs)
|
||||
_ = readEnvs(input.Envfile(), envs)
|
||||
|
||||
log.Debugf("Loading action inputs from %s", input.Inputfile())
|
||||
inputs := parseEnvs(input.inputs)
|
||||
_ = readEnvs(input.Inputfile(), inputs)
|
||||
|
||||
log.Debugf("Loading secrets from %s", input.Secretfile())
|
||||
secrets := newSecrets(input.secrets)
|
||||
_ = readEnvs(input.Secretfile(), secrets)
|
||||
|
||||
log.Debugf("Loading vars from %s", input.Varfile())
|
||||
vars := newSecrets(input.vars)
|
||||
_ = readEnvs(input.Varfile(), vars)
|
||||
|
||||
matrixes := parseMatrix(input.matrix)
|
||||
log.Debugf("Evaluated matrix inclusions: %v", matrixes)
|
||||
|
||||
planner, err := model.NewWorkflowPlanner(input.WorkflowsPath(), input.noWorkflowRecurse)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -329,7 +458,7 @@ func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []str
|
||||
var filterPlan *model.Plan
|
||||
|
||||
// Determine the event name to be filtered
|
||||
var filterEventName string = ""
|
||||
var filterEventName string
|
||||
|
||||
if len(args) > 0 {
|
||||
log.Debugf("Using first passed in arguments event for filtering: %s", args[0])
|
||||
@@ -341,23 +470,35 @@ func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []str
|
||||
filterEventName = events[0]
|
||||
}
|
||||
|
||||
var plannerErr error
|
||||
if jobID != "" {
|
||||
log.Debugf("Preparing plan with a job: %s", jobID)
|
||||
filterPlan = planner.PlanJob(jobID)
|
||||
filterPlan, plannerErr = planner.PlanJob(jobID)
|
||||
} else if filterEventName != "" {
|
||||
log.Debugf("Preparing plan for a event: %s", filterEventName)
|
||||
filterPlan = planner.PlanEvent(filterEventName)
|
||||
filterPlan, plannerErr = planner.PlanEvent(filterEventName)
|
||||
} else {
|
||||
log.Debugf("Preparing plan with all jobs")
|
||||
filterPlan = planner.PlanAll()
|
||||
filterPlan, plannerErr = planner.PlanAll()
|
||||
}
|
||||
if filterPlan == nil && plannerErr != nil {
|
||||
return plannerErr
|
||||
}
|
||||
|
||||
if list {
|
||||
return printList(filterPlan)
|
||||
err = printList(filterPlan)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return plannerErr
|
||||
}
|
||||
|
||||
if graph {
|
||||
return drawGraph(filterPlan)
|
||||
err = drawGraph(filterPlan)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return plannerErr
|
||||
}
|
||||
|
||||
// plan with triggered jobs
|
||||
@@ -385,10 +526,13 @@ func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []str
|
||||
// build the plan for this run
|
||||
if jobID != "" {
|
||||
log.Debugf("Planning job: %s", jobID)
|
||||
plan = planner.PlanJob(jobID)
|
||||
plan, plannerErr = planner.PlanJob(jobID)
|
||||
} else {
|
||||
log.Debugf("Planning jobs for event: %s", eventName)
|
||||
plan = planner.PlanEvent(eventName)
|
||||
plan, plannerErr = planner.PlanEvent(eventName)
|
||||
}
|
||||
if plan == nil && plannerErr != nil {
|
||||
return plannerErr
|
||||
}
|
||||
|
||||
// check to see if the main branch was defined
|
||||
@@ -408,12 +552,26 @@ func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []str
|
||||
}
|
||||
}
|
||||
if !cfgFound && len(cfgLocations) > 0 {
|
||||
// The first config location refers to the global config folder one
|
||||
if err := defaultImageSurvey(cfgLocations[0]); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
input.platforms = readArgsFile(cfgLocations[0], true)
|
||||
}
|
||||
}
|
||||
deprecationWarning := "--%s is deprecated and will be removed soon, please switch to cli: `--container-options \"%[2]s\"` or `.actrc`: `--container-options %[2]s`."
|
||||
if input.privileged {
|
||||
log.Warnf(deprecationWarning, "privileged", "--privileged")
|
||||
}
|
||||
if len(input.usernsMode) > 0 {
|
||||
log.Warnf(deprecationWarning, "userns", fmt.Sprintf("--userns=%s", input.usernsMode))
|
||||
}
|
||||
if len(input.containerCapAdd) > 0 {
|
||||
log.Warnf(deprecationWarning, "container-cap-add", fmt.Sprintf("--cap-add=%s", input.containerCapAdd))
|
||||
}
|
||||
if len(input.containerCapDrop) > 0 {
|
||||
log.Warnf(deprecationWarning, "container-cap-drop", fmt.Sprintf("--cap-drop=%s", input.containerCapDrop))
|
||||
}
|
||||
|
||||
// run the plan
|
||||
config := &runner.Config{
|
||||
@@ -421,15 +579,20 @@ func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []str
|
||||
EventName: eventName,
|
||||
EventPath: input.EventPath(),
|
||||
DefaultBranch: defaultbranch,
|
||||
ForcePull: input.forcePull,
|
||||
ForcePull: !input.actionOfflineMode && input.forcePull,
|
||||
ForceRebuild: input.forceRebuild,
|
||||
ReuseContainers: input.reuseContainers,
|
||||
Workdir: input.Workdir(),
|
||||
ActionCacheDir: input.actionCachePath,
|
||||
ActionOfflineMode: input.actionOfflineMode,
|
||||
BindWorkdir: input.bindWorkdir,
|
||||
LogOutput: !input.noOutput,
|
||||
JSONLogger: input.jsonLogger,
|
||||
LogPrefixJobID: input.logPrefixJobID,
|
||||
Env: envs,
|
||||
Secrets: secrets,
|
||||
Vars: vars,
|
||||
Inputs: inputs,
|
||||
Token: secrets["GITHUB_TOKEN"],
|
||||
InsecureSecrets: input.insecureSecrets,
|
||||
Platforms: input.newPlatforms(),
|
||||
@@ -437,44 +600,73 @@ func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []str
|
||||
UsernsMode: input.usernsMode,
|
||||
ContainerArchitecture: input.containerArchitecture,
|
||||
ContainerDaemonSocket: input.containerDaemonSocket,
|
||||
ContainerOptions: input.containerOptions,
|
||||
UseGitIgnore: input.useGitIgnore,
|
||||
GitHubInstance: input.githubInstance,
|
||||
ContainerCapAdd: input.containerCapAdd,
|
||||
ContainerCapDrop: input.containerCapDrop,
|
||||
AutoRemove: input.autoRemove,
|
||||
ArtifactServerPath: input.artifactServerPath,
|
||||
ArtifactServerAddr: input.artifactServerAddr,
|
||||
ArtifactServerPort: input.artifactServerPort,
|
||||
NoSkipCheckout: input.noSkipCheckout,
|
||||
RemoteName: input.remoteName,
|
||||
ReplaceGheActionWithGithubCom: input.replaceGheActionWithGithubCom,
|
||||
ReplaceGheActionTokenWithGithubCom: input.replaceGheActionTokenWithGithubCom,
|
||||
Matrix: matrixes,
|
||||
ContainerNetworkMode: docker_container.NetworkMode(input.networkName),
|
||||
}
|
||||
if input.useNewActionCache {
|
||||
config.ActionCache = &runner.GoGitActionCache{
|
||||
Path: config.ActionCacheDir,
|
||||
}
|
||||
}
|
||||
r, err := runner.New(config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cancel := artifacts.Serve(ctx, input.artifactServerPath, input.artifactServerPort)
|
||||
cancel := artifacts.Serve(ctx, input.artifactServerPath, input.artifactServerAddr, input.artifactServerPort)
|
||||
|
||||
const cacheURLKey = "ACTIONS_CACHE_URL"
|
||||
var cacheHandler *artifactcache.Handler
|
||||
if !input.noCacheServer && envs[cacheURLKey] == "" {
|
||||
var err error
|
||||
cacheHandler, err = artifactcache.StartHandler(input.cacheServerPath, input.cacheServerAddr, input.cacheServerPort, common.Logger(ctx))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
envs[cacheURLKey] = cacheHandler.ExternalURL() + "/"
|
||||
}
|
||||
|
||||
ctx = common.WithDryrun(ctx, input.dryrun)
|
||||
if watch, err := cmd.Flags().GetBool("watch"); err != nil {
|
||||
return err
|
||||
} else if watch {
|
||||
return watchAndRun(ctx, r.NewPlanExecutor(plan))
|
||||
err = watchAndRun(ctx, r.NewPlanExecutor(plan))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return plannerErr
|
||||
}
|
||||
|
||||
executor := r.NewPlanExecutor(plan).Finally(func(ctx context.Context) error {
|
||||
cancel()
|
||||
_ = cacheHandler.Close()
|
||||
return nil
|
||||
})
|
||||
return executor(ctx)
|
||||
err = executor(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return plannerErr
|
||||
}
|
||||
}
|
||||
|
||||
func defaultImageSurvey(actrc string) error {
|
||||
var answer string
|
||||
confirmation := &survey.Select{
|
||||
Message: "Please choose the default image you want to use with act:\n\n - Large size image: +20GB Docker image, includes almost all tools used on GitHub Actions (IMPORTANT: currently only ubuntu-18.04 platform is available)\n - Medium size image: ~500MB, includes only necessary tools to bootstrap actions and aims to be compatible with all actions\n - Micro size image: <200MB, contains only NodeJS required to bootstrap actions, doesn't work with all actions\n\nDefault image and other options can be changed manually in ~/.actrc (please refer to https://github.com/nektos/act#configuration for additional information about file structure)",
|
||||
Message: "Please choose the default image you want to use with act:\n - Large size image: ca. 17GB download + 53.1GB storage, you will need 75GB of free disk space, snapshots of GitHub Hosted Runners without snap and pulled docker images\n - Medium size image: ~500MB, includes only necessary tools to bootstrap actions and aims to be compatible with most actions\n - Micro size image: <200MB, contains only NodeJS required to bootstrap actions, doesn't work with all actions\n\nDefault image and other options can be changed manually in ~/.actrc (please refer to https://github.com/nektos/act#configuration for additional information about file structure)",
|
||||
Help: "If you want to know why act asks you that, please go to https://github.com/nektos/act/issues/107",
|
||||
Default: "Medium",
|
||||
Options: []string{"Large", "Medium", "Micro"},
|
||||
@@ -488,11 +680,11 @@ func defaultImageSurvey(actrc string) error {
|
||||
var option string
|
||||
switch answer {
|
||||
case "Large":
|
||||
option = "-P ubuntu-latest=catthehacker/ubuntu:full-latest\n-P ubuntu-latest=catthehacker/ubuntu:full-20.04\n-P ubuntu-18.04=catthehacker/ubuntu:full-18.04\n"
|
||||
option = "-P ubuntu-latest=catthehacker/ubuntu:full-latest\n-P ubuntu-22.04=catthehacker/ubuntu:full-22.04\n-P ubuntu-20.04=catthehacker/ubuntu:full-20.04\n-P ubuntu-18.04=catthehacker/ubuntu:full-18.04\n"
|
||||
case "Medium":
|
||||
option = "-P ubuntu-latest=catthehacker/ubuntu:act-latest\n-P ubuntu-22.04=catthehacker/ubuntu:act-22.04\n-P ubuntu-20.04=catthehacker/ubuntu:act-20.04\n-P ubuntu-18.04=catthehacker/ubuntu:act-18.04\n"
|
||||
case "Micro":
|
||||
option = "-P ubuntu-latest=node:16-buster-slim\n-P -P ubuntu-22.04=node:16-bullseye-slim\n ubuntu-20.04=node:16-buster-slim\n-P ubuntu-18.04=node:16-buster-slim\n"
|
||||
option = "-P ubuntu-latest=node:16-buster-slim\n-P ubuntu-22.04=node:16-bullseye-slim\n-P ubuntu-20.04=node:16-buster-slim\n-P ubuntu-18.04=node:16-buster-slim\n"
|
||||
}
|
||||
|
||||
f, err := os.Create(actrc)
|
||||
@@ -515,45 +707,47 @@ func defaultImageSurvey(actrc string) error {
|
||||
}
|
||||
|
||||
func watchAndRun(ctx context.Context, fn common.Executor) error {
|
||||
recurse := true
|
||||
checkIntervalInSeconds := 2
|
||||
dir, err := os.Getwd()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var ignore *gitignore.GitIgnore
|
||||
if _, err := os.Stat(filepath.Join(dir, ".gitignore")); !os.IsNotExist(err) {
|
||||
ignore, _ = gitignore.CompileIgnoreFile(filepath.Join(dir, ".gitignore"))
|
||||
} else {
|
||||
ignore = &gitignore.GitIgnore{}
|
||||
ignoreFile := filepath.Join(dir, ".gitignore")
|
||||
ignore := &gitignore.GitIgnore{}
|
||||
if info, err := os.Stat(ignoreFile); err == nil && !info.IsDir() {
|
||||
ignore, err = gitignore.CompileIgnoreFile(ignoreFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("compile %q: %w", ignoreFile, err)
|
||||
}
|
||||
}
|
||||
|
||||
folderWatcher := fswatch.NewFolderWatcher(
|
||||
dir,
|
||||
recurse,
|
||||
true,
|
||||
ignore.MatchesPath,
|
||||
checkIntervalInSeconds,
|
||||
2, // 2 seconds
|
||||
)
|
||||
|
||||
folderWatcher.Start()
|
||||
defer folderWatcher.Stop()
|
||||
|
||||
go func() {
|
||||
for folderWatcher.IsRunning() {
|
||||
if err = fn(ctx); err != nil {
|
||||
break
|
||||
}
|
||||
log.Debugf("Watching %s for changes", dir)
|
||||
for changes := range folderWatcher.ChangeDetails() {
|
||||
log.Debugf("%s", changes.String())
|
||||
if err = fn(ctx); err != nil {
|
||||
break
|
||||
}
|
||||
log.Debugf("Watching %s for changes", dir)
|
||||
// run once before watching
|
||||
if err := fn(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for folderWatcher.IsRunning() {
|
||||
log.Debugf("Watching %s for changes", dir)
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
case changes := <-folderWatcher.ChangeDetails():
|
||||
log.Debugf("%s", changes.String())
|
||||
if err := fn(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}()
|
||||
<-ctx.Done()
|
||||
folderWatcher.Stop()
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
go.mod
@@ -1,85 +1,94 @@
|
||||
module github.com/nektos/act
|
||||
|
||||
go 1.18
|
||||
go 1.20
|
||||
|
||||
require (
|
||||
github.com/AlecAivazis/survey/v2 v2.3.6
|
||||
github.com/Masterminds/semver v1.5.0
|
||||
github.com/AlecAivazis/survey/v2 v2.3.7
|
||||
github.com/adrg/xdg v0.4.0
|
||||
github.com/andreaskoch/go-fswatch v1.0.0
|
||||
github.com/creack/pty v1.1.18
|
||||
github.com/docker/cli v20.10.21+incompatible
|
||||
github.com/docker/distribution v2.8.1+incompatible
|
||||
github.com/docker/docker v20.10.21+incompatible
|
||||
github.com/creack/pty v1.1.21
|
||||
github.com/docker/cli v24.0.7+incompatible
|
||||
github.com/docker/distribution v2.8.3+incompatible
|
||||
github.com/docker/docker v24.0.7+incompatible // 24.0 branch
|
||||
github.com/docker/go-connections v0.4.0
|
||||
github.com/go-git/go-billy/v5 v5.3.1
|
||||
github.com/go-git/go-git/v5 v5.4.2
|
||||
github.com/go-ini/ini v1.67.0
|
||||
github.com/imdario/mergo v0.3.13
|
||||
github.com/joho/godotenv v1.4.0
|
||||
github.com/go-git/go-billy/v5 v5.5.0
|
||||
github.com/go-git/go-git/v5 v5.11.0
|
||||
github.com/imdario/mergo v0.3.16
|
||||
github.com/joho/godotenv v1.5.1
|
||||
github.com/julienschmidt/httprouter v1.3.0
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51
|
||||
github.com/mattn/go-isatty v0.0.16
|
||||
github.com/mitchellh/go-homedir v1.1.0
|
||||
github.com/moby/buildkit v0.10.6
|
||||
github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799
|
||||
github.com/opencontainers/selinux v1.10.2
|
||||
github.com/mattn/go-isatty v0.0.20
|
||||
github.com/moby/buildkit v0.12.5
|
||||
github.com/moby/patternmatcher v0.6.0
|
||||
github.com/opencontainers/image-spec v1.1.0-rc3
|
||||
github.com/opencontainers/selinux v1.11.0
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/rhysd/actionlint v1.6.22
|
||||
github.com/rhysd/actionlint v1.6.26
|
||||
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06
|
||||
github.com/sirupsen/logrus v1.9.0
|
||||
github.com/spf13/cobra v1.6.1
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/spf13/cobra v1.8.0
|
||||
github.com/spf13/pflag v1.0.5
|
||||
github.com/stretchr/testify v1.8.1
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
|
||||
github.com/stretchr/testify v1.8.4
|
||||
github.com/timshannon/bolthold v0.0.0-20210913165410-232392fc8a6a
|
||||
go.etcd.io/bbolt v1.3.8
|
||||
golang.org/x/term v0.16.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
gotest.tools/v3 v3.4.0
|
||||
gotest.tools/v3 v3.5.1
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/Microsoft/go-winio v0.5.2 // indirect
|
||||
github.com/Microsoft/hcsshim v0.9.3 // indirect
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20220404123522-616f957b79ad // indirect
|
||||
github.com/acomagu/bufpipe v1.0.3 // indirect
|
||||
github.com/containerd/cgroups v1.0.3 // indirect
|
||||
github.com/containerd/containerd v1.6.6 // indirect
|
||||
github.com/Masterminds/semver v1.5.0
|
||||
github.com/gobwas/glob v0.2.3
|
||||
)
|
||||
|
||||
require (
|
||||
dario.cat/mergo v1.0.0 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.1 // indirect
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect
|
||||
github.com/cloudflare/circl v1.3.7 // indirect
|
||||
github.com/containerd/containerd v1.7.2 // indirect
|
||||
github.com/cyphar/filepath-securejoin v0.2.4 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/docker/docker-credential-helpers v0.6.4 // indirect
|
||||
github.com/docker/go-units v0.4.0 // indirect
|
||||
github.com/emirpasic/gods v1.12.0 // indirect
|
||||
github.com/fatih/color v1.13.0 // indirect
|
||||
github.com/go-git/gcfg v1.5.0 // indirect
|
||||
github.com/distribution/reference v0.5.0 // indirect
|
||||
github.com/docker/docker-credential-helpers v0.7.0 // indirect
|
||||
github.com/docker/go-units v0.5.0 // indirect
|
||||
github.com/emirpasic/gods v1.18.1 // indirect
|
||||
github.com/fatih/color v1.15.0 // indirect
|
||||
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/google/go-cmp v0.5.7 // indirect
|
||||
github.com/google/go-cmp v0.6.0 // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.1 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
|
||||
github.com/kevinburke/ssh_config v1.2.0 // indirect
|
||||
github.com/klauspost/compress v1.17.2 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.13 // indirect
|
||||
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
|
||||
github.com/mattn/go-runewidth v0.0.15 // indirect
|
||||
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect
|
||||
github.com/mitchellh/mapstructure v1.1.2 // indirect
|
||||
github.com/moby/sys/mount v0.3.1 // indirect
|
||||
github.com/moby/sys/mountinfo v0.6.0 // indirect
|
||||
github.com/moby/sys/sequential v0.5.0 // indirect
|
||||
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/runc v1.1.2 // indirect
|
||||
github.com/opencontainers/runc v1.1.12 // indirect
|
||||
github.com/pjbgf/sha1cd v0.3.0 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/rivo/uniseg v0.3.4 // indirect
|
||||
github.com/robfig/cron v1.2.0 // indirect
|
||||
github.com/rivo/uniseg v0.4.4 // indirect
|
||||
github.com/robfig/cron/v3 v3.0.1 // indirect
|
||||
github.com/sergi/go-diff v1.2.0 // indirect
|
||||
github.com/skeema/knownhosts v1.2.1 // indirect
|
||||
github.com/stretchr/objx v0.5.0 // indirect
|
||||
github.com/xanzy/ssh-agent v0.3.1 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
|
||||
github.com/xanzy/ssh-agent v0.3.3 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f // indirect
|
||||
go.opencensus.io v0.23.0 // indirect
|
||||
golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29 // indirect
|
||||
golang.org/x/net v0.0.0-20220906165146-f3363e06e74c // indirect
|
||||
golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde // indirect
|
||||
golang.org/x/sys v0.0.0-20220818161305-2296e01440c6 // indirect
|
||||
golang.org/x/text v0.3.7 // indirect
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
|
||||
golang.org/x/crypto v0.17.0 // indirect
|
||||
golang.org/x/mod v0.12.0 // indirect
|
||||
golang.org/x/net v0.19.0 // indirect
|
||||
golang.org/x/sync v0.3.0 // indirect
|
||||
golang.org/x/sys v0.16.0 // indirect
|
||||
golang.org/x/text v0.14.0 // indirect
|
||||
golang.org/x/tools v0.13.0 // indirect
|
||||
gopkg.in/warnings.v0 v0.1.2 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
)
|
||||
|
||||
replace github.com/go-git/go-git/v5 => github.com/ZauberNerd/go-git/v5 v5.4.3-0.20220315170230-29ec1bc1e5db
|
||||
main.go
@@ -2,6 +2,7 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
_ "embed"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
@@ -9,7 +10,8 @@ import (
|
||||
"github.com/nektos/act/cmd"
|
||||
)
|
||||
|
||||
var version = "v0.2.27-dev" // Manually bump after tagging next release
|
||||
//go:embed VERSION
|
||||
var version string
|
||||
|
||||
func main() {
|
||||
ctx := context.Background()
|
||||
pkg/artifactcache/doc.go (new file)
@@ -0,0 +1,8 @@
|
||||
// Package artifactcache provides a cache handler for the runner.
|
||||
//
|
||||
// Inspired by https://github.com/sp-ricard-valverde/github-act-cache-server
|
||||
//
|
||||
// TODO: Authorization
|
||||
// TODO: Restrictions for accessing a cache, see https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#restrictions-for-accessing-a-cache
|
||||
// TODO: Force deleting cache entries, see https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#force-deleting-cache-entries
|
||||
package artifactcache
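A minimal sketch of how this handler gets wired up, mirroring the call sites in cmd/root.go above (all argument values are illustrative defaults, not the only valid ones):

handler, err := artifactcache.StartHandler("", "", 0, nil) // dir "" -> ~/.cache/actcache, IP auto-detected, port 0 -> random, logger nil -> discarded
if err != nil {
	log.Fatal(err)
}
defer handler.Close()
os.Setenv("ACTIONS_CACHE_URL", handler.ExternalURL()+"/") // what the runner exports to jobs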
pkg/artifactcache/handler.go (new file)
@@ -0,0 +1,536 @@
|
||||
package artifactcache
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/julienschmidt/httprouter"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/timshannon/bolthold"
|
||||
"go.etcd.io/bbolt"
|
||||
|
||||
"github.com/nektos/act/pkg/common"
|
||||
)
|
||||
|
||||
const (
|
||||
urlBase = "/_apis/artifactcache"
|
||||
)
|
||||
|
||||
type Handler struct {
|
||||
dir string
|
||||
storage *Storage
|
||||
router *httprouter.Router
|
||||
listener net.Listener
|
||||
server *http.Server
|
||||
logger logrus.FieldLogger
|
||||
|
||||
gcing int32 // TODO: use atomic.Bool when we can use Go 1.19
|
||||
gcAt time.Time
|
||||
|
||||
outboundIP string
|
||||
}
|
||||
|
||||
func StartHandler(dir, outboundIP string, port uint16, logger logrus.FieldLogger) (*Handler, error) {
|
||||
h := &Handler{}
|
||||
|
||||
if logger == nil {
|
||||
discard := logrus.New()
|
||||
discard.Out = io.Discard
|
||||
logger = discard
|
||||
}
|
||||
logger = logger.WithField("module", "artifactcache")
|
||||
h.logger = logger
|
||||
|
||||
if dir == "" {
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dir = filepath.Join(home, ".cache", "actcache")
|
||||
}
|
||||
if err := os.MkdirAll(dir, 0o755); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
h.dir = dir
|
||||
|
||||
storage, err := NewStorage(filepath.Join(dir, "cache"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
h.storage = storage
|
||||
|
||||
if outboundIP != "" {
|
||||
h.outboundIP = outboundIP
|
||||
} else if ip := common.GetOutboundIP(); ip == nil {
|
||||
return nil, fmt.Errorf("unable to determine outbound IP address")
|
||||
} else {
|
||||
h.outboundIP = ip.String()
|
||||
}
|
||||
|
||||
router := httprouter.New()
|
||||
router.GET(urlBase+"/cache", h.middleware(h.find))
|
||||
router.POST(urlBase+"/caches", h.middleware(h.reserve))
|
||||
router.PATCH(urlBase+"/caches/:id", h.middleware(h.upload))
|
||||
router.POST(urlBase+"/caches/:id", h.middleware(h.commit))
|
||||
router.GET(urlBase+"/artifacts/:id", h.middleware(h.get))
|
||||
router.POST(urlBase+"/clean", h.middleware(h.clean))
|
||||
|
||||
h.router = router
|
||||
|
||||
h.gcCache()
|
||||
|
||||
listener, err := net.Listen("tcp", fmt.Sprintf(":%d", port)) // listen on all interfaces
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
server := &http.Server{
|
||||
ReadHeaderTimeout: 2 * time.Second,
|
||||
Handler: router,
|
||||
}
|
||||
go func() {
|
||||
if err := server.Serve(listener); err != nil && !errors.Is(err, net.ErrClosed) {
|
||||
logger.Errorf("http serve: %v", err)
|
||||
}
|
||||
}()
|
||||
h.listener = listener
|
||||
h.server = server
|
||||
|
||||
return h, nil
|
||||
}
|
||||
|
||||
func (h *Handler) ExternalURL() string {
|
||||
// TODO: make the external url configurable if necessary
|
||||
return fmt.Sprintf("http://%s:%d",
|
||||
h.outboundIP,
|
||||
h.listener.Addr().(*net.TCPAddr).Port)
|
||||
}
|
||||
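Because the default port is 0, the advertised URL is only known after the listener is bound; for example, with an outbound IP of 192.168.1.5 and an OS-assigned port of 41234 (both values illustrative):

h.ExternalURL() // "http://192.168.1.5:41234"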
|
||||
func (h *Handler) Close() error {
|
||||
if h == nil {
|
||||
return nil
|
||||
}
|
||||
var retErr error
|
||||
if h.server != nil {
|
||||
err := h.server.Close()
|
||||
if err != nil {
|
||||
retErr = err
|
||||
}
|
||||
h.server = nil
|
||||
}
|
||||
if h.listener != nil {
|
||||
err := h.listener.Close()
|
||||
if errors.Is(err, net.ErrClosed) {
|
||||
err = nil
|
||||
}
|
||||
if err != nil {
|
||||
retErr = err
|
||||
}
|
||||
h.listener = nil
|
||||
}
|
||||
return retErr
|
||||
}
|
||||
|
||||
func (h *Handler) openDB() (*bolthold.Store, error) {
|
||||
return bolthold.Open(filepath.Join(h.dir, "bolt.db"), 0o644, &bolthold.Options{
|
||||
Encoder: json.Marshal,
|
||||
Decoder: json.Unmarshal,
|
||||
Options: &bbolt.Options{
|
||||
Timeout: 5 * time.Second,
|
||||
NoGrowSync: bbolt.DefaultOptions.NoGrowSync,
|
||||
FreelistType: bbolt.DefaultOptions.FreelistType,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// GET /_apis/artifactcache/cache
|
||||
func (h *Handler) find(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
|
||||
keys := strings.Split(r.URL.Query().Get("keys"), ",")
|
||||
// cache keys are case insensitive
|
||||
for i, key := range keys {
|
||||
keys[i] = strings.ToLower(key)
|
||||
}
|
||||
version := r.URL.Query().Get("version")
|
||||
|
||||
db, err := h.openDB()
|
||||
if err != nil {
|
||||
h.responseJSON(w, r, 500, err)
|
||||
return
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
cache, err := h.findCache(db, keys, version)
|
||||
if err != nil {
|
||||
h.responseJSON(w, r, 500, err)
|
||||
return
|
||||
}
|
||||
if cache == nil {
|
||||
h.responseJSON(w, r, 204)
|
||||
return
|
||||
}
|
||||
|
||||
if ok, err := h.storage.Exist(cache.ID); err != nil {
|
||||
h.responseJSON(w, r, 500, err)
|
||||
return
|
||||
} else if !ok {
|
||||
_ = db.Delete(cache.ID, cache)
|
||||
h.responseJSON(w, r, 204)
|
||||
return
|
||||
}
|
||||
h.responseJSON(w, r, 200, map[string]any{
|
||||
"result": "hit",
|
||||
"archiveLocation": fmt.Sprintf("%s%s/artifacts/%d", h.ExternalURL(), urlBase, cache.ID),
|
||||
"cacheKey": cache.Key,
|
||||
})
|
||||
}
|
||||
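For reference, a client-side sketch of the lookup that the cache action performs against this route (base and version are illustrative):

resp, _ := http.Get(base + "/cache?keys=npm-abc,npm-&version=" + version)
// 200 with {"result":"hit","archiveLocation":"...","cacheKey":"..."} on a hit,
// 204 when no complete cache matches any of the keys.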
|
||||
// POST /_apis/artifactcache/caches
|
||||
func (h *Handler) reserve(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
|
||||
api := &Request{}
|
||||
if err := json.NewDecoder(r.Body).Decode(api); err != nil {
|
||||
h.responseJSON(w, r, 400, err)
|
||||
return
|
||||
}
|
||||
// cache keys are case insensitive
|
||||
api.Key = strings.ToLower(api.Key)
|
||||
|
||||
cache := api.ToCache()
|
||||
cache.FillKeyVersionHash()
|
||||
db, err := h.openDB()
|
||||
if err != nil {
|
||||
h.responseJSON(w, r, 500, err)
|
||||
return
|
||||
}
|
||||
defer db.Close()
|
||||
if err := db.FindOne(cache, bolthold.Where("KeyVersionHash").Eq(cache.KeyVersionHash)); err != nil {
|
||||
if !errors.Is(err, bolthold.ErrNotFound) {
|
||||
h.responseJSON(w, r, 500, err)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
h.responseJSON(w, r, 400, fmt.Errorf("already exist"))
|
||||
return
|
||||
}
|
||||
|
||||
now := time.Now().Unix()
|
||||
cache.CreatedAt = now
|
||||
cache.UsedAt = now
|
||||
if err := db.Insert(bolthold.NextSequence(), cache); err != nil {
|
||||
h.responseJSON(w, r, 500, err)
|
||||
return
|
||||
}
|
||||
// write back id to db
|
||||
if err := db.Update(cache.ID, cache); err != nil {
|
||||
h.responseJSON(w, r, 500, err)
|
||||
return
|
||||
}
|
||||
h.responseJSON(w, r, 200, map[string]any{
|
||||
"cacheId": cache.ID,
|
||||
})
|
||||
}
|
||||
|
||||
// PATCH /_apis/artifactcache/caches/:id
|
||||
func (h *Handler) upload(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
|
||||
id, err := strconv.ParseInt(params.ByName("id"), 10, 64)
|
||||
if err != nil {
|
||||
h.responseJSON(w, r, 400, err)
|
||||
return
|
||||
}
|
||||
|
||||
cache := &Cache{}
|
||||
db, err := h.openDB()
|
||||
if err != nil {
|
||||
h.responseJSON(w, r, 500, err)
|
||||
return
|
||||
}
|
||||
defer db.Close()
|
||||
if err := db.Get(id, cache); err != nil {
|
||||
if errors.Is(err, bolthold.ErrNotFound) {
|
||||
h.responseJSON(w, r, 400, fmt.Errorf("cache %d: not reserved", id))
|
||||
return
|
||||
}
|
||||
h.responseJSON(w, r, 500, err)
|
||||
return
|
||||
}
|
||||
|
||||
if cache.Complete {
|
||||
h.responseJSON(w, r, 400, fmt.Errorf("cache %v %q: already complete", cache.ID, cache.Key))
|
||||
return
|
||||
}
|
||||
db.Close()
|
||||
start, _, err := parseContentRange(r.Header.Get("Content-Range"))
|
||||
if err != nil {
|
||||
h.responseJSON(w, r, 400, err)
|
||||
return
|
||||
}
|
||||
if err := h.storage.Write(cache.ID, start, r.Body); err != nil {
|
||||
h.responseJSON(w, r, 500, err)
|
||||
}
|
||||
h.useCache(id)
|
||||
h.responseJSON(w, r, 200)
|
||||
}
|
||||
|
||||
// POST /_apis/artifactcache/caches/:id
|
||||
func (h *Handler) commit(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
|
||||
id, err := strconv.ParseInt(params.ByName("id"), 10, 64)
|
||||
if err != nil {
|
||||
h.responseJSON(w, r, 400, err)
|
||||
return
|
||||
}
|
||||
|
||||
cache := &Cache{}
|
||||
db, err := h.openDB()
|
||||
if err != nil {
|
||||
h.responseJSON(w, r, 500, err)
|
||||
return
|
||||
}
|
||||
defer db.Close()
|
||||
if err := db.Get(id, cache); err != nil {
|
||||
if errors.Is(err, bolthold.ErrNotFound) {
|
||||
h.responseJSON(w, r, 400, fmt.Errorf("cache %d: not reserved", id))
|
||||
return
|
||||
}
|
||||
h.responseJSON(w, r, 500, err)
|
||||
return
|
||||
}
|
||||
|
||||
if cache.Complete {
|
||||
h.responseJSON(w, r, 400, fmt.Errorf("cache %v %q: already complete", cache.ID, cache.Key))
|
||||
return
|
||||
}
|
||||
|
||||
db.Close()
|
||||
|
||||
size, err := h.storage.Commit(cache.ID, cache.Size)
|
||||
if err != nil {
|
||||
h.responseJSON(w, r, 500, err)
|
||||
return
|
||||
}
|
||||
// write real size back to cache, it may be different from the current value when the request doesn't specify it.
|
||||
cache.Size = size
|
||||
|
||||
db, err = h.openDB()
|
||||
if err != nil {
|
||||
h.responseJSON(w, r, 500, err)
|
||||
return
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
cache.Complete = true
|
||||
if err := db.Update(cache.ID, cache); err != nil {
|
||||
h.responseJSON(w, r, 500, err)
|
||||
return
|
||||
}
|
||||
|
||||
h.responseJSON(w, r, 200)
|
||||
}
|
||||
|
||||
// GET /_apis/artifactcache/artifacts/:id
|
||||
func (h *Handler) get(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
|
||||
id, err := strconv.ParseInt(params.ByName("id"), 10, 64)
|
||||
if err != nil {
|
||||
h.responseJSON(w, r, 400, err)
|
||||
return
|
||||
}
|
||||
h.useCache(id)
|
||||
h.storage.Serve(w, r, uint64(id))
|
||||
}
|
||||
|
||||
// POST /_apis/artifactcache/clean
|
||||
func (h *Handler) clean(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
|
||||
// TODO: don't support force deleting cache entries
|
||||
// see: https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#force-deleting-cache-entries
|
||||
|
||||
h.responseJSON(w, r, 200)
|
||||
}
|
||||
|
||||
func (h *Handler) middleware(handler httprouter.Handle) httprouter.Handle {
|
||||
return func(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
|
||||
h.logger.Debugf("%s %s", r.Method, r.RequestURI)
|
||||
handler(w, r, params)
|
||||
go h.gcCache()
|
||||
}
|
||||
}
|
||||
|
||||
// if not found, return (nil, nil) instead of an error.
|
||||
func (h *Handler) findCache(db *bolthold.Store, keys []string, version string) (*Cache, error) {
|
||||
if len(keys) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
key := keys[0] // the first key is for exact match.
|
||||
|
||||
cache := &Cache{
|
||||
Key: key,
|
||||
Version: version,
|
||||
}
|
||||
cache.FillKeyVersionHash()
|
||||
|
||||
if err := db.FindOne(cache, bolthold.Where("KeyVersionHash").Eq(cache.KeyVersionHash)); err != nil {
|
||||
if !errors.Is(err, bolthold.ErrNotFound) {
|
||||
return nil, err
|
||||
}
|
||||
} else if cache.Complete {
|
||||
return cache, nil
|
||||
}
|
||||
stop := fmt.Errorf("stop")
|
||||
|
||||
for _, prefix := range keys[1:] {
|
||||
found := false
|
||||
prefixPattern := fmt.Sprintf("^%s", regexp.QuoteMeta(prefix))
|
||||
re, err := regexp.Compile(prefixPattern)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if err := db.ForEach(bolthold.Where("Key").RegExp(re).And("Version").Eq(version).SortBy("CreatedAt").Reverse(), func(v *Cache) error {
|
||||
if !strings.HasPrefix(v.Key, prefix) {
|
||||
return stop
|
||||
}
|
||||
if v.Complete {
|
||||
cache = v
|
||||
found = true
|
||||
return stop
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
if !errors.Is(err, stop) {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if found {
|
||||
return cache, nil
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
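The keys parameter mirrors GitHub's key/restore-keys semantics: keys[0] must match exactly, each later key is treated as a prefix, and the newest complete entry under a prefix wins. A sketch under assumed stored entries (keys and entries are illustrative):

// stored complete caches for this version: "npm-aaa" (older), "npm-bbb" (newer)
cache, err := h.findCache(db, []string{"npm-ccc", "npm-"}, version)
// exact match on "npm-ccc" misses; prefix "npm-" returns the newer "npm-bbb";
// (nil, nil) if nothing matches at all.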
|
||||
func (h *Handler) useCache(id int64) {
|
||||
db, err := h.openDB()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer db.Close()
|
||||
cache := &Cache{}
|
||||
if err := db.Get(id, cache); err != nil {
|
||||
return
|
||||
}
|
||||
cache.UsedAt = time.Now().Unix()
|
||||
_ = db.Update(cache.ID, cache)
|
||||
}
|
||||
|
||||
func (h *Handler) gcCache() {
|
||||
if atomic.LoadInt32(&h.gcing) != 0 {
|
||||
return
|
||||
}
|
||||
if !atomic.CompareAndSwapInt32(&h.gcing, 0, 1) {
|
||||
return
|
||||
}
|
||||
defer atomic.StoreInt32(&h.gcing, 0)
|
||||
|
||||
if time.Since(h.gcAt) < time.Hour {
|
||||
h.logger.Debugf("skip gc: %v", h.gcAt.String())
|
||||
return
|
||||
}
|
||||
h.gcAt = time.Now()
|
||||
h.logger.Debugf("gc: %v", h.gcAt.String())
|
||||
|
||||
const (
|
||||
keepUsed = 30 * 24 * time.Hour
|
||||
keepUnused = 7 * 24 * time.Hour
|
||||
keepTemp = 5 * time.Minute
|
||||
)
|
||||
|
||||
db, err := h.openDB()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
var caches []*Cache
|
||||
if err := db.Find(&caches, bolthold.Where("UsedAt").Lt(time.Now().Add(-keepTemp).Unix())); err != nil {
|
||||
h.logger.Warnf("find caches: %v", err)
|
||||
} else {
|
||||
for _, cache := range caches {
|
||||
if cache.Complete {
|
||||
continue
|
||||
}
|
||||
h.storage.Remove(cache.ID)
|
||||
if err := db.Delete(cache.ID, cache); err != nil {
|
||||
h.logger.Warnf("delete cache: %v", err)
|
||||
continue
|
||||
}
|
||||
h.logger.Infof("deleted cache: %+v", cache)
|
||||
}
|
||||
}
|
||||
|
||||
caches = caches[:0]
|
||||
if err := db.Find(&caches, bolthold.Where("UsedAt").Lt(time.Now().Add(-keepUnused).Unix())); err != nil {
|
||||
h.logger.Warnf("find caches: %v", err)
|
||||
} else {
|
||||
for _, cache := range caches {
|
||||
h.storage.Remove(cache.ID)
|
||||
if err := db.Delete(cache.ID, cache); err != nil {
|
||||
h.logger.Warnf("delete cache: %v", err)
|
||||
continue
|
||||
}
|
||||
h.logger.Infof("deleted cache: %+v", cache)
|
||||
}
|
||||
}
|
||||
|
||||
caches = caches[:0]
|
||||
if err := db.Find(&caches, bolthold.Where("CreatedAt").Lt(time.Now().Add(-keepUsed).Unix())); err != nil {
|
||||
h.logger.Warnf("find caches: %v", err)
|
||||
} else {
|
||||
for _, cache := range caches {
|
||||
h.storage.Remove(cache.ID)
|
||||
if err := db.Delete(cache.ID, cache); err != nil {
|
||||
h.logger.Warnf("delete cache: %v", err)
|
||||
continue
|
||||
}
|
||||
h.logger.Infof("deleted cache: %+v", cache)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Handler) responseJSON(w http.ResponseWriter, r *http.Request, code int, v ...any) {
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
var data []byte
|
||||
if len(v) == 0 || v[0] == nil {
|
||||
data, _ = json.Marshal(struct{}{})
|
||||
} else if err, ok := v[0].(error); ok {
|
||||
h.logger.Errorf("%v %v: %v", r.Method, r.RequestURI, err)
|
||||
data, _ = json.Marshal(map[string]any{
|
||||
"error": err.Error(),
|
||||
})
|
||||
} else {
|
||||
data, _ = json.Marshal(v[0])
|
||||
}
|
||||
w.WriteHeader(code)
|
||||
_, _ = w.Write(data)
|
||||
}
|
||||
|
||||
func parseContentRange(s string) (int64, int64, error) {
|
||||
// support the format like "bytes 11-22/*" only
|
||||
s, _, _ = strings.Cut(strings.TrimPrefix(s, "bytes "), "/")
|
||||
s1, s2, _ := strings.Cut(s, "-")
|
||||
|
||||
start, err := strconv.ParseInt(s1, 10, 64)
|
||||
if err != nil {
|
||||
return 0, 0, fmt.Errorf("parse %q: %w", s, err)
|
||||
}
|
||||
stop, err := strconv.ParseInt(s2, 10, 64)
|
||||
if err != nil {
|
||||
return 0, 0, fmt.Errorf("parse %q: %w", s, err)
|
||||
}
|
||||
return start, stop, nil
|
||||
}
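Only the "bytes <start>-<stop>/*" shape is handled; whatever follows the slash is discarded by strings.Cut. For example:

start, stop, err := parseContentRange("bytes 11-22/*")
// start == 11, stop == 22, err == nil; a malformed range such as "bytes xx-99/*" returns an error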
pkg/artifactcache/handler_test.go (new file)
@@ -0,0 +1,471 @@
|
||||
package artifactcache
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
func TestHandler(t *testing.T) {
|
||||
dir := filepath.Join(t.TempDir(), "artifactcache")
|
||||
handler, err := StartHandler(dir, "", 0, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
base := fmt.Sprintf("%s%s", handler.ExternalURL(), urlBase)
|
||||
|
||||
defer func() {
|
||||
t.Run("inpect db", func(t *testing.T) {
|
||||
db, err := handler.openDB()
|
||||
require.NoError(t, err)
|
||||
defer db.Close()
|
||||
require.NoError(t, db.Bolt().View(func(tx *bbolt.Tx) error {
|
||||
return tx.Bucket([]byte("Cache")).ForEach(func(k, v []byte) error {
|
||||
t.Logf("%s: %s", k, v)
|
||||
return nil
|
||||
})
|
||||
}))
|
||||
})
|
||||
t.Run("close", func(t *testing.T) {
|
||||
require.NoError(t, handler.Close())
|
||||
assert.Nil(t, handler.server)
|
||||
assert.Nil(t, handler.listener)
|
||||
_, err := http.Post(fmt.Sprintf("%s/caches/%d", base, 1), "", nil)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
}()
|
||||
|
||||
t.Run("get not exist", func(t *testing.T) {
|
||||
key := strings.ToLower(t.Name())
|
||||
version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
|
||||
resp, err := http.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, key, version))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 204, resp.StatusCode)
|
||||
})
|
||||
|
||||
t.Run("reserve and upload", func(t *testing.T) {
|
||||
key := strings.ToLower(t.Name())
|
||||
version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
|
||||
content := make([]byte, 100)
|
||||
_, err := rand.Read(content)
|
||||
require.NoError(t, err)
|
||||
uploadCacheNormally(t, base, key, version, content)
|
||||
})
|
||||
|
||||
t.Run("clean", func(t *testing.T) {
|
||||
resp, err := http.Post(fmt.Sprintf("%s/clean", base), "", nil)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 200, resp.StatusCode)
|
||||
})
|
||||
|
||||
t.Run("reserve with bad request", func(t *testing.T) {
|
||||
body := []byte(`invalid json`)
|
||||
require.NoError(t, err)
|
||||
resp, err := http.Post(fmt.Sprintf("%s/caches", base), "application/json", bytes.NewReader(body))
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 400, resp.StatusCode)
|
||||
})
|
||||
|
||||
t.Run("duplicate reserve", func(t *testing.T) {
|
||||
key := strings.ToLower(t.Name())
|
||||
version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
|
||||
{
|
||||
body, err := json.Marshal(&Request{
|
||||
Key: key,
|
||||
Version: version,
|
||||
Size: 100,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
resp, err := http.Post(fmt.Sprintf("%s/caches", base), "application/json", bytes.NewReader(body))
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 200, resp.StatusCode)
|
||||
|
||||
got := struct {
|
||||
CacheID uint64 `json:"cacheId"`
|
||||
}{}
|
||||
require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
|
||||
}
|
||||
{
|
||||
body, err := json.Marshal(&Request{
|
||||
Key: key,
|
||||
Version: version,
|
||||
Size: 100,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
resp, err := http.Post(fmt.Sprintf("%s/caches", base), "application/json", bytes.NewReader(body))
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 400, resp.StatusCode)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("upload with bad id", func(t *testing.T) {
|
||||
req, err := http.NewRequest(http.MethodPatch,
|
||||
fmt.Sprintf("%s/caches/invalid_id", base), bytes.NewReader(nil))
|
||||
require.NoError(t, err)
|
||||
req.Header.Set("Content-Type", "application/octet-stream")
|
||||
req.Header.Set("Content-Range", "bytes 0-99/*")
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 400, resp.StatusCode)
|
||||
})
|
||||
|
||||
t.Run("upload without reserve", func(t *testing.T) {
|
||||
req, err := http.NewRequest(http.MethodPatch,
|
||||
fmt.Sprintf("%s/caches/%d", base, 1000), bytes.NewReader(nil))
|
||||
require.NoError(t, err)
|
||||
req.Header.Set("Content-Type", "application/octet-stream")
|
||||
req.Header.Set("Content-Range", "bytes 0-99/*")
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 400, resp.StatusCode)
|
||||
})
|
||||
|
||||
t.Run("upload with complete", func(t *testing.T) {
|
||||
key := strings.ToLower(t.Name())
|
||||
version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
|
||||
var id uint64
|
||||
content := make([]byte, 100)
|
||||
_, err := rand.Read(content)
|
||||
require.NoError(t, err)
|
||||
{
|
||||
body, err := json.Marshal(&Request{
|
||||
Key: key,
|
||||
Version: version,
|
||||
Size: 100,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
resp, err := http.Post(fmt.Sprintf("%s/caches", base), "application/json", bytes.NewReader(body))
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 200, resp.StatusCode)
|
||||
|
||||
got := struct {
|
||||
CacheID uint64 `json:"cacheId"`
|
||||
}{}
|
||||
require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
|
||||
id = got.CacheID
|
||||
}
|
||||
{
|
||||
req, err := http.NewRequest(http.MethodPatch,
|
||||
fmt.Sprintf("%s/caches/%d", base, id), bytes.NewReader(content))
|
||||
require.NoError(t, err)
|
||||
req.Header.Set("Content-Type", "application/octet-stream")
|
||||
req.Header.Set("Content-Range", "bytes 0-99/*")
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 200, resp.StatusCode)
|
||||
}
|
||||
{
|
||||
resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 200, resp.StatusCode)
|
||||
}
|
||||
{
|
||||
req, err := http.NewRequest(http.MethodPatch,
|
||||
fmt.Sprintf("%s/caches/%d", base, id), bytes.NewReader(content))
|
||||
require.NoError(t, err)
|
||||
req.Header.Set("Content-Type", "application/octet-stream")
|
||||
req.Header.Set("Content-Range", "bytes 0-99/*")
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 400, resp.StatusCode)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("upload with invalid range", func(t *testing.T) {
|
||||
key := strings.ToLower(t.Name())
|
||||
version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
|
||||
var id uint64
|
||||
content := make([]byte, 100)
|
||||
_, err := rand.Read(content)
|
||||
require.NoError(t, err)
|
||||
{
|
||||
body, err := json.Marshal(&Request{
|
||||
Key: key,
|
||||
Version: version,
|
||||
Size: 100,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
			resp, err := http.Post(fmt.Sprintf("%s/caches", base), "application/json", bytes.NewReader(body))
			require.NoError(t, err)
			assert.Equal(t, 200, resp.StatusCode)

			got := struct {
				CacheID uint64 `json:"cacheId"`
			}{}
			require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
			id = got.CacheID
		}
		{
			req, err := http.NewRequest(http.MethodPatch,
				fmt.Sprintf("%s/caches/%d", base, id), bytes.NewReader(content))
			require.NoError(t, err)
			req.Header.Set("Content-Type", "application/octet-stream")
			req.Header.Set("Content-Range", "bytes xx-99/*")
			resp, err := http.DefaultClient.Do(req)
			require.NoError(t, err)
			assert.Equal(t, 400, resp.StatusCode)
		}
	})

	t.Run("commit with bad id", func(t *testing.T) {
		{
			resp, err := http.Post(fmt.Sprintf("%s/caches/invalid_id", base), "", nil)
			require.NoError(t, err)
			assert.Equal(t, 400, resp.StatusCode)
		}
	})

	t.Run("commit with not exist id", func(t *testing.T) {
		{
			resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, 100), "", nil)
			require.NoError(t, err)
			assert.Equal(t, 400, resp.StatusCode)
		}
	})

	t.Run("duplicate commit", func(t *testing.T) {
		key := strings.ToLower(t.Name())
		version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
		var id uint64
		content := make([]byte, 100)
		_, err := rand.Read(content)
		require.NoError(t, err)
		{
			body, err := json.Marshal(&Request{
				Key:     key,
				Version: version,
				Size:    100,
			})
			require.NoError(t, err)
			resp, err := http.Post(fmt.Sprintf("%s/caches", base), "application/json", bytes.NewReader(body))
			require.NoError(t, err)
			assert.Equal(t, 200, resp.StatusCode)

			got := struct {
				CacheID uint64 `json:"cacheId"`
			}{}
			require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
			id = got.CacheID
		}
		{
			req, err := http.NewRequest(http.MethodPatch,
				fmt.Sprintf("%s/caches/%d", base, id), bytes.NewReader(content))
			require.NoError(t, err)
			req.Header.Set("Content-Type", "application/octet-stream")
			req.Header.Set("Content-Range", "bytes 0-99/*")
			resp, err := http.DefaultClient.Do(req)
			require.NoError(t, err)
			assert.Equal(t, 200, resp.StatusCode)
		}
		{
			resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil)
			require.NoError(t, err)
			assert.Equal(t, 200, resp.StatusCode)
		}
		{
			resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil)
			require.NoError(t, err)
			assert.Equal(t, 400, resp.StatusCode)
		}
	})

	t.Run("commit early", func(t *testing.T) {
		key := strings.ToLower(t.Name())
		version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
		var id uint64
		content := make([]byte, 100)
		_, err := rand.Read(content)
		require.NoError(t, err)
		{
			body, err := json.Marshal(&Request{
				Key:     key,
				Version: version,
				Size:    100,
			})
			require.NoError(t, err)
			resp, err := http.Post(fmt.Sprintf("%s/caches", base), "application/json", bytes.NewReader(body))
			require.NoError(t, err)
			assert.Equal(t, 200, resp.StatusCode)

			got := struct {
				CacheID uint64 `json:"cacheId"`
			}{}
			require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
			id = got.CacheID
		}
		{
			req, err := http.NewRequest(http.MethodPatch,
				fmt.Sprintf("%s/caches/%d", base, id), bytes.NewReader(content[:50]))
			require.NoError(t, err)
			req.Header.Set("Content-Type", "application/octet-stream")
			req.Header.Set("Content-Range", "bytes 0-59/*")
			resp, err := http.DefaultClient.Do(req)
			require.NoError(t, err)
			assert.Equal(t, 200, resp.StatusCode)
		}
		{
			resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil)
			require.NoError(t, err)
			assert.Equal(t, 500, resp.StatusCode)
		}
	})

	t.Run("get with bad id", func(t *testing.T) {
		resp, err := http.Get(fmt.Sprintf("%s/artifacts/invalid_id", base))
		require.NoError(t, err)
		require.Equal(t, 400, resp.StatusCode)
	})

	t.Run("get with not exist id", func(t *testing.T) {
		resp, err := http.Get(fmt.Sprintf("%s/artifacts/%d", base, 100))
		require.NoError(t, err)
		require.Equal(t, 404, resp.StatusCode)
	})

	t.Run("get with not exist id", func(t *testing.T) {
		resp, err := http.Get(fmt.Sprintf("%s/artifacts/%d", base, 100))
		require.NoError(t, err)
		require.Equal(t, 404, resp.StatusCode)
	})

	t.Run("get with multiple keys", func(t *testing.T) {
		version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
		key := strings.ToLower(t.Name())
		keys := [3]string{
			key + "_a",
			key + "_a_b",
			key + "_a_b_c",
		}
		contents := [3][]byte{
			make([]byte, 100),
			make([]byte, 200),
			make([]byte, 300),
		}
		for i := range contents {
			_, err := rand.Read(contents[i])
			require.NoError(t, err)
			uploadCacheNormally(t, base, keys[i], version, contents[i])
		}

		reqKeys := strings.Join([]string{
			key + "_a_b_x",
			key + "_a_b",
			key + "_a",
		}, ",")
		var archiveLocation string
		{
			resp, err := http.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, reqKeys, version))
			require.NoError(t, err)
			require.Equal(t, 200, resp.StatusCode)
			got := struct {
				Result          string `json:"result"`
				ArchiveLocation string `json:"archiveLocation"`
				CacheKey        string `json:"cacheKey"`
			}{}
			require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
			assert.Equal(t, "hit", got.Result)
			assert.Equal(t, keys[1], got.CacheKey)
			archiveLocation = got.ArchiveLocation
		}
		{
			resp, err := http.Get(archiveLocation) //nolint:gosec
			require.NoError(t, err)
			require.Equal(t, 200, resp.StatusCode)
			got, err := io.ReadAll(resp.Body)
			require.NoError(t, err)
			assert.Equal(t, contents[1], got)
		}
	})

	t.Run("case insensitive", func(t *testing.T) {
		version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
		key := strings.ToLower(t.Name())
		content := make([]byte, 100)
		_, err := rand.Read(content)
		require.NoError(t, err)
		uploadCacheNormally(t, base, key+"_ABC", version, content)

		{
			reqKey := key + "_aBc"
			resp, err := http.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, reqKey, version))
			require.NoError(t, err)
			require.Equal(t, 200, resp.StatusCode)
			got := struct {
				Result          string `json:"result"`
				ArchiveLocation string `json:"archiveLocation"`
				CacheKey        string `json:"cacheKey"`
			}{}
			require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
			assert.Equal(t, "hit", got.Result)
			assert.Equal(t, key+"_abc", got.CacheKey)
		}
	})
}

func uploadCacheNormally(t *testing.T, base, key, version string, content []byte) {
	var id uint64
	{
		body, err := json.Marshal(&Request{
			Key:     key,
			Version: version,
			Size:    int64(len(content)),
		})
		require.NoError(t, err)
		resp, err := http.Post(fmt.Sprintf("%s/caches", base), "application/json", bytes.NewReader(body))
		require.NoError(t, err)
		assert.Equal(t, 200, resp.StatusCode)

		got := struct {
			CacheID uint64 `json:"cacheId"`
		}{}
		require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
		id = got.CacheID
	}
	{
		req, err := http.NewRequest(http.MethodPatch,
			fmt.Sprintf("%s/caches/%d", base, id), bytes.NewReader(content))
		require.NoError(t, err)
		req.Header.Set("Content-Type", "application/octet-stream")
		req.Header.Set("Content-Range", "bytes 0-99/*")
		resp, err := http.DefaultClient.Do(req)
		require.NoError(t, err)
		assert.Equal(t, 200, resp.StatusCode)
	}
	{
		resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil)
		require.NoError(t, err)
		assert.Equal(t, 200, resp.StatusCode)
	}
	var archiveLocation string
	{
		resp, err := http.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, key, version))
		require.NoError(t, err)
		require.Equal(t, 200, resp.StatusCode)
		got := struct {
			Result          string `json:"result"`
			ArchiveLocation string `json:"archiveLocation"`
			CacheKey        string `json:"cacheKey"`
		}{}
		require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
		assert.Equal(t, "hit", got.Result)
		assert.Equal(t, strings.ToLower(key), got.CacheKey)
		archiveLocation = got.ArchiveLocation
	}
	{
		resp, err := http.Get(archiveLocation) //nolint:gosec
		require.NoError(t, err)
		require.Equal(t, 200, resp.StatusCode)
		got, err := io.ReadAll(resp.Body)
		require.NoError(t, err)
		assert.Equal(t, content, got)
	}
}
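The subtests above all drive the same four-step protocol. A condensed sketch of that flow, for orientation only (cacheRoundTrip is an illustrative helper, not part of this diff; it reuses the Request type and imports from the test file and elides status-code checks):

func cacheRoundTrip(base, key, version string, content []byte) error {
	// 1. Reserve: POST /caches with key, version and size; the server answers with a cache ID.
	body, err := json.Marshal(&Request{Key: key, Version: version, Size: int64(len(content))})
	if err != nil {
		return err
	}
	resp, err := http.Post(base+"/caches", "application/json", bytes.NewReader(body))
	if err != nil {
		return err
	}
	var got struct {
		CacheID uint64 `json:"cacheId"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&got); err != nil {
		return err
	}
	// 2. Upload: PATCH /caches/{id}; the offset in Content-Range is what allows chunked uploads.
	req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%s/caches/%d", base, got.CacheID), bytes.NewReader(content))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/octet-stream")
	req.Header.Set("Content-Range", fmt.Sprintf("bytes 0-%d/*", len(content)-1))
	if _, err := http.DefaultClient.Do(req); err != nil {
		return err
	}
	// 3. Commit: POST /caches/{id}; afterwards GET /cache?keys=...&version=... can find it.
	_, err = http.Post(fmt.Sprintf("%s/caches/%d", base, got.CacheID), "", nil)
	return err
}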
44 pkg/artifactcache/model.go Normal file
@@ -0,0 +1,44 @@
package artifactcache

import (
	"crypto/sha256"
	"fmt"
)

type Request struct {
	Key     string `json:"key" `
	Version string `json:"version"`
	Size    int64  `json:"cacheSize"`
}

func (c *Request) ToCache() *Cache {
	if c == nil {
		return nil
	}
	ret := &Cache{
		Key:     c.Key,
		Version: c.Version,
		Size:    c.Size,
	}
	if c.Size == 0 {
		// The request comes from an old version of actions, like `actions/cache@v2`,
		// which doesn't send the cache size. Set it to -1 to indicate that.
		ret.Size = -1
	}
	return ret
}

type Cache struct {
	ID             uint64 `json:"id" boltholdKey:"ID"`
	Key            string `json:"key" boltholdIndex:"Key"`
	Version        string `json:"version" boltholdIndex:"Version"`
	KeyVersionHash string `json:"keyVersionHash" boltholdUnique:"KeyVersionHash"`
	Size           int64  `json:"cacheSize"`
	Complete       bool   `json:"complete"`
	UsedAt         int64  `json:"usedAt" boltholdIndex:"UsedAt"`
	CreatedAt      int64  `json:"createdAt" boltholdIndex:"CreatedAt"`
}

func (c *Cache) FillKeyVersionHash() {
	c.KeyVersionHash = fmt.Sprintf("%x", sha256.Sum256([]byte(fmt.Sprintf("%s:%s", c.Key, c.Version))))
}
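For clarity, what FillKeyVersionHash stores is just the hex SHA-256 of "key:version"; the bolthold unique index on KeyVersionHash is what makes each (key, version) pair unique. A standalone sketch (keyVersionHash and the sample values are illustrative, not part of the diff):

package main

import (
	"crypto/sha256"
	"fmt"
)

// keyVersionHash mirrors Cache.FillKeyVersionHash: two caches share a
// KeyVersionHash exactly when they share both key and version.
func keyVersionHash(key, version string) string {
	return fmt.Sprintf("%x", sha256.Sum256([]byte(fmt.Sprintf("%s:%s", key, version))))
}

func main() {
	fmt.Println(keyVersionHash("linux-primes", "c19da02a"))
}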
130 pkg/artifactcache/storage.go Normal file
@@ -0,0 +1,130 @@
package artifactcache

import (
	"fmt"
	"io"
	"net/http"
	"os"
	"path/filepath"
)

type Storage struct {
	rootDir string
}

func NewStorage(rootDir string) (*Storage, error) {
	if err := os.MkdirAll(rootDir, 0o755); err != nil {
		return nil, err
	}
	return &Storage{
		rootDir: rootDir,
	}, nil
}

func (s *Storage) Exist(id uint64) (bool, error) {
	name := s.filename(id)
	if _, err := os.Stat(name); os.IsNotExist(err) {
		return false, nil
	} else if err != nil {
		return false, err
	}
	return true, nil
}

func (s *Storage) Write(id uint64, offset int64, reader io.Reader) error {
	name := s.tempName(id, offset)
	if err := os.MkdirAll(filepath.Dir(name), 0o755); err != nil {
		return err
	}
	file, err := os.Create(name)
	if err != nil {
		return err
	}
	defer file.Close()

	_, err = io.Copy(file, reader)
	return err
}

func (s *Storage) Commit(id uint64, size int64) (int64, error) {
	defer func() {
		_ = os.RemoveAll(s.tempDir(id))
	}()

	name := s.filename(id)
	tempNames, err := s.tempNames(id)
	if err != nil {
		return 0, err
	}

	if err := os.MkdirAll(filepath.Dir(name), 0o755); err != nil {
		return 0, err
	}
	file, err := os.Create(name)
	if err != nil {
		return 0, err
	}
	defer file.Close()

	var written int64
	for _, v := range tempNames {
		f, err := os.Open(v)
		if err != nil {
			return 0, err
		}
		n, err := io.Copy(file, f)
		_ = f.Close()
		if err != nil {
			return 0, err
		}
		written += n
	}

	// If size is less than 0, the expected size is unknown, so we skip the check.
	// That happens when the request comes from old versions of actions, like `actions/cache@v2`.
	if size >= 0 && written != size {
		_ = file.Close()
		_ = os.Remove(name)
		return 0, fmt.Errorf("broken file: %v != %v", written, size)
	}

	return written, nil
}

func (s *Storage) Serve(w http.ResponseWriter, r *http.Request, id uint64) {
	name := s.filename(id)
	http.ServeFile(w, r, name)
}

func (s *Storage) Remove(id uint64) {
	_ = os.Remove(s.filename(id))
	_ = os.RemoveAll(s.tempDir(id))
}

func (s *Storage) filename(id uint64) string {
	return filepath.Join(s.rootDir, fmt.Sprintf("%02x", id%0xff), fmt.Sprint(id))
}

func (s *Storage) tempDir(id uint64) string {
	return filepath.Join(s.rootDir, "tmp", fmt.Sprint(id))
}

func (s *Storage) tempName(id uint64, offset int64) string {
	return filepath.Join(s.tempDir(id), fmt.Sprintf("%016x", offset))
}

func (s *Storage) tempNames(id uint64) ([]string, error) {
	dir := s.tempDir(id)
	files, err := os.ReadDir(dir)
	if err != nil {
		return nil, err
	}
	var names []string
	for _, v := range files {
		if !v.IsDir() {
			names = append(names, filepath.Join(dir, v.Name()))
		}
	}
	return names, nil
}
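One subtlety worth calling out: Commit concatenates the temp files in the order os.ReadDir returns them, which is sorted by file name. That is only correct because tempName zero-pads the hex offset to 16 digits, so lexicographic name order equals ascending offset order. A quick standalone illustration (not part of the diff):

package main

import (
	"fmt"
	"sort"
)

func main() {
	// Chunks arriving out of order still concatenate correctly,
	// because "%016x" names sort the same way the offsets do.
	offsets := []int64{4096, 0, 65536}
	var names []string
	for _, off := range offsets {
		names = append(names, fmt.Sprintf("%016x", off))
	}
	sort.Strings(names) // what ReadDir's name ordering effectively applies
	fmt.Println(names)  // [0000000000000000 0000000000001000 0000000000010000]
}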
30 pkg/artifactcache/testdata/example/example.yaml vendored Normal file
@@ -0,0 +1,30 @@
# Copied from https://github.com/actions/cache#example-cache-workflow
name: Caching Primes

on: push

jobs:
  build:
    runs-on: ubuntu-latest

    steps:
      - run: env

      - uses: actions/checkout@v3

      - name: Cache Primes
        id: cache-primes
        uses: actions/cache@v3
        with:
          path: prime-numbers
          key: ${{ runner.os }}-primes-${{ github.run_id }}
          restore-keys: |
            ${{ runner.os }}-primes
            ${{ runner.os }}

      - name: Generate Prime Numbers
        if: steps.cache-primes.outputs.cache-hit != 'true'
        run: cat /proc/sys/kernel/random/uuid > prime-numbers

      - name: Use Prime Numbers
        run: cat prime-numbers
@@ -9,12 +9,12 @@ import (
|
||||
"io/fs"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/julienschmidt/httprouter"
|
||||
|
||||
"github.com/nektos/act/pkg/common"
|
||||
)
|
||||
|
||||
@@ -46,44 +46,53 @@ type ResponseMessage struct {
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
type MkdirFS interface {
|
||||
fs.FS
|
||||
MkdirAll(path string, perm fs.FileMode) error
|
||||
Open(name string) (fs.File, error)
|
||||
OpenAtEnd(name string) (fs.File, error)
|
||||
type WritableFile interface {
|
||||
io.WriteCloser
|
||||
}
|
||||
|
||||
type MkdirFsImpl struct {
|
||||
dir string
|
||||
fs.FS
|
||||
type WriteFS interface {
|
||||
OpenWritable(name string) (WritableFile, error)
|
||||
OpenAppendable(name string) (WritableFile, error)
|
||||
}
|
||||
|
||||
func (fsys MkdirFsImpl) MkdirAll(path string, perm fs.FileMode) error {
|
||||
return os.MkdirAll(fsys.dir+"/"+path, perm)
|
||||
type readWriteFSImpl struct {
|
||||
}
|
||||
|
||||
func (fsys MkdirFsImpl) Open(name string) (fs.File, error) {
|
||||
return os.OpenFile(fsys.dir+"/"+name, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0644)
|
||||
func (fwfs readWriteFSImpl) Open(name string) (fs.File, error) {
|
||||
return os.Open(name)
|
||||
}
|
||||
|
||||
func (fsys MkdirFsImpl) OpenAtEnd(name string) (fs.File, error) {
|
||||
file, err := os.OpenFile(fsys.dir+"/"+name, os.O_CREATE|os.O_RDWR, 0644)
|
||||
func (fwfs readWriteFSImpl) OpenWritable(name string) (WritableFile, error) {
|
||||
if err := os.MkdirAll(filepath.Dir(name), os.ModePerm); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return os.OpenFile(name, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o644)
|
||||
}
|
||||
|
||||
func (fwfs readWriteFSImpl) OpenAppendable(name string) (WritableFile, error) {
|
||||
if err := os.MkdirAll(filepath.Dir(name), os.ModePerm); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
file, err := os.OpenFile(name, os.O_CREATE|os.O_RDWR, 0o644)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, err = file.Seek(0, os.SEEK_END)
|
||||
_, err = file.Seek(0, io.SeekEnd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return file, nil
|
||||
}
|
||||
|
||||
var gzipExtension = ".gz__"
|
||||
|
||||
func uploads(router *httprouter.Router, fsys MkdirFS) {
|
||||
func safeResolve(baseDir string, relPath string) string {
|
||||
return filepath.Join(baseDir, filepath.Clean(filepath.Join(string(os.PathSeparator), relPath)))
|
||||
}
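safeResolve is the heart of the path-traversal fix verified by the GHSL-2023-004 workflow later in this diff: joining the untrusted path onto the path separator first lets filepath.Clean strip every leading "..", so the result can never escape baseDir. A standalone demonstration (Unix-style paths; not part of the diff):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// Same shape as safeResolve in the diff above.
func safeResolve(baseDir string, relPath string) string {
	return filepath.Join(baseDir, filepath.Clean(filepath.Join(string(os.PathSeparator), relPath)))
}

func main() {
	fmt.Println(safeResolve("/srv/artifacts", "1/some/file"))      // /srv/artifacts/1/some/file
	fmt.Println(safeResolve("/srv/artifacts", "../../etc/passwd")) // /srv/artifacts/etc/passwd
	fmt.Println(safeResolve("/srv/artifacts", "/etc/passwd"))      // /srv/artifacts/etc/passwd
}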
|
||||
|
||||
func uploads(router *httprouter.Router, baseDir string, fsys WriteFS) {
|
||||
router.POST("/_apis/pipelines/workflows/:runId/artifacts", func(w http.ResponseWriter, req *http.Request, params httprouter.Params) {
|
||||
runID := params.ByName("runId")
|
||||
|
||||
@@ -108,19 +117,15 @@ func uploads(router *httprouter.Router, fsys MkdirFS) {
|
||||
itemPath += gzipExtension
|
||||
}
|
||||
|
||||
filePath := fmt.Sprintf("%s/%s", runID, itemPath)
|
||||
safeRunPath := safeResolve(baseDir, runID)
|
||||
safePath := safeResolve(safeRunPath, itemPath)
|
||||
|
||||
err := fsys.MkdirAll(path.Dir(filePath), os.ModePerm)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
file, err := func() (fs.File, error) {
|
||||
file, err := func() (WritableFile, error) {
|
||||
contentRange := req.Header.Get("Content-Range")
|
||||
if contentRange != "" && !strings.HasPrefix(contentRange, "bytes 0-") {
|
||||
return fsys.OpenAtEnd(filePath)
|
||||
return fsys.OpenAppendable(safePath)
|
||||
}
|
||||
return fsys.Open(filePath)
|
||||
return fsys.OpenWritable(safePath)
|
||||
}()
|
||||
|
||||
if err != nil {
|
||||
@@ -170,11 +175,13 @@ func uploads(router *httprouter.Router, fsys MkdirFS) {
|
||||
})
|
||||
}
|
||||
|
||||
func downloads(router *httprouter.Router, fsys fs.FS) {
|
||||
func downloads(router *httprouter.Router, baseDir string, fsys fs.FS) {
|
||||
router.GET("/_apis/pipelines/workflows/:runId/artifacts", func(w http.ResponseWriter, req *http.Request, params httprouter.Params) {
|
||||
runID := params.ByName("runId")
|
||||
|
||||
entries, err := fs.ReadDir(fsys, runID)
|
||||
safePath := safeResolve(baseDir, runID)
|
||||
|
||||
entries, err := fs.ReadDir(fsys, safePath)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -204,21 +211,25 @@ func downloads(router *httprouter.Router, fsys fs.FS) {
|
||||
router.GET("/download/:container", func(w http.ResponseWriter, req *http.Request, params httprouter.Params) {
|
||||
container := params.ByName("container")
|
||||
itemPath := req.URL.Query().Get("itemPath")
|
||||
dirPath := fmt.Sprintf("%s/%s", container, itemPath)
|
||||
safePath := safeResolve(baseDir, filepath.Join(container, itemPath))
|
||||
|
||||
var files []ContainerItem
|
||||
err := fs.WalkDir(fsys, dirPath, func(path string, entry fs.DirEntry, err error) error {
|
||||
err := fs.WalkDir(fsys, safePath, func(path string, entry fs.DirEntry, err error) error {
|
||||
if !entry.IsDir() {
|
||||
rel, err := filepath.Rel(dirPath, path)
|
||||
rel, err := filepath.Rel(safePath, path)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// if it was uploaded as gzip
|
||||
rel = strings.TrimSuffix(rel, gzipExtension)
|
||||
path := filepath.Join(itemPath, rel)
|
||||
|
||||
rel = filepath.ToSlash(rel)
|
||||
path = filepath.ToSlash(path)
|
||||
|
||||
files = append(files, ContainerItem{
|
||||
Path: fmt.Sprintf("%s/%s", itemPath, rel),
|
||||
Path: path,
|
||||
ItemType: "file",
|
||||
ContentLocation: fmt.Sprintf("http://%s/artifact/%s/%s/%s", req.Host, container, itemPath, rel),
|
||||
})
|
||||
@@ -245,10 +256,12 @@ func downloads(router *httprouter.Router, fsys fs.FS) {
|
||||
router.GET("/artifact/*path", func(w http.ResponseWriter, req *http.Request, params httprouter.Params) {
|
||||
path := params.ByName("path")[1:]
|
||||
|
||||
file, err := fsys.Open(path)
|
||||
safePath := safeResolve(baseDir, path)
|
||||
|
||||
file, err := fsys.Open(safePath)
|
||||
if err != nil {
|
||||
// try gzip file
|
||||
file, err = fsys.Open(path + gzipExtension)
|
||||
file, err = fsys.Open(safePath + gzipExtension)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -262,7 +275,7 @@ func downloads(router *httprouter.Router, fsys fs.FS) {
|
||||
})
|
||||
}
|
||||
|
||||
func Serve(ctx context.Context, artifactPath string, port string) context.CancelFunc {
|
||||
func Serve(ctx context.Context, artifactPath string, addr string, port string) context.CancelFunc {
|
||||
serverContext, cancel := context.WithCancel(ctx)
|
||||
logger := common.Logger(serverContext)
|
||||
|
||||
@@ -273,20 +286,19 @@ func Serve(ctx context.Context, artifactPath string, port string) context.Cancel
|
||||
router := httprouter.New()
|
||||
|
||||
logger.Debugf("Artifacts base path '%s'", artifactPath)
|
||||
fs := os.DirFS(artifactPath)
|
||||
uploads(router, MkdirFsImpl{artifactPath, fs})
|
||||
downloads(router, fs)
|
||||
ip := common.GetOutboundIP().String()
|
||||
fsys := readWriteFSImpl{}
|
||||
uploads(router, artifactPath, fsys)
|
||||
downloads(router, artifactPath, fsys)
|
||||
|
||||
server := &http.Server{
|
||||
Addr: fmt.Sprintf("%s:%s", ip, port),
|
||||
Addr: fmt.Sprintf("%s:%s", addr, port),
|
||||
ReadHeaderTimeout: 2 * time.Second,
|
||||
Handler: router,
|
||||
}
|
||||
|
||||
// run server
|
||||
go func() {
|
||||
logger.Infof("Start server on http://%s:%s", ip, port)
|
||||
logger.Infof("Start server on http://%s:%s", addr, port)
|
||||
if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
|
||||
logger.Fatal(err)
|
||||
}
|
||||
|
@@ -4,7 +4,6 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
@@ -15,50 +14,50 @@ import (
|
||||
"testing/fstest"
|
||||
|
||||
"github.com/julienschmidt/httprouter"
|
||||
"github.com/nektos/act/pkg/model"
|
||||
"github.com/nektos/act/pkg/runner"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/nektos/act/pkg/model"
|
||||
"github.com/nektos/act/pkg/runner"
|
||||
)
|
||||
|
||||
type MapFsImpl struct {
|
||||
fstest.MapFS
|
||||
type writableMapFile struct {
|
||||
fstest.MapFile
|
||||
}
|
||||
|
||||
func (fsys MapFsImpl) MkdirAll(path string, perm fs.FileMode) error {
|
||||
// mocked no-op
|
||||
return nil
|
||||
}
|
||||
|
||||
type WritableFile struct {
|
||||
fs.File
|
||||
fsys fstest.MapFS
|
||||
path string
|
||||
}
|
||||
|
||||
func (file WritableFile) Write(data []byte) (int, error) {
|
||||
file.fsys[file.path].Data = data
|
||||
func (f *writableMapFile) Write(data []byte) (int, error) {
|
||||
f.Data = data
|
||||
return len(data), nil
|
||||
}
|
||||
|
||||
func (fsys MapFsImpl) Open(path string) (fs.File, error) {
|
||||
var file = fstest.MapFile{
|
||||
Data: []byte("content2"),
|
||||
}
|
||||
fsys.MapFS[path] = &file
|
||||
|
||||
result, err := fsys.MapFS.Open(path)
|
||||
return WritableFile{result, fsys.MapFS, path}, err
|
||||
func (f *writableMapFile) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fsys MapFsImpl) OpenAtEnd(path string) (fs.File, error) {
|
||||
var file = fstest.MapFile{
|
||||
Data: []byte("content2"),
|
||||
}
|
||||
fsys.MapFS[path] = &file
|
||||
type writeMapFS struct {
|
||||
fstest.MapFS
|
||||
}
|
||||
|
||||
result, err := fsys.MapFS.Open(path)
|
||||
return WritableFile{result, fsys.MapFS, path}, err
|
||||
func (fsys writeMapFS) OpenWritable(name string) (WritableFile, error) {
|
||||
var file = &writableMapFile{
|
||||
MapFile: fstest.MapFile{
|
||||
Data: []byte("content2"),
|
||||
},
|
||||
}
|
||||
fsys.MapFS[name] = &file.MapFile
|
||||
|
||||
return file, nil
|
||||
}
|
||||
|
||||
func (fsys writeMapFS) OpenAppendable(name string) (WritableFile, error) {
|
||||
var file = &writableMapFile{
|
||||
MapFile: fstest.MapFile{
|
||||
Data: []byte("content2"),
|
||||
},
|
||||
}
|
||||
fsys.MapFS[name] = &file.MapFile
|
||||
|
||||
return file, nil
|
||||
}
|
||||
|
||||
func TestNewArtifactUploadPrepare(t *testing.T) {
|
||||
@@ -67,7 +66,7 @@ func TestNewArtifactUploadPrepare(t *testing.T) {
|
||||
var memfs = fstest.MapFS(map[string]*fstest.MapFile{})
|
||||
|
||||
router := httprouter.New()
|
||||
uploads(router, MapFsImpl{memfs})
|
||||
uploads(router, "artifact/server/path", writeMapFS{memfs})
|
||||
|
||||
req, _ := http.NewRequest("POST", "http://localhost/_apis/pipelines/workflows/1/artifacts", nil)
|
||||
rr := httptest.NewRecorder()
|
||||
@@ -93,7 +92,7 @@ func TestArtifactUploadBlob(t *testing.T) {
|
||||
var memfs = fstest.MapFS(map[string]*fstest.MapFile{})
|
||||
|
||||
router := httprouter.New()
|
||||
uploads(router, MapFsImpl{memfs})
|
||||
uploads(router, "artifact/server/path", writeMapFS{memfs})
|
||||
|
||||
req, _ := http.NewRequest("PUT", "http://localhost/upload/1?itemPath=some/file", strings.NewReader("content"))
|
||||
rr := httptest.NewRecorder()
|
||||
@@ -111,7 +110,7 @@ func TestArtifactUploadBlob(t *testing.T) {
|
||||
}
|
||||
|
||||
assert.Equal("success", response.Message)
|
||||
assert.Equal("content", string(memfs["1/some/file"].Data))
|
||||
assert.Equal("content", string(memfs["artifact/server/path/1/some/file"].Data))
|
||||
}
|
||||
|
||||
func TestFinalizeArtifactUpload(t *testing.T) {
|
||||
@@ -120,7 +119,7 @@ func TestFinalizeArtifactUpload(t *testing.T) {
|
||||
var memfs = fstest.MapFS(map[string]*fstest.MapFile{})
|
||||
|
||||
router := httprouter.New()
|
||||
uploads(router, MapFsImpl{memfs})
|
||||
uploads(router, "artifact/server/path", writeMapFS{memfs})
|
||||
|
||||
req, _ := http.NewRequest("PATCH", "http://localhost/_apis/pipelines/workflows/1/artifacts", nil)
|
||||
rr := httptest.NewRecorder()
|
||||
@@ -144,13 +143,13 @@ func TestListArtifacts(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
var memfs = fstest.MapFS(map[string]*fstest.MapFile{
|
||||
"1/file.txt": {
|
||||
"artifact/server/path/1/file.txt": {
|
||||
Data: []byte(""),
|
||||
},
|
||||
})
|
||||
|
||||
router := httprouter.New()
|
||||
downloads(router, memfs)
|
||||
downloads(router, "artifact/server/path", memfs)
|
||||
|
||||
req, _ := http.NewRequest("GET", "http://localhost/_apis/pipelines/workflows/1/artifacts", nil)
|
||||
rr := httptest.NewRecorder()
|
||||
@@ -176,13 +175,13 @@ func TestListArtifactContainer(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
var memfs = fstest.MapFS(map[string]*fstest.MapFile{
|
||||
"1/some/file": {
|
||||
"artifact/server/path/1/some/file": {
|
||||
Data: []byte(""),
|
||||
},
|
||||
})
|
||||
|
||||
router := httprouter.New()
|
||||
downloads(router, memfs)
|
||||
downloads(router, "artifact/server/path", memfs)
|
||||
|
||||
req, _ := http.NewRequest("GET", "http://localhost/download/1?itemPath=some/file", nil)
|
||||
rr := httptest.NewRecorder()
|
||||
@@ -200,7 +199,7 @@ func TestListArtifactContainer(t *testing.T) {
|
||||
}
|
||||
|
||||
assert.Equal(1, len(response.Value))
|
||||
assert.Equal("some/file/.", response.Value[0].Path)
|
||||
assert.Equal("some/file", response.Value[0].Path)
|
||||
assert.Equal("file", response.Value[0].ItemType)
|
||||
assert.Equal("http://localhost/artifact/1/some/file/.", response.Value[0].ContentLocation)
|
||||
}
|
||||
@@ -209,13 +208,13 @@ func TestDownloadArtifactFile(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
var memfs = fstest.MapFS(map[string]*fstest.MapFile{
|
||||
"1/some/file": {
|
||||
"artifact/server/path/1/some/file": {
|
||||
Data: []byte("content"),
|
||||
},
|
||||
})
|
||||
|
||||
router := httprouter.New()
|
||||
downloads(router, memfs)
|
||||
downloads(router, "artifact/server/path", memfs)
|
||||
|
||||
req, _ := http.NewRequest("GET", "http://localhost/artifact/1/some/file", nil)
|
||||
rr := httptest.NewRecorder()
|
||||
@@ -240,8 +239,11 @@ type TestJobFileInfo struct {
|
||||
containerArchitecture string
|
||||
}
|
||||
|
||||
var aritfactsPath = path.Join(os.TempDir(), "test-artifacts")
|
||||
var artifactsPort = "12345"
|
||||
var (
|
||||
artifactsPath = path.Join(os.TempDir(), "test-artifacts")
|
||||
artifactsAddr = "127.0.0.1"
|
||||
artifactsPort = "12345"
|
||||
)
|
||||
|
||||
func TestArtifactFlow(t *testing.T) {
|
||||
if testing.Short() {
|
||||
@@ -250,15 +252,16 @@ func TestArtifactFlow(t *testing.T) {
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
cancel := Serve(ctx, aritfactsPath, artifactsPort)
|
||||
cancel := Serve(ctx, artifactsPath, artifactsAddr, artifactsPort)
|
||||
defer cancel()
|
||||
|
||||
platforms := map[string]string{
|
||||
"ubuntu-latest": "node:16-buster-slim",
|
||||
"ubuntu-latest": "node:16-buster", // Don't use node:16-buster-slim because it doesn't have curl command, which is used in the tests
|
||||
}
|
||||
|
||||
tables := []TestJobFileInfo{
|
||||
{"testdata", "upload-and-download", "push", "", platforms, ""},
|
||||
{"testdata", "GHSL-2023-004", "push", "", platforms, ""},
|
||||
}
|
||||
log.SetLevel(log.DebugLevel)
|
||||
|
||||
@@ -271,7 +274,7 @@ func runTestJobFile(ctx context.Context, t *testing.T, tjfi TestJobFileInfo) {
|
||||
t.Run(tjfi.workflowPath, func(t *testing.T) {
|
||||
fmt.Printf("::group::%s\n", tjfi.workflowPath)
|
||||
|
||||
if err := os.RemoveAll(aritfactsPath); err != nil {
|
||||
if err := os.RemoveAll(artifactsPath); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
@@ -286,7 +289,8 @@ func runTestJobFile(ctx context.Context, t *testing.T, tjfi TestJobFileInfo) {
|
||||
ReuseContainers: false,
|
||||
ContainerArchitecture: tjfi.containerArchitecture,
|
||||
GitHubInstance: "github.com",
|
||||
ArtifactServerPath: aritfactsPath,
|
||||
ArtifactServerPath: artifactsPath,
|
||||
ArtifactServerAddr: artifactsAddr,
|
||||
ArtifactServerPort: artifactsPort,
|
||||
}
|
||||
|
||||
@@ -296,15 +300,96 @@ func runTestJobFile(ctx context.Context, t *testing.T, tjfi TestJobFileInfo) {
|
||||
planner, err := model.NewWorkflowPlanner(fullWorkflowPath, true)
|
||||
assert.Nil(t, err, fullWorkflowPath)
|
||||
|
||||
plan := planner.PlanEvent(tjfi.eventName)
|
||||
|
||||
err = runner.NewPlanExecutor(plan)(ctx)
|
||||
if tjfi.errorMessage == "" {
|
||||
assert.Nil(t, err, fullWorkflowPath)
|
||||
plan, err := planner.PlanEvent(tjfi.eventName)
|
||||
if err == nil {
|
||||
err = runner.NewPlanExecutor(plan)(ctx)
|
||||
if tjfi.errorMessage == "" {
|
||||
assert.Nil(t, err, fullWorkflowPath)
|
||||
} else {
|
||||
assert.Error(t, err, tjfi.errorMessage)
|
||||
}
|
||||
} else {
|
||||
assert.Error(t, err, tjfi.errorMessage)
|
||||
assert.Nil(t, plan)
|
||||
}
|
||||
|
||||
fmt.Println("::endgroup::")
|
||||
})
|
||||
}
|
||||
|
||||
func TestMkdirFsImplSafeResolve(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
baseDir := "/foo/bar"
|
||||
|
||||
tests := map[string]struct {
|
||||
input string
|
||||
want string
|
||||
}{
|
||||
"simple": {input: "baz", want: "/foo/bar/baz"},
|
||||
"nested": {input: "baz/blue", want: "/foo/bar/baz/blue"},
|
||||
"dots in middle": {input: "baz/../../blue", want: "/foo/bar/blue"},
|
||||
"leading dots": {input: "../../parent", want: "/foo/bar/parent"},
|
||||
"root path": {input: "/root", want: "/foo/bar/root"},
|
||||
"root": {input: "/", want: "/foo/bar"},
|
||||
"empty": {input: "", want: "/foo/bar"},
|
||||
}
|
||||
|
||||
for name, tc := range tests {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
assert.Equal(tc.want, safeResolve(baseDir, tc.input))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDownloadArtifactFileUnsafePath(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
var memfs = fstest.MapFS(map[string]*fstest.MapFile{
|
||||
"artifact/server/path/some/file": {
|
||||
Data: []byte("content"),
|
||||
},
|
||||
})
|
||||
|
||||
router := httprouter.New()
|
||||
downloads(router, "artifact/server/path", memfs)
|
||||
|
||||
req, _ := http.NewRequest("GET", "http://localhost/artifact/2/../../some/file", nil)
|
||||
rr := httptest.NewRecorder()
|
||||
|
||||
router.ServeHTTP(rr, req)
|
||||
|
||||
if status := rr.Code; status != http.StatusOK {
|
||||
assert.FailNow(fmt.Sprintf("Wrong status: %d", status))
|
||||
}
|
||||
|
||||
data := rr.Body.Bytes()
|
||||
|
||||
assert.Equal("content", string(data))
|
||||
}
|
||||
|
||||
func TestArtifactUploadBlobUnsafePath(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
var memfs = fstest.MapFS(map[string]*fstest.MapFile{})
|
||||
|
||||
router := httprouter.New()
|
||||
uploads(router, "artifact/server/path", writeMapFS{memfs})
|
||||
|
||||
req, _ := http.NewRequest("PUT", "http://localhost/upload/1?itemPath=../../some/file", strings.NewReader("content"))
|
||||
rr := httptest.NewRecorder()
|
||||
|
||||
router.ServeHTTP(rr, req)
|
||||
|
||||
if status := rr.Code; status != http.StatusOK {
|
||||
assert.Fail("Wrong status")
|
||||
}
|
||||
|
||||
response := ResponseMessage{}
|
||||
err := json.Unmarshal(rr.Body.Bytes(), &response)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
assert.Equal("success", response.Message)
|
||||
assert.Equal("content", string(memfs["artifact/server/path/1/some/file"].Data))
|
||||
}
|
||||
|
39 pkg/artifacts/testdata/GHSL-2023-004/artifacts.yml vendored Normal file
@@ -0,0 +1,39 @@

name: "GHSL-2023-0004"
on: push

jobs:
  test-artifacts:
    runs-on: ubuntu-latest
    steps:
      - run: echo "hello world" > test.txt
      - name: curl upload
        run: curl --silent --show-error --fail ${ACTIONS_RUNTIME_URL}upload/1?itemPath=../../my-artifact/secret.txt --upload-file test.txt
      - uses: actions/download-artifact@v2
        with:
          name: my-artifact
          path: test-artifacts
      - name: 'Verify Artifact #1'
        run: |
          file="test-artifacts/secret.txt"
          if [ ! -f $file ] ; then
            echo "Expected file does not exist"
            exit 1
          fi
          if [ "$(cat $file)" != "hello world" ] ; then
            echo "File contents of downloaded artifact are incorrect"
            exit 1
          fi
      - name: Verify download should work by clean extra dots
        run: curl --silent --show-error --fail --path-as-is -o out.txt ${ACTIONS_RUNTIME_URL}artifact/1/../../../1/my-artifact/secret.txt
      - name: 'Verify download content'
        run: |
          file="out.txt"
          if [ ! -f $file ] ; then
            echo "Expected file does not exist"
            exit 1
          fi
          if [ "$(cat $file)" != "hello world" ] ; then
            echo "File contents of downloaded artifact are incorrect"
            exit 1
          fi
@@ -3,6 +3,8 @@ package common
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Warning that implements `error` but safe to ignore
|
||||
@@ -94,6 +96,11 @@ func NewParallelExecutor(parallel int, executors ...Executor) Executor {
|
||||
work := make(chan Executor, len(executors))
|
||||
errs := make(chan error, len(executors))
|
||||
|
||||
if 1 > parallel {
|
||||
log.Infof("Parallel tasks (%d) below minimum, setting to 1", parallel)
|
||||
parallel = 1
|
||||
}
|
||||
|
||||
for i := 0; i < parallel; i++ {
|
||||
go func(work <-chan Executor, errs chan<- error) {
|
||||
for executor := range work {
|
||||
|
@@ -100,6 +100,17 @@ func TestNewParallelExecutor(t *testing.T) {
|
||||
assert.Equal(3, count, "should run all 3 executors")
|
||||
assert.Equal(2, maxCount, "should run at most 2 executors in parallel")
|
||||
assert.Nil(err)
|
||||
|
||||
// Reset to test running the executor with 0 parallelism
|
||||
count = 0
|
||||
activeCount = 0
|
||||
maxCount = 0
|
||||
|
||||
errSingle := NewParallelExecutor(0, emptyWorkflow, emptyWorkflow, emptyWorkflow)(ctx)
|
||||
|
||||
assert.Equal(3, count, "should run all 3 executors")
|
||||
assert.Equal(1, maxCount, "should run at most 1 executor in parallel")
|
||||
assert.Nil(errSingle)
|
||||
}
|
||||
|
||||
func TestNewParallelExecutorFailed(t *testing.T) {
|
||||
|
@@ -7,20 +7,19 @@ import (
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/nektos/act/pkg/common"
|
||||
|
||||
"github.com/go-git/go-git/v5"
|
||||
"github.com/go-git/go-git/v5/config"
|
||||
"github.com/go-git/go-git/v5/plumbing"
|
||||
"github.com/go-git/go-git/v5/plumbing/storer"
|
||||
"github.com/go-git/go-git/v5/plumbing/transport/http"
|
||||
"github.com/go-ini/ini"
|
||||
"github.com/mattn/go-isatty"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/nektos/act/pkg/common"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -55,41 +54,40 @@ func (e *Error) Commit() string {
|
||||
// FindGitRevision get the current git revision
|
||||
func FindGitRevision(ctx context.Context, file string) (shortSha string, sha string, err error) {
|
||||
logger := common.Logger(ctx)
|
||||
gitDir, err := findGitDirectory(file)
|
||||
|
||||
gitDir, err := git.PlainOpenWithOptions(
|
||||
file,
|
||||
&git.PlainOpenOptions{
|
||||
DetectDotGit: true,
|
||||
EnableDotGitCommonDir: true,
|
||||
},
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
logger.WithError(err).Error("path", file, "not located inside a git repository")
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
head, err := gitDir.Reference(plumbing.HEAD, true)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
bts, err := os.ReadFile(filepath.Join(gitDir, "HEAD"))
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
if head.Hash().IsZero() {
|
||||
return "", "", fmt.Errorf("HEAD sha1 could not be resolved")
|
||||
}
|
||||
|
||||
var ref = strings.TrimSpace(strings.TrimPrefix(string(bts), "ref:"))
|
||||
var refBuf []byte
|
||||
if strings.HasPrefix(ref, "refs/") {
|
||||
// load commitid ref
|
||||
refBuf, err = os.ReadFile(filepath.Join(gitDir, ref))
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
} else {
|
||||
refBuf = []byte(ref)
|
||||
}
|
||||
hash := head.Hash().String()
|
||||
|
||||
logger.Debugf("Found revision: %s", refBuf)
|
||||
return string(refBuf[:7]), strings.TrimSpace(string(refBuf)), nil
|
||||
logger.Debugf("Found revision: %s", hash)
|
||||
return hash[:7], strings.TrimSpace(hash), nil
|
||||
}
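For readers new to go-git: the rewrite above swaps hand-rolled parsing of .git/HEAD for the library's resolver. A minimal standalone sketch of the same HEAD resolution (assumes it is run from inside a repository; error handling reduced to panics):

package main

import (
	"fmt"

	"github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing"
)

func main() {
	// DetectDotGit walks up from the given path until it finds a .git
	// directory, mirroring what the removed findGitDirectory loop did by hand.
	repo, err := git.PlainOpenWithOptions(".", &git.PlainOpenOptions{
		DetectDotGit:          true,
		EnableDotGitCommonDir: true,
	})
	if err != nil {
		panic(err)
	}
	// Resolve HEAD through symbolic refs down to the underlying commit hash.
	head, err := repo.Reference(plumbing.HEAD, true)
	if err != nil {
		panic(err)
	}
	fmt.Println(head.Hash().String())
}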
|
||||
|
||||
// FindGitRef get the current git ref
|
||||
func FindGitRef(ctx context.Context, file string) (string, error) {
|
||||
logger := common.Logger(ctx)
|
||||
gitDir, err := findGitDirectory(file)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
logger.Debugf("Loading revision from git directory '%s'", gitDir)
|
||||
|
||||
logger.Debugf("Loading revision from git directory")
|
||||
_, ref, err := FindGitRevision(ctx, file)
|
||||
if err != nil {
|
||||
return "", err
|
||||
@@ -100,28 +98,58 @@ func FindGitRef(ctx context.Context, file string) (string, error) {
|
||||
// Prefer the git library to iterate over the references and find a matching tag or branch.
|
||||
var refTag = ""
|
||||
var refBranch = ""
|
||||
r, err := git.PlainOpen(filepath.Join(gitDir, ".."))
|
||||
if err == nil {
|
||||
iter, err := r.References()
|
||||
if err == nil {
|
||||
for {
|
||||
r, err := iter.Next()
|
||||
if r == nil || err != nil {
|
||||
break
|
||||
}
|
||||
// logger.Debugf("Reference: name=%s sha=%s", r.Name().String(), r.Hash().String())
|
||||
if r.Hash().String() == ref {
|
||||
if r.Name().IsTag() {
|
||||
refTag = r.Name().String()
|
||||
}
|
||||
if r.Name().IsBranch() {
|
||||
refBranch = r.Name().String()
|
||||
}
|
||||
}
|
||||
}
|
||||
iter.Close()
|
||||
}
|
||||
repo, err := git.PlainOpenWithOptions(
|
||||
file,
|
||||
&git.PlainOpenOptions{
|
||||
DetectDotGit: true,
|
||||
EnableDotGitCommonDir: true,
|
||||
},
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
iter, err := repo.References()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// find the reference that matches the revision's hash
|
||||
err = iter.ForEach(func(r *plumbing.Reference) error {
|
||||
/* tags and branches will have the same hash:
 * when a user checks out a tag, that is not mentioned explicitly
 * in the go-git package, so we must identify the revision
 * and then check whether any tag matches that revision;
 * if so, then we checked out a tag,
 * otherwise we look for branches, and if one matches,
 * it means we checked out a branch.
 *
 * If a branch matches first, we must continue and check all tags (all references)
 * in case we match with a tag later in the iteration.
 */
|
||||
if r.Hash().String() == ref {
|
||||
if r.Name().IsTag() {
|
||||
refTag = r.Name().String()
|
||||
}
|
||||
if r.Name().IsBranch() {
|
||||
refBranch = r.Name().String()
|
||||
}
|
||||
}
|
||||
|
||||
// we found what we were looking for
|
||||
if refTag != "" && refBranch != "" {
|
||||
return storer.ErrStop
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// order matters here, see the comment above.
|
||||
if refTag != "" {
|
||||
return refTag, nil
|
||||
}
|
||||
@@ -129,39 +157,7 @@ func FindGitRef(ctx context.Context, file string) (string, error) {
|
||||
return refBranch, nil
|
||||
}
|
||||
|
||||
// If the above doesn't work, fall back to the old way
|
||||
|
||||
// try tags first
|
||||
tag, err := findGitPrettyRef(ctx, ref, gitDir, "refs/tags")
|
||||
if err != nil || tag != "" {
|
||||
return tag, err
|
||||
}
|
||||
// and then branches
|
||||
return findGitPrettyRef(ctx, ref, gitDir, "refs/heads")
|
||||
}
|
||||
|
||||
func findGitPrettyRef(ctx context.Context, head, root, sub string) (string, error) {
|
||||
var name string
|
||||
var err = filepath.Walk(filepath.Join(root, sub), func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if name != "" || info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
var bts []byte
|
||||
if bts, err = os.ReadFile(path); err != nil {
|
||||
return err
|
||||
}
|
||||
var pointsTo = strings.TrimSpace(string(bts))
|
||||
if head == pointsTo {
|
||||
// On Windows paths are separated with backslash character so they should be replaced to provide proper git refs format
|
||||
name = strings.TrimPrefix(strings.ReplaceAll(strings.Replace(path, root, "", 1), `\`, `/`), "/")
|
||||
common.Logger(ctx).Debugf("HEAD matches %s", name)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return name, err
|
||||
return "", fmt.Errorf("failed to identify reference (tag/branch) for the checked-out revision '%s'", ref)
|
||||
}
|
||||
|
||||
// FindGithubRepo get the repo
|
||||
@@ -178,27 +174,28 @@ func FindGithubRepo(ctx context.Context, file, githubInstance, remoteName string
|
||||
return slug, err
|
||||
}
|
||||
|
||||
func findGitRemoteURL(ctx context.Context, file, remoteName string) (string, error) {
|
||||
gitDir, err := findGitDirectory(file)
|
||||
func findGitRemoteURL(_ context.Context, file, remoteName string) (string, error) {
|
||||
repo, err := git.PlainOpenWithOptions(
|
||||
file,
|
||||
&git.PlainOpenOptions{
|
||||
DetectDotGit: true,
|
||||
EnableDotGitCommonDir: true,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
common.Logger(ctx).Debugf("Loading slug from git directory '%s'", gitDir)
|
||||
|
||||
gitconfig, err := ini.InsensitiveLoad(fmt.Sprintf("%s/config", gitDir))
|
||||
remote, err := repo.Remote(remoteName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
remote, err := gitconfig.GetSection(fmt.Sprintf(`remote "%s"`, remoteName))
|
||||
if err != nil {
|
||||
return "", err
|
||||
|
||||
if len(remote.Config().URLs) < 1 {
|
||||
return "", fmt.Errorf("remote '%s' exists but has no URL", remoteName)
|
||||
}
|
||||
urlKey, err := remote.GetKey("url")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
url := urlKey.String()
|
||||
return url, nil
|
||||
|
||||
return remote.Config().URLs[0], nil
|
||||
}
|
||||
|
||||
func findGitSlug(url string, githubInstance string) (string, string, error) {
|
||||
@@ -222,41 +219,13 @@ func findGitSlug(url string, githubInstance string) (string, string, error) {
|
||||
return "", url, nil
|
||||
}
|
||||
|
||||
func findGitDirectory(fromFile string) (string, error) {
|
||||
absPath, err := filepath.Abs(fromFile)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
fi, err := os.Stat(absPath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
var dir string
|
||||
if fi.Mode().IsDir() {
|
||||
dir = absPath
|
||||
} else {
|
||||
dir = filepath.Dir(absPath)
|
||||
}
|
||||
|
||||
gitPath := filepath.Join(dir, ".git")
|
||||
fi, err = os.Stat(gitPath)
|
||||
if err == nil && fi.Mode().IsDir() {
|
||||
return gitPath, nil
|
||||
} else if dir == "/" || dir == "C:\\" || dir == "c:\\" {
|
||||
return "", &Error{err: ErrNoRepo}
|
||||
}
|
||||
|
||||
return findGitDirectory(filepath.Dir(dir))
|
||||
}
|
||||
|
||||
// NewGitCloneExecutorInput the input for the NewGitCloneExecutor
|
||||
type NewGitCloneExecutorInput struct {
|
||||
URL string
|
||||
Ref string
|
||||
Dir string
|
||||
Token string
|
||||
URL string
|
||||
Ref string
|
||||
Dir string
|
||||
Token string
|
||||
OfflineMode bool
|
||||
}
|
||||
|
||||
// CloneIfRequired ...
|
||||
@@ -292,7 +261,7 @@ func CloneIfRequired(ctx context.Context, refName plumbing.ReferenceName, input
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = os.Chmod(input.Dir, 0755); err != nil {
|
||||
if err = os.Chmod(input.Dir, 0o755); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
@@ -334,12 +303,16 @@ func NewGitCloneExecutor(input NewGitCloneExecutorInput) common.Executor {
|
||||
return err
|
||||
}
|
||||
|
||||
isOfflineMode := input.OfflineMode
|
||||
|
||||
// fetch latest changes
|
||||
fetchOptions, pullOptions := gitOptions(input.Token)
|
||||
|
||||
err = r.Fetch(&fetchOptions)
|
||||
if err != nil && !errors.Is(err, git.NoErrAlreadyUpToDate) {
|
||||
return err
|
||||
if !isOfflineMode {
|
||||
err = r.Fetch(&fetchOptions)
|
||||
if err != nil && !errors.Is(err, git.NoErrAlreadyUpToDate) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
var hash *plumbing.Hash
|
||||
@@ -399,9 +372,10 @@ func NewGitCloneExecutor(input NewGitCloneExecutorInput) common.Executor {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err = w.Pull(&pullOptions); err != nil && err != git.NoErrAlreadyUpToDate {
|
||||
logger.Debugf("Unable to pull %s: %v", refName, err)
|
||||
if !isOfflineMode {
|
||||
if err = w.Pull(&pullOptions); err != nil && err != git.NoErrAlreadyUpToDate {
|
||||
logger.Debugf("Unable to pull %s: %v", refName, err)
|
||||
}
|
||||
}
|
||||
logger.Debugf("Cloned %s to %s", input.URL, input.Dir)
|
||||
|
||||
|
@@ -82,12 +82,19 @@ func TestFindGitRemoteURL(t *testing.T) {
|
||||
assert.NoError(err)
|
||||
|
||||
remoteURL := "https://git-codecommit.us-east-1.amazonaws.com/v1/repos/my-repo-name"
|
||||
err = gitCmd("config", "-f", fmt.Sprintf("%s/.git/config", basedir), "--add", "remote.origin.url", remoteURL)
|
||||
err = gitCmd("-C", basedir, "remote", "add", "origin", remoteURL)
|
||||
assert.NoError(err)
|
||||
|
||||
u, err := findGitRemoteURL(context.Background(), basedir, "origin")
|
||||
assert.NoError(err)
|
||||
assert.Equal(remoteURL, u)
|
||||
|
||||
remoteURL = "git@github.com/AwesomeOwner/MyAwesomeRepo.git"
|
||||
err = gitCmd("-C", basedir, "remote", "add", "upstream", remoteURL)
|
||||
assert.NoError(err)
|
||||
u, err = findGitRemoteURL(context.Background(), basedir, "upstream")
|
||||
assert.NoError(err)
|
||||
assert.Equal(remoteURL, u)
|
||||
}
|
||||
|
||||
func TestGitFindRef(t *testing.T) {
|
||||
@@ -160,7 +167,7 @@ func TestGitFindRef(t *testing.T) {
|
||||
name := name
|
||||
t.Run(name, func(t *testing.T) {
|
||||
dir := filepath.Join(basedir, name)
|
||||
require.NoError(t, os.MkdirAll(dir, 0755))
|
||||
require.NoError(t, os.MkdirAll(dir, 0o755))
|
||||
require.NoError(t, gitCmd("-C", dir, "init", "--initial-branch=master"))
|
||||
require.NoError(t, cleanGitHooks(dir))
|
||||
tt.Prepare(t, dir)
|
||||
|
@@ -2,20 +2,74 @@ package common
|
||||
|
||||
import (
|
||||
"net"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// https://stackoverflow.com/a/37382208
|
||||
// Get preferred outbound ip of this machine
|
||||
// GetOutboundIP returns an outbound IP address of this machine.
|
||||
// It tries to access the internet and returns the local IP address of the connection.
|
||||
// If the machine cannot access the internet, it returns a preferred IP address from network interfaces.
|
||||
// It returns nil if no IP address is found.
|
||||
func GetOutboundIP() net.IP {
|
||||
// See https://stackoverflow.com/a/37382208
|
||||
conn, err := net.Dial("udp", "8.8.8.8:80")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
if err == nil {
|
||||
defer conn.Close()
|
||||
return conn.LocalAddr().(*net.UDPAddr).IP
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
localAddr := conn.LocalAddr().(*net.UDPAddr)
|
||||
// The machine cannot access the internet, so pick an IP address from the network interfaces.
|
||||
if ifs, err := net.Interfaces(); err == nil {
|
||||
type IP struct {
|
||||
net.IP
|
||||
net.Interface
|
||||
}
|
||||
var ips []IP
|
||||
for _, i := range ifs {
|
||||
if addrs, err := i.Addrs(); err == nil {
|
||||
for _, addr := range addrs {
|
||||
var ip net.IP
|
||||
switch v := addr.(type) {
|
||||
case *net.IPNet:
|
||||
ip = v.IP
|
||||
case *net.IPAddr:
|
||||
ip = v.IP
|
||||
}
|
||||
if ip.IsGlobalUnicast() {
|
||||
ips = append(ips, IP{ip, i})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(ips) > 1 {
|
||||
sort.Slice(ips, func(i, j int) bool {
|
||||
ifi := ips[i].Interface
|
||||
ifj := ips[j].Interface
|
||||
|
||||
return localAddr.IP
|
||||
// ethernet is preferred
|
||||
if vi, vj := strings.HasPrefix(ifi.Name, "e"), strings.HasPrefix(ifj.Name, "e"); vi != vj {
|
||||
return vi
|
||||
}
|
||||
|
||||
ipi := ips[i].IP
|
||||
ipj := ips[j].IP
|
||||
|
||||
// IPv4 is preferred
|
||||
if vi, vj := ipi.To4() != nil, ipj.To4() != nil; vi != vj {
|
||||
return vi
|
||||
}
|
||||
|
||||
// en0 is preferred to en1
|
||||
if ifi.Name != ifj.Name {
|
||||
return ifi.Name < ifj.Name
|
||||
}
|
||||
|
||||
// fallback
|
||||
return ipi.String() < ipj.String()
|
||||
})
|
||||
return ips[0].IP
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
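A usage sketch of the new fallback behavior (the import path matches this repository; the output depends on the machine):

package main

import (
	"fmt"

	"github.com/nektos/act/pkg/common"
)

func main() {
	// GetOutboundIP now degrades gracefully: the UDP-dial trick first, then
	// the interface scan with its ethernet/IPv4/name preferences, then nil.
	if ip := common.GetOutboundIP(); ip != nil {
		fmt.Printf("outbound IP: %s\n", ip)
	} else {
		fmt.Println("no usable IP address found")
	}
}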
|
||||
|
81 pkg/container/container_types.go Normal file
@@ -0,0 +1,81 @@
package container

import (
	"context"
	"io"

	"github.com/docker/go-connections/nat"
	"github.com/nektos/act/pkg/common"
)

// NewContainerInput the input for the New function
type NewContainerInput struct {
	Image          string
	Username       string
	Password       string
	Entrypoint     []string
	Cmd            []string
	WorkingDir     string
	Env            []string
	Binds          []string
	Mounts         map[string]string
	Name           string
	Stdout         io.Writer
	Stderr         io.Writer
	NetworkMode    string
	Privileged     bool
	UsernsMode     string
	Platform       string
	Options        string
	NetworkAliases []string
	ExposedPorts   nat.PortSet
	PortBindings   nat.PortMap

	// Gitea specific
	AutoRemove bool

	ValidVolumes []string
}

// FileEntry is a file to copy to a container
type FileEntry struct {
	Name string
	Mode int64
	Body string
}

// Container for managing docker run containers
type Container interface {
	Create(capAdd []string, capDrop []string) common.Executor
	ConnectToNetwork(name string) common.Executor
	Copy(destPath string, files ...*FileEntry) common.Executor
	CopyTarStream(ctx context.Context, destPath string, tarStream io.Reader) error
	CopyDir(destPath string, srcPath string, useGitIgnore bool) common.Executor
	GetContainerArchive(ctx context.Context, srcPath string) (io.ReadCloser, error)
	Pull(forcePull bool) common.Executor
	Start(attach bool) common.Executor
	Exec(command []string, env map[string]string, user, workdir string) common.Executor
	UpdateFromEnv(srcPath string, env *map[string]string) common.Executor
	UpdateFromImageEnv(env *map[string]string) common.Executor
	Remove() common.Executor
	Close() common.Executor
	ReplaceLogWriter(io.Writer, io.Writer) (io.Writer, io.Writer)
}

// NewDockerBuildExecutorInput the input for the NewDockerBuildExecutor function
type NewDockerBuildExecutorInput struct {
	ContextDir   string
	Dockerfile   string
	BuildContext io.Reader
	ImageTag     string
	Platform     string
}

// NewDockerPullExecutorInput the input for the NewDockerPullExecutor function
type NewDockerPullExecutorInput struct {
	Image     string
	ForcePull bool
	Platform  string
	Username  string
	Password  string
}
@@ -1,3 +1,5 @@
|
||||
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
|
||||
|
||||
package container
|
||||
|
||||
import (
|
||||
@@ -6,16 +8,16 @@ import (
|
||||
|
||||
"github.com/docker/cli/cli/config"
|
||||
"github.com/docker/cli/cli/config/credentials"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/registry"
|
||||
"github.com/nektos/act/pkg/common"
|
||||
)
|
||||
|
||||
func LoadDockerAuthConfig(ctx context.Context, image string) (types.AuthConfig, error) {
|
||||
func LoadDockerAuthConfig(ctx context.Context, image string) (registry.AuthConfig, error) {
|
||||
logger := common.Logger(ctx)
|
||||
config, err := config.Load(config.Dir())
|
||||
if err != nil {
|
||||
logger.Warnf("Could not load docker config: %v", err)
|
||||
return types.AuthConfig{}, err
|
||||
return registry.AuthConfig{}, err
|
||||
}
|
||||
|
||||
if !config.ContainsAuth() {
|
||||
@@ -31,8 +33,29 @@ func LoadDockerAuthConfig(ctx context.Context, image string) (types.AuthConfig,
|
||||
authConfig, err := config.GetAuthConfig(hostName)
|
||||
if err != nil {
|
||||
logger.Warnf("Could not get auth config from docker config: %v", err)
|
||||
return types.AuthConfig{}, err
|
||||
return registry.AuthConfig{}, err
|
||||
}
|
||||
|
||||
return types.AuthConfig(authConfig), nil
|
||||
return registry.AuthConfig(authConfig), nil
|
||||
}
|
||||
|
||||
func LoadDockerAuthConfigs(ctx context.Context) map[string]registry.AuthConfig {
|
||||
logger := common.Logger(ctx)
|
||||
config, err := config.Load(config.Dir())
|
||||
if err != nil {
|
||||
logger.Warnf("Could not load docker config: %v", err)
|
||||
return nil
|
||||
}
|
||||
|
||||
if !config.ContainsAuth() {
|
||||
config.CredentialsStore = credentials.DetectDefaultStore(config.CredentialsStore)
|
||||
}
|
||||
|
||||
creds, _ := config.GetAllCredentials()
|
||||
authConfigs := make(map[string]registry.AuthConfig, len(creds))
|
||||
for k, v := range creds {
|
||||
authConfigs[k] = registry.AuthConfig(v)
|
||||
}
|
||||
|
||||
return authConfigs
|
||||
}
|
||||
|
@@ -1,3 +1,5 @@
|
||||
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
|
||||
|
||||
package container
|
||||
|
||||
import (
|
||||
@@ -8,22 +10,14 @@ import (
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/fileutils"
|
||||
|
||||
// github.com/docker/docker/builder/dockerignore is deprecated
|
||||
"github.com/moby/buildkit/frontend/dockerfile/dockerignore"
|
||||
"github.com/moby/patternmatcher"
|
||||
|
||||
"github.com/nektos/act/pkg/common"
|
||||
)
|
||||
|
||||
// NewDockerBuildExecutorInput the input for the NewDockerBuildExecutor function
|
||||
type NewDockerBuildExecutorInput struct {
|
||||
ContextDir string
|
||||
Container Container
|
||||
ImageTag string
|
||||
Platform string
|
||||
}
|
||||
|
||||
// NewDockerBuildExecutor function to create a run executor for the container
|
||||
func NewDockerBuildExecutor(input NewDockerBuildExecutorInput) common.Executor {
|
||||
return func(ctx context.Context) error {
|
||||
@@ -47,15 +41,17 @@ func NewDockerBuildExecutor(input NewDockerBuildExecutorInput) common.Executor {
|
||||
|
||||
tags := []string{input.ImageTag}
|
||||
options := types.ImageBuildOptions{
|
||||
Tags: tags,
|
||||
Remove: true,
|
||||
Platform: input.Platform,
|
||||
Tags: tags,
|
||||
Remove: true,
|
||||
Platform: input.Platform,
|
||||
AuthConfigs: LoadDockerAuthConfigs(ctx),
|
||||
Dockerfile: input.Dockerfile,
|
||||
}
|
||||
var buildContext io.ReadCloser
|
||||
if input.Container != nil {
|
||||
buildContext, err = input.Container.GetContainerArchive(ctx, input.ContextDir+"/.")
|
||||
if input.BuildContext != nil {
|
||||
buildContext = io.NopCloser(input.BuildContext)
|
||||
} else {
|
||||
buildContext, err = createBuildContext(ctx, input.ContextDir, "Dockerfile")
|
||||
buildContext, err = createBuildContext(ctx, input.ContextDir, input.Dockerfile)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -101,8 +97,8 @@ func createBuildContext(ctx context.Context, contextDir string, relDockerfile st
|
||||
// parses the Dockerfile. Ignore errors here, as they will have been
|
||||
// caught by validateContextDirectory above.
|
||||
var includes = []string{"."}
|
||||
keepThem1, _ := fileutils.Matches(".dockerignore", excludes)
|
||||
keepThem2, _ := fileutils.Matches(relDockerfile, excludes)
|
||||
keepThem1, _ := patternmatcher.Matches(".dockerignore", excludes)
|
||||
keepThem2, _ := patternmatcher.Matches(relDockerfile, excludes)
|
||||
if keepThem1 || keepThem2 {
|
||||
includes = append(includes, ".dockerignore", relDockerfile)
|
||||
}
|
||||
|
@@ -1,3 +1,5 @@
|
||||
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
|
||||
|
||||
// This file is an exact copy of https://github.com/docker/cli/blob/9ac8584acfd501c3f4da0e845e3a40ed15c85041/cli/command/container/opts.go
|
||||
// appended with license information.
|
||||
//
|
||||
|
@@ -663,8 +663,8 @@ func TestRunFlagsParseShmSize(t *testing.T) {
|
||||
|
||||
func TestParseRestartPolicy(t *testing.T) {
|
||||
invalids := map[string]string{
|
||||
"always:2:3": "invalid restart policy format",
|
||||
"on-failure:invalid": "maximum retry count must be an integer",
|
||||
"always:2:3": "invalid restart policy format: maximum retry count must be an integer",
|
||||
"on-failure:invalid": "invalid restart policy format: maximum retry count must be an integer",
|
||||
}
|
||||
valids := map[string]container.RestartPolicy{
|
||||
"": {},
|
||||
|
@@ -1,3 +1,5 @@
+//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
+
package container

import (
@@ -5,7 +7,7 @@ import (
    "fmt"

    "github.com/docker/docker/api/types"
-   "github.com/docker/docker/api/types/filters"
+   "github.com/docker/docker/client"
)

// ImageExistsLocally returns a boolean indicating if an image with the
@@ -17,33 +19,15 @@ func ImageExistsLocally(ctx context.Context, imageName string, platform string)
    }
    defer cli.Close()

-   filters := filters.NewArgs()
-   filters.Add("reference", imageName)
-
-   imageListOptions := types.ImageListOptions{
-       Filters: filters,
-   }
-
-   images, err := cli.ImageList(ctx, imageListOptions)
-   if err != nil {
+   inspectImage, _, err := cli.ImageInspectWithRaw(ctx, imageName)
+   if client.IsErrNotFound(err) {
+       return false, nil
+   } else if err != nil {
        return false, err
    }

-   if len(images) > 0 {
-       if platform == "any" || platform == "" {
-           return true, nil
-       }
-       for _, v := range images {
-           inspectImage, _, err := cli.ImageInspectWithRaw(ctx, v.ID)
-           if err != nil {
-               return false, err
-           }
-
-           if fmt.Sprintf("%s/%s", inspectImage.Os, inspectImage.Architecture) == platform {
-               return true, nil
-           }
-       }
-       return false, nil
+   if platform == "" || platform == "any" || fmt.Sprintf("%s/%s", inspectImage.Os, inspectImage.Architecture) == platform {
+       return true, nil
    }

    return false, nil
@@ -52,38 +36,25 @@ func ImageExistsLocally(ctx context.Context, imageName string, platform string)
// RemoveImage removes image from local store, the function is used to run different
// container image architectures
func RemoveImage(ctx context.Context, imageName string, force bool, pruneChildren bool) (bool, error) {
-   if exists, err := ImageExistsLocally(ctx, imageName, "any"); !exists {
-       return false, err
-   }
-
    cli, err := GetDockerClient(ctx)
    if err != nil {
        return false, err
    }
    defer cli.Close()

-   filters := filters.NewArgs()
-   filters.Add("reference", imageName)
-
-   imageListOptions := types.ImageListOptions{
-       Filters: filters,
-   }
-
-   images, err := cli.ImageList(ctx, imageListOptions)
-   if err != nil {
+   inspectImage, _, err := cli.ImageInspectWithRaw(ctx, imageName)
+   if client.IsErrNotFound(err) {
+       return false, nil
+   } else if err != nil {
        return false, err
    }

-   if len(images) > 0 {
-       for _, v := range images {
-           if _, err = cli.ImageRemove(ctx, v.ID, types.ImageRemoveOptions{
-               Force:         force,
-               PruneChildren: pruneChildren,
-           }); err != nil {
-               return false, err
-           }
-       }
-       return true, nil
+   if _, err = cli.ImageRemove(ctx, inspectImage.ID, types.ImageRemoveOptions{
+       Force:         force,
+       PruneChildren: pruneChildren,
+   }); err != nil {
+       return false, err
    }

-   return false, nil
+   return true, nil
}
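The refactor above replaces the list-with-filters lookup by a single `ImageInspectWithRaw` call and distinguishes "image not found" from real errors via `client.IsErrNotFound`. A standalone sketch of that probe (the image name is an example):

package main

import (
    "context"
    "fmt"

    "github.com/docker/docker/client"
)

func imageExists(ctx context.Context, cli client.APIClient, imageName string) (bool, error) {
    // One inspect call replaces the old list-then-inspect loop.
    _, _, err := cli.ImageInspectWithRaw(ctx, imageName)
    if client.IsErrNotFound(err) {
        return false, nil // a missing image is not an error
    } else if err != nil {
        return false, err
    }
    return true, nil
}

func main() {
    cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
    if err != nil {
        panic(err)
    }
    defer cli.Close()

    ok, err := imageExists(context.Background(), cli, "alpine:latest")
    fmt.Println(ok, err)
}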
@@ -1,3 +1,5 @@
+//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
+
package container

import (
pkg/container/docker_network.go (new file, 79 lines)
@@ -0,0 +1,79 @@
+//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
+
+package container
+
+import (
+   "context"
+
+   "github.com/docker/docker/api/types"
+   "github.com/nektos/act/pkg/common"
+)
+
+func NewDockerNetworkCreateExecutor(name string) common.Executor {
+   return func(ctx context.Context) error {
+       cli, err := GetDockerClient(ctx)
+       if err != nil {
+           return err
+       }
+       defer cli.Close()
+
+       // Only create the network if it doesn't exist
+       networks, err := cli.NetworkList(ctx, types.NetworkListOptions{})
+       if err != nil {
+           return err
+       }
+       common.Logger(ctx).Debugf("%v", networks)
+       for _, network := range networks {
+           if network.Name == name {
+               common.Logger(ctx).Debugf("Network %v exists", name)
+               return nil
+           }
+       }
+
+       _, err = cli.NetworkCreate(ctx, name, types.NetworkCreate{
+           Driver: "bridge",
+           Scope:  "local",
+       })
+       if err != nil {
+           return err
+       }
+
+       return nil
+   }
+}
+
+func NewDockerNetworkRemoveExecutor(name string) common.Executor {
+   return func(ctx context.Context) error {
+       cli, err := GetDockerClient(ctx)
+       if err != nil {
+           return err
+       }
+       defer cli.Close()
+
+       // Make sure that all networks of the specified name are removed
+       // cli.NetworkRemove refuses to remove a network if there are duplicates
+       networks, err := cli.NetworkList(ctx, types.NetworkListOptions{})
+       if err != nil {
+           return err
+       }
+       common.Logger(ctx).Debugf("%v", networks)
+       for _, network := range networks {
+           if network.Name == name {
+               result, err := cli.NetworkInspect(ctx, network.ID, types.NetworkInspectOptions{})
+               if err != nil {
+                   return err
+               }
+
+               if len(result.Containers) == 0 {
+                   if err = cli.NetworkRemove(ctx, network.ID); err != nil {
+                       common.Logger(ctx).Debugf("%v", err)
+                   }
+               } else {
+                   common.Logger(ctx).Debugf("Refusing to remove network %v because it still has active endpoints", name)
+               }
+           }
+       }
+
+       return err
+   }
+}
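Since `common.Executor` is just `func(ctx context.Context) error`, the two executors above can be invoked directly. A hypothetical create/cleanup pairing, written as it would sit inside this package (the network name and the helper itself are invented for illustration):

// Hypothetical usage of the executors added above, inside pkg/container.
func withJobNetwork(ctx context.Context, run func(ctx context.Context) error) error {
    const name = "act-job-network" // example network name

    if err := NewDockerNetworkCreateExecutor(name)(ctx); err != nil {
        return err
    }
    // Removal is best-effort: it skips networks that still have endpoints.
    defer func() { _ = NewDockerNetworkRemoveExecutor(name)(ctx) }()

    return run(ctx)
}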
@@ -1,3 +1,5 @@
+//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
+
package container

import (
@@ -5,22 +7,15 @@ import (
    "encoding/base64"
    "encoding/json"
    "fmt"
+   "strings"

    "github.com/docker/distribution/reference"
    "github.com/docker/docker/api/types"
+   "github.com/docker/docker/api/types/registry"

    "github.com/nektos/act/pkg/common"
)

// NewDockerPullExecutorInput the input for the NewDockerPullExecutor function
type NewDockerPullExecutorInput struct {
    Image     string
    ForcePull bool
    Platform  string
    Username  string
    Password  string
}

// NewDockerPullExecutor function to create a run executor for the container
func NewDockerPullExecutor(input NewDockerPullExecutorInput) common.Executor {
    return func(ctx context.Context) error {
@@ -66,6 +61,13 @@ func NewDockerPullExecutor(input NewDockerPullExecutorInput) common.Executor {

    _ = logDockerResponse(logger, reader, err != nil)
    if err != nil {
+       if imagePullOptions.RegistryAuth != "" && strings.Contains(err.Error(), "unauthorized") {
+           logger.Errorf("pulling image '%v' (%s) failed with credentials %s retrying without them, please check for stale docker config files", imageRef, input.Platform, err.Error())
+           imagePullOptions.RegistryAuth = ""
+           reader, err = cli.ImagePull(ctx, imageRef, imagePullOptions)
+
+           _ = logDockerResponse(logger, reader, err != nil)
+       }
        return err
    }
    return nil
@@ -76,12 +78,12 @@ func getImagePullOptions(ctx context.Context, input NewDockerPullExecutorInput)
    imagePullOptions := types.ImagePullOptions{
        Platform: input.Platform,
    }
+   logger := common.Logger(ctx)

    if input.Username != "" && input.Password != "" {
-       logger := common.Logger(ctx)
        logger.Debugf("using authentication for docker pull")

-       authConfig := types.AuthConfig{
+       authConfig := registry.AuthConfig{
            Username: input.Username,
            Password: input.Password,
        }
@@ -100,6 +102,7 @@ func getImagePullOptions(ctx context.Context, input NewDockerPullExecutorInput)
    if authConfig.Username == "" && authConfig.Password == "" {
        return imagePullOptions, nil
    }
+   logger.Info("using DockerAuthConfig authentication for docker pull")

    encodedJSON, err := json.Marshal(authConfig)
    if err != nil {
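The `RegistryAuth` string that `getImagePullOptions` ultimately builds is the standard Docker wire format: the `registry.AuthConfig` struct serialized as JSON and then base64-url-encoded. A minimal sketch of that encoding (the credentials are placeholders):

package main

import (
    "encoding/base64"
    "encoding/json"
    "fmt"

    "github.com/docker/docker/api/types/registry"
)

func encodeAuth(username, password string) (string, error) {
    authConfig := registry.AuthConfig{
        Username: username,
        Password: password,
    }
    encodedJSON, err := json.Marshal(authConfig)
    if err != nil {
        return "", err
    }
    // Docker expects URL-safe base64 of the JSON auth config.
    return base64.URLEncoding.EncodeToString(encodedJSON), nil
}

func main() {
    auth, _ := encodeAuth("user", "secret") // placeholder credentials
    fmt.Println(auth)                       // value for types.ImagePullOptions.RegistryAuth
}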
@@ -1,8 +1,9 @@
+//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
+
package container

import (
    "archive/tar"
    "bufio"
+   "bytes"
    "context"
    "errors"
@@ -15,76 +16,31 @@ import (
    "strconv"
    "strings"

-   "github.com/go-git/go-billy/v5/helper/polyfill"
-   "github.com/go-git/go-billy/v5/osfs"
-   "github.com/go-git/go-git/v5/plumbing/format/gitignore"
-   "github.com/joho/godotenv"
-
-   "github.com/imdario/mergo"
-   "github.com/kballard/go-shellquote"
-   "github.com/spf13/pflag"
-
-   "github.com/Masterminds/semver"
+   "github.com/docker/cli/cli/compose/loader"
+   "github.com/docker/cli/cli/connhelper"
+   "github.com/docker/docker/api/types"
+   "github.com/docker/docker/api/types/container"
+   "github.com/docker/docker/api/types/mount"
+   "github.com/docker/docker/api/types/network"
+   networktypes "github.com/docker/docker/api/types/network"
+   "github.com/docker/docker/client"
+   "github.com/docker/docker/pkg/stdcopy"
+   "github.com/go-git/go-billy/v5/helper/polyfill"
+   "github.com/go-git/go-billy/v5/osfs"
+   "github.com/go-git/go-git/v5/plumbing/format/gitignore"
+   "github.com/gobwas/glob"
+   "github.com/imdario/mergo"
+   "github.com/joho/godotenv"
+   "github.com/kballard/go-shellquote"
+   specs "github.com/opencontainers/image-spec/specs-go/v1"
+
+   "github.com/Masterminds/semver"
+   "github.com/spf13/pflag"
+   "golang.org/x/term"

    "github.com/nektos/act/pkg/common"
+   "github.com/nektos/act/pkg/filecollector"
)

// NewContainerInput the input for the New function
type NewContainerInput struct {
    Image       string
    Username    string
    Password    string
    Entrypoint  []string
    Cmd         []string
    WorkingDir  string
    Env         []string
    Binds       []string
    Mounts      map[string]string
    Name        string
    Stdout      io.Writer
    Stderr      io.Writer
    NetworkMode string
    Privileged  bool
    UsernsMode  string
    Platform    string
    Options     string

    AutoRemove bool
}

// FileEntry is a file to copy to a container
type FileEntry struct {
    Name string
    Mode int64
    Body string
}

// Container for managing docker run containers
type Container interface {
    Create(capAdd []string, capDrop []string) common.Executor
    Copy(destPath string, files ...*FileEntry) common.Executor
    CopyDir(destPath string, srcPath string, useGitIgnore bool) common.Executor
    GetContainerArchive(ctx context.Context, srcPath string) (io.ReadCloser, error)
    Pull(forcePull bool) common.Executor
    Start(attach bool) common.Executor
    Exec(command []string, env map[string]string, user, workdir string) common.Executor
    UpdateFromEnv(srcPath string, env *map[string]string) common.Executor
    UpdateFromImageEnv(env *map[string]string) common.Executor
    UpdateFromPath(env *map[string]string) common.Executor
    Remove() common.Executor
    Close() common.Executor
    ReplaceLogWriter(io.Writer, io.Writer) (io.Writer, io.Writer)
}

// NewContainer creates a reference to a container
func NewContainer(input *NewContainerInput) ExecutionsEnvironment {
    cr := new(containerReference)
@@ -92,6 +48,25 @@ func NewContainer(input *NewContainerInput) ExecutionsEnvironment {
    return cr
}

+func (cr *containerReference) ConnectToNetwork(name string) common.Executor {
+   return common.
+       NewDebugExecutor("%sdocker network connect %s %s", logPrefix, name, cr.input.Name).
+       Then(
+           common.NewPipelineExecutor(
+               cr.connect(),
+               cr.connectToNetwork(name, cr.input.NetworkAliases),
+           ).IfNot(common.Dryrun),
+       )
+}
+
+func (cr *containerReference) connectToNetwork(name string, aliases []string) common.Executor {
+   return func(ctx context.Context) error {
+       return cr.cli.NetworkConnect(ctx, name, cr.input.Name, &networktypes.EndpointSettings{
+           Aliases: aliases,
+       })
+   }
+}
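`ConnectToNetwork` follows act's executor-combinator style: log a debug line first, then run the real work unless `--dryrun` is active. A sketch of how the same combinators from `pkg/common` that appear in this diff compose (the step bodies are placeholders):

// Sketch of the combinator pattern used above; NewDebugExecutor,
// NewPipelineExecutor, Then, IfNot and Dryrun all appear in the diff.
func exampleExecutor() common.Executor {
    step := func(ctx context.Context) error {
        // placeholder work
        return nil
    }
    return common.
        NewDebugExecutor("about to run two steps").
        Then(
            common.NewPipelineExecutor(step, step).IfNot(common.Dryrun),
        )
}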
// supportsContainerImagePlatform returns true if the underlying Docker server
// API version is 1.41 and beyond
func supportsContainerImagePlatform(ctx context.Context, cli client.APIClient) bool {
@@ -112,7 +87,7 @@ func supportsContainerImagePlatform(ctx context.Context, cli client.APIClient) b

func (cr *containerReference) Create(capAdd []string, capDrop []string) common.Executor {
    return common.
-       NewInfoExecutor("%sdocker create image=%s platform=%s entrypoint=%+q cmd=%+q", logPrefix, cr.input.Image, cr.input.Platform, cr.input.Entrypoint, cr.input.Cmd).
+       NewInfoExecutor("%sdocker create image=%s platform=%s entrypoint=%+q cmd=%+q network=%+q", logPrefix, cr.input.Image, cr.input.Platform, cr.input.Entrypoint, cr.input.Cmd, cr.input.NetworkMode).
        Then(
            common.NewPipelineExecutor(
                cr.connect(),
@@ -124,7 +99,7 @@ func (cr *containerReference) Create(capAdd []string, capDrop []string) common.E

func (cr *containerReference) Start(attach bool) common.Executor {
    return common.
-       NewInfoExecutor("%sdocker run image=%s platform=%s entrypoint=%+q cmd=%+q", logPrefix, cr.input.Image, cr.input.Platform, cr.input.Entrypoint, cr.input.Cmd).
+       NewInfoExecutor("%sdocker run image=%s platform=%s entrypoint=%+q cmd=%+q network=%+q", logPrefix, cr.input.Image, cr.input.Platform, cr.input.Entrypoint, cr.input.Cmd, cr.input.NetworkMode).
        Then(
            common.NewPipelineExecutor(
                cr.connect(),
@@ -190,17 +165,13 @@ func (cr *containerReference) GetContainerArchive(ctx context.Context, srcPath s
}

func (cr *containerReference) UpdateFromEnv(srcPath string, env *map[string]string) common.Executor {
-   return cr.extractEnv(srcPath, env).IfNot(common.Dryrun)
+   return parseEnvFile(cr, srcPath, env).IfNot(common.Dryrun)
}

func (cr *containerReference) UpdateFromImageEnv(env *map[string]string) common.Executor {
    return cr.extractFromImageEnv(env).IfNot(common.Dryrun)
}

func (cr *containerReference) UpdateFromPath(env *map[string]string) common.Executor {
    return cr.extractPath(env).IfNot(common.Dryrun)
}

func (cr *containerReference) Exec(command []string, env map[string]string, user, workdir string) common.Executor {
    return common.NewPipelineExecutor(
        common.NewInfoExecutor("%sdocker exec cmd=[%s] user=%s workdir=%s", logPrefix, strings.Join(command, " "), user, workdir),
@@ -239,9 +210,6 @@ type containerReference struct {
}

func GetDockerClient(ctx context.Context) (cli client.APIClient, err error) {
-   // TODO: this should maybe need to be a global option, not hidden in here?
-   //       though i'm not sure how that works out when there's another Executor :D
-   //       I really would like something that works on OSX native for eg
    dockerHost := os.Getenv("DOCKER_HOST")

    if strings.HasPrefix(dockerHost, "ssh://") {
@@ -293,8 +261,10 @@ func RunnerArch(ctx context.Context) string {

    archMapper := map[string]string{
        "x86_64":  "X64",
-       "386":     "x86",
-       "aarch64": "arm64",
+       "amd64":   "X64",
+       "386":     "X86",
+       "aarch64": "ARM64",
+       "arm64":   "ARM64",
    }
    if arch, ok := archMapper[info.Architecture]; ok {
        return arch
@@ -398,14 +368,30 @@ func (cr *containerReference) mergeContainerConfigs(ctx context.Context, config
        return nil, nil, fmt.Errorf("Cannot parse container options: '%s': '%w'", input.Options, err)
    }

-   containerConfig, err := parse(flags, copts, "")
+   // If a service container's network is set to `host`, the container will not be able to
+   // connect to the specified network created for the job container and the service containers.
+   // So comment out the following code.
+
+   // if len(copts.netMode.Value()) == 0 {
+   //  if err = copts.netMode.Set(cr.input.NetworkMode); err != nil {
+   //      return nil, nil, fmt.Errorf("Cannot parse networkmode=%s. This is an internal error and should not happen: '%w'", cr.input.NetworkMode, err)
+   //  }
+   // }
+
+   // If the `privileged` config has been disabled, `copts.privileged` needs to be forced to false,
+   // even if the user specifies `--privileged` in the options string.
+   if !hostConfig.Privileged {
+       copts.privileged = false
+   }
+
+   containerConfig, err := parse(flags, copts, runtime.GOOS)
    if err != nil {
        return nil, nil, fmt.Errorf("Cannot process container options: '%s': '%w'", input.Options, err)
    }

    logger.Debugf("Custom container.Config from options ==> %+v", containerConfig.Config)

-   err = mergo.Merge(config, containerConfig.Config, mergo.WithOverride)
+   err = mergo.Merge(config, containerConfig.Config, mergo.WithOverride, mergo.WithAppendSlice)
    if err != nil {
        return nil, nil, fmt.Errorf("Cannot merge container.Config options: '%s': '%w'", input.Options, err)
    }
@@ -413,10 +399,21 @@ func (cr *containerReference) mergeContainerConfigs(ctx context.Context, config

    logger.Debugf("Custom container.HostConfig from options ==> %+v", containerConfig.HostConfig)

-   hostConfig.Binds = append(hostConfig.Binds, containerConfig.HostConfig.Binds...)
-   hostConfig.Mounts = append(hostConfig.Mounts, containerConfig.HostConfig.Mounts...)
+   binds := hostConfig.Binds
+   mounts := hostConfig.Mounts
+   networkMode := hostConfig.NetworkMode
    err = mergo.Merge(hostConfig, containerConfig.HostConfig, mergo.WithOverride)
    if err != nil {
        return nil, nil, fmt.Errorf("Cannot merge container.HostConfig options: '%s': '%w'", input.Options, err)
    }
+   hostConfig.Binds = binds
+   hostConfig.Mounts = mounts
+   if len(copts.netMode.Value()) > 0 {
+       logger.Warn("--network and --net in the options will be ignored.")
+   }
+   hostConfig.NetworkMode = networkMode
    logger.Debugf("Merged container.HostConfig ==> %+v", hostConfig)

    return config, hostConfig, nil
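The merge above deliberately snapshots `Binds`, `Mounts` and `NetworkMode` before calling `mergo.Merge` with `WithOverride` and restores them afterwards, so user-supplied container options cannot clobber values act has already computed. A reduced illustration of that save-merge-restore pattern (the struct and values are invented for the example):

package main

import (
    "fmt"

    "github.com/imdario/mergo"
)

type hostConfig struct {
    Binds       []string
    NetworkMode string
    ShmSize     int64
}

func main() {
    dst := &hostConfig{Binds: []string{"a:/a"}, NetworkMode: "act-net"}
    src := hostConfig{Binds: []string{"b:/b"}, NetworkMode: "host", ShmSize: 1 << 26}

    // Snapshot the fields that must survive the merge.
    binds, networkMode := dst.Binds, dst.NetworkMode

    if err := mergo.Merge(dst, src, mergo.WithOverride); err != nil {
        panic(err)
    }

    // Restore the protected fields; only ShmSize is taken from src.
    dst.Binds, dst.NetworkMode = binds, networkMode
    fmt.Printf("%+v\n", *dst) // {Binds:[a:/a] NetworkMode:act-net ShmSize:67108864}
}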
@@ -432,10 +429,11 @@ func (cr *containerReference) create(capAdd []string, capDrop []string) common.E
    input := cr.input

    config := &container.Config{
-       Image:      input.Image,
-       WorkingDir: input.WorkingDir,
-       Env:        input.Env,
-       Tty:        isTerminal,
+       Image:        input.Image,
+       WorkingDir:   input.WorkingDir,
+       Env:          input.Env,
+       ExposedPorts: input.ExposedPorts,
+       Tty:          isTerminal,
    }
    logger.Debugf("Common container.Config ==> %+v", config)

@@ -471,14 +469,15 @@
    }

    hostConfig := &container.HostConfig{
-       CapAdd:      capAdd,
-       CapDrop:     capDrop,
-       Binds:       input.Binds,
-       Mounts:      mounts,
-       NetworkMode: container.NetworkMode(input.NetworkMode),
-       Privileged:  input.Privileged,
-       UsernsMode:  container.UsernsMode(input.UsernsMode),
-       AutoRemove:  input.AutoRemove,
+       CapAdd:       capAdd,
+       CapDrop:      capDrop,
+       Binds:        input.Binds,
+       Mounts:       mounts,
+       NetworkMode:  container.NetworkMode(input.NetworkMode),
+       Privileged:   input.Privileged,
+       UsernsMode:   container.UsernsMode(input.UsernsMode),
+       PortBindings: input.PortBindings,
+       AutoRemove:   input.AutoRemove,
    }
    logger.Debugf("Common container.HostConfig ==> %+v", hostConfig)

@@ -487,7 +486,27 @@ func (cr *containerReference) create(capAdd []string, capDrop []string) common.E
        return err
    }

-   resp, err := cr.cli.ContainerCreate(ctx, config, hostConfig, nil, platSpecs, input.Name)
+   // For Gitea
+   config, hostConfig = cr.sanitizeConfig(ctx, config, hostConfig)
+
+   // For Gitea
+   // network-scoped alias is supported only for containers in user defined networks
+   var networkingConfig *network.NetworkingConfig
+   logger.Debugf("input.NetworkAliases ==> %v", input.NetworkAliases)
+   n := hostConfig.NetworkMode
+   // IsUserDefined and IsHost are broken on windows
+   if n.IsUserDefined() && n != "host" && len(input.NetworkAliases) > 0 {
+       endpointConfig := &network.EndpointSettings{
+           Aliases: input.NetworkAliases,
+       }
+       networkingConfig = &network.NetworkingConfig{
+           EndpointsConfig: map[string]*network.EndpointSettings{
+               input.NetworkMode: endpointConfig,
+           },
+       }
+   }
+
+   resp, err := cr.cli.ContainerCreate(ctx, config, hostConfig, networkingConfig, platSpecs, input.Name)
    if err != nil {
        return fmt.Errorf("failed to create container: '%w'", err)
    }
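The new `ExposedPorts` and `PortBindings` fields use the standard Docker port types from `github.com/docker/go-connections/nat`; presumably the fork fills them from service-container configuration. A sketch of what such values look like (the port numbers are invented):

package main

import (
    "fmt"

    "github.com/docker/go-connections/nat"
)

func main() {
    // Container-side port 8080/tcp is exposed...
    exposed := nat.PortSet{
        "8080/tcp": struct{}{},
    }
    // ...and published on host port 18080.
    bindings := nat.PortMap{
        "8080/tcp": []nat.PortBinding{
            {HostIP: "0.0.0.0", HostPort: "18080"},
        },
    }
    fmt.Println(exposed, bindings)
    // These feed container.Config.ExposedPorts and
    // container.HostConfig.PortBindings respectively.
}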
@@ -500,59 +519,6 @@ func (cr *containerReference) create(capAdd []string, capDrop []string) common.E
    }
}

-var singleLineEnvPattern, multiLineEnvPattern *regexp.Regexp
-
-func (cr *containerReference) extractEnv(srcPath string, env *map[string]string) common.Executor {
-   if singleLineEnvPattern == nil {
-       // Single line pattern matches:
-       // SOME_VAR=data=moredata
-       // SOME_VAR=datamoredata
-       singleLineEnvPattern = regexp.MustCompile(`^([^=]*)\=(.*)$`)
-       multiLineEnvPattern = regexp.MustCompile(`^([^<]+)<<([\w-]+)$`)
-   }
-
-   localEnv := *env
-   return func(ctx context.Context) error {
-       envTar, _, err := cr.cli.CopyFromContainer(ctx, cr.id, srcPath)
-       if err != nil {
-           return nil
-       }
-       defer envTar.Close()
-
-       reader := tar.NewReader(envTar)
-       _, err = reader.Next()
-       if err != nil && err != io.EOF {
-           return fmt.Errorf("failed to read tar archive: %w", err)
-       }
-       s := bufio.NewScanner(reader)
-       multiLineEnvKey := ""
-       multiLineEnvDelimiter := ""
-       multiLineEnvContent := ""
-       for s.Scan() {
-           line := s.Text()
-           if singleLineEnv := singleLineEnvPattern.FindStringSubmatch(line); singleLineEnv != nil {
-               localEnv[singleLineEnv[1]] = singleLineEnv[2]
-           }
-           if line == multiLineEnvDelimiter {
-               localEnv[multiLineEnvKey] = multiLineEnvContent
-               multiLineEnvKey, multiLineEnvDelimiter, multiLineEnvContent = "", "", ""
-           }
-           if multiLineEnvKey != "" && multiLineEnvDelimiter != "" {
-               if multiLineEnvContent != "" {
-                   multiLineEnvContent += "\n"
-               }
-               multiLineEnvContent += line
-           }
-           if multiLineEnvStart := multiLineEnvPattern.FindStringSubmatch(line); multiLineEnvStart != nil {
-               multiLineEnvKey = multiLineEnvStart[1]
-               multiLineEnvDelimiter = multiLineEnvStart[2]
-           }
-       }
-       env = &localEnv
-       return nil
-   }
-}
-
func (cr *containerReference) extractFromImageEnv(env *map[string]string) common.Executor {
    envMap := *env
    return func(ctx context.Context) error {
@@ -561,11 +527,17 @@ func (cr *containerReference) extractFromImageEnv(env *map[string]string) common
        inspect, _, err := cr.cli.ImageInspectWithRaw(ctx, cr.input.Image)
        if err != nil {
            logger.Error(err)
+           return fmt.Errorf("inspect image: %w", err)
        }

+       if inspect.Config == nil {
+           return nil
+       }
+
        imageEnv, err := godotenv.Unmarshal(strings.Join(inspect.Config.Env, "\n"))
        if err != nil {
            logger.Error(err)
+           return fmt.Errorf("unmarshal image env: %w", err)
        }

        for k, v := range imageEnv {
@@ -585,31 +557,6 @@ func (cr *containerReference) extractFromImageEnv(env *map[string]string) common
    }
}

-func (cr *containerReference) extractPath(env *map[string]string) common.Executor {
-   localEnv := *env
-   return func(ctx context.Context) error {
-       pathTar, _, err := cr.cli.CopyFromContainer(ctx, cr.id, localEnv["GITHUB_PATH"])
-       if err != nil {
-           return fmt.Errorf("failed to copy from container: %w", err)
-       }
-       defer pathTar.Close()
-
-       reader := tar.NewReader(pathTar)
-       _, err = reader.Next()
-       if err != nil && err != io.EOF {
-           return fmt.Errorf("failed to read tar archive: %w", err)
-       }
-       s := bufio.NewScanner(reader)
-       for s.Scan() {
-           line := s.Text()
-           localEnv["PATH"] = fmt.Sprintf("%s:%s", line, localEnv["PATH"])
-       }
-
-       env = &localEnv
-       return nil
-   }
-}
-
func (cr *containerReference) exec(cmd []string, env map[string]string, user, workdir string) common.Executor {
    return func(ctx context.Context) error {
        logger := common.Logger(ctx)
@@ -706,7 +653,7 @@ func (cr *containerReference) tryReadID(opt string, cbk func(id int)) common.Exe
        }
        exp := regexp.MustCompile(`\d+\n`)
        found := exp.FindString(sid)
-       id, err := strconv.ParseInt(found[:len(found)-1], 10, 32)
+       id, err := strconv.ParseInt(strings.TrimSpace(found), 10, 32)
        if err != nil {
            return nil
        }
@@ -724,7 +671,7 @@ func (cr *containerReference) tryReadGID() common.Executor {
    return cr.tryReadID("-g", func(id int) { cr.GID = id })
}

-func (cr *containerReference) waitForCommand(ctx context.Context, isTerminal bool, resp types.HijackedResponse, idResp types.IDResponse, user string, workdir string) error {
+func (cr *containerReference) waitForCommand(ctx context.Context, isTerminal bool, resp types.HijackedResponse, _ types.IDResponse, _ string, _ string) error {
    logger := common.Logger(ctx)

    cmdResponse := make(chan error)
@@ -769,6 +716,32 @@ func (cr *containerReference) waitForCommand(ctx context.Context, isTerminal boo
    }
}

+func (cr *containerReference) CopyTarStream(ctx context.Context, destPath string, tarStream io.Reader) error {
+   // Mkdir
+   buf := &bytes.Buffer{}
+   tw := tar.NewWriter(buf)
+   _ = tw.WriteHeader(&tar.Header{
+       Name:     destPath,
+       Mode:     777,
+       Typeflag: tar.TypeDir,
+   })
+   tw.Close()
+   err := cr.cli.CopyToContainer(ctx, cr.id, "/", buf, types.CopyToContainerOptions{})
+   if err != nil {
+       return fmt.Errorf("failed to mkdir to copy content to container: %w", err)
+   }
+   // Copy Content
+   err = cr.cli.CopyToContainer(ctx, cr.id, destPath, tarStream, types.CopyToContainerOptions{})
+   if err != nil {
+       return fmt.Errorf("failed to copy content to container: %w", err)
+   }
+   // If this fails, then folders have wrong permissions on non root container
+   if cr.UID != 0 || cr.GID != 0 {
+       _ = cr.Exec([]string{"chown", "-R", fmt.Sprintf("%d:%d", cr.UID, cr.GID), destPath}, nil, "0", "")(ctx)
+   }
+   return nil
+}
+
func (cr *containerReference) copyDir(dstPath string, srcPath string, useGitIgnore bool) common.Executor {
    return func(ctx context.Context) error {
        logger := common.Logger(ctx)
@@ -806,12 +779,12 @@ func (cr *containerReference) copyDir(dstPath string, srcPath string, useGitIgno
        ignorer = gitignore.NewMatcher(ps)
    }

-   fc := &fileCollector{
-       Fs:        &defaultFs{},
+   fc := &filecollector.FileCollector{
+       Fs:        &filecollector.DefaultFs{},
        Ignorer:   ignorer,
        SrcPath:   srcPath,
        SrcPrefix: srcPrefix,
-       Handler: &tarCollector{
+       Handler: &filecollector.TarCollector{
            TarWriter: tw,
            UID:       cr.UID,
            GID:       cr.GID,
@@ -819,7 +792,7 @@
        },
    }

-   err = filepath.Walk(srcPath, fc.collectFiles(ctx, []string{}))
+   err = filepath.Walk(srcPath, fc.CollectFiles(ctx, []string{}))
    if err != nil {
        return err
    }
@@ -946,3 +919,63 @@ func (cr *containerReference) wait() common.Executor {
    return fmt.Errorf("exit with `FAILURE`: %v", statusCode)
    }
}

+// For Gitea
+// sanitizeConfig removes the invalid configurations from `config` and `hostConfig`
+func (cr *containerReference) sanitizeConfig(ctx context.Context, config *container.Config, hostConfig *container.HostConfig) (*container.Config, *container.HostConfig) {
+   logger := common.Logger(ctx)
+
+   if len(cr.input.ValidVolumes) > 0 {
+       globs := make([]glob.Glob, 0, len(cr.input.ValidVolumes))
+       for _, v := range cr.input.ValidVolumes {
+           if g, err := glob.Compile(v); err != nil {
+               logger.Errorf("create glob from %s error: %v", v, err)
+           } else {
+               globs = append(globs, g)
+           }
+       }
+       isValid := func(v string) bool {
+           for _, g := range globs {
+               if g.Match(v) {
+                   return true
+               }
+           }
+           return false
+       }
+       // sanitize binds
+       sanitizedBinds := make([]string, 0, len(hostConfig.Binds))
+       for _, bind := range hostConfig.Binds {
+           parsed, err := loader.ParseVolume(bind)
+           if err != nil {
+               logger.Warnf("parse volume [%s] error: %v", bind, err)
+               continue
+           }
+           if parsed.Source == "" {
+               // anonymous volume
+               sanitizedBinds = append(sanitizedBinds, bind)
+               continue
+           }
+           if isValid(parsed.Source) {
+               sanitizedBinds = append(sanitizedBinds, bind)
+           } else {
+               logger.Warnf("[%s] is not a valid volume, will be ignored", parsed.Source)
+           }
+       }
+       hostConfig.Binds = sanitizedBinds
+       // sanitize mounts
+       sanitizedMounts := make([]mount.Mount, 0, len(hostConfig.Mounts))
+       for _, mt := range hostConfig.Mounts {
+           if isValid(mt.Source) {
+               sanitizedMounts = append(sanitizedMounts, mt)
+           } else {
+               logger.Warnf("[%s] is not a valid volume, will be ignored", mt.Source)
+           }
+       }
+       hostConfig.Mounts = sanitizedMounts
+   } else {
+       hostConfig.Binds = []string{}
+       hostConfig.Mounts = []mount.Mount{}
+   }
+
+   return config, hostConfig
+}
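sanitizeConfig gates binds and mounts through `gobwas/glob` patterns taken from `ValidVolumes`. The matching itself is plain glob compilation, as in this reduced sketch (the patterns and paths mirror the test data in the next file):

package main

import (
    "fmt"

    "github.com/gobwas/glob"
)

func main() {
    patterns := []string{"shared_volume", "/etc/conf.d/*.json"}

    globs := make([]glob.Glob, 0, len(patterns))
    for _, p := range patterns {
        g, err := glob.Compile(p)
        if err != nil {
            continue // a bad pattern is skipped, mirroring sanitizeConfig
        }
        globs = append(globs, g)
    }

    for _, source := range []string{"shared_volume", "/etc/conf.d/base.json", "/secrets/keys"} {
        valid := false
        for _, g := range globs {
            if g.Match(source) {
                valid = true
                break
            }
        }
        fmt.Printf("%-22s valid=%v\n", source, valid)
    }
}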
@@ -9,8 +9,12 @@ import (
    "testing"
    "time"

    "github.com/nektos/act/pkg/common"

    "github.com/docker/docker/api/types"
+   "github.com/docker/docker/api/types/container"
    "github.com/docker/docker/client"
+   "github.com/sirupsen/logrus/hooks/test"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/mock"
)
@@ -19,6 +23,7 @@ func TestDocker(t *testing.T) {
    ctx := context.Background()
    client, err := GetDockerClient(ctx)
    assert.NoError(t, err)
+   defer client.Close()

    dockerBuild := NewDockerBuildExecutor(NewDockerBuildExecutorInput{
        ContextDir: "testdata",
@@ -78,7 +83,7 @@ type endlessReader struct {
    io.Reader
}

-func (r endlessReader) Read(p []byte) (n int, err error) {
+func (r endlessReader) Read(_ []byte) (n int, err error) {
    return 1, nil
}

@@ -166,3 +171,76 @@ func TestDockerExecFailure(t *testing.T) {

// Type assert containerReference implements ExecutionsEnvironment
var _ ExecutionsEnvironment = &containerReference{}
+
+func TestCheckVolumes(t *testing.T) {
+   testCases := []struct {
+       desc          string
+       validVolumes  []string
+       binds         []string
+       expectedBinds []string
+   }{
+       {
+           desc:         "match all volumes",
+           validVolumes: []string{"**"},
+           binds: []string{
+               "shared_volume:/shared_volume",
+               "/home/test/data:/test_data",
+               "/etc/conf.d/base.json:/config/base.json",
+               "sql_data:/sql_data",
+               "/secrets/keys:/keys",
+           },
+           expectedBinds: []string{
+               "shared_volume:/shared_volume",
+               "/home/test/data:/test_data",
+               "/etc/conf.d/base.json:/config/base.json",
+               "sql_data:/sql_data",
+               "/secrets/keys:/keys",
+           },
+       },
+       {
+           desc:         "no volumes can be matched",
+           validVolumes: []string{},
+           binds: []string{
+               "shared_volume:/shared_volume",
+               "/home/test/data:/test_data",
+               "/etc/conf.d/base.json:/config/base.json",
+               "sql_data:/sql_data",
+               "/secrets/keys:/keys",
+           },
+           expectedBinds: []string{},
+       },
+       {
+           desc: "only allowed volumes can be matched",
+           validVolumes: []string{
+               "shared_volume",
+               "/home/test/data",
+               "/etc/conf.d/*.json",
+           },
+           binds: []string{
+               "shared_volume:/shared_volume",
+               "/home/test/data:/test_data",
+               "/etc/conf.d/base.json:/config/base.json",
+               "sql_data:/sql_data",
+               "/secrets/keys:/keys",
+           },
+           expectedBinds: []string{
+               "shared_volume:/shared_volume",
+               "/home/test/data:/test_data",
+               "/etc/conf.d/base.json:/config/base.json",
+           },
+       },
+   }
+   for _, tc := range testCases {
+       t.Run(tc.desc, func(t *testing.T) {
+           logger, _ := test.NewNullLogger()
+           ctx := common.WithLogger(context.Background(), logger)
+           cr := &containerReference{
+               input: &NewContainerInput{
+                   ValidVolumes: tc.validVolumes,
+               },
+           }
+           _, hostConf := cr.sanitizeConfig(ctx, &container.Config{}, &container.HostConfig{Binds: tc.binds})
+           assert.Equal(t, tc.expectedBinds, hostConf.Binds)
+       })
+   }
+}
pkg/container/docker_stub.go (new file, 69 lines)
@@ -0,0 +1,69 @@
+//go:build WITHOUT_DOCKER || !(linux || darwin || windows || netbsd)
+
+package container
+
+import (
+   "context"
+   "runtime"
+
+   "github.com/docker/docker/api/types"
+   "github.com/nektos/act/pkg/common"
+   "github.com/pkg/errors"
+)
+
+// ImageExistsLocally returns a boolean indicating if an image with the
+// requested name, tag and architecture exists in the local docker image store
+func ImageExistsLocally(ctx context.Context, imageName string, platform string) (bool, error) {
+   return false, errors.New("Unsupported Operation")
+}
+
+// RemoveImage removes image from local store, the function is used to run different
+// container image architectures
+func RemoveImage(ctx context.Context, imageName string, force bool, pruneChildren bool) (bool, error) {
+   return false, errors.New("Unsupported Operation")
+}
+
+// NewDockerBuildExecutor function to create a run executor for the container
+func NewDockerBuildExecutor(input NewDockerBuildExecutorInput) common.Executor {
+   return func(ctx context.Context) error {
+       return errors.New("Unsupported Operation")
+   }
+}
+
+// NewDockerPullExecutor function to create a run executor for the container
+func NewDockerPullExecutor(input NewDockerPullExecutorInput) common.Executor {
+   return func(ctx context.Context) error {
+       return errors.New("Unsupported Operation")
+   }
+}
+
+// NewContainer creates a reference to a container
+func NewContainer(input *NewContainerInput) ExecutionsEnvironment {
+   return nil
+}
+
+func RunnerArch(ctx context.Context) string {
+   return runtime.GOOS
+}
+
+func GetHostInfo(ctx context.Context) (info types.Info, err error) {
+   return types.Info{}, nil
+}
+
+func NewDockerVolumeRemoveExecutor(volume string, force bool) common.Executor {
+   return func(ctx context.Context) error {
+       return nil
+   }
+}
+
+func NewDockerNetworkCreateExecutor(name string) common.Executor {
+   return func(ctx context.Context) error {
+       return nil
+   }
+}
+
+func NewDockerNetworkRemoveExecutor(name string) common.Executor {
+   return func(ctx context.Context) error {
+       return nil
+   }
+}
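docker_stub.go carries the logical negation of the build constraint on the real Docker files, so exactly one set of definitions is compiled into any given build. A skeleton showing the constraint pair and how the stubs get selected:

//go:build WITHOUT_DOCKER || !(linux || darwin || windows || netbsd)

// This is the inverse of the //go:build line on the real implementation
// files, so `go build -tags WITHOUT_DOCKER ./...` (or building for an OS
// outside the listed set) compiles these no-op stubs instead.

package container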
@@ -1,13 +1,16 @@
+//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
+
package container

import (
    "context"

    "github.com/docker/docker/api/types/filters"
+   "github.com/docker/docker/api/types/volume"
    "github.com/nektos/act/pkg/common"
)

-func NewDockerVolumeRemoveExecutor(volume string, force bool) common.Executor {
+func NewDockerVolumeRemoveExecutor(volumeName string, force bool) common.Executor {
    return func(ctx context.Context) error {
        cli, err := GetDockerClient(ctx)
        if err != nil {
@@ -15,14 +18,14 @@ func NewDockerVolumeRemoveExecutor(volumeName string, force bool) common.Executor
    }
    defer cli.Close()

-   list, err := cli.VolumeList(ctx, filters.NewArgs())
+   list, err := cli.VolumeList(ctx, volume.ListOptions{Filters: filters.NewArgs()})
    if err != nil {
        return err
    }

    for _, vol := range list.Volumes {
-       if vol.Name == volume {
-           return removeExecutor(volume, force)(ctx)
+       if vol.Name == volumeName {
+           return removeExecutor(volumeName, force)(ctx)
        }
    }
@@ -10,4 +10,6 @@ type ExecutionsEnvironment interface {
    DefaultPathVariable() string
    JoinPathVariable(...string) string
    GetRunnerContext(ctx context.Context) map[string]interface{}
+   // On windows PATH and Path are the same key
+   IsEnvironmentCaseInsensitive() bool
}
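`IsEnvironmentCaseInsensitive` exists because Windows treats `PATH` and `Path` as the same variable. A consumer of the interface could use it for lookups roughly like this (the helper is invented for illustration and assumes `strings` is imported):

// Hypothetical helper built on the interface method above.
func lookupEnv(env map[string]string, key string, caseInsensitive bool) (string, bool) {
    if v, ok := env[key]; ok {
        return v, true
    }
    if caseInsensitive {
        for k, v := range env {
            if strings.EqualFold(k, key) { // PATH == Path == path on Windows
                return v, true
            }
        }
    }
    return "", false
}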
@@ -2,9 +2,9 @@ package container

import (
    "archive/tar"
    "bufio"
+   "bytes"
    "context"
+   "errors"
    "fmt"
    "io"
    "io/fs"
@@ -15,14 +15,14 @@ import (
    "strings"
    "time"

-   "errors"
-
    "github.com/go-git/go-billy/v5/helper/polyfill"
    "github.com/go-git/go-billy/v5/osfs"
    "github.com/go-git/go-git/v5/plumbing/format/gitignore"
-   "github.com/nektos/act/pkg/common"
-   "github.com/nektos/act/pkg/lookpath"
    "golang.org/x/term"

+   "github.com/nektos/act/pkg/common"
+   "github.com/nektos/act/pkg/filecollector"
+   "github.com/nektos/act/pkg/lookpath"
)

type HostEnvironment struct {
@@ -35,7 +35,13 @@ type HostEnvironment struct {
    StdOut io.Writer
}

-func (e *HostEnvironment) Create(capAdd []string, capDrop []string) common.Executor {
+func (e *HostEnvironment) Create(_ []string, _ []string) common.Executor {
    return func(ctx context.Context) error {
        return nil
    }
}

+func (e *HostEnvironment) ConnectToNetwork(name string) common.Executor {
+   return func(ctx context.Context) error {
+       return nil
+   }
+}
@@ -50,7 +56,7 @@ func (e *HostEnvironment) Close() common.Executor {
func (e *HostEnvironment) Copy(destPath string, files ...*FileEntry) common.Executor {
    return func(ctx context.Context) error {
        for _, f := range files {
-           if err := os.MkdirAll(filepath.Dir(filepath.Join(destPath, f.Name)), 0777); err != nil {
+           if err := os.MkdirAll(filepath.Dir(filepath.Join(destPath, f.Name)), 0o777); err != nil {
                return err
            }
            if err := os.WriteFile(filepath.Join(destPath, f.Name), []byte(f.Body), fs.FileMode(f.Mode)); err != nil {
@@ -61,6 +67,33 @@ func (e *HostEnvironment) Copy(destPath string, files ...*FileEntry) common.Exec
    }
}

+func (e *HostEnvironment) CopyTarStream(ctx context.Context, destPath string, tarStream io.Reader) error {
+   if err := os.RemoveAll(destPath); err != nil {
+       return err
+   }
+   tr := tar.NewReader(tarStream)
+   cp := &filecollector.CopyCollector{
+       DstDir: destPath,
+   }
+   for {
+       ti, err := tr.Next()
+       if errors.Is(err, io.EOF) {
+           return nil
+       } else if err != nil {
+           return err
+       }
+       if ti.FileInfo().IsDir() {
+           continue
+       }
+       if ctx.Err() != nil {
+           return fmt.Errorf("CopyTarStream has been cancelled")
+       }
+       if err := cp.WriteFile(ti.Name, ti.FileInfo(), ti.Linkname, tr); err != nil {
+           return err
+       }
+   }
+}
+
func (e *HostEnvironment) CopyDir(destPath string, srcPath string, useGitIgnore bool) common.Executor {
    return func(ctx context.Context) error {
        logger := common.Logger(ctx)
@@ -78,16 +111,16 @@ func (e *HostEnvironment) CopyDir(destPath string, srcPath string, useGitIgnore

        ignorer = gitignore.NewMatcher(ps)
    }
-   fc := &fileCollector{
-       Fs:        &defaultFs{},
+   fc := &filecollector.FileCollector{
+       Fs:        &filecollector.DefaultFs{},
        Ignorer:   ignorer,
        SrcPath:   srcPath,
        SrcPrefix: srcPrefix,
-       Handler: &copyCollector{
+       Handler: &filecollector.CopyCollector{
            DstDir: destPath,
        },
    }
-   return filepath.Walk(srcPath, fc.collectFiles(ctx, []string{}))
+   return filepath.Walk(srcPath, fc.CollectFiles(ctx, []string{}))
}
@@ -100,21 +133,21 @@ func (e *HostEnvironment) GetContainerArchive(ctx context.Context, srcPath strin
    if err != nil {
        return nil, err
    }
-   tc := &tarCollector{
+   tc := &filecollector.TarCollector{
        TarWriter: tw,
    }
    if fi.IsDir() {
-       srcPrefix := filepath.Dir(srcPath)
+       srcPrefix := srcPath
        if !strings.HasSuffix(srcPrefix, string(filepath.Separator)) {
            srcPrefix += string(filepath.Separator)
        }
-       fc := &fileCollector{
-           Fs:        &defaultFs{},
+       fc := &filecollector.FileCollector{
+           Fs:        &filecollector.DefaultFs{},
            SrcPath:   srcPath,
            SrcPrefix: srcPrefix,
            Handler:   tc,
        }
-       err = filepath.Walk(srcPath, fc.collectFiles(ctx, []string{}))
+       err = filepath.Walk(srcPath, fc.CollectFiles(ctx, []string{}))
        if err != nil {
            return nil, err
        }
@@ -141,13 +174,13 @@ func (e *HostEnvironment) GetContainerArchive(ctx context.Context, srcPath strin
    return io.NopCloser(buf), nil
}

-func (e *HostEnvironment) Pull(forcePull bool) common.Executor {
+func (e *HostEnvironment) Pull(_ bool) common.Executor {
    return func(ctx context.Context) error {
        return nil
    }
}

-func (e *HostEnvironment) Start(attach bool) common.Executor {
+func (e *HostEnvironment) Start(_ bool) common.Executor {
    return func(ctx context.Context) error {
        return nil
    }
@@ -241,7 +274,7 @@ func copyPtyOutput(writer io.Writer, ppty io.Reader, finishLog context.CancelFun
    }
}

-func (e *HostEnvironment) UpdateFromImageEnv(env *map[string]string) common.Executor {
+func (e *HostEnvironment) UpdateFromImageEnv(_ *map[string]string) common.Executor {
    return func(ctx context.Context) error {
        return nil
    }
@@ -255,7 +288,7 @@ func getEnvListFromMap(env map[string]string) []string {
    return envList
}

-func (e *HostEnvironment) exec(ctx context.Context, command []string, cmdline string, env map[string]string, user, workdir string) error {
+func (e *HostEnvironment) exec(ctx context.Context, command []string, cmdline string, env map[string]string, _, workdir string) error {
    envList := getEnvListFromMap(env)
    var wd string
    if workdir != "" {
@@ -327,8 +360,12 @@ func (e *HostEnvironment) exec(ctx context.Context, command []string, cmdline st
}

func (e *HostEnvironment) Exec(command []string /*cmdline string, */, env map[string]string, user, workdir string) common.Executor {
+   return e.ExecWithCmdLine(command, "", env, user, workdir)
+}
+
+func (e *HostEnvironment) ExecWithCmdLine(command []string, cmdline string, env map[string]string, user, workdir string) common.Executor {
    return func(ctx context.Context) error {
-       if err := e.exec(ctx, command, "" /*cmdline*/, env, user, workdir); err != nil {
+       if err := e.exec(ctx, command, cmdline, env, user, workdir); err != nil {
            select {
            case <-ctx.Done():
                return fmt.Errorf("this step has been cancelled: %w", err)
@@ -341,77 +378,7 @@ func (e *HostEnvironment) Exec(command []string /*cmdline string, */, env map[st
}

func (e *HostEnvironment) UpdateFromEnv(srcPath string, env *map[string]string) common.Executor {
-   localEnv := *env
-   return func(ctx context.Context) error {
-       envTar, err := e.GetContainerArchive(ctx, srcPath)
-       if err != nil {
-           return nil
-       }
-       defer envTar.Close()
-       reader := tar.NewReader(envTar)
-       _, err = reader.Next()
-       if err != nil && err != io.EOF {
-           return err
-       }
-       s := bufio.NewScanner(reader)
-       for s.Scan() {
-           line := s.Text()
-           singleLineEnv := strings.Index(line, "=")
-           multiLineEnv := strings.Index(line, "<<")
-           if singleLineEnv != -1 && (multiLineEnv == -1 || singleLineEnv < multiLineEnv) {
-               localEnv[line[:singleLineEnv]] = line[singleLineEnv+1:]
-           } else if multiLineEnv != -1 {
-               multiLineEnvContent := ""
-               multiLineEnvDelimiter := line[multiLineEnv+2:]
-               delimiterFound := false
-               for s.Scan() {
-                   content := s.Text()
-                   if content == multiLineEnvDelimiter {
-                       delimiterFound = true
-                       break
-                   }
-                   if multiLineEnvContent != "" {
-                       multiLineEnvContent += "\n"
-                   }
-                   multiLineEnvContent += content
-               }
-               if !delimiterFound {
-                   return fmt.Errorf("invalid format delimiter '%v' not found before end of file", multiLineEnvDelimiter)
-               }
-               localEnv[line[:multiLineEnv]] = multiLineEnvContent
-           } else {
-               return fmt.Errorf("invalid format '%v', expected a line with '=' or '<<'", line)
-           }
-       }
-       env = &localEnv
-       return nil
-   }
-}
-
-func (e *HostEnvironment) UpdateFromPath(env *map[string]string) common.Executor {
-   localEnv := *env
-   return func(ctx context.Context) error {
-       pathTar, err := e.GetContainerArchive(ctx, localEnv["GITHUB_PATH"])
-       if err != nil {
-           return err
-       }
-       defer pathTar.Close()
-
-       reader := tar.NewReader(pathTar)
-       _, err = reader.Next()
-       if err != nil && err != io.EOF {
-           return err
-       }
-       s := bufio.NewScanner(reader)
-       for s.Scan() {
-           line := s.Text()
-           pathSep := string(filepath.ListSeparator)
-           localEnv[e.GetPathVariableName()] = fmt.Sprintf("%s%s%s", line, pathSep, localEnv[e.GetPathVariableName()])
-       }
-
-       env = &localEnv
-       return nil
-   }
+   return parseEnvFile(e, srcPath, env)
}

func (e *HostEnvironment) Remove() common.Executor {
@@ -433,7 +400,11 @@ func (e *HostEnvironment) ToContainerPath(path string) string {
}

func (e *HostEnvironment) GetActPath() string {
-   return e.ActPath
+   actPath := e.ActPath
+   if runtime.GOOS == "windows" {
+       actPath = strings.ReplaceAll(actPath, "\\", "/")
+   }
+   return actPath
}

func (*HostEnvironment) GetPathVariableName() string {
@@ -454,17 +425,45 @@ func (*HostEnvironment) JoinPathVariable(paths ...string) string {
    return strings.Join(paths, string(filepath.ListSeparator))
}

-func (e *HostEnvironment) GetRunnerContext(ctx context.Context) map[string]interface{} {
+// Reference for Arch values for runner.arch
+// https://docs.github.com/en/actions/learn-github-actions/contexts#runner-context
+func goArchToActionArch(arch string) string {
+   archMapper := map[string]string{
+       "x86_64":  "X64",
+       "386":     "X86",
+       "aarch64": "ARM64",
+   }
+   if arch, ok := archMapper[arch]; ok {
+       return arch
+   }
+   return arch
+}
+
+func goOsToActionOs(os string) string {
+   osMapper := map[string]string{
+       "darwin": "macOS",
+   }
+   if os, ok := osMapper[os]; ok {
+       return os
+   }
+   return os
+}
+
+func (e *HostEnvironment) GetRunnerContext(_ context.Context) map[string]interface{} {
    return map[string]interface{}{
-       "os":         runtime.GOOS,
-       "arch":       runtime.GOARCH,
+       "os":         goOsToActionOs(runtime.GOOS),
+       "arch":       goArchToActionArch(runtime.GOARCH),
        "temp":       e.TmpDir,
        "tool_cache": e.ToolCache,
    }
}

-func (e *HostEnvironment) ReplaceLogWriter(stdout io.Writer, stderr io.Writer) (io.Writer, io.Writer) {
+func (e *HostEnvironment) ReplaceLogWriter(stdout io.Writer, _ io.Writer) (io.Writer, io.Writer) {
    org := e.StdOut
    e.StdOut = stdout
    return org, org
}
+
+func (*HostEnvironment) IsEnvironmentCaseInsensitive() bool {
+   return runtime.GOOS == "windows"
+}
@@ -1,4 +1,71 @@
package container

+import (
+   "archive/tar"
+   "context"
+   "io"
+   "os"
+   "path"
+   "path/filepath"
+   "testing"
+
+   "github.com/stretchr/testify/assert"
+)
+
// Type assert HostEnvironment implements ExecutionsEnvironment
var _ ExecutionsEnvironment = &HostEnvironment{}
+
+func TestCopyDir(t *testing.T) {
+   dir, err := os.MkdirTemp("", "test-host-env-*")
+   assert.NoError(t, err)
+   defer os.RemoveAll(dir)
+   ctx := context.Background()
+   e := &HostEnvironment{
+       Path:      filepath.Join(dir, "path"),
+       TmpDir:    filepath.Join(dir, "tmp"),
+       ToolCache: filepath.Join(dir, "tool_cache"),
+       ActPath:   filepath.Join(dir, "act_path"),
+       StdOut:    os.Stdout,
+       Workdir:   path.Join("testdata", "scratch"),
+   }
+   _ = os.MkdirAll(e.Path, 0700)
+   _ = os.MkdirAll(e.TmpDir, 0700)
+   _ = os.MkdirAll(e.ToolCache, 0700)
+   _ = os.MkdirAll(e.ActPath, 0700)
+   err = e.CopyDir(e.Workdir, e.Path, true)(ctx)
+   assert.NoError(t, err)
+}
+
+func TestGetContainerArchive(t *testing.T) {
+   dir, err := os.MkdirTemp("", "test-host-env-*")
+   assert.NoError(t, err)
+   defer os.RemoveAll(dir)
+   ctx := context.Background()
+   e := &HostEnvironment{
+       Path:      filepath.Join(dir, "path"),
+       TmpDir:    filepath.Join(dir, "tmp"),
+       ToolCache: filepath.Join(dir, "tool_cache"),
+       ActPath:   filepath.Join(dir, "act_path"),
+       StdOut:    os.Stdout,
+       Workdir:   path.Join("testdata", "scratch"),
+   }
+   _ = os.MkdirAll(e.Path, 0700)
+   _ = os.MkdirAll(e.TmpDir, 0700)
+   _ = os.MkdirAll(e.ToolCache, 0700)
+   _ = os.MkdirAll(e.ActPath, 0700)
+   expectedContent := []byte("sdde/7sh")
+   err = os.WriteFile(filepath.Join(e.Path, "action.yml"), expectedContent, 0600)
+   assert.NoError(t, err)
+   archive, err := e.GetContainerArchive(ctx, e.Path)
+   assert.NoError(t, err)
+   defer archive.Close()
+   reader := tar.NewReader(archive)
+   h, err := reader.Next()
+   assert.NoError(t, err)
+   assert.Equal(t, "action.yml", h.Name)
+   content, err := io.ReadAll(reader)
+   assert.NoError(t, err)
+   assert.Equal(t, expectedContent, content)
+   _, err = reader.Next()
+   assert.ErrorIs(t, err, io.EOF)
+}
@@ -71,3 +71,7 @@ func (*LinuxContainerEnvironmentExtensions) GetRunnerContext(ctx context.Context
        "tool_cache": "/opt/hostedtoolcache",
    }
}
+
+func (*LinuxContainerEnvironmentExtensions) IsEnvironmentCaseInsensitive() bool {
+   return false
+}
pkg/container/parse_env_file.go (new file, 60 lines)
@@ -0,0 +1,60 @@
+package container
+
+import (
+   "archive/tar"
+   "bufio"
+   "context"
+   "fmt"
+   "io"
+   "strings"
+
+   "github.com/nektos/act/pkg/common"
+)
+
+func parseEnvFile(e Container, srcPath string, env *map[string]string) common.Executor {
+   localEnv := *env
+   return func(ctx context.Context) error {
+       envTar, err := e.GetContainerArchive(ctx, srcPath)
+       if err != nil {
+           return nil
+       }
+       defer envTar.Close()
+       reader := tar.NewReader(envTar)
+       _, err = reader.Next()
+       if err != nil && err != io.EOF {
+           return err
+       }
+       s := bufio.NewScanner(reader)
+       for s.Scan() {
+           line := s.Text()
+           singleLineEnv := strings.Index(line, "=")
+           multiLineEnv := strings.Index(line, "<<")
+           if singleLineEnv != -1 && (multiLineEnv == -1 || singleLineEnv < multiLineEnv) {
+               localEnv[line[:singleLineEnv]] = line[singleLineEnv+1:]
+           } else if multiLineEnv != -1 {
+               multiLineEnvContent := ""
+               multiLineEnvDelimiter := line[multiLineEnv+2:]
+               delimiterFound := false
+               for s.Scan() {
+                   content := s.Text()
+                   if content == multiLineEnvDelimiter {
+                       delimiterFound = true
+                       break
+                   }
+                   if multiLineEnvContent != "" {
+                       multiLineEnvContent += "\n"
+                   }
+                   multiLineEnvContent += content
+               }
+               if !delimiterFound {
+                   return fmt.Errorf("invalid format delimiter '%v' not found before end of file", multiLineEnvDelimiter)
+               }
+               localEnv[line[:multiLineEnv]] = multiLineEnvContent
+           } else {
+               return fmt.Errorf("invalid format '%v', expected a line with '=' or '<<'", line)
+           }
+       }
+       env = &localEnv
+       return nil
+   }
+}
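parseEnvFile understands the two syntaxes GitHub Actions allows in `GITHUB_ENV`-style files: `KEY=value` lines and `KEY<<DELIMITER` heredoc blocks. For reference, an input like the following (values invented) produces the map shown in the comments:

// Example GITHUB_ENV content and the entries parseEnvFile would add:
const envFile = `SINGLE=one=two
MULTI<<EOF
line 1
line 2
EOF`

// Result:
//   "SINGLE" -> "one=two"        (only the first '=' splits key and value)
//   "MULTI"  -> "line 1\nline 2" (heredoc body; the delimiter line is dropped)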
pkg/container/testdata/scratch/test.txt (new vendored file, 1 line)
@@ -0,0 +1 @@
+testfile
@@ -9,7 +9,7 @@ import (
    "github.com/creack/pty"
)

-func getSysProcAttr(cmdLine string, tty bool) *syscall.SysProcAttr {
+func getSysProcAttr(_ string, tty bool) *syscall.SysProcAttr {
    if tty {
        return &syscall.SysProcAttr{
            Setsid: true,
@@ -14,6 +14,7 @@ import (
    "strings"

    "github.com/go-git/go-git/v5/plumbing/format/gitignore"

    "github.com/nektos/act/pkg/model"
    "github.com/rhysd/actionlint"
)

@@ -202,6 +203,9 @@ func (impl *interperterImpl) hashFiles(paths ...reflect.Value) (string, error) {

    var files []string
    if err := filepath.Walk(impl.config.WorkingDir, func(path string, fi fs.FileInfo, err error) error {
+       if err != nil {
+           return err
+       }
        sansPrefix := strings.TrimPrefix(path, impl.config.WorkingDir+string(filepath.Separator))
        parts := strings.Split(sansPrefix, string(filepath.Separator))
        if fi.IsDir() || !matcher.Match(parts, fi.IsDir()) {
@@ -230,6 +230,7 @@ func TestFunctionFormat(t *testing.T) {
        {"format('{0', '{1}', 'World')", nil, "Unclosed brackets. The following format string is invalid: '{0'", "format-invalid-format-string"},
        {"format('{2}', '{1}', 'World')", "", "The following format string references more arguments than were supplied: '{2}'", "format-invalid-replacement-reference"},
        {"format('{2147483648}')", "", "The following format string is invalid: '{2147483648}'", "format-invalid-replacement-reference"},
+       {"format('{0} {1} {2} {3}', 1.0, 1.1, 1234567890.0, 12345678901234567890.0)", "1 1.1 1234567890 1.23456789012346E+19", nil, "format-floats"},
    }

    env := &EvaluationEnvironment{
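The new `format-floats` case pins down the `%.15G` coercion introduced in `coerceToString` below: 15 significant digits, switching to exponent notation where the value demands it, matching how GitHub's expression engine prints numbers. A quick standalone check:

package main

import "fmt"

func main() {
    for _, f := range []float64{1.0, 1.1, 1234567890.0, 12345678901234567890.0} {
        // %.15G keeps 15 significant digits and drops trailing zeros.
        fmt.Printf("%.15G\n", f)
    }
    // Output:
    // 1
    // 1.1
    // 1234567890
    // 1.23456789012346E+19
}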
@@ -12,16 +12,24 @@ import (
)

type EvaluationEnvironment struct {
	Github   *model.GithubContext
	Env      map[string]string
	Job      *model.JobContext
	Steps    map[string]*model.StepResult
	Runner   map[string]interface{}
	Secrets  map[string]string
	Strategy map[string]interface{}
	Matrix   map[string]interface{}
	Needs    map[string]map[string]map[string]string
	Inputs   map[string]interface{}
	Github    *model.GithubContext
	Env       map[string]string
	Job       *model.JobContext
	Jobs      *map[string]*model.WorkflowCallResult
	Steps     map[string]*model.StepResult
	Runner    map[string]interface{}
	Secrets   map[string]string
	Vars      map[string]string
	Strategy  map[string]interface{}
	Matrix    map[string]interface{}
	Needs     map[string]Needs
	Inputs    map[string]interface{}
	HashFiles func([]reflect.Value) (interface{}, error)
}

type Needs struct {
	Outputs map[string]string `json:"outputs"`
	Result  string            `json:"result"`
}

type Config struct {

@@ -142,20 +150,30 @@ func (impl *interperterImpl) evaluateNode(exprNode actionlint.ExprNode) (interfa
	}
}

//nolint:gocyclo
func (impl *interperterImpl) evaluateVariable(variableNode *actionlint.VariableNode) (interface{}, error) {
	switch strings.ToLower(variableNode.Name) {
	case "github":
		return impl.env.Github, nil
	case "gitea": // compatible with Gitea
		return impl.env.Github, nil
	case "env":
		return impl.env.Env, nil
	case "job":
		return impl.env.Job, nil
	case "jobs":
		if impl.env.Jobs == nil {
			return nil, fmt.Errorf("Unavailable context: jobs")
		}
		return impl.env.Jobs, nil
	case "steps":
		return impl.env.Steps, nil
	case "runner":
		return impl.env.Runner, nil
	case "secrets":
		return impl.env.Secrets, nil
	case "vars":
		return impl.env.Vars, nil
	case "strategy":
		return impl.env.Strategy, nil
	case "matrix":

@@ -361,8 +379,16 @@ func (impl *interperterImpl) compareValues(leftValue reflect.Value, rightValue r

		return impl.compareNumber(leftValue.Float(), rightValue.Float(), kind)

	case reflect.Invalid:
		if rightValue.Kind() == reflect.Invalid {
			return true, nil
		}

		// not possible situation - params are converted to the same type in code above
		return nil, fmt.Errorf("Compare params of Invalid type: left: %+v, right: %+v", leftValue.Kind(), rightValue.Kind())

	default:
		return nil, fmt.Errorf("TODO: evaluateCompare not implemented! left: %+v, right: %+v", leftValue.Kind(), rightValue.Kind())
		return nil, fmt.Errorf("Compare not implemented for types: left: %+v, right: %+v", leftValue.Kind(), rightValue.Kind())
	}
}

@@ -423,7 +449,7 @@ func (impl *interperterImpl) coerceToString(value reflect.Value) reflect.Value {
		} else if math.IsInf(value.Float(), -1) {
			return reflect.ValueOf("-Infinity")
		}
		return reflect.ValueOf(fmt.Sprint(value))
		return reflect.ValueOf(fmt.Sprintf("%.15G", value.Float()))

	case reflect.Slice:
		return reflect.ValueOf("Array")
|
||||
|
||||
leftValue := reflect.ValueOf(left)
|
||||
|
||||
if IsTruthy(left) == (compareNode.Kind == actionlint.LogicalOpNodeKindOr) {
|
||||
return impl.getSafeValue(leftValue), nil
|
||||
}
|
||||
|
||||
right, err := impl.evaluateNode(compareNode.Right)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -540,17 +570,8 @@ func (impl *interperterImpl) evaluateLogicalCompare(compareNode *actionlint.Logi
|
||||
|
||||
switch compareNode.Kind {
|
||||
case actionlint.LogicalOpNodeKindAnd:
|
||||
if IsTruthy(left) {
|
||||
return impl.getSafeValue(rightValue), nil
|
||||
}
|
||||
|
||||
return impl.getSafeValue(leftValue), nil
|
||||
|
||||
return impl.getSafeValue(rightValue), nil
|
||||
case actionlint.LogicalOpNodeKindOr:
|
||||
if IsTruthy(left) {
|
||||
return impl.getSafeValue(leftValue), nil
|
||||
}
|
||||
|
||||
return impl.getSafeValue(rightValue), nil
|
||||
}
|
||||
|
||||
@@ -589,6 +610,9 @@ func (impl *interperterImpl) evaluateFuncCall(funcCallNode *actionlint.FuncCallN
|
||||
case "fromjson":
|
||||
return impl.fromJSON(args[0])
|
||||
case "hashfiles":
|
||||
if impl.env.HashFiles != nil {
|
||||
return impl.env.HashFiles(args)
|
||||
}
|
||||
return impl.hashFiles(args...)
|
||||
case "always":
|
||||
return impl.always()
|
||||
|
@@ -69,6 +69,11 @@ func TestOperators(t *testing.T) {
		{`true || false`, true, "or", ""},
		{`fromJSON('{}') && true`, true, "and-boolean-object", ""},
		{`fromJSON('{}') || false`, make(map[string]interface{}), "or-boolean-object", ""},
		{"github.event.commits[0].author.username != github.event.commits[1].author.username", true, "property-comparison1", ""},
		{"github.event.commits[0].author.username1 != github.event.commits[1].author.username", true, "property-comparison2", ""},
		{"github.event.commits[0].author.username != github.event.commits[1].author.username1", true, "property-comparison3", ""},
		{"github.event.commits[0].author.username1 != github.event.commits[1].author.username2", true, "property-comparison4", ""},
		{"secrets != env", nil, "property-comparison5", "Compare not implemented for types: left: map, right: map"},
	}

	env := &EvaluationEnvironment{

@@ -552,9 +557,11 @@ func TestContexts(t *testing.T) {
		// {"contains(steps.*.outputs.name, 'value')", true, "steps-context-array-outputs"},
		{"runner.os", "Linux", "runner-context"},
		{"secrets.name", "value", "secrets-context"},
		{"vars.name", "value", "vars-context"},
		{"strategy.fail-fast", true, "strategy-context"},
		{"matrix.os", "Linux", "matrix-context"},
		{"needs.job-id.outputs.output-name", "value", "needs-context"},
		{"needs.job-id.result", "success", "needs-context"},
		{"inputs.name", "value", "inputs-context"},
	}

	env := &EvaluationEnvironment{

@@ -587,17 +594,21 @@ func TestContexts(t *testing.T) {
		Secrets: map[string]string{
			"name": "value",
		},
		Vars: map[string]string{
			"name": "value",
		},
		Strategy: map[string]interface{}{
			"fail-fast": true,
		},
		Matrix: map[string]interface{}{
			"os": "Linux",
		},
		Needs: map[string]map[string]map[string]string{
		Needs: map[string]Needs{
			"job-id": {
				"outputs": {
				Outputs: map[string]string{
					"output-name": "value",
				},
				Result: "success",
			},
		},
		Inputs: map[string]interface{}{
@@ -1,4 +1,4 @@
package container
package filecollector

import (
	"archive/tar"

@@ -17,18 +17,18 @@ import (
	"github.com/go-git/go-git/v5/plumbing/format/index"
)

type fileCollectorHandler interface {
type Handler interface {
	WriteFile(path string, fi fs.FileInfo, linkName string, f io.Reader) error
}

type tarCollector struct {
type TarCollector struct {
	TarWriter *tar.Writer
	UID       int
	GID       int
	DstDir    string
}

func (tc tarCollector) WriteFile(fpath string, fi fs.FileInfo, linkName string, f io.Reader) error {
func (tc TarCollector) WriteFile(fpath string, fi fs.FileInfo, linkName string, f io.Reader) error {
	// create a new dir/file header
	header, err := tar.FileInfoHeader(fi, linkName)
	if err != nil {

@@ -59,13 +59,13 @@ func (tc tarCollector) WriteFile(fpath string, fi fs.FileInfo, linkName string,
	return nil
}

type copyCollector struct {
type CopyCollector struct {
	DstDir string
}

func (cc *copyCollector) WriteFile(fpath string, fi fs.FileInfo, linkName string, f io.Reader) error {
func (cc *CopyCollector) WriteFile(fpath string, fi fs.FileInfo, linkName string, f io.Reader) error {
	fdestpath := filepath.Join(cc.DstDir, fpath)
	if err := os.MkdirAll(filepath.Dir(fdestpath), 0777); err != nil {
	if err := os.MkdirAll(filepath.Dir(fdestpath), 0o777); err != nil {
		return err
	}
	if f == nil {

@@ -82,29 +82,29 @@ func (cc *copyCollector) WriteFile(fpath string, fi fs.FileInfo, linkName string
	return nil
}

type fileCollector struct {
type FileCollector struct {
	Ignorer   gitignore.Matcher
	SrcPath   string
	SrcPrefix string
	Fs        fileCollectorFs
	Handler   fileCollectorHandler
	Fs        Fs
	Handler   Handler
}

type fileCollectorFs interface {
type Fs interface {
	Walk(root string, fn filepath.WalkFunc) error
	OpenGitIndex(path string) (*index.Index, error)
	Open(path string) (io.ReadCloser, error)
	Readlink(path string) (string, error)
}

type defaultFs struct {
type DefaultFs struct {
}

func (*defaultFs) Walk(root string, fn filepath.WalkFunc) error {
func (*DefaultFs) Walk(root string, fn filepath.WalkFunc) error {
	return filepath.Walk(root, fn)
}

func (*defaultFs) OpenGitIndex(path string) (*index.Index, error) {
func (*DefaultFs) OpenGitIndex(path string) (*index.Index, error) {
	r, err := git.PlainOpen(path)
	if err != nil {
		return nil, err

@@ -116,16 +116,16 @@ func (*defaultFs) OpenGitIndex(path string) (*index.Index, error) {
	return i, nil
}

func (*defaultFs) Open(path string) (io.ReadCloser, error) {
func (*DefaultFs) Open(path string) (io.ReadCloser, error) {
	return os.Open(path)
}

func (*defaultFs) Readlink(path string) (string, error) {
func (*DefaultFs) Readlink(path string) (string, error) {
	return os.Readlink(path)
}

//nolint:gocyclo
func (fc *fileCollector) collectFiles(ctx context.Context, submodulePath []string) filepath.WalkFunc {
func (fc *FileCollector) CollectFiles(ctx context.Context, submodulePath []string) filepath.WalkFunc {
	i, _ := fc.Fs.OpenGitIndex(path.Join(fc.SrcPath, path.Join(submodulePath...)))
	return func(file string, fi os.FileInfo, err error) error {
		if err != nil {

@@ -166,7 +166,7 @@ func (fc *fileCollector) collectFiles(ctx context.Context, submodulePath []strin
			}
		}
		if err == nil && entry.Mode == filemode.Submodule {
			err = fc.Fs.Walk(file, fc.collectFiles(ctx, split))
			err = fc.Fs.Walk(file, fc.CollectFiles(ctx, split))
			if err != nil {
				return err
			}
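With the types exported, the collector can now be driven from outside the package. A hedged sketch of copying a repository's tracked, unignored files into a directory (the pkg/filecollector import path is assumed from this fork's layout; error handling elided):

	package main

	import (
		"context"
		"path/filepath"

		"github.com/go-git/go-git/v5/plumbing/format/gitignore"
		"github.com/nektos/act/pkg/filecollector"
	)

	func main() {
		src := "/path/to/repo"
		fc := &filecollector.FileCollector{
			Ignorer:   gitignore.NewMatcher(nil), // no ignore patterns in this sketch
			SrcPath:   src,
			SrcPrefix: src + string(filepath.Separator),
			Fs:        &filecollector.DefaultFs{},
			Handler:   &filecollector.CopyCollector{DstDir: "/tmp/out"},
		}
		// CollectFiles returns a filepath.WalkFunc; drive it through fc.Fs.
		_ = fc.Fs.Walk(src, fc.CollectFiles(context.Background(), nil))
	}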
@@ -1,4 +1,4 @@
package container
package filecollector

import (
	"archive/tar"

@@ -76,7 +76,7 @@ func (mfs *memoryFs) Readlink(path string) (string, error) {

func TestIgnoredTrackedfile(t *testing.T) {
	fs := memfs.New()
	_ = fs.MkdirAll("mygitrepo/.git", 0777)
	_ = fs.MkdirAll("mygitrepo/.git", 0o777)
	dotgit, _ := fs.Chroot("mygitrepo/.git")
	worktree, _ := fs.Chroot("mygitrepo")
	repo, _ := git.Init(filesystem.NewStorage(dotgit, cache.NewObjectLRUDefault()), worktree)

@@ -95,16 +95,16 @@ func TestIgnoredTrackedfile(t *testing.T) {
	tw := tar.NewWriter(tmpTar)
	ps, _ := gitignore.ReadPatterns(worktree, []string{})
	ignorer := gitignore.NewMatcher(ps)
	fc := &fileCollector{
	fc := &FileCollector{
		Fs:        &memoryFs{Filesystem: fs},
		Ignorer:   ignorer,
		SrcPath:   "mygitrepo",
		SrcPrefix: "mygitrepo" + string(filepath.Separator),
		Handler: &tarCollector{
		Handler: &TarCollector{
			TarWriter: tw,
		},
	}
	err := fc.Fs.Walk("mygitrepo", fc.collectFiles(context.Background(), []string{}))
	err := fc.Fs.Walk("mygitrepo", fc.CollectFiles(context.Background(), []string{}))
	assert.NoError(t, err, "successfully collect files")
	tw.Close()
	_, _ = tmpTar.Seek(0, io.SeekStart)

@@ -115,3 +115,58 @@ func TestIgnoredTrackedfile(t *testing.T) {
	_, err = tr.Next()
	assert.ErrorIs(t, err, io.EOF, "tar must only contain one element")
}

func TestSymlinks(t *testing.T) {
	fs := memfs.New()
	_ = fs.MkdirAll("mygitrepo/.git", 0o777)
	dotgit, _ := fs.Chroot("mygitrepo/.git")
	worktree, _ := fs.Chroot("mygitrepo")
	repo, _ := git.Init(filesystem.NewStorage(dotgit, cache.NewObjectLRUDefault()), worktree)
	// This file shouldn't be in the tar
	f, err := worktree.Create(".env")
	assert.NoError(t, err)
	_, err = f.Write([]byte("test=val1\n"))
	assert.NoError(t, err)
	f.Close()
	err = worktree.Symlink(".env", "test.env")
	assert.NoError(t, err)

	w, err := repo.Worktree()
	assert.NoError(t, err)

	// .gitignore is in the tar after adding it to the index
	_, err = w.Add(".env")
	assert.NoError(t, err)
	_, err = w.Add("test.env")
	assert.NoError(t, err)

	tmpTar, _ := fs.Create("temp.tar")
	tw := tar.NewWriter(tmpTar)
	ps, _ := gitignore.ReadPatterns(worktree, []string{})
	ignorer := gitignore.NewMatcher(ps)
	fc := &FileCollector{
		Fs:        &memoryFs{Filesystem: fs},
		Ignorer:   ignorer,
		SrcPath:   "mygitrepo",
		SrcPrefix: "mygitrepo" + string(filepath.Separator),
		Handler: &TarCollector{
			TarWriter: tw,
		},
	}
	err = fc.Fs.Walk("mygitrepo", fc.CollectFiles(context.Background(), []string{}))
	assert.NoError(t, err, "successfully collect files")
	tw.Close()
	_, _ = tmpTar.Seek(0, io.SeekStart)
	tr := tar.NewReader(tmpTar)
	h, err := tr.Next()
	files := map[string]tar.Header{}
	for err == nil {
		files[h.Name] = *h
		h, err = tr.Next()
	}

	assert.Equal(t, ".env", files[".env"].Name)
	assert.Equal(t, "test.env", files["test.env"].Name)
	assert.Equal(t, ".env", files["test.env"].Linkname)
	assert.ErrorIs(t, err, io.EOF, "tar must be read cleanly to EOF")
}
@@ -41,11 +41,12 @@ func NewInterpeter(
	jobs := run.Workflow.Jobs
	jobNeeds := run.Job().Needs()

	using := map[string]map[string]map[string]string{}
	using := map[string]exprparser.Needs{}
	for _, need := range jobNeeds {
		if v, ok := jobs[need]; ok {
			using[need] = map[string]map[string]string{
				"outputs": v.Outputs,
			using[need] = exprparser.Needs{
				Outputs: v.Outputs,
				Result:  v.Result,
			}
		}
	}
@@ -36,8 +36,17 @@ func Parse(content []byte, options ...ParseOption) ([]*SingleWorkflow, error) {
	}

	var ret []*SingleWorkflow
	for id, job := range workflow.Jobs {
		for _, matrix := range getMatrixes(origin.GetJob(id)) {
	ids, jobs, err := workflow.jobs()
	if err != nil {
		return nil, fmt.Errorf("invalid jobs: %w", err)
	}
	for i, id := range ids {
		job := jobs[i]
		matricxes, err := getMatrixes(origin.GetJob(id))
		if err != nil {
			return nil, fmt.Errorf("getMatrixes: %w", err)
		}
		for _, matrix := range matricxes {
			job := job.Clone()
			if job.Name == "" {
				job.Name = id

@@ -50,17 +59,18 @@ func Parse(content []byte, options ...ParseOption) ([]*SingleWorkflow, error) {
				runsOn[i] = evaluator.Interpolate(v)
			}
			job.RawRunsOn = encodeRunsOn(runsOn)
			job.EraseNeeds() // there will be only one job in SingleWorkflow, it cannot have needs
			ret = append(ret, &SingleWorkflow{
			swf := &SingleWorkflow{
				Name:     workflow.Name,
				RawOn:    workflow.RawOn,
				Env:      workflow.Env,
				Jobs:     map[string]*Job{id: job},
				Defaults: workflow.Defaults,
			})
			}
			if err := swf.SetJob(id, job); err != nil {
				return nil, fmt.Errorf("SetJob: %w", err)
			}
			ret = append(ret, swf)
		}
	}
	sortWorkflows(ret)
	return ret, nil
}

@@ -83,12 +93,15 @@ type parseContext struct {

type ParseOption func(c *parseContext)

func getMatrixes(job *model.Job) []map[string]interface{} {
	ret := job.GetMatrixes()
func getMatrixes(job *model.Job) ([]map[string]interface{}, error) {
	ret, err := job.GetMatrixes()
	if err != nil {
		return nil, fmt.Errorf("GetMatrixes: %w", err)
	}
	sort.Slice(ret, func(i, j int) bool {
		return matrixName(ret[i]) < matrixName(ret[j])
	})
	return ret
	return ret, nil
}

func encodeMatrix(matrix map[string]interface{}) yaml.Node {

@@ -135,19 +148,3 @@ func matrixName(m map[string]interface{}) string {

	return fmt.Sprintf("(%s)", strings.Join(vs, ", "))
}

func sortWorkflows(wfs []*SingleWorkflow) {
	sort.Slice(wfs, func(i, j int) bool {
		ki := ""
		for k := range wfs[i].Jobs {
			ki = k
			break
		}
		kj := ""
		for k := range wfs[j].Jobs {
			kj = k
			break
		}
		return ki < kj
	})
}
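Taken together, Parse now walks jobs in YAML document order (via the raw mapping node) and fails fast on invalid jobs or matrices instead of silently skipping them, which is also why the alphabetical sortWorkflows fallback above is being removed. A hedged sketch of driving the parser (import path assumed from this fork's layout):

	package main

	import (
		"fmt"
		"os"

		"github.com/nektos/act/pkg/jobparser"
	)

	func main() {
		content, _ := os.ReadFile(".gitea/workflows/build.yml")
		workflows, err := jobparser.Parse(content)
		if err != nil {
			panic(err)
		}
		// Each SingleWorkflow carries exactly one job, with its needs erased.
		for _, swf := range workflows {
			id, job := swf.Job()
			fmt.Println(id, job.Name)
		}
	}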
@@ -1,8 +1,6 @@
package jobparser

import (
	"embed"
	"path/filepath"
	"strings"
	"testing"

@@ -13,9 +11,6 @@ import (
	"gopkg.in/yaml.v3"
)

//go:embed testdata
var f embed.FS

func TestParse(t *testing.T) {
	tests := []struct {
		name string

@@ -37,13 +32,26 @@ func TestParse(t *testing.T) {
			options: nil,
			wantErr: false,
		},
		{
			name:    "has_with",
			options: nil,
			wantErr: false,
		},
		{
			name:    "has_secrets",
			options: nil,
			wantErr: false,
		},
		{
			name:    "empty_step",
			options: nil,
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			content, err := f.ReadFile(filepath.Join("testdata", tt.name+".in.yaml"))
			require.NoError(t, err)
			want, err := f.ReadFile(filepath.Join("testdata", tt.name+".out.yaml"))
			require.NoError(t, err)
			content := ReadTestdata(t, tt.name+".in.yaml")
			want := ReadTestdata(t, tt.name+".out.yaml")
			got, err := Parse(content, tt.options...)
			if tt.wantErr {
				require.Error(t, err)

@@ -57,7 +65,10 @@ func TestParse(t *testing.T) {
			}
			encoder := yaml.NewEncoder(builder)
			encoder.SetIndent(2)
			_ = encoder.Encode(v)
			require.NoError(t, encoder.Encode(v))
			id, job := v.Job()
			assert.NotEmpty(t, id)
			assert.NotNil(t, job)
		}
		assert.Equal(t, string(want), builder.String())
	})
@@ -12,17 +12,56 @@ type SingleWorkflow struct {
	Name     string            `yaml:"name,omitempty"`
	RawOn    yaml.Node         `yaml:"on,omitempty"`
	Env      map[string]string `yaml:"env,omitempty"`
	Jobs     map[string]*Job   `yaml:"jobs,omitempty"`
	RawJobs  yaml.Node         `yaml:"jobs,omitempty"`
	Defaults Defaults          `yaml:"defaults,omitempty"`
}

func (w *SingleWorkflow) Job() (string, *Job) {
	for k, v := range w.Jobs {
		return k, v
	ids, jobs, _ := w.jobs()
	if len(ids) >= 1 {
		return ids[0], jobs[0]
	}
	return "", nil
}

func (w *SingleWorkflow) jobs() ([]string, []*Job, error) {
	ids, jobs, err := parseMappingNode[*Job](&w.RawJobs)
	if err != nil {
		return nil, nil, err
	}

	for _, job := range jobs {
		steps := make([]*Step, 0, len(job.Steps))
		for _, s := range job.Steps {
			if s != nil {
				steps = append(steps, s)
			}
		}
		job.Steps = steps
	}

	return ids, jobs, nil
}

func (w *SingleWorkflow) SetJob(id string, job *Job) error {
	m := map[string]*Job{
		id: job,
	}
	out, err := yaml.Marshal(m)
	if err != nil {
		return err
	}
	node := yaml.Node{}
	if err := yaml.Unmarshal(out, &node); err != nil {
		return err
	}
	if len(node.Content) != 1 || node.Content[0].Kind != yaml.MappingNode {
		return fmt.Errorf("can not set job: %q", out)
	}
	w.RawJobs = *node.Content[0]
	return nil
}

func (w *SingleWorkflow) Marshal() ([]byte, error) {
	return yaml.Marshal(w)
}

@@ -41,6 +80,8 @@ type Job struct {
	Defaults   Defaults               `yaml:"defaults,omitempty"`
	Outputs    map[string]string      `yaml:"outputs,omitempty"`
	Uses       string                 `yaml:"uses,omitempty"`
	With       map[string]interface{} `yaml:"with,omitempty"`
	RawSecrets yaml.Node              `yaml:"secrets,omitempty"`
}

func (j *Job) Clone() *Job {

@@ -61,6 +102,8 @@ func (j *Job) Clone() *Job {
		Defaults:   j.Defaults,
		Outputs:    j.Outputs,
		Uses:       j.Uses,
		With:       j.With,
		RawSecrets: j.RawSecrets,
	}
}

@@ -68,8 +111,9 @@ func (j *Job) Needs() []string {
	return (&model.Job{RawNeeds: j.RawNeeds}).Needs()
}

func (j *Job) EraseNeeds() {
func (j *Job) EraseNeeds() *Job {
	j.RawNeeds = yaml.Node{}
	return j
}

func (j *Job) RunsOn() []string {

@@ -92,6 +136,9 @@ type Step struct {

// String gets the name of step
func (s *Step) String() string {
	if s == nil {
		return ""
	}
	return (&model.Step{
		ID:   s.ID,
		Name: s.Name,

@@ -107,6 +154,7 @@ type ContainerSpec struct {
	Volumes     []string          `yaml:"volumes,omitempty"`
	Options     string            `yaml:"options,omitempty"`
	Credentials map[string]string `yaml:"credentials,omitempty"`
	Cmd         []string          `yaml:"cmd,omitempty"`
}

type Strategy struct {

@@ -125,8 +173,21 @@ type RunDefaults struct {
}

type Event struct {
	Name string
	Acts map[string][]string
	Name      string
	acts      map[string][]string
	schedules []map[string]string
}

func (evt *Event) IsSchedule() bool {
	return evt.schedules != nil
}

func (evt *Event) Acts() map[string][]string {
	return evt.acts
}

func (evt *Event) Schedules() []map[string]string {
	return evt.schedules
}

func ParseRawOn(rawOn *yaml.Node) ([]*Event, error) {

@@ -157,23 +218,30 @@ func ParseRawOn(rawOn *yaml.Node) ([]*Event, error) {
		}
		return res, nil
	case yaml.MappingNode:
		var val map[string]interface{}
		err := rawOn.Decode(&val)
		events, triggers, err := parseMappingNode[interface{}](rawOn)
		if err != nil {
			return nil, err
		}
		res := make([]*Event, 0, len(val))
		for k, v := range val {
		res := make([]*Event, 0, len(events))
		for i, k := range events {
			v := triggers[i]
			if v == nil {
				res = append(res, &Event{
					Name: k,
					acts: map[string][]string{},
				})
				continue
			}
			switch t := v.(type) {
			case string:
				res = append(res, &Event{
					Name: k,
					Acts: map[string][]string{},
					acts: map[string][]string{},
				})
			case []string:
				res = append(res, &Event{
					Name: k,
					Acts: map[string][]string{},
					acts: map[string][]string{},
				})
			case map[string]interface{}:
				acts := make(map[string][]string, len(t))

@@ -186,7 +254,10 @@ func ParseRawOn(rawOn *yaml.Node) ([]*Event, error) {
					case []interface{}:
						acts[act] = make([]string, len(b))
						for i, v := range b {
							acts[act][i] = v.(string)
							var ok bool
							if acts[act][i], ok = v.(string); !ok {
								return nil, fmt.Errorf("unknown on type: %#v", branches)
							}
						}
					default:
						return nil, fmt.Errorf("unknown on type: %#v", branches)

@@ -194,7 +265,29 @@ func ParseRawOn(rawOn *yaml.Node) ([]*Event, error) {
				}
				res = append(res, &Event{
					Name: k,
					Acts: acts,
					acts: acts,
				})
			case []interface{}:
				if k != "schedule" {
					return nil, fmt.Errorf("unknown on type: %#v", v)
				}
				schedules := make([]map[string]string, len(t))
				for i, tt := range t {
					vv, ok := tt.(map[string]interface{})
					if !ok {
						return nil, fmt.Errorf("unknown on type: %#v", v)
					}
					schedules[i] = make(map[string]string, len(vv))
					for k, vvv := range vv {
						var ok bool
						if schedules[i][k], ok = vvv.(string); !ok {
							return nil, fmt.Errorf("unknown on type: %#v", v)
						}
					}
				}
				res = append(res, &Event{
					Name:      k,
					schedules: schedules,
				})
			default:
				return nil, fmt.Errorf("unknown on type: %#v", v)

@@ -205,3 +298,36 @@ func ParseRawOn(rawOn *yaml.Node) ([]*Event, error) {
		return nil, fmt.Errorf("unknown on type: %v", rawOn.Kind)
	}
}

// parseMappingNode parse a mapping node and preserve order.
func parseMappingNode[T any](node *yaml.Node) ([]string, []T, error) {
	if node.Kind != yaml.MappingNode {
		return nil, nil, fmt.Errorf("input node is not a mapping node")
	}

	var scalars []string
	var datas []T
	expectKey := true
	for _, item := range node.Content {
		if expectKey {
			if item.Kind != yaml.ScalarNode {
				return nil, nil, fmt.Errorf("not a valid scalar node: %v", item.Value)
			}
			scalars = append(scalars, item.Value)
			expectKey = false
		} else {
			var val T
			if err := item.Decode(&val); err != nil {
				return nil, nil, err
			}
			datas = append(datas, val)
			expectKey = true
		}
	}

	if len(scalars) != len(datas) {
		return nil, nil, fmt.Errorf("invalid definition of on: %v", node.Value)
	}

	return scalars, datas, nil
}
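parseMappingNode is the piece that lets both jobs: and on: keep their YAML document order; decoding into a Go map would randomize it. A small illustration of the underlying yaml.Node layout it relies on (standalone, not the act API):

	package main

	import (
		"fmt"

		"gopkg.in/yaml.v3"
	)

	func main() {
		src := "zzz: 1\naaa: 2\njob1: 3\n"
		var node yaml.Node
		_ = yaml.Unmarshal([]byte(src), &node)

		// node.Content[0] is the mapping node; its Content alternates
		// key, value, key, value... in document order, which is exactly
		// what parseMappingNode walks with its expectKey toggle.
		m := node.Content[0]
		for i := 0; i < len(m.Content); i += 2 {
			fmt.Println(m.Content[i].Value) // zzz, aaa, job1 -- document order
		}
		// Ranging over a map[string]interface{} after Decode, by contrast,
		// visits keys in randomized order.
	}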
@@ -6,7 +6,10 @@ import (
	"testing"

	"github.com/nektos/act/pkg/model"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"gopkg.in/yaml.v3"
)

func TestParseRawOn(t *testing.T) {

@@ -47,7 +50,7 @@ func TestParseRawOn(t *testing.T) {
		result: []*Event{
			{
				Name: "push",
				Acts: map[string][]string{
				acts: map[string][]string{
					"branches": {
						"master",
					},

@@ -60,7 +63,7 @@ func TestParseRawOn(t *testing.T) {
		result: []*Event{
			{
				Name: "branch_protection_rule",
				Acts: map[string][]string{
				acts: map[string][]string{
					"types": {
						"created",
						"deleted",

@@ -74,7 +77,7 @@ func TestParseRawOn(t *testing.T) {
		result: []*Event{
			{
				Name: "project",
				Acts: map[string][]string{
				acts: map[string][]string{
					"types": {
						"created",
						"deleted",

@@ -83,7 +86,7 @@ func TestParseRawOn(t *testing.T) {
			},
			{
				Name: "milestone",
				Acts: map[string][]string{
				acts: map[string][]string{
					"types": {
						"opened",
						"deleted",

@@ -97,7 +100,7 @@ func TestParseRawOn(t *testing.T) {
		result: []*Event{
			{
				Name: "pull_request",
				Acts: map[string][]string{
				acts: map[string][]string{
					"types": {
						"opened",
					},

@@ -113,7 +116,7 @@ func TestParseRawOn(t *testing.T) {
		result: []*Event{
			{
				Name: "push",
				Acts: map[string][]string{
				acts: map[string][]string{
					"branches": {
						"main",
					},

@@ -121,7 +124,7 @@ func TestParseRawOn(t *testing.T) {
			},
			{
				Name: "pull_request",
				Acts: map[string][]string{
				acts: map[string][]string{
					"types": {
						"opened",
					},

@@ -137,7 +140,7 @@ func TestParseRawOn(t *testing.T) {
		result: []*Event{
			{
				Name: "push",
				Acts: map[string][]string{
				acts: map[string][]string{
					"branches": {
						"main",
						"releases/**",

@@ -151,7 +154,7 @@ func TestParseRawOn(t *testing.T) {
		result: []*Event{
			{
				Name: "push",
				Acts: map[string][]string{
				acts: map[string][]string{
					"tags": {
						"v1.**",
					},

@@ -170,6 +173,19 @@ func TestParseRawOn(t *testing.T) {
				},
			},
		},
		{
			input: "on:\n  schedule:\n    - cron: '20 6 * * *'",
			result: []*Event{
				{
					Name: "schedule",
					schedules: []map[string]string{
						{
							"cron": "20 6 * * *",
						},
					},
				},
			},
		},
	}
	for _, kase := range kases {
		t.Run(kase.input, func(t *testing.T) {

@@ -182,3 +198,109 @@ func TestParseRawOn(t *testing.T) {
		})
	}
}

func TestSingleWorkflow_SetJob(t *testing.T) {
	t.Run("erase needs", func(t *testing.T) {
		content := ReadTestdata(t, "erase_needs.in.yaml")
		want := ReadTestdata(t, "erase_needs.out.yaml")
		swf, err := Parse(content)
		require.NoError(t, err)
		builder := &strings.Builder{}
		for _, v := range swf {
			id, job := v.Job()
			require.NoError(t, v.SetJob(id, job.EraseNeeds()))

			if builder.Len() > 0 {
				builder.WriteString("---\n")
			}
			encoder := yaml.NewEncoder(builder)
			encoder.SetIndent(2)
			require.NoError(t, encoder.Encode(v))
		}
		assert.Equal(t, string(want), builder.String())
	})
}

func TestParseMappingNode(t *testing.T) {
	tests := []struct {
		input   string
		scalars []string
		datas   []interface{}
	}{
		{
			input:   "on:\n  push:\n    branches:\n      - master",
			scalars: []string{"push"},
			datas: []interface{}{
				map[string]interface{}{
					"branches": []interface{}{"master"},
				},
			},
		},
		{
			input:   "on:\n  branch_protection_rule:\n    types: [created, deleted]",
			scalars: []string{"branch_protection_rule"},
			datas: []interface{}{
				map[string]interface{}{
					"types": []interface{}{"created", "deleted"},
				},
			},
		},
		{
			input:   "on:\n  project:\n    types: [created, deleted]\n  milestone:\n    types: [opened, deleted]",
			scalars: []string{"project", "milestone"},
			datas: []interface{}{
				map[string]interface{}{
					"types": []interface{}{"created", "deleted"},
				},
				map[string]interface{}{
					"types": []interface{}{"opened", "deleted"},
				},
			},
		},
		{
			input:   "on:\n  pull_request:\n    types:\n      - opened\n    branches:\n      - 'releases/**'",
			scalars: []string{"pull_request"},
			datas: []interface{}{
				map[string]interface{}{
					"types":    []interface{}{"opened"},
					"branches": []interface{}{"releases/**"},
				},
			},
		},
		{
			input:   "on:\n  push:\n    branches:\n      - main\n  pull_request:\n    types:\n      - opened\n    branches:\n      - '**'",
			scalars: []string{"push", "pull_request"},
			datas: []interface{}{
				map[string]interface{}{
					"branches": []interface{}{"main"},
				},
				map[string]interface{}{
					"types":    []interface{}{"opened"},
					"branches": []interface{}{"**"},
				},
			},
		},
		{
			input:   "on:\n  schedule:\n    - cron: '20 6 * * *'",
			scalars: []string{"schedule"},
			datas: []interface{}{
				[]interface{}{map[string]interface{}{
					"cron": "20 6 * * *",
				}},
			},
		},
	}

	for _, test := range tests {
		t.Run(test.input, func(t *testing.T) {
			workflow, err := model.ReadWorkflow(strings.NewReader(test.input))
			assert.NoError(t, err)

			scalars, datas, err := parseMappingNode[interface{}](&workflow.RawOn)
			assert.NoError(t, err)
			assert.EqualValues(t, test.scalars, scalars, fmt.Sprintf("%#v", scalars))
			assert.EqualValues(t, test.datas, datas, fmt.Sprintf("%#v", datas))
		})
	}
}
8 pkg/jobparser/testdata/empty_step.in.yaml vendored Normal file
@@ -0,0 +1,8 @@
name: test
jobs:
  job1:
    name: job1
    runs-on: linux
    steps:
      - run: echo job-1
      -

7 pkg/jobparser/testdata/empty_step.out.yaml vendored Normal file
@@ -0,0 +1,7 @@
name: test
jobs:
  job1:
    name: job1
    runs-on: linux
    steps:
      - run: echo job-1

16 pkg/jobparser/testdata/erase_needs.in.yaml vendored Normal file
@@ -0,0 +1,16 @@
name: test
jobs:
  job1:
    runs-on: linux
    steps:
      - run: uname -a
  job2:
    runs-on: linux
    steps:
      - run: uname -a
    needs: job1
  job3:
    runs-on: linux
    steps:
      - run: uname -a
    needs: [job1, job2]

23 pkg/jobparser/testdata/erase_needs.out.yaml vendored Normal file
@@ -0,0 +1,23 @@
name: test
jobs:
  job1:
    name: job1
    runs-on: linux
    steps:
      - run: uname -a
---
name: test
jobs:
  job2:
    name: job2
    runs-on: linux
    steps:
      - run: uname -a
---
name: test
jobs:
  job3:
    name: job3
    runs-on: linux
    steps:
      - run: uname -a

2 pkg/jobparser/testdata/has_needs.out.yaml vendored
@@ -10,6 +10,7 @@ name: test
jobs:
  job2:
    name: job2
    needs: job1
    runs-on: linux
    steps:
      - run: uname -a

@@ -18,6 +19,7 @@ name: test
jobs:
  job3:
    name: job3
    needs: [job1, job2]
    runs-on: linux
    steps:
      - run: uname -a
14 pkg/jobparser/testdata/has_secrets.in.yaml vendored Normal file
@@ -0,0 +1,14 @@
name: test
jobs:
  job1:
    name: job1
    runs-on: linux
    uses: .gitea/workflows/build.yml
    secrets:
      secret: hideme

  job2:
    name: job2
    runs-on: linux
    uses: .gitea/workflows/build.yml
    secrets: inherit

16 pkg/jobparser/testdata/has_secrets.out.yaml vendored Normal file
@@ -0,0 +1,16 @@
name: test
jobs:
  job1:
    name: job1
    runs-on: linux
    uses: .gitea/workflows/build.yml
    secrets:
      secret: hideme
---
name: test
jobs:
  job2:
    name: job2
    runs-on: linux
    uses: .gitea/workflows/build.yml
    secrets: inherit

15 pkg/jobparser/testdata/has_with.in.yaml vendored Normal file
@@ -0,0 +1,15 @@
name: test
jobs:
  job1:
    name: job1
    runs-on: linux
    uses: .gitea/workflows/build.yml
    with:
      package: service

  job2:
    name: job2
    runs-on: linux
    uses: .gitea/workflows/build.yml
    with:
      package: module

17 pkg/jobparser/testdata/has_with.out.yaml vendored Normal file
@@ -0,0 +1,17 @@
name: test
jobs:
  job1:
    name: job1
    runs-on: linux
    uses: .gitea/workflows/build.yml
    with:
      package: service
---
name: test
jobs:
  job2:
    name: job2
    runs-on: linux
    uses: .gitea/workflows/build.yml
    with:
      package: module

10 pkg/jobparser/testdata/multiple_jobs.in.yaml vendored
@@ -1,5 +1,9 @@
name: test
jobs:
  zzz:
    runs-on: linux
    steps:
      - run: echo zzz
  job1:
    runs-on: linux
    steps:

@@ -11,4 +15,8 @@ jobs:
  job3:
    runs-on: linux
    steps:
      - run: uname -a && go version
      - run: uname -a && go version
  aaa:
    runs-on: linux
    steps:
      - run: uname -a && go version

16 pkg/jobparser/testdata/multiple_jobs.out.yaml vendored
@@ -1,4 +1,12 @@
name: test
jobs:
  zzz:
    name: zzz
    runs-on: linux
    steps:
      - run: echo zzz
---
name: test
jobs:
  job1:
    name: job1

@@ -21,3 +29,11 @@ jobs:
    runs-on: linux
    steps:
      - run: uname -a && go version
---
name: test
jobs:
  aaa:
    name: aaa
    runs-on: linux
    steps:
      - run: uname -a && go version

18 pkg/jobparser/testdata_test.go Normal file
@@ -0,0 +1,18 @@
package jobparser

import (
	"embed"
	"path/filepath"
	"testing"

	"github.com/stretchr/testify/require"
)

//go:embed testdata
var testdata embed.FS

func ReadTestdata(t *testing.T, name string) []byte {
	content, err := testdata.ReadFile(filepath.Join("testdata", name))
	require.NoError(t, err)
	return content
}
@@ -20,7 +20,7 @@ func (a *ActionRunsUsing) UnmarshalYAML(unmarshal func(interface{}) error) error
	// Force input to lowercase for case insensitive comparison
	format := ActionRunsUsing(strings.ToLower(using))
	switch format {
	case ActionRunsUsingNode16, ActionRunsUsingNode12, ActionRunsUsingDocker, ActionRunsUsingComposite:
	case ActionRunsUsingNode20, ActionRunsUsingNode16, ActionRunsUsingNode12, ActionRunsUsingDocker, ActionRunsUsingComposite, ActionRunsUsingGo:
		*a = format
	default:
		return fmt.Errorf(fmt.Sprintf("The runs.using key in action.yml must be one of: %v, got %s", []string{

@@ -28,6 +28,8 @@ func (a *ActionRunsUsing) UnmarshalYAML(unmarshal func(interface{}) error) error
			ActionRunsUsingDocker,
			ActionRunsUsingNode12,
			ActionRunsUsingNode16,
			ActionRunsUsingNode20,
			ActionRunsUsingGo,
		}, format))
	}
	return nil

@@ -36,8 +38,10 @@ func (a *ActionRunsUsing) UnmarshalYAML(unmarshal func(interface{}) error) error
const (
	// ActionRunsUsingNode12 for running with node12
	ActionRunsUsingNode12 = "node12"
	// ActionRunsUsingNode12 for running with node16
	// ActionRunsUsingNode16 for running with node16
	ActionRunsUsingNode16 = "node16"
	// ActionRunsUsingNode20 for running with node20
	ActionRunsUsingNode20 = "node20"
	// ActionRunsUsingDocker for running with docker
	ActionRunsUsingDocker = "docker"
	// ActionRunsUsingComposite for running composite
@@ -3,6 +3,7 @@ package model

import (
	"context"
	"fmt"
	"strings"

	"github.com/nektos/act/pkg/common"
	"github.com/nektos/act/pkg/common/git"

@@ -35,6 +36,9 @@ type GithubContext struct {
	RetentionDays    string `json:"retention_days"`
	RunnerPerflog    string `json:"runner_perflog"`
	RunnerTrackingID string `json:"runner_tracking_id"`
	ServerURL        string `json:"server_url"`
	APIURL           string `json:"api_url"`
	GraphQLURL       string `json:"graphql_url"`
}

func asString(v interface{}) string {

@@ -89,26 +93,22 @@ func withDefaultBranch(ctx context.Context, b string, event map[string]interface
var findGitRef = git.FindGitRef
var findGitRevision = git.FindGitRevision

func (ghc *GithubContext) SetRefAndSha(ctx context.Context, defaultBranch string, repoPath string) {
func (ghc *GithubContext) SetRef(ctx context.Context, defaultBranch string, repoPath string) {
	logger := common.Logger(ctx)

	// https://docs.github.com/en/actions/learn-github-actions/events-that-trigger-workflows
	// https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads
	switch ghc.EventName {
	case "pull_request_target":
		ghc.Ref = fmt.Sprintf("refs/heads/%s", ghc.BaseRef)
		ghc.Sha = asString(nestedMapLookup(ghc.Event, "pull_request", "base", "sha"))
	case "pull_request", "pull_request_review", "pull_request_review_comment":
		ghc.Ref = fmt.Sprintf("refs/pull/%.0f/merge", ghc.Event["number"])
	case "deployment", "deployment_status":
		ghc.Ref = asString(nestedMapLookup(ghc.Event, "deployment", "ref"))
		ghc.Sha = asString(nestedMapLookup(ghc.Event, "deployment", "sha"))
	case "release":
		ghc.Ref = asString(nestedMapLookup(ghc.Event, "release", "tag_name"))
		ghc.Ref = fmt.Sprintf("refs/tags/%s", asString(nestedMapLookup(ghc.Event, "release", "tag_name")))
	case "push", "create", "workflow_dispatch":
		ghc.Ref = asString(ghc.Event["ref"])
		if deleted, ok := ghc.Event["deleted"].(bool); ok && !deleted {
			ghc.Sha = asString(ghc.Event["after"])
		}
	default:
		defaultBranch := asString(nestedMapLookup(ghc.Event, "repository", "default_branch"))
		if defaultBranch != "" {

@@ -136,6 +136,23 @@ func (ghc *GithubContext) SetRefAndSha(ctx context.Context, defaultBranch string
		ghc.Ref = fmt.Sprintf("refs/heads/%s", asString(nestedMapLookup(ghc.Event, "repository", "default_branch")))
	}
}

func (ghc *GithubContext) SetSha(ctx context.Context, repoPath string) {
	logger := common.Logger(ctx)

	// https://docs.github.com/en/actions/learn-github-actions/events-that-trigger-workflows
	// https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads
	switch ghc.EventName {
	case "pull_request_target":
		ghc.Sha = asString(nestedMapLookup(ghc.Event, "pull_request", "base", "sha"))
	case "deployment", "deployment_status":
		ghc.Sha = asString(nestedMapLookup(ghc.Event, "deployment", "sha"))
	case "push", "create", "workflow_dispatch":
		if deleted, ok := ghc.Event["deleted"].(bool); ok && !deleted {
			ghc.Sha = asString(ghc.Event["after"])
		}
	}

	if ghc.Sha == "" {
		_, sha, err := findGitRevision(ctx, repoPath)

@@ -146,3 +163,51 @@ func (ghc *GithubContext) SetRefAndSha(ctx context.Context, defaultBranch string
		}
	}
}

func (ghc *GithubContext) SetRepositoryAndOwner(ctx context.Context, githubInstance string, remoteName string, repoPath string) {
	if ghc.Repository == "" {
		repo, err := git.FindGithubRepo(ctx, repoPath, githubInstance, remoteName)
		if err != nil {
			common.Logger(ctx).Warningf("unable to get git repo (githubInstance: %v; remoteName: %v, repoPath: %v): %v", githubInstance, remoteName, repoPath, err)
			return
		}
		ghc.Repository = repo
	}
	ghc.RepositoryOwner = strings.Split(ghc.Repository, "/")[0]
}

func (ghc *GithubContext) SetRefTypeAndName() {
	var refType, refName string

	// https://docs.github.com/en/actions/learn-github-actions/environment-variables
	if strings.HasPrefix(ghc.Ref, "refs/tags/") {
		refType = "tag"
		refName = ghc.Ref[len("refs/tags/"):]
	} else if strings.HasPrefix(ghc.Ref, "refs/heads/") {
		refType = "branch"
		refName = ghc.Ref[len("refs/heads/"):]
	} else if strings.HasPrefix(ghc.Ref, "refs/pull/") {
		refType = ""
		refName = ghc.Ref[len("refs/pull/"):]
	}

	if ghc.RefType == "" {
		ghc.RefType = refType
	}

	if ghc.RefName == "" {
		ghc.RefName = refName
	}
}

func (ghc *GithubContext) SetBaseAndHeadRef() {
	if ghc.EventName == "pull_request" || ghc.EventName == "pull_request_target" {
		if ghc.BaseRef == "" {
			ghc.BaseRef = asString(nestedMapLookup(ghc.Event, "pull_request", "base", "ref"))
		}

		if ghc.HeadRef == "" {
			ghc.HeadRef = asString(nestedMapLookup(ghc.Event, "pull_request", "head", "ref"))
		}
	}
}
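The split into SetRef, SetSha, and SetRefTypeAndName makes each derivation independently testable. For orientation, the ref-to-type/name mapping implemented above works out to (a summary of the prefix checks in the code):

	refs/tags/v1.0.0     -> RefType "tag",    RefName "v1.0.0"
	refs/heads/main      -> RefType "branch", RefName "main"
	refs/pull/1234/merge -> RefType "",       RefName "1234/merge"

Explicitly provided RefType/RefName values are preserved; the derived values only fill in blanks.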
@@ -9,7 +9,7 @@ import (
	"github.com/stretchr/testify/assert"
)

func TestSetRefAndSha(t *testing.T) {
func TestSetRef(t *testing.T) {
	log.SetLevel(log.DebugLevel)

	oldFindGitRef := findGitRef

@@ -29,38 +29,31 @@ func TestSetRefAndSha(t *testing.T) {
		eventName string
		event     map[string]interface{}
		ref       string
		sha       string
		refName   string
	}{
		{
			eventName: "pull_request_target",
			event: map[string]interface{}{
				"pull_request": map[string]interface{}{
					"base": map[string]interface{}{
						"sha": "pr-base-sha",
					},
				},
			},
			ref: "refs/heads/master",
			sha: "pr-base-sha",
			event:   map[string]interface{}{},
			ref:     "refs/heads/master",
			refName: "master",
		},
		{
			eventName: "pull_request",
			event: map[string]interface{}{
				"number": 1234.,
			},
			ref: "refs/pull/1234/merge",
			sha: "1234fakesha",
			ref:     "refs/pull/1234/merge",
			refName: "1234/merge",
		},
		{
			eventName: "deployment",
			event: map[string]interface{}{
				"deployment": map[string]interface{}{
					"ref": "refs/heads/somebranch",
					"sha": "deployment-sha",
				},
			},
			ref: "refs/heads/somebranch",
			sha: "deployment-sha",
			ref:     "refs/heads/somebranch",
			refName: "somebranch",
		},
		{
			eventName: "release",

@@ -69,18 +62,16 @@ func TestSetRefAndSha(t *testing.T) {
				"tag_name": "v1.0.0",
			},
		},
			ref: "v1.0.0",
			sha: "1234fakesha",
			ref:     "refs/tags/v1.0.0",
			refName: "v1.0.0",
		},
		{
			eventName: "push",
			event: map[string]interface{}{
				"ref":     "refs/heads/somebranch",
				"after":   "push-sha",
				"deleted": false,
				"ref": "refs/heads/somebranch",
			},
			ref: "refs/heads/somebranch",
			sha: "push-sha",
			ref:     "refs/heads/somebranch",
			refName: "somebranch",
		},
		{
			eventName: "unknown",

@@ -89,14 +80,14 @@ func TestSetRefAndSha(t *testing.T) {
				"default_branch": "main",
			},
		},
			ref: "refs/heads/main",
			sha: "1234fakesha",
			ref:     "refs/heads/main",
			refName: "main",
		},
		{
			eventName: "no-event",
			event:     map[string]interface{}{},
			ref:       "refs/heads/master",
			sha:       "1234fakesha",
			refName:   "master",
		},
	}

@@ -108,10 +99,11 @@ func TestSetRefAndSha(t *testing.T) {
			Event: table.event,
		}

		ghc.SetRefAndSha(context.Background(), "main", "/some/dir")
		ghc.SetRef(context.Background(), "main", "/some/dir")
		ghc.SetRefTypeAndName()

		assert.Equal(t, table.ref, ghc.Ref)
		assert.Equal(t, table.sha, ghc.Sha)
		assert.Equal(t, table.refName, ghc.RefName)
	})
}

@@ -125,9 +117,96 @@ func TestSetRefAndSha(t *testing.T) {
		Event: map[string]interface{}{},
	}

	ghc.SetRefAndSha(context.Background(), "", "/some/dir")
	ghc.SetRef(context.Background(), "", "/some/dir")

	assert.Equal(t, "refs/heads/master", ghc.Ref)
	assert.Equal(t, "1234fakesha", ghc.Sha)
	})
}

func TestSetSha(t *testing.T) {
	log.SetLevel(log.DebugLevel)

	oldFindGitRef := findGitRef
	oldFindGitRevision := findGitRevision
	defer func() { findGitRef = oldFindGitRef }()
	defer func() { findGitRevision = oldFindGitRevision }()

	findGitRef = func(ctx context.Context, file string) (string, error) {
		return "refs/heads/master", nil
	}

	findGitRevision = func(ctx context.Context, file string) (string, string, error) {
		return "", "1234fakesha", nil
	}

	tables := []struct {
		eventName string
		event     map[string]interface{}
		sha       string
	}{
		{
			eventName: "pull_request_target",
			event: map[string]interface{}{
				"pull_request": map[string]interface{}{
					"base": map[string]interface{}{
						"sha": "pr-base-sha",
					},
				},
			},
			sha: "pr-base-sha",
		},
		{
			eventName: "pull_request",
			event: map[string]interface{}{
				"number": 1234.,
			},
			sha: "1234fakesha",
		},
		{
			eventName: "deployment",
			event: map[string]interface{}{
				"deployment": map[string]interface{}{
					"sha": "deployment-sha",
				},
			},
			sha: "deployment-sha",
		},
		{
			eventName: "release",
			event:     map[string]interface{}{},
			sha:       "1234fakesha",
		},
		{
			eventName: "push",
			event: map[string]interface{}{
				"after":   "push-sha",
				"deleted": false,
			},
			sha: "push-sha",
		},
		{
			eventName: "unknown",
			event:     map[string]interface{}{},
			sha:       "1234fakesha",
		},
		{
			eventName: "no-event",
			event:     map[string]interface{}{},
			sha:       "1234fakesha",
		},
	}

	for _, table := range tables {
		t.Run(table.eventName, func(t *testing.T) {
			ghc := &GithubContext{
				EventName: table.eventName,
				BaseRef:   "master",
				Event:     table.event,
			}

			ghc.SetSha(context.Background(), "/some/dir")

			assert.Equal(t, table.sha, ghc.Sha)
		})
	}
}
@@ -15,9 +15,9 @@ import (

// WorkflowPlanner contains methods for creating plans
type WorkflowPlanner interface {
	PlanEvent(eventName string) *Plan
	PlanJob(jobName string) *Plan
	PlanAll() *Plan
	PlanEvent(eventName string) (*Plan, error)
	PlanJob(jobName string) (*Plan, error)
	PlanAll() (*Plan, error)
	GetEvents() []string
}

@@ -148,12 +148,10 @@ func NewWorkflowPlanner(path string, noWorkflowRecurse bool) (WorkflowPlanner, e
			workflow.Name = wf.workflowDirEntry.Name()
		}

		jobNameRegex := regexp.MustCompile(`^([[:alpha:]_][[:alnum:]_\-]*)$`)
		for k := range workflow.Jobs {
			if ok := jobNameRegex.MatchString(k); !ok {
				_ = f.Close()
				return nil, fmt.Errorf("workflow is not valid. '%s': Job name '%s' is invalid. Names must start with a letter or '_' and contain only alphanumeric characters, '-', or '_'", workflow.Name, k)
			}
		err = validateJobName(workflow)
		if err != nil {
			_ = f.Close()
			return nil, err
		}

		wp.workflows = append(wp.workflows, workflow)

@@ -171,52 +169,117 @@ func CombineWorkflowPlanner(workflows ...*Workflow) WorkflowPlanner {
	}
}

func NewSingleWorkflowPlanner(name string, f io.Reader) (WorkflowPlanner, error) {
	wp := new(workflowPlanner)

	log.Debugf("Reading workflow %s", name)
	workflow, err := ReadWorkflow(f)
	if err != nil {
		if err == io.EOF {
			return nil, fmt.Errorf("unable to read workflow '%s': file is empty: %w", name, err)
		}
		return nil, fmt.Errorf("workflow is not valid. '%s': %w", name, err)
	}
	workflow.File = name
	if workflow.Name == "" {
		workflow.Name = name
	}

	err = validateJobName(workflow)
	if err != nil {
		return nil, err
	}

	wp.workflows = append(wp.workflows, workflow)

	return wp, nil
}

func validateJobName(workflow *Workflow) error {
	jobNameRegex := regexp.MustCompile(`^([[:alpha:]_][[:alnum:]_\-]*)$`)
	for k := range workflow.Jobs {
		if ok := jobNameRegex.MatchString(k); !ok {
			return fmt.Errorf("workflow is not valid. '%s': Job name '%s' is invalid. Names must start with a letter or '_' and contain only alphanumeric characters, '-', or '_'", workflow.Name, k)
		}
	}
	return nil
}

type workflowPlanner struct {
	workflows []*Workflow
}

// PlanEvent builds a new list of runs to execute in parallel for an event name
func (wp *workflowPlanner) PlanEvent(eventName string) *Plan {
func (wp *workflowPlanner) PlanEvent(eventName string) (*Plan, error) {
	plan := new(Plan)
	if len(wp.workflows) == 0 {
		log.Debugf("no events found for workflow: %s", eventName)
		log.Debug("no workflows found by planner")
		return plan, nil
	}
	var lastErr error

	for _, w := range wp.workflows {
		for _, e := range w.On() {
		events := w.On()
		if len(events) == 0 {
			log.Debugf("no events found for workflow: %s", w.File)
			continue
		}

		for _, e := range events {
			if e == eventName {
				plan.mergeStages(createStages(w, w.GetJobIDs()...))
				stages, err := createStages(w, w.GetJobIDs()...)
				if err != nil {
					log.Warn(err)
					lastErr = err
				} else {
					plan.mergeStages(stages)
				}
			}
		}
	}
	return plan
	return plan, lastErr
}

// PlanJob builds a new run to execute in parallel for a job name
func (wp *workflowPlanner) PlanJob(jobName string) *Plan {
func (wp *workflowPlanner) PlanJob(jobName string) (*Plan, error) {
	plan := new(Plan)
	if len(wp.workflows) == 0 {
		log.Debugf("no jobs found for workflow: %s", jobName)
	}
	var lastErr error

	for _, w := range wp.workflows {
		plan.mergeStages(createStages(w, jobName))
		stages, err := createStages(w, jobName)
		if err != nil {
			log.Warn(err)
			lastErr = err
		} else {
			plan.mergeStages(stages)
		}
	}
	return plan
	return plan, lastErr
}

// PlanAll builds a new run to execute in parallel all
func (wp *workflowPlanner) PlanAll() *Plan {
func (wp *workflowPlanner) PlanAll() (*Plan, error) {
	plan := new(Plan)
	if len(wp.workflows) == 0 {
		log.Debugf("no jobs found for loaded workflows")
		log.Debug("no workflows found by planner")
		return plan, nil
	}
	var lastErr error

	for _, w := range wp.workflows {
		plan.mergeStages(createStages(w, w.GetJobIDs()...))
		stages, err := createStages(w, w.GetJobIDs()...)
		if err != nil {
			log.Warn(err)
			lastErr = err
		} else {
			plan.mergeStages(stages)
		}
	}

	return plan
	return plan, lastErr
}

// GetEvents gets all the events in the workflows file

@@ -289,7 +352,7 @@ func (p *Plan) mergeStages(stages []*Stage) {
	p.Stages = newStages
}

func createStages(w *Workflow, jobIDs ...string) []*Stage {
func createStages(w *Workflow, jobIDs ...string) ([]*Stage, error) {
	// first, build a list of all the necessary jobs to run, and their dependencies
	jobDependencies := make(map[string][]string)
	for len(jobIDs) > 0 {

@@ -321,12 +384,16 @@ func createStages(w *Workflow, jobIDs ...string) []*Stage {
			}
		}
		if len(stage.Runs) == 0 {
			log.Fatalf("Unable to build dependency graph!")
			return nil, fmt.Errorf("unable to build dependency graph for %s (%s)", w.Name, w.File)
		}
		stages = append(stages, stage)
	}

	return stages
	if len(stages) == 0 {
		return nil, fmt.Errorf("Could not find any stages to run. View the valid jobs with `act --list`. Use `act --help` to find how to filter by Job ID/Workflow/Event Name")
	}

	return stages, nil
}

// return true iff all strings in srcList exist in at least one of the stages
@@ -39,3 +39,25 @@ func TestPlanner(t *testing.T) {
 		}
 	}
 }
+
+func TestWorkflow(t *testing.T) {
+	log.SetLevel(log.DebugLevel)
+
+	workflow := Workflow{
+		Jobs: map[string]*Job{
+			"valid_job": {
+				Name: "valid_job",
+			},
+		},
+	}
+
+	// Check that an invalid job id returns error
+	result, err := createStages(&workflow, "invalid_job_id")
+	assert.NotNil(t, err)
+	assert.Nil(t, result)
+
+	// Check that a valid job id returns non-error
+	result, err = createStages(&workflow, "valid_job")
+	assert.Nil(t, err)
+	assert.NotNil(t, result)
+}

@@ -42,5 +42,4 @@ type StepResult struct {
 	Outputs    map[string]string `json:"outputs"`
 	Conclusion stepStatus        `json:"conclusion"`
 	Outcome    stepStatus        `json:"outcome"`
-	State      map[string]string
 }

@@ -58,9 +58,8 @@ func (w *Workflow) On() []string {
 func (w *Workflow) OnEvent(event string) interface{} {
 	if w.RawOn.Kind == yaml.MappingNode {
 		var val map[string]interface{}
-		err := w.RawOn.Decode(&val)
-		if err != nil {
-			log.Fatal(err)
+		if !decodeNode(w.RawOn, &val) {
+			return nil
 		}
 		return val[event]
 	}

@@ -104,21 +103,78 @@ type WorkflowDispatch struct {
 }

 func (w *Workflow) WorkflowDispatchConfig() *WorkflowDispatch {
-	if w.RawOn.Kind != yaml.MappingNode {
+	switch w.RawOn.Kind {
+	case yaml.ScalarNode:
+		var val string
+		if !decodeNode(w.RawOn, &val) {
+			return nil
+		}
+		if val == "workflow_dispatch" {
+			return &WorkflowDispatch{}
+		}
+	case yaml.SequenceNode:
+		var val []string
+		if !decodeNode(w.RawOn, &val) {
+			return nil
+		}
+		for _, v := range val {
+			if v == "workflow_dispatch" {
+				return &WorkflowDispatch{}
+			}
+		}
+	case yaml.MappingNode:
+		var val map[string]yaml.Node
+		if !decodeNode(w.RawOn, &val) {
+			return nil
+		}
+
+		n, found := val["workflow_dispatch"]
+		var workflowDispatch WorkflowDispatch
+		if found && decodeNode(n, &workflowDispatch) {
+			return &workflowDispatch
+		}
+	default:
 		return nil
 	}
+	return nil
 }
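Editor's note: the switch now accepts all three YAML shapes of `on:` rather than only mappings. A hedged summary of what each shape yields, mirroring the TestReadWorkflow_WorkflowDispatchConfig cases later in this diff:

	// on: workflow_dispatch                 -> &WorkflowDispatch{}   (scalar node)
	// on: [push, workflow_dispatch]         -> &WorkflowDispatch{}   (sequence node)
	// on: { workflow_dispatch: {inputs: …}} -> decoded inputs        (mapping node)
	// on: push                              -> nil (no dispatch trigger)
	wd := workflow.WorkflowDispatchConfig()
	if wd != nil {
		for name, input := range wd.Inputs {
			fmt.Printf("input %s (required=%v, default=%q)\n", name, input.Required, input.Default)
		}
	}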

+type WorkflowCallInput struct {
+	Description string `yaml:"description"`
+	Required    bool   `yaml:"required"`
+	Default     string `yaml:"default"`
+	Type        string `yaml:"type"`
+}
+
+type WorkflowCallOutput struct {
+	Description string `yaml:"description"`
+	Value       string `yaml:"value"`
+}
+
+type WorkflowCall struct {
+	Inputs  map[string]WorkflowCallInput  `yaml:"inputs"`
+	Outputs map[string]WorkflowCallOutput `yaml:"outputs"`
+}
+
+type WorkflowCallResult struct {
+	Outputs map[string]string
+}
+
 func (w *Workflow) WorkflowCallConfig() *WorkflowCall {
 	if w.RawOn.Kind != yaml.MappingNode {
+		// The callers expect for "on: workflow_call" and "on: [ workflow_call ]" a non nil return value
 		return &WorkflowCall{}
 	}

 	var val map[string]yaml.Node
-	err := w.RawOn.Decode(&val)
-	if err != nil {
-		log.Fatal(err)
+	if !decodeNode(w.RawOn, &val) {
+		return &WorkflowCall{}
 	}

-	var config WorkflowDispatch
-	node := val["workflow_dispatch"]
-	err = node.Decode(&config)
-	if err != nil {
-		log.Fatal(err)
+	var config WorkflowCall
+	node := val["workflow_call"]
+	if !decodeNode(node, &config) {
+		return &WorkflowCall{}
 	}

 	return &config
@@ -139,6 +195,8 @@ type Job struct {
 	Defaults   Defaults          `yaml:"defaults"`
 	Outputs    map[string]string `yaml:"outputs"`
 	Uses       string            `yaml:"uses"`
+	With       map[string]interface{} `yaml:"with"`
+	RawSecrets yaml.Node         `yaml:"secrets"`
 	Result     string
 }

@@ -193,21 +251,45 @@ func (s Strategy) GetFailFast() bool {
 	return failFast
 }

+func (j *Job) InheritSecrets() bool {
+	if j.RawSecrets.Kind != yaml.ScalarNode {
+		return false
+	}
+
+	var val string
+	if !decodeNode(j.RawSecrets, &val) {
+		return false
+	}
+
+	return val == "inherit"
+}
+
+func (j *Job) Secrets() map[string]string {
+	if j.RawSecrets.Kind != yaml.MappingNode {
+		return nil
+	}
+
+	var val map[string]string
+	if !decodeNode(j.RawSecrets, &val) {
+		return nil
+	}
+
+	return val
+}
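Editor's note: InheritSecrets and Secrets split the two YAML forms a reusable-workflow job can carry under `secrets:`. A short sketch of how a caller might combine them; resolveSecrets is a hypothetical helper for illustration, not part of this diff:

	// With `secrets: inherit` the parent's secrets pass through unchanged;
	// with a mapping only the listed key/value pairs are forwarded.
	func resolveSecrets(j *Job, parent map[string]string) map[string]string {
		if j.InheritSecrets() {
			return parent
		}
		return j.Secrets() // nil unless `secrets:` is a mapping
	}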

 // Container details for the job
 func (j *Job) Container() *ContainerSpec {
 	var val *ContainerSpec
 	switch j.RawContainer.Kind {
 	case yaml.ScalarNode:
 		val = new(ContainerSpec)
-		err := j.RawContainer.Decode(&val.Image)
-		if err != nil {
-			log.Fatal(err)
+		if !decodeNode(j.RawContainer, &val.Image) {
+			return nil
 		}
 	case yaml.MappingNode:
 		val = new(ContainerSpec)
-		err := j.RawContainer.Decode(val)
-		if err != nil {
-			log.Fatal(err)
+		if !decodeNode(j.RawContainer, val) {
+			return nil
 		}
 	}
 	return val
@@ -218,16 +300,14 @@ func (j *Job) Needs() []string {
 	switch j.RawNeeds.Kind {
 	case yaml.ScalarNode:
 		var val string
-		err := j.RawNeeds.Decode(&val)
-		if err != nil {
-			log.Fatal(err)
+		if !decodeNode(j.RawNeeds, &val) {
+			return nil
 		}
 		return []string{val}
 	case yaml.SequenceNode:
 		var val []string
-		err := j.RawNeeds.Decode(&val)
-		if err != nil {
-			log.Fatal(err)
+		if !decodeNode(j.RawNeeds, &val) {
+			return nil
 		}
 		return val
 	}
@@ -237,18 +317,40 @@ func (j *Job) Needs() []string {
 // RunsOn list for Job
 func (j *Job) RunsOn() []string {
 	switch j.RawRunsOn.Kind {
+	case yaml.MappingNode:
+		var val struct {
+			Group  string
+			Labels yaml.Node
+		}
+
+		if !decodeNode(j.RawRunsOn, &val) {
+			return nil
+		}
+
+		labels := nodeAsStringSlice(val.Labels)
+
+		if val.Group != "" {
+			labels = append(labels, val.Group)
+		}
+
+		return labels
+	default:
+		return nodeAsStringSlice(j.RawRunsOn)
+	}
+}
+
+func nodeAsStringSlice(node yaml.Node) []string {
+	switch node.Kind {
 	case yaml.ScalarNode:
 		var val string
-		err := j.RawRunsOn.Decode(&val)
-		if err != nil {
-			log.Fatal(err)
+		if !decodeNode(node, &val) {
+			return nil
 		}
 		return []string{val}
 	case yaml.SequenceNode:
 		var val []string
-		err := j.RawRunsOn.Decode(&val)
-		if err != nil {
-			log.Fatal(err)
+		if !decodeNode(node, &val) {
+			return nil
 		}
 		return val
 	}
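Editor's note: extracting nodeAsStringSlice lets `runs-on` accept a scalar, a sequence, or the new mapping form with `labels` plus an optional `group`, which is appended last. Roughly, matching the two new tests later in this diff:

	// runs-on: ubuntu-latest                            -> ["ubuntu-latest"]
	// runs-on: [ubuntu-latest, self-hosted]             -> ["ubuntu-latest", "self-hosted"]
	// runs-on: {labels: [ubuntu-latest], group: linux}  -> ["ubuntu-latest", "linux"]
	labels := job.RunsOn()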

@@ -258,8 +360,8 @@ func (j *Job) RunsOn() []string {
 func environment(yml yaml.Node) map[string]string {
 	env := make(map[string]string)
 	if yml.Kind == yaml.MappingNode {
-		if err := yml.Decode(&env); err != nil {
-			log.Fatal(err)
+		if !decodeNode(yml, &env) {
+			return nil
 		}
 	}
 	return env
@@ -274,8 +376,8 @@ func (j *Job) Environment() map[string]string {
 func (j *Job) Matrix() map[string][]interface{} {
 	if j.Strategy.RawMatrix.Kind == yaml.MappingNode {
 		var val map[string][]interface{}
-		if err := j.Strategy.RawMatrix.Decode(&val); err != nil {
-			log.Fatal(err)
+		if !decodeNode(j.Strategy.RawMatrix, &val) {
+			return nil
 		}
 		return val
 	}
@@ -286,7 +388,7 @@ func (j *Job) Matrix() map[string][]interface{} {
 // It skips includes and hard fails excludes for non-existing keys
 //
 //nolint:gocyclo
-func (j *Job) GetMatrixes() []map[string]interface{} {
+func (j *Job) GetMatrixes() ([]map[string]interface{}, error) {
 	matrixes := make([]map[string]interface{}, 0)
 	if j.Strategy != nil {
 		j.Strategy.FailFast = j.Strategy.GetFailFast()
@@ -337,7 +439,7 @@ func (j *Job) GetMatrixes() []map[string]interface{} {
 				excludes = append(excludes, e)
 			} else {
 				// We fail completely here because that's what GitHub does for non-existing matrix keys, fail on exclude, silent skip on include
-				log.Fatalf("The workflow is not valid. Matrix exclude key '%s' does not match any key within the matrix", k)
+				return nil, fmt.Errorf("the workflow is not valid. Matrix exclude key %q does not match any key within the matrix", k)
 			}
 		}
 	}
@@ -381,8 +483,9 @@ func (j *Job) GetMatrixes() []map[string]interface{} {
 		}
 	} else {
 		matrixes = append(matrixes, make(map[string]interface{}))
+		log.Debugf("Empty Strategy, matrixes=%v", matrixes)
 	}
-	return matrixes
+	return matrixes, nil
 }

 func commonKeysMatch(a map[string]interface{}, b map[string]interface{}) bool {
@@ -408,14 +511,17 @@ func commonKeysMatch2(a map[string]interface{}, b map[string]interface{}, m map[
 type JobType int

 const (
-	// StepTypeRun is all steps that have a `run` attribute
+	// JobTypeDefault is all jobs that have a `run` attribute
 	JobTypeDefault JobType = iota

-	// StepTypeReusableWorkflowLocal is all steps that have a `uses` that is a local workflow in the .github/workflows directory
+	// JobTypeReusableWorkflowLocal is all jobs that have a `uses` that is a local workflow in the .github/workflows directory
 	JobTypeReusableWorkflowLocal

-	// JobTypeReusableWorkflowRemote is all steps that have a `uses` that references a workflow file in a github repo
+	// JobTypeReusableWorkflowRemote is all jobs that have a `uses` that references a workflow file in a github repo
 	JobTypeReusableWorkflowRemote
+
+	// JobTypeInvalid represents a job which is not configured correctly
+	JobTypeInvalid
 )

 func (j JobType) String() string {
@@ -431,13 +537,28 @@ func (j JobType) String() string {
 }

 // Type returns the type of the job
-func (j *Job) Type() JobType {
-	if strings.HasPrefix(j.Uses, "./.github/workflows") && (strings.HasSuffix(j.Uses, ".yml") || strings.HasSuffix(j.Uses, ".yaml")) {
-		return JobTypeReusableWorkflowLocal
-	} else if !strings.HasPrefix(j.Uses, "./") && strings.Contains(j.Uses, ".github/workflows") && (strings.Contains(j.Uses, ".yml@") || strings.Contains(j.Uses, ".yaml@")) {
-		return JobTypeReusableWorkflowRemote
+func (j *Job) Type() (JobType, error) {
+	isReusable := j.Uses != ""
+
+	if isReusable {
+		isYaml, _ := regexp.MatchString(`\.(ya?ml)(?:$|@)`, j.Uses)
+
+		if isYaml {
+			isLocalPath := strings.HasPrefix(j.Uses, "./")
+			isRemotePath, _ := regexp.MatchString(`^[^.](.+?/){2,}.+\.ya?ml@`, j.Uses)
+			hasVersion, _ := regexp.MatchString(`\.ya?ml@`, j.Uses)
+
+			if isLocalPath {
+				return JobTypeReusableWorkflowLocal, nil
+			} else if isRemotePath && hasVersion {
+				return JobTypeReusableWorkflowRemote, nil
+			}
+		}
+
+		return JobTypeInvalid, fmt.Errorf("`uses` key references invalid workflow path '%s'. Must start with './' if it's a local workflow, or must start with '<org>/<repo>/' and include an '@' if it's a remote workflow", j.Uses)
 	}
-	return JobTypeDefault
+
+	return JobTypeDefault, nil
 }
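Editor's note: the regexes replace the old prefix/suffix checks and add explicit validation with an error. A sketch of how the classification falls out for typical `uses` values, written as in-package code:

	for _, uses := range []string{
		"./.github/workflows/workflow.yml",            // JobTypeReusableWorkflowLocal
		"remote/repo/some/path/to/workflow.yml@main",  // JobTypeReusableWorkflowRemote
		"remote/repo/some/path/to/workflow.yml",       // JobTypeInvalid: remote refs need "@<version>"
		"",                                            // JobTypeDefault: no `uses` at all
	} {
		jobType, err := (&Job{Uses: uses}).Type()
		fmt.Println(uses, jobType, err)
	}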

 // ContainerSpec is the specification of the container to use for the job
@@ -452,6 +573,9 @@ type ContainerSpec struct {
 	Args  string
 	Name  string
 	Reuse bool
+
+	// Gitea specific
+	Cmd []string `yaml:"cmd"`
 }

 // Step is the structure of one step in a job
@@ -483,16 +607,8 @@ func (s *Step) String() string {
 }

 // Environments returns string-based key=value map for a step
-// Note: all keys are uppercase
 func (s *Step) Environment() map[string]string {
-	env := environment(s.Env)
-
-	for k, v := range env {
-		delete(env, k)
-		env[strings.ToUpper(k)] = v
-	}
-
-	return env
+	return environment(s.Env)
 }

 // GetEnv gets the env for a step
@@ -522,7 +638,7 @@ func (s *Step) ShellCommand() string {
 	case "sh":
 		shellCommand = "sh -e {0}"
 	case "cmd":
-		shellCommand = "%ComSpec% /D /E:ON /V:OFF /S /C \"CALL \"{0}\"\""
+		shellCommand = "cmd /D /E:ON /V:OFF /S /C \"CALL \"{0}\"\""
 	case "powershell":
 		shellCommand = "powershell -command . '{0}'"
 	default:
@@ -631,3 +747,17 @@ func (w *Workflow) GetJobIDs() []string {
 	}
 	return ids
 }
+
+var OnDecodeNodeError = func(node yaml.Node, out interface{}, err error) {
+	log.Fatalf("Failed to decode node %v into %T: %v", node, out, err)
+}
+
+func decodeNode(node yaml.Node, out interface{}) bool {
+	if err := node.Decode(out); err != nil {
+		if OnDecodeNodeError != nil {
+			OnDecodeNodeError(node, out, err)
+		}
+		return false
+	}
+	return true
+}
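Editor's note: decodeNode centralizes yaml.Node decoding; by default a failure still exits via the OnDecodeNodeError hook, but the hook is a package variable and can be swapped out. A hypothetical test-side sketch (not part of this diff) that records decode errors instead of exiting:

	func TestDecodeNodeHook(t *testing.T) {
		var decodeErrs []error
		old := OnDecodeNodeError
		OnDecodeNodeError = func(node yaml.Node, out interface{}, err error) {
			decodeErrs = append(decodeErrs, err) // record instead of log.Fatalf
		}
		defer func() { OnDecodeNodeError = old }()

		var node yaml.Node
		_ = yaml.Unmarshal([]byte("[1, 2]"), &node)
		var out map[string]string
		// decoding a sequence into a map fails, returning false
		assert.False(t, decodeNode(*node.Content[0], &out))
		assert.Len(t, decodeErrs, 1)
	}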

@@ -153,6 +153,41 @@ jobs:
 	assert.Contains(t, workflow.On(), "pull_request")
 }

+func TestReadWorkflow_RunsOnLabels(t *testing.T) {
+	yaml := `
+name: local-action-docker-url
+
+jobs:
+  test:
+    container: nginx:latest
+    runs-on:
+      labels: ubuntu-latest
+    steps:
+      - uses: ./actions/docker-url`
+
+	workflow, err := ReadWorkflow(strings.NewReader(yaml))
+	assert.NoError(t, err, "read workflow should succeed")
+	assert.Equal(t, workflow.Jobs["test"].RunsOn(), []string{"ubuntu-latest"})
+}
+
+func TestReadWorkflow_RunsOnLabelsWithGroup(t *testing.T) {
+	yaml := `
+name: local-action-docker-url
+
+jobs:
+  test:
+    container: nginx:latest
+    runs-on:
+      labels: [ubuntu-latest]
+      group: linux
+    steps:
+      - uses: ./actions/docker-url`
+
+	workflow, err := ReadWorkflow(strings.NewReader(yaml))
+	assert.NoError(t, err, "read workflow should succeed")
+	assert.Equal(t, workflow.Jobs["test"].RunsOn(), []string{"ubuntu-latest", "linux"})
+}
+
 func TestReadWorkflow_StringContainer(t *testing.T) {
 	yaml := `
 name: local-action-docker-url
@@ -229,20 +264,81 @@ jobs:
     runs-on: ubuntu-latest
     steps:
      - run: echo
-  remote-reusable-workflow:
-    runs-on: ubuntu-latest
-    uses: remote/repo/.github/workflows/workflow.yml@main
-  local-reusable-workflow:
-    runs-on: ubuntu-latest
-    uses: ./.github/workflows/workflow.yml
+  remote-reusable-workflow-yml:
+    uses: remote/repo/some/path/to/workflow.yml@main
+  remote-reusable-workflow-yaml:
+    uses: remote/repo/some/path/to/workflow.yaml@main
+  remote-reusable-workflow-custom-path:
+    uses: remote/repo/path/to/workflow.yml@main
+  local-reusable-workflow-yml:
+    uses: ./some/path/to/workflow.yml
+  local-reusable-workflow-yaml:
+    uses: ./some/path/to/workflow.yaml
 `

 	workflow, err := ReadWorkflow(strings.NewReader(yaml))
 	assert.NoError(t, err, "read workflow should succeed")
-	assert.Len(t, workflow.Jobs, 3)
-	assert.Equal(t, workflow.Jobs["default-job"].Type(), JobTypeDefault)
-	assert.Equal(t, workflow.Jobs["remote-reusable-workflow"].Type(), JobTypeReusableWorkflowRemote)
-	assert.Equal(t, workflow.Jobs["local-reusable-workflow"].Type(), JobTypeReusableWorkflowLocal)
+	assert.Len(t, workflow.Jobs, 6)
+
+	jobType, err := workflow.Jobs["default-job"].Type()
+	assert.Equal(t, nil, err)
+	assert.Equal(t, JobTypeDefault, jobType)
+
+	jobType, err = workflow.Jobs["remote-reusable-workflow-yml"].Type()
+	assert.Equal(t, nil, err)
+	assert.Equal(t, JobTypeReusableWorkflowRemote, jobType)
+
+	jobType, err = workflow.Jobs["remote-reusable-workflow-yaml"].Type()
+	assert.Equal(t, nil, err)
+	assert.Equal(t, JobTypeReusableWorkflowRemote, jobType)
+
+	jobType, err = workflow.Jobs["remote-reusable-workflow-custom-path"].Type()
+	assert.Equal(t, nil, err)
+	assert.Equal(t, JobTypeReusableWorkflowRemote, jobType)
+
+	jobType, err = workflow.Jobs["local-reusable-workflow-yml"].Type()
+	assert.Equal(t, nil, err)
+	assert.Equal(t, JobTypeReusableWorkflowLocal, jobType)
+
+	jobType, err = workflow.Jobs["local-reusable-workflow-yaml"].Type()
+	assert.Equal(t, nil, err)
+	assert.Equal(t, JobTypeReusableWorkflowLocal, jobType)
+}
+
+func TestReadWorkflow_JobTypes_InvalidPath(t *testing.T) {
+	yaml := `
+name: invalid job definition
+
+jobs:
+  remote-reusable-workflow-missing-version:
+    uses: remote/repo/some/path/to/workflow.yml
+  remote-reusable-workflow-bad-extension:
+    uses: remote/repo/some/path/to/workflow.json
+  local-reusable-workflow-bad-extension:
+    uses: ./some/path/to/workflow.json
+  local-reusable-workflow-bad-path:
+    uses: some/path/to/workflow.yaml
+`
+
+	workflow, err := ReadWorkflow(strings.NewReader(yaml))
+	assert.NoError(t, err, "read workflow should succeed")
+	assert.Len(t, workflow.Jobs, 4)
+
+	jobType, err := workflow.Jobs["remote-reusable-workflow-missing-version"].Type()
+	assert.Equal(t, JobTypeInvalid, jobType)
+	assert.NotEqual(t, nil, err)
+
+	jobType, err = workflow.Jobs["remote-reusable-workflow-bad-extension"].Type()
+	assert.Equal(t, JobTypeInvalid, jobType)
+	assert.NotEqual(t, nil, err)
+
+	jobType, err = workflow.Jobs["local-reusable-workflow-bad-extension"].Type()
+	assert.Equal(t, JobTypeInvalid, jobType)
+	assert.NotEqual(t, nil, err)
+
+	jobType, err = workflow.Jobs["local-reusable-workflow-bad-path"].Type()
+	assert.Equal(t, JobTypeInvalid, jobType)
+	assert.NotEqual(t, nil, err)
+}

 func TestReadWorkflow_StepsTypes(t *testing.T) {
@@ -323,7 +419,8 @@ func TestReadWorkflow_Strategy(t *testing.T) {
 	w, err := NewWorkflowPlanner("testdata/strategy/push.yml", true)
 	assert.NoError(t, err)

-	p := w.PlanJob("strategy-only-max-parallel")
+	p, err := w.PlanJob("strategy-only-max-parallel")
+	assert.NoError(t, err)

 	assert.Equal(t, len(p.Stages), 1)
 	assert.Equal(t, len(p.Stages[0].Runs), 1)
@@ -331,25 +428,33 @@ func TestReadWorkflow_Strategy(t *testing.T) {
 	wf := p.Stages[0].Runs[0].Workflow

 	job := wf.Jobs["strategy-only-max-parallel"]
-	assert.Equal(t, job.GetMatrixes(), []map[string]interface{}{{}})
+	matrixes, err := job.GetMatrixes()
+	assert.NoError(t, err)
+	assert.Equal(t, matrixes, []map[string]interface{}{{}})
 	assert.Equal(t, job.Matrix(), map[string][]interface{}(nil))
 	assert.Equal(t, job.Strategy.MaxParallel, 2)
 	assert.Equal(t, job.Strategy.FailFast, true)

 	job = wf.Jobs["strategy-only-fail-fast"]
-	assert.Equal(t, job.GetMatrixes(), []map[string]interface{}{{}})
+	matrixes, err = job.GetMatrixes()
+	assert.NoError(t, err)
+	assert.Equal(t, matrixes, []map[string]interface{}{{}})
 	assert.Equal(t, job.Matrix(), map[string][]interface{}(nil))
 	assert.Equal(t, job.Strategy.MaxParallel, 4)
 	assert.Equal(t, job.Strategy.FailFast, false)

 	job = wf.Jobs["strategy-no-matrix"]
-	assert.Equal(t, job.GetMatrixes(), []map[string]interface{}{{}})
+	matrixes, err = job.GetMatrixes()
+	assert.NoError(t, err)
+	assert.Equal(t, matrixes, []map[string]interface{}{{}})
 	assert.Equal(t, job.Matrix(), map[string][]interface{}(nil))
 	assert.Equal(t, job.Strategy.MaxParallel, 2)
 	assert.Equal(t, job.Strategy.FailFast, false)

 	job = wf.Jobs["strategy-all"]
-	assert.Equal(t, job.GetMatrixes(),
+	matrixes, err = job.GetMatrixes()
+	assert.NoError(t, err)
+	assert.Equal(t, matrixes,
 		[]map[string]interface{}{
 			{"datacenter": "site-c", "node-version": "14.x", "site": "staging"},
 			{"datacenter": "site-c", "node-version": "16.x", "site": "staging"},
@@ -394,3 +499,107 @@ func TestStep_ShellCommand(t *testing.T) {
 		})
 	}
 }
+
+func TestReadWorkflow_WorkflowDispatchConfig(t *testing.T) {
+	yaml := `
+name: local-action-docker-url
+`
+	workflow, err := ReadWorkflow(strings.NewReader(yaml))
+	assert.NoError(t, err, "read workflow should succeed")
+	workflowDispatch := workflow.WorkflowDispatchConfig()
+	assert.Nil(t, workflowDispatch)
+
+	yaml = `
+name: local-action-docker-url
+on: push
+`
+	workflow, err = ReadWorkflow(strings.NewReader(yaml))
+	assert.NoError(t, err, "read workflow should succeed")
+	workflowDispatch = workflow.WorkflowDispatchConfig()
+	assert.Nil(t, workflowDispatch)
+
+	yaml = `
+name: local-action-docker-url
+on: workflow_dispatch
+`
+	workflow, err = ReadWorkflow(strings.NewReader(yaml))
+	assert.NoError(t, err, "read workflow should succeed")
+	workflowDispatch = workflow.WorkflowDispatchConfig()
+	assert.NotNil(t, workflowDispatch)
+	assert.Nil(t, workflowDispatch.Inputs)
+
+	yaml = `
+name: local-action-docker-url
+on: [push, pull_request]
+`
+	workflow, err = ReadWorkflow(strings.NewReader(yaml))
+	assert.NoError(t, err, "read workflow should succeed")
+	workflowDispatch = workflow.WorkflowDispatchConfig()
+	assert.Nil(t, workflowDispatch)
+
+	yaml = `
+name: local-action-docker-url
+on: [push, workflow_dispatch]
+`
+	workflow, err = ReadWorkflow(strings.NewReader(yaml))
+	assert.NoError(t, err, "read workflow should succeed")
+	workflowDispatch = workflow.WorkflowDispatchConfig()
+	assert.NotNil(t, workflowDispatch)
+	assert.Nil(t, workflowDispatch.Inputs)
+
+	yaml = `
+name: local-action-docker-url
+on:
+  - push
+  - workflow_dispatch
+`
+	workflow, err = ReadWorkflow(strings.NewReader(yaml))
+	assert.NoError(t, err, "read workflow should succeed")
+	workflowDispatch = workflow.WorkflowDispatchConfig()
+	assert.NotNil(t, workflowDispatch)
+	assert.Nil(t, workflowDispatch.Inputs)
+
+	yaml = `
+name: local-action-docker-url
+on:
+  push:
+  pull_request:
+`
+	workflow, err = ReadWorkflow(strings.NewReader(yaml))
+	assert.NoError(t, err, "read workflow should succeed")
+	workflowDispatch = workflow.WorkflowDispatchConfig()
+	assert.Nil(t, workflowDispatch)
+
+	yaml = `
+name: local-action-docker-url
+on:
+  push:
+  pull_request:
+  workflow_dispatch:
+    inputs:
+      logLevel:
+        description: 'Log level'
+        required: true
+        default: 'warning'
+        type: choice
+        options:
+          - info
+          - warning
+          - debug
+`
+	workflow, err = ReadWorkflow(strings.NewReader(yaml))
+	assert.NoError(t, err, "read workflow should succeed")
+	workflowDispatch = workflow.WorkflowDispatchConfig()
+	assert.NotNil(t, workflowDispatch)
+	assert.Equal(t, WorkflowDispatchInput{
+		Default:     "warning",
+		Description: "Log level",
+		Options: []string{
+			"info",
+			"warning",
+			"debug",
+		},
+		Required: true,
+		Type:     "choice",
+	}, workflowDispatch.Inputs["logLevel"])
+}

@@ -3,6 +3,7 @@ package runner
 import (
 	"context"
 	"embed"
+	"errors"
 	"fmt"
 	"io"
 	"io/fs"
@@ -14,6 +15,7 @@ import (
 	"strings"

 	"github.com/kballard/go-shellquote"
+
 	"github.com/nektos/act/pkg/common"
 	"github.com/nektos/act/pkg/container"
 	"github.com/nektos/act/pkg/model"
@@ -29,10 +31,9 @@ type actionStep interface {

 type readAction func(ctx context.Context, step *model.Step, actionDir string, actionPath string, readFile actionYamlReader, writeFile fileWriter) (*model.Action, error)

-type (
-	actionYamlReader func(filename string) (io.Reader, io.Closer, error)
-	fileWriter       func(filename string, data []byte, perm fs.FileMode) error
-)
+type actionYamlReader func(filename string) (io.Reader, io.Closer, error)
+
+type fileWriter func(filename string, data []byte, perm fs.FileMode) error

 type runAction func(step actionStep, actionDir string, remoteAction *remoteAction) common.Executor
|
||||
@@ -41,11 +42,24 @@ var trampoline embed.FS
|
||||
|
||||
func readActionImpl(ctx context.Context, step *model.Step, actionDir string, actionPath string, readFile actionYamlReader, writeFile fileWriter) (*model.Action, error) {
|
||||
logger := common.Logger(ctx)
|
||||
allErrors := []error{}
|
||||
addError := func(fileName string, err error) {
|
||||
if err != nil {
|
||||
allErrors = append(allErrors, fmt.Errorf("failed to read '%s' from action '%s' with path '%s' of step %w", fileName, step.String(), actionPath, err))
|
||||
} else {
|
||||
// One successful read, clear error state
|
||||
allErrors = nil
|
||||
}
|
||||
}
|
||||
reader, closer, err := readFile("action.yml")
|
||||
addError("action.yml", err)
|
||||
if os.IsNotExist(err) {
|
||||
reader, closer, err = readFile("action.yaml")
|
||||
if err != nil {
|
||||
if _, closer, err2 := readFile("Dockerfile"); err2 == nil {
|
||||
addError("action.yaml", err)
|
||||
if os.IsNotExist(err) {
|
||||
_, closer, err := readFile("Dockerfile")
|
||||
addError("Dockerfile", err)
|
||||
if err == nil {
|
||||
closer.Close()
|
||||
action := &model.Action{
|
||||
Name: "(Synthetic)",
|
||||
@@ -90,10 +104,10 @@ func readActionImpl(ctx context.Context, step *model.Step, actionDir string, act
|
||||
return action, nil
|
||||
}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if allErrors != nil {
|
||||
return nil, errors.Join(allErrors...)
|
||||
}
|
||||
defer closer.Close()
|
||||

@@ -110,9 +124,6 @@ func maybeCopyToActionDir(ctx context.Context, step actionStep, actionDir string
 	if stepModel.Type() != model.StepTypeUsesActionRemote {
 		return nil
 	}
-	if err := removeGitIgnore(ctx, actionDir); err != nil {
-		return err
-	}

 	var containerActionDirCopy string
 	containerActionDirCopy = strings.TrimSuffix(containerActionDir, actionPath)
@@ -121,6 +132,21 @@ func maybeCopyToActionDir(ctx context.Context, step actionStep, actionDir string
 	if !strings.HasSuffix(containerActionDirCopy, `/`) {
 		containerActionDirCopy += `/`
 	}
+
+	if rc.Config != nil && rc.Config.ActionCache != nil {
+		raction := step.(*stepActionRemote)
+		ta, err := rc.Config.ActionCache.GetTarArchive(ctx, raction.cacheDir, raction.resolvedSha, "")
+		if err != nil {
+			return err
+		}
+		defer ta.Close()
+		return rc.JobContainer.CopyTarStream(ctx, containerActionDirCopy, ta)
+	}
+
+	if err := removeGitIgnore(ctx, actionDir); err != nil {
+		return err
+	}
+
 	return rc.JobContainer.CopyDir(containerActionDirCopy, actionDir+"/", rc.Config.UseGitIgnore)(ctx)
 }

@@ -149,13 +175,15 @@ func runActionImpl(step actionStep, actionDir string, remoteAction *remoteAction
 	logger.Debugf("type=%v actionDir=%s actionPath=%s workdir=%s actionCacheDir=%s actionName=%s containerActionDir=%s", stepModel.Type(), actionDir, actionPath, rc.Config.Workdir, rc.ActionCacheDir(), actionName, containerActionDir)

 	switch action.Runs.Using {
-	case model.ActionRunsUsingNode12, model.ActionRunsUsingNode16:
+	case model.ActionRunsUsingNode12, model.ActionRunsUsingNode16, model.ActionRunsUsingNode20:
 		if err := maybeCopyToActionDir(ctx, step, actionDir, actionPath, containerActionDir); err != nil {
 			return err
 		}
 		containerArgs := []string{"node", path.Join(containerActionDir, action.Runs.Main)}
 		logger.Debugf("executing remote job container: %s", containerArgs)
+
+		rc.ApplyExtraPath(ctx, step.getEnv())
+
 		return rc.execJobContainer(containerArgs, *step.getEnv(), "", "")(ctx)
 	case model.ActionRunsUsingDocker:
 		location := actionLocation
@@ -173,21 +201,31 @@ func runActionImpl(step actionStep, actionDir string, remoteAction *remoteAction
 		if err := maybeCopyToActionDir(ctx, step, actionDir, actionPath, containerActionDir); err != nil {
 			return err
 		}
-		containerArgs := []string{"go", "run", path.Join(containerActionDir, action.Runs.Main)}
-		logger.Debugf("executing remote job container: %s", containerArgs)
-		return rc.execJobContainer(containerArgs, *step.getEnv(), "", "")(ctx)
+
+		rc.ApplyExtraPath(ctx, step.getEnv())
+
+		execFileName := fmt.Sprintf("%s.out", action.Runs.Main)
+		buildArgs := []string{"go", "build", "-o", execFileName, action.Runs.Main}
+		execArgs := []string{filepath.Join(containerActionDir, execFileName)}
+
+		return common.NewPipelineExecutor(
+			rc.execJobContainer(buildArgs, *step.getEnv(), "", containerActionDir),
+			rc.execJobContainer(execArgs, *step.getEnv(), "", ""),
+		)(ctx)
 	default:
 		return fmt.Errorf(fmt.Sprintf("The runs.using key must be one of: %v, got %s", []string{
 			model.ActionRunsUsingDocker,
 			model.ActionRunsUsingNode12,
 			model.ActionRunsUsingNode16,
+			model.ActionRunsUsingNode20,
 			model.ActionRunsUsingComposite,
 			model.ActionRunsUsingGo,
 		}, action.Runs.Using))
 	}
 }
 }

-func setupActionEnv(ctx context.Context, step actionStep, remoteAction *remoteAction) error {
+func setupActionEnv(ctx context.Context, step actionStep, _ *remoteAction) error {
 	rc := step.getRunContext()

 	// A few fields in the environment (e.g. GITHUB_ACTION_REPOSITORY)
@@ -228,14 +266,17 @@ func execAsDocker(ctx context.Context, step actionStep, actionName string, based

 	var prepImage common.Executor
 	var image string
+	forcePull := false
 	if strings.HasPrefix(action.Runs.Image, "docker://") {
 		image = strings.TrimPrefix(action.Runs.Image, "docker://")
+		// Apply forcePull only for prebuild docker images
+		forcePull = rc.Config.ForcePull
 	} else {
 		// "-dockeraction" enshures that "./", "./test " won't get converted to "act-:latest", "act-test-:latest" which are invalid docker image names
 		image = fmt.Sprintf("%s-dockeraction:%s", regexp.MustCompile("[^a-zA-Z0-9]").ReplaceAllString(actionName, "-"), "latest")
 		image = fmt.Sprintf("act-%s", strings.TrimLeft(image, "-"))
 		image = strings.ToLower(image)
-		contextDir := filepath.Join(basedir, action.Runs.Main)
+		contextDir, fileName := filepath.Split(filepath.Join(basedir, action.Runs.Image))

 		anyArchExists, err := container.ImageExistsLocally(ctx, image, "any")
 		if err != nil {
@@ -259,15 +300,27 @@ func execAsDocker(ctx context.Context, step actionStep, actionName string, based

 		if !correctArchExists || rc.Config.ForceRebuild {
 			logger.Debugf("image '%s' for architecture '%s' will be built from context '%s", image, rc.Config.ContainerArchitecture, contextDir)
 			var actionContainer container.Container
+			var buildContext io.ReadCloser
 			if localAction {
 				actionContainer = rc.JobContainer
+				buildContext, err = rc.JobContainer.GetContainerArchive(ctx, contextDir+"/.")
+				if err != nil {
+					return err
+				}
+				defer buildContext.Close()
+			} else if rc.Config.ActionCache != nil {
+				rstep := step.(*stepActionRemote)
+				buildContext, err = rc.Config.ActionCache.GetTarArchive(ctx, rstep.cacheDir, rstep.resolvedSha, contextDir)
+				if err != nil {
+					return err
+				}
+				defer buildContext.Close()
 			}
 			prepImage = container.NewDockerBuildExecutor(container.NewDockerBuildExecutorInput{
-				ContextDir: contextDir,
-				ImageTag:   image,
-				Container:  actionContainer,
-				Platform:   rc.Config.ContainerArchitecture,
+				ContextDir:   contextDir,
+				Dockerfile:   fileName,
+				ImageTag:     image,
+				BuildContext: buildContext,
+				Platform:     rc.Config.ContainerArchitecture,
 			})
 		} else {
 			logger.Debugf("image '%s' for architecture '%s' already exists", image, rc.Config.ContainerArchitecture)
@@ -296,7 +349,7 @@ func execAsDocker(ctx context.Context, step actionStep, actionName string, based
 	stepContainer := newStepContainer(ctx, step, image, cmd, entrypoint)
 	return common.NewPipelineExecutor(
 		prepImage,
-		stepContainer.Pull(rc.Config.ForcePull),
+		stepContainer.Pull(forcePull),
 		stepContainer.Remove().IfBool(!rc.Config.ReuseContainers),
 		stepContainer.Create(rc.Config.ContainerCapAdd, rc.Config.ContainerCapDrop),
 		stepContainer.Start(true),
@@ -320,13 +373,13 @@ func evalDockerArgs(ctx context.Context, step step, action *model.Action, cmd *[
 			inputs[k] = eval.Interpolate(ctx, v)
 		}
 	}
-	mergeIntoMap(step.getEnv(), inputs)
+	mergeIntoMap(step, step.getEnv(), inputs)

 	stepEE := rc.NewStepExpressionEvaluator(ctx, step)
 	for i, v := range *cmd {
 		(*cmd)[i] = stepEE.Interpolate(ctx, v)
 	}
-	mergeIntoMap(step.getEnv(), action.Runs.Env)
+	mergeIntoMap(step, step.getEnv(), action.Runs.Env)

 	ee := rc.NewStepExpressionEvaluator(ctx, step)
 	for k, v := range *step.getEnv() {
@@ -357,33 +410,38 @@ func newStepContainer(ctx context.Context, step step, image string, cmd []string
 	envList = append(envList, fmt.Sprintf("%s=%s", "RUNNER_TEMP", "/tmp"))

 	binds, mounts := rc.GetBindsAndMounts()

+	networkMode := fmt.Sprintf("container:%s", rc.jobContainerName())
+	if rc.IsHostEnv(ctx) {
+		networkMode = "default"
+	}
 	stepContainer := container.NewContainer(&container.NewContainerInput{
-		Cmd:         cmd,
-		Entrypoint:  entrypoint,
-		WorkingDir:  rc.JobContainer.ToContainerPath(rc.Config.Workdir),
-		Image:       image,
-		Username:    rc.Config.Secrets["DOCKER_USERNAME"],
-		Password:    rc.Config.Secrets["DOCKER_PASSWORD"],
-		Name:        createSimpleContainerName(rc.jobContainerName(), "STEP-"+stepModel.ID),
-		Env:         envList,
-		Mounts:      mounts,
-		NetworkMode: fmt.Sprintf("container:%s", rc.jobContainerName()),
-		Binds:       binds,
-		Stdout:      logWriter,
-		Stderr:      logWriter,
-		Privileged:  rc.Config.Privileged,
-		UsernsMode:  rc.Config.UsernsMode,
-		Platform:    rc.Config.ContainerArchitecture,
-		AutoRemove:  rc.Config.AutoRemove,
+		Cmd:          cmd,
+		Entrypoint:   entrypoint,
+		WorkingDir:   rc.JobContainer.ToContainerPath(rc.Config.Workdir),
+		Image:        image,
+		Username:     rc.Config.Secrets["DOCKER_USERNAME"],
+		Password:     rc.Config.Secrets["DOCKER_PASSWORD"],
+		Name:         createSimpleContainerName(rc.jobContainerName(), "STEP-"+stepModel.ID),
+		Env:          envList,
+		Mounts:       mounts,
+		NetworkMode:  networkMode,
+		Binds:        binds,
+		Stdout:       logWriter,
+		Stderr:       logWriter,
+		Privileged:   rc.Config.Privileged,
+		UsernsMode:   rc.Config.UsernsMode,
+		Platform:     rc.Config.ContainerArchitecture,
+		Options:      rc.Config.ContainerOptions,
+		AutoRemove:   rc.Config.AutoRemove,
+		ValidVolumes: rc.Config.ValidVolumes,
 	})
 	return stepContainer
 }

 func populateEnvsFromSavedState(env *map[string]string, step actionStep, rc *RunContext) {
-	stepResult := rc.StepResults[step.getStepModel().ID]
-	if stepResult != nil {
-		for name, value := range stepResult.State {
+	state, ok := rc.IntraActionState[step.getStepModel().ID]
+	if ok {
+		for name, value := range state {
 			envName := fmt.Sprintf("STATE_%s", name)
 			(*env)[envName] = value
 		}
 	}
@@ -450,7 +508,9 @@ func hasPreStep(step actionStep) common.Conditional {
 		action := step.getActionModel()
 		return action.Runs.Using == model.ActionRunsUsingComposite ||
 			((action.Runs.Using == model.ActionRunsUsingNode12 ||
-				action.Runs.Using == model.ActionRunsUsingNode16) &&
+				action.Runs.Using == model.ActionRunsUsingNode16 ||
+				action.Runs.Using == model.ActionRunsUsingNode20 ||
+				action.Runs.Using == model.ActionRunsUsingGo) &&
 				action.Runs.Pre != "")
 	}
 }
@@ -465,7 +525,7 @@ func runPreStep(step actionStep) common.Executor {
 	action := step.getActionModel()

 	switch action.Runs.Using {
-	case model.ActionRunsUsingNode12, model.ActionRunsUsingNode16:
+	case model.ActionRunsUsingNode12, model.ActionRunsUsingNode16, model.ActionRunsUsingNode20:
 		// defaults in pre steps were missing, however provided inputs are available
 		populateEnvsFromInput(ctx, step.getEnv(), action, rc)
 		// todo: refactor into step
@@ -473,7 +533,7 @@ func runPreStep(step actionStep) common.Executor {
 	var actionPath string
 	if _, ok := step.(*stepActionRemote); ok {
 		actionPath = newRemoteAction(stepModel.Uses).Path
-		actionDir = fmt.Sprintf("%s/%s", rc.ActionCacheDir(), strings.ReplaceAll(stepModel.Uses, "/", "-"))
+		actionDir = fmt.Sprintf("%s/%s", rc.ActionCacheDir(), safeFilename(stepModel.Uses))
 	} else {
 		actionDir = filepath.Join(rc.Config.Workdir, stepModel.Uses)
 		actionPath = ""
@@ -495,6 +555,8 @@ func runPreStep(step actionStep) common.Executor {
 		containerArgs := []string{"node", path.Join(containerActionDir, action.Runs.Pre)}
 		logger.Debugf("executing remote job container: %s", containerArgs)
+
+		rc.ApplyExtraPath(ctx, step.getEnv())
+
 		return rc.execJobContainer(containerArgs, *step.getEnv(), "", "")(ctx)

 	case model.ActionRunsUsingComposite:
@@ -502,8 +564,48 @@ func runPreStep(step actionStep) common.Executor {
 			step.getCompositeRunContext(ctx)
 		}

-		return step.getCompositeSteps().pre(ctx)
+		if steps := step.getCompositeSteps(); steps != nil && steps.pre != nil {
+			return steps.pre(ctx)
+		}
+		return fmt.Errorf("missing steps in composite action")
+
+	case model.ActionRunsUsingGo:
+		// defaults in pre steps were missing, however provided inputs are available
+		populateEnvsFromInput(ctx, step.getEnv(), action, rc)
+		// todo: refactor into step
+		var actionDir string
+		var actionPath string
+		if _, ok := step.(*stepActionRemote); ok {
+			actionPath = newRemoteAction(stepModel.Uses).Path
+			actionDir = fmt.Sprintf("%s/%s", rc.ActionCacheDir(), safeFilename(stepModel.Uses))
+		} else {
+			actionDir = filepath.Join(rc.Config.Workdir, stepModel.Uses)
+			actionPath = ""
+		}
+
+		actionLocation := ""
+		if actionPath != "" {
+			actionLocation = path.Join(actionDir, actionPath)
+		} else {
+			actionLocation = actionDir
+		}
+
+		_, containerActionDir := getContainerActionPaths(stepModel, actionLocation, rc)
+
+		if err := maybeCopyToActionDir(ctx, step, actionDir, actionPath, containerActionDir); err != nil {
+			return err
+		}
+
+		rc.ApplyExtraPath(ctx, step.getEnv())
+
+		execFileName := fmt.Sprintf("%s.out", action.Runs.Pre)
+		buildArgs := []string{"go", "build", "-o", execFileName, action.Runs.Pre}
+		execArgs := []string{filepath.Join(containerActionDir, execFileName)}
+
+		return common.NewPipelineExecutor(
+			rc.execJobContainer(buildArgs, *step.getEnv(), "", containerActionDir),
+			rc.execJobContainer(execArgs, *step.getEnv(), "", ""),
+		)(ctx)
 	default:
 		return nil
 	}

@@ -540,7 +642,9 @@ func hasPostStep(step actionStep) common.Conditional {
 		action := step.getActionModel()
 		return action.Runs.Using == model.ActionRunsUsingComposite ||
 			((action.Runs.Using == model.ActionRunsUsingNode12 ||
-				action.Runs.Using == model.ActionRunsUsingNode16) &&
+				action.Runs.Using == model.ActionRunsUsingNode16 ||
+				action.Runs.Using == model.ActionRunsUsingNode20 ||
+				action.Runs.Using == model.ActionRunsUsingGo) &&
 				action.Runs.Post != "")
 	}
 }
@@ -559,7 +663,7 @@ func runPostStep(step actionStep) common.Executor {
 	var actionPath string
 	if _, ok := step.(*stepActionRemote); ok {
 		actionPath = newRemoteAction(stepModel.Uses).Path
-		actionDir = fmt.Sprintf("%s/%s", rc.ActionCacheDir(), strings.ReplaceAll(stepModel.Uses, "/", "-"))
+		actionDir = fmt.Sprintf("%s/%s", rc.ActionCacheDir(), safeFilename(stepModel.Uses))
 	} else {
 		actionDir = filepath.Join(rc.Config.Workdir, stepModel.Uses)
 		actionPath = ""
@@ -575,13 +679,15 @@ func runPostStep(step actionStep) common.Executor {
 	_, containerActionDir := getContainerActionPaths(stepModel, actionLocation, rc)

 	switch action.Runs.Using {
-	case model.ActionRunsUsingNode12, model.ActionRunsUsingNode16:
+	case model.ActionRunsUsingNode12, model.ActionRunsUsingNode16, model.ActionRunsUsingNode20:

 		populateEnvsFromSavedState(step.getEnv(), step, rc)

 		containerArgs := []string{"node", path.Join(containerActionDir, action.Runs.Post)}
 		logger.Debugf("executing remote job container: %s", containerArgs)
+
+		rc.ApplyExtraPath(ctx, step.getEnv())
+
 		return rc.execJobContainer(containerArgs, *step.getEnv(), "", "")(ctx)

 	case model.ActionRunsUsingComposite:
@@ -589,7 +695,23 @@ func runPostStep(step actionStep) common.Executor {
 			return err
 		}

-		return step.getCompositeSteps().post(ctx)
+		if steps := step.getCompositeSteps(); steps != nil && steps.post != nil {
+			return steps.post(ctx)
+		}
+		return fmt.Errorf("missing steps in composite action")
+
+	case model.ActionRunsUsingGo:
+		populateEnvsFromSavedState(step.getEnv(), step, rc)
+		rc.ApplyExtraPath(ctx, step.getEnv())
+
+		execFileName := fmt.Sprintf("%s.out", action.Runs.Post)
+		buildArgs := []string{"go", "build", "-o", execFileName, action.Runs.Post}
+		execArgs := []string{filepath.Join(containerActionDir, execFileName)}
+
+		return common.NewPipelineExecutor(
+			rc.execJobContainer(buildArgs, *step.getEnv(), "", containerActionDir),
+			rc.execJobContainer(execArgs, *step.getEnv(), "", ""),
+		)(ctx)

 	default:
 		return nil

181  pkg/runner/action_cache.go  Normal file
@@ -0,0 +1,181 @@
+package runner
+
+import (
+	"archive/tar"
+	"context"
+	"crypto/rand"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io"
+	"io/fs"
+	"path"
+	"strings"
+
+	git "github.com/go-git/go-git/v5"
+	config "github.com/go-git/go-git/v5/config"
+	"github.com/go-git/go-git/v5/plumbing"
+	"github.com/go-git/go-git/v5/plumbing/object"
+	"github.com/go-git/go-git/v5/plumbing/transport"
+	"github.com/go-git/go-git/v5/plumbing/transport/http"
+)
+
+type ActionCache interface {
+	Fetch(ctx context.Context, cacheDir, url, ref, token string) (string, error)
+	GetTarArchive(ctx context.Context, cacheDir, sha, includePrefix string) (io.ReadCloser, error)
+}
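Editor's note: the interface decouples the runner from go-git. Fetch resolves a ref to a commit SHA inside a bare cache repository, and GetTarArchive streams a subtree of that commit as a tarball. A usage sketch, mirroring TestActionCache further down (repository name and path are illustrative):

	cache := &GoGitActionCache{Path: "/tmp/act-cache"}
	sha, err := cache.Fetch(ctx, "actions/checkout", "https://github.com/actions/checkout", "v4", "")
	if err != nil {
		return err
	}
	tarball, err := cache.GetTarArchive(ctx, "actions/checkout", sha, "")
	if err != nil {
		return err
	}
	defer tarball.Close()
	// the tarball can then be streamed into the job container via CopyTarStream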
+
+type GoGitActionCache struct {
+	Path string
+}
+
+func (c GoGitActionCache) Fetch(ctx context.Context, cacheDir, url, ref, token string) (string, error) {
+	gitPath := path.Join(c.Path, safeFilename(cacheDir)+".git")
+	gogitrepo, err := git.PlainInit(gitPath, true)
+	if errors.Is(err, git.ErrRepositoryAlreadyExists) {
+		gogitrepo, err = git.PlainOpen(gitPath)
+	}
+	if err != nil {
+		return "", err
+	}
+	tmpBranch := make([]byte, 12)
+	if _, err := rand.Read(tmpBranch); err != nil {
+		return "", err
+	}
+	branchName := hex.EncodeToString(tmpBranch)
+	var refSpec config.RefSpec
+	spec := config.RefSpec(ref + ":" + branchName)
+	tagOrSha := false
+	if spec.IsExactSHA1() {
+		refSpec = spec
+	} else if strings.HasPrefix(ref, "refs/") {
+		refSpec = config.RefSpec(ref + ":refs/heads/" + branchName)
+	} else {
+		tagOrSha = true
+		refSpec = config.RefSpec("refs/*/" + ref + ":refs/heads/*/" + branchName)
+	}
+	var auth transport.AuthMethod
+	if token != "" {
+		auth = &http.BasicAuth{
+			Username: "token",
+			Password: token,
+		}
+	}
+	remote, err := gogitrepo.CreateRemoteAnonymous(&config.RemoteConfig{
+		Name: "anonymous",
+		URLs: []string{
+			url,
+		},
+	})
+	if err != nil {
+		return "", err
+	}
+	defer func() {
+		if refs, err := gogitrepo.References(); err == nil {
+			_ = refs.ForEach(func(r *plumbing.Reference) error {
+				if strings.Contains(r.Name().String(), branchName) {
+					return gogitrepo.DeleteBranch(r.Name().String())
+				}
+				return nil
+			})
+		}
+	}()
+	if err := remote.FetchContext(ctx, &git.FetchOptions{
+		RefSpecs: []config.RefSpec{
+			refSpec,
+		},
+		Auth:  auth,
+		Force: true,
+	}); err != nil {
+		if tagOrSha && errors.Is(err, git.NoErrAlreadyUpToDate) {
+			return "", fmt.Errorf("couldn't find remote ref \"%s\"", ref)
+		}
+		return "", err
+	}
+	if tagOrSha {
+		for _, prefix := range []string{"refs/heads/tags/", "refs/heads/heads/"} {
+			hash, err := gogitrepo.ResolveRevision(plumbing.Revision(prefix + branchName))
+			if err == nil {
+				return hash.String(), nil
+			}
+		}
+	}
+	hash, err := gogitrepo.ResolveRevision(plumbing.Revision(branchName))
+	if err != nil {
+		return "", err
+	}
+	return hash.String(), nil
+}
+
+func (c GoGitActionCache) GetTarArchive(ctx context.Context, cacheDir, sha, includePrefix string) (io.ReadCloser, error) {
+	gitPath := path.Join(c.Path, safeFilename(cacheDir)+".git")
+	gogitrepo, err := git.PlainOpen(gitPath)
+	if err != nil {
+		return nil, err
+	}
+	commit, err := gogitrepo.CommitObject(plumbing.NewHash(sha))
+	if err != nil {
+		return nil, err
+	}
+	files, err := commit.Files()
+	if err != nil {
+		return nil, err
+	}
+	rpipe, wpipe := io.Pipe()
+	// Interrupt io.Copy using ctx
+	ch := make(chan int, 1)
+	go func() {
+		select {
+		case <-ctx.Done():
+			wpipe.CloseWithError(ctx.Err())
+		case <-ch:
+		}
+	}()
+	go func() {
+		defer wpipe.Close()
+		defer close(ch)
+		tw := tar.NewWriter(wpipe)
+		cleanIncludePrefix := path.Clean(includePrefix)
+		wpipe.CloseWithError(files.ForEach(func(f *object.File) error {
+			if err := ctx.Err(); err != nil {
+				return err
+			}
+			name := f.Name
+			if strings.HasPrefix(name, cleanIncludePrefix+"/") {
+				name = name[len(cleanIncludePrefix)+1:]
+			} else if cleanIncludePrefix != "." && name != cleanIncludePrefix {
+				return nil
+			}
+			fmode, err := f.Mode.ToOSFileMode()
+			if err != nil {
+				return err
+			}
+			if fmode&fs.ModeSymlink == fs.ModeSymlink {
+				content, err := f.Contents()
+				if err != nil {
+					return err
+				}
+				return tw.WriteHeader(&tar.Header{
+					Name:     name,
+					Mode:     int64(fmode),
+					Linkname: content,
+				})
+			}
+			err = tw.WriteHeader(&tar.Header{
+				Name: name,
+				Mode: int64(fmode),
+				Size: f.Size,
+			})
+			if err != nil {
+				return err
+			}
+			reader, err := f.Reader()
+			if err != nil {
+				return err
+			}
+			_, err = io.Copy(tw, reader)
+			return err
+		}))
+	}()
+	return rpipe, err
+}

37  pkg/runner/action_cache_test.go  Normal file
@@ -0,0 +1,37 @@
+package runner
+
+import (
+	"archive/tar"
+	"bytes"
+	"context"
+	"io"
+	"os"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+//nolint:gosec
+func TestActionCache(t *testing.T) {
+	a := assert.New(t)
+	cache := &GoGitActionCache{
+		Path: os.TempDir(),
+	}
+	ctx := context.Background()
+	sha, err := cache.Fetch(ctx, "christopherhx/script", "https://github.com/christopherhx/script", "main", "")
+	a.NoError(err)
+	a.NotEmpty(sha)
+	atar, err := cache.GetTarArchive(ctx, "christopherhx/script", sha, "node_modules")
+	a.NoError(err)
+	a.NotEmpty(atar)
+	mytar := tar.NewReader(atar)
+	th, err := mytar.Next()
+	a.NoError(err)
+	a.NotEqual(0, th.Size)
+	buf := &bytes.Buffer{}
+	// G110: Potential DoS vulnerability via decompression bomb (gosec)
+	_, err = io.Copy(buf, mytar)
+	a.NoError(err)
+	str := buf.String()
+	a.NotEmpty(str)
+}

@@ -37,6 +37,9 @@ func evaluateCompositeInputAndEnv(ctx context.Context, parent *RunContext, step
 			env[envKey] = ee.Interpolate(ctx, input.Default)
 		}
 	}
+	gh := step.getGithubContext(ctx)
+	env["GITHUB_ACTION_REPOSITORY"] = gh.ActionRepository
+	env["GITHUB_ACTION_REF"] = gh.ActionRef

 	return env
 }
@@ -53,11 +56,11 @@ func newCompositeRunContext(ctx context.Context, parent *RunContext, step action
 		Name:    parent.Name,
 		JobName: parent.JobName,
 		Run: &model.Run{
-			JobID: "composite-job",
+			JobID: parent.Run.JobID,
 			Workflow: &model.Workflow{
 				Name: parent.Run.Workflow.Name,
 				Jobs: map[string]*model.Job{
-					"composite-job": {},
+					parent.Run.JobID: {},
 				},
 			},
 		},
@@ -66,6 +69,7 @@ func newCompositeRunContext(ctx context.Context, parent *RunContext, step action
 		JobContainer: parent.JobContainer,
 		ActionPath:   actionPath,
 		Env:          env,
+		GlobalEnv:    parent.GlobalEnv,
 		Masks:        parent.Masks,
 		ExtraPath:    parent.ExtraPath,
 		Parent:       parent,
@@ -85,6 +89,10 @@ func execAsComposite(step actionStep) common.Executor {

 	steps := step.getCompositeSteps()

+	if steps == nil || steps.main == nil {
+		return fmt.Errorf("missing steps in composite action")
+	}
+
 	ctx = WithCompositeLogger(ctx, &compositeRC.Masks)

 	err := steps.main(ctx)
@@ -99,6 +107,16 @@ func execAsComposite(step actionStep) common.Executor {

 	rc.Masks = append(rc.Masks, compositeRC.Masks...)
 	rc.ExtraPath = compositeRC.ExtraPath
+	// compositeRC.Env is dirty, contains INPUT_ and merged step env, only rely on compositeRC.GlobalEnv
+	mergeIntoMap := mergeIntoMapCaseSensitive
+	if rc.JobContainer.IsEnvironmentCaseInsensitive() {
+		mergeIntoMap = mergeIntoMapCaseInsensitive
+	}
+	if rc.GlobalEnv == nil {
+		rc.GlobalEnv = map[string]string{}
+	}
+	mergeIntoMap(rc.GlobalEnv, compositeRC.GlobalEnv)
+	mergeIntoMap(rc.Env, compositeRC.GlobalEnv)

 	return err
 }
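Editor's note: mergeIntoMapCaseSensitive and mergeIntoMapCaseInsensitive are helpers defined elsewhere in the runner package; the case-insensitive variant is selected when the job container reports Windows-style environment semantics. A rough sketch of what the case-insensitive merge behavior amounts to, offered only as an assumption about those helpers:

	// Illustrative only: overwrite an existing key that matches ignoring case,
	// keeping the existing spelling of the name.
	func mergeIntoMapCaseInsensitiveSketch(target map[string]string, maps ...map[string]string) {
		for _, m := range maps {
			for k, v := range m {
				merged := false
				for existing := range target {
					if strings.EqualFold(existing, k) {
						target[existing] = v
						merged = true
						break
					}
				}
				if !merged {
					target[k] = v
				}
			}
		}
	}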

@@ -201,10 +201,11 @@ func TestActionRunner(t *testing.T) {
 			},
 			CurrentStep: "post-step",
 			StepResults: map[string]*model.StepResult{
+				"step": {},
+			},
+			IntraActionState: map[string]map[string]string{
 				"step": {
-					State: map[string]string{
-						"name": "state value",
-					},
+					"name": "state value",
 				},
 			},

77  pkg/runner/command.go  Executable file → Normal file
@@ -16,22 +16,27 @@ func init() {
 	commandPatternADO = regexp.MustCompile("^##\\[([^ ]+)( (.+))?]([^\r\n]*)[\r\n]+$")
 }

+func tryParseRawActionCommand(line string) (command string, kvPairs map[string]string, arg string, ok bool) {
+	if m := commandPatternGA.FindStringSubmatch(line); m != nil {
+		command = m[1]
+		kvPairs = parseKeyValuePairs(m[3], ",")
+		arg = m[4]
+		ok = true
+	} else if m := commandPatternADO.FindStringSubmatch(line); m != nil {
+		command = m[1]
+		kvPairs = parseKeyValuePairs(m[3], ";")
+		arg = m[4]
+		ok = true
+	}
+	return
+}
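Editor's note: extracting tryParseRawActionCommand lets both the command handler below and other log processing share one parser. What it extracts from the two supported syntaxes, grounded in the command tests later in this diff:

	// GitHub Actions syntax: ::command key=value::argument
	cmd, kv, arg, ok := tryParseRawActionCommand("::save-state name=pid::123\n")
	// ok == true, cmd == "save-state", kv["name"] == "pid", arg == "123"

	// Azure DevOps syntax: ##[command]argument (pairs use ';' separators)
	cmd, kv, arg, ok = tryParseRawActionCommand("##[add-path]/usr/local/bin\n")
	// ok == true, cmd == "add-path", no key/value pairs, arg == "/usr/local/bin"
	_, _, _, _ = cmd, kv, arg, ok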

 func (rc *RunContext) commandHandler(ctx context.Context) common.LineHandler {
 	logger := common.Logger(ctx)
 	resumeCommand := ""
 	return func(line string) bool {
-		var command string
-		var kvPairs map[string]string
-		var arg string
-		if m := commandPatternGA.FindStringSubmatch(line); m != nil {
-			command = m[1]
-			kvPairs = parseKeyValuePairs(m[3], ",")
-			arg = m[4]
-		} else if m := commandPatternADO.FindStringSubmatch(line); m != nil {
-			command = m[1]
-			kvPairs = parseKeyValuePairs(m[3], ";")
-			arg = m[4]
-		} else {
+		command, kvPairs, arg, ok := tryParseRawActionCommand(line)
+		if !ok {
 			return true
 		}

@@ -66,20 +71,35 @@ func (rc *RunContext) commandHandler(ctx context.Context) common.LineHandler {
 		case "save-state":
 			logger.Infof("  \U0001f4be  %s", line)
 			rc.saveState(ctx, kvPairs, arg)
+		case "add-matcher":
+			logger.Infof("  \U00002753 add-matcher %s", arg)
 		default:
 			logger.Infof("  \U00002753  %s", line)
 		}

-		return false
+		// return true to let gitea's logger handle these special outputs also
+		return true
 	}
 }


 func (rc *RunContext) setEnv(ctx context.Context, kvPairs map[string]string, arg string) {
-	common.Logger(ctx).Infof("  \U00002699  ::set-env:: %s=%s", kvPairs["name"], arg)
+	name := kvPairs["name"]
+	common.Logger(ctx).Infof("  \U00002699  ::set-env:: %s=%s", name, arg)
 	if rc.Env == nil {
 		rc.Env = make(map[string]string)
 	}
-	rc.Env[kvPairs["name"]] = arg
+	if rc.GlobalEnv == nil {
+		rc.GlobalEnv = map[string]string{}
+	}
+	newenv := map[string]string{
+		name: arg,
+	}
+	mergeIntoMap := mergeIntoMapCaseSensitive
+	if rc.JobContainer != nil && rc.JobContainer.IsEnvironmentCaseInsensitive() {
+		mergeIntoMap = mergeIntoMapCaseInsensitive
+	}
+	mergeIntoMap(rc.Env, newenv)
+	mergeIntoMap(rc.GlobalEnv, newenv)
 }
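Editor's note: setEnv now mirrors every ::set-env:: write into rc.GlobalEnv so the value survives composite-action boundaries (see the GlobalEnv merge earlier in this diff). The command itself is what a step prints to stdout:

	handler := rc.commandHandler(ctx)
	handler("::set-env name=FOO::bar\n")
	// rc.Env["FOO"] == "bar" and rc.GlobalEnv["FOO"] == "bar";
	// on a case-insensitive container a later "::set-env name=foo::baz"
	// overwrites the same entry instead of adding a second key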
 func (rc *RunContext) setOutput(ctx context.Context, kvPairs map[string]string, arg string) {
 	logger := common.Logger(ctx)
@@ -101,7 +121,13 @@ func (rc *RunContext) setOutput(ctx context.Context, kvPairs map[string]string,
 }
 func (rc *RunContext) addPath(ctx context.Context, arg string) {
 	common.Logger(ctx).Infof("  \U00002699  ::add-path:: %s", arg)
-	rc.ExtraPath = append(rc.ExtraPath, arg)
+	extraPath := []string{arg}
+	for _, v := range rc.ExtraPath {
+		if v != arg {
+			extraPath = append(extraPath, v)
+		}
+	}
+	rc.ExtraPath = extraPath
 }
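Editor's note: addPath now prepends and de-duplicates instead of appending, so the most recent ::add-path:: wins PATH precedence; this is why the tests below assert /boo at index 0 after /zoo was added first:

	rc.addPath(ctx, "/zoo") // ExtraPath: ["/zoo"]
	rc.addPath(ctx, "/boo") // ExtraPath: ["/boo", "/zoo"]
	rc.addPath(ctx, "/zoo") // ExtraPath: ["/zoo", "/boo"], moved rather than duplicated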

 func parseKeyValuePairs(kvPairs string, separator string) map[string]string {
@@ -146,14 +172,17 @@ func unescapeKvPairs(kvPairs map[string]string) map[string]string {
 	return kvPairs
 }

-func (rc *RunContext) saveState(ctx context.Context, kvPairs map[string]string, arg string) {
-	if rc.CurrentStep != "" {
-		stepResult := rc.StepResults[rc.CurrentStep]
-		if stepResult != nil {
-			if stepResult.State == nil {
-				stepResult.State = map[string]string{}
-			}
-			stepResult.State[kvPairs["name"]] = arg
+func (rc *RunContext) saveState(_ context.Context, kvPairs map[string]string, arg string) {
+	stepID := rc.CurrentStep
+	if stepID != "" {
+		if rc.IntraActionState == nil {
+			rc.IntraActionState = map[string]map[string]string{}
+		}
+		state, ok := rc.IntraActionState[stepID]
+		if !ok {
+			state = map[string]string{}
+			rc.IntraActionState[stepID] = state
+		}
+		state[kvPairs["name"]] = arg
 	}
 }
|
||||
|
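saveState no longer mutates StepResults; saved values land in a per-step IntraActionState map keyed by the current step ID. That mirrors GitHub's behavior of scoping `save-state` to the issuing action (for example its post step) rather than exposing it to other steps. A small illustration of where a command line ends up:

package main

import "fmt"

func main() {
	// After the handler processes "::save-state name=state-name::state-value"
	// while step "step" is running, the state lives here:
	intraActionState := map[string]map[string]string{
		"step": {"state-name": "state-value"},
	}
	// A post stage of the same action could then read it back, e.g. to
	// build a STATE_<name> environment variable (illustration only):
	for name, value := range intraActionState["step"] {
		fmt.Printf("STATE_%s=%s\n", name, value)
	}
}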
@@ -64,7 +64,7 @@ func TestAddpath(t *testing.T) {
 	a.Equal("/zoo", rc.ExtraPath[0])

 	handler("::add-path::/boo\n")
-	a.Equal("/boo", rc.ExtraPath[1])
+	a.Equal("/boo", rc.ExtraPath[0])
 }

 func TestStopCommands(t *testing.T) {
@@ -102,7 +102,7 @@ func TestAddpathADO(t *testing.T) {
 	a.Equal("/zoo", rc.ExtraPath[0])

 	handler("##[add-path]/boo\n")
-	a.Equal("/boo", rc.ExtraPath[1])
+	a.Equal("/boo", rc.ExtraPath[0])
 }

 func TestAddmask(t *testing.T) {
@@ -177,11 +177,7 @@ func TestAddmaskUsemask(t *testing.T) {
 func TestSaveState(t *testing.T) {
 	rc := &RunContext{
 		CurrentStep: "step",
-		StepResults: map[string]*model.StepResult{
-			"step": {
-				State: map[string]string{},
-			},
-		},
+		StepResults: map[string]*model.StepResult{},
 	}

 	ctx := context.Background()
@@ -189,5 +185,5 @@ func TestSaveState(t *testing.T) {
 	handler := rc.commandHandler(ctx)
 	handler("::save-state name=state-name::state-value\n")

-	assert.Equal(t, "state-value", rc.StepResults["step"].State["state-name"])
+	assert.Equal(t, "state-value", rc.IntraActionState["step"]["state-name"])
 }

@@ -2,6 +2,7 @@ package runner

 import (
 	"context"
+	"io"

 	"github.com/nektos/act/pkg/common"
 	"github.com/nektos/act/pkg/container"
@@ -49,11 +50,6 @@ func (cm *containerMock) UpdateFromImageEnv(env *map[string]string) common.Execu
 	return args.Get(0).(func(context.Context) error)
 }

-func (cm *containerMock) UpdateFromPath(env *map[string]string) common.Executor {
-	args := cm.Called(env)
-	return args.Get(0).(func(context.Context) error)
-}
-
 func (cm *containerMock) Copy(destPath string, files ...*container.FileEntry) common.Executor {
 	args := cm.Called(destPath, files)
 	return args.Get(0).(func(context.Context) error)
@@ -63,7 +59,17 @@ func (cm *containerMock) CopyDir(destPath string, srcPath string, useGitIgnore b
 	args := cm.Called(destPath, srcPath, useGitIgnore)
 	return args.Get(0).(func(context.Context) error)
 }

 func (cm *containerMock) Exec(command []string, env map[string]string, user, workdir string) common.Executor {
 	args := cm.Called(command, env, user, workdir)
 	return args.Get(0).(func(context.Context) error)
 }

+func (cm *containerMock) GetContainerArchive(ctx context.Context, srcPath string) (io.ReadCloser, error) {
+	args := cm.Called(ctx, srcPath)
+	err, hasErr := args.Get(1).(error)
+	if !hasErr {
+		err = nil
+	}
+	return args.Get(0).(io.ReadCloser), err
+}
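containerMock follows the usual testify pattern: record the call via cm.Called and type-assert the preprogrammed return values. A test could wire the newly added GetContainerArchive like this; a sketch, assuming containerMock embeds testify's mock.Mock (as the Called calls above imply) and the usual testify imports:

func TestGetContainerArchiveMock(t *testing.T) {
	cm := &containerMock{}
	archive := io.NopCloser(strings.NewReader("tar bytes"))
	// mock.Anything matches the context argument passed to cm.Called.
	cm.On("GetContainerArchive", mock.Anything, "/some/path").Return(archive, nil)

	reader, err := cm.GetContainerArchive(context.Background(), "/some/path")
	// Returning nil exercises the hasErr type assertion above, which maps
	// the untyped nil back to a nil error.
	assert.NoError(t, err)
	assert.NotNil(t, reader)
}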
@@ -1,12 +1,19 @@
 package runner

 import (
+	"bytes"
 	"context"
 	"fmt"
+	"path"
+	"reflect"
 	"regexp"
 	"strings"
+	"time"
+
+	_ "embed"

 	"github.com/nektos/act/pkg/common"
+	"github.com/nektos/act/pkg/container"
 	"github.com/nektos/act/pkg/exprparser"
 	"github.com/nektos/act/pkg/model"
 	"gopkg.in/yaml.v3"
@@ -21,8 +28,14 @@ type ExpressionEvaluator interface {

 // NewExpressionEvaluator creates a new evaluator
 func (rc *RunContext) NewExpressionEvaluator(ctx context.Context) ExpressionEvaluator {
+	return rc.NewExpressionEvaluatorWithEnv(ctx, rc.GetEnv())
+}
+
+func (rc *RunContext) NewExpressionEvaluatorWithEnv(ctx context.Context, env map[string]string) ExpressionEvaluator {
+	var workflowCallResult map[string]*model.WorkflowCallResult
+
 	// todo: cleanup EvaluationEnvironment creation
-	using := make(map[string]map[string]map[string]string)
+	using := make(map[string]exprparser.Needs)
 	strategy := make(map[string]interface{})
 	if rc.Run != nil {
 		job := rc.Run.Job()
@@ -35,8 +48,26 @@ func (rc *RunContext) NewExpressionEvaluator(ctx context.Context) ExpressionEval
 		jobNeeds := rc.Run.Job().Needs()

 		for _, needs := range jobNeeds {
-			using[needs] = map[string]map[string]string{
-				"outputs": jobs[needs].Outputs,
+			using[needs] = exprparser.Needs{
+				Outputs: jobs[needs].Outputs,
+				Result:  jobs[needs].Result,
 			}
 		}
+
+		// only setup jobs context in case of workflow_call
+		// and existing expression evaluator (this means, jobs are at
+		// least ready to run)
+		if rc.caller != nil && rc.ExprEval != nil {
+			workflowCallResult = map[string]*model.WorkflowCallResult{}
+
+			for jobName, job := range jobs {
+				result := model.WorkflowCallResult{
+					Outputs: map[string]string{},
+				}
+				for k, v := range job.Outputs {
+					result.Outputs[k] = v
+				}
+				workflowCallResult[jobName] = &result
+			}
+		}
 	}
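Switching `using` from nested string maps to exprparser.Needs carries each dependency's result alongside its outputs, so expressions over job status can resolve. A sketch of the populated needs context (values hypothetical):

// Values are hypothetical; the Outputs/Result fields mirror the code above.
using := map[string]exprparser.Needs{
	"build": {
		Outputs: map[string]string{"image": "app:latest"},
		Result:  "success",
	},
}
// Enables e.g. ${{ needs.build.outputs.image }} and
// ${{ needs.build.result == 'success' }} in dependent jobs.
_ = using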
@@ -46,16 +77,19 @@ func (rc *RunContext) NewExpressionEvaluator(ctx context.Context) ExpressionEval

 	ee := &exprparser.EvaluationEnvironment{
 		Github: ghc,
-		Env:    rc.GetEnv(),
+		Env:    env,
 		Job:    rc.getJobContext(),
+		Jobs:   &workflowCallResult,
+		// todo: should be unavailable
+		// but required to interpolate/evaluate the step outputs on the job
-		Steps:    rc.getStepsContext(),
-		Secrets:  rc.Config.Secrets,
-		Strategy: strategy,
-		Matrix:   rc.Matrix,
-		Needs:    using,
-		Inputs:   inputs,
+		Steps:     rc.getStepsContext(),
+		Secrets:   getWorkflowSecrets(ctx, rc),
+		Vars:      getWorkflowVars(ctx, rc),
+		Strategy:  strategy,
+		Matrix:    rc.Matrix,
+		Needs:     using,
+		Inputs:    inputs,
+		HashFiles: getHashFilesFunction(ctx, rc),
 	}
 	if rc.JobContainer != nil {
 		ee.Runner = rc.JobContainer.GetRunnerContext(ctx)
@@ -69,6 +103,9 @@ func (rc *RunContext) NewExpressionEvaluator(ctx context.Context) ExpressionEval
 	}
 }

+//go:embed hashfiles/index.js
+var hashfiles string
+
 // NewExpressionEvaluator creates a new evaluator
 func (rc *RunContext) NewStepExpressionEvaluator(ctx context.Context, step step) ExpressionEvaluator {
 	// todo: cleanup EvaluationEnvironment creation
@@ -82,10 +119,11 @@ func (rc *RunContext) NewStepExpressionEvaluator(ctx context.Context, step step)
 	jobs := rc.Run.Workflow.Jobs
 	jobNeeds := rc.Run.Job().Needs()

-	using := make(map[string]map[string]map[string]string)
+	using := make(map[string]exprparser.Needs)
 	for _, needs := range jobNeeds {
-		using[needs] = map[string]map[string]string{
-			"outputs": jobs[needs].Outputs,
+		using[needs] = exprparser.Needs{
+			Outputs: jobs[needs].Outputs,
+			Result:  jobs[needs].Result,
 		}
 	}

@@ -97,13 +135,15 @@ func (rc *RunContext) NewStepExpressionEvaluator(ctx context.Context, step step)
 		Env:     *step.getEnv(),
 		Job:     rc.getJobContext(),
 		Steps:   rc.getStepsContext(),
-		Secrets: rc.Config.Secrets,
+		Secrets: getWorkflowSecrets(ctx, rc),
+		Vars:    getWorkflowVars(ctx, rc),
 		Strategy: strategy,
 		Matrix:   rc.Matrix,
 		Needs:    using,
 		// todo: should be unavailable
 		// but required to interpolate/evaluate the inputs in actions/composite
-		Inputs: inputs,
+		Inputs:    inputs,
+		HashFiles: getHashFilesFunction(ctx, rc),
 	}
 	if rc.JobContainer != nil {
 		ee.Runner = rc.JobContainer.GetRunnerContext(ctx)
@@ -117,6 +157,67 @@ func (rc *RunContext) NewStepExpressionEvaluator(ctx context.Context, step step)
 	}
 }

+func getHashFilesFunction(ctx context.Context, rc *RunContext) func(v []reflect.Value) (interface{}, error) {
+	hashFiles := func(v []reflect.Value) (interface{}, error) {
+		if rc.JobContainer != nil {
+			timeed, cancel := context.WithTimeout(ctx, time.Minute)
+			defer cancel()
+			name := "workflow/hashfiles/index.js"
+			hout := &bytes.Buffer{}
+			herr := &bytes.Buffer{}
+			patterns := []string{}
+			followSymlink := false
+
+			for i, p := range v {
+				s := p.String()
+				if i == 0 {
+					if strings.HasPrefix(s, "--") {
+						if strings.EqualFold(s, "--follow-symbolic-links") {
+							followSymlink = true
+							continue
+						}
+						return "", fmt.Errorf("Invalid glob option %s, available option: '--follow-symbolic-links'", s)
+					}
+				}
+				patterns = append(patterns, s)
+			}
+			env := map[string]string{}
+			for k, v := range rc.Env {
+				env[k] = v
+			}
+			env["patterns"] = strings.Join(patterns, "\n")
+			if followSymlink {
+				env["followSymbolicLinks"] = "true"
+			}
+
+			stdout, stderr := rc.JobContainer.ReplaceLogWriter(hout, herr)
+			_ = rc.JobContainer.Copy(rc.JobContainer.GetActPath(), &container.FileEntry{
+				Name: name,
+				Mode: 0o644,
+				Body: hashfiles,
+			}).
+				Then(rc.execJobContainer([]string{"node", path.Join(rc.JobContainer.GetActPath(), name)},
+					env, "", "")).
+				Finally(func(context.Context) error {
+					rc.JobContainer.ReplaceLogWriter(stdout, stderr)
+					return nil
+				})(timeed)
+			output := hout.String() + "\n" + herr.String()
+			guard := "__OUTPUT__"
+			outstart := strings.Index(output, guard)
+			if outstart != -1 {
+				outstart += len(guard)
+				outend := strings.Index(output[outstart:], guard)
+				if outend != -1 {
+					return output[outstart : outstart+outend], nil
+				}
+			}
+		}
+		return "", nil
+	}
+	return hashFiles
+}
+
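getHashFilesFunction runs the embedded hashfiles/index.js with node inside the job container and fishes the result out of the combined stdout/stderr between two __OUTPUT__ guard markers. The extraction step in isolation, as a runnable sketch:

package main

import (
	"fmt"
	"strings"
)

// Extract the value between two __OUTPUT__ guards, exactly as the tail
// of getHashFilesFunction does with the node script's output.
func extractGuarded(output string) (string, bool) {
	guard := "__OUTPUT__"
	start := strings.Index(output, guard)
	if start == -1 {
		return "", false
	}
	start += len(guard)
	end := strings.Index(output[start:], guard)
	if end == -1 {
		return "", false
	}
	return output[start : start+end], true
}

func main() {
	out := "npm noise...\n__OUTPUT__3f2ahash__OUTPUT__\n"
	if hash, ok := extractGuarded(out); ok {
		fmt.Println(hash) // 3f2ahash
	}
}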
 type expressionEvaluator struct {
 	interpreter exprparser.Interpreter
 }
@@ -132,67 +233,117 @@ func (ee expressionEvaluator) evaluate(ctx context.Context, in string, defaultSt
 	return evaluated, err
 }

-func (ee expressionEvaluator) evaluateScalarYamlNode(ctx context.Context, node *yaml.Node) error {
+func (ee expressionEvaluator) evaluateScalarYamlNode(ctx context.Context, node *yaml.Node) (*yaml.Node, error) {
 	var in string
 	if err := node.Decode(&in); err != nil {
-		return err
+		return nil, err
 	}
 	if !strings.Contains(in, "${{") || !strings.Contains(in, "}}") {
-		return nil
+		return nil, nil
 	}
 	expr, _ := rewriteSubExpression(ctx, in, false)
 	res, err := ee.evaluate(ctx, expr, exprparser.DefaultStatusCheckNone)
 	if err != nil {
-		return err
+		return nil, err
 	}
-	return node.Encode(res)
+	ret := &yaml.Node{}
+	if err := ret.Encode(res); err != nil {
+		return nil, err
+	}
+	return ret, err
 }

-func (ee expressionEvaluator) evaluateMappingYamlNode(ctx context.Context, node *yaml.Node) error {
+func (ee expressionEvaluator) evaluateMappingYamlNode(ctx context.Context, node *yaml.Node) (*yaml.Node, error) {
+	var ret *yaml.Node
 	// GitHub has this undocumented feature to merge maps, called insert directive
 	insertDirective := regexp.MustCompile(`\${{\s*insert\s*}}`)
-	for i := 0; i < len(node.Content)/2; {
+	for i := 0; i < len(node.Content)/2; i++ {
+		changed := func() error {
+			if ret == nil {
+				ret = &yaml.Node{}
+				if err := ret.Encode(node); err != nil {
+					return err
+				}
+				ret.Content = ret.Content[:i*2]
+			}
+			return nil
+		}
 		k := node.Content[i*2]
 		v := node.Content[i*2+1]
-		if err := ee.EvaluateYamlNode(ctx, v); err != nil {
-			return err
+		ev, err := ee.evaluateYamlNodeInternal(ctx, v)
+		if err != nil {
+			return nil, err
 		}
+		if ev != nil {
+			if err := changed(); err != nil {
+				return nil, err
+			}
+		} else {
+			ev = v
+		}
 		var sk string
 		// Merge the nested map of the insert directive
 		if k.Decode(&sk) == nil && insertDirective.MatchString(sk) {
-			node.Content = append(append(node.Content[:i*2], v.Content...), node.Content[(i+1)*2:]...)
-			i += len(v.Content) / 2
-		} else {
-			if err := ee.EvaluateYamlNode(ctx, k); err != nil {
-				return err
+			if ev.Kind != yaml.MappingNode {
+				return nil, fmt.Errorf("failed to insert node %v into mapping %v unexpected type %v expected MappingNode", ev, node, ev.Kind)
 			}
+			if err := changed(); err != nil {
+				return nil, err
+			}
+			ret.Content = append(ret.Content, ev.Content...)
+		} else {
+			ek, err := ee.evaluateYamlNodeInternal(ctx, k)
+			if err != nil {
+				return nil, err
+			}
+			if ek != nil {
+				if err := changed(); err != nil {
+					return nil, err
+				}
+			} else {
+				ek = k
+			}
+			if ret != nil {
+				ret.Content = append(ret.Content, ek, ev)
+			}
-			i++
 		}
 	}
-	return nil
+	return ret, nil
 }

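evaluateMappingYamlNode implements the insert directive by splicing the evaluated value's Content into the parent mapping, and now rejects non-mapping values explicitly. A hedged illustration, with a hypothetical matrix context:

// Hypothetical workflow fragment exercising the insert directive:
const stepEnv = `
env:
  C: "3"
  ${{ insert }}: ${{ matrix.env }}
`
// With matrix.env = {A: "1", B: "2"}, evaluation yields
// {C: "3", A: "1", B: "2"}: the value mapping's Content is appended to
// ret.Content. A non-mapping value here is the error case guarded by
// the yaml.MappingNode check above.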
-func (ee expressionEvaluator) evaluateSequenceYamlNode(ctx context.Context, node *yaml.Node) error {
-	for i := 0; i < len(node.Content); {
+func (ee expressionEvaluator) evaluateSequenceYamlNode(ctx context.Context, node *yaml.Node) (*yaml.Node, error) {
+	var ret *yaml.Node
+	for i := 0; i < len(node.Content); i++ {
 		v := node.Content[i]
 		// Preserve nested sequences
 		wasseq := v.Kind == yaml.SequenceNode
-		if err := ee.EvaluateYamlNode(ctx, v); err != nil {
-			return err
+		ev, err := ee.evaluateYamlNodeInternal(ctx, v)
+		if err != nil {
+			return nil, err
 		}
-		// GitHub has this undocumented feature to merge sequences / arrays
-		// We have a nested sequence via evaluation, merge the arrays
-		if v.Kind == yaml.SequenceNode && !wasseq {
-			node.Content = append(append(node.Content[:i], v.Content...), node.Content[i+1:]...)
-			i += len(v.Content)
-		} else {
-			i++
+		if ev != nil {
+			if ret == nil {
+				ret = &yaml.Node{}
+				if err := ret.Encode(node); err != nil {
+					return nil, err
+				}
+				ret.Content = ret.Content[:i]
+			}
+			// GitHub has this undocumented feature to merge sequences / arrays
+			// We have a nested sequence via evaluation, merge the arrays
+			if ev.Kind == yaml.SequenceNode && !wasseq {
+				ret.Content = append(ret.Content, ev.Content...)
+			} else {
+				ret.Content = append(ret.Content, ev)
+			}
+		} else if ret != nil {
+			ret.Content = append(ret.Content, v)
 		}
 	}
-	return nil
+	return ret, nil
 }

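evaluateSequenceYamlNode keeps the companion behavior for lists: when evaluating a scalar item yields a sequence (wasseq is false but ev.Kind is a SequenceNode), the elements are spliced into the outer list. A hypothetical fragment:

// Hypothetical workflow fragment: if the expression below evaluates to a
// YAML sequence, its elements expand in place within the outer list.
const args = `
args:
  - build
  - ${{ fromJSON(env.EXTRA_FLAGS) }}
  - --verbose
`
// With EXTRA_FLAGS='["-v","--dry-run"]' this evaluates to
// [build, -v, --dry-run, --verbose].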
-func (ee expressionEvaluator) EvaluateYamlNode(ctx context.Context, node *yaml.Node) error {
+func (ee expressionEvaluator) evaluateYamlNodeInternal(ctx context.Context, node *yaml.Node) (*yaml.Node, error) {
 	switch node.Kind {
 	case yaml.ScalarNode:
 		return ee.evaluateScalarYamlNode(ctx, node)
@@ -201,10 +352,21 @@ func (ee expressionEvaluator) EvaluateYamlNode(ctx context.Context, node *yaml.N
 	case yaml.SequenceNode:
 		return ee.evaluateSequenceYamlNode(ctx, node)
 	default:
-		return nil
+		return nil, nil
 	}
 }

+func (ee expressionEvaluator) EvaluateYamlNode(ctx context.Context, node *yaml.Node) error {
+	ret, err := ee.evaluateYamlNodeInternal(ctx, node)
+	if err != nil {
+		return err
+	}
+	if ret != nil {
+		return ret.Decode(node)
+	}
+	return nil
+}
+
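The refactor splits evaluation into evaluateYamlNodeInternal, which returns nil for subtrees that contained no ${{ }} expression, and a thin exported EvaluateYamlNode that decodes the rebuilt node back in place. Callers are unaffected; a usage sketch, with ee and ctx as in the surrounding code:

var doc yaml.Node
_ = yaml.Unmarshal([]byte("name: ${{ github.job }}"), &doc)
root := doc.Content[0] // the mapping inside the document node
if err := ee.EvaluateYamlNode(ctx, root); err != nil {
	// handle evaluation error
}
// root is now interpolated in place; subtrees without "${{ }}" were
// reported as nil internally and never re-encoded (copy-on-write).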
 func (ee expressionEvaluator) Interpolate(ctx context.Context, in string) string {
 	if !strings.Contains(in, "${{") || !strings.Contains(in, "}}") {
 		return in
@@ -308,9 +470,12 @@ func rewriteSubExpression(ctx context.Context, in string, forceFormat bool) (str
 	return out, nil
 }

+//nolint:gocyclo
 func getEvaluatorInputs(ctx context.Context, rc *RunContext, step step, ghc *model.GithubContext) map[string]interface{} {
 	inputs := map[string]interface{}{}

+	setupWorkflowInputs(ctx, &inputs, rc)
+
 	var env map[string]string
 	if step != nil {
 		env = *step.getEnv()
@@ -341,5 +506,76 @@ func getEvaluatorInputs(ctx context.Context, rc *RunContext, step step, ghc *mod
 		}
 	}

+	if ghc.EventName == "workflow_call" {
+		config := rc.Run.Workflow.WorkflowCallConfig()
+		if config != nil && config.Inputs != nil {
+			for k, v := range config.Inputs {
+				value := nestedMapLookup(ghc.Event, "inputs", k)
+				if value == nil {
+					value = v.Default
+				}
+				if v.Type == "boolean" {
+					inputs[k] = value == "true"
+				} else {
+					inputs[k] = value
+				}
+			}
+		}
+	}
 	return inputs
 }
+
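Boolean workflow_call inputs arrive in the event payload as the strings "true"/"false", so the comparison above is what turns them into a real bool in the inputs context. For example (hypothetical input name):

value := nestedMapLookup(ghc.Event, "inputs", "deploy") // "true" or "false" as a string
inputs := map[string]interface{}{}
inputs["deploy"] = value == "true" // stored as a real bool, so ${{ inputs.deploy }} works in conditions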
+func setupWorkflowInputs(ctx context.Context, inputs *map[string]interface{}, rc *RunContext) {
+	if rc.caller != nil {
+		config := rc.Run.Workflow.WorkflowCallConfig()
+
+		for name, input := range config.Inputs {
+			value := rc.caller.runContext.Run.Job().With[name]
+			if value != nil {
+				if str, ok := value.(string); ok {
+					// evaluate using the calling RunContext (outside)
+					value = rc.caller.runContext.ExprEval.Interpolate(ctx, str)
+				}
+			}
+
+			if value == nil && config != nil && config.Inputs != nil {
+				value = input.Default
+				if rc.ExprEval != nil {
+					if str, ok := value.(string); ok {
+						// evaluate using the called RunContext (inside)
+						value = rc.ExprEval.Interpolate(ctx, str)
+					}
+				}
+			}
+
+			(*inputs)[name] = value
+		}
+	}
+}
+
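setupWorkflowInputs evaluates `with:` values with the caller's evaluator but input defaults with the callee's, as the two comments note. A hypothetical caller/callee pair touching both paths:

// Caller side: the with: value is interpolated by the calling run context.
const callerJob = `
jobs:
  call-deploy:
    uses: ./.github/workflows/deploy.yml
    with:
      environment: ${{ github.ref_name }}
`

// Callee side: when with: omits the input, input.Default is used and
// interpolated by the called run context.
const calleeWorkflow = `
on:
  workflow_call:
    inputs:
      environment:
        type: string
        default: production
`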
+func getWorkflowSecrets(ctx context.Context, rc *RunContext) map[string]string {
+	if rc.caller != nil {
+		job := rc.caller.runContext.Run.Job()
+		secrets := job.Secrets()
+
+		if secrets == nil && job.InheritSecrets() {
+			secrets = rc.caller.runContext.Config.Secrets
+		}
+
+		if secrets == nil {
+			secrets = map[string]string{}
+		}
+
+		for k, v := range secrets {
+			secrets[k] = rc.caller.runContext.ExprEval.Interpolate(ctx, v)
+		}
+
+		return secrets
+	}
+
+	return rc.Config.Secrets
+}
+
+func getWorkflowVars(_ context.Context, rc *RunContext) map[string]string {
+	return rc.Config.Vars
+}
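getWorkflowSecrets gives a called workflow only the secrets its caller passes, or everything via `secrets: inherit`; outside a workflow_call it falls back to the runner's configured secrets. In short:

secrets := getWorkflowSecrets(ctx, rc)
// - caller passed `secrets: {token: ...}` -> only those keys are visible,
//   interpolated with the caller's evaluator
// - caller used `secrets: inherit`        -> job.InheritSecrets() is true and
//   the caller's Config.Secrets flow through
// - not a workflow_call (rc.caller == nil) -> rc.Config.Secrets
_ = secrets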
@@ -28,6 +28,9 @@ func createRunContext(t *testing.T) *RunContext {
 			Secrets: map[string]string{
 				"CASE_INSENSITIVE_SECRET": "value",
 			},
+			Vars: map[string]string{
+				"CASE_INSENSITIVE_VAR": "value",
+			},
 		},
 		Env: map[string]string{
 			"key": "value",
@@ -122,6 +125,8 @@ func TestEvaluateRunContext(t *testing.T) {
 		{"env.key", "value", ""},
 		{"secrets.CASE_INSENSITIVE_SECRET", "value", ""},
 		{"secrets.case_insensitive_secret", "value", ""},
+		{"vars.CASE_INSENSITIVE_VAR", "value", ""},
+		{"vars.case_insensitive_var", "value", ""},
 		{"format('{{0}}', 'test')", "{0}", ""},
 		{"format('{{{0}}}', 'test')", "{test}", ""},
 		{"format('}}')", "}", ""},
@@ -195,6 +200,9 @@ func TestInterpolate(t *testing.T) {
 			Secrets: map[string]string{
 				"CASE_INSENSITIVE_SECRET": "value",
 			},
+			Vars: map[string]string{
+				"CASE_INSENSITIVE_VAR": "value",
+			},
 		},
 		Env: map[string]string{
 			"KEYWITHNOTHING": "valuewithnothing",
@@ -229,6 +237,8 @@ func TestInterpolate(t *testing.T) {
 		{" ${{ env.KEY_WITH_UNDERSCORES }} ", " value_with_underscores "},
 		{"${{ secrets.CASE_INSENSITIVE_SECRET }}", "value"},
 		{"${{ secrets.case_insensitive_secret }}", "value"},
+		{"${{ vars.CASE_INSENSITIVE_VAR }}", "value"},
+		{"${{ vars.case_insensitive_var }}", "value"},
 		{"${{ env.UNKNOWN }}", ""},
 		{"${{ env.SOMETHING_TRUE }}", "true"},
 		{"${{ env.SOMETHING_FALSE }}", "false"},
5103	pkg/runner/hashfiles/index.js	Normal file
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff