Mirror of https://github.com/argoproj/argo-cd.git, synced 2026-02-20 09:38:49 +01:00

Compare commits: commit-ser...temp-cherr (1 commit, 0f8f5dd5ad)

@@ -11,17 +11,3 @@ cmd/**/debug
debug.test
coverage.out
ui/node_modules/
test-results/
test/
manifests/
hack/
docs/
examples/
.github/
!test/fixture
!test/container
!hack/installers
!hack/gpg-wrapper.sh
!hack/git-verify-wrapper.sh
!hack/tool-versions.sh
!hack/install.sh

.github/ISSUE_TEMPLATE/release.md (vendored, 32 lines changed)
@@ -1,32 +0,0 @@
---
name: Argo CD Release
about: Used by our Release Champion to track progress of a minor release
title: 'Argo CD Release vX.X'
labels: 'release'
assignees: ''
---

Target RC1 date: ___. __, ____
Target GA date: ___. __, ____

- [ ] Create new section in the [Release Planning doc](https://docs.google.com/document/d/1trJIomcgXcfvLw0aYnERrFWfPjQOfYMDJOCh1S8nMBc/edit?usp=sharing)
- [ ] Schedule a Release Planning meeting roughly two weeks before the scheduled Release freeze date by adding it to the community calendar (or delegate this task to someone with write access to the community calendar)
- [ ] Include Zoom link in the invite
- [ ] Post in #argo-cd and #argo-contributors one week before the meeting
- [ ] Post again one hour before the meeting
- [ ] At the meeting, remove issues/PRs from the project's column for that release which have not been “claimed” by at least one Approver (add it to the next column if Approver requests that)
- [ ] 1wk before feature freeze post in #argo-contributors that PRs must be merged by DD-MM-YYYY to be included in the release - ask approvers to drop items from milestone they can’t merge
- [ ] At least two days before RC1 date, draft RC blog post and submit it for review (or delegate this task)
- [ ] Cut RC1 (or delegate this task to an Approver and coordinate timing)
- [ ] Create new release branch
- [ ] Add the release branch to ReadTheDocs
- [ ] Confirm that tweet and blog post are ready
- [ ] Trigger the release
- [ ] After the release is finished, publish tweet and blog post
- [ ] Post in #argo-cd and #argo-announcements with lots of emojis announcing the release and requesting help testing
- [ ] Monitor support channels for issues, cherry-picking bugfixes and docs fixes as appropriate (or delegate this task to an Approver and coordinate timing)
- [ ] At release date, evaluate if any bugs justify delaying the release. If not, cut the release (or delegate this task to an Approver and coordinate timing)
- [ ] If unreleased changes are on the release branch for {current minor version minus 3}, cut a final patch release for that series (or delegate this task to an Approver and coordinate timing)
- [ ] After the release, post in #argo-cd that the {current minor version minus 3} has reached EOL (example: https://cloud-native.slack.com/archives/C01TSERG0KZ/p1667336234059729)
- [ ] (For the next release champion) Review the [items scheduled for the next release](https://github.com/orgs/argoproj/projects/25). If any item does not have an assignee who can commit to finish the feature, move it to the next release.
- [ ] (For the next release champion) Schedule a time mid-way through the release cycle to review items again.

.github/cherry-pick-bot.yml (vendored, 3 lines changed)
@@ -1,3 +0,0 @@
enabled: true
preservePullRequestTitle: true

.github/dependabot.yml (vendored, 43 lines changed)
@@ -1,43 +0,0 @@
version: 2
updates:
- package-ecosystem: "gomod"
directory: "/"
schedule:
interval: "daily"
ignore:
- dependency-name: k8s.io/*

- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"

- package-ecosystem: "npm"
directory: "/ui/"
schedule:
interval: "daily"

- package-ecosystem: "docker"
directory: "/"
schedule:
interval: "daily"

- package-ecosystem: "docker"
directory: "/test/container/"
schedule:
interval: "daily"

- package-ecosystem: "docker"
directory: "/test/e2e/multiarch-container/"
schedule:
interval: "daily"

- package-ecosystem: "docker"
directory: "/test/remote/"
schedule:
interval: "daily"

- package-ecosystem: "docker"
directory: "/ui-test/"
schedule:
interval: "daily"

.github/pr-title-checker-config.json (vendored, 15 lines changed)
@@ -1,15 +0,0 @@
{
"LABEL": {
"name": "title needs formatting",
"color": "EEEEEE"
},
"CHECKS": {
"prefixes": ["[Bot] docs: "],
"regexp": "^(feat|fix|docs|test|ci|chore)!?(\\(.*\\))?!?:.*"
},
"MESSAGES": {
"success": "PR title is valid",
"failure": "PR title is invalid",
"notice": "PR Title needs to pass regex '^(feat|fix|docs|test|ci|chore)!?(\\(.*\\))?!?:.*"
}
}

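For reference, here is an illustrative (not from the repository) set of PR titles evaluated against the `CHECKS` rules in the deleted config above:

```yaml
# Invented example titles checked against the regexp
# "^(feat|fix|docs|test|ci|chore)!?(\(.*\))?!?:.*" and the "[Bot] docs: " prefix.
pass:
  - "feat(ui): add dark mode toggle"     # type with optional scope
  - "fix: handle empty app spec in sync" # bare type
  - "chore!: drop k3s v1.22 from CI"     # breaking-change marker before the colon
  - "[Bot] docs: regenerate CLI docs"    # accepted via CHECKS.prefixes
fail:
  - "Add dark mode toggle"               # no type prefix
  - "feature: add dark mode toggle"      # "feature" is not an accepted type
```
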
.github/pull_request_template.md (vendored, 6 lines changed)
@@ -6,16 +6,12 @@ Checklist:

* [ ] Either (a) I've created an [enhancement proposal](https://github.com/argoproj/argo-cd/issues/new/choose) and discussed it with the community, (b) this is a bug fix, or (c) this does not need to be in the release notes.
* [ ] The title of the PR states what changed and the related issues number (used for the release note).
* [ ] The title of the PR conforms to the [Toolchain Guide](https://argo-cd.readthedocs.io/en/latest/developer-guide/toolchain-guide/#title-of-the-pr)
* [ ] I've included "Closes [ISSUE #]" or "Fixes [ISSUE #]" in the description to automatically close the associated issue.
* [ ] I've updated both the CLI and UI to expose my feature, or I plan to submit a second PR with them.
* [ ] Does this PR require documentation updates?
* [ ] I've updated documentation as required by this PR.
* [ ] Optional. My organization is added to USERS.md.
* [ ] I have signed off all my commits as required by [DCO](https://github.com/argoproj/argoproj/blob/master/community/CONTRIBUTING.md#legal)
* [ ] I have signed off all my commits as required by [DCO](https://github.com/argoproj/argoproj/tree/master/community#contributing-to-argo)
* [ ] I have written unit and/or e2e tests for my change. PRs without these are unlikely to be merged.
* [ ] My build is green ([troubleshooting builds](https://argo-cd.readthedocs.io/en/latest/developer-guide/ci/)).
* [ ] My new feature complies with the [feature status](https://github.com/argoproj/argoproj/blob/master/community/feature-status.md) guidelines.
* [ ] I have added a brief description of why this PR is necessary and/or what this PR solves.

Please see [Contribution FAQs](https://argo-cd.readthedocs.io/en/latest/developer-guide/faq/) if you have questions about your pull-request.

.github/workflows/README.md (vendored, 38 lines changed)
@@ -1,38 +0,0 @@
# Workflows

| Workflow | Description |
|--------------------|----------------------------------------------------------------|
| ci-build.yaml | Build, lint, test, codegen, build-ui, analyze, e2e-test |
| codeql.yaml | CodeQL analysis |
| image-reuse.yaml | Build, push, and Sign container images |
| image.yaml | Build container image for PR's & publish for push events |
| pr-title-check.yaml| Lint PR for semantic information |
| init-release.yaml | Build manifests and version then create a PR for release branch|
| release.yaml | Build images, cli-binaries, provenances, and post actions |
| update-snyk.yaml | Scheduled snyk reports |

# Reusable workflows

## image-reuse.yaml

- The resuable workflow can be used to publish or build images with multiple container registries(Quay,GHCR, dockerhub), and then sign them with cosign when an image is published.
- A GO version `must` be specified e.g. 1.19
- The image name for each registry *must* contain the tag. Note: multiple tags are allowed for each registry using a CSV type.
- Multiple platforms can be specified e.g. linux/amd64,linux/arm64
- Images are not published by default. A boolean value must be set to `true` to push images.
- An optional target can be specified.

| Inputs | Description | Type | Required | Defaults |
|-------------------|-------------------------------------|-------------|----------|-----------------|
| go-version | Version of Go to be used | string | true | none |
| quay_image_name | Full image name and tag | CSV, string | false | none |
| ghcr_image_name | Full image name and tag | CSV, string | false | none |
| docker_image_name | Full image name and tag | CSV, string | false | none |
| platforms | Platforms to build (linux/amd64) | CSV, string | false | linux/amd64 |
| push | Whether to push image/s to registry | boolean | false | false |
| target | Target build stage | string | false | none |

| Outputs | Description | Type |
|-------------|------------------------------------------|-------|
|image-digest | Image digest of image container created | string|

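Since the README above documents a reusable workflow, here is a minimal caller sketch based on the inputs table and on the `uses: ./.github/workflows/image-reuse.yaml` invocations visible in image.yaml and release.yaml further down this diff. The job name and image tag are placeholders, and the fragment assumes it sits inside a complete workflow file.

```yaml
# Hypothetical caller job; inputs mirror the table above.
jobs:
  build-image:
    permissions:
      contents: read
      packages: write   # only needed when pushing to ghcr.io
      id-token: write   # needed for keyless signing when an image is pushed
    uses: ./.github/workflows/image-reuse.yaml
    with:
      go-version: 1.20                                  # required
      quay_image_name: quay.io/argoproj/argocd:latest   # image name must include the tag
      platforms: linux/amd64,linux/arm64
      push: false                                       # set to true to publish and sign
    secrets:
      quay_username: ${{ secrets.RELEASE_QUAY_USERNAME }}
      quay_password: ${{ secrets.RELEASE_QUAY_TOKEN }}
```
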
.github/workflows/ci-build.yaml (vendored, 51 lines changed)
@@ -13,7 +13,7 @@ on:
env:
# Golang version to use across CI steps
GOLANG_VERSION: '1.20'
GOLANG_VERSION: '1.19'

concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
@@ -28,9 +28,9 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Checkout code
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
- name: Setup Golang
uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.0
uses: actions/setup-go@4d34df0c2316fe8122ab82dc22947d607c0c91f9 # v4.0.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Download all Go modules
@@ -46,9 +46,9 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Checkout code
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
- name: Setup Golang
uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.0
uses: actions/setup-go@4d34df0c2316fe8122ab82dc22947d607c0c91f9 # v4.0.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Restore go build cache
@@ -70,16 +70,16 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Checkout code
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
- name: Setup Golang
uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.0
uses: actions/setup-go@4d34df0c2316fe8122ab82dc22947d607c0c91f9 # v4.0.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Run golangci-lint
uses: golangci/golangci-lint-action@639cd343e1d3b897ff35927a75193d57cfcba299 # v3.6.0
uses: golangci/golangci-lint-action@0ad9a0988b3973e851ab0a07adf248ec2e100376 # v3.3.1
with:
version: v1.51.0
args: --enable gofmt --timeout 10m --exclude SA5011 --verbose --max-issues-per-linter 0 --max-same-issues 0
version: v1.46.2
args: --timeout 10m --exclude SA5011 --verbose

test-go:
name: Run unit tests for Go packages
@@ -93,11 +93,11 @@ jobs:
- name: Create checkout directory
run: mkdir -p ~/go/src/github.com/argoproj
- name: Checkout code
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
- name: Create symlink in GOPATH
run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd
- name: Setup Golang
uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.0
uses: actions/setup-go@4d34df0c2316fe8122ab82dc22947d607c0c91f9 # v4.0.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Install required packages
@@ -149,7 +149,7 @@ jobs:
path: test-results/

test-go-race:
name: Run unit tests with -race for Go packages
name: Run unit tests with -race, for Go packages
runs-on: ubuntu-22.04
needs:
- build-go
@@ -160,11 +160,11 @@ jobs:
- name: Create checkout directory
run: mkdir -p ~/go/src/github.com/argoproj
- name: Checkout code
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
- name: Create symlink in GOPATH
run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd
- name: Setup Golang
uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.0
uses: actions/setup-go@4d34df0c2316fe8122ab82dc22947d607c0c91f9 # v4.0.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Install required packages
@@ -215,9 +215,9 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Checkout code
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
- name: Setup Golang
uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.0
uses: actions/setup-go@4d34df0c2316fe8122ab82dc22947d607c0c91f9 # v4.0.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Create symlink in GOPATH
@@ -263,11 +263,11 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Checkout code
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
- name: Setup NodeJS
uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0
uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0
with:
node-version: '20.4.0'
node-version: '12.18.4'
- name: Restore node dependency cache
id: cache-dependencies
uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1
@@ -300,7 +300,7 @@ jobs:
sonar_secret: ${{ secrets.SONAR_TOKEN }}
steps:
- name: Checkout code
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
with:
fetch-depth: 0
- name: Restore node dependency cache
@@ -325,7 +325,7 @@ jobs:
name: test-results
path: test-results
- name: Upload code coverage information to codecov.io
uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d # v3.1.4
uses: codecov/codecov-action@d9f34f8cd5cb3b3eb79b3e4b5dae3a16df499a70 # v3.1.1
with:
file: coverage.out
- name: Perform static code analysis using SonarCloud
@@ -361,7 +361,7 @@ jobs:
runs-on: ubuntu-22.04
strategy:
matrix:
k3s-version: [v1.27.2, v1.26.0, v1.25.4, v1.24.3]
k3s-version: [v1.24.3, v1.23.3, v1.22.6]
needs:
- build-go
env:
@@ -379,9 +379,9 @@ jobs:
GITLAB_TOKEN: ${{ secrets.E2E_TEST_GITLAB_TOKEN }}
steps:
- name: Checkout code
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
- name: Setup Golang
uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.0
uses: actions/setup-go@4d34df0c2316fe8122ab82dc22947d607c0c91f9 # v4.0.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: GH actions workaround - Kill XSP4 process
@@ -397,7 +397,6 @@ jobs:
sudo mkdir -p $HOME/.kube && sudo chown -R runner $HOME/.kube
sudo k3s kubectl config view --raw > $HOME/.kube/config
sudo chown runner $HOME/.kube/config
sudo chmod go-r $HOME/.kube/config
kubectl version
- name: Restore go build cache
uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1

.github/workflows/codeql.yml (vendored, 2 lines changed)
@@ -30,7 +30,7 @@ jobs:

steps:
- name: Checkout repository
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0

# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL

.github/workflows/image-reuse.yaml (vendored, 165 lines changed)
@@ -1,165 +0,0 @@
name: Publish and Sign Container Image
on:
workflow_call:
inputs:
go-version:
required: true
type: string
quay_image_name:
required: false
type: string
ghcr_image_name:
required: false
type: string
docker_image_name:
required: false
type: string
platforms:
required: true
type: string
default: linux/amd64
push:
required: true
type: boolean
default: false
target:
required: false
type: string

secrets:
quay_username:
required: false
quay_password:
required: false
ghcr_username:
required: false
ghcr_password:
required: false
docker_username:
required: false
docker_password:
required: false

outputs:
image-digest:
description: "sha256 digest of container image"
value: ${{ jobs.publish.outputs.image-digest }}

permissions: {}

jobs:
publish:
permissions:
contents: read
packages: write # Used to push images to `ghcr.io` if used.
id-token: write # Needed to create an OIDC token for keyless signing
runs-on: ubuntu-22.04
outputs:
image-digest: ${{ steps.image.outputs.digest }}
steps:
- name: Checkout code
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.3.0
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
if: ${{ github.ref_type == 'tag'}}

- name: Checkout code
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.3.0
if: ${{ github.ref_type != 'tag'}}

- name: Setup Golang
uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1
with:
go-version: ${{ inputs.go-version }}

- name: Install cosign
uses: sigstore/cosign-installer@6e04d228eb30da1757ee4e1dd75a0ec73a653e06 # v3.1.1
with:
cosign-release: 'v2.0.0'

- uses: docker/setup-qemu-action@2b82ce82d56a2a04d2637cd93a637ae1b359c0a7 # v2.2.0
- uses: docker/setup-buildx-action@4c0219f9ac95b02789c1075625400b2acbff50b1 # v2.9.1

- name: Setup tags for container image as a CSV type
run: |
IMAGE_TAGS=$(for str in \
${{ inputs.quay_image_name }} \
${{ inputs.ghcr_image_name }} \
${{ inputs.docker_image_name}}; do
echo -n "${str}",;done | sed 's/,$//')

echo $IMAGE_TAGS
echo "TAGS=$IMAGE_TAGS" >> $GITHUB_ENV

- name: Setup image namespace for signing, strip off the tag
run: |
TAGS=$(for tag in \
${{ inputs.quay_image_name }} \
${{ inputs.ghcr_image_name }} \
${{ inputs.docker_image_name}}; do
echo -n "${tag}" | awk -F ":" '{print $1}' -;done)

echo $TAGS
echo 'SIGNING_TAGS<<EOF' >> $GITHUB_ENV
echo $TAGS >> $GITHUB_ENV
echo 'EOF' >> $GITHUB_ENV

- name: Login to Quay.io
uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2.2.0
with:
registry: quay.io
username: ${{ secrets.quay_username }}
password: ${{ secrets.quay_password }}
if: ${{ inputs.quay_image_name && inputs.push }}

- name: Login to GitHub Container Registry
uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2.2.0
with:
registry: ghcr.io
username: ${{ secrets.ghcr_username }}
password: ${{ secrets.ghcr_password }}
if: ${{ inputs.ghcr_image_name && inputs.push }}

- name: Login to dockerhub Container Registry
uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2.2.0
with:
username: ${{ secrets.docker_username }}
password: ${{ secrets.docker_password }}
if: ${{ inputs.docker_image_name && inputs.push }}

- name: Set up build args for container image
run: |
echo "GIT_TAG=$(if [ -z "`git status --porcelain`" ]; then git describe --exact-match --tags HEAD 2>/dev/null; fi)" >> $GITHUB_ENV
echo "GIT_COMMIT=$(git rev-parse HEAD)" >> $GITHUB_ENV
echo "BUILD_DATE=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_ENV
echo "GIT_TREE_STATE=$(if [ -z "`git status --porcelain`" ]; then echo "clean" ; else echo "dirty"; fi)" >> $GITHUB_ENV

- name: Build and push container image
id: image
uses: docker/build-push-action@2eb1c1961a95fc15694676618e422e8ba1d63825 #v4.1.1
with:
context: .
platforms: ${{ inputs.platforms }}
push: ${{ inputs.push }}
tags: ${{ env.TAGS }}
target: ${{ inputs.target }}
provenance: false
sbom: false
build-args: |
GIT_TAG=${{env.GIT_TAG}}
GIT_COMMIT=${{env.GIT_COMMIT}}
BUILD_DATE=${{env.BUILD_DATE}}
GIT_TREE_STATE=${{env.GIT_TREE_STATE}}

- name: Sign container images
run: |
for signing_tag in $SIGNING_TAGS; do
cosign sign \
-a "repo=${{ github.repository }}" \
-a "workflow=${{ github.workflow }}" \
-a "sha=${{ github.sha }}" \
-y \
"$signing_tag"@${{ steps.image.outputs.digest }}
done
if: ${{ inputs.push }}

.github/workflows/image.yaml (vendored, 141 lines changed)
@@ -9,109 +9,96 @@ on:
- master
types: [ labeled, unlabeled, opened, synchronize, reopened ]

env:
GOLANG_VERSION: '1.19'

concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true

permissions: {}
permissions:
contents: read

jobs:
set-vars:
publish:
permissions:
contents: read
contents: write # for git to push upgrade commit if not already deployed
if: github.repository == 'argoproj/argo-cd'
runs-on: ubuntu-22.04
outputs:
image-tag: ${{ steps.image.outputs.tag}}
platforms: ${{ steps.platforms.outputs.platforms }}
env:
GOPATH: /home/runner/work/argo-cd/argo-cd
steps:
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
- uses: actions/setup-go@4d34df0c2316fe8122ab82dc22947d607c0c91f9 # v4.0.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
with:
path: src/github.com/argoproj/argo-cd

- name: Set image tag for ghcr
run: echo "tag=$(cat ./VERSION)-${GITHUB_SHA::8}" >> $GITHUB_OUTPUT
# get image tag
- run: echo "tag=$(cat ./VERSION)-${GITHUB_SHA::8}" >> $GITHUB_OUTPUT
working-directory: ./src/github.com/argoproj/argo-cd
id: image

- name: Determine image platforms to use
id: platforms
run: |
# login
- run: |
docker login ghcr.io --username $USERNAME --password-stdin <<< "$PASSWORD"
docker login quay.io --username "$DOCKER_USERNAME" --password-stdin <<< "$DOCKER_TOKEN"
if: github.event_name == 'push'
env:
USERNAME: ${{ secrets.USERNAME }}
PASSWORD: ${{ secrets.TOKEN }}
DOCKER_USERNAME: ${{ secrets.RELEASE_QUAY_USERNAME }}
DOCKER_TOKEN: ${{ secrets.RELEASE_QUAY_TOKEN }}

# build
- uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # v2.1.0
- uses: docker/setup-buildx-action@f03ac48505955848960e80bbb68046aa35c7b9e7 # v2.4.1
- run: |
IMAGE_PLATFORMS=linux/amd64
if [[ "${{ github.event_name }}" == "push" || "${{ contains(github.event.pull_request.labels.*.name, 'test-multi-image') }}" == "true" ]]
if [[ "${{ github.event_name }}" == "push" || "${{ contains(github.event.pull_request.labels.*.name, 'test-arm-image') }}" == "true" ]]
then
IMAGE_PLATFORMS=linux/amd64,linux/arm64,linux/s390x,linux/ppc64le
fi
echo "Building image for platforms: $IMAGE_PLATFORMS"
echo "platforms=$IMAGE_PLATFORMS" >> $GITHUB_OUTPUT
docker buildx build --platform $IMAGE_PLATFORMS --sbom=false --provenance=false --push="${{ github.event_name == 'push' }}" \
-t ghcr.io/argoproj/argocd:${{ steps.image.outputs.tag }} \
-t quay.io/argoproj/argocd:latest .
working-directory: ./src/github.com/argoproj/argo-cd

build-only:
needs: [set-vars]
permissions:
contents: read
packages: write # for pushing packages to GHCR, which is used by cd.apps.argoproj.io to avoid polluting Quay with tags
id-token: write # for creating OIDC tokens for signing.
if: ${{ github.repository == 'argoproj/argo-cd' && github.event_name != 'push' }}
uses: ./.github/workflows/image-reuse.yaml
with:
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
go-version: 1.20
platforms: ${{ needs.set-vars.outputs.platforms }}
push: false
# sign container images
- name: Install cosign
uses: sigstore/cosign-installer@c3667d99424e7e6047999fb6246c0da843953c65 # v3.0.1
with:
cosign-release: 'v1.13.1'

build-and-publish:
needs: [set-vars]
permissions:
contents: read
packages: write # for pushing packages to GHCR, which is used by cd.apps.argoproj.io to avoid polluting Quay with tags
id-token: write # for creating OIDC tokens for signing.
if: ${{ github.repository == 'argoproj/argo-cd' && github.event_name == 'push' }}
uses: ./.github/workflows/image-reuse.yaml
with:
quay_image_name: quay.io/argoproj/argocd:latest
ghcr_image_name: ghcr.io/argoproj/argo-cd/argocd:${{ needs.set-vars.outputs.image-tag }}
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
go-version: 1.20
platforms: ${{ needs.set-vars.outputs.platforms }}
push: true
secrets:
quay_username: ${{ secrets.RELEASE_QUAY_USERNAME }}
quay_password: ${{ secrets.RELEASE_QUAY_TOKEN }}
ghcr_username: ${{ github.actor }}
ghcr_password: ${{ secrets.GITHUB_TOKEN }}
- name: Install crane to get digest of image
uses: imjasonh/setup-crane@00c9e93efa4e1138c9a7a5c594acd6c75a2fbf0c

build-and-publish-provenance: # Push attestations to GHCR, latest image is polluting quay.io
needs:
- build-and-publish
permissions:
actions: read # for detecting the Github Actions environment.
id-token: write # for creating OIDC tokens for signing.
packages: write # for uploading attestations. (https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#known-issues)
if: ${{ github.repository == 'argoproj/argo-cd' && github.event_name == 'push' }}
# Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v1.7.0
with:
image: ghcr.io/argoproj/argo-cd/argocd
digest: ${{ needs.build-and-publish.outputs.image-digest }}
registry-username: ${{ github.actor }}
secrets:
registry-password: ${{ secrets.GITHUB_TOKEN }}
- name: Get digest of image
run: |
echo "IMAGE_DIGEST=$(crane digest quay.io/argoproj/argocd:latest)" >> $GITHUB_ENV

Deploy:
needs:
- build-and-publish
- set-vars
permissions:
contents: write # for git to push upgrade commit if not already deployed
packages: write # for pushing packages to GHCR, which is used by cd.apps.argoproj.io to avoid polluting Quay with tags
if: ${{ github.repository == 'argoproj/argo-cd' && github.event_name == 'push' }}
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.3.0
- name: Sign Argo CD latest image
run: |
cosign sign --key env://COSIGN_PRIVATE_KEY quay.io/argoproj/argocd@${{ env.IMAGE_DIGEST }}
# Displays the public key to share.
cosign public-key --key env://COSIGN_PRIVATE_KEY
env:
COSIGN_PRIVATE_KEY: ${{secrets.COSIGN_PRIVATE_KEY}}
COSIGN_PASSWORD: ${{secrets.COSIGN_PASSWORD}}
if: ${{ github.event_name == 'push' }}

# deploy
- run: git clone "https://$TOKEN@github.com/argoproj/argoproj-deployments"
if: github.event_name == 'push'
env:
TOKEN: ${{ secrets.TOKEN }}
- run: |
docker run -u $(id -u):$(id -g) -v $(pwd):/src -w /src --rm -t ghcr.io/argoproj/argo-cd/argocd:${{ needs.set-vars.outputs.image-tag }} kustomize edit set image quay.io/argoproj/argocd=ghcr.io/argoproj/argo-cd/argocd:${{ needs.set-vars.outputs.image-tag }}
docker run -u $(id -u):$(id -g) -v $(pwd):/src -w /src --rm -t ghcr.io/argoproj/argocd:${{ steps.image.outputs.tag }} kustomize edit set image quay.io/argoproj/argocd=ghcr.io/argoproj/argocd:${{ steps.image.outputs.tag }}
git config --global user.email 'ci@argoproj.com'
git config --global user.name 'CI'
git diff --exit-code && echo 'Already deployed' || (git commit -am 'Upgrade argocd to ${{ needs.set-vars.outputs.image-tag }}' && git push)
git diff --exit-code && echo 'Already deployed' || (git commit -am 'Upgrade argocd to ${{ steps.image.outputs.tag }}' && git push)
if: github.event_name == 'push'
working-directory: argoproj-deployments/argocd

# TODO: clean up old images once github supports it: https://github.community/t5/How-to-use-Git-and-GitHub/Deleting-images-from-GitHub-Package-Registry/m-p/41202/thread-id/9811

.github/workflows/init-release.yaml (vendored, 77 lines changed)
@@ -1,77 +0,0 @@
name: Init ArgoCD Release
on:
workflow_dispatch:
inputs:
TARGET_BRANCH:
description: 'TARGET_BRANCH to checkout (e.g. release-2.5)'
required: true
type: string

TARGET_VERSION:
description: 'TARGET_VERSION to build manifests (e.g. 2.5.0-rc1) Note: the `v` prefix is not used'
required: true
type: string

permissions: {}

jobs:
prepare-release:
permissions:
contents: write # for peter-evans/create-pull-request to create branch
pull-requests: write # for peter-evans/create-pull-request to create a PR
name: Automatically generate version and manifests on ${{ inputs.TARGET_BRANCH }}
runs-on: ubuntu-22.04
steps:
- name: Checkout code
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.2.0
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
ref: ${{ inputs.TARGET_BRANCH }}

- name: Check if TARGET_VERSION is well formed.
run: |
set -xue
# Target version must not contain 'v' prefix
if echo "${{ inputs.TARGET_VERSION }}" | grep -e '^v'; then
echo "::error::Target version '${{ inputs.TARGET_VERSION }}' should not begin with a 'v' prefix, refusing to continue." >&2
exit 1
fi

- name: Create VERSION information
run: |
set -ue
echo "Bumping version from $(cat VERSION) to ${{ inputs.TARGET_VERSION }}"
echo "${{ inputs.TARGET_VERSION }}" > VERSION

# We install kustomize in the dist directory
- name: Add dist to PATH
run: |
echo "/home/runner/work/argo-cd/argo-cd/dist" >> $GITHUB_PATH

- name: Generate new set of manifests
run: |
set -ue
make install-codegen-tools-local
make manifests-local VERSION=${{ inputs.TARGET_VERSION }}
git diff

- name: Generate version compatibility table
run: |
git stash
bash hack/update-supported-versions.sh
git add -u .
git stash pop

- name: Create pull request
uses: peter-evans/create-pull-request@153407881ec5c347639a548ade7d8ad1d6740e38 # v5.0.2
with:
commit-message: "Bump version to ${{ inputs.TARGET_VERSION }}"
title: "Bump version to ${{ inputs.TARGET_VERSION }} on ${{ inputs.TARGET_BRANCH }} branch"
body: Updating VERSION and manifests to ${{ inputs.TARGET_VERSION }}
branch: update-version
branch-suffix: random
signoff: true
labels: release

.github/workflows/pr-title-check.yml (vendored, 29 lines changed)
@@ -1,29 +0,0 @@
name: "Lint PR"

on:
pull_request_target:
types: [opened, edited, reopened, synchronize]

# IMPORTANT: No checkout actions, scripts, or builds should be added to this workflow. Permissions should always be used
# with extreme caution. https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#pull_request_target
permissions: {}

# PR updates can happen in quick succession leading to this
# workflow being trigger a number of times. This limits it
# to one run per PR.
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}

jobs:
validate:
permissions:
contents: read
pull-requests: read
name: Validate PR Title
runs-on: ubuntu-latest
steps:
- uses: thehanimo/pr-title-checker@0cf5902181e78341bb97bb06646396e5bd354b3f # v1.4.0
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
configuration_path: ".github/pr-title-checker-config.json"

492
.github/workflows/release.yaml
vendored
492
.github/workflows/release.yaml
vendored
@@ -1,153 +1,270 @@
|
||||
name: Publish ArgoCD Release
|
||||
name: Create ArgoCD release
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- 'v*'
|
||||
- '!v2.4*'
|
||||
- '!v2.5*'
|
||||
- '!v2.6*'
|
||||
|
||||
permissions: {}
|
||||
- "release-v*"
|
||||
- "!release-v1.5*"
|
||||
- "!release-v1.4*"
|
||||
- "!release-v1.3*"
|
||||
- "!release-v1.2*"
|
||||
- "!release-v1.1*"
|
||||
- "!release-v1.0*"
|
||||
- "!release-v0*"
|
||||
|
||||
env:
|
||||
GOLANG_VERSION: '1.20' # Note: go-version must also be set in job argocd-image.with.go-version
|
||||
GOLANG_VERSION: '1.19'
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
argocd-image:
|
||||
prepare-release:
|
||||
permissions:
|
||||
contents: read
|
||||
id-token: write # for creating OIDC tokens for signing.
|
||||
packages: write # used to push images to `ghcr.io` if used.
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
uses: ./.github/workflows/image-reuse.yaml
|
||||
with:
|
||||
quay_image_name: quay.io/argoproj/argocd:${{ github.ref_name }}
|
||||
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
|
||||
go-version: 1.20
|
||||
platforms: linux/amd64,linux/arm64,linux/s390x,linux/ppc64le
|
||||
push: true
|
||||
secrets:
|
||||
quay_username: ${{ secrets.RELEASE_QUAY_USERNAME }}
|
||||
quay_password: ${{ secrets.RELEASE_QUAY_TOKEN }}
|
||||
|
||||
argocd-image-provenance:
|
||||
needs: [argocd-image]
|
||||
permissions:
|
||||
actions: read # for detecting the Github Actions environment.
|
||||
id-token: write # for creating OIDC tokens for signing.
|
||||
packages: write # for uploading attestations. (https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#known-issues)
|
||||
# Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v1.7.0
|
||||
with:
|
||||
image: quay.io/argoproj/argocd
|
||||
digest: ${{ needs.argocd-image.outputs.image-digest }}
|
||||
secrets:
|
||||
registry-username: ${{ secrets.RELEASE_QUAY_USERNAME }}
|
||||
registry-password: ${{ secrets.RELEASE_QUAY_TOKEN }}
|
||||
|
||||
goreleaser:
|
||||
needs:
|
||||
- argocd-image
|
||||
- argocd-image-provenance
|
||||
permissions:
|
||||
contents: write # used for uploading assets
|
||||
contents: write # To push changes to release branch
|
||||
name: Perform automatic release on trigger ${{ github.ref }}
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
runs-on: ubuntu-22.04
|
||||
outputs:
|
||||
hashes: ${{ steps.hash.outputs.hashes }}
|
||||
|
||||
env:
|
||||
# The name of the tag as supplied by the GitHub event
|
||||
SOURCE_TAG: ${{ github.ref }}
|
||||
# The image namespace where Docker image will be published to
|
||||
IMAGE_NAMESPACE: quay.io/argoproj
|
||||
# Whether to create & push image and release assets
|
||||
DRY_RUN: false
|
||||
# Whether a draft release should be created, instead of public one
|
||||
DRAFT_RELEASE: false
|
||||
# Whether to update homebrew with this release as well
|
||||
# Set RELEASE_HOMEBREW_TOKEN secret in repository for this to work - needs
|
||||
# access to public repositories
|
||||
UPDATE_HOMEBREW: false
|
||||
# Name of the GitHub user for Git config
|
||||
GIT_USERNAME: argo-bot
|
||||
# E-Mail of the GitHub user for Git config
|
||||
GIT_EMAIL: argoproj@gmail.com
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
|
||||
uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
|
||||
with:
|
||||
fetch-depth: 0
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Fetch all tags
|
||||
run: git fetch --force --tags
|
||||
|
||||
- name: Set GORELEASER_PREVIOUS_TAG # Workaround, GoReleaser uses 'git-describe' to determine a previous tag. Our tags are created in realease branches.
|
||||
- name: Check if the published tag is well formed and setup vars
|
||||
run: |
|
||||
set -xue
|
||||
if echo ${{ github.ref_name }} | grep -E -- '-rc1+$';then
|
||||
echo "GORELEASER_PREVIOUS_TAG=$(git -c 'versionsort.suffix=-rc' tag --list --sort=version:refname | tail -n 2 | head -n 1)" >> $GITHUB_ENV
|
||||
else
|
||||
echo "This is not the first release on the branch, Using GoReleaser defaults"
|
||||
# Target version must match major.minor.patch and optional -rcX suffix
|
||||
# where X must be a number.
|
||||
TARGET_VERSION=${SOURCE_TAG#*release-v}
|
||||
if ! echo "${TARGET_VERSION}" | egrep '^[0-9]+\.[0-9]+\.[0-9]+(-rc[0-9]+)*$'; then
|
||||
echo "::error::Target version '${TARGET_VERSION}' is malformed, refusing to continue." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Target branch is the release branch we're going to operate on
|
||||
# Its name is 'release-<major>.<minor>'
|
||||
TARGET_BRANCH="release-${TARGET_VERSION%\.[0-9]*}"
|
||||
|
||||
# The release tag is the source tag, minus the release- prefix
|
||||
RELEASE_TAG="${SOURCE_TAG#*release-}"
|
||||
|
||||
# Whether this is a pre-release (indicated by -rc suffix)
|
||||
PRE_RELEASE=false
|
||||
if echo "${RELEASE_TAG}" | egrep -- '-rc[0-9]+$'; then
|
||||
PRE_RELEASE=true
|
||||
fi
|
||||
|
||||
# We must not have a release trigger within the same release branch,
|
||||
# because that means a release for this branch is already running.
|
||||
if git tag -l | grep "release-v${TARGET_VERSION%\.[0-9]*}" | grep -v "release-v${TARGET_VERSION}"; then
|
||||
echo "::error::Another release for branch ${TARGET_BRANCH} is currently in progress."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Ensure that release do not yet exist
|
||||
if git rev-parse ${RELEASE_TAG}; then
|
||||
echo "::error::Release tag ${RELEASE_TAG} already exists in repository. Refusing to continue."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Make the variables available in follow-up steps
|
||||
echo "TARGET_VERSION=${TARGET_VERSION}" >> $GITHUB_ENV
|
||||
echo "TARGET_BRANCH=${TARGET_BRANCH}" >> $GITHUB_ENV
|
||||
echo "RELEASE_TAG=${RELEASE_TAG}" >> $GITHUB_ENV
|
||||
echo "PRE_RELEASE=${PRE_RELEASE}" >> $GITHUB_ENV
|
||||
|
||||
- name: Check if our release tag has a correct annotation
|
||||
run: |
|
||||
set -ue
|
||||
# Fetch all tag information as well
|
||||
git fetch --prune --tags --force
|
||||
|
||||
echo "=========== BEGIN COMMIT MESSAGE ============="
|
||||
git show ${SOURCE_TAG}
|
||||
echo "============ END COMMIT MESSAGE =============="
|
||||
|
||||
# Quite dirty hack to get the release notes from the annotated tag
|
||||
# into a temporary file.
|
||||
RELEASE_NOTES=$(mktemp -p /tmp release-notes.XXXXXX)
|
||||
|
||||
prefix=true
|
||||
begin=false
|
||||
git show ${SOURCE_TAG} | while read line; do
|
||||
# Whatever is in commit history for the tag, we only want that
|
||||
# annotation from our tag. We discard everything else.
|
||||
if test "$begin" = "false"; then
|
||||
if echo "$line" | grep -q "tag ${SOURCE_TAG#refs/tags/}"; then begin="true"; fi
|
||||
continue
|
||||
fi
|
||||
if test "$prefix" = "true"; then
|
||||
if test -z "$line"; then prefix=false; fi
|
||||
else
|
||||
if echo "$line" | egrep -q '^commit [0-9a-f]+'; then
|
||||
break
|
||||
fi
|
||||
echo "$line" >> ${RELEASE_NOTES}
|
||||
fi
|
||||
done
|
||||
|
||||
# For debug purposes
|
||||
echo "============BEGIN RELEASE NOTES================="
|
||||
cat ${RELEASE_NOTES}
|
||||
echo "=============END RELEASE NOTES=================="
|
||||
|
||||
# Too short release notes are suspicious. We need at least 100 bytes.
|
||||
relNoteLen=$(stat -c '%s' $RELEASE_NOTES)
|
||||
if test $relNoteLen -lt 100; then
|
||||
echo "::error::No release notes provided in tag annotation (or tag is not annotated)"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check for magic string '## Quick Start' in head of release notes
|
||||
if ! head -2 ${RELEASE_NOTES} | grep -iq '## Quick Start'; then
|
||||
echo "::error::Release notes seem invalid, quick start section not found."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# We store path to temporary release notes file for later reading, we
|
||||
# need it when creating release.
|
||||
echo "RELEASE_NOTES=${RELEASE_NOTES}" >> $GITHUB_ENV
|
||||
|
||||
- name: Setup Golang
|
||||
uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.0
|
||||
uses: actions/setup-go@4d34df0c2316fe8122ab82dc22947d607c0c91f9 # v4.0.0
|
||||
with:
|
||||
go-version: ${{ env.GOLANG_VERSION }}
|
||||
|
||||
- name: Set environment variables for ldflags
|
||||
id: set_ldflag
|
||||
- name: Setup Git author information
|
||||
run: |
|
||||
echo "KUBECTL_VERSION=$(go list -m k8s.io/client-go | head -n 1 | rev | cut -d' ' -f1 | rev)" >> $GITHUB_ENV
|
||||
echo "GIT_TREE_STATE=$(if [ -z "`git status --porcelain`" ]; then echo "clean" ; else echo "dirty"; fi)" >> $GITHUB_ENV
|
||||
set -ue
|
||||
git config --global user.email "${GIT_EMAIL}"
|
||||
git config --global user.name "${GIT_USERNAME}"
|
||||
|
||||
- name: Run GoReleaser
|
||||
uses: goreleaser/goreleaser-action@336e29918d653399e599bfca99fadc1d7ffbc9f7 # v4.3.0
|
||||
id: run-goreleaser
|
||||
- name: Checkout corresponding release branch
|
||||
run: |
|
||||
set -ue
|
||||
echo "Switching to release branch '${TARGET_BRANCH}'"
|
||||
if ! git checkout ${TARGET_BRANCH}; then
|
||||
echo "::error::Checking out release branch '${TARGET_BRANCH}' for target version '${TARGET_VERSION}' (tagged '${RELEASE_TAG}') failed. Does it exist in repo?"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Create VERSION information
|
||||
run: |
|
||||
set -ue
|
||||
echo "Bumping version from $(cat VERSION) to ${TARGET_VERSION}"
|
||||
echo "${TARGET_VERSION}" > VERSION
|
||||
git commit -m "Bump version to ${TARGET_VERSION}" VERSION
|
||||
|
||||
- name: Generate new set of manifests
|
||||
run: |
|
||||
set -ue
|
||||
make install-codegen-tools-local
|
||||
|
||||
# We install kustomize in the dist directory
|
||||
echo "/home/runner/work/argo-cd/argo-cd/dist" >> $GITHUB_PATH
|
||||
|
||||
make manifests-local VERSION=${TARGET_VERSION}
|
||||
git diff
|
||||
git commit manifests/ -m "Bump version to ${TARGET_VERSION}"
|
||||
|
||||
- name: Create the release tag
|
||||
run: |
|
||||
set -ue
|
||||
echo "Creating release ${RELEASE_TAG}"
|
||||
git tag ${RELEASE_TAG}
|
||||
|
||||
- name: Login to docker repositories
|
||||
env:
|
||||
DOCKER_USERNAME: ${{ secrets.RELEASE_DOCKERHUB_USERNAME }}
|
||||
DOCKER_TOKEN: ${{ secrets.RELEASE_DOCKERHUB_TOKEN }}
|
||||
QUAY_USERNAME: ${{ secrets.RELEASE_QUAY_USERNAME }}
|
||||
QUAY_TOKEN: ${{ secrets.RELEASE_QUAY_TOKEN }}
|
||||
run: |
|
||||
set -ue
|
||||
docker login quay.io --username "${QUAY_USERNAME}" --password-stdin <<< "${QUAY_TOKEN}"
|
||||
# Remove the following when Docker Hub is gone
|
||||
docker login --username "${DOCKER_USERNAME}" --password-stdin <<< "${DOCKER_TOKEN}"
|
||||
if: ${{ env.DRY_RUN != 'true' }}
|
||||
|
||||
- uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # v2.1.0
|
||||
- uses: docker/setup-buildx-action@f03ac48505955848960e80bbb68046aa35c7b9e7 # v2.4.1
|
||||
- name: Build and push Docker image for release
|
||||
run: |
|
||||
set -ue
|
||||
git clean -fd
|
||||
mkdir -p dist/
|
||||
docker buildx build --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le --sbom=false --provenance=false --push -t ${IMAGE_NAMESPACE}/argocd:v${TARGET_VERSION} -t argoproj/argocd:v${TARGET_VERSION} .
|
||||
make release-cli
|
||||
make checksums
|
||||
chmod +x ./dist/argocd-linux-amd64
|
||||
./dist/argocd-linux-amd64 version --client
|
||||
if: ${{ env.DRY_RUN != 'true' }}
|
||||
|
||||
- name: Install cosign
|
||||
uses: sigstore/cosign-installer@c3667d99424e7e6047999fb6246c0da843953c65 # v3.0.1
|
||||
with:
|
||||
version: latest
|
||||
args: release --clean --timeout 55m
|
||||
cosign-release: 'v1.13.1'
|
||||
|
||||
- name: Install crane to get digest of image
|
||||
uses: imjasonh/setup-crane@00c9e93efa4e1138c9a7a5c594acd6c75a2fbf0c
|
||||
|
||||
- name: Get digest of image
|
||||
run: |
|
||||
echo "IMAGE_DIGEST=$(crane digest quay.io/argoproj/argocd:v${TARGET_VERSION})" >> $GITHUB_ENV
|
||||
|
||||
- name: Sign Argo CD container images and assets
|
||||
run: |
|
||||
cosign sign --key env://COSIGN_PRIVATE_KEY ${IMAGE_NAMESPACE}/argocd@${{ env.IMAGE_DIGEST }}
|
||||
cosign sign-blob --key env://COSIGN_PRIVATE_KEY ./dist/argocd-${TARGET_VERSION}-checksums.txt > ./dist/argocd-${TARGET_VERSION}-checksums.sig
|
||||
# Retrieves the public key to release as an asset
|
||||
cosign public-key --key env://COSIGN_PRIVATE_KEY > ./dist/argocd-cosign.pub
|
||||
env:
|
||||
COSIGN_PRIVATE_KEY: ${{secrets.COSIGN_PRIVATE_KEY}}
|
||||
COSIGN_PASSWORD: ${{secrets.COSIGN_PASSWORD}}
|
||||
if: ${{ env.DRY_RUN != 'true' }}
|
||||
|
||||
- name: Read release notes file
|
||||
id: release-notes
|
||||
uses: juliangruber/read-file-action@02bbba9876a8f870efd4ad64e3b9088d3fb94d4b # v1.1.6
|
||||
with:
|
||||
path: ${{ env.RELEASE_NOTES }}
|
||||
|
||||
- name: Push changes to release branch
|
||||
run: |
|
||||
set -ue
|
||||
git push origin ${TARGET_BRANCH}
|
||||
git push origin ${RELEASE_TAG}
|
||||
|
||||
- name: Dry run GitHub release
|
||||
uses: actions/create-release@0cb9c9b65d5d1901c1f53e5e66eaf4afd303e70e # v1.1.4
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
KUBECTL_VERSION: ${{ env.KUBECTL_VERSION }}
|
||||
GIT_TREE_STATE: ${{ env.GIT_TREE_STATE }}
|
||||
|
||||
- name: Generate subject for provenance
|
||||
id: hash
|
||||
env:
|
||||
ARTIFACTS: "${{ steps.run-goreleaser.outputs.artifacts }}"
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
hashes=$(echo $ARTIFACTS | jq --raw-output '.[] | {name, "digest": (.extra.Digest // .extra.Checksum)} | select(.digest) | {digest} + {name} | join(" ") | sub("^sha256:";"")' | base64 -w0)
|
||||
if test "$hashes" = ""; then # goreleaser < v1.13.0
|
||||
checksum_file=$(echo "$ARTIFACTS" | jq -r '.[] | select (.type=="Checksum") | .path')
|
||||
hashes=$(cat $checksum_file | base64 -w0)
|
||||
fi
|
||||
echo "hashes=$hashes" >> $GITHUB_OUTPUT
|
||||
|
||||
goreleaser-provenance:
|
||||
needs: [goreleaser]
|
||||
permissions:
|
||||
actions: read # for detecting the Github Actions environment
|
||||
id-token: write # Needed for provenance signing and ID
|
||||
contents: write # Needed for release uploads
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
# Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
|
||||
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.7.0
|
||||
with:
|
||||
base64-subjects: "${{ needs.goreleaser.outputs.hashes }}"
|
||||
provenance-name: "argocd-cli.intoto.jsonl"
|
||||
upload-assets: true
|
||||
|
||||
generate-sbom:
|
||||
name: Create SBOM and generate hash
|
||||
needs:
|
||||
- argocd-image
|
||||
- goreleaser
|
||||
permissions:
|
||||
contents: write # Needed for release uploads
|
||||
outputs:
|
||||
hashes: ${{ steps.sbom-hash.outputs.hashes}}
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.2.0
|
||||
id: create_release
|
||||
with:
|
||||
fetch-depth: 0
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Setup Golang
|
||||
uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1
|
||||
with:
|
||||
go-version: ${{ env.GOLANG_VERSION }}
|
||||
tag_name: ${{ env.RELEASE_TAG }}
|
||||
release_name: ${{ env.RELEASE_TAG }}
|
||||
draft: ${{ env.DRAFT_RELEASE }}
|
||||
prerelease: ${{ env.PRE_RELEASE }}
|
||||
body: ${{ steps.release-notes.outputs.content }}
|
||||
if: ${{ env.DRY_RUN == 'true' }}
|
||||
|
||||
- name: Generate SBOM (spdx)
|
||||
id: spdx-builder
|
||||
@@ -160,7 +277,7 @@ jobs:
|
||||
# managers (gomod, yarn, npm).
|
||||
PROJECT_FOLDERS: ".,./ui"
|
||||
# full qualified name of the docker image to be inspected
|
||||
DOCKER_IMAGE: quay.io/argoproj/argocd:${{ github.ref_name }}
|
||||
DOCKER_IMAGE: ${{env.IMAGE_NAMESPACE}}/argocd:v${{env.TARGET_VERSION}}
|
||||
run: |
|
||||
yarn install --cwd ./ui
|
||||
go install github.com/spdx/spdx-sbom-generator/cmd/generator@$SPDX_GEN_VERSION
|
||||
@@ -178,116 +295,43 @@ jobs:
|
||||
fi
|
||||
|
||||
cd /tmp && tar -zcf sbom.tar.gz *.spdx
|
||||
|
||||
- name: Generate SBOM hash
|
||||
shell: bash
|
||||
id: sbom-hash
|
||||
if: ${{ env.DRY_RUN != 'true' }}
|
||||
|
||||
- name: Sign sbom
|
||||
run: |
|
||||
# sha256sum generates sha256 hash for sbom.
|
||||
# base64 -w0 encodes to base64 and outputs on a single line.
|
||||
# sha256sum /tmp/sbom.tar.gz ... | base64 -w0
|
||||
echo "hashes=$(sha256sum /tmp/sbom.tar.gz | base64 -w0)" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- name: Upload SBOM
|
||||
cosign sign-blob --key env://COSIGN_PRIVATE_KEY /tmp/sbom.tar.gz > /tmp/sbom.tar.gz.sig
|
||||
env:
|
||||
COSIGN_PRIVATE_KEY: ${{secrets.COSIGN_PRIVATE_KEY}}
|
||||
COSIGN_PASSWORD: ${{secrets.COSIGN_PASSWORD}}
|
||||
if: ${{ env.DRY_RUN != 'true' }}
|
||||
|
||||
- name: Create GitHub release
|
||||
uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 # v0.1.15
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
name: ${{ env.RELEASE_TAG }}
|
||||
tag_name: ${{ env.RELEASE_TAG }}
|
||||
draft: ${{ env.DRAFT_RELEASE }}
|
||||
prerelease: ${{ env.PRE_RELEASE }}
|
||||
body: ${{ steps.release-notes.outputs.content }} # Pre-pended to the generated notes
|
||||
files: |
|
||||
dist/argocd-*
|
||||
/tmp/sbom.tar.gz
|
||||
|
||||
sbom-provenance:
|
||||
needs: [generate-sbom]
|
||||
permissions:
|
||||
actions: read # for detecting the Github Actions environment
|
||||
id-token: write # Needed for provenance signing and ID
|
||||
contents: write # Needed for release uploads
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
# Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
|
||||
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.7.0
|
||||
with:
|
||||
base64-subjects: "${{ needs.generate-sbom.outputs.hashes }}"
|
||||
provenance-name: "argocd-sbom.intoto.jsonl"
|
||||
upload-assets: true
|
||||
|
||||
post-release:
|
||||
needs:
|
||||
- argocd-image
|
||||
- goreleaser
|
||||
- generate-sbom
|
||||
permissions:
|
||||
contents: write # Needed to push commit to update stable tag
|
||||
pull-requests: write # Needed to create PR for VERSION update.
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.2.0
|
||||
with:
|
||||
fetch-depth: 0
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
/tmp/sbom.tar.gz.sig
|
||||
if: ${{ env.DRY_RUN != 'true' }}
|
||||
|
||||
- name: Setup Git author information
|
||||
- name: Update homebrew formula
|
||||
env:
|
||||
HOMEBREW_TOKEN: ${{ secrets.RELEASE_HOMEBREW_TOKEN }}
|
||||
uses: dawidd6/action-homebrew-bump-formula@02e79d9da43d79efa846d73695b6052cbbdbf48a # v3.8.3
|
||||
with:
|
||||
token: ${{env.HOMEBREW_TOKEN}}
|
||||
formula: argocd
|
||||
if: ${{ env.HOMEBREW_TOKEN != '' && env.UPDATE_HOMEBREW == 'true' && env.PRE_RELEASE != 'true' }}
|
||||
|
||||
- name: Delete original request tag from repository
|
||||
run: |
|
||||
set -ue
|
||||
git config --global user.email 'ci@argoproj.com'
|
||||
git config --global user.name 'CI'
|
||||
|
||||
- name: Check if tag is the latest version and not a pre-release
|
||||
run: |
|
||||
set -xue
|
||||
# Fetch all tag information
|
||||
git fetch --prune --tags --force
|
||||
|
||||
LATEST_TAG=$(git -c 'versionsort.suffix=-rc' tag --list --sort=version:refname | tail -n1)
|
||||
|
||||
PRE_RELEASE=false
|
||||
# Check if latest tag is a pre-release
|
||||
if echo $LATEST_TAG | grep -E -- '-rc[0-9]+$';then
|
||||
PRE_RELEASE=true
|
||||
fi
|
||||
|
||||
# Ensure latest tag matches github.ref_name & not a pre-release
|
||||
if [[ $LATEST_TAG == ${{ github.ref_name }} ]] && [[ $PRE_RELEASE != 'true' ]];then
|
||||
echo "TAG_STABLE=true" >> $GITHUB_ENV
|
||||
else
|
||||
echo "TAG_STABLE=false" >> $GITHUB_ENV
|
||||
fi
|
||||
|
||||
- name: Update stable tag to latest version
|
||||
run: |
|
||||
git tag -f stable ${{ github.ref_name }}
|
||||
git push -f origin stable
|
||||
if: ${{ env.TAG_STABLE == 'true' }}
|
||||
|
||||
- name: Check to see if VERSION should be updated on master branch
|
||||
run: |
|
||||
set -xue
|
||||
SOURCE_TAG=${{ github.ref_name }}
|
||||
VERSION_REF="${SOURCE_TAG#*v}"
|
||||
if echo "$VERSION_REF" | grep -E -- '^[0-9]+\.[0-9]+\.0-rc1';then
|
||||
VERSION=$(awk 'BEGIN {FS=OFS="."} {$2++; print}' <<< "${VERSION_REF%-rc1}")
|
||||
echo "Updating VERSION to: $VERSION"
|
||||
echo "UPDATE_VERSION=true" >> $GITHUB_ENV
|
||||
echo "NEW_VERSION=$VERSION" >> $GITHUB_ENV
|
||||
else
|
||||
echo "Not updating VERSION"
|
||||
echo "UPDATE_VERSION=false" >> $GITHUB_ENV
|
||||
fi
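To make the branching above concrete: only the first RC of a new minor (a `*.*.0-rc1` tag) triggers a VERSION bump on master, and the value written is the *next* minor. A small worked example with illustrative tag names:

```shell
for SOURCE_TAG in v2.8.0-rc1 v2.8.0-rc2 v2.8.1 v2.9.0-rc1; do
  VERSION_REF="${SOURCE_TAG#*v}"                              # drop the leading "v"
  if echo "$VERSION_REF" | grep -qE -- '^[0-9]+\.[0-9]+\.0-rc1'; then
    # e.g. 2.8.0-rc1 -> strip "-rc1" -> 2.8.0 -> bump the minor field -> 2.9.0
    VERSION=$(awk 'BEGIN {FS=OFS="."} {$2++; print}' <<< "${VERSION_REF%-rc1}")
    echo "$SOURCE_TAG => VERSION on master becomes $VERSION"
  else
    echo "$SOURCE_TAG => VERSION on master is left unchanged"
  fi
done
```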
- name: Update VERSION on master branch
|
||||
run: |
|
||||
echo ${{ env.NEW_VERSION }} > VERSION
|
||||
if: ${{ env.UPDATE_VERSION == 'true' }}
|
||||
|
||||
- name: Create PR to update VERSION on master branch
|
||||
uses: peter-evans/create-pull-request@153407881ec5c347639a548ade7d8ad1d6740e38 # v5.0.2
|
||||
with:
|
||||
commit-message: Bump version in master
|
||||
title: "chore: Bump version in master"
|
||||
body: All images built from master should indicate which version we are on track for.
|
||||
signoff: true
|
||||
branch: update-version
|
||||
branch-suffix: random
|
||||
base: master
|
||||
if: ${{ env.UPDATE_VERSION == 'true' }}
|
||||
git push --delete origin ${SOURCE_TAG}
|
||||
if: ${{ always() }}
|
||||
|
||||
67 .github/workflows/scorecard.yaml vendored
@@ -1,67 +0,0 @@
|
||||
name: Scorecards supply-chain security
|
||||
on:
|
||||
# Only the default branch is supported.
|
||||
branch_protection_rule:
|
||||
schedule:
|
||||
- cron: "39 9 * * 2"
|
||||
push:
|
||||
branches: ["master"]
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
# Declare default permissions as read only.
|
||||
permissions: read-all
|
||||
|
||||
jobs:
|
||||
analysis:
|
||||
name: Scorecards analysis
|
||||
runs-on: ubuntu-22.04
|
||||
permissions:
|
||||
# Needed to upload the results to code-scanning dashboard.
|
||||
security-events: write
|
||||
# Used to receive a badge. (Upcoming feature)
|
||||
id-token: write
|
||||
# Needed for private repositories.
|
||||
contents: read
|
||||
actions: read
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
|
||||
steps:
|
||||
- name: "Checkout code"
|
||||
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: "Run analysis"
|
||||
uses: ossf/scorecard-action@08b4669551908b1024bb425080c797723083c031 # v2.2.0
|
||||
with:
|
||||
results_file: results.sarif
|
||||
results_format: sarif
|
||||
# (Optional) Read-only PAT token. Uncomment the `repo_token` line below if:
|
||||
# - you want to enable the Branch-Protection check on a *public* repository, or
|
||||
# - you are installing Scorecards on a *private* repository
|
||||
# To create the PAT, follow the steps in https://github.com/ossf/scorecard-action#authentication-with-pat.
|
||||
# repo_token: ${{ secrets.SCORECARD_READ_TOKEN }}
|
||||
|
||||
# Publish the results for public repositories to enable scorecard badges. For more details, see
|
||||
# https://github.com/ossf/scorecard-action#publishing-results.
|
||||
# For private repositories, `publish_results` will automatically be set to `false`, regardless
|
||||
# of the value entered here.
|
||||
publish_results: true
|
||||
|
||||
# Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
|
||||
# format to the repository Actions tab.
|
||||
- name: "Upload artifact"
|
||||
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
|
||||
with:
|
||||
name: SARIF file
|
||||
path: results.sarif
|
||||
retention-days: 5
|
||||
|
||||
# Upload the results to GitHub's code scanning dashboard.
|
||||
- name: "Upload to code-scanning"
|
||||
uses: github/codeql-action/upload-sarif@3ebbd71c74ef574dbc558c82f70e52732c8b44fe # v2.2.1
|
||||
with:
|
||||
sarif_file: results.sarif
|
||||
17 .github/workflows/update-snyk.yaml vendored
@@ -1,6 +1,5 @@
|
||||
name: Snyk report update
|
||||
on:
|
||||
workflow_dispatch: {}
|
||||
schedule:
|
||||
- cron: '0 0 * * 0' # midnight every Sunday
|
||||
|
||||
@@ -10,27 +9,23 @@ permissions:
|
||||
jobs:
|
||||
snyk-report:
|
||||
permissions:
|
||||
contents: write
|
||||
pull-requests: write
|
||||
contents: write # To push snyk reports
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
name: Update Snyk report in the docs directory
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
|
||||
uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
|
||||
with:
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Build reports
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}
|
||||
run: |
|
||||
make snyk-report
|
||||
pr_branch="snyk-update-$(echo $RANDOM | md5sum | head -c 20)"
|
||||
git checkout -b "$pr_branch"
|
||||
git config --global user.email 'ci@argoproj.com'
|
||||
git config --global user.name 'CI'
|
||||
git add docs/snyk
|
||||
git commit -m "[Bot] docs: Update Snyk reports" --signoff
|
||||
git push --set-upstream origin "$pr_branch"
|
||||
gh pr create -B master -H "$pr_branch" --title '[Bot] docs: Update Snyk report' --body ''
|
||||
git add docs/snyk/index.md
|
||||
git add docs/snyk/*/*.html
|
||||
git commit -m "[Bot] Update Snyk reports"
|
||||
git push
|
||||
|
||||
2 .gitignore vendored
@@ -17,8 +17,6 @@ test-results
|
||||
node_modules/
|
||||
.kube/
|
||||
./test/cmp/*.sock
|
||||
.envrc.remote
|
||||
.*.swp
|
||||
|
||||
# ignore built binaries
|
||||
cmd/argocd/argocd
|
||||
|
||||
4 .gitpod.Dockerfile vendored
@@ -1,4 +1,4 @@
|
||||
FROM gitpod/workspace-full@sha256:d5787229cd062aceae91109f1690013d3f25062916492fb7f444d13de3186178
|
||||
FROM gitpod/workspace-full
|
||||
|
||||
USER root
|
||||
|
||||
@@ -13,8 +13,6 @@ ENV GOCACHE=/go-build-cache
|
||||
RUN apt-get install redis-server -y
|
||||
RUN go install github.com/mattn/goreman@latest
|
||||
|
||||
RUN chown -R gitpod:gitpod /go-build-cache
|
||||
|
||||
USER gitpod
|
||||
|
||||
ENV ARGOCD_REDIS_LOCAL=true
|
||||
|
||||
121 .goreleaser.yaml
@@ -1,121 +0,0 @@
|
||||
project_name: argocd
|
||||
|
||||
before:
|
||||
hooks:
|
||||
- go mod download
|
||||
- make build-ui
|
||||
|
||||
builds:
|
||||
- id: argocd-cli
|
||||
main: ./cmd
|
||||
binary: argocd-{{ .Os}}-{{ .Arch}}
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
flags:
|
||||
- -v
|
||||
ldflags:
|
||||
- -X github.com/argoproj/argo-cd/v2/common.version={{ .Version }}
|
||||
- -X github.com/argoproj/argo-cd/v2/common.buildDate={{ .Date }}
|
||||
- -X github.com/argoproj/argo-cd/v2/common.gitCommit={{ .FullCommit }}
|
||||
- -X github.com/argoproj/argo-cd/v2/common.gitTreeState={{ .Env.GIT_TREE_STATE }}
|
||||
- -X github.com/argoproj/argo-cd/v2/common.kubectlVersion={{ .Env.KUBECTL_VERSION }}
|
||||
- -extldflags="-static"
|
||||
goos:
|
||||
- linux
|
||||
- darwin
|
||||
- windows
|
||||
goarch:
|
||||
- amd64
|
||||
- arm64
|
||||
- s390x
|
||||
- ppc64le
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: s390x
|
||||
- goos: darwin
|
||||
goarch: ppc64le
|
||||
- goos: windows
|
||||
goarch: s390x
|
||||
- goos: windows
|
||||
goarch: ppc64le
|
||||
- goos: windows
|
||||
goarch: arm64
|
||||
|
||||
archives:
|
||||
- id: argocd-archive
|
||||
builds:
|
||||
- argocd-cli
|
||||
name_template: |-
|
||||
{{ .ProjectName }}-{{ .Os }}-{{ .Arch }}
|
||||
format: binary
|
||||
|
||||
checksum:
|
||||
name_template: 'cli_checksums.txt'
|
||||
algorithm: sha256
|
||||
|
||||
release:
|
||||
prerelease: auto
|
||||
draft: false
|
||||
header: |
|
||||
## Quick Start
|
||||
|
||||
### Non-HA:
|
||||
|
||||
```shell
|
||||
kubectl create namespace argocd
|
||||
kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/{{.Tag}}/manifests/install.yaml
|
||||
```
|
||||
|
||||
### HA:
|
||||
|
||||
```shell
|
||||
kubectl create namespace argocd
|
||||
kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/{{.Tag}}/manifests/ha/install.yaml
|
||||
```
|
||||
|
||||
## Release Signatures and Provenance
|
||||
|
||||
All Argo CD container images are signed by cosign. A Provenance is generated for container images and CLI binaries which meet the SLSA Level 3 specifications. See the [documentation](https://argo-cd.readthedocs.io/en/stable/operator-manual/signed-release-assets) on how to verify.
|
||||
|
||||
|
||||
## Upgrading
|
||||
|
||||
If upgrading from a different minor version, be sure to read the [upgrading](https://argo-cd.readthedocs.io/en/stable/operator-manual/upgrading/overview/) documentation.
|
||||
footer: |
|
||||
**Full Changelog**: https://github.com/argoproj/argo-cd/compare/{{ .PreviousTag }}...{{ .Tag }}
|
||||
|
||||
<a href="https://argoproj.github.io/cd/"><img src="https://raw.githubusercontent.com/argoproj/argo-site/master/content/pages/cd/gitops-cd.png" width="25%" ></a>
|
||||
|
||||
|
||||
snapshot: #### To be removed for PR
|
||||
name_template: "2.6.0"
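The `snapshot` stanza (flagged above as temporary) is what allows the config to be exercised without pushing a tag. A hedged sketch of a local dry run follows; the cleanup flag depends on the installed GoReleaser version (`--clean` on newer releases, `--rm-dist` on older ones), and the environment variables mirror the `.Env` references in the ldflags above.

```shell
# Run from the repository root; snapshot mode builds all targets without publishing.
export KUBECTL_VERSION=$(go list -m k8s.io/client-go | awk '{print $2}')
export GIT_TREE_STATE=clean
goreleaser release --snapshot --clean
```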
changelog:
|
||||
use:
|
||||
github
|
||||
sort: asc
|
||||
abbrev: 0
|
||||
groups: # Regex use RE2 syntax as defined here: https://github.com/google/re2/wiki/Syntax.
|
||||
- title: 'Features'
|
||||
regexp: '^.*?feat(\([[:word:]]+\))??!?:.+$'
|
||||
order: 100
|
||||
- title: 'Bug fixes'
|
||||
regexp: '^.*?fix(\([[:word:]]+\))??!?:.+$'
|
||||
order: 200
|
||||
- title: 'Documentation'
|
||||
regexp: '^.*?docs(\([[:word:]]+\))??!?:.+$'
|
||||
order: 300
|
||||
- title: 'Dependency updates'
|
||||
regexp: '^.*?(feat|fix|chore)\(deps?.+\)!?:.+$'
|
||||
order: 400
|
||||
- title: 'Other work'
|
||||
order: 999
|
||||
filters:
|
||||
exclude:
|
||||
- '^test:'
|
||||
- '^.*?Bump(\([[:word:]]+\))?.+$'
|
||||
- '^.*?[Bot](\([[:word:]]+\))?.+$'
|
||||
|
||||
|
||||
# yaml-language-server: $schema=https://goreleaser.com/static/schema.json
|
||||
|
||||
@@ -4,8 +4,4 @@ mkdocs:
|
||||
fail_on_warning: false
|
||||
python:
|
||||
install:
|
||||
- requirements: docs/requirements.txt
|
||||
build:
|
||||
os: "ubuntu-22.04"
|
||||
tools:
|
||||
python: "3.7"
|
||||
- requirements: docs/requirements.txt
|
||||
24 Dockerfile
@@ -1,10 +1,10 @@
|
||||
ARG BASE_IMAGE=docker.io/library/ubuntu:22.04@sha256:ac58ff7fe25edc58bdf0067ca99df00014dbd032e2246d30a722fa348fd799a5
|
||||
ARG BASE_IMAGE=docker.io/library/ubuntu:22.04
|
||||
####################################################################################################
|
||||
# Builder image
|
||||
# Initial stage which pulls and prepares build dependencies and CLI tooling we need for our final image
|
||||
# Also used as the image in CI jobs so needs all dependencies
|
||||
####################################################################################################
|
||||
FROM docker.io/library/golang:1.20.6@sha256:8e5a0067e6b387263a01d06b91ef1a983f90e9638564f6e25392fd2695f7ab6c AS builder
|
||||
FROM docker.io/library/golang:1.19 AS builder
|
||||
|
||||
RUN echo 'deb http://deb.debian.org/debian buster-backports main' >> /etc/apt/sources.list
|
||||
|
||||
@@ -36,8 +36,6 @@ RUN ./install.sh helm-linux && \
|
||||
####################################################################################################
|
||||
FROM $BASE_IMAGE AS argocd-base
|
||||
|
||||
LABEL org.opencontainers.image.source="https://github.com/argoproj/argo-cd"
|
||||
|
||||
USER root
|
||||
|
||||
ENV ARGOCD_USER_ID=999
|
||||
@@ -83,7 +81,7 @@ WORKDIR /home/argocd
|
||||
####################################################################################################
|
||||
# Argo CD UI stage
|
||||
####################################################################################################
|
||||
FROM --platform=$BUILDPLATFORM docker.io/library/node:20.4.0@sha256:b3ca7d32f0c12291df6e45a914d4ee60011a3fce4a978df5e609e356a4a2cb88 AS argocd-ui
|
||||
FROM --platform=$BUILDPLATFORM docker.io/library/node:12.18.4 AS argocd-ui
|
||||
|
||||
WORKDIR /src
|
||||
COPY ["ui/package.json", "ui/yarn.lock", "./"]
|
||||
@@ -101,7 +99,7 @@ RUN HOST_ARCH=$TARGETARCH NODE_ENV='production' NODE_ONLINE_ENV='online' NODE_OP
|
||||
####################################################################################################
|
||||
# Argo CD Build stage which performs the actual build of Argo CD binaries
|
||||
####################################################################################################
|
||||
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.20.6@sha256:8e5a0067e6b387263a01d06b91ef1a983f90e9638564f6e25392fd2695f7ab6c AS argocd-build
|
||||
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.19 AS argocd-build
|
||||
|
||||
WORKDIR /go/src/github.com/argoproj/argo-cd
|
||||
|
||||
@@ -113,18 +111,7 @@ COPY . .
|
||||
COPY --from=argocd-ui /src/dist/app /go/src/github.com/argoproj/argo-cd/ui/dist/app
|
||||
ARG TARGETOS
|
||||
ARG TARGETARCH
|
||||
# These build args are optional; if not specified the defaults will be taken from the Makefile
|
||||
ARG GIT_TAG
|
||||
ARG BUILD_DATE
|
||||
ARG GIT_TREE_STATE
|
||||
ARG GIT_COMMIT
|
||||
RUN GIT_COMMIT=$GIT_COMMIT \
|
||||
GIT_TREE_STATE=$GIT_TREE_STATE \
|
||||
GIT_TAG=$GIT_TAG \
|
||||
BUILD_DATE=$BUILD_DATE \
|
||||
GOOS=$TARGETOS \
|
||||
GOARCH=$TARGETARCH \
|
||||
make argocd-all
|
||||
RUN GOOS=$TARGETOS GOARCH=$TARGETARCH make argocd-all
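Since these build args are optional, a plain `docker build` works, but the version metadata can be pinned explicitly when needed. A hedged example invocation (the tag value and image name are illustrative, not the project's release commands):

```shell
DOCKER_BUILDKIT=1 docker build \
  --build-arg GIT_TAG=v2.8.0 \
  --build-arg GIT_COMMIT="$(git rev-parse HEAD)" \
  --build-arg GIT_TREE_STATE=clean \
  --build-arg BUILD_DATE="$(date -u +'%Y-%m-%dT%H:%M:%SZ')" \
  -t argocd:local .
```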
####################################################################################################
|
||||
# Final image
|
||||
@@ -143,4 +130,3 @@ RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-server && \
|
||||
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-k8s-auth
|
||||
|
||||
USER $ARGOCD_USER_ID
|
||||
ENTRYPOINT ["/usr/bin/tini", "--"]
|
||||
|
||||
122 Makefile
@@ -9,13 +9,11 @@ GEN_RESOURCES_CLI_NAME=argocd-resources-gen
|
||||
HOST_OS:=$(shell go env GOOS)
|
||||
HOST_ARCH:=$(shell go env GOARCH)
|
||||
|
||||
TARGET_ARCH?=linux/amd64
|
||||
|
||||
VERSION=$(shell cat ${CURRENT_DIR}/VERSION)
|
||||
BUILD_DATE:=$(if $(BUILD_DATE),$(BUILD_DATE),$(shell date -u +'%Y-%m-%dT%H:%M:%SZ'))
|
||||
GIT_COMMIT:=$(if $(GIT_COMMIT),$(GIT_COMMIT),$(shell git rev-parse HEAD))
|
||||
GIT_TAG:=$(if $(GIT_TAG),$(GIT_TAG),$(shell if [ -z "`git status --porcelain`" ]; then git describe --exact-match --tags HEAD 2>/dev/null; fi))
|
||||
GIT_TREE_STATE:=$(if $(GIT_TREE_STATE),$(GIT_TREE_STATE),$(shell if [ -z "`git status --porcelain`" ]; then echo "clean" ; else echo "dirty"; fi))
|
||||
BUILD_DATE=$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')
|
||||
GIT_COMMIT=$(shell git rev-parse HEAD)
|
||||
GIT_TAG=$(shell if [ -z "`git status --porcelain`" ]; then git describe --exact-match --tags HEAD 2>/dev/null; fi)
|
||||
GIT_TREE_STATE=$(shell if [ -z "`git status --porcelain`" ]; then echo "clean" ; else echo "dirty"; fi)
|
||||
VOLUME_MOUNT=$(shell if test "$(go env GOOS)" = "darwin"; then echo ":delegated"; elif test selinuxenabled; then echo ":delegated"; else echo ""; fi)
|
||||
KUBECTL_VERSION=$(shell go list -m k8s.io/client-go | head -n 1 | rev | cut -d' ' -f1 | rev)
|
||||
|
||||
@@ -66,20 +64,13 @@ else
|
||||
DOCKER_SRC_MOUNT="$(PWD):/go/src/github.com/argoproj/argo-cd$(VOLUME_MOUNT)"
|
||||
endif
|
||||
|
||||
# User and group IDs to map to the test container
|
||||
CONTAINER_UID=$(shell id -u)
|
||||
CONTAINER_GID=$(shell id -g)
|
||||
|
||||
# Set SUDO to sudo to run privileged commands with sudo
|
||||
SUDO?=
|
||||
|
||||
# Runs any command in the argocd-test-utils container in server mode
|
||||
# Server mode container will start with uid 0 and drop privileges during runtime
|
||||
define run-in-test-server
|
||||
$(SUDO) docker run --rm -it \
|
||||
docker run --rm -it \
|
||||
--name argocd-test-server \
|
||||
-u $(CONTAINER_UID):$(CONTAINER_GID) \
|
||||
-e USER_ID=$(CONTAINER_UID) \
|
||||
-u $(shell id -u):$(shell id -g) \
|
||||
-e USER_ID=$(shell id -u) \
|
||||
-e HOME=/home/user \
|
||||
-e GOPATH=/go \
|
||||
-e GOCACHE=/tmp/go-build-cache \
|
||||
@@ -107,9 +98,9 @@ endef
|
||||
|
||||
# Runs any command in the argocd-test-utils container in client mode
|
||||
define run-in-test-client
|
||||
$(SUDO) docker run --rm -it \
|
||||
docker run --rm -it \
|
||||
--name argocd-test-client \
|
||||
-u $(CONTAINER_UID):$(CONTAINER_GID) \
|
||||
-u $(shell id -u):$(shell id -g) \
|
||||
-e HOME=/home/user \
|
||||
-e GOPATH=/go \
|
||||
-e ARGOCD_E2E_K3S=$(ARGOCD_E2E_K3S) \
|
||||
@@ -128,7 +119,7 @@ endef
|
||||
|
||||
#
|
||||
define exec-in-test-server
|
||||
$(SUDO) docker exec -it -u $(CONTAINER_UID):$(CONTAINER_GID) -e ARGOCD_E2E_RECORD=$(ARGOCD_E2E_RECORD) -e ARGOCD_E2E_K3S=$(ARGOCD_E2E_K3S) argocd-test-server $(1)
|
||||
docker exec -it -u $(shell id -u):$(shell id -g) -e ARGOCD_E2E_RECORD=$(ARGOCD_E2E_RECORD) -e ARGOCD_E2E_K3S=$(ARGOCD_E2E_K3S) argocd-test-server $(1)
|
||||
endef
|
||||
|
||||
PATH:=$(PATH):$(PWD)/hack
|
||||
@@ -148,8 +139,7 @@ override LDFLAGS += \
|
||||
-X ${PACKAGE}.buildDate=${BUILD_DATE} \
|
||||
-X ${PACKAGE}.gitCommit=${GIT_COMMIT} \
|
||||
-X ${PACKAGE}.gitTreeState=${GIT_TREE_STATE}\
|
||||
-X ${PACKAGE}.kubectlVersion=${KUBECTL_VERSION}\
|
||||
-X "${PACKAGE}.extraBuildInfo=${EXTRA_BUILD_INFO}"
|
||||
-X ${PACKAGE}.kubectlVersion=${KUBECTL_VERSION}
|
||||
|
||||
ifeq (${STATIC_BUILD}, true)
|
||||
override LDFLAGS += -extldflags "-static"
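The `-X` flags above are plain Go linker variable injection: each one overwrites a package-level string at link time, which is how the binary learns its version, build date and kubectl version without any generated source. A minimal, self-contained sketch of the mechanism (the throwaway module path and version string are made up):

```shell
mkdir -p /tmp/ldflags-demo && cd /tmp/ldflags-demo
go mod init example.com/ldflags-demo >/dev/null

cat > main.go <<'EOF'
package main

import "fmt"

// Overwritten at link time via -ldflags "-X main.version=..."
var version = "unknown"

func main() { fmt.Println("version:", version) }
EOF

go build -ldflags "-X main.version=v2.8.0" -o demo .
./demo   # prints: version: v2.8.0
```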
@@ -222,7 +212,7 @@ clidocsgen: ensure-gopath
|
||||
|
||||
|
||||
.PHONY: codegen-local
|
||||
codegen-local: ensure-gopath mod-vendor-local gogen protogen clientgen openapigen clidocsgen manifests-local notification-docs notification-catalog
|
||||
codegen-local: ensure-gopath mod-vendor-local notification-docs notification-catalog gogen protogen clientgen openapigen clidocsgen manifests-local
|
||||
rm -rf vendor/
|
||||
|
||||
.PHONY: codegen
|
||||
@@ -235,11 +225,11 @@ cli: test-tools-image
|
||||
|
||||
.PHONY: cli-local
|
||||
cli-local: clean-debug
|
||||
CGO_ENABLED=0 GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${CLI_NAME} ./cmd
|
||||
CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${CLI_NAME} ./cmd
|
||||
|
||||
.PHONY: gen-resources-cli-local
|
||||
gen-resources-cli-local: clean-debug
|
||||
CGO_ENABLED=0 GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${GEN_RESOURCES_CLI_NAME} ./hack/gen-resources/cmd
|
||||
CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${GEN_RESOURCES_CLI_NAME} ./hack/gen-resources/cmd
|
||||
|
||||
.PHONY: release-cli
|
||||
release-cli: clean-debug build-ui
|
||||
@@ -254,8 +244,8 @@ release-cli: clean-debug build-ui
|
||||
.PHONY: test-tools-image
|
||||
test-tools-image:
|
||||
ifndef SKIP_TEST_TOOLS_IMAGE
|
||||
$(SUDO) docker build --build-arg UID=$(CONTAINER_UID) -t $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE) -f test/container/Dockerfile .
|
||||
$(SUDO) docker tag $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE) $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE):$(TEST_TOOLS_TAG)
|
||||
docker build --build-arg UID=$(shell id -u) -t $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE) -f test/container/Dockerfile .
|
||||
docker tag $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE) $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE):$(TEST_TOOLS_TAG)
|
||||
endif
|
||||
|
||||
.PHONY: manifests-local
|
||||
@@ -269,23 +259,23 @@ manifests: test-tools-image
|
||||
# consolidated binary for cli, util, server, repo-server, controller
|
||||
.PHONY: argocd-all
|
||||
argocd-all: clean-debug
|
||||
CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${BIN_NAME} ./cmd
|
||||
CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${BIN_NAME} ./cmd
|
||||
|
||||
.PHONY: server
|
||||
server: clean-debug
|
||||
CGO_ENABLED=0 GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-server ./cmd
|
||||
CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-server ./cmd
|
||||
|
||||
.PHONY: repo-server
|
||||
repo-server:
|
||||
CGO_ENABLED=0 GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-repo-server ./cmd
|
||||
CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-repo-server ./cmd
|
||||
|
||||
.PHONY: controller
|
||||
controller:
|
||||
CGO_ENABLED=0 GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-application-controller ./cmd
|
||||
CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-application-controller ./cmd
|
||||
|
||||
.PHONY: build-ui
|
||||
build-ui:
|
||||
DOCKER_BUILDKIT=1 docker build -t argocd-ui --platform=$(TARGET_ARCH) --target argocd-ui .
|
||||
DOCKER_BUILDKIT=1 docker build -t argocd-ui --target argocd-ui .
|
||||
find ./ui/dist -type f -not -name gitkeep -delete
|
||||
docker run -v ${CURRENT_DIR}/ui/dist/app:/tmp/app --rm -t argocd-ui sh -c 'cp -r ./dist/app/* /tmp/app/'
|
||||
|
||||
@@ -296,18 +286,18 @@ ifeq ($(DEV_IMAGE), true)
|
||||
# the dist directory is under .dockerignore.
|
||||
IMAGE_TAG="dev-$(shell git describe --always --dirty)"
|
||||
image: build-ui
|
||||
DOCKER_BUILDKIT=1 docker build --platform=$(TARGET_ARCH) -t argocd-base --target argocd-base .
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd ./cmd
|
||||
DOCKER_BUILDKIT=1 docker build --platform=linux/amd64 -t argocd-base --target argocd-base .
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd ./cmd
|
||||
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-server
|
||||
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-application-controller
|
||||
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-repo-server
|
||||
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-cmp-server
|
||||
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-dex
|
||||
cp Dockerfile.dev dist
|
||||
DOCKER_BUILDKIT=1 docker build --platform=$(TARGET_ARCH) -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) -f dist/Dockerfile.dev dist
|
||||
DOCKER_BUILDKIT=1 docker build --platform=linux/amd64 -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) -f dist/Dockerfile.dev dist
|
||||
else
|
||||
image:
|
||||
DOCKER_BUILDKIT=1 docker build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) --platform=$(TARGET_ARCH) .
|
||||
DOCKER_BUILDKIT=1 docker build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) .
|
||||
endif
|
||||
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) ; fi
|
||||
|
||||
@@ -336,7 +326,7 @@ mod-vendor: test-tools-image
|
||||
mod-vendor-local: mod-download-local
|
||||
go mod vendor
|
||||
|
||||
# Deprecated - replace by install-tools-local
|
||||
# Deprecated - replace by install-local-tools
|
||||
.PHONY: install-lint-tools
|
||||
install-lint-tools:
|
||||
./hack/install.sh lint-tools
|
||||
@@ -352,7 +342,7 @@ lint-local:
|
||||
golangci-lint --version
|
||||
# NOTE: If you get a "Killed" OOM message, try reducing the value of GOGC
|
||||
# See https://github.com/golangci/golangci-lint#memory-usage-of-golangci-lint
|
||||
GOGC=$(ARGOCD_LINT_GOGC) GOMAXPROCS=2 golangci-lint run --enable gofmt --fix --verbose --timeout 3000s --max-issues-per-linter 0 --max-same-issues 0
|
||||
GOGC=$(ARGOCD_LINT_GOGC) GOMAXPROCS=2 golangci-lint run --fix --verbose --timeout 3000s
|
||||
|
||||
.PHONY: lint-ui
|
||||
lint-ui: test-tools-image
|
||||
@@ -371,7 +361,7 @@ build: test-tools-image
|
||||
# Build all Go code (local version)
|
||||
.PHONY: build-local
|
||||
build-local:
|
||||
GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v `go list ./... | grep -v 'resource_customizations\|test/e2e'`
|
||||
go build -v `go list ./... | grep -v 'resource_customizations\|test/e2e'`
|
||||
|
||||
# Run all unit tests
|
||||
#
|
||||
@@ -459,8 +449,6 @@ start-e2e-local: mod-vendor-local dep-ui-local cli-local
|
||||
ARGOCD_IN_CI=$(ARGOCD_IN_CI) \
|
||||
BIN_MODE=$(ARGOCD_BIN_MODE) \
|
||||
ARGOCD_APPLICATION_NAMESPACES=argocd-e2e-external \
|
||||
ARGOCD_APPLICATIONSET_CONTROLLER_NAMESPACES=argocd-e2e-external \
|
||||
ARGOCD_APPLICATIONSET_CONTROLLER_ALLOWED_SCM_PROVIDERS=http://127.0.0.1:8341,http://127.0.0.1:8342,http://127.0.0.1:8343,http://127.0.0.1:8344 \
|
||||
ARGOCD_E2E_TEST=true \
|
||||
goreman -f $(ARGOCD_PROCFILE) start ${ARGOCD_START}
|
||||
|
||||
@@ -584,7 +572,7 @@ list:
|
||||
|
||||
.PHONY: applicationset-controller
|
||||
applicationset-controller:
|
||||
GODEBUG="tarinsecurepath=0,zipinsecurepath=0" CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-applicationset-controller ./cmd
|
||||
CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-applicationset-controller ./cmd
|
||||
|
||||
.PHONY: checksums
|
||||
checksums:
|
||||
@@ -601,55 +589,3 @@ snyk-non-container-tests:
|
||||
.PHONY: snyk-report
|
||||
snyk-report:
|
||||
./hack/snyk-report.sh $(target_branch)
|
||||
|
||||
.PHONY: help
|
||||
help:
|
||||
@echo 'Note: Generally an item w/ (-local) will run inside docker unless you use the -local variant'
|
||||
@echo
|
||||
@echo 'Common targets'
|
||||
@echo
|
||||
@echo 'all -- make cli and image'
|
||||
@echo
|
||||
@echo 'components:'
|
||||
@echo ' applicationset-controller -- applicationset controller'
|
||||
@echo ' cli(-local) -- argocd cli program'
|
||||
@echo ' controller -- controller (orchestrator)'
|
||||
@echo ' repo-server -- repo server (manage repository instances)'
|
||||
@echo ' server -- argocd web application'
|
||||
@echo
|
||||
@echo 'build:'
|
||||
@echo ' image -- make image of the following items'
|
||||
@echo ' build(-local) -- compile go'
|
||||
@echo ' build-docs(-local) -- build docs'
|
||||
@echo ' build-ui -- compile typescript'
|
||||
@echo
|
||||
@echo 'run:'
|
||||
@echo ' run -- run the components locally'
|
||||
@echo ' serve-docs(-local) -- expose the documents for viewing in a browser'
|
||||
@echo
|
||||
@echo 'release:'
|
||||
@echo ' release-cli'
|
||||
@echo ' release-precheck'
|
||||
@echo ' checksums'
|
||||
@echo
|
||||
@echo 'docs:'
|
||||
@echo ' build-docs(-local)'
|
||||
@echo ' serve-docs(-local)'
|
||||
@echo ' notification-docs'
|
||||
@echo ' clidocsgen'
|
||||
@echo
|
||||
@echo 'testing:'
|
||||
@echo ' test(-local)'
|
||||
@echo ' start-e2e(-local)'
|
||||
@echo ' test-e2e(-local)'
|
||||
@echo ' test-race(-local)'
|
||||
@echo
|
||||
@echo 'debug:'
|
||||
@echo ' list -- list all make targets'
|
||||
@echo ' install-tools-local -- install all the tools below'
|
||||
@echo ' install-lint-tools(-local)'
|
||||
@echo
|
||||
@echo 'codegen:'
|
||||
@echo ' codegen(-local) -- if using -local, run the following targets first'
|
||||
@echo ' install-codegen-tools-local -- run this to install the codegen tools'
|
||||
@echo ' install-go-tools-local -- run this to install go libraries for codegen'
|
||||
|
||||
3 OWNERS
@@ -27,6 +27,3 @@ reviewers:
|
||||
- wanghong230
|
||||
- ciiay
|
||||
- saumeya
|
||||
- zachaller
|
||||
- 34fathombelow
|
||||
- alexef
|
||||
|
||||
16 Procfile
@@ -1,12 +1,12 @@
|
||||
controller: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-application-controller $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --otlp-address=${ARGOCD_OTLP_ADDRESS} --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''}"
|
||||
api-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-server $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --disable-auth=${ARGOCD_E2E_DISABLE_AUTH:-'true'} --insecure --dex-server http://localhost:${ARGOCD_E2E_DEX_PORT:-5556} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --port ${ARGOCD_E2E_APISERVER_PORT:-8080} --otlp-address=${ARGOCD_OTLP_ADDRESS} --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''}"
|
||||
dex: sh -c "ARGOCD_BINARY_NAME=argocd-dex go run github.com/argoproj/argo-cd/v2/cmd gendexcfg -o `pwd`/dist/dex.yaml && (test -f dist/dex.yaml || { echo 'Failed to generate dex configuration'; exit 1; }) && docker run --rm -p ${ARGOCD_E2E_DEX_PORT:-5556}:${ARGOCD_E2E_DEX_PORT:-5556} -v `pwd`/dist/dex.yaml:/dex.yaml ghcr.io/dexidp/dex:$(grep "image: ghcr.io/dexidp/dex" manifests/base/dex/argocd-dex-server-deployment.yaml | cut -d':' -f3) dex serve /dex.yaml"
|
||||
redis: bash -c "if [ \"$ARGOCD_REDIS_LOCAL\" = 'true' ]; then redis-server --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; else docker run --rm --name argocd-redis -i -p ${ARGOCD_E2E_REDIS_PORT:-6379}:${ARGOCD_E2E_REDIS_PORT:-6379} docker.io/library/redis:$(grep "image: redis" manifests/base/redis/argocd-redis-deployment.yaml | cut -d':' -f3) --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; fi"
|
||||
repo-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_GNUPGHOME=${ARGOCD_GNUPGHOME:-/tmp/argocd-local/gpg/keys} ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} ARGOCD_GPG_DATA_PATH=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source} ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-repo-server ARGOCD_GPG_ENABLED=${ARGOCD_GPG_ENABLED:-false} $COMMAND --loglevel debug --port ${ARGOCD_E2E_REPOSERVER_PORT:-8081} --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --otlp-address=${ARGOCD_OTLP_ADDRESS}"
|
||||
cmp-server: [ "$ARGOCD_E2E_TEST" = 'true' ] && exit 0 || [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_BINARY_NAME=argocd-cmp-server ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} $COMMAND --config-dir-path ./test/cmp --loglevel debug --otlp-address=${ARGOCD_OTLP_ADDRESS}"
|
||||
controller: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-application-controller $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --otlp-address=${ARGOCD_OTLP_ADDRESS} --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''}"
|
||||
api-server: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-server $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --disable-auth=${ARGOCD_E2E_DISABLE_AUTH:-'true'} --insecure --dex-server http://localhost:${ARGOCD_E2E_DEX_PORT:-5556} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --port ${ARGOCD_E2E_APISERVER_PORT:-8080} --otlp-address=${ARGOCD_OTLP_ADDRESS} --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''}"
|
||||
dex: sh -c "ARGOCD_BINARY_NAME=argocd-dex go run github.com/argoproj/argo-cd/v2/cmd gendexcfg -o `pwd`/dist/dex.yaml && docker run --rm -p ${ARGOCD_E2E_DEX_PORT:-5556}:${ARGOCD_E2E_DEX_PORT:-5556} -v `pwd`/dist/dex.yaml:/dex.yaml ghcr.io/dexidp/dex:$(grep "image: ghcr.io/dexidp/dex" manifests/base/dex/argocd-dex-server-deployment.yaml | cut -d':' -f3) dex serve /dex.yaml"
|
||||
redis: bash -c "if [ \"$ARGOCD_REDIS_LOCAL\" == 'true' ]; then redis-server --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; else docker run --rm --name argocd-redis -i -p ${ARGOCD_E2E_REDIS_PORT:-6379}:${ARGOCD_E2E_REDIS_PORT:-6379} redis:$(grep "image: redis" manifests/base/redis/argocd-redis-deployment.yaml | cut -d':' -f3) --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; fi"
|
||||
repo-server: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_GNUPGHOME=${ARGOCD_GNUPGHOME:-/tmp/argocd-local/gpg/keys} ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} ARGOCD_GPG_DATA_PATH=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source} ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-repo-server ARGOCD_GPG_ENABLED=${ARGOCD_GPG_ENABLED:-false} $COMMAND --loglevel debug --port ${ARGOCD_E2E_REPOSERVER_PORT:-8081} --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --otlp-address=${ARGOCD_OTLP_ADDRESS}"
|
||||
cmp-server: [ "$ARGOCD_E2E_TEST" == 'true' ] && exit 0 || [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_BINARY_NAME=argocd-cmp-server ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} $COMMAND --config-dir-path ./test/cmp --loglevel debug --otlp-address=${ARGOCD_OTLP_ADDRESS}"
|
||||
ui: sh -c 'cd ui && ${ARGOCD_E2E_YARN_CMD:-yarn} start'
|
||||
git-server: test/fixture/testrepos/start-git.sh
|
||||
helm-registry: test/fixture/testrepos/start-helm-registry.sh
|
||||
dev-mounter: [[ "$ARGOCD_E2E_TEST" != "true" ]] && go run hack/dev-mounter/main.go --configmap argocd-ssh-known-hosts-cm=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} --configmap argocd-tls-certs-cm=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} --configmap argocd-gpg-keys-cm=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source}
|
||||
applicationset-controller: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-applicationset-controller $COMMAND --loglevel debug --metrics-addr localhost:12345 --probe-addr localhost:12346 --argocd-repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081}"
|
||||
notification: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_BINARY_NAME=argocd-notifications $COMMAND --loglevel debug"
|
||||
applicationset-controller: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_ASK_PASS_SOCK=/tmp/applicationset-ask-pass.sock ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-applicationset-controller $COMMAND --loglevel debug --metrics-addr localhost:12345 --probe-addr localhost:12346 --argocd-repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081}"
|
||||
notification: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_BINARY_NAME=argocd-notifications $COMMAND --loglevel debug"
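One detail in this Procfile hunk is the switch between `==` and `=` inside the `[ ... ]` tests. Only `=` is defined for the POSIX `test` builtin; `==` happens to work in bash but fails under stricter `/bin/sh` implementations such as dash. A quick illustration, assuming both bash and dash are installed:

```shell
bash -c '[ "$0" = "x" ]  && echo "bash: single = ok"'  x
bash -c '[ "$0" == "x" ] && echo "bash: double == also ok"' x

dash -c '[ "$0" = "x" ]  && echo "dash: single = ok"'  x
dash -c '[ "$0" == "x" ] && echo "dash: never printed"' x   # fails with an "unexpected operator" error
```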
18 README.md
@@ -1,18 +1,4 @@
|
||||
**Releases:**
|
||||
[](https://github.com/argoproj/argo-cd/releases/latest)
|
||||
[](https://artifacthub.io/packages/helm/argo/argo-cd)
|
||||
[](https://slsa.dev)
|
||||
|
||||
**Code:**
|
||||
[](https://github.com/argoproj/argo-cd/actions?query=workflow%3A%22Integration+tests%22)
|
||||
[](https://codecov.io/gh/argoproj/argo-cd)
|
||||
[](https://bestpractices.coreinfrastructure.org/projects/4486)
|
||||
[](https://api.securityscorecards.dev/projects/github.com/argoproj/argo-cd)
|
||||
[](https://app.fossa.com/projects/git%2Bgithub.com%2Fargoproj%2Fargo-cd?ref=badge_shield)
|
||||
|
||||
**Social:**
|
||||
[](https://twitter.com/argoproj)
|
||||
[](https://argoproj.github.io/community/join-slack)
|
||||
[](https://github.com/argoproj/argo-cd/actions?query=workflow%3A%22Integration+tests%22) [](https://argoproj.github.io/community/join-slack) [](https://codecov.io/gh/argoproj/argo-cd) [](https://github.com/argoproj/argo-cd/releases/latest) [](https://bestpractices.coreinfrastructure.org/projects/4486) [](https://twitter.com/argoproj)
|
||||
|
||||
# Argo CD - Declarative Continuous Delivery for Kubernetes
|
||||
|
||||
@@ -82,7 +68,7 @@ Participation in the Argo CD project is governed by the [CNCF Code of Conduct](h
|
||||
1. [Applied GitOps with Argo CD](https://thenewstack.io/applied-gitops-with-argocd/)
|
||||
1. [Solving configuration drift using GitOps with Argo CD](https://www.cncf.io/blog/2020/12/17/solving-configuration-drift-using-gitops-with-argo-cd/)
|
||||
1. [Decentralized GitOps over environments](https://blogs.sap.com/2021/05/06/decentralized-gitops-over-environments/)
|
||||
1. [How GitOps and Operators mark the rise of Infrastructure-As-Software](https://paytmlabs.com/blog/2021/10/how-to-improve-operational-work-with-operators-and-gitops/)
|
||||
1. [Getting Started with ArgoCD for GitOps Deployments](https://youtu.be/AvLuplh1skA)
|
||||
1. [Using Argo CD & Datree for Stable Kubernetes CI/CD Deployments](https://youtu.be/17894DTru2Y)
|
||||
1. [How to create Argo CD Applications Automatically using ApplicationSet? "Automation of GitOps"](https://amralaayassen.medium.com/how-to-create-argocd-applications-automatically-using-applicationset-automation-of-the-gitops-59455eaf4f72)
|
||||
|
||||
|
||||
31 SECURITY.md
@@ -1,6 +1,6 @@
|
||||
# Security Policy for Argo CD
|
||||
|
||||
Version: **v1.5 (2023-03-06)**
|
||||
Version: **v1.4 (2022-01-23)**
|
||||
|
||||
## Preface
|
||||
|
||||
@@ -35,11 +35,13 @@ impact on Argo CD before opening an issue at least roughly.
|
||||
|
||||
## Supported Versions
|
||||
|
||||
We currently support the last 3 minor versions of Argo CD with security and bug fixes.
|
||||
We currently support the most recent release (`N`, e.g. `1.8`) and the release
|
||||
previous to the most recent one (`N-1`, e.g. `1.7`). With the release of
|
||||
`N+1`, `N-1` drops out of support and `N` becomes `N-1`.
|
||||
|
||||
We regularly perform patch releases (e.g. `1.8.5` and `1.7.12`) for the
|
||||
supported versions, which will contain fixes for security vulnerabilities and
|
||||
important bugs. Prior releases might receive critical security fixes on best
|
||||
important bugs. Prior releases might receive critical security fixes on a best
|
||||
effort basis, however, it cannot be guaranteed that security fixes get
|
||||
back-ported to these unsupported versions.
|
||||
|
||||
@@ -59,28 +61,13 @@ and disclosure with you. Sometimes, it might take a little longer for us to
|
||||
react (e.g. out of office conditions), so please bear with us in these cases.
|
||||
|
||||
We will publish security advisories using the
|
||||
[GitHub Security Advisories](https://github.com/argoproj/argo-cd/security/advisories)
|
||||
feature to keep our community well-informed, and will credit you for your
|
||||
[Git Hub Security Advisories](https://github.com/argoproj/argo-cd/security/advisories)
|
||||
feature to keep our community well informed, and will credit you for your
|
||||
findings (unless you prefer to stay anonymous, of course).
|
||||
|
||||
There are two ways to report a vulnerability to the Argo CD team:
|
||||
Please report vulnerabilities by e-mail to the following address:
|
||||
|
||||
* By opening a draft GitHub security advisory: https://github.com/argoproj/argo-cd/security/advisories/new
|
||||
* By e-mail to the following address: cncf-argo-security@lists.cncf.io
|
||||
|
||||
## Internet Bug Bounty collaboration
|
||||
|
||||
We're happy to announce that the Argo project is collaborating with the great
|
||||
folks over at
|
||||
[Hacker One](https://hackerone.com/) and their
|
||||
[Internet Bug Bounty program](https://hackerone.com/ibb)
|
||||
to reward the awesome people who find security vulnerabilities in the four
|
||||
main Argo projects (CD, Events, Rollouts and Workflows) and then work with
|
||||
us to fix and disclose them in a responsible manner.
|
||||
|
||||
If you report a vulnerability to us as outlined in this security policy, we
|
||||
will work together with you to find out whether your finding is eligible for
|
||||
claiming a bounty, and also on how to claim it.
|
||||
* cncf-argo-security@lists.cncf.io
|
||||
|
||||
## Securing your Argo CD Instance
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
# Defined below are the security contacts for this repo.
|
||||
#
|
||||
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
|
||||
# INSTRUCTIONS AT https://github.com/argoproj/argo-cd/security/policy
|
||||
# INSTRUCTIONS AT https://argo-cd.readthedocs.io/en/latest/security_considerations/#reporting-vulnerabilities
|
||||
|
||||
alexmt
|
||||
edlee2121
|
||||
|
||||
68 USERS.md
@@ -11,20 +11,15 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Adevinta](https://www.adevinta.com/)
|
||||
1. [Adfinis](https://adfinis.com)
|
||||
1. [Adventure](https://jp.adventurekk.com/)
|
||||
1. [Adyen](https://www.adyen.com)
|
||||
1. [AirQo](https://airqo.net/)
|
||||
1. [Akuity](https://akuity.io/)
|
||||
1. [Albert Heijn](https://ah.nl/)
|
||||
1. [Alibaba Group](https://www.alibabagroup.com/)
|
||||
1. [Allianz Direct](https://www.allianzdirect.de/)
|
||||
1. [Amadeus IT Group](https://amadeus.com/)
|
||||
1. [Ambassador Labs](https://www.getambassador.io/)
|
||||
1. [ANSTO - Australian Synchrotron](https://www.synchrotron.org.au/)
|
||||
1. [Ant Group](https://www.antgroup.com/)
|
||||
1. [AppDirect](https://www.appdirect.com)
|
||||
1. [Arctiq Inc.](https://www.arctiq.ca)
|
||||
1. [ARZ Allgemeines Rechenzentrum GmbH](https://www.arz.at/)
|
||||
2. [Autodesk](https://www.autodesk.com)
|
||||
1. [Axual B.V.](https://axual.com)
|
||||
1. [Back Market](https://www.backmarket.com)
|
||||
1. [Baloise](https://www.baloise.com)
|
||||
@@ -39,40 +34,28 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Boticario](https://www.boticario.com.br/)
|
||||
1. [Bulder Bank](https://bulderbank.no)
|
||||
1. [Camptocamp](https://camptocamp.com)
|
||||
1. [Candis](https://www.candis.io)
|
||||
1. [Capital One](https://www.capitalone.com)
|
||||
1. [CARFAX](https://www.carfax.com)
|
||||
1. [CARFAX Europe](https://www.carfax.eu)
|
||||
1. [Casavo](https://casavo.com)
|
||||
1. [Celonis](https://www.celonis.com/)
|
||||
1. [CERN](https://home.cern/)
|
||||
1. [Chargetrip](https://chargetrip.com)
|
||||
1. [Chainnodes](https://chainnodes.org)
|
||||
1. [Chime](https://www.chime.com)
|
||||
1. [Cisco ET&I](https://eti.cisco.com/)
|
||||
1. [Cloud Posse](https://www.cloudposse.com/)
|
||||
1. [Cloud Scale](https://cloudscaleinc.com/)
|
||||
1. [Cloudmate](https://cloudmt.co.kr/)
|
||||
1. [Cloudogu](https://cloudogu.com/)
|
||||
1. [Cobalt](https://www.cobalt.io/)
|
||||
1. [Codefresh](https://www.codefresh.io/)
|
||||
1. [Codility](https://www.codility.com/)
|
||||
1. [Commonbond](https://commonbond.co/)
|
||||
1. [Coralogix](https://coralogix.com/)
|
||||
1. [Crédit Agricole CIB](https://www.ca-cib.com)
|
||||
1. [CROZ d.o.o.](https://croz.net/)
|
||||
1. [Crédit Agricole CIB](https://www.ca-cib.com)
|
||||
1. [CyberAgent](https://www.cyberagent.co.jp/en/)
|
||||
1. [Cybozu](https://cybozu-global.com)
|
||||
1. [D2iQ](https://www.d2iq.com)
|
||||
1. [DaoCloud](https://daocloud.io/)
|
||||
1. [Datarisk](https://www.datarisk.io/)
|
||||
1. [Deloitte](https://www.deloitte.com/)
|
||||
1. [Deutsche Telekom AG](https://telekom.com)
|
||||
1. [Devopsi - Poland Software/DevOps Consulting](https://devopsi.pl/)
|
||||
1. [Devtron Labs](https://github.com/devtron-labs/devtron)
|
||||
1. [DigitalOcean](https://www.digitalocean.com)
|
||||
1. [Divistant](https://divistant.com)
|
||||
1. [Doximity](https://www.doximity.com/)
|
||||
1. [EDF Renewables](https://www.edf-re.com/)
|
||||
1. [edX](https://edx.org)
|
||||
1. [Elastic](https://elastic.co/)
|
||||
@@ -82,11 +65,8 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [END.](https://www.endclothing.com/)
|
||||
1. [Energisme](https://energisme.com/)
|
||||
1. [enigmo](https://enigmo.co.jp/)
|
||||
1. [Envoy](https://envoy.com/)
|
||||
1. [Farfetch](https://www.farfetch.com)
|
||||
1. [Faro](https://www.faro.com/)
|
||||
1. [Fave](https://myfave.com)
|
||||
1. [Flexport](https://www.flexport.com/)
|
||||
1. [Flip](https://flip.id)
|
||||
1. [Fonoa](https://www.fonoa.com/)
|
||||
1. [freee](https://corp.freee.co.jp/en/company/)
|
||||
@@ -95,8 +75,6 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [G DATA CyberDefense AG](https://www.gdata-software.com/)
|
||||
1. [Garner](https://www.garnercorp.com)
|
||||
1. [Generali Deutschland AG](https://www.generali.de/)
|
||||
1. [Gepardec](https://gepardec.com/)
|
||||
1. [GetYourGuide](https://www.getyourguide.com/)
|
||||
1. [Gitpod](https://www.gitpod.io)
|
||||
1. [Gllue](https://gllue.com)
|
||||
1. [gloat](https://gloat.com/)
|
||||
@@ -105,11 +83,8 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [GlueOps](https://glueops.dev)
|
||||
1. [GMETRI](https://gmetri.com/)
|
||||
1. [Gojek](https://www.gojek.io/)
|
||||
1. [GoTo](https://www.goto.com/)
|
||||
1. [GoTo Financial](https://gotofinancial.com/)
|
||||
1. [Greenpass](https://www.greenpass.com.br/)
|
||||
1. [Gridfuse](https://gridfuse.com/)
|
||||
1. [Groww](https://groww.in)
|
||||
1. [Grupo MasMovil](https://grupomasmovil.com/en/)
|
||||
1. [Handelsbanken](https://www.handelsbanken.se)
|
||||
1. [Healy](https://www.healyworld.net)
|
||||
@@ -118,12 +93,10 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [hipages](https://hipages.com.au/)
|
||||
1. [Hiya](https://hiya.com)
|
||||
1. [Honestbank](https://honestbank.com)
|
||||
1. [Hostinger](https://www.hostinger.com)
|
||||
1. [IBM](https://www.ibm.com/)
|
||||
1. [Ibotta](https://home.ibotta.com)
|
||||
1. [IITS-Consulting](https://iits-consulting.de)
|
||||
1. [imaware](https://imaware.health)
|
||||
1. [Indeed](https://indeed.com)
|
||||
1. [Index Exchange](https://www.indexexchange.com/)
|
||||
1. [Info Support](https://www.infosupport.com/)
|
||||
1. [InsideBoard](https://www.insideboard.com)
|
||||
@@ -132,11 +105,10 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [JovianX](https://www.jovianx.com/)
|
||||
1. [Kaltura](https://corp.kaltura.com/)
|
||||
1. [Kandji](https://www.kandji.io/)
|
||||
1. [Karrot](https://www.daangn.com/)
|
||||
1. [KarrotPay](https://www.daangnpay.com/)
|
||||
1. [Karrot](https://www.daangn.com/)
|
||||
1. [Kasa](https://kasa.co.kr/)
|
||||
1. [Keeeb](https://www.keeeb.com/)
|
||||
1. [KelkooGroup](https://www.kelkoogroup.com)
|
||||
1. [Keptn](https://keptn.sh)
|
||||
1. [Kinguin](https://www.kinguin.net/)
|
||||
1. [KintoHub](https://www.kintohub.com/)
|
||||
@@ -145,11 +117,8 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Kurly](https://www.kurly.com/)
|
||||
1. [LexisNexis](https://www.lexisnexis.com/)
|
||||
1. [Lian Chu Securities](https://lczq.com)
|
||||
1. [Liatrio](https://www.liatrio.com)
|
||||
1. [Lightricks](https://www.lightricks.com/)
|
||||
1. [LINE](https://linecorp.com/en/)
|
||||
1. [Loom](https://www.loom.com/)
|
||||
1. [Lucid Motors](https://www.lucidmotors.com/)
|
||||
1. [Lytt](https://www.lytt.co/)
|
||||
1. [Magic Leap](https://www.magicleap.com/)
|
||||
1. [Majid Al Futtaim](https://www.majidalfuttaim.com/)
|
||||
@@ -160,12 +129,9 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Max Kelsen](https://www.maxkelsen.com/)
|
||||
1. [MeDirect](https://medirect.com.mt/)
|
||||
1. [Meican](https://meican.com/)
|
||||
1. [Meilleurs Agents](https://www.meilleursagents.com/)
|
||||
1. [Mercedes-Benz Tech Innovation](https://www.mercedes-benz-techinnovation.com/)
|
||||
1. [Metanet](http://www.metanet.co.kr/en/)
|
||||
1. [MindSpore](https://mindspore.cn)
|
||||
1. [Mirantis](https://mirantis.com/)
|
||||
1. [Mission Lane](https://missionlane.com)
|
||||
1. [mixi Group](https://mixi.co.jp/)
|
||||
1. [Moengage](https://www.moengage.com/)
|
||||
1. [Money Forward](https://corp.moneyforward.com/en/)
|
||||
@@ -177,44 +143,32 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Nextdoor](https://nextdoor.com/)
|
||||
1. [Nikkei](https://www.nikkei.co.jp/nikkeiinfo/en/)
|
||||
1. [Nitro](https://gonitro.com)
|
||||
1. [NYCU, CS IT Center](https://it.cs.nycu.edu.tw)
|
||||
1. [Objective](https://www.objective.com.br/)
|
||||
1. [OCCMundial](https://occ.com.mx)
|
||||
1. [Octadesk](https://octadesk.com)
|
||||
1. [Olfeo](https://www.olfeo.com/)
|
||||
1. [omegaUp](https://omegaUp.com)
|
||||
1. [Omni](https://omni.se/)
|
||||
1. [openEuler](https://openeuler.org)
|
||||
1. [openGauss](https://opengauss.org/)
|
||||
1. [OpenGov](https://opengov.com)
|
||||
1. [openLooKeng](https://openlookeng.io)
|
||||
1. [OpenSaaS Studio](https://opensaas.studio)
|
||||
1. [Opensurvey](https://www.opensurvey.co.kr/)
|
||||
1. [OpsMx](https://opsmx.io)
|
||||
1. [OpsVerse](https://opsverse.io)
|
||||
1. [Optoro](https://www.optoro.com/)
|
||||
1. [Orbital Insight](https://orbitalinsight.com/)
|
||||
1. [p3r](https://www.p3r.one/)
|
||||
1. [Packlink](https://www.packlink.com/)
|
||||
1. [PagerDuty](https://www.pagerduty.com/)
|
||||
1. [Pandosearch](https://www.pandosearch.com/en/home)
|
||||
1. [PagerDuty](https://www.pagerduty.com/)
|
||||
1. [Patreon](https://www.patreon.com/)
|
||||
1. [PayPay](https://paypay.ne.jp/)
|
||||
1. [Peloton Interactive](https://www.onepeloton.com/)
|
||||
1. [Pigment](https://www.gopigment.com/)
|
||||
1. [Pipefy](https://www.pipefy.com/)
|
||||
1. [Pismo](https://pismo.io/)
|
||||
1. [Platform9 Systems](https://platform9.com/)
|
||||
1. [Polarpoint.io](https://polarpoint.io)
|
||||
1. [PostFinance](https://github.com/postfinance)
|
||||
1. [Preferred Networks](https://preferred.jp/en/)
|
||||
1. [Previder BV](https://previder.nl)
|
||||
1. [Procore](https://www.procore.com)
|
||||
1. [Productboard](https://www.productboard.com/)
|
||||
1. [Prudential](https://prudential.com.sg)
|
||||
1. [PT Boer Technology (Btech)](https://btech.id/)
|
||||
1. [PUBG](https://www.pubg.com)
|
||||
1. [Puzzle ITC](https://www.puzzle.ch/)
|
||||
1. [Qonto](https://qonto.com)
|
||||
1. [QuintoAndar](https://quintoandar.com.br)
|
||||
1. [Quipper](https://www.quipper.com/)
|
||||
@@ -222,7 +176,6 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Recreation.gov](https://www.recreation.gov/)
|
||||
1. [Red Hat](https://www.redhat.com/)
|
||||
1. [Redpill Linpro](https://www.redpill-linpro.com/)
|
||||
1. [Reenigne Cloud](https://reenigne.ca)
|
||||
1. [reev.com](https://www.reev.com/)
|
||||
1. [RightRev](https://rightrev.com/)
|
||||
1. [Rise](https://www.risecard.eu/)
|
||||
@@ -232,15 +185,11 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Saildrone](https://www.saildrone.com/)
|
||||
1. [Saloodo! GmbH](https://www.saloodo.com)
|
||||
1. [Sap Labs](http://sap.com)
|
||||
1. [Sauce Labs](https://saucelabs.com/)
|
||||
1. [Schwarz IT](https://jobs.schwarz/it-mission)
|
||||
1. [SEEK](https://seek.com.au)
|
||||
1. [SI Analytics](https://si-analytics.ai)
|
||||
1. [Skit](https://skit.ai/)
|
||||
1. [Skyscanner](https://www.skyscanner.net/)
|
||||
1. [Smart Pension](https://www.smartpension.co.uk/)
|
||||
1. [Smilee.io](https://smilee.io)
|
||||
1. [Smood.ch](https://www.smood.ch/)
|
||||
1. [Snapp](https://snapp.ir/)
|
||||
1. [Snyk](https://snyk.io/)
|
||||
1. [Softway Medical](https://www.softwaymedical.fr/)
|
||||
@@ -249,7 +198,6 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Spendesk](https://spendesk.com/)
|
||||
1. [Splunk](https://splunk.com/)
|
||||
1. [Spores Labs](https://spores.app)
|
||||
1. [StreamNative](https://streamnative.io)
|
||||
1. [Stuart](https://stuart.com/)
|
||||
1. [Sumo Logic](https://sumologic.com/)
|
||||
1. [Sutpc](http://www.sutpc.com/)
|
||||
@@ -263,7 +211,6 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Techcombank](https://www.techcombank.com.vn/trang-chu)
|
||||
1. [Technacy](https://www.technacy.it/)
|
||||
1. [Tesla](https://tesla.com/)
|
||||
1. [The Scale Factory](https://www.scalefactory.com/)
|
||||
1. [ThousandEyes](https://www.thousandeyes.com/)
|
||||
1. [Ticketmaster](https://ticketmaster.com)
|
||||
1. [Tiger Analytics](https://www.tigeranalytics.com/)
|
||||
@@ -271,25 +218,18 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Toss](https://toss.im/en)
|
||||
1. [Trendyol](https://www.trendyol.com/)
|
||||
1. [tru.ID](https://tru.id)
|
||||
1. [Trusting Social](https://trustingsocial.com/)
|
||||
1. [Twilio SendGrid](https://sendgrid.com)
|
||||
1. [tZERO](https://www.tzero.com/)
|
||||
1. [U.S. Veterans Affairs Department](https://www.va.gov/)
|
||||
1. [UBIO](https://ub.io/)
|
||||
1. [UFirstGroup](https://www.ufirstgroup.com/en/)
|
||||
1. [ungleich.ch](https://ungleich.ch/)
|
||||
1. [Unifonic Inc](https://www.unifonic.com/)
|
||||
1. [Universidad Mesoamericana](https://www.umes.edu.gt/)
|
||||
1. [Urbantz](https://urbantz.com/)
|
||||
1. [Vectra](https://www.vectra.ai)
|
||||
1. [Veepee](https://www.veepee.com)
|
||||
1. [Viaduct](https://www.viaduct.ai/)
|
||||
1. [VietMoney](https://vietmoney.vn/)
|
||||
1. [Vinted](https://vinted.com/)
|
||||
1. [Virtuo](https://www.govirtuo.com/)
|
||||
1. [VISITS Technologies](https://visits.world/en)
|
||||
1. [Volvo Cars](https://www.volvocars.com/)
|
||||
1. [Voyager Digital](https://www.investvoyager.com/)
|
||||
1. [VSHN - The DevOps Company](https://vshn.ch/)
|
||||
1. [Walkbase](https://www.walkbase.com/)
|
||||
1. [Webstores](https://www.webstores.nl)
|
||||
@@ -298,13 +238,11 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [WeMo Scooter](https://www.wemoscooter.com/)
|
||||
1. [Whitehat Berlin](https://whitehat.berlin) by Guido Maria Serra +Fenaroli
|
||||
1. [Witick](https://witick.io/)
|
||||
1. [Wolffun Game](https://www.wolffungame.com/)
|
||||
1. [WooliesX](https://wooliesx.com.au/)
|
||||
1. [Woolworths Group](https://www.woolworthsgroup.com.au/)
|
||||
1. [WSpot](https://www.wspot.com.br/)
|
||||
1. [Yieldlab](https://www.yieldlab.de/)
|
||||
1. [Youverify](https://youverify.co/)
|
||||
1. [Yubo](https://www.yubo.live/)
|
||||
1. [ZDF](https://www.zdf.de/)
|
||||
1. [Zimpler](https://www.zimpler.com/)
|
||||
1. [ZOZO](https://corp.zozo.com/)
|
||||
|
||||
@@ -17,7 +17,6 @@ package controllers
import (
	"context"
	"fmt"
	"reflect"
	"time"

	log "github.com/sirupsen/logrus"
@@ -26,30 +25,22 @@ import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/record"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/builder"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
	"sigs.k8s.io/controller-runtime/pkg/source"

	"github.com/argoproj/argo-cd/v2/applicationset/generators"
	"github.com/argoproj/argo-cd/v2/applicationset/utils"
	"github.com/argoproj/argo-cd/v2/common"
	"github.com/argoproj/argo-cd/v2/util/db"
	"github.com/argoproj/argo-cd/v2/util/glob"

	argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
	appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
	argoutil "github.com/argoproj/argo-cd/v2/util/argo"

	"github.com/argoproj/argo-cd/v2/pkg/apis/application"
)

const (
@@ -58,14 +49,10 @@ const (
	// https://github.com/argoproj-labs/argocd-notifications/blob/33d345fa838829bb50fca5c08523aba380d2c12b/pkg/controller/state.go#L17
	NotifiedAnnotationKey             = "notified.notifications.argoproj.io"
	ReconcileRequeueOnValidationError = time.Minute * 3

	// LabelKeyAppSetInstance is the label key to use to uniquely identify the apps of an applicationset
	// The ArgoCD applicationset name is used as the instance name
	LabelKeyAppSetInstance = "argocd.argoproj.io/application-set-name"
)
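`LabelKeyAppSetInstance` is what ties a generated Application back to its parent ApplicationSet; the `generateApplications` hunk later in this diff stamps it onto every rendered template. A condensed view of that tagging step, copied from that hunk and lightly commented:

```go
// Ensure the rendered template has a label map, then record which
// ApplicationSet produced this Application.
tmplApplication := getTempApplication(a.Template)
if tmplApplication.Labels == nil {
	tmplApplication.Labels = make(map[string]string)
}
tmplApplication.Labels[LabelKeyAppSetInstance] = applicationSetInfo.Name
```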

var (
	defaultPreservedAnnotations = []string{
	preservedAnnotations = []string{
		NotifiedAnnotationKey,
		argov1alpha1.AnnotationKeyRefresh,
	}
@@ -74,19 +61,14 @@ var (
// ApplicationSetReconciler reconciles a ApplicationSet object
type ApplicationSetReconciler struct {
	client.Client
	Scheme               *runtime.Scheme
	Recorder             record.EventRecorder
	Generators           map[string]generators.Generator
	ArgoDB               db.ArgoDB
	ArgoAppClientset     appclientset.Interface
	KubeClientset        kubernetes.Interface
	Policy               argov1alpha1.ApplicationsSyncPolicy
	EnablePolicyOverride bool
	Scheme           *runtime.Scheme
	Recorder         record.EventRecorder
	Generators       map[string]generators.Generator
	ArgoDB           db.ArgoDB
	ArgoAppClientset appclientset.Interface
	KubeClientset    kubernetes.Interface
	utils.Policy
	utils.Renderer
	ArgoCDNamespace          string
	ApplicationSetNamespaces []string
	EnableProgressiveSyncs   bool
	SCMRootCAPath            string
}

// +kubebuilder:rbac:groups=argoproj.io,resources=applicationsets,verbs=get;list;watch;create;update;patch;delete
@@ -129,7 +111,7 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque

	parametersGenerated = true

	validateErrors, err := r.validateGeneratedApplications(ctx, desiredApplications, applicationSetInfo)
	validateErrors, err := r.validateGeneratedApplications(ctx, desiredApplications, applicationSetInfo, req.Namespace)
	if err != nil {
		// While some generators may return an error that requires user intervention,
		// other generators reference external resources that may change to cause
@@ -152,36 +134,6 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
		return ctrl.Result{RequeueAfter: ReconcileRequeueOnValidationError}, nil
	}

	// appMap is a name->app collection of Applications in this ApplicationSet.
	appMap := map[string]argov1alpha1.Application{}
	// appSyncMap tracks which apps will be synced during this reconciliation.
	appSyncMap := map[string]bool{}

	if r.EnableProgressiveSyncs {
		if applicationSetInfo.Spec.Strategy == nil && len(applicationSetInfo.Status.ApplicationStatus) > 0 {
			log.Infof("Removing %v unnecessary AppStatus entries from ApplicationSet %v", len(applicationSetInfo.Status.ApplicationStatus), applicationSetInfo.Name)

			err := r.setAppSetApplicationStatus(ctx, &applicationSetInfo, []argov1alpha1.ApplicationSetApplicationStatus{})
			if err != nil {
				return ctrl.Result{}, fmt.Errorf("failed to clear previous AppSet application statuses for %v: %w", applicationSetInfo.Name, err)
			}
		} else {
			applications, err := r.getCurrentApplications(ctx, applicationSetInfo)
			if err != nil {
				return ctrl.Result{}, fmt.Errorf("failed to get current applications for application set: %w", err)
			}

			for _, app := range applications {
				appMap[app.Name] = app
			}

			appSyncMap, err = r.performProgressiveSyncs(ctx, applicationSetInfo, applications, desiredApplications, appMap)
			if err != nil {
				return ctrl.Result{}, fmt.Errorf("failed to perform progressive sync reconciliation for application set: %w", err)
			}
		}
	}

	var validApps []argov1alpha1.Application
	for i := range desiredApplications {
		if validateErrors[i] == nil {
@@ -210,27 +162,7 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
			)
		}

	if r.EnableProgressiveSyncs {
		// trigger appropriate application syncs if RollingSync strategy is enabled
		if progressiveSyncsStrategyEnabled(&applicationSetInfo, "RollingSync") {
			validApps, err = r.syncValidApplications(ctx, &applicationSetInfo, appSyncMap, appMap, validApps)

			if err != nil {
				_ = r.setApplicationSetStatusCondition(ctx,
					&applicationSetInfo,
					argov1alpha1.ApplicationSetCondition{
						Type:    argov1alpha1.ApplicationSetConditionErrorOccurred,
						Message: err.Error(),
						Reason:  argov1alpha1.ApplicationSetReasonSyncApplicationError,
						Status:  argov1alpha1.ApplicationSetConditionStatusTrue,
					}, parametersGenerated,
				)
				return ctrl.Result{}, err
			}
		}
	}

	if utils.DefaultPolicy(applicationSetInfo.Spec.SyncPolicy, r.Policy, r.EnablePolicyOverride).AllowUpdate() {
	if r.Policy.Update() {
		err = r.createOrUpdateInCluster(ctx, applicationSetInfo, validApps)
		if err != nil {
			_ = r.setApplicationSetStatusCondition(ctx,
@@ -260,7 +192,7 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
		}
	}

	if utils.DefaultPolicy(applicationSetInfo.Spec.SyncPolicy, r.Policy, r.EnablePolicyOverride).AllowDelete() {
	if r.Policy.Delete() {
		err = r.deleteInCluster(ctx, applicationSetInfo, desiredApplications)
		if err != nil {
			_ = r.setApplicationSetStatusCondition(ctx,
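One side of the two hunks above gates create/update and delete on plain `r.Policy.Update()` / `r.Policy.Delete()` checks, while the other resolves an effective policy through `utils.DefaultPolicy(applicationSetInfo.Spec.SyncPolicy, r.Policy, r.EnablePolicyOverride)`. A rough sketch of what such a resolution helper could look like; the type and field names here are assumptions for illustration, not the exact `applicationset/utils` implementation:

```go
// Prefer the ApplicationSet's own applicationsSync policy only when the
// controller permits per-AppSet overrides; otherwise use the controller-wide policy.
func defaultPolicySketch(appSetPolicy *argov1alpha1.ApplicationSetSyncPolicy,
	controllerPolicy argov1alpha1.ApplicationsSyncPolicy, allowOverride bool) argov1alpha1.ApplicationsSyncPolicy {
	if allowOverride && appSetPolicy != nil && appSetPolicy.ApplicationsSync != nil {
		return *appSetPolicy.ApplicationsSync
	}
	return controllerPolicy
}
```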
@@ -420,7 +352,7 @@ func (r *ApplicationSetReconciler) setApplicationSetStatusCondition(ctx context.

// validateGeneratedApplications uses the Argo CD validation functions to verify the correctness of the
// generated applications.
func (r *ApplicationSetReconciler) validateGeneratedApplications(ctx context.Context, desiredApplications []argov1alpha1.Application, applicationSetInfo argov1alpha1.ApplicationSet) (map[int]error, error) {
func (r *ApplicationSetReconciler) validateGeneratedApplications(ctx context.Context, desiredApplications []argov1alpha1.Application, applicationSetInfo argov1alpha1.ApplicationSet, namespace string) (map[int]error, error) {
	errorsByIndex := map[int]error{}
	namesSet := map[string]bool{}
	for i, app := range desiredApplications {
@@ -432,7 +364,7 @@ func (r *ApplicationSetReconciler) validateGeneratedApplications(ctx context.Con
			continue
		}

		proj, err := r.ArgoAppClientset.ArgoprojV1alpha1().AppProjects(r.ArgoCDNamespace).Get(ctx, app.Spec.GetProject(), metav1.GetOptions{})
		proj, err := r.ArgoAppClientset.ArgoprojV1alpha1().AppProjects(namespace).Get(ctx, app.Spec.GetProject(), metav1.GetOptions{})
		if err != nil {
			if apierr.IsNotFound(err) {
				errorsByIndex[i] = fmt.Errorf("application references project %s which does not exist", app.Spec.Project)
@@ -441,7 +373,7 @@ func (r *ApplicationSetReconciler) validateGeneratedApplications(ctx context.Con
			return nil, err
		}

		if err := utils.ValidateDestination(ctx, &app.Spec.Destination, r.KubeClientset, r.ArgoCDNamespace); err != nil {
		if err := utils.ValidateDestination(ctx, &app.Spec.Destination, r.KubeClientset, namespace); err != nil {
			errorsByIndex[i] = fmt.Errorf("application destination spec is invalid: %s", err.Error())
			continue
		}
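The error map returned above is keyed by the index of each desired Application, so one invalid Application does not abort the whole set. A minimal sketch of how such an index-keyed result can be consumed; this is a hypothetical consumer for illustration, not the exact continuation of the truncated `Reconcile` hunk:

```go
// Keep only the Applications whose index has no recorded validation error.
valid := make([]argov1alpha1.Application, 0, len(desiredApplications))
for i := range desiredApplications {
	if validateErrors[i] == nil {
		valid = append(valid, desiredApplications[i])
	}
}
```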
@@ -512,13 +444,9 @@ func (r *ApplicationSetReconciler) generateApplications(applicationSetInfo argov
|
||||
|
||||
for _, a := range t {
|
||||
tmplApplication := getTempApplication(a.Template)
|
||||
if tmplApplication.Labels == nil {
|
||||
tmplApplication.Labels = make(map[string]string)
|
||||
}
|
||||
tmplApplication.Labels[LabelKeyAppSetInstance] = applicationSetInfo.Name
|
||||
|
||||
for _, p := range a.Params {
|
||||
app, err := r.Renderer.RenderTemplateParams(tmplApplication, applicationSetInfo.Spec.SyncPolicy, p, applicationSetInfo.Spec.GoTemplate, applicationSetInfo.Spec.GoTemplateOptions)
|
||||
app, err := r.Renderer.RenderTemplateParams(tmplApplication, applicationSetInfo.Spec.SyncPolicy, p, applicationSetInfo.Spec.GoTemplate)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("params", a.Params).WithField("generator", requestedGenerator).
|
||||
Error("error generating application from params")
|
||||
@@ -540,15 +468,7 @@ func (r *ApplicationSetReconciler) generateApplications(applicationSetInfo argov
|
||||
return res, applicationSetReason, firstError
|
||||
}
|
||||
|
||||
func ignoreNotAllowedNamespaces(namespaces []string) predicate.Predicate {
|
||||
return predicate.Funcs{
|
||||
CreateFunc: func(e event.CreateEvent) bool {
|
||||
return glob.MatchStringInList(namespaces, e.Object.GetNamespace(), false)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (r *ApplicationSetReconciler) SetupWithManager(mgr ctrl.Manager, enableProgressiveSyncs bool, maxConcurrentReconciliations int) error {
|
||||
func (r *ApplicationSetReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
if err := mgr.GetFieldIndexer().IndexField(context.TODO(), &argov1alpha1.Application{}, ".metadata.controller", func(rawObj client.Object) []string {
|
||||
// grab the job object, extract the owner...
|
||||
app := rawObj.(*argov1alpha1.Application)
|
||||
@@ -564,16 +484,12 @@ func (r *ApplicationSetReconciler) SetupWithManager(mgr ctrl.Manager, enableProg
|
||||
// ...and if so, return it
|
||||
return []string{owner.Name}
|
||||
}); err != nil {
|
||||
return fmt.Errorf("error setting up with manager: %w", err)
|
||||
return err
|
||||
}
|
||||
|
||||
ownsHandler := getOwnsHandlerPredicates(enableProgressiveSyncs)
|
||||
|
||||
return ctrl.NewControllerManagedBy(mgr).WithOptions(controller.Options{
|
||||
MaxConcurrentReconciles: maxConcurrentReconciliations,
|
||||
}).For(&argov1alpha1.ApplicationSet{}).
|
||||
Owns(&argov1alpha1.Application{}, builder.WithPredicates(ownsHandler)).
|
||||
WithEventFilter(ignoreNotAllowedNamespaces(r.ApplicationSetNamespaces)).
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(&argov1alpha1.ApplicationSet{}).
|
||||
Owns(&argov1alpha1.Application{}).
|
||||
Watches(
|
||||
&source.Kind{Type: &corev1.Secret{}},
|
||||
&clusterSecretEventHandler{
|
||||
@@ -597,16 +513,13 @@ func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context,
|
||||
appLog := log.WithFields(log.Fields{"app": generatedApp.Name, "appSet": applicationSet.Name})
|
||||
generatedApp.Namespace = applicationSet.Namespace
|
||||
|
||||
// Normalize to avoid fighting with the application controller.
|
||||
generatedApp.Spec = *argoutil.NormalizeApplicationSpec(&generatedApp.Spec)
|
||||
|
||||
found := &argov1alpha1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: generatedApp.Name,
|
||||
Namespace: generatedApp.Namespace,
|
||||
},
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: application.ApplicationKind,
|
||||
Kind: "Application",
|
||||
APIVersion: "argoproj.io/v1alpha1",
|
||||
},
|
||||
}
|
||||
@@ -615,20 +528,9 @@ func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context,
|
||||
// Copy only the Application/ObjectMeta fields that are significant, from the generatedApp
|
||||
found.Spec = generatedApp.Spec
|
||||
|
||||
// allow setting the Operation field to trigger a sync operation on an Application
|
||||
if generatedApp.Operation != nil {
|
||||
found.Operation = generatedApp.Operation
|
||||
}
|
||||
|
||||
preservedAnnotations := make([]string, 0)
|
||||
if applicationSet.Spec.PreservedFields != nil {
|
||||
preservedAnnotations = append(preservedAnnotations, applicationSet.Spec.PreservedFields.Annotations...)
|
||||
}
|
||||
// Preserve specially treated argo cd annotations:
|
||||
// * https://github.com/argoproj/applicationset/issues/180
|
||||
// * https://github.com/argoproj/argo-cd/issues/10500
|
||||
preservedAnnotations = append(preservedAnnotations, defaultPreservedAnnotations...)
|
||||
|
||||
for _, key := range preservedAnnotations {
|
||||
if state, exists := found.ObjectMeta.Annotations[key]; exists {
|
||||
if generatedApp.Annotations == nil {
|
||||
@@ -665,7 +567,7 @@ func (r *ApplicationSetReconciler) createInCluster(ctx context.Context, applicat
|
||||
var createApps []argov1alpha1.Application
|
||||
current, err := r.getCurrentApplications(ctx, applicationSet)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting current applications: %w", err)
|
||||
return err
|
||||
}
|
||||
|
||||
m := make(map[string]bool) // Will holds the app names that are current in the cluster
|
||||
@@ -704,15 +606,15 @@ func (r *ApplicationSetReconciler) deleteInCluster(ctx context.Context, applicat
|
||||
// settingsMgr := settings.NewSettingsManager(context.TODO(), r.KubeClientset, applicationSet.Namespace)
|
||||
// argoDB := db.NewDB(applicationSet.Namespace, settingsMgr, r.KubeClientset)
|
||||
// clusterList, err := argoDB.ListClusters(ctx)
|
||||
clusterList, err := utils.ListClusters(ctx, r.KubeClientset, r.ArgoCDNamespace)
|
||||
clusterList, err := utils.ListClusters(ctx, r.KubeClientset, applicationSet.Namespace)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error listing clusters: %w", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Save current applications to be able to delete the ones that are not in appList
|
||||
current, err := r.getCurrentApplications(ctx, applicationSet)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting current applications: %w", err)
|
||||
return err
|
||||
}
|
||||
|
||||
m := make(map[string]bool) // Will holds the app names in appList for the deletion process
|
||||
@@ -765,7 +667,7 @@ func (r *ApplicationSetReconciler) removeFinalizerOnInvalidDestination(ctx conte
|
||||
var validDestination bool
|
||||
|
||||
// Detect if the destination is invalid (name doesn't correspond to a matching cluster)
|
||||
if err := utils.ValidateDestination(ctx, &app.Spec.Destination, r.KubeClientset, r.ArgoCDNamespace); err != nil {
|
||||
if err := utils.ValidateDestination(ctx, &app.Spec.Destination, r.KubeClientset, applicationSet.Namespace); err != nil {
|
||||
appLog.Warnf("The destination cluster for %s couldn't be found: %v", app.Name, err)
|
||||
validDestination = false
|
||||
} else {
|
||||
@@ -816,7 +718,7 @@ func (r *ApplicationSetReconciler) removeFinalizerOnInvalidDestination(ctx conte
|
||||
|
||||
err := r.Client.Update(ctx, app, &client.UpdateOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("error updating finalizers: %w", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -824,600 +726,4 @@ func (r *ApplicationSetReconciler) removeFinalizerOnInvalidDestination(ctx conte
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ApplicationSetReconciler) performProgressiveSyncs(ctx context.Context, appset argov1alpha1.ApplicationSet, applications []argov1alpha1.Application, desiredApplications []argov1alpha1.Application, appMap map[string]argov1alpha1.Application) (map[string]bool, error) {
|
||||
|
||||
appDependencyList, appStepMap, err := r.buildAppDependencyList(ctx, appset, desiredApplications)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to build app dependency list: %w", err)
|
||||
}
|
||||
|
||||
_, err = r.updateApplicationSetApplicationStatus(ctx, &appset, applications, appStepMap)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to update applicationset app status: %w", err)
|
||||
}
|
||||
|
||||
log.Infof("ApplicationSet %v step list:", appset.Name)
|
||||
for i, step := range appDependencyList {
|
||||
log.Infof("step %v: %+v", i+1, step)
|
||||
}
|
||||
|
||||
appSyncMap, err := r.buildAppSyncMap(ctx, appset, appDependencyList, appMap)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to build app sync map: %w", err)
|
||||
}
|
||||
|
||||
log.Infof("Application allowed to sync before maxUpdate?: %+v", appSyncMap)
|
||||
|
||||
_, err = r.updateApplicationSetApplicationStatusProgress(ctx, &appset, appSyncMap, appStepMap, appMap)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to update applicationset application status progress: %w", err)
|
||||
}
|
||||
|
||||
_, err = r.updateApplicationSetApplicationStatusConditions(ctx, &appset)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to update applicationset application status conditions: %w", err)
|
||||
}
|
||||
|
||||
return appSyncMap, nil
|
||||
}
|
||||
|
||||
// this list tracks which Applications belong to each RollingUpdate step
|
||||
func (r *ApplicationSetReconciler) buildAppDependencyList(ctx context.Context, applicationSet argov1alpha1.ApplicationSet, applications []argov1alpha1.Application) ([][]string, map[string]int, error) {
|
||||
|
||||
if applicationSet.Spec.Strategy == nil || applicationSet.Spec.Strategy.Type == "" || applicationSet.Spec.Strategy.Type == "AllAtOnce" {
|
||||
return [][]string{}, map[string]int{}, nil
|
||||
}
|
||||
|
||||
steps := []argov1alpha1.ApplicationSetRolloutStep{}
|
||||
if progressiveSyncsStrategyEnabled(&applicationSet, "RollingSync") {
|
||||
steps = applicationSet.Spec.Strategy.RollingSync.Steps
|
||||
}
|
||||
|
||||
appDependencyList := make([][]string, 0)
|
||||
for range steps {
|
||||
appDependencyList = append(appDependencyList, make([]string, 0))
|
||||
}
|
||||
|
||||
appStepMap := map[string]int{}
|
||||
|
||||
// use applicationLabelSelectors to filter generated Applications into steps and status by name
|
||||
for _, app := range applications {
|
||||
for i, step := range steps {
|
||||
|
||||
selected := true // default to true, assuming the current Application is a match for the given step matchExpression
|
||||
|
||||
for _, matchExpression := range step.MatchExpressions {
|
||||
|
||||
if val, ok := app.Labels[matchExpression.Key]; ok {
|
||||
valueMatched := labelMatchedExpression(val, matchExpression)
|
||||
|
||||
if !valueMatched { // none of the matchExpression values was a match with the Application'ss labels
|
||||
selected = false
|
||||
break
|
||||
}
|
||||
} else if matchExpression.Operator == "In" {
|
||||
selected = false // no matching label key with "In" operator means this Application will not be included in the current step
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if selected {
|
||||
appDependencyList[i] = append(appDependencyList[i], app.Name)
|
||||
if val, ok := appStepMap[app.Name]; ok {
|
||||
log.Warnf("AppSet '%v' has a invalid matchExpression that selects Application '%v' label twice, in steps %v and %v", applicationSet.Name, app.Name, val+1, i+1)
|
||||
} else {
|
||||
appStepMap[app.Name] = i
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return appDependencyList, appStepMap, nil
|
||||
}
|
||||
|
||||
func labelMatchedExpression(val string, matchExpression argov1alpha1.ApplicationMatchExpression) bool {
	if matchExpression.Operator != "In" && matchExpression.Operator != "NotIn" {
		log.Errorf("skipping AppSet rollingUpdate step Application selection, invalid matchExpression operator provided: %q ", matchExpression.Operator)
		return false
	}

	// if operator == In, default to false
	// if operator == NotIn, default to true
	valueMatched := matchExpression.Operator == "NotIn"

	for _, value := range matchExpression.Values {
		if val == value {
			// first "In" match returns true
			// first "NotIn" match returns false
			return matchExpression.Operator == "In"
		}
	}
	return valueMatched
}
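In short: `In` matches when the label value appears in `Values`, `NotIn` matches when it does not, and any other operator is rejected outright. A small illustrative call (the key and values are made up for the example):

```go
expr := argov1alpha1.ApplicationMatchExpression{
	Key:      "env", // the caller has already looked this key up in the Application's labels
	Operator: "In",
	Values:   []string{"staging", "prod"},
}

labelMatchedExpression("prod", expr) // true: "prod" is in Values
labelMatchedExpression("dev", expr)  // false: "dev" is not in Values
```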
|
||||
// this map is used to determine which stage of Applications are ready to be updated in the reconciler loop
|
||||
func (r *ApplicationSetReconciler) buildAppSyncMap(ctx context.Context, applicationSet argov1alpha1.ApplicationSet, appDependencyList [][]string, appMap map[string]argov1alpha1.Application) (map[string]bool, error) {
|
||||
appSyncMap := map[string]bool{}
|
||||
syncEnabled := true
|
||||
|
||||
// healthy stages and the first non-healthy stage should have sync enabled
|
||||
// every stage after should have sync disabled
|
||||
|
||||
for i := range appDependencyList {
|
||||
// set the syncEnabled boolean for every Application in the current step
|
||||
for _, appName := range appDependencyList[i] {
|
||||
appSyncMap[appName] = syncEnabled
|
||||
}
|
||||
|
||||
// detect if we need to halt before progressing to the next step
|
||||
for _, appName := range appDependencyList[i] {
|
||||
|
||||
idx := findApplicationStatusIndex(applicationSet.Status.ApplicationStatus, appName)
|
||||
if idx == -1 {
|
||||
// no Application status found, likely because the Application is being newly created
|
||||
syncEnabled = false
|
||||
break
|
||||
}
|
||||
|
||||
appStatus := applicationSet.Status.ApplicationStatus[idx]
|
||||
|
||||
if app, ok := appMap[appName]; ok {
|
||||
|
||||
syncEnabled = appSyncEnabledForNextStep(&applicationSet, app, appStatus)
|
||||
if !syncEnabled {
|
||||
break
|
||||
}
|
||||
} else {
|
||||
// application name not found in the list of applications managed by this ApplicationSet, maybe because it's being deleted
|
||||
syncEnabled = false
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return appSyncMap, nil
|
||||
}
|
||||
|
||||
func appSyncEnabledForNextStep(appset *argov1alpha1.ApplicationSet, app argov1alpha1.Application, appStatus argov1alpha1.ApplicationSetApplicationStatus) bool {
|
||||
|
||||
if progressiveSyncsStrategyEnabled(appset, "RollingSync") {
|
||||
// we still need to complete the current step if the Application is not yet Healthy or there are still pending Application changes
|
||||
return isApplicationHealthy(app) && appStatus.Status == "Healthy"
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func progressiveSyncsStrategyEnabled(appset *argov1alpha1.ApplicationSet, strategyType string) bool {
|
||||
if appset.Spec.Strategy == nil || appset.Spec.Strategy.Type != strategyType {
|
||||
return false
|
||||
}
|
||||
|
||||
if strategyType == "RollingSync" && appset.Spec.Strategy.RollingSync == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func isApplicationHealthy(app argov1alpha1.Application) bool {
|
||||
healthStatusString, syncStatusString, operationPhaseString := statusStrings(app)
|
||||
|
||||
if healthStatusString == "Healthy" && syncStatusString != "OutOfSync" && (operationPhaseString == "Succeeded" || operationPhaseString == "") {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func statusStrings(app argov1alpha1.Application) (string, string, string) {
|
||||
healthStatusString := string(app.Status.Health.Status)
|
||||
syncStatusString := string(app.Status.Sync.Status)
|
||||
operationPhaseString := ""
|
||||
if app.Status.OperationState != nil {
|
||||
operationPhaseString = string(app.Status.OperationState.Phase)
|
||||
}
|
||||
|
||||
return healthStatusString, syncStatusString, operationPhaseString
|
||||
}
|
||||
|
||||
// check the status of each Application's status and promote Applications to the next status if needed
|
||||
func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet, applications []argov1alpha1.Application, appStepMap map[string]int) ([]argov1alpha1.ApplicationSetApplicationStatus, error) {
|
||||
|
||||
now := metav1.Now()
|
||||
appStatuses := make([]argov1alpha1.ApplicationSetApplicationStatus, 0, len(applications))
|
||||
|
||||
for _, app := range applications {
|
||||
|
||||
healthStatusString, syncStatusString, operationPhaseString := statusStrings(app)
|
||||
|
||||
idx := findApplicationStatusIndex(applicationSet.Status.ApplicationStatus, app.Name)
|
||||
|
||||
currentAppStatus := argov1alpha1.ApplicationSetApplicationStatus{}
|
||||
|
||||
if idx == -1 {
|
||||
// AppStatus not found, set default status of "Waiting"
|
||||
currentAppStatus = argov1alpha1.ApplicationSetApplicationStatus{
|
||||
Application: app.Name,
|
||||
LastTransitionTime: &now,
|
||||
Message: "No Application status found, defaulting status to Waiting.",
|
||||
Status: "Waiting",
|
||||
Step: fmt.Sprint(appStepMap[app.Name] + 1),
|
||||
}
|
||||
} else {
|
||||
// we have an existing AppStatus
|
||||
currentAppStatus = applicationSet.Status.ApplicationStatus[idx]
|
||||
}
|
||||
|
||||
appOutdated := false
|
||||
if progressiveSyncsStrategyEnabled(applicationSet, "RollingSync") {
|
||||
appOutdated = syncStatusString == "OutOfSync"
|
||||
}
|
||||
|
||||
if appOutdated && currentAppStatus.Status != "Waiting" && currentAppStatus.Status != "Pending" {
|
||||
log.Infof("Application %v is outdated, updating its ApplicationSet status to Waiting", app.Name)
|
||||
currentAppStatus.LastTransitionTime = &now
|
||||
currentAppStatus.Status = "Waiting"
|
||||
currentAppStatus.Message = "Application has pending changes, setting status to Waiting."
|
||||
currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1)
|
||||
}
|
||||
|
||||
if currentAppStatus.Status == "Pending" {
|
||||
// check for successful syncs started less than 10s before the Application transitioned to Pending
|
||||
// this covers race conditions where syncs initiated by RollingSync miraculously have a sync time before the transition to Pending state occurred (could be a few seconds)
|
||||
if operationPhaseString == "Succeeded" && app.Status.OperationState.StartedAt.Add(time.Duration(10)*time.Second).After(currentAppStatus.LastTransitionTime.Time) {
|
||||
if !app.Status.OperationState.StartedAt.After(currentAppStatus.LastTransitionTime.Time) {
|
||||
log.Warnf("Application %v was synced less than 10s prior to entering Pending status, we'll assume the AppSet controller triggered this sync and update its status to Progressing", app.Name)
|
||||
}
|
||||
log.Infof("Application %v has completed a sync successfully, updating its ApplicationSet status to Progressing", app.Name)
|
||||
currentAppStatus.LastTransitionTime = &now
|
||||
currentAppStatus.Status = "Progressing"
|
||||
currentAppStatus.Message = "Application resource completed a sync successfully, updating status from Pending to Progressing."
|
||||
currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1)
|
||||
} else if operationPhaseString == "Running" || healthStatusString == "Progressing" {
|
||||
log.Infof("Application %v has entered Progressing status, updating its ApplicationSet status to Progressing", app.Name)
|
||||
currentAppStatus.LastTransitionTime = &now
|
||||
currentAppStatus.Status = "Progressing"
|
||||
currentAppStatus.Message = "Application resource became Progressing, updating status from Pending to Progressing."
|
||||
currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1)
|
||||
}
|
||||
}
|
||||
|
||||
if currentAppStatus.Status == "Waiting" && isApplicationHealthy(app) {
|
||||
log.Infof("Application %v is already synced and healthy, updating its ApplicationSet status to Healthy", app.Name)
|
||||
currentAppStatus.LastTransitionTime = &now
|
||||
currentAppStatus.Status = healthStatusString
|
||||
currentAppStatus.Message = "Application resource is already Healthy, updating status from Waiting to Healthy."
|
||||
currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1)
|
||||
}
|
||||
|
||||
if currentAppStatus.Status == "Progressing" && isApplicationHealthy(app) {
|
||||
log.Infof("Application %v has completed Progressing status, updating its ApplicationSet status to Healthy", app.Name)
|
||||
currentAppStatus.LastTransitionTime = &now
|
||||
currentAppStatus.Status = healthStatusString
|
||||
currentAppStatus.Message = "Application resource became Healthy, updating status from Progressing to Healthy."
|
||||
currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1)
|
||||
}
|
||||
|
||||
appStatuses = append(appStatuses, currentAppStatus)
|
||||
}
|
||||
|
||||
err := r.setAppSetApplicationStatus(ctx, applicationSet, appStatuses)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to set AppSet application statuses: %w", err)
|
||||
}
|
||||
|
||||
return appStatuses, nil
|
||||
}
|
||||
|
||||
// check Applications that are in Waiting status and promote them to Pending if needed
|
||||
func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet, appSyncMap map[string]bool, appStepMap map[string]int, appMap map[string]argov1alpha1.Application) ([]argov1alpha1.ApplicationSetApplicationStatus, error) {
|
||||
now := metav1.Now()
|
||||
|
||||
appStatuses := make([]argov1alpha1.ApplicationSetApplicationStatus, 0, len(applicationSet.Status.ApplicationStatus))
|
||||
|
||||
// if we have no RollingUpdate steps, clear out the existing ApplicationStatus entries
|
||||
if applicationSet.Spec.Strategy != nil && applicationSet.Spec.Strategy.Type != "" && applicationSet.Spec.Strategy.Type != "AllAtOnce" {
|
||||
updateCountMap := []int{}
|
||||
totalCountMap := []int{}
|
||||
|
||||
length := 0
|
||||
if progressiveSyncsStrategyEnabled(applicationSet, "RollingSync") {
|
||||
length = len(applicationSet.Spec.Strategy.RollingSync.Steps)
|
||||
}
|
||||
for s := 0; s < length; s++ {
|
||||
updateCountMap = append(updateCountMap, 0)
|
||||
totalCountMap = append(totalCountMap, 0)
|
||||
}
|
||||
|
||||
// populate updateCountMap with counts of existing Pending and Progressing Applications
|
||||
for _, appStatus := range applicationSet.Status.ApplicationStatus {
|
||||
totalCountMap[appStepMap[appStatus.Application]] += 1
|
||||
|
||||
if progressiveSyncsStrategyEnabled(applicationSet, "RollingSync") {
|
||||
if appStatus.Status == "Pending" || appStatus.Status == "Progressing" {
|
||||
updateCountMap[appStepMap[appStatus.Application]] += 1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, appStatus := range applicationSet.Status.ApplicationStatus {
|
||||
|
||||
maxUpdateAllowed := true
|
||||
maxUpdate := &intstr.IntOrString{}
|
||||
if progressiveSyncsStrategyEnabled(applicationSet, "RollingSync") {
|
||||
maxUpdate = applicationSet.Spec.Strategy.RollingSync.Steps[appStepMap[appStatus.Application]].MaxUpdate
|
||||
}
|
||||
|
||||
// by default allow all applications to update if maxUpdate is unset
|
||||
if maxUpdate != nil {
|
||||
maxUpdateVal, err := intstr.GetScaledValueFromIntOrPercent(maxUpdate, totalCountMap[appStepMap[appStatus.Application]], false)
|
||||
if err != nil {
|
||||
log.Warnf("AppSet '%v' has a invalid maxUpdate value '%+v', ignoring maxUpdate logic for this step: %v", applicationSet.Name, maxUpdate, err)
|
||||
}
|
||||
|
||||
// ensure that percentage values greater than 0% always result in at least 1 Application being selected
|
||||
if maxUpdate.Type == intstr.String && maxUpdate.StrVal != "0%" && maxUpdateVal < 1 {
|
||||
maxUpdateVal = 1
|
||||
}
|
||||
|
||||
if updateCountMap[appStepMap[appStatus.Application]] >= maxUpdateVal {
|
||||
maxUpdateAllowed = false
|
||||
log.Infof("Application %v is not allowed to update yet, %v/%v Applications already updating in step %v in AppSet %v", appStatus.Application, updateCountMap[appStepMap[appStatus.Application]], maxUpdateVal, appStepMap[appStatus.Application]+1, applicationSet.Name)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if appStatus.Status == "Waiting" && appSyncMap[appStatus.Application] && maxUpdateAllowed {
|
||||
log.Infof("Application %v moved to Pending status, watching for the Application to start Progressing", appStatus.Application)
|
||||
appStatus.LastTransitionTime = &now
|
||||
appStatus.Status = "Pending"
|
||||
appStatus.Message = "Application moved to Pending status, watching for the Application resource to start Progressing."
|
||||
appStatus.Step = fmt.Sprint(appStepMap[appStatus.Application] + 1)
|
||||
|
||||
updateCountMap[appStepMap[appStatus.Application]] += 1
|
||||
}
|
||||
|
||||
appStatuses = append(appStatuses, appStatus)
|
||||
}
|
||||
}
|
||||
|
||||
err := r.setAppSetApplicationStatus(ctx, applicationSet, appStatuses)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to set AppSet app status: %w", err)
|
||||
}
|
||||
|
||||
return appStatuses, nil
|
||||
}
|
||||
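The `maxUpdate` handling above scales an integer-or-percentage value against the number of Applications in a RollingSync step, and bumps any non-zero percentage to at least one Application. A standalone sketch of that arithmetic with the same `intstr` helper (the step size of 10 is an arbitrary example):

```go
// Scale a maxUpdate of "25%" against a step containing 10 Applications.
maxUpdate := intstr.FromString("25%")
totalInStep := 10

maxUpdateVal, err := intstr.GetScaledValueFromIntOrPercent(&maxUpdate, totalInStep, false)
if err != nil {
	log.Warnf("invalid maxUpdate value '%+v': %v", maxUpdate, err)
}
// Percentages above 0% always admit at least one Application per step.
if maxUpdate.Type == intstr.String && maxUpdate.StrVal != "0%" && maxUpdateVal < 1 {
	maxUpdateVal = 1
}
// maxUpdateVal is 2 here, so at most two Applications in this step may be
// Pending/Progressing at the same time.
```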
|
||||
func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusConditions(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet) ([]argov1alpha1.ApplicationSetCondition, error) {
|
||||
|
||||
appSetProgressing := false
|
||||
for _, appStatus := range applicationSet.Status.ApplicationStatus {
|
||||
if appStatus.Status != "Healthy" {
|
||||
appSetProgressing = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
appSetConditionProgressing := false
|
||||
for _, appSetCondition := range applicationSet.Status.Conditions {
|
||||
if appSetCondition.Type == argov1alpha1.ApplicationSetConditionRolloutProgressing && appSetCondition.Status == argov1alpha1.ApplicationSetConditionStatusTrue {
|
||||
appSetConditionProgressing = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if appSetProgressing && !appSetConditionProgressing {
|
||||
_ = r.setApplicationSetStatusCondition(ctx,
|
||||
applicationSet,
|
||||
argov1alpha1.ApplicationSetCondition{
|
||||
Type: argov1alpha1.ApplicationSetConditionRolloutProgressing,
|
||||
Message: "ApplicationSet Rollout Rollout started",
|
||||
Reason: argov1alpha1.ApplicationSetReasonApplicationSetModified,
|
||||
Status: argov1alpha1.ApplicationSetConditionStatusTrue,
|
||||
}, false,
|
||||
)
|
||||
} else if !appSetProgressing && appSetConditionProgressing {
|
||||
_ = r.setApplicationSetStatusCondition(ctx,
|
||||
applicationSet,
|
||||
argov1alpha1.ApplicationSetCondition{
|
||||
Type: argov1alpha1.ApplicationSetConditionRolloutProgressing,
|
||||
Message: "ApplicationSet Rollout Rollout complete",
|
||||
Reason: argov1alpha1.ApplicationSetReasonApplicationSetRolloutComplete,
|
||||
Status: argov1alpha1.ApplicationSetConditionStatusFalse,
|
||||
}, false,
|
||||
)
|
||||
}
|
||||
|
||||
return applicationSet.Status.Conditions, nil
|
||||
}
|
||||
|
||||
func findApplicationStatusIndex(appStatuses []argov1alpha1.ApplicationSetApplicationStatus, application string) int {
|
||||
for i := range appStatuses {
|
||||
if appStatuses[i].Application == application {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// setApplicationSetApplicationStatus updates the ApplicatonSet's status field
|
||||
// with any new/changed Application statuses.
|
||||
func (r *ApplicationSetReconciler) setAppSetApplicationStatus(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet, applicationStatuses []argov1alpha1.ApplicationSetApplicationStatus) error {
|
||||
needToUpdateStatus := false
|
||||
|
||||
if len(applicationStatuses) != len(applicationSet.Status.ApplicationStatus) {
|
||||
needToUpdateStatus = true
|
||||
} else {
|
||||
for i := range applicationStatuses {
|
||||
appStatus := applicationStatuses[i]
|
||||
idx := findApplicationStatusIndex(applicationSet.Status.ApplicationStatus, appStatus.Application)
|
||||
if idx == -1 {
|
||||
needToUpdateStatus = true
|
||||
break
|
||||
}
|
||||
currentStatus := applicationSet.Status.ApplicationStatus[idx]
|
||||
if currentStatus.Message != appStatus.Message || currentStatus.Status != appStatus.Status || currentStatus.Step != appStatus.Step {
|
||||
needToUpdateStatus = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if needToUpdateStatus {
|
||||
namespacedName := types.NamespacedName{Namespace: applicationSet.Namespace, Name: applicationSet.Name}
|
||||
|
||||
// rebuild ApplicationStatus from scratch, we don't need any previous status history
|
||||
applicationSet.Status.ApplicationStatus = []argov1alpha1.ApplicationSetApplicationStatus{}
|
||||
for i := range applicationStatuses {
|
||||
applicationSet.Status.SetApplicationStatus(applicationStatuses[i])
|
||||
}
|
||||
|
||||
// Update the newly fetched object with new set of ApplicationStatus
|
||||
err := r.Client.Status().Update(ctx, applicationSet)
|
||||
if err != nil {
|
||||
|
||||
log.Errorf("unable to set application set status: %v", err)
|
||||
return fmt.Errorf("unable to set application set status: %v", err)
|
||||
}
|
||||
|
||||
if err := r.Get(ctx, namespacedName, applicationSet); err != nil {
|
||||
if client.IgnoreNotFound(err) != nil {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("error fetching updated application set: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ApplicationSetReconciler) syncValidApplications(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet, appSyncMap map[string]bool, appMap map[string]argov1alpha1.Application, validApps []argov1alpha1.Application) ([]argov1alpha1.Application, error) {
|
||||
rolloutApps := []argov1alpha1.Application{}
|
||||
for i := range validApps {
|
||||
pruneEnabled := false
|
||||
|
||||
// ensure that Applications generated with RollingSync do not have an automated sync policy, since the AppSet controller will handle triggering the sync operation instead
|
||||
if validApps[i].Spec.SyncPolicy != nil && validApps[i].Spec.SyncPolicy.Automated != nil {
|
||||
pruneEnabled = validApps[i].Spec.SyncPolicy.Automated.Prune
|
||||
validApps[i].Spec.SyncPolicy.Automated = nil
|
||||
}
|
||||
|
||||
appSetStatusPending := false
|
||||
idx := findApplicationStatusIndex(applicationSet.Status.ApplicationStatus, validApps[i].Name)
|
||||
if idx > -1 && applicationSet.Status.ApplicationStatus[idx].Status == "Pending" {
|
||||
// only trigger a sync for Applications that are in Pending status, since this is governed by maxUpdate
|
||||
appSetStatusPending = true
|
||||
}
|
||||
|
||||
// check appSyncMap to determine which Applications are ready to be updated and which should be skipped
|
||||
if appSyncMap[validApps[i].Name] && appMap[validApps[i].Name].Status.Sync.Status == "OutOfSync" && appSetStatusPending {
|
||||
log.Infof("triggering sync for application: %v, prune enabled: %v", validApps[i].Name, pruneEnabled)
|
||||
validApps[i], _ = syncApplication(validApps[i], pruneEnabled)
|
||||
}
|
||||
rolloutApps = append(rolloutApps, validApps[i])
|
||||
}
|
||||
return rolloutApps, nil
|
||||
}
|
||||
|
||||
// used by the RollingSync Progressive Sync strategy to trigger a sync of a particular Application resource
|
||||
func syncApplication(application argov1alpha1.Application, prune bool) (argov1alpha1.Application, error) {
|
||||
|
||||
operation := argov1alpha1.Operation{
|
||||
InitiatedBy: argov1alpha1.OperationInitiator{
|
||||
Username: "applicationset-controller",
|
||||
Automated: true,
|
||||
},
|
||||
Info: []*argov1alpha1.Info{
|
||||
{
|
||||
Name: "Reason",
|
||||
Value: "ApplicationSet RollingSync triggered a sync of this Application resource.",
|
||||
},
|
||||
},
|
||||
Sync: &argov1alpha1.SyncOperation{},
|
||||
}
|
||||
|
||||
if application.Spec.SyncPolicy != nil {
|
||||
if application.Spec.SyncPolicy.Retry != nil {
|
||||
operation.Retry = *application.Spec.SyncPolicy.Retry
|
||||
}
|
||||
if application.Spec.SyncPolicy.SyncOptions != nil {
|
||||
operation.Sync.SyncOptions = application.Spec.SyncPolicy.SyncOptions
|
||||
}
|
||||
operation.Sync.Prune = prune
|
||||
}
|
||||
application.Operation = &operation
|
||||
|
||||
return application, nil
|
||||
}
|
||||
|
||||
func getOwnsHandlerPredicates(enableProgressiveSyncs bool) predicate.Funcs {
|
||||
return predicate.Funcs{
|
||||
CreateFunc: func(e event.CreateEvent) bool {
|
||||
// if we are the owner and there is a create event, we most likely created it and do not need to
|
||||
// re-reconcile
|
||||
log.Debugln("received create event from owning an application")
|
||||
return false
|
||||
},
|
||||
DeleteFunc: func(e event.DeleteEvent) bool {
|
||||
log.Debugln("received delete event from owning an application")
|
||||
return true
|
||||
},
|
||||
UpdateFunc: func(e event.UpdateEvent) bool {
|
||||
log.Debugln("received update event from owning an application")
|
||||
appOld, isApp := e.ObjectOld.(*argov1alpha1.Application)
|
||||
if !isApp {
|
||||
return false
|
||||
}
|
||||
appNew, isApp := e.ObjectNew.(*argov1alpha1.Application)
|
||||
if !isApp {
|
||||
return false
|
||||
}
|
||||
requeue := shouldRequeueApplicationSet(appOld, appNew, enableProgressiveSyncs)
|
||||
log.Debugf("requeue: %t caused by application %s\n", requeue, appNew.Name)
|
||||
return requeue
|
||||
},
|
||||
GenericFunc: func(e event.GenericEvent) bool {
|
||||
log.Debugln("received generic event from owning an application")
|
||||
return true
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// shouldRequeueApplicationSet determines when we want to requeue an ApplicationSet for reconciling based on an owned
// application change
// The applicationset controller owns a subset of the Application CR.
// We do not need to re-reconcile if parts of the application change outside the applicationset's control.
// An example being, Application.ApplicationStatus.ReconciledAt which gets updated by the application controller.
// Additionally, Application.ObjectMeta.ResourceVersion and Application.ObjectMeta.Generation which are set by K8s.
func shouldRequeueApplicationSet(appOld *argov1alpha1.Application, appNew *argov1alpha1.Application, enableProgressiveSyncs bool) bool {
	if appOld == nil || appNew == nil {
		return false
	}

	// the applicationset controller owns the application spec, labels, annotations, and finalizers on the applications
	if !reflect.DeepEqual(appOld.Spec, appNew.Spec) ||
		!reflect.DeepEqual(appOld.ObjectMeta.GetAnnotations(), appNew.ObjectMeta.GetAnnotations()) ||
		!reflect.DeepEqual(appOld.ObjectMeta.GetLabels(), appNew.ObjectMeta.GetLabels()) ||
		!reflect.DeepEqual(appOld.ObjectMeta.GetFinalizers(), appNew.ObjectMeta.GetFinalizers()) {
		return true
	}

	// progressive syncs use the application status for updates. if they differ, requeue to trigger the next progression
	if enableProgressiveSyncs {
		if appOld.Status.Health.Status != appNew.Status.Health.Status || appOld.Status.Sync.Status != appNew.Status.Sync.Status {
			return true
		}

		if appOld.Status.OperationState != nil && appNew.Status.OperationState != nil {
			if appOld.Status.OperationState.Phase != appNew.Status.OperationState.Phase ||
				appOld.Status.OperationState.StartedAt != appNew.Status.OperationState.StartedAt {
				return true
			}
		}
	}

	return false
}

var _ handler.EventHandler = &clusterSecretEventHandler{}
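This requeue decision is wired into the controller through `getOwnsHandlerPredicates`, which `SetupWithManager` (earlier in this diff) attaches to the `Owns` watch on Applications. A condensed view of that wiring; the trailing `Complete(r)` call is assumed here because the original hunk is truncated:

```go
// Only Application changes that shouldRequeueApplicationSet cares about will
// requeue the owning ApplicationSet.
ownsHandler := getOwnsHandlerPredicates(enableProgressiveSyncs)

return ctrl.NewControllerManagedBy(mgr).
	WithOptions(controller.Options{MaxConcurrentReconciles: maxConcurrentReconciliations}).
	For(&argov1alpha1.ApplicationSet{}).
	Owns(&argov1alpha1.Application{}, builder.WithPredicates(ownsHandler)).
	Complete(r)
```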
File diff suppressed because it is too large.
@@ -139,11 +139,7 @@ func nestedGeneratorHasClusterGenerator(nested argoprojiov1alpha1.ApplicationSet
		return false, fmt.Errorf("unable to get nested matrix generator: %w", err)
	}
	if nestedMatrix != nil {
		hasClusterGenerator, err := nestedGeneratorsHaveClusterGenerator(nestedMatrix.ToMatrixGenerator().Generators)
		if err != nil {
			return false, fmt.Errorf("error evaluating nested matrix generator: %w", err)
		}
		return hasClusterGenerator, nil
		return nestedGeneratorsHaveClusterGenerator(nestedMatrix.ToMatrixGenerator().Generators)
	}
}

@@ -153,11 +149,7 @@ func nestedGeneratorHasClusterGenerator(nested argoprojiov1alpha1.ApplicationSet
		return false, fmt.Errorf("unable to get nested merge generator: %w", err)
	}
	if nestedMerge != nil {
		hasClusterGenerator, err := nestedGeneratorsHaveClusterGenerator(nestedMerge.ToMergeGenerator().Generators)
		if err != nil {
			return false, fmt.Errorf("error evaluating nested merge generator: %w", err)
		}
		return hasClusterGenerator, nil
		return nestedGeneratorsHaveClusterGenerator(nestedMerge.ToMergeGenerator().Generators)
	}
}
@@ -573,68 +573,3 @@ type mockAddRateLimitingInterface struct {
|
||||
errorOccurred bool
|
||||
addedItems []ctrl.Request
|
||||
}
|
||||
|
||||
func TestNestedGeneratorHasClusterGenerator_NestedClusterGenerator(t *testing.T) {
|
||||
nested := argov1alpha1.ApplicationSetNestedGenerator{
|
||||
Clusters: &argov1alpha1.ClusterGenerator{},
|
||||
}
|
||||
|
||||
hasClusterGenerator, err := nestedGeneratorHasClusterGenerator(nested)
|
||||
|
||||
assert.Nil(t, err)
|
||||
assert.True(t, hasClusterGenerator)
|
||||
}
|
||||
|
||||
func TestNestedGeneratorHasClusterGenerator_NestedMergeGenerator(t *testing.T) {
|
||||
nested := argov1alpha1.ApplicationSetNestedGenerator{
|
||||
Merge: &apiextensionsv1.JSON{
|
||||
Raw: []byte(
|
||||
`{
|
||||
"generators": [
|
||||
{
|
||||
"clusters": {
|
||||
"selector": {
|
||||
"matchLabels": {
|
||||
"argocd.argoproj.io/secret-type": "cluster"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}`,
|
||||
),
|
||||
},
|
||||
}
|
||||
|
||||
hasClusterGenerator, err := nestedGeneratorHasClusterGenerator(nested)
|
||||
|
||||
assert.Nil(t, err)
|
||||
assert.True(t, hasClusterGenerator)
|
||||
}
|
||||
|
||||
func TestNestedGeneratorHasClusterGenerator_NestedMergeGeneratorWithInvalidJSON(t *testing.T) {
|
||||
nested := argov1alpha1.ApplicationSetNestedGenerator{
|
||||
Merge: &apiextensionsv1.JSON{
|
||||
Raw: []byte(
|
||||
`{
|
||||
"generators": [
|
||||
{
|
||||
"clusters": {
|
||||
"selector": {
|
||||
"matchLabels": {
|
||||
"argocd.argoproj.io/secret-type": "cluster"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
`,
|
||||
),
|
||||
},
|
||||
}
|
||||
|
||||
hasClusterGenerator, err := nestedGeneratorHasClusterGenerator(nested)
|
||||
|
||||
assert.NotNil(t, err)
|
||||
assert.False(t, hasClusterGenerator)
|
||||
}
|
||||
|
||||
@@ -5,7 +5,10 @@ import (
	"testing"
	"time"

	"github.com/argoproj/argo-cd/v2/applicationset/generators"
	argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
@@ -14,14 +17,10 @@ import (
	kubefake "k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/tools/record"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"

	"github.com/argoproj/argo-cd/v2/applicationset/generators"
	"github.com/argoproj/argo-cd/v2/applicationset/services/mocks"
	argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

func TestRequeueAfter(t *testing.T) {
	mockServer := &mocks.Repos{}
	mockServer := argoCDServiceMock{}
	ctx := context.Background()
	scheme := runtime.NewScheme()
	err := argov1alpha1.AddToScheme(scheme)
@@ -60,9 +59,9 @@ func TestRequeueAfter(t *testing.T) {
		"List":     generators.NewListGenerator(),
		"Clusters": generators.NewClusterGenerator(k8sClient, ctx, appClientset, "argocd"),
		"Git":      generators.NewGitGenerator(mockServer),
		"SCMProvider": generators.NewSCMProviderGenerator(fake.NewClientBuilder().WithObjects(&corev1.Secret{}).Build(), generators.SCMAuthProviders{}, "", []string{""}),
		"SCMProvider": generators.NewSCMProviderGenerator(fake.NewClientBuilder().WithObjects(&corev1.Secret{}).Build(), generators.SCMAuthProviders{}),
		"ClusterDecisionResource": generators.NewDuckTypeGenerator(ctx, fakeDynClient, appClientset, "argocd"),
		"PullRequest": generators.NewPullRequestGenerator(k8sClient, generators.SCMAuthProviders{}, "", []string{""}),
		"PullRequest": generators.NewPullRequestGenerator(k8sClient, generators.SCMAuthProviders{}),
	}

	nestedGenerators := map[string]generators.Generator{
@@ -151,3 +150,30 @@ func TestRequeueAfter(t *testing.T) {
		})
	}
}
type argoCDServiceMock struct {
	mock *mock.Mock
}

func (a argoCDServiceMock) GetApps(ctx context.Context, repoURL string, revision string) ([]string, error) {
	args := a.mock.Called(ctx, repoURL, revision)

	return args.Get(0).([]string), args.Error(1)
}

func (a argoCDServiceMock) GetFiles(ctx context.Context, repoURL string, revision string, pattern string) (map[string][]byte, error) {
	args := a.mock.Called(ctx, repoURL, revision, pattern)

	return args.Get(0).(map[string][]byte), args.Error(1)
}

func (a argoCDServiceMock) GetFileContent(ctx context.Context, repoURL string, revision string, path string) ([]byte, error) {
	args := a.mock.Called(ctx, repoURL, revision, path)

	return args.Get(0).([]byte), args.Error(1)
}

func (a argoCDServiceMock) GetDirectories(ctx context.Context, repoURL string, revision string) ([]string, error) {
	args := a.mock.Called(ctx, repoURL, revision)
	return args.Get(0).([]string), args.Error(1)
}
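`argoCDServiceMock` simply forwards every call to its embedded `*mock.Mock`, so a test programs expectations on the testify mock before handing it to the Git generator. A brief usage sketch inside a test function (the repository URL and returned directories are made-up values):

```go
// Arrange: make GetDirectories return two app directories for any context.
m := &mock.Mock{}
m.On("GetDirectories", mock.Anything, "https://github.com/example/repo.git", "HEAD").
	Return([]string{"apps/guestbook", "apps/helm-guestbook"}, nil)

svc := argoCDServiceMock{mock: m}

// Act/Assert: calls are routed through the recorded expectation.
dirs, err := svc.GetDirectories(context.Background(), "https://github.com/example/repo.git", "HEAD")
assert.NoError(t, err)
assert.Equal(t, []string{"apps/guestbook", "apps/helm-guestbook"}, dirs)
```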
@@ -1,35 +0,0 @@
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: guestbook
|
||||
spec:
|
||||
goTemplate: true
|
||||
generators:
|
||||
- list:
|
||||
elements:
|
||||
- cluster: engineering-dev
|
||||
url: https://kubernetes.default.svc
|
||||
foo: bar
|
||||
# Update foo value with foo: bar
|
||||
# Application engineering-prod-guestbook labels will still be baz
|
||||
# Delete this element
|
||||
# Application engineering-prod-guestbook will be kept
|
||||
- cluster: engineering-prod
|
||||
url: https://kubernetes.default.svc
|
||||
foo: baz
|
||||
template:
|
||||
metadata:
|
||||
name: '{{.cluster}}-guestbook'
|
||||
labels:
|
||||
foo: '{{.foo}}'
|
||||
spec:
|
||||
project: default
|
||||
source:
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
targetRevision: HEAD
|
||||
path: applicationset/examples/list-generator/guestbook/{{.cluster}}
|
||||
destination:
|
||||
server: '{{.url}}'
|
||||
namespace: guestbook
|
||||
syncPolicy:
|
||||
applicationsSync: create-only
|
||||
@@ -1,35 +0,0 @@
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: guestbook
|
||||
spec:
|
||||
goTemplate: true
|
||||
generators:
|
||||
- list:
|
||||
elements:
|
||||
- cluster: engineering-dev
|
||||
url: https://kubernetes.default.svc
|
||||
foo: bar
|
||||
# Update foo value with foo: bar
|
||||
# Application engineering-prod-guestbook labels will change to foo: bar
|
||||
# Delete this element
|
||||
# Application engineering-prod-guestbook will be kept
|
||||
- cluster: engineering-prod
|
||||
url: https://kubernetes.default.svc
|
||||
foo: baz
|
||||
template:
|
||||
metadata:
|
||||
name: '{{.cluster}}-guestbook'
|
||||
labels:
|
||||
foo: '{{.foo}}'
|
||||
spec:
|
||||
project: default
|
||||
source:
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
targetRevision: HEAD
|
||||
path: applicationset/examples/list-generator/guestbook/{{.cluster}}
|
||||
destination:
|
||||
server: '{{.url}}'
|
||||
namespace: guestbook
|
||||
syncPolicy:
|
||||
applicationsSync: create-update
|
||||
@@ -1,20 +0,0 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: guestbook-ui
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: guestbook-ui
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: guestbook-ui
|
||||
spec:
|
||||
containers:
|
||||
- image: gcr.io/heptio-images/ks-guestbook-demo:0.2
|
||||
name: guestbook-ui
|
||||
ports:
|
||||
- containerPort: 80
|
||||
@@ -1,10 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: guestbook-ui
|
||||
spec:
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 80
|
||||
selector:
|
||||
app: guestbook-ui
|
||||
@@ -1,20 +0,0 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: guestbook-ui
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: guestbook-ui
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: guestbook-ui
|
||||
spec:
|
||||
containers:
|
||||
- image: gcr.io/heptio-images/ks-guestbook-demo:0.2
|
||||
name: guestbook-ui
ports:
- containerPort: 80
@@ -1,10 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: guestbook-ui
spec:
ports:
- port: 80
targetPort: 80
selector:
app: guestbook-ui
@@ -4,7 +4,6 @@ metadata:
name: guestbook
spec:
goTemplate: true
goTemplateOptions: ["missingkey=error"]
generators:
- clusters: {}
template:

@@ -4,7 +4,6 @@ metadata:
name: book-import
spec:
goTemplate: true
goTemplateOptions: ["missingkey=error"]
generators:
- clusterDecisionResource:
configMapRef: ocm-placement

@@ -8,7 +8,6 @@ metadata:
name: guestbook
spec:
goTemplate: true
goTemplateOptions: ["missingkey=error"]
generators:
- clusters: {}
template:

@@ -27,7 +27,6 @@ metadata:
name: cluster-addons
spec:
goTemplate: true
goTemplateOptions: ["missingkey=error"]
generators:
- git:
repoURL: https://github.com/infra-team/cluster-deployments.git

@@ -38,7 +38,6 @@ metadata:
name: guestbook
spec:
goTemplate: true
goTemplateOptions: ["missingkey=error"]
generators:
- git:
repoURL: https://github.com/infra-team/cluster-deployments.git

@@ -51,7 +51,6 @@ metadata:
name: guestbook
spec:
goTemplate: true
goTemplateOptions: ["missingkey=error"]
generators:
- git:
repoURL: https://github.com/infra-team/cluster-deployments.git

@@ -5,7 +5,6 @@ metadata:
name: guestbook
spec:
goTemplate: true
goTemplateOptions: ["missingkey=error"]
generators:
- list:
elements:

@@ -8,7 +8,6 @@ metadata:
name: guestbook
spec:
goTemplate: true
goTemplateOptions: ["missingkey=error"]
generators:
- list:
elements:

@@ -5,7 +5,6 @@ metadata:
namespace: argocd
spec:
goTemplate: true
goTemplateOptions: ["missingkey=error"]
generators:
- git:
repoURL: https://github.com/argoproj/argo-cd.git

@@ -5,7 +5,6 @@ metadata:
namespace: argocd
spec:
goTemplate: true
goTemplateOptions: ["missingkey=error"]
generators:
- git:
repoURL: https://github.com/argoproj/argo-cd.git

@@ -4,7 +4,6 @@ metadata:
name: guestbook
spec:
goTemplate: true
goTemplateOptions: ["missingkey=error"]
generators:
- git:
repoURL: https://github.com/argoproj/argo-cd.git

@@ -1,14 +0,0 @@
key:
components:
- name: component1
chart: podinfo
version: "6.3.2"
releaseName: component1
repoUrl: "https://stefanprodan.github.io/podinfo"
namespace: component1
- name: component2
chart: podinfo
version: "6.3.3"
releaseName: component2
repoUrl: "ghcr.io/stefanprodan/charts"
namespace: component2
@@ -4,7 +4,6 @@ metadata:
name: guestbook
spec:
goTemplate: true
goTemplateOptions: ["missingkey=error"]
generators:
- list:
elements:

@@ -8,7 +8,6 @@ metadata:
name: cluster-git
spec:
goTemplate: true
goTemplateOptions: ["missingkey=error"]
generators:
- matrix:
generators:

@@ -8,7 +8,6 @@ metadata:
name: list-git
spec:
goTemplate: true
goTemplateOptions: ["missingkey=error"]
generators:
- matrix:
generators:

@@ -5,7 +5,6 @@ metadata:
namespace: argocd
spec:
goTemplate: true
goTemplateOptions: ["missingkey=error"]
generators:
- matrix:
generators:

@@ -13,7 +13,6 @@ metadata:
name: matrix-and-union-in-matrix
spec:
goTemplate: true
goTemplateOptions: ["missingkey=error"]
generators:
- matrix:
generators:

@@ -4,7 +4,6 @@ metadata:
name: merge-clusters-and-list
spec:
goTemplate: true
goTemplateOptions: ["missingkey=error"]
generators:
- merge:
mergeKeys:

@@ -4,7 +4,6 @@ metadata:
name: merge-two-matrixes
spec:
goTemplate: true
goTemplateOptions: ["missingkey=error"]
generators:
- merge:
mergeKeys:

@@ -4,7 +4,6 @@ metadata:
name: myapp
spec:
goTemplate: true
goTemplateOptions: ["missingkey=error"]
generators:
- pullRequest:
github:
@@ -24,8 +23,6 @@ spec:
template:
metadata:
name: 'myapp-{{ .branch }}-{{ .number }}'
labels:
key1: '{{ index .labels 0 }}'
spec:
source:
repoURL: 'https://github.com/myorg/myrepo.git'

@@ -4,7 +4,6 @@ metadata:
name: guestbook
spec:
goTemplate: true
goTemplateOptions: ["missingkey=error"]
generators:
- scmProvider:
github:

@@ -8,7 +8,6 @@ metadata:
name: guestbook
spec:
goTemplate: true
goTemplateOptions: ["missingkey=error"]
generators:
- list:
elements:
@@ -61,7 +61,8 @@ func (g *ClusterGenerator) GetTemplate(appSetGenerator *argoappsetv1alpha1.Appli
return &appSetGenerator.Clusters.Template
}

func (g *ClusterGenerator) GenerateParams(appSetGenerator *argoappsetv1alpha1.ApplicationSetGenerator, appSet *argoappsetv1alpha1.ApplicationSet) ([]map[string]interface{}, error) {
func (g *ClusterGenerator) GenerateParams(
appSetGenerator *argoappsetv1alpha1.ApplicationSetGenerator, appSet *argoappsetv1alpha1.ApplicationSet) ([]map[string]interface{}, error) {

if appSetGenerator == nil {
return nil, EmptyAppSetGeneratorError
@@ -108,7 +109,7 @@ func (g *ClusterGenerator) GenerateParams(appSetGenerator *argoappsetv1alpha1.Ap
params["nameNormalized"] = cluster.Name
params["server"] = cluster.Server

err = appendTemplatedValues(appSetGenerator.Clusters.Values, params, appSet.Spec.GoTemplate, appSet.Spec.GoTemplateOptions)
err = appendTemplatedValues(appSetGenerator.Clusters.Values, params, appSet)
if err != nil {
return nil, err
}
@@ -148,7 +149,7 @@ func (g *ClusterGenerator) GenerateParams(appSetGenerator *argoappsetv1alpha1.Ap
}
}

err = appendTemplatedValues(appSetGenerator.Clusters.Values, params, appSet.Spec.GoTemplate, appSet.Spec.GoTemplateOptions)
err = appendTemplatedValues(appSetGenerator.Clusters.Values, params, appSet)
if err != nil {
return nil, err
}
@@ -161,6 +162,44 @@ func (g *ClusterGenerator) GenerateParams(appSetGenerator *argoappsetv1alpha1.Ap
return res, nil
}

func appendTemplatedValues(clusterValues map[string]string, params map[string]interface{}, appSet *argoappsetv1alpha1.ApplicationSet) error {
// We create a local map to ensure that we do not fall victim to a billion-laughs attack. We iterate through the
// cluster values map and only replace values in said map if it has already been whitelisted in the params map.
// Once we iterate through all the cluster values we can then safely merge the `tmp` map into the main params map.
tmp := map[string]interface{}{}

for key, value := range clusterValues {
result, err := replaceTemplatedString(value, params, appSet)

if err != nil {
return err
}

if appSet.Spec.GoTemplate {
if tmp["values"] == nil {
tmp["values"] = map[string]string{}
}
tmp["values"].(map[string]string)[key] = result
} else {
tmp[fmt.Sprintf("values.%s", key)] = result
}
}

for key, value := range tmp {
params[key] = value
}

return nil
}

func replaceTemplatedString(value string, params map[string]interface{}, appSet *argoappsetv1alpha1.ApplicationSet) (string, error) {
replacedTmplStr, err := render.Replace(value, params, appSet.Spec.GoTemplate)
if err != nil {
return "", err
}
return replacedTmplStr, nil
}

func (g *ClusterGenerator) getSecretsByClusterName(appSetGenerator *argoappsetv1alpha1.ApplicationSetGenerator) (map[string]corev1.Secret, error) {
// List all Clusters:
clusterSecretList := &corev1.SecretList{}
@@ -3,7 +3,6 @@ package generators
import (
"context"
"fmt"
"testing"

"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
@@ -16,6 +15,8 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"

argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"

"testing"
)

const resourceApiVersion = "mallard.io/v1"

@@ -4,9 +4,8 @@ import (
"fmt"
"reflect"

"github.com/jeremywohl/flatten"

"github.com/argoproj/argo-cd/v2/applicationset/utils"
"github.com/jeremywohl/flatten"

"k8s.io/apimachinery/pkg/labels"

@@ -32,7 +31,7 @@ func Transform(requestedGenerator argoprojiov1alpha1.ApplicationSetGenerator, al
// so that, among other things, we can match on cluster urls.
selector, err := utils.LabelSelectorAsSelector(requestedGenerator.Selector)
if err != nil {
return nil, fmt.Errorf("error parsing label selector: %w", err)
return nil, err
}

res := []TransformResult{}
@@ -53,7 +52,7 @@ func Transform(requestedGenerator argoprojiov1alpha1.ApplicationSetGenerator, al
}
var params []map[string]interface{}
if len(genParams) != 0 {
tempInterpolatedGenerator, err := InterpolateGenerator(&requestedGenerator, genParams, appSet.Spec.GoTemplate, appSet.Spec.GoTemplateOptions)
tempInterpolatedGenerator, err := InterpolateGenerator(&requestedGenerator, genParams, appSet.Spec.GoTemplate)
interpolatedGenerator = &tempInterpolatedGenerator
if err != nil {
log.WithError(err).WithField("genParams", genParams).
@@ -148,9 +147,9 @@ func mergeGeneratorTemplate(g Generator, requestedGenerator *argoprojiov1alpha1.

// InterpolateGenerator allows interpolating the matrix's 2nd child generator with values from the 1st child generator
// "params" parameter is an array, where each index corresponds to a generator. Each index contains a map w/ that generator's parameters.
func InterpolateGenerator(requestedGenerator *argoprojiov1alpha1.ApplicationSetGenerator, params map[string]interface{}, useGoTemplate bool, goTemplateOptions []string) (argoprojiov1alpha1.ApplicationSetGenerator, error) {
func InterpolateGenerator(requestedGenerator *argoprojiov1alpha1.ApplicationSetGenerator, params map[string]interface{}, useGoTemplate bool) (argoprojiov1alpha1.ApplicationSetGenerator, error) {
render := utils.Render{}
interpolatedGenerator, err := render.RenderGeneratorParams(requestedGenerator, params, useGoTemplate, goTemplateOptions)
interpolatedGenerator, err := render.RenderGeneratorParams(requestedGenerator, params, useGoTemplate)
if err != nil {
log.WithError(err).WithField("interpolatedGenerator", interpolatedGenerator).Error("error interpolating generator with other generator's parameter")
return *interpolatedGenerator, err
@@ -158,16 +157,3 @@ func InterpolateGenerator(requestedGenerator *argoprojiov1alpha1.ApplicationSetG

return *interpolatedGenerator, nil
}

// Fixes https://github.com/argoproj/argo-cd/issues/11982 while ensuring backwards compatibility.
// This is only a short-term solution and should be removed in a future major version.
func dropDisabledNestedSelectors(generators []argoprojiov1alpha1.ApplicationSetNestedGenerator) bool {
var foundSelector bool
for i := range generators {
if generators[i].Selector != nil {
foundSelector = true
generators[i].Selector = nil
}
}
return foundSelector
}
@@ -10,8 +10,7 @@ import (
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"github.com/argoproj/argo-cd/v2/applicationset/services/mocks"

testutils "github.com/argoproj/argo-cd/v2/applicationset/utils/test"
argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"

"github.com/stretchr/testify/mock"
@@ -20,6 +19,8 @@ import (
kubefake "k8s.io/client-go/kubernetes/fake"
crtclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"

argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

func TestMatchValues(t *testing.T) {
@@ -70,18 +71,16 @@ func TestMatchValues(t *testing.T) {
"List": listGenerator,
}

applicationSetInfo := argov1alpha1.ApplicationSet{
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "set",
},
Spec: argov1alpha1.ApplicationSetSpec{
GoTemplate: false,
},
Spec: argoprojiov1alpha1.ApplicationSetSpec{},
}

results, err := Transform(argov1alpha1.ApplicationSetGenerator{
results, err := Transform(argoprojiov1alpha1.ApplicationSetGenerator{
Selector: testCase.selector,
List: &argov1alpha1.ListGenerator{
List: &argoprojiov1alpha1.ListGenerator{
Elements: testCase.elements,
Template: emptyTemplate(),
}},
@@ -341,9 +340,9 @@ func getMockClusterGenerator() Generator {
}

func getMockGitGenerator() Generator {
argoCDServiceMock := mocks.Repos{}
argoCDServiceMock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything).Return([]string{"app1", "app2", "app_3", "p1/app4"}, nil)
var gitGenerator = NewGitGenerator(&argoCDServiceMock)
argoCDServiceMock := testutils.ArgoCDServiceMock{Mock: &mock.Mock{}}
argoCDServiceMock.Mock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything).Return([]string{"app1", "app2", "app_3", "p1/app4"}, nil)
var gitGenerator = NewGitGenerator(argoCDServiceMock)
return gitGenerator
}

@@ -358,8 +357,8 @@ func TestGetRelevantGenerators(t *testing.T) {
testGenerators["Merge"] = NewMergeGenerator(testGenerators)
testGenerators["List"] = NewListGenerator()

requestedGenerator := &argov1alpha1.ApplicationSetGenerator{
List: &argov1alpha1.ListGenerator{
requestedGenerator := &argoprojiov1alpha1.ApplicationSetGenerator{
List: &argoprojiov1alpha1.ListGenerator{
Elements: []apiextensionsv1.JSON{{Raw: []byte(`{"cluster": "cluster","url": "url","values":{"foo":"bar"}}`)}},
}}

@@ -367,10 +366,10 @@ func TestGetRelevantGenerators(t *testing.T) {
assert.Len(t, relevantGenerators, 1)
assert.IsType(t, &ListGenerator{}, relevantGenerators[0])

requestedGenerator = &argov1alpha1.ApplicationSetGenerator{
Clusters: &argov1alpha1.ClusterGenerator{
requestedGenerator = &argoprojiov1alpha1.ApplicationSetGenerator{
Clusters: &argoprojiov1alpha1.ClusterGenerator{
Selector: metav1.LabelSelector{},
Template: argov1alpha1.ApplicationSetTemplate{},
Template: argoprojiov1alpha1.ApplicationSetTemplate{},
Values: nil,
},
}
@@ -379,14 +378,14 @@ func TestGetRelevantGenerators(t *testing.T) {
assert.Len(t, relevantGenerators, 1)
assert.IsType(t, &ClusterGenerator{}, relevantGenerators[0])

requestedGenerator = &argov1alpha1.ApplicationSetGenerator{
Git: &argov1alpha1.GitGenerator{
requestedGenerator = &argoprojiov1alpha1.ApplicationSetGenerator{
Git: &argoprojiov1alpha1.GitGenerator{
RepoURL: "",
Directories: nil,
Files: nil,
Revision: "",
RequeueAfterSeconds: nil,
Template: argov1alpha1.ApplicationSetTemplate{},
Template: argoprojiov1alpha1.ApplicationSetTemplate{},
},
}

@@ -396,8 +395,8 @@ func TestGetRelevantGenerators(t *testing.T) {
}

func TestInterpolateGenerator(t *testing.T) {
requestedGenerator := &argov1alpha1.ApplicationSetGenerator{
Clusters: &argov1alpha1.ClusterGenerator{
requestedGenerator := &argoprojiov1alpha1.ApplicationSetGenerator{
Clusters: &argoprojiov1alpha1.ClusterGenerator{
Selector: metav1.LabelSelector{
MatchLabels: map[string]string{
"argocd.argoproj.io/secret-type": "cluster",
@@ -414,7 +413,7 @@ func TestInterpolateGenerator(t *testing.T) {
"path[1]": "p2",
"path.basenameNormalized": "app3",
}
interpolatedGenerator, err := InterpolateGenerator(requestedGenerator, gitGeneratorParams, false, nil)
interpolatedGenerator, err := InterpolateGenerator(requestedGenerator, gitGeneratorParams, false)
if err != nil {
log.WithError(err).WithField("requestedGenerator", requestedGenerator).Error("error interpolating Generator")
return
@@ -423,23 +422,23 @@ func TestInterpolateGenerator(t *testing.T) {
assert.Equal(t, "p1", interpolatedGenerator.Clusters.Selector.MatchLabels["path-zero"])
assert.Equal(t, "p1/p2/app3", interpolatedGenerator.Clusters.Selector.MatchLabels["path-full"])

fileNamePath := argov1alpha1.GitFileGeneratorItem{
fileNamePath := argoprojiov1alpha1.GitFileGeneratorItem{
Path: "{{name}}",
}
fileServerPath := argov1alpha1.GitFileGeneratorItem{
fileServerPath := argoprojiov1alpha1.GitFileGeneratorItem{
Path: "{{server}}",
}

requestedGenerator = &argov1alpha1.ApplicationSetGenerator{
Git: &argov1alpha1.GitGenerator{
Files: append([]argov1alpha1.GitFileGeneratorItem{}, fileNamePath, fileServerPath),
Template: argov1alpha1.ApplicationSetTemplate{},
requestedGenerator = &argoprojiov1alpha1.ApplicationSetGenerator{
Git: &argoprojiov1alpha1.GitGenerator{
Files: append([]argoprojiov1alpha1.GitFileGeneratorItem{}, fileNamePath, fileServerPath),
Template: argoprojiov1alpha1.ApplicationSetTemplate{},
},
}
clusterGeneratorParams := map[string]interface{}{
"name": "production_01/west", "server": "https://production-01.example.com",
}
interpolatedGenerator, err = InterpolateGenerator(requestedGenerator, clusterGeneratorParams, false, nil)
interpolatedGenerator, err = InterpolateGenerator(requestedGenerator, clusterGeneratorParams, false)
if err != nil {
log.WithError(err).WithField("requestedGenerator", requestedGenerator).Error("error interpolating Generator")
return
@@ -449,8 +448,8 @@ func TestInterpolateGenerator(t *testing.T) {
}

func TestInterpolateGenerator_go(t *testing.T) {
requestedGenerator := &argov1alpha1.ApplicationSetGenerator{
Clusters: &argov1alpha1.ClusterGenerator{
requestedGenerator := &argoprojiov1alpha1.ApplicationSetGenerator{
Clusters: &argoprojiov1alpha1.ClusterGenerator{
Selector: metav1.LabelSelector{
MatchLabels: map[string]string{
"argocd.argoproj.io/secret-type": "cluster",
@@ -467,7 +466,7 @@ func TestInterpolateGenerator_go(t *testing.T) {
"segments": []string{"p1", "p2", "app3"},
},
}
interpolatedGenerator, err := InterpolateGenerator(requestedGenerator, gitGeneratorParams, true, nil)
interpolatedGenerator, err := InterpolateGenerator(requestedGenerator, gitGeneratorParams, true)
require.NoError(t, err)
if err != nil {
log.WithError(err).WithField("requestedGenerator", requestedGenerator).Error("error interpolating Generator")
@@ -477,23 +476,23 @@ func TestInterpolateGenerator_go(t *testing.T) {
assert.Equal(t, "p1", interpolatedGenerator.Clusters.Selector.MatchLabels["path-zero"])
assert.Equal(t, "p1/p2/app3", interpolatedGenerator.Clusters.Selector.MatchLabels["path-full"])

fileNamePath := argov1alpha1.GitFileGeneratorItem{
fileNamePath := argoprojiov1alpha1.GitFileGeneratorItem{
Path: "{{.name}}",
}
fileServerPath := argov1alpha1.GitFileGeneratorItem{
fileServerPath := argoprojiov1alpha1.GitFileGeneratorItem{
Path: "{{.server}}",
}

requestedGenerator = &argov1alpha1.ApplicationSetGenerator{
Git: &argov1alpha1.GitGenerator{
Files: append([]argov1alpha1.GitFileGeneratorItem{}, fileNamePath, fileServerPath),
Template: argov1alpha1.ApplicationSetTemplate{},
requestedGenerator = &argoprojiov1alpha1.ApplicationSetGenerator{
Git: &argoprojiov1alpha1.GitGenerator{
Files: append([]argoprojiov1alpha1.GitFileGeneratorItem{}, fileNamePath, fileServerPath),
Template: argoprojiov1alpha1.ApplicationSetTemplate{},
},
}
clusterGeneratorParams := map[string]interface{}{
"name": "production_01/west", "server": "https://production-01.example.com",
}
interpolatedGenerator, err = InterpolateGenerator(requestedGenerator, clusterGeneratorParams, true, nil)
interpolatedGenerator, err = InterpolateGenerator(requestedGenerator, clusterGeneratorParams, true)
if err != nil {
log.WithError(err).WithField("requestedGenerator", requestedGenerator).Error("error interpolating Generator")
return
@@ -59,9 +59,9 @@ func (g *GitGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.Applic
var err error
var res []map[string]interface{}
if len(appSetGenerator.Git.Directories) != 0 {
res, err = g.generateParamsForGitDirectories(appSetGenerator, appSet.Spec.GoTemplate, appSet.Spec.GoTemplateOptions)
res, err = g.generateParamsForGitDirectories(appSetGenerator, appSet.Spec.GoTemplate)
} else if len(appSetGenerator.Git.Files) != 0 {
res, err = g.generateParamsForGitFiles(appSetGenerator, appSet.Spec.GoTemplate, appSet.Spec.GoTemplateOptions)
res, err = g.generateParamsForGitFiles(appSetGenerator, appSet.Spec.GoTemplate)
} else {
return nil, EmptyAppSetGeneratorError
}
@@ -72,7 +72,7 @@ func (g *GitGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.Applic
return res, nil
}

func (g *GitGenerator) generateParamsForGitDirectories(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, useGoTemplate bool, goTemplateOptions []string) ([]map[string]interface{}, error) {
func (g *GitGenerator) generateParamsForGitDirectories(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, useGoTemplate bool) ([]map[string]interface{}, error) {

// Directories, not files
allPaths, err := g.repos.GetDirectories(context.TODO(), appSetGenerator.Git.RepoURL, appSetGenerator.Git.Revision)
@@ -81,24 +81,20 @@ func (g *GitGenerator) generateParamsForGitDirectories(appSetGenerator *argoproj
}

log.WithFields(log.Fields{
"allPaths": allPaths,
"total": len(allPaths),
"repoURL": appSetGenerator.Git.RepoURL,
"revision": appSetGenerator.Git.Revision,
"pathParamPrefix": appSetGenerator.Git.PathParamPrefix,
"allPaths": allPaths,
"total": len(allPaths),
"repoURL": appSetGenerator.Git.RepoURL,
"revision": appSetGenerator.Git.Revision,
}).Info("applications result from the repo service")

requestedApps := g.filterApps(appSetGenerator.Git.Directories, allPaths)

res, err := g.generateParamsFromApps(requestedApps, appSetGenerator, useGoTemplate, goTemplateOptions)
if err != nil {
return nil, fmt.Errorf("failed to generate params from apps: %w", err)
}
res := g.generateParamsFromApps(requestedApps, appSetGenerator, useGoTemplate)

return res, nil
}

func (g *GitGenerator) generateParamsForGitFiles(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, useGoTemplate bool, goTemplateOptions []string) ([]map[string]interface{}, error) {
func (g *GitGenerator) generateParamsForGitFiles(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, useGoTemplate bool) ([]map[string]interface{}, error) {

// Get all files that match the requested path string, removing duplicates
allFiles := make(map[string][]byte)
@@ -125,17 +121,19 @@ func (g *GitGenerator) generateParamsForGitFiles(appSetGenerator *argoprojiov1al
for _, path := range allPaths {

// A JSON / YAML file path can contain multiple sets of parameters (ie it is an array)
paramsArray, err := g.generateParamsFromGitFile(path, allFiles[path], appSetGenerator.Git.Values, useGoTemplate, goTemplateOptions, appSetGenerator.Git.PathParamPrefix)
paramsArray, err := g.generateParamsFromGitFile(path, allFiles[path], useGoTemplate)
if err != nil {
return nil, fmt.Errorf("unable to process file '%s': %v", path, err)
}

res = append(res, paramsArray...)
for index := range paramsArray {
res = append(res, paramsArray[index])
}
}
return res, nil
}

func (g *GitGenerator) generateParamsFromGitFile(filePath string, fileContent []byte, values map[string]string, useGoTemplate bool, goTemplateOptions []string, pathParamPrefix string) ([]map[string]interface{}, error) {
func (g *GitGenerator) generateParamsFromGitFile(filePath string, fileContent []byte, useGoTemplate bool) ([]map[string]interface{}, error) {
objectsFound := []map[string]interface{}{}

// First, we attempt to parse as an array
@@ -169,11 +167,7 @@ func (g *GitGenerator) generateParamsFromGitFile(filePath string, fileContent []
paramPath["basenameNormalized"] = utils.SanitizeName(path.Base(paramPath["path"].(string)))
paramPath["filenameNormalized"] = utils.SanitizeName(path.Base(paramPath["filename"].(string)))
paramPath["segments"] = strings.Split(paramPath["path"].(string), "/")
if pathParamPrefix != "" {
params[pathParamPrefix] = map[string]interface{}{"path": paramPath}
} else {
params["path"] = paramPath
}
params["path"] = paramPath
} else {
flat, err := flatten.Flatten(objectFound, "", flatten.DotStyle)
if err != nil {
@@ -182,31 +176,23 @@ func (g *GitGenerator) generateParamsFromGitFile(filePath string, fileContent []
for k, v := range flat {
params[k] = fmt.Sprintf("%v", v)
}
pathParamName := "path"
if pathParamPrefix != "" {
pathParamName = pathParamPrefix + "." + pathParamName
}
params[pathParamName] = path.Dir(filePath)
params[pathParamName+".basename"] = path.Base(params[pathParamName].(string))
params[pathParamName+".filename"] = path.Base(filePath)
params[pathParamName+".basenameNormalized"] = utils.SanitizeName(path.Base(params[pathParamName].(string)))
params[pathParamName+".filenameNormalized"] = utils.SanitizeName(path.Base(params[pathParamName+".filename"].(string)))
for k, v := range strings.Split(params[pathParamName].(string), "/") {
params["path"] = path.Dir(filePath)
params["path.basename"] = path.Base(params["path"].(string))
params["path.filename"] = path.Base(filePath)
params["path.basenameNormalized"] = utils.SanitizeName(path.Base(params["path"].(string)))
params["path.filenameNormalized"] = utils.SanitizeName(path.Base(params["path.filename"].(string)))
for k, v := range strings.Split(params["path"].(string), "/") {
if len(v) > 0 {
params[pathParamName+"["+strconv.Itoa(k)+"]"] = v
params["path["+strconv.Itoa(k)+"]"] = v
}
}
}

err := appendTemplatedValues(values, params, useGoTemplate, goTemplateOptions)
if err != nil {
return nil, fmt.Errorf("failed to append templated values: %w", err)
}

res = append(res, params)
}

return res, nil

}

func (g *GitGenerator) filterApps(Directories []argoprojiov1alpha1.GitDirectoryGeneratorItem, allPaths []string) []string {
@@ -237,7 +223,9 @@ func (g *GitGenerator) filterApps(Directories []argoprojiov1alpha1.GitDirectoryG
return res
}

func (g *GitGenerator) generateParamsFromApps(requestedApps []string, appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, useGoTemplate bool, goTemplateOptions []string) ([]map[string]interface{}, error) {
func (g *GitGenerator) generateParamsFromApps(requestedApps []string, _ *argoprojiov1alpha1.ApplicationSetGenerator, useGoTemplate bool) []map[string]interface{} {
// TODO: At some point, the applicationSetGenerator param should be used

res := make([]map[string]interface{}, len(requestedApps))
for i, a := range requestedApps {

@@ -249,33 +237,20 @@ func (g *GitGenerator) generateParamsFromApps(requestedApps []string, appSetGene
paramPath["basename"] = path.Base(a)
paramPath["basenameNormalized"] = utils.SanitizeName(path.Base(a))
paramPath["segments"] = strings.Split(paramPath["path"].(string), "/")
if appSetGenerator.Git.PathParamPrefix != "" {
params[appSetGenerator.Git.PathParamPrefix] = map[string]interface{}{"path": paramPath}
} else {
params["path"] = paramPath
}
params["path"] = paramPath
} else {
pathParamName := "path"
if appSetGenerator.Git.PathParamPrefix != "" {
pathParamName = appSetGenerator.Git.PathParamPrefix + "." + pathParamName
}
params[pathParamName] = a
params[pathParamName+".basename"] = path.Base(a)
params[pathParamName+".basenameNormalized"] = utils.SanitizeName(path.Base(a))
for k, v := range strings.Split(params[pathParamName].(string), "/") {
params["path"] = a
params["path.basename"] = path.Base(a)
params["path.basenameNormalized"] = utils.SanitizeName(path.Base(a))
for k, v := range strings.Split(params["path"].(string), "/") {
if len(v) > 0 {
params[pathParamName+"["+strconv.Itoa(k)+"]"] = v
params["path["+strconv.Itoa(k)+"]"] = v
}
}
}

err := appendTemplatedValues(appSetGenerator.Git.Values, params, useGoTemplate, goTemplateOptions)
if err != nil {
return nil, fmt.Errorf("failed to append templated values: %w", err)
}

res[i] = params
}

return res, nil
return res
}
@@ -8,17 +8,23 @@ import (
"github.com/stretchr/testify/mock"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"github.com/argoproj/argo-cd/v2/applicationset/services/mocks"

testutils "github.com/argoproj/argo-cd/v2/applicationset/utils/test"
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

// type clientSet struct {
// RepoServerServiceClient apiclient.RepoServerServiceClient
// }

// func (c *clientSet) NewRepoServerClient() (io.Closer, apiclient.RepoServerServiceClient, error) {
// return io.NewCloser(func() error { return nil }), c.RepoServerServiceClient, nil
// }

func Test_generateParamsFromGitFile(t *testing.T) {
values := map[string]string{}
params, err := (*GitGenerator)(nil).generateParamsFromGitFile("path/dir/file_name.yaml", []byte(`
foo:
bar: baz
`), values, false, nil, "")
`), false)
if err != nil {
t.Fatal(err)
}
@@ -36,35 +42,11 @@ foo:
}, params)
}

func Test_generatePrefixedParamsFromGitFile(t *testing.T) {
values := map[string]string{}
params, err := (*GitGenerator)(nil).generateParamsFromGitFile("path/dir/file_name.yaml", []byte(`
foo:
bar: baz
`), values, false, nil, "myRepo")
if err != nil {
t.Fatal(err)
}
assert.Equal(t, []map[string]interface{}{
{
"foo.bar": "baz",
"myRepo.path": "path/dir",
"myRepo.path.basename": "dir",
"myRepo.path.filename": "file_name.yaml",
"myRepo.path.basenameNormalized": "dir",
"myRepo.path.filenameNormalized": "file-name.yaml",
"myRepo.path[0]": "path",
"myRepo.path[1]": "dir",
},
}, params)
}

func Test_generateParamsFromGitFileGoTemplate(t *testing.T) {
values := map[string]string{}
params, err := (*GitGenerator)(nil).generateParamsFromGitFile("path/dir/file_name.yaml", []byte(`
foo:
bar: baz
`), values, true, nil, "")
`), true)
if err != nil {
t.Fatal(err)
}
@@ -88,48 +70,15 @@ foo:
}, params)
}

func Test_generatePrefixedParamsFromGitFileGoTemplate(t *testing.T) {
values := map[string]string{}
params, err := (*GitGenerator)(nil).generateParamsFromGitFile("path/dir/file_name.yaml", []byte(`
foo:
bar: baz
`), values, true, nil, "myRepo")
if err != nil {
t.Fatal(err)
}
assert.Equal(t, []map[string]interface{}{
{
"foo": map[string]interface{}{
"bar": "baz",
},
"myRepo": map[string]interface{}{
"path": map[string]interface{}{
"path": "path/dir",
"basename": "dir",
"filename": "file_name.yaml",
"basenameNormalized": "dir",
"filenameNormalized": "file-name.yaml",
"segments": []string{
"path",
"dir",
},
},
},
},
}, params)
}

func TestGitGenerateParamsFromDirectories(t *testing.T) {

cases := []struct {
name string
directories []argoprojiov1alpha1.GitDirectoryGeneratorItem
pathParamPrefix string
repoApps []string
repoError error
values map[string]string
expected []map[string]interface{}
expectedError error
name string
directories []argoprojiov1alpha1.GitDirectoryGeneratorItem
repoApps []string
repoError error
expected []map[string]interface{}
expectedError error
}{
{
name: "happy flow - created apps",
@@ -148,24 +97,6 @@ func TestGitGenerateParamsFromDirectories(t *testing.T) {
},
expectedError: nil,
},
{
name: "It prefixes path parameters with PathParamPrefix",
directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "*"}},
pathParamPrefix: "myRepo",
repoApps: []string{
"app1",
"app2",
"app_3",
"p1/app4",
},
repoError: nil,
expected: []map[string]interface{}{
{"myRepo.path": "app1", "myRepo.path.basename": "app1", "myRepo.path.basenameNormalized": "app1", "myRepo.path[0]": "app1"},
{"myRepo.path": "app2", "myRepo.path.basename": "app2", "myRepo.path.basenameNormalized": "app2", "myRepo.path[0]": "app2"},
{"myRepo.path": "app_3", "myRepo.path.basename": "app_3", "myRepo.path.basenameNormalized": "app-3", "myRepo.path[0]": "app_3"},
},
expectedError: nil,
},
{
name: "It filters application according to the paths",
directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "p1/*"}, {Path: "p1/*/*"}},
@@ -218,25 +149,6 @@ func TestGitGenerateParamsFromDirectories(t *testing.T) {
},
expectedError: nil,
},
{
name: "Value variable interpolation",
directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "*"}, {Path: "*/*"}},
repoApps: []string{
"app1",
"p1/app2",
},
repoError: nil,
values: map[string]string{
"foo": "bar",
"aaa": "{{ path[0] }}",
"no-op": "{{ this-does-not-exist }}",
},
expected: []map[string]interface{}{
{"values.foo": "bar", "values.no-op": "{{ this-does-not-exist }}", "values.aaa": "app1", "path": "app1", "path.basename": "app1", "path[0]": "app1", "path.basenameNormalized": "app1"},
{"values.foo": "bar", "values.no-op": "{{ this-does-not-exist }}", "values.aaa": "p1", "path": "p1/app2", "path.basename": "app2", "path[0]": "p1", "path[1]": "app2", "path.basenameNormalized": "app2"},
},
expectedError: nil,
},
{
name: "handles empty response from repo server",
directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "*"}},
@@ -261,11 +173,11 @@ func TestGitGenerateParamsFromDirectories(t *testing.T) {
t.Run(testCaseCopy.name, func(t *testing.T) {
t.Parallel()

argoCDServiceMock := mocks.Repos{}
argoCDServiceMock := testutils.ArgoCDServiceMock{Mock: &mock.Mock{}}

argoCDServiceMock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything).Return(testCaseCopy.repoApps, testCaseCopy.repoError)
argoCDServiceMock.Mock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything).Return(testCaseCopy.repoApps, testCaseCopy.repoError)

var gitGenerator = NewGitGenerator(&argoCDServiceMock)
var gitGenerator = NewGitGenerator(argoCDServiceMock)
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "set",
@@ -273,11 +185,9 @@ func TestGitGenerateParamsFromDirectories(t *testing.T) {
Spec: argoprojiov1alpha1.ApplicationSetSpec{
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{{
Git: &argoprojiov1alpha1.GitGenerator{
RepoURL: "RepoURL",
Revision: "Revision",
Directories: testCaseCopy.directories,
PathParamPrefix: testCaseCopy.pathParamPrefix,
Values: testCaseCopy.values,
RepoURL: "RepoURL",
Revision: "Revision",
Directories: testCaseCopy.directories,
},
}},
},
@@ -292,7 +202,7 @@ func TestGitGenerateParamsFromDirectories(t *testing.T) {
assert.Equal(t, testCaseCopy.expected, got)
}

argoCDServiceMock.AssertExpectations(t)
argoCDServiceMock.Mock.AssertExpectations(t)
})
}
}
@@ -300,13 +210,12 @@ func TestGitGenerateParamsFromDirectories(t *testing.T) {
func TestGitGenerateParamsFromDirectoriesGoTemplate(t *testing.T) {

cases := []struct {
name string
directories []argoprojiov1alpha1.GitDirectoryGeneratorItem
pathParamPrefix string
repoApps []string
repoError error
expected []map[string]interface{}
expectedError error
name string
directories []argoprojiov1alpha1.GitDirectoryGeneratorItem
repoApps []string
repoError error
expected []map[string]interface{}
expectedError error
}{
{
name: "happy flow - created apps",
@@ -352,57 +261,6 @@ func TestGitGenerateParamsFromDirectoriesGoTemplate(t *testing.T) {
},
expectedError: nil,
},
{
name: "It prefixes path parameters with PathParamPrefix",
directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "*"}},
pathParamPrefix: "myRepo",
repoApps: []string{
"app1",
"app2",
"app_3",
"p1/app4",
},
repoError: nil,
expected: []map[string]interface{}{
{
"myRepo": map[string]interface{}{
"path": map[string]interface{}{
"path": "app1",
"basename": "app1",
"basenameNormalized": "app1",
"segments": []string{
"app1",
},
},
},
},
{
"myRepo": map[string]interface{}{
"path": map[string]interface{}{
"path": "app2",
"basename": "app2",
"basenameNormalized": "app2",
"segments": []string{
"app2",
},
},
},
},
{
"myRepo": map[string]interface{}{
"path": map[string]interface{}{
"path": "app_3",
"basename": "app_3",
"basenameNormalized": "app-3",
"segments": []string{
"app_3",
},
},
},
},
},
expectedError: nil,
},
{
name: "It filters application according to the paths",
directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "p1/*"}, {Path: "p1/*/*"}},
@@ -557,11 +415,11 @@ func TestGitGenerateParamsFromDirectoriesGoTemplate(t *testing.T) {
t.Run(testCaseCopy.name, func(t *testing.T) {
t.Parallel()

argoCDServiceMock := mocks.Repos{}
argoCDServiceMock := testutils.ArgoCDServiceMock{Mock: &mock.Mock{}}

argoCDServiceMock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything).Return(testCaseCopy.repoApps, testCaseCopy.repoError)
argoCDServiceMock.Mock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything).Return(testCaseCopy.repoApps, testCaseCopy.repoError)

var gitGenerator = NewGitGenerator(&argoCDServiceMock)
var gitGenerator = NewGitGenerator(argoCDServiceMock)
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "set",
@@ -570,10 +428,9 @@ func TestGitGenerateParamsFromDirectoriesGoTemplate(t *testing.T) {
GoTemplate: true,
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{{
Git: &argoprojiov1alpha1.GitGenerator{
RepoURL: "RepoURL",
Revision: "Revision",
Directories: testCaseCopy.directories,
PathParamPrefix: testCaseCopy.pathParamPrefix,
RepoURL: "RepoURL",
Revision: "Revision",
Directories: testCaseCopy.directories,
},
}},
},
@@ -588,7 +445,7 @@ func TestGitGenerateParamsFromDirectoriesGoTemplate(t *testing.T) {
assert.Equal(t, testCaseCopy.expected, got)
}

argoCDServiceMock.AssertExpectations(t)
argoCDServiceMock.Mock.AssertExpectations(t)
})
}
@@ -604,7 +461,6 @@ func TestGitGenerateParamsFromFiles(t *testing.T) {
repoFileContents map[string][]byte
// if repoPathsError is non-nil, the call to GetPaths(...) will return this error value
repoPathsError error
values map[string]string
expected []map[string]interface{}
expectedError error
}{
@@ -668,74 +524,6 @@ func TestGitGenerateParamsFromFiles(t *testing.T) {
},
expectedError: nil,
},
{
name: "Value variable interpolation",
files: []argoprojiov1alpha1.GitFileGeneratorItem{{Path: "**/config.json"}},
repoFileContents: map[string][]byte{
"cluster-config/production/config.json": []byte(`{
"cluster": {
"owner": "john.doe@example.com",
"name": "production",
"address": "https://kubernetes.default.svc"
},
"key1": "val1",
"key2": {
"key2_1": "val2_1",
"key2_2": {
"key2_2_1": "val2_2_1"
}
},
"key3": 123
}`),
"cluster-config/staging/config.json": []byte(`{
"cluster": {
"owner": "foo.bar@example.com",
"name": "staging",
"address": "https://kubernetes.default.svc"
}
}`),
},
repoPathsError: nil,
values: map[string]string{
"aaa": "{{ cluster.owner }}",
"no-op": "{{ this-does-not-exist }}",
},
expected: []map[string]interface{}{
{
"cluster.owner": "john.doe@example.com",
"cluster.name": "production",
"cluster.address": "https://kubernetes.default.svc",
"key1": "val1",
"key2.key2_1": "val2_1",
"key2.key2_2.key2_2_1": "val2_2_1",
"key3": "123",
"path": "cluster-config/production",
"path.basename": "production",
"path[0]": "cluster-config",
"path[1]": "production",
"path.basenameNormalized": "production",
"path.filename": "config.json",
"path.filenameNormalized": "config.json",
"values.aaa": "john.doe@example.com",
"values.no-op": "{{ this-does-not-exist }}",
},
{
"cluster.owner": "foo.bar@example.com",
"cluster.name": "staging",
"cluster.address": "https://kubernetes.default.svc",
"path": "cluster-config/staging",
"path.basename": "staging",
"path[0]": "cluster-config",
"path[1]": "staging",
"path.basenameNormalized": "staging",
"path.filename": "config.json",
"path.filenameNormalized": "config.json",
"values.aaa": "foo.bar@example.com",
"values.no-op": "{{ this-does-not-exist }}",
},
},
expectedError: nil,
},
{
name: "handles error during getting repo paths",
files: []argoprojiov1alpha1.GitFileGeneratorItem{{Path: "**/config.json"}},
@@ -917,11 +705,11 @@ cluster:
t.Run(testCaseCopy.name, func(t *testing.T) {
t.Parallel()

argoCDServiceMock := mocks.Repos{}
argoCDServiceMock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything).
argoCDServiceMock := testutils.ArgoCDServiceMock{Mock: &mock.Mock{}}
argoCDServiceMock.Mock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything).
Return(testCaseCopy.repoFileContents, testCaseCopy.repoPathsError)

var gitGenerator = NewGitGenerator(&argoCDServiceMock)
var gitGenerator = NewGitGenerator(argoCDServiceMock)
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "set",
@@ -932,7 +720,6 @@ cluster:
RepoURL: "RepoURL",
Revision: "Revision",
Files: testCaseCopy.files,
Values: testCaseCopy.values,
},
}},
},
@@ -948,7 +735,7 @@ cluster:
assert.ElementsMatch(t, testCaseCopy.expected, got)
}

argoCDServiceMock.AssertExpectations(t)
argoCDServiceMock.Mock.AssertExpectations(t)
})
}
}
@@ -1267,11 +1054,11 @@ cluster:
t.Run(testCaseCopy.name, func(t *testing.T) {
t.Parallel()

argoCDServiceMock := mocks.Repos{}
argoCDServiceMock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything).
argoCDServiceMock := testutils.ArgoCDServiceMock{Mock: &mock.Mock{}}
argoCDServiceMock.Mock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything).
Return(testCaseCopy.repoFileContents, testCaseCopy.repoPathsError)

var gitGenerator = NewGitGenerator(&argoCDServiceMock)
var gitGenerator = NewGitGenerator(argoCDServiceMock)
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "set",
@@ -1298,7 +1085,7 @@ cluster:
assert.ElementsMatch(t, testCaseCopy.expected, got)
}

argoCDServiceMock.AssertExpectations(t)
argoCDServiceMock.Mock.AssertExpectations(t)
})
}
}
@@ -5,8 +5,6 @@ import (
"fmt"
"time"

"sigs.k8s.io/yaml"

argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

@@ -75,16 +73,5 @@ func (g *ListGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.Appli
}
}

// Append elements from ElementsYaml to the response
if len(appSetGenerator.List.ElementsYaml) > 0 {

var yamlElements []map[string]interface{}
err := yaml.Unmarshal([]byte(appSetGenerator.List.ElementsYaml), &yamlElements)
if err != nil {
return nil, fmt.Errorf("error unmarshling decoded ElementsYaml %v", err)
}
res = append(res, yamlElements...)
}

return res, nil
}
@@ -8,8 +8,6 @@ import (

"github.com/argoproj/argo-cd/v2/applicationset/utils"
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"

log "github.com/sirupsen/logrus"
)

var _ Generator = (*MatrixGenerator)(nil)
@@ -86,22 +84,10 @@ func (m *MatrixGenerator) getParams(appSetBaseGenerator argoprojiov1alpha1.Appli
if err != nil {
return nil, err
}
if matrixGen != nil && !appSet.Spec.ApplyNestedSelectors {
foundSelector := dropDisabledNestedSelectors(matrixGen.Generators)
if foundSelector {
log.Warnf("AppSet '%v' defines selector on nested matrix generator's generator without enabling them via 'spec.applyNestedSelectors', ignoring nested selectors", appSet.Name)
}
}
mergeGen, err := getMergeGenerator(appSetBaseGenerator)
if err != nil {
return nil, err
}
if mergeGen != nil && !appSet.Spec.ApplyNestedSelectors {
foundSelector := dropDisabledNestedSelectors(mergeGen.Generators)
if foundSelector {
log.Warnf("AppSet '%v' defines selector on nested merge generator's generator without enabling them via 'spec.applyNestedSelectors', ignoring nested selectors", appSet.Name)
}
}

t, err := Transform(
argoprojiov1alpha1.ApplicationSetGenerator{
@@ -111,7 +97,6 @@ func (m *MatrixGenerator) getParams(appSetBaseGenerator argoprojiov1alpha1.Appli
SCMProvider: appSetBaseGenerator.SCMProvider,
ClusterDecisionResource: appSetBaseGenerator.ClusterDecisionResource,
PullRequest: appSetBaseGenerator.PullRequest,
Plugin: appSetBaseGenerator.Plugin,
Matrix: matrixGen,
Merge: mergeGen,
Selector: appSetBaseGenerator.Selector,
@@ -150,7 +135,6 @@ func (m *MatrixGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.Ap
Clusters: r.Clusters,
Git: r.Git,
PullRequest: r.PullRequest,
Plugin: r.Plugin,
Matrix: matrixGen,
Merge: mergeGen,
}
@@ -13,12 +13,11 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"

"github.com/argoproj/argo-cd/v2/applicationset/services/mocks"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"

testutils "github.com/argoproj/argo-cd/v2/applicationset/utils/test"
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

@@ -31,7 +30,7 @@ func TestMatrixGenerate(t *testing.T) {
}

listGenerator := &argoprojiov1alpha1.ListGenerator{
Elements: []apiextensionsv1.JSON{{Raw: []byte(`{"cluster": "Cluster","url": "Url", "templated": "test-{{path.basenameNormalized}}"}`)}},
Elements: []apiextensionsv1.JSON{{Raw: []byte(`{"cluster": "Cluster","url": "Url"}`)}},
}

testCases := []struct {
@@ -51,8 +50,8 @@ func TestMatrixGenerate(t *testing.T) {
},
},
expected: []map[string]interface{}{
{"path": "app1", "path.basename": "app1", "path.basenameNormalized": "app1", "cluster": "Cluster", "url": "Url", "templated": "test-app1"},
{"path": "app2", "path.basename": "app2", "path.basenameNormalized": "app2", "cluster": "Cluster", "url": "Url", "templated": "test-app2"},
{"path": "app1", "path.basename": "app1", "path.basenameNormalized": "app1", "cluster": "Cluster", "url": "Url"},
{"path": "app2", "path.basename": "app2", "path.basenameNormalized": "app2", "cluster": "Cluster", "url": "Url"},
},
},
{
@@ -838,172 +837,6 @@ func TestInterpolatedMatrixGenerateGoTemplate(t *testing.T) {
}
}

func TestMatrixGenerateListElementsYaml(t *testing.T) {

gitGenerator := &argoprojiov1alpha1.GitGenerator{
RepoURL: "RepoURL",
Revision: "Revision",
Files: []argoprojiov1alpha1.GitFileGeneratorItem{
{Path: "config.yaml"},
},
}

listGenerator := &argoprojiov1alpha1.ListGenerator{
Elements: []apiextensionsv1.JSON{},
ElementsYaml: "{{ .foo.bar | toJson }}",
}

testCases := []struct {
name string
baseGenerators []argoprojiov1alpha1.ApplicationSetNestedGenerator
expectedErr error
expected []map[string]interface{}
}{
{
name: "happy flow - generate params",
baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{
{
Git: gitGenerator,
},
{
List: listGenerator,
},
},
expected: []map[string]interface{}{
{
"chart": "a",
"version": "1",
"foo": map[string]interface{}{
"bar": []interface{}{
map[string]interface{}{
"chart": "a",
"version": "1",
},
map[string]interface{}{
"chart": "b",
"version": "2",
},
},
},
"path": map[string]interface{}{
"basename": "dir",
"basenameNormalized": "dir",
"filename": "file_name.yaml",
"filenameNormalized": "file-name.yaml",
"path": "path/dir",
"segments": []string{
"path",
"dir",
},
},
},
{
"chart": "b",
"version": "2",
"foo": map[string]interface{}{
"bar": []interface{}{
map[string]interface{}{
"chart": "a",
"version": "1",
},
map[string]interface{}{
"chart": "b",
"version": "2",
},
},
},
"path": map[string]interface{}{
"basename": "dir",
"basenameNormalized": "dir",
"filename": "file_name.yaml",
"filenameNormalized": "file-name.yaml",
"path": "path/dir",
"segments": []string{
"path",
"dir",
},
},
},
},
},
}

for _, testCase := range testCases {
testCaseCopy := testCase // Since tests may run in parallel

t.Run(testCaseCopy.name, func(t *testing.T) {
genMock := &generatorMock{}
appSet := &argoprojiov1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "set",
},
Spec: argoprojiov1alpha1.ApplicationSetSpec{
GoTemplate: true,
},
}

for _, g := range testCaseCopy.baseGenerators {

gitGeneratorSpec := argoprojiov1alpha1.ApplicationSetGenerator{
Git: g.Git,
List: g.List,
}
genMock.On("GenerateParams", mock.AnythingOfType("*v1alpha1.ApplicationSetGenerator"), appSet).Return([]map[string]any{{
"foo": map[string]interface{}{
"bar": []interface{}{
map[string]interface{}{
"chart": "a",
"version": "1",
},
map[string]interface{}{
"chart": "b",
"version": "2",
},
},
},
"path": map[string]interface{}{
"basename": "dir",
"basenameNormalized": "dir",
"filename": "file_name.yaml",
"filenameNormalized": "file-name.yaml",
"path": "path/dir",
"segments": []string{
"path",
"dir",
},
},
}}, nil)
genMock.On("GetTemplate", &gitGeneratorSpec).
Return(&argoprojiov1alpha1.ApplicationSetTemplate{})

}

var matrixGenerator = NewMatrixGenerator(
map[string]Generator{
"Git": genMock,
"List": &ListGenerator{},
},
)

got, err := matrixGenerator.GenerateParams(&argoprojiov1alpha1.ApplicationSetGenerator{
Matrix: &argoprojiov1alpha1.MatrixGenerator{
Generators: testCaseCopy.baseGenerators,
Template: argoprojiov1alpha1.ApplicationSetTemplate{},
},
}, appSet)

if testCaseCopy.expectedErr != nil {
assert.ErrorIs(t, err, testCaseCopy.expectedErr)
} else {
assert.NoError(t, err)
assert.Equal(t, testCaseCopy.expected, got)
}

})

}
}

type generatorMock struct {
mock.Mock
}
@@ -1053,8 +886,8 @@ func TestGitGenerator_GenerateParams_list_x_git_matrix_generator(t *testing.T) {
},
}

repoServiceMock := &mocks.Repos{}
repoServiceMock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(map[string][]byte{
repoServiceMock := testutils.ArgoCDServiceMock{Mock: &mock.Mock{}}
repoServiceMock.Mock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(map[string][]byte{
"some/path.json": []byte("test: content"),
}, nil)
gitGenerator := NewGitGenerator(repoServiceMock)
@@ -9,8 +9,6 @@ import (

"github.com/argoproj/argo-cd/v2/applicationset/utils"
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"

log "github.com/sirupsen/logrus"
)

var _ Generator = (*MergeGenerator)(nil)
@@ -143,22 +141,10 @@ func (m *MergeGenerator) getParams(appSetBaseGenerator argoprojiov1alpha1.Applic
if err != nil {
return nil, err
}
if matrixGen != nil && !appSet.Spec.ApplyNestedSelectors {
foundSelector := dropDisabledNestedSelectors(matrixGen.Generators)
if foundSelector {
log.Warnf("AppSet '%v' defines selector on nested matrix generator's generator without enabling them via 'spec.applyNestedSelectors', ignoring nested selector", appSet.Name)
}
}
mergeGen, err := getMergeGenerator(appSetBaseGenerator)
if err != nil {
return nil, err
}
if mergeGen != nil && !appSet.Spec.ApplyNestedSelectors {
foundSelector := dropDisabledNestedSelectors(mergeGen.Generators)
if foundSelector {
log.Warnf("AppSet '%v' defines selector on nested merge generator's generator without enabling them via 'spec.applyNestedSelectors', ignoring nested selector", appSet.Name)
}
}

t, err := Transform(
argoprojiov1alpha1.ApplicationSetGenerator{
@@ -168,7 +154,6 @@ func (m *MergeGenerator) getParams(appSetBaseGenerator argoprojiov1alpha1.Applic
SCMProvider: appSetBaseGenerator.SCMProvider,
ClusterDecisionResource: appSetBaseGenerator.ClusterDecisionResource,
PullRequest: appSetBaseGenerator.PullRequest,
Plugin: appSetBaseGenerator.Plugin,
Matrix: matrixGen,
Merge: mergeGen,
Selector: appSetBaseGenerator.Selector,
@@ -205,7 +190,6 @@ func (m *MergeGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.App
Clusters: r.Clusters,
Git: r.Git,
PullRequest: r.PullRequest,
Plugin: r.Plugin,
Matrix: matrixGen,
Merge: mergeGen,
}
@@ -1,211 +0,0 @@
|
||||
package generators

import (
	"context"
	"fmt"
	"strconv"
	"strings"
	"time"

	"github.com/jeremywohl/flatten"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"sigs.k8s.io/controller-runtime/pkg/client"

	argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
	"github.com/argoproj/argo-cd/v2/util/settings"

	"github.com/argoproj/argo-cd/v2/applicationset/services/plugin"
)

const (
	DefaultPluginRequeueAfterSeconds = 30 * time.Minute
)

var _ Generator = (*PluginGenerator)(nil)

type PluginGenerator struct {
	client    client.Client
	ctx       context.Context
	clientset kubernetes.Interface
	namespace string
}

func NewPluginGenerator(client client.Client, ctx context.Context, clientset kubernetes.Interface, namespace string) Generator {
	g := &PluginGenerator{
		client:    client,
		ctx:       ctx,
		clientset: clientset,
		namespace: namespace,
	}
	return g
}

func (g *PluginGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) time.Duration {
	// Return a requeue default of 30 minutes, if no default is specified.

	if appSetGenerator.Plugin.RequeueAfterSeconds != nil {
		return time.Duration(*appSetGenerator.Plugin.RequeueAfterSeconds) * time.Second
	}

	return DefaultPluginRequeueAfterSeconds
}

func (g *PluginGenerator) GetTemplate(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate {
	return &appSetGenerator.Plugin.Template
}
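
// Illustration (sketch only): with RequeueAfterSeconds left nil the generator is
// requeued every 30 minutes (DefaultPluginRequeueAfterSeconds); setting it
// overrides the interval, e.g., assuming the field is an *int64, as the
// dereference above suggests:
//
//	seconds := int64(120)
//	gen := &argoprojiov1alpha1.ApplicationSetGenerator{
//		Plugin: &argoprojiov1alpha1.PluginGenerator{RequeueAfterSeconds: &seconds},
//	}
//	_ = (&PluginGenerator{}).GetRequeueAfter(gen) // 2 * time.Minute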

func (g *PluginGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, applicationSetInfo *argoprojiov1alpha1.ApplicationSet) ([]map[string]interface{}, error) {

	if appSetGenerator == nil {
		return nil, EmptyAppSetGeneratorError
	}

	if appSetGenerator.Plugin == nil {
		return nil, EmptyAppSetGeneratorError
	}

	ctx := context.Background()

	providerConfig := appSetGenerator.Plugin

	pluginClient, err := g.getPluginFromGenerator(ctx, applicationSetInfo.Name, providerConfig)
	if err != nil {
		return nil, err
	}

	list, err := pluginClient.List(ctx, providerConfig.Input.Parameters)
	if err != nil {
		return nil, fmt.Errorf("error listing params: %w", err)
	}

	res, err := g.generateParams(appSetGenerator, applicationSetInfo, list.Output.Parameters, appSetGenerator.Plugin.Input.Parameters, applicationSetInfo.Spec.GoTemplate)
	if err != nil {
		return nil, err
	}

	return res, nil
}

func (g *PluginGenerator) getPluginFromGenerator(ctx context.Context, appSetName string, generatorConfig *argoprojiov1alpha1.PluginGenerator) (*plugin.Service, error) {
	cm, err := g.getConfigMap(ctx, generatorConfig.ConfigMapRef.Name)
	if err != nil {
		return nil, fmt.Errorf("error fetching ConfigMap: %w", err)
	}
	token, err := g.getToken(ctx, cm["token"])
	if err != nil {
		return nil, fmt.Errorf("error fetching Secret token: %v", err)
	}

	var requestTimeout int
	requestTimeoutStr, ok := cm["requestTimeout"]
	if ok {
		requestTimeout, err = strconv.Atoi(requestTimeoutStr)
		if err != nil {
			return nil, fmt.Errorf("error setting requestTimeout: %w", err)
		}
	}

	pluginClient, err := plugin.NewPluginService(ctx, appSetName, cm["baseUrl"], token, requestTimeout)
	if err != nil {
		return nil, err
	}
	return pluginClient, nil
}

func (g *PluginGenerator) generateParams(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, appSet *argoprojiov1alpha1.ApplicationSet, objectsFound []map[string]interface{}, pluginParams argoprojiov1alpha1.PluginParameters, useGoTemplate bool) ([]map[string]interface{}, error) {
	res := []map[string]interface{}{}

	for _, objectFound := range objectsFound {

		params := map[string]interface{}{}

		if useGoTemplate {
			for k, v := range objectFound {
				params[k] = v
			}
		} else {
			flat, err := flatten.Flatten(objectFound, "", flatten.DotStyle)
			if err != nil {
				return nil, err
			}
			for k, v := range flat {
				params[k] = fmt.Sprintf("%v", v)
			}
		}

		params["generator"] = map[string]interface{}{
			"input": map[string]argoprojiov1alpha1.PluginParameters{
				"parameters": pluginParams,
			},
		}

		err := appendTemplatedValues(appSetGenerator.Plugin.Values, params, appSet.Spec.GoTemplate, appSet.Spec.GoTemplateOptions)
		if err != nil {
			return nil, err
		}

		res = append(res, params)
	}

	return res, nil
}
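
// Note on the two output shapes produced above: with spec.goTemplate disabled
// the plugin output is flattened into dot-separated string parameters (e.g.
// "key2.key2_1": "val2_1", "key3": "123"), while with goTemplate enabled the
// nested maps and JSON value types are preserved (e.g. "key3" stays numeric).
// The fixtures in TestPluginGenerateParams below exercise both shapes.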

func (g *PluginGenerator) getToken(ctx context.Context, tokenRef string) (string, error) {

	if tokenRef == "" || !strings.HasPrefix(tokenRef, "$") {
		return "", fmt.Errorf("token is empty, or does not reference a secret key starting with '$': %v", tokenRef)
	}

	secretName, tokenKey := plugin.ParseSecretKey(tokenRef)

	secret := &corev1.Secret{}
	err := g.client.Get(
		ctx,
		client.ObjectKey{
			Name:      secretName,
			Namespace: g.namespace,
		},
		secret)

	if err != nil {
		return "", fmt.Errorf("error fetching secret %s/%s: %v", g.namespace, secretName, err)
	}

	secretValues := make(map[string]string, len(secret.Data))

	for k, v := range secret.Data {
		secretValues[k] = string(v)
	}

	token := settings.ReplaceStringSecret(tokenKey, secretValues)

	return token, err
}

func (g *PluginGenerator) getConfigMap(ctx context.Context, configMapRef string) (map[string]string, error) {
	cm := &corev1.ConfigMap{}
	err := g.client.Get(
		ctx,
		client.ObjectKey{
			Name:      configMapRef,
			Namespace: g.namespace,
		},
		cm)

	if err != nil {
		return nil, err
	}

	baseUrl, ok := cm.Data["baseUrl"]
	if !ok || baseUrl == "" {
		return nil, fmt.Errorf("baseUrl not found in ConfigMap")
	}

	token, ok := cm.Data["token"]
	if !ok || token == "" {
		return nil, fmt.Errorf("token not found in ConfigMap")
	}

	return cm.Data, nil
}
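
// For illustration, a ConfigMap satisfying getConfigMap/getToken above (the
// baseUrl value is hypothetical; the token reference formats mirror the test
// fixtures below, where "$plugin.token" resolves against the argocd-secret
// Secret and "$plugin-secret:plugin.token" names the Secret explicitly):
//
//	apiVersion: v1
//	kind: ConfigMap
//	metadata:
//	  name: first-plugin-cm
//	data:
//	  baseUrl: "http://my-plugin.default.svc.cluster.local"  # hypothetical
//	  token: "$plugin-secret:plugin.token"
//	  requestTimeout: "30"                                   # optional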
@@ -1,705 +0,0 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
kubefake "k8s.io/client-go/kubernetes/fake"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/services/plugin"
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
)
|
||||
|
||||
func TestPluginGenerateParams(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
configmap *v1.ConfigMap
|
||||
secret *v1.Secret
|
||||
inputParameters map[string]apiextensionsv1.JSON
|
||||
values map[string]string
|
||||
gotemplate bool
|
||||
expected []map[string]interface{}
|
||||
content []byte
|
||||
expectedError error
|
||||
}{
|
||||
{
|
||||
name: "simple case",
|
||||
configmap: &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "first-plugin-cm",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"baseUrl": "http://127.0.0.1",
|
||||
"token": "$plugin.token",
|
||||
},
|
||||
},
|
||||
secret: &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "argocd-secret",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"plugin.token": []byte("my-secret"),
|
||||
},
|
||||
},
|
||||
inputParameters: map[string]apiextensionsv1.JSON{
|
||||
"pkey1": {Raw: []byte(`"val1"`)},
|
||||
"pkey2": {Raw: []byte(`"val2"`)},
|
||||
},
|
||||
gotemplate: false,
|
||||
content: []byte(`{"output": {
|
||||
"parameters": [{
|
||||
"key1": "val1",
|
||||
"key2": {
|
||||
"key2_1": "val2_1",
|
||||
"key2_2": {
|
||||
"key2_2_1": "val2_2_1"
|
||||
}
|
||||
},
|
||||
"key3": 123
|
||||
}]
|
||||
}}`),
|
||||
expected: []map[string]interface{}{
|
||||
{
|
||||
"key1": "val1",
|
||||
"key2.key2_1": "val2_1",
|
||||
"key2.key2_2.key2_2_1": "val2_2_1",
|
||||
"key3": "123",
|
||||
"generator": map[string]interface{}{
|
||||
"input": argoprojiov1alpha1.PluginInput{
|
||||
Parameters: argoprojiov1alpha1.PluginParameters{
|
||||
"pkey1": {Raw: []byte(`"val1"`)},
|
||||
"pkey2": {Raw: []byte(`"val2"`)},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "simple case with values",
|
||||
configmap: &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "first-plugin-cm",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"baseUrl": "http://127.0.0.1",
|
||||
"token": "$plugin.token",
|
||||
},
|
||||
},
|
||||
secret: &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "argocd-secret",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"plugin.token": []byte("my-secret"),
|
||||
},
|
||||
},
|
||||
inputParameters: map[string]apiextensionsv1.JSON{
|
||||
"pkey1": {Raw: []byte(`"val1"`)},
|
||||
"pkey2": {Raw: []byte(`"val2"`)},
|
||||
},
|
||||
values: map[string]string{
|
||||
"valuekey1": "valuevalue1",
|
||||
"valuekey2": "templated-{{key1}}",
|
||||
},
|
||||
gotemplate: false,
|
||||
content: []byte(`{"output": {
|
||||
"parameters": [{
|
||||
"key1": "val1",
|
||||
"key2": {
|
||||
"key2_1": "val2_1",
|
||||
"key2_2": {
|
||||
"key2_2_1": "val2_2_1"
|
||||
}
|
||||
},
|
||||
"key3": 123
|
||||
}]
|
||||
}}`),
|
||||
expected: []map[string]interface{}{
|
||||
{
|
||||
"key1": "val1",
|
||||
"key2.key2_1": "val2_1",
|
||||
"key2.key2_2.key2_2_1": "val2_2_1",
|
||||
"key3": "123",
|
||||
"values.valuekey1": "valuevalue1",
|
||||
"values.valuekey2": "templated-val1",
|
||||
"generator": map[string]interface{}{
|
||||
"input": argoprojiov1alpha1.PluginInput{
|
||||
Parameters: argoprojiov1alpha1.PluginParameters{
|
||||
"pkey1": {Raw: []byte(`"val1"`)},
|
||||
"pkey2": {Raw: []byte(`"val2"`)},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "simple case with gotemplate",
|
||||
configmap: &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "first-plugin-cm",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"baseUrl": "http://127.0.0.1",
|
||||
"token": "$plugin.token",
|
||||
},
|
||||
},
|
||||
secret: &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "argocd-secret",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"plugin.token": []byte("my-secret"),
|
||||
},
|
||||
},
|
||||
inputParameters: map[string]apiextensionsv1.JSON{
|
||||
"pkey1": {Raw: []byte(`"val1"`)},
|
||||
"pkey2": {Raw: []byte(`"val2"`)},
|
||||
},
|
||||
gotemplate: true,
|
||||
content: []byte(`{"output": {
|
||||
"parameters": [{
|
||||
"key1": "val1",
|
||||
"key2": {
|
||||
"key2_1": "val2_1",
|
||||
"key2_2": {
|
||||
"key2_2_1": "val2_2_1"
|
||||
}
|
||||
},
|
||||
"key3": 123
|
||||
}]
|
||||
}}`),
|
||||
expected: []map[string]interface{}{
|
||||
{
|
||||
"key1": "val1",
|
||||
"key2": map[string]interface{}{
|
||||
"key2_1": "val2_1",
|
||||
"key2_2": map[string]interface{}{
|
||||
"key2_2_1": "val2_2_1",
|
||||
},
|
||||
},
|
||||
"key3": float64(123),
|
||||
"generator": map[string]interface{}{
|
||||
"input": argoprojiov1alpha1.PluginInput{
|
||||
Parameters: argoprojiov1alpha1.PluginParameters{
|
||||
"pkey1": {Raw: []byte(`"val1"`)},
|
||||
"pkey2": {Raw: []byte(`"val2"`)},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "simple case with appended params",
|
||||
configmap: &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "first-plugin-cm",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"baseUrl": "http://127.0.0.1",
|
||||
"token": "$plugin.token",
|
||||
},
|
||||
},
|
||||
secret: &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "argocd-secret",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"plugin.token": []byte("my-secret"),
|
||||
},
|
||||
},
|
||||
inputParameters: map[string]apiextensionsv1.JSON{
|
||||
"pkey1": {Raw: []byte(`"val1"`)},
|
||||
"pkey2": {Raw: []byte(`"val2"`)},
|
||||
},
|
||||
gotemplate: false,
|
||||
content: []byte(`{"output": {"parameters": [{
|
||||
"key1": "val1",
|
||||
"key2": {
|
||||
"key2_1": "val2_1",
|
||||
"key2_2": {
|
||||
"key2_2_1": "val2_2_1"
|
||||
}
|
||||
},
|
||||
"key3": 123,
|
||||
"pkey2": "valplugin"
|
||||
}]}}`),
|
||||
expected: []map[string]interface{}{
|
||||
{
|
||||
"key1": "val1",
|
||||
"key2.key2_1": "val2_1",
|
||||
"key2.key2_2.key2_2_1": "val2_2_1",
|
||||
"key3": "123",
|
||||
"pkey2": "valplugin",
|
||||
"generator": map[string]interface{}{
|
||||
"input": argoprojiov1alpha1.PluginInput{
|
||||
Parameters: argoprojiov1alpha1.PluginParameters{
|
||||
"pkey1": {Raw: []byte(`"val1"`)},
|
||||
"pkey2": {Raw: []byte(`"val2"`)},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "no params",
|
||||
configmap: &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "first-plugin-cm",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"baseUrl": "http://127.0.0.1",
|
||||
"token": "$plugin.token",
|
||||
},
|
||||
},
|
||||
secret: &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "argocd-secret",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"plugin.token": []byte("my-secret"),
|
||||
},
|
||||
},
|
||||
inputParameters: argoprojiov1alpha1.PluginParameters{},
|
||||
gotemplate: false,
|
||||
content: []byte(`{"output": {
|
||||
"parameters": [{
|
||||
"key1": "val1",
|
||||
"key2": {
|
||||
"key2_1": "val2_1",
|
||||
"key2_2": {
|
||||
"key2_2_1": "val2_2_1"
|
||||
}
|
||||
},
|
||||
"key3": 123
|
||||
}]
|
||||
}}`),
|
||||
expected: []map[string]interface{}{
|
||||
{
|
||||
"key1": "val1",
|
||||
"key2.key2_1": "val2_1",
|
||||
"key2.key2_2.key2_2_1": "val2_2_1",
|
||||
"key3": "123",
|
||||
"generator": map[string]interface{}{
|
||||
"input": map[string]map[string]interface{}{
|
||||
"parameters": {},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "empty return",
|
||||
configmap: &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "first-plugin-cm",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"baseUrl": "http://127.0.0.1",
|
||||
"token": "$plugin.token",
|
||||
},
|
||||
},
|
||||
secret: &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "argocd-secret",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"plugin.token": []byte("my-secret"),
|
||||
},
|
||||
},
|
||||
inputParameters: map[string]apiextensionsv1.JSON{},
|
||||
gotemplate: false,
|
||||
content: []byte(`{"input": {"parameters": []}}`),
|
||||
expected: []map[string]interface{}{},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "wrong return",
|
||||
configmap: &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "first-plugin-cm",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"baseUrl": "http://127.0.0.1",
|
||||
"token": "$plugin.token",
|
||||
},
|
||||
},
|
||||
secret: &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "argocd-secret",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"plugin.token": []byte("my-secret"),
|
||||
},
|
||||
},
|
||||
inputParameters: map[string]apiextensionsv1.JSON{},
|
||||
gotemplate: false,
|
||||
content: []byte(`wrong body ...`),
|
||||
expected: []map[string]interface{}{},
|
||||
expectedError: fmt.Errorf("error listing params: error get api 'set': invalid character 'w' looking for beginning of value: wrong body ..."),
|
||||
},
|
||||
{
|
||||
name: "external secret",
|
||||
configmap: &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "first-plugin-cm",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"baseUrl": "http://127.0.0.1",
|
||||
"token": "$plugin-secret:plugin.token",
|
||||
},
|
||||
},
|
||||
secret: &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "plugin-secret",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"plugin.token": []byte("my-secret"),
|
||||
},
|
||||
},
|
||||
inputParameters: map[string]apiextensionsv1.JSON{
|
||||
"pkey1": {Raw: []byte(`"val1"`)},
|
||||
"pkey2": {Raw: []byte(`"val2"`)},
|
||||
},
|
||||
gotemplate: false,
|
||||
content: []byte(`{"output": {"parameters": [{
|
||||
"key1": "val1",
|
||||
"key2": {
|
||||
"key2_1": "val2_1",
|
||||
"key2_2": {
|
||||
"key2_2_1": "val2_2_1"
|
||||
}
|
||||
},
|
||||
"key3": 123,
|
||||
"pkey2": "valplugin"
|
||||
}]}}`),
|
||||
expected: []map[string]interface{}{
|
||||
{
|
||||
"key1": "val1",
|
||||
"key2.key2_1": "val2_1",
|
||||
"key2.key2_2.key2_2_1": "val2_2_1",
|
||||
"key3": "123",
|
||||
"pkey2": "valplugin",
|
||||
"generator": map[string]interface{}{
|
||||
"input": argoprojiov1alpha1.PluginInput{
|
||||
Parameters: argoprojiov1alpha1.PluginParameters{
|
||||
"pkey1": {Raw: []byte(`"val1"`)},
|
||||
"pkey2": {Raw: []byte(`"val2"`)},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "no secret",
|
||||
configmap: &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "first-plugin-cm",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"baseUrl": "http://127.0.0.1",
|
||||
"token": "$plugin.token",
|
||||
},
|
||||
},
|
||||
secret: &v1.Secret{},
|
||||
inputParameters: map[string]apiextensionsv1.JSON{
|
||||
"pkey1": {Raw: []byte(`"val1"`)},
|
||||
"pkey2": {Raw: []byte(`"val2"`)},
|
||||
},
|
||||
gotemplate: false,
|
||||
content: []byte(`{"output": {
|
||||
"parameters": [{
|
||||
"key1": "val1",
|
||||
"key2": {
|
||||
"key2_1": "val2_1",
|
||||
"key2_2": {
|
||||
"key2_2_1": "val2_2_1"
|
||||
}
|
||||
},
|
||||
"key3": 123
|
||||
}]
|
||||
}}`),
|
||||
expected: []map[string]interface{}{
|
||||
{
|
||||
"key1": "val1",
|
||||
"key2.key2_1": "val2_1",
|
||||
"key2.key2_2.key2_2_1": "val2_2_1",
|
||||
"key3": "123",
|
||||
"generator": map[string]interface{}{
|
||||
"input": argoprojiov1alpha1.PluginInput{
|
||||
Parameters: argoprojiov1alpha1.PluginParameters{
|
||||
"pkey1": {Raw: []byte(`"val1"`)},
|
||||
"pkey2": {Raw: []byte(`"val2"`)},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedError: fmt.Errorf("error fetching Secret token: error fetching secret default/argocd-secret: secrets \"argocd-secret\" not found"),
|
||||
},
|
||||
{
|
||||
name: "no configmap",
|
||||
configmap: &v1.ConfigMap{},
|
||||
secret: &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "argocd-secret",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"plugin.token": []byte("my-secret"),
|
||||
},
|
||||
},
|
||||
inputParameters: map[string]apiextensionsv1.JSON{
|
||||
"pkey1": {Raw: []byte(`"val1"`)},
|
||||
"pkey2": {Raw: []byte(`"val2"`)},
|
||||
},
|
||||
gotemplate: false,
|
||||
content: []byte(`{"output": {
|
||||
"parameters": [{
|
||||
"key1": "val1",
|
||||
"key2": {
|
||||
"key2_1": "val2_1",
|
||||
"key2_2": {
|
||||
"key2_2_1": "val2_2_1"
|
||||
}
|
||||
},
|
||||
"key3": 123
|
||||
}]
|
||||
}}`),
|
||||
expected: []map[string]interface{}{
|
||||
{
|
||||
"key1": "val1",
|
||||
"key2.key2_1": "val2_1",
|
||||
"key2.key2_2.key2_2_1": "val2_2_1",
|
||||
"key3": "123",
|
||||
"generator": map[string]interface{}{
|
||||
"input": argoprojiov1alpha1.PluginInput{
|
||||
Parameters: argoprojiov1alpha1.PluginParameters{
|
||||
"pkey1": {Raw: []byte(`"val1"`)},
|
||||
"pkey2": {Raw: []byte(`"val2"`)},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedError: fmt.Errorf("error fetching ConfigMap: configmaps \"\" not found"),
|
||||
},
|
||||
{
|
||||
name: "no baseUrl",
|
||||
configmap: &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "first-plugin-cm",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"token": "$plugin.token",
|
||||
},
|
||||
},
|
||||
secret: &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "argocd-secret",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"plugin.token": []byte("my-secret"),
|
||||
},
|
||||
},
|
||||
inputParameters: map[string]apiextensionsv1.JSON{
|
||||
"pkey1": {Raw: []byte(`"val1"`)},
|
||||
"pkey2": {Raw: []byte(`"val2"`)},
|
||||
},
|
||||
gotemplate: false,
|
||||
content: []byte(`{"output": {
|
||||
"parameters": [{
|
||||
"key1": "val1",
|
||||
"key2": {
|
||||
"key2_1": "val2_1",
|
||||
"key2_2": {
|
||||
"key2_2_1": "val2_2_1"
|
||||
}
|
||||
},
|
||||
"key3": 123
|
||||
}]
|
||||
}}`),
|
||||
expected: []map[string]interface{}{
|
||||
{
|
||||
"key1": "val1",
|
||||
"key2.key2_1": "val2_1",
|
||||
"key2.key2_2.key2_2_1": "val2_2_1",
|
||||
"key3": "123",
|
||||
"generator": map[string]interface{}{
|
||||
"input": argoprojiov1alpha1.PluginInput{
|
||||
Parameters: argoprojiov1alpha1.PluginParameters{
|
||||
"pkey1": {Raw: []byte(`"val1"`)},
|
||||
"pkey2": {Raw: []byte(`"val2"`)},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedError: fmt.Errorf("error fetching ConfigMap: baseUrl not found in ConfigMap"),
|
||||
},
|
||||
{
|
||||
name: "no token",
|
||||
configmap: &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "first-plugin-cm",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"baseUrl": "http://127.0.0.1",
|
||||
},
|
||||
},
|
||||
secret: &v1.Secret{},
|
||||
inputParameters: map[string]apiextensionsv1.JSON{
|
||||
"pkey1": {Raw: []byte(`"val1"`)},
|
||||
"pkey2": {Raw: []byte(`"val2"`)},
|
||||
},
|
||||
gotemplate: false,
|
||||
content: []byte(`{"output": {
|
||||
"parameters": [{
|
||||
"key1": "val1",
|
||||
"key2": {
|
||||
"key2_1": "val2_1",
|
||||
"key2_2": {
|
||||
"key2_2_1": "val2_2_1"
|
||||
}
|
||||
},
|
||||
"key3": 123
|
||||
}]
|
||||
}}`),
|
||||
expected: []map[string]interface{}{
|
||||
{
|
||||
"key1": "val1",
|
||||
"key2.key2_1": "val2_1",
|
||||
"key2.key2_2.key2_2_1": "val2_2_1",
|
||||
"key3": "123",
|
||||
"generator": map[string]interface{}{
|
||||
"input": argoprojiov1alpha1.PluginInput{
|
||||
Parameters: argoprojiov1alpha1.PluginParameters{
|
||||
"pkey1": {Raw: []byte(`"val1"`)},
|
||||
"pkey2": {Raw: []byte(`"val2"`)},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedError: fmt.Errorf("error fetching ConfigMap: token not found in ConfigMap"),
|
||||
},
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
for _, testCase := range testCases {
|
||||
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
|
||||
generatorConfig := argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
Plugin: &argoprojiov1alpha1.PluginGenerator{
|
||||
ConfigMapRef: argoprojiov1alpha1.PluginConfigMapRef{Name: testCase.configmap.Name},
|
||||
Input: argoprojiov1alpha1.PluginInput{
|
||||
Parameters: testCase.inputParameters,
|
||||
},
|
||||
Values: testCase.values,
|
||||
},
|
||||
}
|
||||
|
||||
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
authHeader := r.Header.Get("Authorization")
|
||||
_, tokenKey := plugin.ParseSecretKey(testCase.configmap.Data["token"])
|
||||
expectedToken := testCase.secret.Data[strings.Replace(tokenKey, "$", "", -1)]
|
||||
if authHeader != "Bearer "+string(expectedToken) {
|
||||
w.WriteHeader(http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
_, err := w.Write(testCase.content)
|
||||
if err != nil {
|
||||
assert.NoError(t, err, "error writing test response")
}
|
||||
})
|
||||
|
||||
fakeServer := httptest.NewServer(handler)
|
||||
|
||||
defer fakeServer.Close()
|
||||
|
||||
if _, ok := testCase.configmap.Data["baseUrl"]; ok {
|
||||
testCase.configmap.Data["baseUrl"] = fakeServer.URL
|
||||
}
|
||||
|
||||
fakeClient := kubefake.NewSimpleClientset(append([]runtime.Object{}, testCase.configmap, testCase.secret)...)
|
||||
|
||||
fakeClientWithCache := fake.NewClientBuilder().WithObjects([]client.Object{testCase.configmap, testCase.secret}...).Build()
|
||||
|
||||
var pluginGenerator = NewPluginGenerator(fakeClientWithCache, ctx, fakeClient, "default")
|
||||
|
||||
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set",
|
||||
},
|
||||
Spec: argoprojiov1alpha1.ApplicationSetSpec{
|
||||
GoTemplate: testCase.gotemplate,
|
||||
},
|
||||
}
|
||||
|
||||
got, err := pluginGenerator.GenerateParams(&generatorConfig, &applicationSetInfo)
|
||||
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
}
|
||||
|
||||
if testCase.expectedError != nil {
|
||||
assert.EqualError(t, err, testCase.expectedError.Error())
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
expectedJson, err := json.Marshal(testCase.expected)
|
||||
require.NoError(t, err)
|
||||
gotJson, err := json.Marshal(got)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, string(expectedJson), string(gotJson))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
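
// The table-driven test above stands up an httptest.Server as the plugin,
// verifies the Authorization bearer token resolved from the fixture Secret,
// and replies with the canned body. A minimal payload the generator accepts
// looks like
//
//	{"output": {"parameters": [{"key1": "val1"}]}}
//
// whereas a non-JSON body surfaces as the "error listing params" failure
// covered by the "wrong return" fixture.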
@@ -11,6 +11,7 @@ import (
|
||||
|
||||
"github.com/gosimple/slug"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/services/pull_request"
|
||||
pullrequest "github.com/argoproj/argo-cd/v2/applicationset/services/pull_request"
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
)
|
||||
@@ -25,16 +26,12 @@ type PullRequestGenerator struct {
|
||||
client client.Client
|
||||
selectServiceProviderFunc func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error)
|
||||
auth SCMAuthProviders
|
||||
scmRootCAPath string
|
||||
allowedSCMProviders []string
|
||||
}
|
||||
|
||||
func NewPullRequestGenerator(client client.Client, auth SCMAuthProviders, scmRootCAPath string, allowedScmProviders []string) Generator {
|
||||
func NewPullRequestGenerator(client client.Client, auth SCMAuthProviders) Generator {
|
||||
g := &PullRequestGenerator{
|
||||
client: client,
|
||||
auth: auth,
|
||||
scmRootCAPath: scmRootCAPath,
|
||||
allowedSCMProviders: allowedScmProviders,
|
||||
client: client,
|
||||
auth: auth,
|
||||
}
|
||||
g.selectServiceProviderFunc = g.selectServiceProvider
|
||||
return g
|
||||
@@ -69,7 +66,7 @@ func (g *PullRequestGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
|
||||
return nil, fmt.Errorf("failed to select pull request service provider: %v", err)
|
||||
}
|
||||
|
||||
pulls, err := pullrequest.ListPullRequests(ctx, svc, appSetGenerator.PullRequest.Filters)
|
||||
pulls, err := pull_request.ListPullRequests(ctx, svc, appSetGenerator.PullRequest.Filters)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error listing repos: %v", err)
|
||||
}
|
||||
@@ -87,34 +84,19 @@ func (g *PullRequestGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
|
||||
}
|
||||
|
||||
var shortSHALength int
|
||||
var shortSHALength7 int
|
||||
for _, pull := range pulls {
|
||||
shortSHALength = 8
|
||||
if len(pull.HeadSHA) < 8 {
|
||||
shortSHALength = len(pull.HeadSHA)
|
||||
}
|
||||
|
||||
shortSHALength7 = 7
|
||||
if len(pull.HeadSHA) < 7 {
|
||||
shortSHALength7 = len(pull.HeadSHA)
|
||||
}
|
||||
|
||||
paramMap := map[string]interface{}{
|
||||
"number": strconv.Itoa(pull.Number),
|
||||
"branch": pull.Branch,
|
||||
"branch_slug": slug.Make(pull.Branch),
|
||||
"target_branch": pull.TargetBranch,
|
||||
"target_branch_slug": slug.Make(pull.TargetBranch),
|
||||
"head_sha": pull.HeadSHA,
|
||||
"head_short_sha": pull.HeadSHA[:shortSHALength],
|
||||
"head_short_sha_7": pull.HeadSHA[:shortSHALength7],
|
||||
}
|
||||
|
||||
// PR labels will only be supported for Go Template appsets, since fasttemplate will be deprecated.
if applicationSetInfo != nil && applicationSetInfo.Spec.GoTemplate {
|
||||
paramMap["labels"] = pull.Labels
|
||||
}
|
||||
params = append(params, paramMap)
|
||||
params = append(params, map[string]interface{}{
|
||||
"number": strconv.Itoa(pull.Number),
|
||||
"branch": pull.Branch,
|
||||
"branch_slug": slug.Make(pull.Branch),
|
||||
"head_sha": pull.HeadSHA,
|
||||
"head_short_sha": pull.HeadSHA[:shortSHALength],
|
||||
})
|
||||
}
|
||||
return params, nil
|
||||
}
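
// The parameter loop above clamps the short-SHA slice length to the available
// characters, so a HeadSHA shorter than eight (or seven) characters such as
// "abcd" is emitted unchanged; the "a-very-short-sha" test case below pins
// this behaviour down.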
@@ -122,27 +104,18 @@ func (g *PullRequestGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
|
||||
// selectServiceProvider selects the provider to get pull requests from the configuration
|
||||
func (g *PullRequestGenerator) selectServiceProvider(ctx context.Context, generatorConfig *argoprojiov1alpha1.PullRequestGenerator, applicationSetInfo *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error) {
|
||||
if generatorConfig.Github != nil {
|
||||
if !ScmProviderAllowed(applicationSetInfo, generatorConfig.Github.API, g.allowedSCMProviders) {
|
||||
return nil, fmt.Errorf("scm provider not allowed: %s", generatorConfig.Github.API)
|
||||
}
|
||||
return g.github(ctx, generatorConfig.Github, applicationSetInfo)
|
||||
}
|
||||
if generatorConfig.GitLab != nil {
|
||||
providerConfig := generatorConfig.GitLab
|
||||
if !ScmProviderAllowed(applicationSetInfo, providerConfig.API, g.allowedSCMProviders) {
|
||||
return nil, fmt.Errorf("scm provider not allowed: %s", providerConfig.API)
|
||||
}
|
||||
token, err := g.getSecretRef(ctx, providerConfig.TokenRef, applicationSetInfo.Namespace)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error fetching Secret token: %v", err)
|
||||
}
|
||||
return pullrequest.NewGitLabService(ctx, token, providerConfig.API, providerConfig.Project, providerConfig.Labels, providerConfig.PullRequestState, g.scmRootCAPath, providerConfig.Insecure)
|
||||
return pullrequest.NewGitLabService(ctx, token, providerConfig.API, providerConfig.Project, providerConfig.Labels, providerConfig.PullRequestState)
|
||||
}
|
||||
if generatorConfig.Gitea != nil {
|
||||
providerConfig := generatorConfig.Gitea
|
||||
if !ScmProviderAllowed(applicationSetInfo, providerConfig.API, g.allowedSCMProviders) {
|
||||
return nil, fmt.Errorf("scm provider not allowed: %s", generatorConfig.Gitea.API)
|
||||
}
|
||||
token, err := g.getSecretRef(ctx, providerConfig.TokenRef, applicationSetInfo.Namespace)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error fetching Secret token: %v", err)
|
||||
@@ -151,9 +124,6 @@ func (g *PullRequestGenerator) selectServiceProvider(ctx context.Context, genera
|
||||
}
|
||||
if generatorConfig.BitbucketServer != nil {
|
||||
providerConfig := generatorConfig.BitbucketServer
|
||||
if !ScmProviderAllowed(applicationSetInfo, providerConfig.API, g.allowedSCMProviders) {
|
||||
return nil, fmt.Errorf("scm provider not allowed: %s", providerConfig.API)
|
||||
}
|
||||
if providerConfig.BasicAuth != nil {
|
||||
password, err := g.getSecretRef(ctx, providerConfig.BasicAuth.PasswordRef, applicationSetInfo.Namespace)
|
||||
if err != nil {
|
||||
@@ -164,32 +134,6 @@ func (g *PullRequestGenerator) selectServiceProvider(ctx context.Context, genera
|
||||
return pullrequest.NewBitbucketServiceNoAuth(ctx, providerConfig.API, providerConfig.Project, providerConfig.Repo)
|
||||
}
|
||||
}
|
||||
if generatorConfig.Bitbucket != nil {
|
||||
providerConfig := generatorConfig.Bitbucket
|
||||
if providerConfig.BearerToken != nil {
|
||||
appToken, err := g.getSecretRef(ctx, providerConfig.BearerToken.TokenRef, applicationSetInfo.Namespace)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error fetching Secret Bearer token: %v", err)
|
||||
}
|
||||
return pullrequest.NewBitbucketCloudServiceBearerToken(providerConfig.API, appToken, providerConfig.Owner, providerConfig.Repo)
|
||||
} else if providerConfig.BasicAuth != nil {
|
||||
password, err := g.getSecretRef(ctx, providerConfig.BasicAuth.PasswordRef, applicationSetInfo.Namespace)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error fetching Secret token: %v", err)
|
||||
}
|
||||
return pullrequest.NewBitbucketCloudServiceBasicAuth(providerConfig.API, providerConfig.BasicAuth.Username, password, providerConfig.Owner, providerConfig.Repo)
|
||||
} else {
|
||||
return pullrequest.NewBitbucketCloudServiceNoAuth(providerConfig.API, providerConfig.Owner, providerConfig.Repo)
|
||||
}
|
||||
}
|
||||
if generatorConfig.AzureDevOps != nil {
|
||||
providerConfig := generatorConfig.AzureDevOps
|
||||
token, err := g.getSecretRef(ctx, providerConfig.TokenRef, applicationSetInfo.Namespace)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error fetching Secret token: %v", err)
|
||||
}
|
||||
return pullrequest.NewAzureDevOpsService(ctx, token, providerConfig.API, providerConfig.Organization, providerConfig.Project, providerConfig.Repo, providerConfig.Labels)
|
||||
}
|
||||
return nil, fmt.Errorf("no Pull Request provider implementation configured")
|
||||
}
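
// selectServiceProvider resolves exactly one provider from whichever field of
// the PullRequestGenerator spec is set (Github, GitLab, Gitea, BitbucketServer,
// Bitbucket cloud, AzureDevOps), fetching any referenced token or basic-auth
// secret first, and returns the "no Pull Request provider implementation
// configured" error when none is set.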
|
||||
|
||||
|
||||
@@ -17,21 +17,19 @@ import (
|
||||
func TestPullRequestGithubGenerateParams(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
cases := []struct {
|
||||
selectFunc func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error)
|
||||
expected []map[string]interface{}
|
||||
expectedErr error
|
||||
applicationSet argoprojiov1alpha1.ApplicationSet
|
||||
selectFunc func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error)
|
||||
expected []map[string]interface{}
|
||||
expectedErr error
|
||||
}{
|
||||
{
|
||||
selectFunc: func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error) {
|
||||
return pullrequest.NewFakeService(
|
||||
ctx,
|
||||
[]*pullrequest.PullRequest{
|
||||
{
|
||||
Number: 1,
|
||||
Branch: "branch1",
|
||||
TargetBranch: "master",
|
||||
HeadSHA: "089d92cbf9ff857a39e6feccd32798ca700fb958",
|
||||
&pullrequest.PullRequest{
|
||||
Number: 1,
|
||||
Branch: "branch1",
|
||||
HeadSHA: "089d92cbf9ff857a39e6feccd32798ca700fb958",
|
||||
},
|
||||
},
|
||||
nil,
|
||||
@@ -39,14 +37,11 @@ func TestPullRequestGithubGenerateParams(t *testing.T) {
|
||||
},
|
||||
expected: []map[string]interface{}{
|
||||
{
|
||||
"number": "1",
|
||||
"branch": "branch1",
|
||||
"branch_slug": "branch1",
|
||||
"target_branch": "master",
|
||||
"target_branch_slug": "master",
|
||||
"head_sha": "089d92cbf9ff857a39e6feccd32798ca700fb958",
|
||||
"head_short_sha": "089d92cb",
|
||||
"head_short_sha_7": "089d92c",
|
||||
"number": "1",
|
||||
"branch": "branch1",
|
||||
"branch_slug": "branch1",
|
||||
"head_sha": "089d92cbf9ff857a39e6feccd32798ca700fb958",
|
||||
"head_short_sha": "089d92cb",
|
||||
},
|
||||
},
|
||||
expectedErr: nil,
|
||||
@@ -56,11 +51,10 @@ func TestPullRequestGithubGenerateParams(t *testing.T) {
|
||||
return pullrequest.NewFakeService(
|
||||
ctx,
|
||||
[]*pullrequest.PullRequest{
|
||||
{
|
||||
Number: 2,
|
||||
Branch: "feat/areally+long_pull_request_name_to_test_argo_slugification_and_branch_name_shortening_feature",
|
||||
TargetBranch: "feat/anotherreally+long_pull_request_name_to_test_argo_slugification_and_branch_name_shortening_feature",
|
||||
HeadSHA: "9b34ff5bd418e57d58891eb0aa0728043ca1e8be",
|
||||
&pullrequest.PullRequest{
|
||||
Number: 2,
|
||||
Branch: "feat/areally+long_pull_request_name_to_test_argo_slugification_and_branch_name_shortening_feature",
|
||||
HeadSHA: "9b34ff5bd418e57d58891eb0aa0728043ca1e8be",
|
||||
},
|
||||
},
|
||||
nil,
|
||||
@@ -68,14 +62,11 @@ func TestPullRequestGithubGenerateParams(t *testing.T) {
|
||||
},
|
||||
expected: []map[string]interface{}{
|
||||
{
|
||||
"number": "2",
|
||||
"branch": "feat/areally+long_pull_request_name_to_test_argo_slugification_and_branch_name_shortening_feature",
|
||||
"branch_slug": "feat-areally-long-pull-request-name-to-test-argo",
|
||||
"target_branch": "feat/anotherreally+long_pull_request_name_to_test_argo_slugification_and_branch_name_shortening_feature",
|
||||
"target_branch_slug": "feat-anotherreally-long-pull-request-name-to-test",
|
||||
"head_sha": "9b34ff5bd418e57d58891eb0aa0728043ca1e8be",
|
||||
"head_short_sha": "9b34ff5b",
|
||||
"head_short_sha_7": "9b34ff5",
|
||||
"number": "2",
|
||||
"branch": "feat/areally+long_pull_request_name_to_test_argo_slugification_and_branch_name_shortening_feature",
|
||||
"branch_slug": "feat-areally-long-pull-request-name-to-test-argo",
|
||||
"head_sha": "9b34ff5bd418e57d58891eb0aa0728043ca1e8be",
|
||||
"head_short_sha": "9b34ff5b",
|
||||
},
|
||||
},
|
||||
expectedErr: nil,
|
||||
@@ -85,11 +76,10 @@ func TestPullRequestGithubGenerateParams(t *testing.T) {
|
||||
return pullrequest.NewFakeService(
|
||||
ctx,
|
||||
[]*pullrequest.PullRequest{
|
||||
{
|
||||
Number: 1,
|
||||
Branch: "a-very-short-sha",
|
||||
TargetBranch: "master",
|
||||
HeadSHA: "abcd",
|
||||
&pullrequest.PullRequest{
|
||||
Number: 1,
|
||||
Branch: "a-very-short-sha",
|
||||
HeadSHA: "abcd",
|
||||
},
|
||||
},
|
||||
nil,
|
||||
@@ -97,14 +87,11 @@ func TestPullRequestGithubGenerateParams(t *testing.T) {
|
||||
},
|
||||
expected: []map[string]interface{}{
|
||||
{
|
||||
"number": "1",
|
||||
"branch": "a-very-short-sha",
|
||||
"branch_slug": "a-very-short-sha",
|
||||
"target_branch": "master",
|
||||
"target_branch_slug": "master",
|
||||
"head_sha": "abcd",
|
||||
"head_short_sha": "abcd",
|
||||
"head_short_sha_7": "abcd",
|
||||
"number": "1",
|
||||
"branch": "a-very-short-sha",
|
||||
"branch_slug": "a-very-short-sha",
|
||||
"head_sha": "abcd",
|
||||
"head_short_sha": "abcd",
|
||||
},
|
||||
},
|
||||
expectedErr: nil,
|
||||
@@ -120,79 +107,6 @@ func TestPullRequestGithubGenerateParams(t *testing.T) {
|
||||
expected: nil,
|
||||
expectedErr: fmt.Errorf("error listing repos: fake error"),
|
||||
},
|
||||
{
|
||||
selectFunc: func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error) {
|
||||
return pullrequest.NewFakeService(
|
||||
ctx,
|
||||
[]*pullrequest.PullRequest{
|
||||
{
|
||||
Number: 1,
|
||||
Branch: "branch1",
|
||||
TargetBranch: "master",
|
||||
HeadSHA: "089d92cbf9ff857a39e6feccd32798ca700fb958",
|
||||
Labels: []string{"preview"},
|
||||
},
|
||||
},
|
||||
nil,
|
||||
)
|
||||
},
|
||||
expected: []map[string]interface{}{
|
||||
{
|
||||
"number": "1",
|
||||
"branch": "branch1",
|
||||
"branch_slug": "branch1",
|
||||
"target_branch": "master",
|
||||
"target_branch_slug": "master",
|
||||
"head_sha": "089d92cbf9ff857a39e6feccd32798ca700fb958",
|
||||
"head_short_sha": "089d92cb",
|
||||
"head_short_sha_7": "089d92c",
|
||||
"labels": []string{"preview"},
|
||||
},
|
||||
},
|
||||
expectedErr: nil,
|
||||
applicationSet: argoprojiov1alpha1.ApplicationSet{
|
||||
Spec: argoprojiov1alpha1.ApplicationSetSpec{
|
||||
// Application set is using Go Template.
|
||||
GoTemplate: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
selectFunc: func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error) {
|
||||
return pullrequest.NewFakeService(
|
||||
ctx,
|
||||
[]*pullrequest.PullRequest{
|
||||
{
|
||||
Number: 1,
|
||||
Branch: "branch1",
|
||||
TargetBranch: "master",
|
||||
HeadSHA: "089d92cbf9ff857a39e6feccd32798ca700fb958",
|
||||
Labels: []string{"preview"},
|
||||
},
|
||||
},
|
||||
nil,
|
||||
)
|
||||
},
|
||||
expected: []map[string]interface{}{
|
||||
{
|
||||
"number": "1",
|
||||
"branch": "branch1",
|
||||
"branch_slug": "branch1",
|
||||
"target_branch": "master",
|
||||
"target_branch_slug": "master",
|
||||
"head_sha": "089d92cbf9ff857a39e6feccd32798ca700fb958",
|
||||
"head_short_sha": "089d92cb",
|
||||
"head_short_sha_7": "089d92c",
|
||||
},
|
||||
},
|
||||
expectedErr: nil,
|
||||
applicationSet: argoprojiov1alpha1.ApplicationSet{
|
||||
Spec: argoprojiov1alpha1.ApplicationSetSpec{
|
||||
// Application set is using fasttemplate.
|
||||
GoTemplate: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
@@ -203,7 +117,7 @@ func TestPullRequestGithubGenerateParams(t *testing.T) {
|
||||
PullRequest: &argoprojiov1alpha1.PullRequestGenerator{},
|
||||
}
|
||||
|
||||
got, gotErr := gen.GenerateParams(&generatorConfig, &c.applicationSet)
|
||||
got, gotErr := gen.GenerateParams(&generatorConfig, nil)
|
||||
assert.Equal(t, c.expectedErr, gotErr)
|
||||
assert.ElementsMatch(t, c.expected, got)
|
||||
}
|
||||
@@ -273,80 +187,3 @@ func TestPullRequestGetSecretRef(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAllowedSCMProviderPullRequest(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
providerConfig *argoprojiov1alpha1.PullRequestGenerator
|
||||
expectedError string
|
||||
}{
|
||||
{
|
||||
name: "Error Github",
|
||||
providerConfig: &argoprojiov1alpha1.PullRequestGenerator{
|
||||
Github: &argoprojiov1alpha1.PullRequestGeneratorGithub{
|
||||
API: "https://myservice.mynamespace.svc.cluster.local",
|
||||
},
|
||||
},
|
||||
expectedError: "failed to select pull request service provider: scm provider not allowed: https://myservice.mynamespace.svc.cluster.local",
|
||||
},
|
||||
{
|
||||
name: "Error Gitlab",
|
||||
providerConfig: &argoprojiov1alpha1.PullRequestGenerator{
|
||||
GitLab: &argoprojiov1alpha1.PullRequestGeneratorGitLab{
|
||||
API: "https://myservice.mynamespace.svc.cluster.local",
|
||||
},
|
||||
},
|
||||
expectedError: "failed to select pull request service provider: scm provider not allowed: https://myservice.mynamespace.svc.cluster.local",
|
||||
},
|
||||
{
|
||||
name: "Error Gitea",
|
||||
providerConfig: &argoprojiov1alpha1.PullRequestGenerator{
|
||||
Gitea: &argoprojiov1alpha1.PullRequestGeneratorGitea{
|
||||
API: "https://myservice.mynamespace.svc.cluster.local",
|
||||
},
|
||||
},
|
||||
expectedError: "failed to select pull request service provider: scm provider not allowed: https://myservice.mynamespace.svc.cluster.local",
|
||||
},
|
||||
{
|
||||
name: "Error Bitbucket",
|
||||
providerConfig: &argoprojiov1alpha1.PullRequestGenerator{
|
||||
BitbucketServer: &argoprojiov1alpha1.PullRequestGeneratorBitbucketServer{
|
||||
API: "https://myservice.mynamespace.svc.cluster.local",
|
||||
},
|
||||
},
|
||||
expectedError: "failed to select pull request service provider: scm provider not allowed: https://myservice.mynamespace.svc.cluster.local",
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range cases {
|
||||
testCaseCopy := testCase
|
||||
|
||||
t.Run(testCaseCopy.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
pullRequestGenerator := NewPullRequestGenerator(nil, SCMAuthProviders{}, "", []string{
|
||||
"github.myorg.com",
|
||||
"gitlab.myorg.com",
|
||||
"gitea.myorg.com",
|
||||
"bitbucket.myorg.com",
|
||||
"azuredevops.myorg.com",
|
||||
})
|
||||
|
||||
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set",
|
||||
},
|
||||
Spec: argoprojiov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{{
|
||||
PullRequest: testCaseCopy.providerConfig,
|
||||
}},
|
||||
},
|
||||
}
|
||||
|
||||
_, err := pullRequestGenerator.GenerateParams(&applicationSetInfo.Spec.Generators[0], &applicationSetInfo)
|
||||
|
||||
assert.Error(t, err, "Must return an error")
|
||||
assert.Equal(t, testCaseCopy.expectedError, err.Error())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,12 +9,9 @@ import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/services/github_app_auth"
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/services/scm_provider"
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/utils"
|
||||
"github.com/argoproj/argo-cd/v2/common"
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
)
|
||||
|
||||
@@ -29,20 +26,16 @@ type SCMProviderGenerator struct {
|
||||
// Testing hooks.
|
||||
overrideProvider scm_provider.SCMProviderService
|
||||
SCMAuthProviders
|
||||
scmRootCAPath string
|
||||
allowedSCMProviders []string
|
||||
}
|
||||
|
||||
type SCMAuthProviders struct {
|
||||
GitHubApps github_app_auth.Credentials
|
||||
}
|
||||
|
||||
func NewSCMProviderGenerator(client client.Client, providers SCMAuthProviders, scmRootCAPath string, allowedSCMProviders []string) Generator {
|
||||
func NewSCMProviderGenerator(client client.Client, providers SCMAuthProviders) Generator {
|
||||
return &SCMProviderGenerator{
|
||||
client: client,
|
||||
SCMAuthProviders: providers,
|
||||
scmRootCAPath: scmRootCAPath,
|
||||
allowedSCMProviders: allowedSCMProviders,
|
||||
client: client,
|
||||
SCMAuthProviders: providers,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -65,26 +58,6 @@ func (g *SCMProviderGenerator) GetTemplate(appSetGenerator *argoprojiov1alpha1.A
|
||||
return &appSetGenerator.SCMProvider.Template
|
||||
}
|
||||
|
||||
func ScmProviderAllowed(applicationSetInfo *argoprojiov1alpha1.ApplicationSet, url string, allowedScmProviders []string) bool {
|
||||
if url == "" || len(allowedScmProviders) == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
for _, allowedScmProvider := range allowedScmProviders {
|
||||
if url == allowedScmProvider {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
log.WithFields(log.Fields{
|
||||
common.SecurityField: common.SecurityMedium,
|
||||
"applicationset": applicationSetInfo.Name,
|
||||
"appSetNamespace": applicationSetInfo.Namespace,
|
||||
}).Debugf("attempted to use disallowed SCM %q", url)
|
||||
|
||||
return false
|
||||
}
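
// ScmProviderAllowed treats an empty URL or an empty allow-list as allowed,
// requires an exact match against the configured providers otherwise, and logs
// a medium-severity security event at debug level before rejecting the URL.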
|
||||
|
||||
func (g *SCMProviderGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, applicationSetInfo *argoprojiov1alpha1.ApplicationSet) ([]map[string]interface{}, error) {
|
||||
if appSetGenerator == nil {
|
||||
return nil, EmptyAppSetGeneratorError
|
||||
@@ -102,30 +75,21 @@ func (g *SCMProviderGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
|
||||
if g.overrideProvider != nil {
|
||||
provider = g.overrideProvider
|
||||
} else if providerConfig.Github != nil {
|
||||
if !ScmProviderAllowed(applicationSetInfo, providerConfig.Github.API, g.allowedSCMProviders) {
|
||||
return nil, fmt.Errorf("scm provider not allowed: %s", providerConfig.Github.API)
|
||||
}
|
||||
var err error
|
||||
provider, err = g.githubProvider(ctx, providerConfig.Github, applicationSetInfo)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("scm provider: %w", err)
|
||||
}
|
||||
} else if providerConfig.Gitlab != nil {
|
||||
if !ScmProviderAllowed(applicationSetInfo, providerConfig.Gitlab.API, g.allowedSCMProviders) {
|
||||
return nil, fmt.Errorf("scm provider not allowed: %s", providerConfig.Gitlab.API)
|
||||
}
|
||||
token, err := g.getSecretRef(ctx, providerConfig.Gitlab.TokenRef, applicationSetInfo.Namespace)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error fetching Gitlab token: %v", err)
|
||||
}
|
||||
provider, err = scm_provider.NewGitlabProvider(ctx, providerConfig.Gitlab.Group, token, providerConfig.Gitlab.API, providerConfig.Gitlab.AllBranches, providerConfig.Gitlab.IncludeSubgroups, providerConfig.Gitlab.Insecure, g.scmRootCAPath)
|
||||
provider, err = scm_provider.NewGitlabProvider(ctx, providerConfig.Gitlab.Group, token, providerConfig.Gitlab.API, providerConfig.Gitlab.AllBranches, providerConfig.Gitlab.IncludeSubgroups)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error initializing Gitlab service: %v", err)
|
||||
}
|
||||
} else if providerConfig.Gitea != nil {
|
||||
if !ScmProviderAllowed(applicationSetInfo, providerConfig.Gitea.API, g.allowedSCMProviders) {
|
||||
return nil, fmt.Errorf("scm provider not allowed: %s", providerConfig.Gitea.API)
|
||||
}
|
||||
token, err := g.getSecretRef(ctx, providerConfig.Gitea.TokenRef, applicationSetInfo.Namespace)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error fetching Gitea token: %v", err)
|
||||
@@ -136,9 +100,6 @@ func (g *SCMProviderGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
|
||||
}
|
||||
} else if providerConfig.BitbucketServer != nil {
|
||||
providerConfig := providerConfig.BitbucketServer
|
||||
if !ScmProviderAllowed(applicationSetInfo, providerConfig.API, g.allowedSCMProviders) {
|
||||
return nil, fmt.Errorf("scm provider not allowed: %s", providerConfig.API)
|
||||
}
|
||||
var scmError error
|
||||
if providerConfig.BasicAuth != nil {
|
||||
password, err := g.getSecretRef(ctx, providerConfig.BasicAuth.PasswordRef, applicationSetInfo.Namespace)
|
||||
@@ -153,9 +114,6 @@ func (g *SCMProviderGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
|
||||
return nil, fmt.Errorf("error initializing Bitbucket Server service: %v", scmError)
|
||||
}
|
||||
} else if providerConfig.AzureDevOps != nil {
|
||||
if !ScmProviderAllowed(applicationSetInfo, providerConfig.AzureDevOps.API, g.allowedSCMProviders) {
|
||||
return nil, fmt.Errorf("scm provider not allowed: %s", providerConfig.AzureDevOps.API)
|
||||
}
|
||||
token, err := g.getSecretRef(ctx, providerConfig.AzureDevOps.AccessTokenRef, applicationSetInfo.Namespace)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error fetching Azure Devops access token: %v", err)
|
||||
@@ -173,12 +131,6 @@ func (g *SCMProviderGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error initializing Bitbucket cloud service: %v", err)
|
||||
}
|
||||
} else if providerConfig.AWSCodeCommit != nil {
|
||||
var awsErr error
|
||||
provider, awsErr = scm_provider.NewAWSCodeCommitProvider(ctx, providerConfig.AWSCodeCommit.TagFilters, providerConfig.AWSCodeCommit.Role, providerConfig.AWSCodeCommit.Region, providerConfig.AWSCodeCommit.AllBranches)
|
||||
if awsErr != nil {
|
||||
return nil, fmt.Errorf("error initializing AWS codecommit service: %v", awsErr)
|
||||
}
|
||||
} else {
|
||||
return nil, fmt.Errorf("no SCM provider implementation configured")
|
||||
}
|
||||
@@ -188,40 +140,26 @@ func (g *SCMProviderGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error listing repos: %v", err)
|
||||
}
|
||||
paramsArray := make([]map[string]interface{}, 0, len(repos))
|
||||
params := make([]map[string]interface{}, 0, len(repos))
|
||||
var shortSHALength int
|
||||
var shortSHALength7 int
|
||||
for _, repo := range repos {
|
||||
shortSHALength = 8
|
||||
if len(repo.SHA) < 8 {
|
||||
shortSHALength = len(repo.SHA)
|
||||
}
|
||||
|
||||
shortSHALength7 = 7
|
||||
if len(repo.SHA) < 7 {
|
||||
shortSHALength7 = len(repo.SHA)
|
||||
}
|
||||
|
||||
params := map[string]interface{}{
|
||||
params = append(params, map[string]interface{}{
|
||||
"organization": repo.Organization,
|
||||
"repository": repo.Repository,
|
||||
"url": repo.URL,
|
||||
"branch": repo.Branch,
|
||||
"sha": repo.SHA,
|
||||
"short_sha": repo.SHA[:shortSHALength],
|
||||
"short_sha_7": repo.SHA[:shortSHALength7],
|
||||
"labels": strings.Join(repo.Labels, ","),
|
||||
"branchNormalized": utils.SanitizeName(repo.Branch),
|
||||
}
|
||||
|
||||
err := appendTemplatedValues(appSetGenerator.SCMProvider.Values, params, applicationSetInfo.Spec.GoTemplate, applicationSetInfo.Spec.GoTemplateOptions)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to append templated values: %w", err)
|
||||
}
|
||||
|
||||
paramsArray = append(paramsArray, params)
|
||||
})
|
||||
}
|
||||
return paramsArray, nil
|
||||
return params, nil
|
||||
}
|
||||
|
||||
func (g *SCMProviderGenerator) getSecretRef(ctx context.Context, ref *argoprojiov1alpha1.SecretRef, namespace string) (string, error) {
|
||||
|
||||
@@ -80,209 +80,38 @@ func TestSCMProviderGetSecretRef(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSCMProviderGenerateParams(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
repos []*scm_provider.Repository
|
||||
values map[string]string
|
||||
expected []map[string]interface{}
|
||||
expectedError error
|
||||
}{
|
||||
{
|
||||
name: "Multiple repos with labels",
|
			repos: []*scm_provider.Repository{
				{
					Organization: "myorg",
					Repository: "repo1",
					URL: "git@github.com:myorg/repo1.git",
					Branch: "main",
					SHA: "0bc57212c3cbbec69d20b34c507284bd300def5b",
					Labels: []string{"prod", "staging"},
				},
				{
					Organization: "myorg",
					Repository: "repo2",
					URL: "git@github.com:myorg/repo2.git",
					Branch: "main",
					SHA: "59d0",
				},
	mockProvider := &scm_provider.MockProvider{
		Repos: []*scm_provider.Repository{
			{
				Organization: "myorg",
				Repository: "repo1",
				URL: "git@github.com:myorg/repo1.git",
				Branch: "main",
				SHA: "0bc57212c3cbbec69d20b34c507284bd300def5b",
				Labels: []string{"prod", "staging"},
			},
			expected: []map[string]interface{}{
				{
					"organization": "myorg",
					"repository": "repo1",
					"url": "git@github.com:myorg/repo1.git",
					"branch": "main",
					"branchNormalized": "main",
					"sha": "0bc57212c3cbbec69d20b34c507284bd300def5b",
					"short_sha": "0bc57212",
					"short_sha_7": "0bc5721",
					"labels": "prod,staging",
				},
				{
					"organization": "myorg",
					"repository": "repo2",
					"url": "git@github.com:myorg/repo2.git",
					"branch": "main",
					"branchNormalized": "main",
					"sha": "59d0",
					"short_sha": "59d0",
					"short_sha_7": "59d0",
					"labels": "",
				},
			},
		},
		{
			name: "Value interpolation",
			repos: []*scm_provider.Repository{
				{
					Organization: "myorg",
					Repository: "repo3",
					URL: "git@github.com:myorg/repo3.git",
					Branch: "main",
					SHA: "0bc57212c3cbbec69d20b34c507284bd300def5b",
					Labels: []string{"prod", "staging"},
				},
			},
			values: map[string]string{
				"foo": "bar",
				"should_i_force_push_to": "{{ branch }}?",
			},
			expected: []map[string]interface{}{
				{
					"organization": "myorg",
					"repository": "repo3",
					"url": "git@github.com:myorg/repo3.git",
					"branch": "main",
					"branchNormalized": "main",
					"sha": "0bc57212c3cbbec69d20b34c507284bd300def5b",
					"short_sha": "0bc57212",
					"short_sha_7": "0bc5721",
					"labels": "prod,staging",
					"values.foo": "bar",
					"values.should_i_force_push_to": "main?",
				},
				{
					Organization: "myorg",
					Repository: "repo2",
					URL: "git@github.com:myorg/repo2.git",
					Branch: "main",
					SHA: "59d0",
				},
			},
	}

	for _, testCase := range cases {
		testCaseCopy := testCase

		t.Run(testCaseCopy.name, func(t *testing.T) {
			t.Parallel()

			mockProvider := &scm_provider.MockProvider{
				Repos: testCaseCopy.repos,
			}
			scmGenerator := &SCMProviderGenerator{overrideProvider: mockProvider}
			applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
				ObjectMeta: metav1.ObjectMeta{
					Name: "set",
				},
				Spec: argoprojiov1alpha1.ApplicationSetSpec{
					Generators: []argoprojiov1alpha1.ApplicationSetGenerator{{
						SCMProvider: &argoprojiov1alpha1.SCMProviderGenerator{
							Values: testCaseCopy.values,
						},
					}},
				},
			}

			got, err := scmGenerator.GenerateParams(&applicationSetInfo.Spec.Generators[0], &applicationSetInfo)

			if testCaseCopy.expectedError != nil {
				assert.EqualError(t, err, testCaseCopy.expectedError.Error())
			} else {
				assert.NoError(t, err)
				assert.Equal(t, testCaseCopy.expected, got)
			}

		})
	}
}
|
||||
|
||||
func TestAllowedSCMProvider(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
providerConfig *argoprojiov1alpha1.SCMProviderGenerator
|
||||
expectedError string
|
||||
}{
|
||||
{
|
||||
name: "Error Github",
|
||||
providerConfig: &argoprojiov1alpha1.SCMProviderGenerator{
|
||||
Github: &argoprojiov1alpha1.SCMProviderGeneratorGithub{
|
||||
API: "https://myservice.mynamespace.svc.cluster.local",
|
||||
},
|
||||
},
|
||||
expectedError: "scm provider not allowed: https://myservice.mynamespace.svc.cluster.local",
|
||||
},
|
||||
{
|
||||
name: "Error Gitlab",
|
||||
providerConfig: &argoprojiov1alpha1.SCMProviderGenerator{
|
||||
Gitlab: &argoprojiov1alpha1.SCMProviderGeneratorGitlab{
|
||||
API: "https://myservice.mynamespace.svc.cluster.local",
|
||||
},
|
||||
},
|
||||
expectedError: "scm provider not allowed: https://myservice.mynamespace.svc.cluster.local",
|
||||
},
|
||||
{
|
||||
name: "Error Gitea",
|
||||
providerConfig: &argoprojiov1alpha1.SCMProviderGenerator{
|
||||
Gitea: &argoprojiov1alpha1.SCMProviderGeneratorGitea{
|
||||
API: "https://myservice.mynamespace.svc.cluster.local",
|
||||
},
|
||||
},
|
||||
expectedError: "scm provider not allowed: https://myservice.mynamespace.svc.cluster.local",
|
||||
},
|
||||
{
|
||||
name: "Error Bitbucket",
|
||||
providerConfig: &argoprojiov1alpha1.SCMProviderGenerator{
|
||||
BitbucketServer: &argoprojiov1alpha1.SCMProviderGeneratorBitbucketServer{
|
||||
API: "https://myservice.mynamespace.svc.cluster.local",
|
||||
},
|
||||
},
|
||||
expectedError: "scm provider not allowed: https://myservice.mynamespace.svc.cluster.local",
|
||||
},
|
||||
{
|
||||
name: "Error AzureDevops",
|
||||
providerConfig: &argoprojiov1alpha1.SCMProviderGenerator{
|
||||
AzureDevOps: &argoprojiov1alpha1.SCMProviderGeneratorAzureDevOps{
|
||||
API: "https://myservice.mynamespace.svc.cluster.local",
|
||||
},
|
||||
},
|
||||
expectedError: "scm provider not allowed: https://myservice.mynamespace.svc.cluster.local",
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range cases {
|
||||
testCaseCopy := testCase
|
||||
|
||||
t.Run(testCaseCopy.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
scmGenerator := &SCMProviderGenerator{allowedSCMProviders: []string{
|
||||
"github.myorg.com",
|
||||
"gitlab.myorg.com",
|
||||
"gitea.myorg.com",
|
||||
"bitbucket.myorg.com",
|
||||
"azuredevops.myorg.com",
|
||||
}}
|
||||
|
||||
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set",
|
||||
},
|
||||
Spec: argoprojiov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{{
|
||||
SCMProvider: testCaseCopy.providerConfig,
|
||||
}},
|
||||
},
|
||||
}
|
||||
|
||||
_, err := scmGenerator.GenerateParams(&applicationSetInfo.Spec.Generators[0], &applicationSetInfo)
|
||||
|
||||
assert.Error(t, err, "Must return an error")
|
||||
assert.Equal(t, testCaseCopy.expectedError, err.Error())
|
||||
})
|
||||
}
|
||||
gen := &SCMProviderGenerator{overrideProvider: mockProvider}
|
||||
params, err := gen.GenerateParams(&argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
SCMProvider: &argoprojiov1alpha1.SCMProviderGenerator{},
|
||||
}, nil)
|
||||
assert.Nil(t, err)
|
||||
assert.Len(t, params, 2)
|
||||
assert.Equal(t, "myorg", params[0]["organization"])
|
||||
assert.Equal(t, "repo1", params[0]["repository"])
|
||||
assert.Equal(t, "git@github.com:myorg/repo1.git", params[0]["url"])
|
||||
assert.Equal(t, "main", params[0]["branch"])
|
||||
assert.Equal(t, "0bc57212c3cbbec69d20b34c507284bd300def5b", params[0]["sha"])
|
||||
assert.Equal(t, "0bc57212", params[0]["short_sha"])
|
||||
assert.Equal(t, "59d0", params[1]["short_sha"])
|
||||
assert.Equal(t, "prod,staging", params[0]["labels"])
|
||||
assert.Equal(t, "repo2", params[1]["repository"])
|
||||
}
|
||||
|
||||
@@ -1,43 +0,0 @@
package generators

import (
	"fmt"
)

func appendTemplatedValues(values map[string]string, params map[string]interface{}, useGoTemplate bool, goTemplateOptions []string) error {
	// We create a local map to ensure that we do not fall victim to a billion-laughs attack. We iterate through the
	// cluster values map and only replace values in said map if it has already been allowlisted in the params map.
	// Once we iterate through all the cluster values we can then safely merge the `tmp` map into the main params map.
	tmp := map[string]interface{}{}

	for key, value := range values {
		result, err := replaceTemplatedString(value, params, useGoTemplate, goTemplateOptions)

		if err != nil {
			return fmt.Errorf("failed to replace templated string: %w", err)
		}

		if useGoTemplate {
			if tmp["values"] == nil {
				tmp["values"] = map[string]string{}
			}
			tmp["values"].(map[string]string)[key] = result
		} else {
			tmp[fmt.Sprintf("values.%s", key)] = result
		}
	}

	for key, value := range tmp {
		params[key] = value
	}

	return nil
}

func replaceTemplatedString(value string, params map[string]interface{}, useGoTemplate bool, goTemplateOptions []string) (string, error) {
	replacedTmplStr, err := render.Replace(value, params, useGoTemplate, goTemplateOptions)
	if err != nil {
		return "", fmt.Errorf("failed to replace templated string with rendered values: %w", err)
	}
	return replacedTmplStr, nil
}
|
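
For context, a minimal sketch of how the two modes above differ in practice; the key names below come from the SCM provider test earlier in this diff, everything else is illustrative:

	// Illustrative only, not part of this change.
	params := map[string]interface{}{"branch": "main"}
	values := map[string]string{"should_i_force_push_to": "{{ branch }}?"}

	// Flat mode (useGoTemplate=false): results are merged in as "values.<key>" params.
	_ = appendTemplatedValues(values, params, false, nil)
	// params["values.should_i_force_push_to"] == "main?"

	// Go-template mode (useGoTemplate=true) would instead nest the results under
	// params["values"] as a map[string]string, with "{{ .branch }}?" as the template syntax.
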
||||
@@ -1,125 +0,0 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestValueInterpolation(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
values map[string]string
|
||||
params map[string]interface{}
|
||||
expected map[string]interface{}
|
||||
}{
|
||||
{
|
||||
name: "Simple interpolation",
|
||||
values: map[string]string{
|
||||
"hello": "{{ world }}",
|
||||
},
|
||||
params: map[string]interface{}{
|
||||
"world": "world!",
|
||||
},
|
||||
expected: map[string]interface{}{
|
||||
"world": "world!",
|
||||
"values.hello": "world!",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Non-existent",
|
||||
values: map[string]string{
|
||||
"non-existent": "{{ non-existent }}",
|
||||
},
|
||||
params: map[string]interface{}{},
|
||||
expected: map[string]interface{}{
|
||||
"values.non-existent": "{{ non-existent }}",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Billion laughs",
|
||||
values: map[string]string{
|
||||
"lol1": "lol",
|
||||
"lol2": "{{values.lol1}}{{values.lol1}}",
|
||||
"lol3": "{{values.lol2}}{{values.lol2}}{{values.lol2}}",
|
||||
},
|
||||
params: map[string]interface{}{},
|
||||
expected: map[string]interface{}{
|
||||
"values.lol1": "lol",
|
||||
"values.lol2": "{{values.lol1}}{{values.lol1}}",
|
||||
"values.lol3": "{{values.lol2}}{{values.lol2}}{{values.lol2}}",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
err := appendTemplatedValues(testCase.values, testCase.params, false, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.EqualValues(t, testCase.expected, testCase.params)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValueInterpolationWithGoTemplating(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
values map[string]string
|
||||
params map[string]interface{}
|
||||
expected map[string]interface{}
|
||||
}{
|
||||
{
|
||||
name: "Simple interpolation",
|
||||
values: map[string]string{
|
||||
"hello": "{{ .world }}",
|
||||
},
|
||||
params: map[string]interface{}{
|
||||
"world": "world!",
|
||||
},
|
||||
expected: map[string]interface{}{
|
||||
"world": "world!",
|
||||
"values": map[string]string{
|
||||
"hello": "world!",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Non-existent to default",
|
||||
values: map[string]string{
|
||||
"non_existent": "{{ default \"bar\" .non_existent }}",
|
||||
},
|
||||
params: map[string]interface{}{},
|
||||
expected: map[string]interface{}{
|
||||
"values": map[string]string{
|
||||
"non_existent": "bar",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Billion laughs",
|
||||
values: map[string]string{
|
||||
"lol1": "lol",
|
||||
"lol2": "{{.values.lol1}}{{.values.lol1}}",
|
||||
"lol3": "{{.values.lol2}}{{.values.lol2}}{{.values.lol2}}",
|
||||
},
|
||||
params: map[string]interface{}{},
|
||||
expected: map[string]interface{}{
|
||||
"values": map[string]string{
|
||||
"lol1": "lol",
|
||||
"lol2": "<no value><no value>",
|
||||
"lol3": "<no value><no value><no value>",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
err := appendTemplatedValues(testCase.values, testCase.params, true, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.EqualValues(t, testCase.expected, testCase.params)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -20,12 +20,10 @@ func Client(g github_app_auth.Authentication, url string) (*github.Client, error
		url = g.EnterpriseBaseURL
	}
	var client *github.Client
	httpClient := http.Client{Transport: rt}
	if url == "" {
		httpClient := http.Client{Transport: rt}
		client = github.NewClient(&httpClient)
	} else {
		rt.BaseURL = url
		httpClient := http.Client{Transport: rt}
		client, err = github.NewEnterpriseClient(url, url, &httpClient)
		if err != nil {
			return nil, fmt.Errorf("failed to create github enterprise client: %w", err)

||||
@@ -1,161 +0,0 @@
|
||||
package http
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
userAgent = "argocd-applicationset"
|
||||
defaultTimeout = 30
|
||||
)
|
||||
|
||||
type Client struct {
|
||||
// baseURL is the URL used for API requests.
|
||||
baseURL string
|
||||
|
||||
// UserAgent is the user agent to include in HTTP requests.
|
||||
UserAgent string
|
||||
|
||||
// Token is used to make authenticated API calls.
|
||||
token string
|
||||
|
||||
// Client is an HTTP client used to communicate with the API.
|
||||
client *http.Client
|
||||
}
|
||||
|
||||
type ErrorResponse struct {
|
||||
Body []byte
|
||||
Response *http.Response
|
||||
Message string
|
||||
}
|
||||
|
||||
func NewClient(baseURL string, options ...ClientOptionFunc) (*Client, error) {
|
||||
client, err := newClient(baseURL, options...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return client, nil
|
||||
}
|
||||
|
||||
func newClient(baseURL string, options ...ClientOptionFunc) (*Client, error) {
|
||||
c := &Client{baseURL: baseURL, UserAgent: userAgent}
|
||||
|
||||
// Configure the HTTP client.
|
||||
c.client = &http.Client{
|
||||
Timeout: time.Duration(defaultTimeout) * time.Second,
|
||||
}
|
||||
|
||||
// Apply any given client options.
|
||||
for _, fn := range options {
|
||||
if fn == nil {
|
||||
continue
|
||||
}
|
||||
if err := fn(c); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (c *Client) NewRequest(method, path string, body interface{}, options []ClientOptionFunc) (*http.Request, error) {
|
||||
|
||||
// Make sure the given URL ends with a slash
|
||||
if !strings.HasSuffix(c.baseURL, "/") {
|
||||
c.baseURL += "/"
|
||||
}
|
||||
|
||||
var buf io.ReadWriter
|
||||
if body != nil {
|
||||
buf = &bytes.Buffer{}
|
||||
enc := json.NewEncoder(buf)
|
||||
enc.SetEscapeHTML(false)
|
||||
err := enc.Encode(body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
req, err := http.NewRequest(method, c.baseURL+path, buf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if body != nil {
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
}
|
||||
|
||||
if len(c.token) != 0 {
|
||||
req.Header.Set("Authorization", "Bearer "+c.token)
|
||||
}
|
||||
|
||||
if c.UserAgent != "" {
|
||||
req.Header.Set("User-Agent", c.UserAgent)
|
||||
}
|
||||
|
||||
return req, nil
|
||||
}
|
||||
|
||||
func (c *Client) Do(ctx context.Context, req *http.Request, v interface{}) (*http.Response, error) {
|
||||
resp, err := c.client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer resp.Body.Close()
|
||||
|
||||
if err := CheckResponse(resp); err != nil {
|
||||
return resp, err
|
||||
}
|
||||
|
||||
switch v := v.(type) {
|
||||
case nil:
|
||||
case io.Writer:
|
||||
_, err = io.Copy(v, resp.Body)
|
||||
default:
|
||||
buf := new(bytes.Buffer)
|
||||
teeReader := io.TeeReader(resp.Body, buf)
|
||||
decErr := json.NewDecoder(teeReader).Decode(v)
|
||||
if decErr == io.EOF {
|
||||
decErr = nil // ignore EOF errors caused by empty response body
|
||||
}
|
||||
if decErr != nil {
|
||||
err = fmt.Errorf("%s: %s", decErr.Error(), buf.String())
|
||||
}
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// CheckResponse checks the API response for errors, and returns them if present.
|
||||
func CheckResponse(resp *http.Response) error {
|
||||
|
||||
if c := resp.StatusCode; 200 <= c && c <= 299 {
|
||||
return nil
|
||||
}
|
||||
|
||||
data, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return fmt.Errorf("API error with status code %d: %v", resp.StatusCode, err)
|
||||
}
|
||||
|
||||
var raw map[string]interface{}
|
||||
if err := json.Unmarshal(data, &raw); err != nil {
|
||||
return fmt.Errorf("API error with status code %d: %s", resp.StatusCode, string(data))
|
||||
}
|
||||
|
||||
message := ""
|
||||
if value, ok := raw["message"].(string); ok {
|
||||
message = value
|
||||
} else if value, ok := raw["error"].(string); ok {
|
||||
message = value
|
||||
}
|
||||
|
||||
return fmt.Errorf("API error with status code %d: %s", resp.StatusCode, message)
|
||||
}
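
For orientation, a short sketch of how this internal client is driven end to end; the base URL and payload are placeholders, and the endpoint path mirrors the plugin service later in this diff:

	// Illustrative only: build a JSON POST request and decode the response.
	client, err := NewClient("http://plugin.example")
	// error handling omitted in this sketch
	req, err := client.NewRequest("POST", "api/v1/getparams.execute", map[string]string{"appset": "my-set"}, nil)
	var out map[string]interface{}
	_, err = client.Do(context.Background(), req, &out)
	// Non-2xx responses are turned into errors by CheckResponse ("API error with status code ...").
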
|
||||
@@ -1,22 +0,0 @@
package http

import "time"

// ClientOptionFunc can be used to customize a new Restful API client.
type ClientOptionFunc func(*Client) error

// WithToken is an option for NewClient to set token
func WithToken(token string) ClientOptionFunc {
	return func(c *Client) error {
		c.token = token
		return nil
	}
}

// WithTimeout can be used to configure a custom timeout for requests.
func WithTimeout(timeout int) ClientOptionFunc {
	return func(c *Client) error {
		c.client.Timeout = time.Duration(timeout) * time.Second
		return nil
	}
}
|
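
These options plug into NewClient from the client shown above; a brief sketch with illustrative values:

	// Illustrative only: a bearer token plus a 60-second request timeout.
	client, err := NewClient("http://plugin.example",
		WithToken("my-plugin-token"),
		WithTimeout(60),
	)
	// client.Do(...) will now send "Authorization: Bearer my-plugin-token" and time out after 60 seconds.
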
||||
@@ -1,163 +0,0 @@
|
||||
package http
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestClient(t *testing.T) {
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_, err := w.Write([]byte("Hello, World!"))
|
||||
if err != nil {
|
||||
assert.NoError(t, fmt.Errorf("Error Write %v", err))
|
||||
}
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
var clientOptionFns []ClientOptionFunc
|
||||
_, err := NewClient(server.URL, clientOptionFns...)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create client: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestClientDo(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
for _, c := range []struct {
|
||||
name string
|
||||
params map[string]string
|
||||
content []byte
|
||||
fakeServer *httptest.Server
|
||||
clientOptionFns []ClientOptionFunc
|
||||
expected []map[string]interface{}
|
||||
expectedCode int
|
||||
expectedError error
|
||||
}{
|
||||
{
|
||||
name: "Simple",
|
||||
params: map[string]string{
|
||||
"pkey1": "val1",
|
||||
"pkey2": "val2",
|
||||
},
|
||||
fakeServer: httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_, err := w.Write([]byte(`[{
|
||||
"key1": "val1",
|
||||
"key2": {
|
||||
"key2_1": "val2_1",
|
||||
"key2_2": {
|
||||
"key2_2_1": "val2_2_1"
|
||||
}
|
||||
},
|
||||
"key3": 123
|
||||
}]`))
|
||||
if err != nil {
|
||||
assert.NoError(t, fmt.Errorf("Error Write %v", err))
|
||||
}
|
||||
})),
|
||||
clientOptionFns: nil,
|
||||
expected: []map[string]interface{}{
|
||||
{
|
||||
"key1": "val1",
|
||||
"key2": map[string]interface{}{
|
||||
"key2_1": "val2_1",
|
||||
"key2_2": map[string]interface{}{
|
||||
"key2_2_1": "val2_2_1",
|
||||
},
|
||||
},
|
||||
"key3": float64(123),
|
||||
},
|
||||
},
|
||||
expectedCode: 200,
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "With Token",
|
||||
params: map[string]string{
|
||||
"pkey1": "val1",
|
||||
"pkey2": "val2",
|
||||
},
|
||||
fakeServer: httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
authHeader := r.Header.Get("Authorization")
|
||||
if authHeader != "Bearer "+string("test-token") {
|
||||
w.WriteHeader(http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_, err := w.Write([]byte(`[{
|
||||
"key1": "val1",
|
||||
"key2": {
|
||||
"key2_1": "val2_1",
|
||||
"key2_2": {
|
||||
"key2_2_1": "val2_2_1"
|
||||
}
|
||||
},
|
||||
"key3": 123
|
||||
}]`))
|
||||
if err != nil {
|
||||
assert.NoError(t, fmt.Errorf("Error Write %v", err))
|
||||
}
|
||||
})),
|
||||
clientOptionFns: nil,
|
||||
expected: []map[string]interface{}(nil),
|
||||
expectedCode: 401,
|
||||
expectedError: fmt.Errorf("API error with status code 401: "),
|
||||
},
|
||||
} {
|
||||
cc := c
|
||||
t.Run(cc.name, func(t *testing.T) {
|
||||
defer cc.fakeServer.Close()
|
||||
|
||||
client, err := NewClient(cc.fakeServer.URL, cc.clientOptionFns...)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("NewClient returned unexpected error: %v", err)
|
||||
}
|
||||
|
||||
req, err := client.NewRequest("POST", "", cc.params, nil)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("NewRequest returned unexpected error: %v", err)
|
||||
}
|
||||
|
||||
var data []map[string]interface{}
|
||||
|
||||
resp, err := client.Do(ctx, req, &data)
|
||||
|
||||
if cc.expectedError != nil {
|
||||
assert.EqualError(t, err, cc.expectedError.Error())
|
||||
} else {
|
||||
assert.Equal(t, resp.StatusCode, cc.expectedCode)
|
||||
assert.Equal(t, data, cc.expected)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckResponse(t *testing.T) {
|
||||
resp := &http.Response{
|
||||
StatusCode: http.StatusBadRequest,
|
||||
Body: io.NopCloser(bytes.NewBufferString(`{"error":"invalid_request","description":"Invalid token"}`)),
|
||||
}
|
||||
|
||||
err := CheckResponse(resp)
|
||||
if err == nil {
|
||||
t.Error("Expected an error, got nil")
|
||||
}
|
||||
|
||||
expected := "API error with status code 400: invalid_request"
|
||||
if err.Error() != expected {
|
||||
t.Errorf("Expected error '%s', got '%s'", expected, err.Error())
|
||||
}
|
||||
}
|
||||
@@ -1,81 +0,0 @@
|
||||
// Code generated by mockery v2.25.1. DO NOT EDIT.
|
||||
|
||||
package mocks
|
||||
|
||||
import (
|
||||
context "context"
|
||||
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
)
|
||||
|
||||
// Repos is an autogenerated mock type for the Repos type
|
||||
type Repos struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
// GetDirectories provides a mock function with given fields: ctx, repoURL, revision
|
||||
func (_m *Repos) GetDirectories(ctx context.Context, repoURL string, revision string) ([]string, error) {
|
||||
ret := _m.Called(ctx, repoURL, revision)
|
||||
|
||||
var r0 []string
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, string, string) ([]string, error)); ok {
|
||||
return rf(ctx, repoURL, revision)
|
||||
}
|
||||
if rf, ok := ret.Get(0).(func(context.Context, string, string) []string); ok {
|
||||
r0 = rf(ctx, repoURL, revision)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).([]string)
|
||||
}
|
||||
}
|
||||
|
||||
if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok {
|
||||
r1 = rf(ctx, repoURL, revision)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// GetFiles provides a mock function with given fields: ctx, repoURL, revision, pattern
|
||||
func (_m *Repos) GetFiles(ctx context.Context, repoURL string, revision string, pattern string) (map[string][]byte, error) {
|
||||
ret := _m.Called(ctx, repoURL, revision, pattern)
|
||||
|
||||
var r0 map[string][]byte
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, string, string, string) (map[string][]byte, error)); ok {
|
||||
return rf(ctx, repoURL, revision, pattern)
|
||||
}
|
||||
if rf, ok := ret.Get(0).(func(context.Context, string, string, string) map[string][]byte); ok {
|
||||
r0 = rf(ctx, repoURL, revision, pattern)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(map[string][]byte)
|
||||
}
|
||||
}
|
||||
|
||||
if rf, ok := ret.Get(1).(func(context.Context, string, string, string) error); ok {
|
||||
r1 = rf(ctx, repoURL, revision, pattern)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
type mockConstructorTestingTNewRepos interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}
|
||||
|
||||
// NewRepos creates a new instance of Repos. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
func NewRepos(t mockConstructorTestingTNewRepos) *Repos {
|
||||
mock := &Repos{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
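
As with other mockery-generated mocks, expectations are set with testify's On/Return; a small sketch with placeholder arguments:

	// Illustrative only: the cleanup registered in NewRepos asserts the expectations.
	repos := mocks.NewRepos(t)
	repos.On("GetDirectories", mock.Anything, "https://github.com/org/repo.git", "HEAD").
		Return([]string{"apps/guestbook"}, nil)

	dirs, err := repos.GetDirectories(context.Background(), "https://github.com/org/repo.git", "HEAD")
	assert.NoError(t, err)
	assert.Equal(t, []string{"apps/guestbook"}, dirs)
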
|
||||
@@ -1,57 +0,0 @@
|
||||
// Code generated by mockery v2.21.1. DO NOT EDIT.
|
||||
|
||||
package mocks
|
||||
|
||||
import (
|
||||
context "context"
|
||||
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
|
||||
v1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
)
|
||||
|
||||
// RepositoryDB is an autogenerated mock type for the RepositoryDB type
|
||||
type RepositoryDB struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
// GetRepository provides a mock function with given fields: ctx, url
|
||||
func (_m *RepositoryDB) GetRepository(ctx context.Context, url string) (*v1alpha1.Repository, error) {
|
||||
ret := _m.Called(ctx, url)
|
||||
|
||||
var r0 *v1alpha1.Repository
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, string) (*v1alpha1.Repository, error)); ok {
|
||||
return rf(ctx, url)
|
||||
}
|
||||
if rf, ok := ret.Get(0).(func(context.Context, string) *v1alpha1.Repository); ok {
|
||||
r0 = rf(ctx, url)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*v1alpha1.Repository)
|
||||
}
|
||||
}
|
||||
|
||||
if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
|
||||
r1 = rf(ctx, url)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
type mockConstructorTestingTNewRepositoryDB interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}
|
||||
|
||||
// NewRepositoryDB creates a new instance of RepositoryDB. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
func NewRepositoryDB(t mockConstructorTestingTNewRepositoryDB) *RepositoryDB {
|
||||
mock := &RepositoryDB{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
|
||||
@@ -1,73 +0,0 @@
package plugin

import (
	"context"
	"fmt"
	"net/http"

	internalhttp "github.com/argoproj/argo-cd/v2/applicationset/services/internal/http"
	"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

// ServiceRequest is the request object sent to the plugin service.
type ServiceRequest struct {
	// ApplicationSetName is the name of the ApplicationSet for which we're requesting parameters. Useful for logging in
	// the plugin service.
	ApplicationSetName string `json:"applicationSetName"`
	// Input is the map of parameters set in the ApplicationSet spec for this generator.
	Input v1alpha1.PluginInput `json:"input"`
}

type Output struct {
	// Parameters is the list of parameter sets returned by the plugin.
	Parameters []map[string]interface{} `json:"parameters"`
}

// ServiceResponse is the response object returned by the plugin service.
type ServiceResponse struct {
	// Output is the map of outputs returned by the plugin.
	Output Output `json:"output"`
}

type Service struct {
	client     *internalhttp.Client
	appSetName string
}

func NewPluginService(ctx context.Context, appSetName string, baseURL string, token string, requestTimeout int) (*Service, error) {
	var clientOptionFns []internalhttp.ClientOptionFunc

	clientOptionFns = append(clientOptionFns, internalhttp.WithToken(token))

	if requestTimeout != 0 {
		clientOptionFns = append(clientOptionFns, internalhttp.WithTimeout(requestTimeout))
	}

	client, err := internalhttp.NewClient(baseURL, clientOptionFns...)
	if err != nil {
		return nil, fmt.Errorf("error creating plugin client: %v", err)
	}

	return &Service{
		client:     client,
		appSetName: appSetName,
	}, nil
}

func (p *Service) List(ctx context.Context, parameters v1alpha1.PluginParameters) (*ServiceResponse, error) {
	req, err := p.client.NewRequest(http.MethodPost, "api/v1/getparams.execute", ServiceRequest{ApplicationSetName: p.appSetName, Input: v1alpha1.PluginInput{Parameters: parameters}}, nil)

	if err != nil {
		return nil, fmt.Errorf("NewRequest returned unexpected error: %v", err)
	}

	var data ServiceResponse

	_, err = p.client.Do(ctx, req, &data)

	if err != nil {
		return nil, fmt.Errorf("error get api '%s': %v", p.appSetName, err)
	}

	return &data, err
}
|
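
A compact sketch of constructing and calling this service; the URL and token are placeholders, and a requestTimeout of 0 keeps the internal client's 30-second default:

	// Illustrative only.
	svc, err := NewPluginService(context.Background(), "my-appset", "http://my-plugin.argocd.svc", "my-token", 0)
	// error handling omitted in this sketch
	resp, err := svc.List(context.Background(), nil)
	// resp.Output.Parameters holds the parameter sets produced by the plugin.
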
||||
@@ -1,52 +0,0 @@
|
||||
package plugin
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestPlugin(t *testing.T) {
|
||||
expectedJSON := `{"parameters": [{"number":123,"digest":"sha256:942ae2dfd73088b54d7151a3c3fd5af038a51c50029bfcfd21f1e650d9579967"},{"number":456,"digest":"sha256:224e68cc69566e5cbbb76034b3c42cd2ed57c1a66720396e1c257794cb7d68c1"}]}`
|
||||
token := "0bc57212c3cbbec69d20b34c507284bd300def5b"
|
||||
|
||||
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
authHeader := r.Header.Get("Authorization")
|
||||
if authHeader != "Bearer "+token {
|
||||
w.WriteHeader(http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
_, err := w.Write([]byte(expectedJSON))
|
||||
|
||||
if err != nil {
|
||||
assert.NoError(t, fmt.Errorf("Error Write %v", err))
|
||||
}
|
||||
})
|
||||
ts := httptest.NewServer(handler)
|
||||
defer ts.Close()
|
||||
|
||||
client, err := NewPluginService(context.Background(), "plugin-test", ts.URL, token, 0)
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
data, err := client.List(context.Background(), nil)
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
var expectedData ServiceResponse
|
||||
err = json.Unmarshal([]byte(expectedJSON), &expectedData)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assert.Equal(t, &expectedData, data)
|
||||
}
|
||||
@@ -1,21 +0,0 @@
package plugin

import (
	"fmt"
	"strings"

	"github.com/argoproj/argo-cd/v2/common"
)

// ParseSecretKey retrieves the secret name if it differs from the common ArgoCDSecretName.
func ParseSecretKey(key string) (secretName string, tokenKey string) {
	if strings.Contains(key, ":") {
		parts := strings.Split(key, ":")
		secretName = parts[0][1:]
		tokenKey = fmt.Sprintf("$%s", parts[1])
	} else {
		secretName = common.ArgoCDSecretName
		tokenKey = key
	}
	return secretName, tokenKey
}
|
||||
@@ -1,17 +0,0 @@
package plugin

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestParseSecretKey(t *testing.T) {
	secretName, tokenKey := ParseSecretKey("#my-secret:my-token")
	assert.Equal(t, "my-secret", secretName)
	assert.Equal(t, "$my-token", tokenKey)

	secretName, tokenKey = ParseSecretKey("#my-secret")
	assert.Equal(t, "argocd-secret", secretName)
	assert.Equal(t, "#my-secret", tokenKey)
}
|
||||
@@ -1,145 +0,0 @@
package pull_request

import (
	"context"
	"fmt"
	"strings"

	"github.com/microsoft/azure-devops-go-api/azuredevops"
	core "github.com/microsoft/azure-devops-go-api/azuredevops/core"
	git "github.com/microsoft/azure-devops-go-api/azuredevops/git"
)

const AZURE_DEVOPS_DEFAULT_URL = "https://dev.azure.com"

type AzureDevOpsClientFactory interface {
	// Returns an Azure Devops Client interface.
	GetClient(ctx context.Context) (git.Client, error)
}

type devopsFactoryImpl struct {
	connection *azuredevops.Connection
}

func (factory *devopsFactoryImpl) GetClient(ctx context.Context) (git.Client, error) {
	gitClient, err := git.NewClient(ctx, factory.connection)
	if err != nil {
		return nil, fmt.Errorf("failed to get new Azure DevOps git client for pull request generator: %w", err)
	}
	return gitClient, nil
}

type AzureDevOpsService struct {
	clientFactory AzureDevOpsClientFactory
	project       string
	repo          string
	labels        []string
}

var _ PullRequestService = (*AzureDevOpsService)(nil)
var _ AzureDevOpsClientFactory = &devopsFactoryImpl{}

func NewAzureDevOpsService(ctx context.Context, token, url, organization, project, repo string, labels []string) (PullRequestService, error) {
	organizationUrl := buildURL(url, organization)

	var connection *azuredevops.Connection
	if token == "" {
		connection = azuredevops.NewAnonymousConnection(organizationUrl)
	} else {
		connection = azuredevops.NewPatConnection(organizationUrl, token)
	}

	return &AzureDevOpsService{
		clientFactory: &devopsFactoryImpl{connection: connection},
		project:       project,
		repo:          repo,
		labels:        labels,
	}, nil
}

func (a *AzureDevOpsService) List(ctx context.Context) ([]*PullRequest, error) {
	client, err := a.clientFactory.GetClient(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to get Azure DevOps client: %w", err)
	}

	args := git.GetPullRequestsByProjectArgs{
		Project:        &a.project,
		SearchCriteria: &git.GitPullRequestSearchCriteria{},
	}

	azurePullRequests, err := client.GetPullRequestsByProject(ctx, args)
	if err != nil {
		return nil, fmt.Errorf("failed to get pull requests by project: %w", err)
	}

	pullRequests := []*PullRequest{}

	for _, pr := range *azurePullRequests {
		if pr.Repository == nil ||
			pr.Repository.Name == nil ||
			pr.PullRequestId == nil ||
			pr.SourceRefName == nil ||
			pr.LastMergeSourceCommit == nil ||
			pr.LastMergeSourceCommit.CommitId == nil {
			continue
		}

		azureDevOpsLabels := convertLabels(pr.Labels)
		if !containAzureDevOpsLabels(a.labels, azureDevOpsLabels) {
			continue
		}

		if *pr.Repository.Name == a.repo {
			pullRequests = append(pullRequests, &PullRequest{
				Number:  *pr.PullRequestId,
				Branch:  strings.Replace(*pr.SourceRefName, "refs/heads/", "", 1),
				HeadSHA: *pr.LastMergeSourceCommit.CommitId,
				Labels:  azureDevOpsLabels,
			})
		}
	}

	return pullRequests, nil
}

// convertLabels converts WebApiTagDefinitions to strings
func convertLabels(tags *[]core.WebApiTagDefinition) []string {
	if tags == nil {
		return []string{}
	}
	labelStrings := make([]string, len(*tags))
	for i, label := range *tags {
		labelStrings[i] = *label.Name
	}
	return labelStrings
}

// containAzureDevOpsLabels returns true if gotLabels contains expectedLabels
func containAzureDevOpsLabels(expectedLabels []string, gotLabels []string) bool {
	for _, expected := range expectedLabels {
		found := false
		for _, got := range gotLabels {
			if expected == got {
				found = true
				break
			}
		}
		if !found {
			return false
		}
	}
	return true
}

func buildURL(url, organization string) string {
	if url == "" {
		url = AZURE_DEVOPS_DEFAULT_URL
	}
	separator := ""
	if !strings.HasSuffix(url, "/") {
		separator = "/"
	}
	devOpsURL := fmt.Sprintf("%s%s%s", url, separator, organization)
	return devOpsURL
}
|
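
The tests below exercise AzureDevOpsService through a mocked client factory; for completeness, a sketch of the public constructor path with placeholder names:

	// Illustrative only: an empty URL falls back to AZURE_DEVOPS_DEFAULT_URL.
	svc, err := NewAzureDevOpsService(context.Background(), "my-pat", "", "myorganization", "myorg_project", "myorg_project_repo", []string{"prod"})
	// error handling omitted in this sketch
	prs, err := svc.List(context.Background())
	// prs contains only pull requests from "myorg_project_repo" whose labels include "prod".
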
||||
@@ -1,221 +0,0 @@
|
||||
package pull_request
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/microsoft/azure-devops-go-api/azuredevops/core"
|
||||
git "github.com/microsoft/azure-devops-go-api/azuredevops/git"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
|
||||
azureMock "github.com/argoproj/argo-cd/v2/applicationset/services/scm_provider/azure_devops/git/mocks"
|
||||
)
|
||||
|
||||
func createBoolPtr(x bool) *bool {
|
||||
return &x
|
||||
}
|
||||
|
||||
func createStringPtr(x string) *string {
|
||||
return &x
|
||||
}
|
||||
|
||||
func createIntPtr(x int) *int {
|
||||
return &x
|
||||
}
|
||||
|
||||
func createLabelsPtr(x []core.WebApiTagDefinition) *[]core.WebApiTagDefinition {
|
||||
return &x
|
||||
}
|
||||
|
||||
type AzureClientFactoryMock struct {
|
||||
mock *mock.Mock
|
||||
}
|
||||
|
||||
func (m *AzureClientFactoryMock) GetClient(ctx context.Context) (git.Client, error) {
|
||||
args := m.mock.Called(ctx)
|
||||
|
||||
var client git.Client
|
||||
c := args.Get(0)
|
||||
if c != nil {
|
||||
client = c.(git.Client)
|
||||
}
|
||||
|
||||
var err error
|
||||
if len(args) > 1 {
|
||||
if e, ok := args.Get(1).(error); ok {
|
||||
err = e
|
||||
}
|
||||
}
|
||||
|
||||
return client, err
|
||||
}
|
||||
|
||||
func TestListPullRequest(t *testing.T) {
|
||||
teamProject := "myorg_project"
|
||||
repoName := "myorg_project_repo"
|
||||
pr_id := 123
|
||||
pr_head_sha := "cd4973d9d14a08ffe6b641a89a68891d6aac8056"
|
||||
ctx := context.Background()
|
||||
|
||||
pullRequestMock := []git.GitPullRequest{
|
||||
{
|
||||
PullRequestId: createIntPtr(pr_id),
|
||||
SourceRefName: createStringPtr("refs/heads/feature-branch"),
|
||||
LastMergeSourceCommit: &git.GitCommitRef{
|
||||
CommitId: createStringPtr(pr_head_sha),
|
||||
},
|
||||
Labels: &[]core.WebApiTagDefinition{},
|
||||
Repository: &git.GitRepository{
|
||||
Name: createStringPtr(repoName),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
args := git.GetPullRequestsByProjectArgs{
|
||||
Project: &teamProject,
|
||||
SearchCriteria: &git.GitPullRequestSearchCriteria{},
|
||||
}
|
||||
|
||||
gitClientMock := azureMock.Client{}
|
||||
clientFactoryMock := &AzureClientFactoryMock{mock: &mock.Mock{}}
|
||||
clientFactoryMock.mock.On("GetClient", mock.Anything).Return(&gitClientMock, nil)
|
||||
gitClientMock.On("GetPullRequestsByProject", ctx, args).Return(&pullRequestMock, nil)
|
||||
|
||||
provider := AzureDevOpsService{
|
||||
clientFactory: clientFactoryMock,
|
||||
project: teamProject,
|
||||
repo: repoName,
|
||||
labels: nil,
|
||||
}
|
||||
|
||||
list, err := provider.List(ctx)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(list))
|
||||
assert.Equal(t, "feature-branch", list[0].Branch)
|
||||
assert.Equal(t, pr_head_sha, list[0].HeadSHA)
|
||||
assert.Equal(t, pr_id, list[0].Number)
|
||||
}
|
||||
|
||||
func TestConvertLabes(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
gotLabels *[]core.WebApiTagDefinition
|
||||
expectedLabels []string
|
||||
}{
|
||||
{
|
||||
name: "empty labels",
|
||||
gotLabels: createLabelsPtr([]core.WebApiTagDefinition{}),
|
||||
expectedLabels: []string{},
|
||||
},
|
||||
{
|
||||
name: "nil labels",
|
||||
gotLabels: createLabelsPtr(nil),
|
||||
expectedLabels: []string{},
|
||||
},
|
||||
{
|
||||
name: "one label",
|
||||
gotLabels: createLabelsPtr([]core.WebApiTagDefinition{
|
||||
{Name: createStringPtr("label1"), Active: createBoolPtr(true)},
|
||||
}),
|
||||
expectedLabels: []string{"label1"},
|
||||
},
|
||||
{
|
||||
name: "two label",
|
||||
gotLabels: createLabelsPtr([]core.WebApiTagDefinition{
|
||||
{Name: createStringPtr("label1"), Active: createBoolPtr(true)},
|
||||
{Name: createStringPtr("label2"), Active: createBoolPtr(true)},
|
||||
}),
|
||||
expectedLabels: []string{"label1", "label2"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
got := convertLabels(tc.gotLabels)
|
||||
assert.Equal(t, tc.expectedLabels, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestContainAzureDevOpsLabels(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
expectedLabels []string
|
||||
gotLabels []string
|
||||
expectedResult bool
|
||||
}{
|
||||
{
|
||||
name: "empty labels",
|
||||
expectedLabels: []string{},
|
||||
gotLabels: []string{},
|
||||
expectedResult: true,
|
||||
},
|
||||
{
|
||||
name: "no matching labels",
|
||||
expectedLabels: []string{"label1", "label2"},
|
||||
gotLabels: []string{"label3", "label4"},
|
||||
expectedResult: false,
|
||||
},
|
||||
{
|
||||
name: "some matching labels",
|
||||
expectedLabels: []string{"label1", "label2"},
|
||||
gotLabels: []string{"label1", "label3"},
|
||||
expectedResult: false,
|
||||
},
|
||||
{
|
||||
name: "all matching labels",
|
||||
expectedLabels: []string{"label1", "label2"},
|
||||
gotLabels: []string{"label1", "label2"},
|
||||
expectedResult: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
got := containAzureDevOpsLabels(tc.expectedLabels, tc.gotLabels)
|
||||
assert.Equal(t, tc.expectedResult, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildURL(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
url string
|
||||
organization string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "Provided default URL and organization",
|
||||
url: "https://dev.azure.com/",
|
||||
organization: "myorganization",
|
||||
expected: "https://dev.azure.com/myorganization",
|
||||
},
|
||||
{
|
||||
name: "Provided default URL and organization without trailing slash",
|
||||
url: "https://dev.azure.com",
|
||||
organization: "myorganization",
|
||||
expected: "https://dev.azure.com/myorganization",
|
||||
},
|
||||
{
|
||||
name: "Provided no URL and organization",
|
||||
url: "",
|
||||
organization: "myorganization",
|
||||
expected: "https://dev.azure.com/myorganization",
|
||||
},
|
||||
{
|
||||
name: "Provided custom URL and organization",
|
||||
url: "https://azuredevops.mycompany.com/",
|
||||
organization: "myorganization",
|
||||
expected: "https://azuredevops.mycompany.com/myorganization",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
result := buildURL(tc.url, tc.organization)
|
||||
assert.Equal(t, result, tc.expected)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,138 +0,0 @@
|
||||
package pull_request
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
|
||||
"github.com/ktrysmt/go-bitbucket"
|
||||
)
|
||||
|
||||
type BitbucketCloudService struct {
|
||||
client *bitbucket.Client
|
||||
owner string
|
||||
repositorySlug string
|
||||
}
|
||||
|
||||
type BitbucketCloudPullRequest struct {
|
||||
ID int `json:"id"`
|
||||
Source BitbucketCloudPullRequestSource `json:"source"`
|
||||
}
|
||||
|
||||
type BitbucketCloudPullRequestSource struct {
|
||||
Branch BitbucketCloudPullRequestSourceBranch `json:"branch"`
|
||||
Commit BitbucketCloudPullRequestSourceCommit `json:"commit"`
|
||||
}
|
||||
|
||||
type BitbucketCloudPullRequestSourceBranch struct {
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
type BitbucketCloudPullRequestSourceCommit struct {
|
||||
Hash string `json:"hash"`
|
||||
}
|
||||
|
||||
type PullRequestResponse struct {
|
||||
Page int32 `json:"page"`
|
||||
Size int32 `json:"size"`
|
||||
Pagelen int32 `json:"pagelen"`
|
||||
Next string `json:"next"`
|
||||
Previous string `json:"previous"`
|
||||
Items []PullRequest `json:"values"`
|
||||
}
|
||||
|
||||
var _ PullRequestService = (*BitbucketCloudService)(nil)
|
||||
|
||||
func parseUrl(uri string) (*url.URL, error) {
|
||||
if uri == "" {
|
||||
uri = "https://api.bitbucket.org/2.0"
|
||||
}
|
||||
|
||||
url, err := url.Parse(uri)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return url, nil
|
||||
}
|
||||
|
||||
func NewBitbucketCloudServiceBasicAuth(baseUrl, username, password, owner, repositorySlug string) (PullRequestService, error) {
|
||||
url, err := parseUrl(baseUrl)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing base url of %s for %s/%s: %v", baseUrl, owner, repositorySlug, err)
|
||||
}
|
||||
|
||||
bitbucketClient := bitbucket.NewBasicAuth(username, password)
|
||||
bitbucketClient.SetApiBaseURL(*url)
|
||||
|
||||
return &BitbucketCloudService{
|
||||
client: bitbucketClient,
|
||||
owner: owner,
|
||||
repositorySlug: repositorySlug,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func NewBitbucketCloudServiceBearerToken(baseUrl, bearerToken, owner, repositorySlug string) (PullRequestService, error) {
|
||||
url, err := parseUrl(baseUrl)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing base url of %s for %s/%s: %v", baseUrl, owner, repositorySlug, err)
|
||||
}
|
||||
|
||||
bitbucketClient := bitbucket.NewOAuthbearerToken(bearerToken)
|
||||
bitbucketClient.SetApiBaseURL(*url)
|
||||
|
||||
return &BitbucketCloudService{
|
||||
client: bitbucketClient,
|
||||
owner: owner,
|
||||
repositorySlug: repositorySlug,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func NewBitbucketCloudServiceNoAuth(baseUrl, owner, repositorySlug string) (PullRequestService, error) {
|
||||
// There is currently no method to explicitly not require auth
|
||||
return NewBitbucketCloudServiceBearerToken(baseUrl, "", owner, repositorySlug)
|
||||
}
|
||||
|
||||
func (b *BitbucketCloudService) List(_ context.Context) ([]*PullRequest, error) {
|
||||
opts := &bitbucket.PullRequestsOptions{
|
||||
Owner: b.owner,
|
||||
RepoSlug: b.repositorySlug,
|
||||
}
|
||||
|
||||
response, err := b.client.Repositories.PullRequests.Gets(opts)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error listing pull requests for %s/%s: %v", b.owner, b.repositorySlug, err)
|
||||
}
|
||||
|
||||
resp, ok := response.(map[string]interface{})
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unknown type returned from bitbucket pull requests")
|
||||
}
|
||||
|
||||
repoArray, ok := resp["values"].([]interface{})
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unknown type returned from response values")
|
||||
}
|
||||
|
||||
jsonStr, err := json.Marshal(repoArray)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error marshalling response body to json: %v", err)
|
||||
}
|
||||
|
||||
var pulls []BitbucketCloudPullRequest
|
||||
if err := json.Unmarshal(jsonStr, &pulls); err != nil {
|
||||
return nil, fmt.Errorf("error unmarshalling json to type '[]BitbucketCloudPullRequest': %v", err)
|
||||
}
|
||||
|
||||
pullRequests := []*PullRequest{}
|
||||
for _, pull := range pulls {
|
||||
pullRequests = append(pullRequests, &PullRequest{
|
||||
Number: pull.ID,
|
||||
Branch: pull.Source.Branch.Name,
|
||||
HeadSHA: pull.Source.Commit.Hash,
|
||||
})
|
||||
}
|
||||
|
||||
return pullRequests, nil
|
||||
}
|
||||
@@ -1,410 +0,0 @@
|
||||
package pull_request
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
)
|
||||
|
||||
func defaultHandlerCloud(t *testing.T) func(http.ResponseWriter, *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
var err error
|
||||
switch r.RequestURI {
|
||||
case "/repositories/OWNER/REPO/pullrequests/":
|
||||
_, err = io.WriteString(w, `{
|
||||
"size": 1,
|
||||
"pagelen": 10,
|
||||
"page": 1,
|
||||
"values": [
|
||||
{
|
||||
"id": 101,
|
||||
"source": {
|
||||
"branch": {
|
||||
"name": "feature/foo-bar"
|
||||
},
|
||||
"commit": {
|
||||
"type": "commit",
|
||||
"hash": "1a8dd249c04a"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}`)
|
||||
default:
|
||||
t.Fail()
|
||||
}
|
||||
if err != nil {
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseUrlEmptyUrl(t *testing.T) {
|
||||
url, err := parseUrl("")
|
||||
bitbucketUrl, _ := url.Parse("https://api.bitbucket.org/2.0")
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, bitbucketUrl, url)
|
||||
}
|
||||
|
||||
func TestInvalidBaseUrlBasicAuthCloud(t *testing.T) {
|
||||
_, err := NewBitbucketCloudServiceBasicAuth("http:// example.org", "user", "password", "OWNER", "REPO")
|
||||
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestInvalidBaseUrlBearerTokenCloud(t *testing.T) {
|
||||
_, err := NewBitbucketCloudServiceBearerToken("http:// example.org", "TOKEN", "OWNER", "REPO")
|
||||
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestInvalidBaseUrlNoAuthCloud(t *testing.T) {
|
||||
_, err := NewBitbucketCloudServiceNoAuth("http:// example.org", "OWNER", "REPO")
|
||||
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestListPullRequestBearerTokenCloud(t *testing.T) {
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
assert.Equal(t, "Bearer TOKEN", r.Header.Get("Authorization"))
|
||||
defaultHandlerCloud(t)(w, r)
|
||||
}))
|
||||
defer ts.Close()
|
||||
svc, err := NewBitbucketCloudServiceBearerToken(ts.URL, "TOKEN", "OWNER", "REPO")
|
||||
assert.NoError(t, err)
|
||||
pullRequests, err := ListPullRequests(context.Background(), svc, []v1alpha1.PullRequestGeneratorFilter{})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(pullRequests))
|
||||
assert.Equal(t, 101, pullRequests[0].Number)
|
||||
assert.Equal(t, "feature/foo-bar", pullRequests[0].Branch)
|
||||
assert.Equal(t, "1a8dd249c04a", pullRequests[0].HeadSHA)
|
||||
}
|
||||
|
||||
func TestListPullRequestNoAuthCloud(t *testing.T) {
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
assert.Empty(t, r.Header.Get("Authorization"))
|
||||
defaultHandlerCloud(t)(w, r)
|
||||
}))
|
||||
defer ts.Close()
|
||||
svc, err := NewBitbucketCloudServiceNoAuth(ts.URL, "OWNER", "REPO")
|
||||
assert.NoError(t, err)
|
||||
pullRequests, err := ListPullRequests(context.Background(), svc, []v1alpha1.PullRequestGeneratorFilter{})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(pullRequests))
|
||||
assert.Equal(t, 101, pullRequests[0].Number)
|
||||
assert.Equal(t, "feature/foo-bar", pullRequests[0].Branch)
|
||||
assert.Equal(t, "1a8dd249c04a", pullRequests[0].HeadSHA)
|
||||
}
|
||||
|
||||
func TestListPullRequestBasicAuthCloud(t *testing.T) {
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
assert.Equal(t, "Basic dXNlcjpwYXNzd29yZA==", r.Header.Get("Authorization"))
|
||||
defaultHandlerCloud(t)(w, r)
|
||||
}))
|
||||
defer ts.Close()
|
||||
svc, err := NewBitbucketCloudServiceBasicAuth(ts.URL, "user", "password", "OWNER", "REPO")
|
||||
assert.NoError(t, err)
|
||||
pullRequests, err := ListPullRequests(context.Background(), svc, []v1alpha1.PullRequestGeneratorFilter{})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(pullRequests))
|
||||
assert.Equal(t, 101, pullRequests[0].Number)
|
||||
assert.Equal(t, "feature/foo-bar", pullRequests[0].Branch)
|
||||
assert.Equal(t, "1a8dd249c04a", pullRequests[0].HeadSHA)
|
||||
}
|
||||
|
||||
func TestListPullRequestPaginationCloud(t *testing.T) {
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
var err error
|
||||
switch r.RequestURI {
|
||||
case "/repositories/OWNER/REPO/pullrequests/":
|
||||
_, err = io.WriteString(w, fmt.Sprintf(`{
|
||||
"size": 2,
|
||||
"pagelen": 1,
|
||||
"page": 1,
|
||||
"next": "http://%s/repositories/OWNER/REPO/pullrequests/?pagelen=1&page=2",
|
||||
"values": [
|
||||
{
|
||||
"id": 101,
|
||||
"source": {
|
||||
"branch": {
|
||||
"name": "feature-101"
|
||||
},
|
||||
"commit": {
|
||||
"type": "commit",
|
||||
"hash": "1a8dd249c04a"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 102,
|
||||
"source": {
|
||||
"branch": {
|
||||
"name": "feature-102"
|
||||
},
|
||||
"commit": {
|
||||
"type": "commit",
|
||||
"hash": "4cf807e67a6d"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}`, r.Host))
|
||||
case "/repositories/OWNER/REPO/pullrequests/?pagelen=1&page=2":
|
||||
_, err = io.WriteString(w, fmt.Sprintf(`{
|
||||
"size": 2,
|
||||
"pagelen": 1,
|
||||
"page": 2,
|
||||
"previous": "http://%s/repositories/OWNER/REPO/pullrequests/?pagelen=1&page=1",
|
||||
"values": [
|
||||
{
|
||||
"id": 103,
|
||||
"source": {
|
||||
"branch": {
|
||||
"name": "feature-103"
|
||||
},
|
||||
"commit": {
|
||||
"type": "commit",
|
||||
"hash": "6344d9623e3b"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}`, r.Host))
|
||||
default:
|
||||
t.Fail()
|
||||
}
|
||||
if err != nil {
|
||||
t.Fail()
|
||||
}
|
||||
}))
|
||||
defer ts.Close()
|
||||
svc, err := NewBitbucketCloudServiceNoAuth(ts.URL, "OWNER", "REPO")
|
||||
assert.NoError(t, err)
|
||||
pullRequests, err := ListPullRequests(context.Background(), svc, []v1alpha1.PullRequestGeneratorFilter{})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 3, len(pullRequests))
|
||||
assert.Equal(t, PullRequest{
|
||||
Number: 101,
|
||||
Branch: "feature-101",
|
||||
HeadSHA: "1a8dd249c04a",
|
||||
}, *pullRequests[0])
|
||||
assert.Equal(t, PullRequest{
|
||||
Number: 102,
|
||||
Branch: "feature-102",
|
||||
HeadSHA: "4cf807e67a6d",
|
||||
}, *pullRequests[1])
|
||||
assert.Equal(t, PullRequest{
|
||||
Number: 103,
|
||||
Branch: "feature-103",
|
||||
HeadSHA: "6344d9623e3b",
|
||||
}, *pullRequests[2])
|
||||
}
|
||||
|
||||
func TestListResponseErrorCloud(t *testing.T) {
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(500)
|
||||
}))
|
||||
defer ts.Close()
|
||||
svc, _ := NewBitbucketCloudServiceNoAuth(ts.URL, "OWNER", "REPO")
|
||||
_, err := ListPullRequests(context.Background(), svc, []v1alpha1.PullRequestGeneratorFilter{})
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestListResponseMalformedCloud(t *testing.T) {
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
switch r.RequestURI {
|
||||
case "/repositories/OWNER/REPO/pullrequests/":
|
||||
_, err := io.WriteString(w, `[{
|
||||
"size": 1,
|
||||
"pagelen": 10,
|
||||
"page": 1,
|
||||
"values": [{ "id": 101 }]
|
||||
}]`)
|
||||
if err != nil {
|
||||
t.Fail()
|
||||
}
|
||||
default:
|
||||
t.Fail()
|
||||
}
|
||||
}))
|
||||
defer ts.Close()
|
||||
svc, _ := NewBitbucketCloudServiceNoAuth(ts.URL, "OWNER", "REPO")
|
||||
_, err := ListPullRequests(context.Background(), svc, []v1alpha1.PullRequestGeneratorFilter{})
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestListResponseMalformedValuesCloud(t *testing.T) {
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
switch r.RequestURI {
|
||||
case "/repositories/OWNER/REPO/pullrequests/":
|
||||
_, err := io.WriteString(w, `{
|
||||
"size": 1,
|
||||
"pagelen": 10,
|
||||
"page": 1,
|
||||
"values": { "id": 101 }
|
||||
}`)
|
||||
if err != nil {
|
||||
t.Fail()
|
||||
}
|
||||
default:
|
||||
t.Fail()
|
||||
}
|
||||
}))
|
||||
defer ts.Close()
|
||||
svc, _ := NewBitbucketCloudServiceNoAuth(ts.URL, "OWNER", "REPO")
|
||||
_, err := ListPullRequests(context.Background(), svc, []v1alpha1.PullRequestGeneratorFilter{})
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestListResponseEmptyCloud(t *testing.T) {
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
switch r.RequestURI {
|
||||
case "/repositories/OWNER/REPO/pullrequests/":
|
||||
_, err := io.WriteString(w, `{
|
||||
"size": 1,
|
||||
"pagelen": 10,
|
||||
"page": 1,
|
||||
"values": []
|
||||
}`)
|
||||
if err != nil {
|
||||
t.Fail()
|
||||
}
|
||||
default:
|
||||
t.Fail()
|
||||
}
|
||||
}))
|
||||
defer ts.Close()
|
||||
svc, err := NewBitbucketCloudServiceNoAuth(ts.URL, "OWNER", "REPO")
|
||||
assert.NoError(t, err)
|
||||
pullRequests, err := ListPullRequests(context.Background(), svc, []v1alpha1.PullRequestGeneratorFilter{})
|
||||
assert.NoError(t, err)
|
||||
assert.Empty(t, pullRequests)
|
||||
}
|
||||
|
||||
func TestListPullRequestBranchMatchCloud(t *testing.T) {
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
var err error
|
||||
switch r.RequestURI {
|
||||
case "/repositories/OWNER/REPO/pullrequests/":
|
||||
_, err = io.WriteString(w, fmt.Sprintf(`{
|
||||
"size": 2,
|
||||
"pagelen": 1,
|
||||
"page": 1,
|
||||
"next": "http://%s/repositories/OWNER/REPO/pullrequests/?pagelen=1&page=2",
|
||||
"values": [
|
||||
{
|
||||
"id": 101,
|
||||
"source": {
|
||||
"branch": {
|
||||
"name": "feature-101"
|
||||
},
|
||||
"commit": {
|
||||
"type": "commit",
|
||||
"hash": "1a8dd249c04a"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 200,
|
||||
"source": {
|
||||
"branch": {
|
||||
"name": "feature-200"
|
||||
},
|
||||
"commit": {
|
||||
"type": "commit",
|
||||
"hash": "4cf807e67a6d"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}`, r.Host))
|
||||
case "/repositories/OWNER/REPO/pullrequests/?pagelen=1&page=2":
|
||||
_, err = io.WriteString(w, fmt.Sprintf(`{
|
||||
"size": 2,
|
||||
"pagelen": 1,
|
||||
"page": 2,
|
||||
"previous": "http://%s/repositories/OWNER/REPO/pullrequests/?pagelen=1&page=1",
|
||||
"values": [
|
||||
{
|
||||
"id": 102,
|
||||
"source": {
|
||||
"branch": {
|
||||
"name": "feature-102"
|
||||
},
|
||||
"commit": {
|
||||
"type": "commit",
|
||||
"hash": "6344d9623e3b"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}`, r.Host))
|
||||
default:
|
||||
t.Fail()
|
||||
}
|
||||
if err != nil {
|
||||
t.Fail()
|
||||
}
|
||||
}))
|
||||
	defer ts.Close()
	regexp := `feature-1[\d]{2}`
	svc, err := NewBitbucketCloudServiceNoAuth(ts.URL, "OWNER", "REPO")
	assert.NoError(t, err)
	pullRequests, err := ListPullRequests(context.Background(), svc, []v1alpha1.PullRequestGeneratorFilter{
		{
			BranchMatch: &regexp,
		},
	})
	assert.NoError(t, err)
	assert.Equal(t, 2, len(pullRequests))
	assert.Equal(t, PullRequest{
		Number:  101,
		Branch:  "feature-101",
		HeadSHA: "1a8dd249c04a",
	}, *pullRequests[0])
	assert.Equal(t, PullRequest{
		Number:  102,
		Branch:  "feature-102",
		HeadSHA: "6344d9623e3b",
	}, *pullRequests[1])

	regexp = `.*2$`
	svc, err = NewBitbucketCloudServiceNoAuth(ts.URL, "OWNER", "REPO")
	assert.NoError(t, err)
	pullRequests, err = ListPullRequests(context.Background(), svc, []v1alpha1.PullRequestGeneratorFilter{
		{
			BranchMatch: &regexp,
		},
	})
	assert.NoError(t, err)
	assert.Equal(t, 1, len(pullRequests))
	assert.Equal(t, PullRequest{
		Number:  102,
		Branch:  "feature-102",
		HeadSHA: "6344d9623e3b",
	}, *pullRequests[0])

	regexp = `[\d{2}`
	svc, err = NewBitbucketCloudServiceNoAuth(ts.URL, "OWNER", "REPO")
	assert.NoError(t, err)
	_, err = ListPullRequests(context.Background(), svc, []v1alpha1.PullRequestGeneratorFilter{
		{
			BranchMatch: &regexp,
		},
	})
	assert.Error(t, err)
}

@@ -66,11 +66,9 @@ func (b *BitbucketService) List(_ context.Context) ([]*PullRequest, error) {

	for _, pull := range pulls {
		pullRequests = append(pullRequests, &PullRequest{
-			Number:       pull.ID,
-			Branch:       pull.FromRef.DisplayID, // ID: refs/heads/main DisplayID: main
-			TargetBranch: pull.ToRef.DisplayID,
-			HeadSHA:      pull.FromRef.LatestCommit, // This is not defined in the official docs, but works in practice
-			Labels:       []string{}, // Not supported by library
+			Number:  pull.ID,
+			Branch:  pull.FromRef.DisplayID, // ID: refs/heads/main DisplayID: main
+			HeadSHA: pull.FromRef.LatestCommit, // This is not defined in the official docs, but works in practice
		})
	}

@@ -24,11 +24,6 @@ func defaultHandler(t *testing.T) func(http.ResponseWriter, *http.Request) {
			"values": [
				{
					"id": 101,
-					"toRef": {
-						"latestCommit": "5b766e3564a3453808f3cd3dd3f2e5fad8ef0e7a",
-						"displayId": "master",
-						"id": "refs/heads/master"
-					},
					"fromRef": {
						"id": "refs/heads/feature-ABC-123",
						"displayId": "feature-ABC-123",
@@ -60,7 +55,6 @@ func TestListPullRequestNoAuth(t *testing.T) {
	assert.Equal(t, 1, len(pullRequests))
	assert.Equal(t, 101, pullRequests[0].Number)
	assert.Equal(t, "feature-ABC-123", pullRequests[0].Branch)
-	assert.Equal(t, "master", pullRequests[0].TargetBranch)
	assert.Equal(t, "cb3cf2e4d1517c83e720d2585b9402dbef71f992", pullRequests[0].HeadSHA)
}

@@ -77,11 +71,6 @@ func TestListPullRequestPagination(t *testing.T) {
			"values": [
				{
					"id": 101,
-					"toRef": {
-						"latestCommit": "5b766e3564a3453808f3cd3dd3f2e5fad8ef0e7a",
-						"displayId": "master",
-						"id": "refs/heads/master"
-					},
					"fromRef": {
						"id": "refs/heads/feature-101",
						"displayId": "feature-101",
@@ -90,11 +79,6 @@ func TestListPullRequestPagination(t *testing.T) {
				},
				{
					"id": 102,
-					"toRef": {
-						"latestCommit": "5b766e3564a3453808f3cd3dd3f2e5fad8ef0e7a",
-						"displayId": "branch",
-						"id": "refs/heads/branch"
-					},
					"fromRef": {
						"id": "refs/heads/feature-102",
						"displayId": "feature-102",
@@ -112,11 +96,6 @@ func TestListPullRequestPagination(t *testing.T) {
			"values": [
				{
					"id": 200,
-					"toRef": {
-						"latestCommit": "5b766e3564a3453808f3cd3dd3f2e5fad8ef0e7a",
-						"displayId": "master",
-						"id": "refs/heads/master"
-					},
					"fromRef": {
						"id": "refs/heads/feature-200",
						"displayId": "feature-200",
@@ -140,25 +119,19 @@ func TestListPullRequestPagination(t *testing.T) {
	assert.NoError(t, err)
	assert.Equal(t, 3, len(pullRequests))
	assert.Equal(t, PullRequest{
-		Number:       101,
-		Branch:       "feature-101",
-		TargetBranch: "master",
-		HeadSHA:      "ab3cf2e4d1517c83e720d2585b9402dbef71f992",
-		Labels:       []string{},
+		Number:  101,
+		Branch:  "feature-101",
+		HeadSHA: "ab3cf2e4d1517c83e720d2585b9402dbef71f992",
	}, *pullRequests[0])
	assert.Equal(t, PullRequest{
-		Number:       102,
-		Branch:       "feature-102",
-		TargetBranch: "branch",
-		HeadSHA:      "bb3cf2e4d1517c83e720d2585b9402dbef71f992",
-		Labels:       []string{},
+		Number:  102,
+		Branch:  "feature-102",
+		HeadSHA: "bb3cf2e4d1517c83e720d2585b9402dbef71f992",
	}, *pullRequests[1])
	assert.Equal(t, PullRequest{
-		Number:       200,
-		Branch:       "feature-200",
-		TargetBranch: "master",
-		HeadSHA:      "cb3cf2e4d1517c83e720d2585b9402dbef71f992",
-		Labels:       []string{},
+		Number:  200,
+		Branch:  "feature-200",
+		HeadSHA: "cb3cf2e4d1517c83e720d2585b9402dbef71f992",
	}, *pullRequests[2])
}

@@ -182,7 +155,7 @@ func TestListPullRequestBasicAuth(t *testing.T) {

func TestListResponseError(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		w.WriteHeader(http.StatusInternalServerError)
+		w.WriteHeader(500)
	}))
	defer ts.Close()
	svc, _ := NewBitbucketServiceNoAuth(context.Background(), ts.URL, "PROJECT", "REPO")
@@ -255,11 +228,6 @@ func TestListPullRequestBranchMatch(t *testing.T) {
			"values": [
				{
					"id": 101,
-					"toRef": {
-						"latestCommit": "5b766e3564a3453808f3cd3dd3f2e5fad8ef0e7a",
-						"displayId": "master",
-						"id": "refs/heads/master"
-					},
					"fromRef": {
						"id": "refs/heads/feature-101",
						"displayId": "feature-101",
@@ -268,11 +236,6 @@ func TestListPullRequestBranchMatch(t *testing.T) {
				},
				{
					"id": 102,
-					"toRef": {
-						"latestCommit": "5b766e3564a3453808f3cd3dd3f2e5fad8ef0e7a",
-						"displayId": "branch",
-						"id": "refs/heads/branch"
-					},
					"fromRef": {
						"id": "refs/heads/feature-102",
						"displayId": "feature-102",
@@ -290,11 +253,6 @@ func TestListPullRequestBranchMatch(t *testing.T) {
			"values": [
				{
					"id": 200,
-					"toRef": {
-						"latestCommit": "5b766e3564a3453808f3cd3dd3f2e5fad8ef0e7a",
-						"displayId": "master",
-						"id": "refs/heads/master"
-					},
					"fromRef": {
						"id": "refs/heads/feature-200",
						"displayId": "feature-200",
@@ -323,18 +281,14 @@ func TestListPullRequestBranchMatch(t *testing.T) {
	assert.NoError(t, err)
	assert.Equal(t, 2, len(pullRequests))
	assert.Equal(t, PullRequest{
-		Number:       101,
-		Branch:       "feature-101",
-		TargetBranch: "master",
-		HeadSHA:      "ab3cf2e4d1517c83e720d2585b9402dbef71f992",
-		Labels:       []string{},
+		Number:  101,
+		Branch:  "feature-101",
+		HeadSHA: "ab3cf2e4d1517c83e720d2585b9402dbef71f992",
	}, *pullRequests[0])
	assert.Equal(t, PullRequest{
-		Number:       102,
-		Branch:       "feature-102",
-		TargetBranch: "branch",
-		HeadSHA:      "bb3cf2e4d1517c83e720d2585b9402dbef71f992",
-		Labels:       []string{},
+		Number:  102,
+		Branch:  "feature-102",
+		HeadSHA: "bb3cf2e4d1517c83e720d2585b9402dbef71f992",
	}, *pullRequests[1])

	regexp = `.*2$`
@@ -348,11 +302,9 @@ func TestListPullRequestBranchMatch(t *testing.T) {
	assert.NoError(t, err)
	assert.Equal(t, 1, len(pullRequests))
	assert.Equal(t, PullRequest{
-		Number:       102,
-		Branch:       "feature-102",
-		TargetBranch: "branch",
-		HeadSHA:      "bb3cf2e4d1517c83e720d2585b9402dbef71f992",
-		Labels:       []string{},
+		Number:  102,
+		Branch:  "feature-102",
+		HeadSHA: "bb3cf2e4d1517c83e720d2585b9402dbef71f992",
	}, *pullRequests[0])

	regexp = `[\d{2}`

@@ -54,21 +54,10 @@ func (g *GiteaService) List(ctx context.Context) ([]*PullRequest, error) {
	list := []*PullRequest{}
	for _, pr := range prs {
		list = append(list, &PullRequest{
-			Number:       int(pr.Index),
-			Branch:       pr.Head.Ref,
-			TargetBranch: pr.Base.Ref,
-			HeadSHA:      pr.Head.Sha,
-			Labels:       getGiteaPRLabelNames(pr.Labels),
+			Number:  int(pr.Index),
+			Branch:  pr.Head.Ref,
+			HeadSHA: pr.Head.Sha,
		})
	}
	return list, nil
}
-
-// Get the Gitea pull request label names.
-func getGiteaPRLabelNames(giteaLabels []*gitea.Label) []string {
-	var labelNames []string
-	for _, giteaLabel := range giteaLabels {
-		labelNames = append(labelNames, giteaLabel.Name)
-	}
-	return labelNames
-}

@@ -8,7 +8,6 @@ import (
	"net/http/httptest"
	"testing"

-	"code.gitea.io/sdk/gitea"
	"github.com/stretchr/testify/assert"
)

@@ -256,35 +255,5 @@ func TestGiteaList(t *testing.T) {
	assert.Equal(t, len(prs), 1)
	assert.Equal(t, prs[0].Number, 1)
	assert.Equal(t, prs[0].Branch, "test")
-	assert.Equal(t, prs[0].TargetBranch, "main")
	assert.Equal(t, prs[0].HeadSHA, "7bbaf62d92ddfafd9cc8b340c619abaec32bc09f")
}
-
-func TestGetGiteaPRLabelNames(t *testing.T) {
-	Tests := []struct {
-		Name           string
-		PullLabels     []*gitea.Label
-		ExpectedResult []string
-	}{
-		{
-			Name: "PR has labels",
-			PullLabels: []*gitea.Label{
-				{Name: "label1"},
-				{Name: "label2"},
-				{Name: "label3"},
-			},
-			ExpectedResult: []string{"label1", "label2", "label3"},
-		},
-		{
-			Name:           "PR does not have labels",
-			PullLabels:     []*gitea.Label{},
-			ExpectedResult: nil,
-		},
-	}
-	for _, test := range Tests {
-		t.Run(test.Name, func(t *testing.T) {
-			labels := getGiteaPRLabelNames(test.PullLabels)
-			assert.Equal(t, test.ExpectedResult, labels)
-		})
-	}
-}

@@ -65,11 +65,9 @@ func (g *GithubService) List(ctx context.Context) ([]*PullRequest, error) {
				continue
			}
			pullRequests = append(pullRequests, &PullRequest{
-				Number:       *pull.Number,
-				Branch:       *pull.Head.Ref,
-				TargetBranch: *pull.Base.Ref,
-				HeadSHA:      *pull.Head.SHA,
-				Labels:       getGithubPRLabelNames(pull.Labels),
+				Number:  *pull.Number,
+				Branch:  *pull.Head.Ref,
+				HeadSHA: *pull.Head.SHA,
			})
		}
		if resp.NextPage == 0 {
@@ -99,12 +97,3 @@ func containLabels(expectedLabels []string, gotLabels []*github.Label) bool {
		}
	}
	return true
}
-
-// Get the Github pull request label names.
-func getGithubPRLabelNames(gitHubLabels []*github.Label) []string {
-	var labelNames []string
-	for _, gitHubLabel := range gitHubLabels {
-		labelNames = append(labelNames, *gitHubLabel.Name)
-	}
-	return labelNames
-}

@@ -4,7 +4,6 @@ import (
	"testing"

	"github.com/google/go-github/v35/github"
	"github.com/stretchr/testify/assert"
)

func toPtr(s string) *string {
@@ -22,9 +21,9 @@ func TestContainLabels(t *testing.T) {
			Name:   "Match labels",
			Labels: []string{"label1", "label2"},
			PullLabels: []*github.Label{
-				{Name: toPtr("label1")},
-				{Name: toPtr("label2")},
-				{Name: toPtr("label3")},
+				&github.Label{Name: toPtr("label1")},
+				&github.Label{Name: toPtr("label2")},
+				&github.Label{Name: toPtr("label3")},
			},
			Expect: true,
		},
@@ -32,9 +31,9 @@ func TestContainLabels(t *testing.T) {
			Name:   "Not match labels",
			Labels: []string{"label1", "label4"},
			PullLabels: []*github.Label{
-				{Name: toPtr("label1")},
-				{Name: toPtr("label2")},
-				{Name: toPtr("label3")},
+				&github.Label{Name: toPtr("label1")},
+				&github.Label{Name: toPtr("label2")},
+				&github.Label{Name: toPtr("label3")},
			},
			Expect: false,
		},
@@ -42,9 +41,9 @@ func TestContainLabels(t *testing.T) {
			Name:   "No specify",
			Labels: []string{},
			PullLabels: []*github.Label{
-				{Name: toPtr("label1")},
-				{Name: toPtr("label2")},
-				{Name: toPtr("label3")},
+				&github.Label{Name: toPtr("label1")},
+				&github.Label{Name: toPtr("label2")},
+				&github.Label{Name: toPtr("label3")},
			},
			Expect: true,
		},
@@ -58,32 +57,3 @@ func TestContainLabels(t *testing.T) {
		})
	}
}
-
-func TestGetGitHubPRLabelNames(t *testing.T) {
-	Tests := []struct {
-		Name           string
-		PullLabels     []*github.Label
-		ExpectedResult []string
-	}{
-		{
-			Name: "PR has labels",
-			PullLabels: []*github.Label{
-				{Name: toPtr("label1")},
-				{Name: toPtr("label2")},
-				{Name: toPtr("label3")},
-			},
-			ExpectedResult: []string{"label1", "label2", "label3"},
-		},
-		{
-			Name:           "PR does not have labels",
-			PullLabels:     []*github.Label{},
-			ExpectedResult: nil,
-		},
-	}
-	for _, test := range Tests {
-		t.Run(test.Name, func(t *testing.T) {
-			labels := getGithubPRLabelNames(test.PullLabels)
-			assert.Equal(t, test.ExpectedResult, labels)
-		})
-	}
-}

Some files were not shown because too many files have changed in this diff.