diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 513ad62979..3fa8fe02c6 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -1,4 +1,4 @@ -ARG VARIANT="1.21" +ARG VARIANT="1.23" FROM mcr.microsoft.com/vscode/devcontainers/go:${VARIANT} RUN apt-get update && \ export DEBIAN_FRONTEND=noninteractive && \ diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index eee94e61c7..c8f0e01dc3 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -5,7 +5,7 @@ "build": { "dockerfile": "Dockerfile", "args": { - "VARIANT": "1.21-bullseye", + "VARIANT": "1.23-bullseye", "NODE_VERSION": "none" } }, @@ -57,6 +57,9 @@ "postCreateCommand": "make setup", "remoteUser": "vscode", "features": { + "ghcr.io/devcontainers/features/go:1": { + "version": "1.23.2" + }, "docker-in-docker": "latest", "kubectl-helm-minikube": "latest", "git": "latest", diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 022b1dfc96..2dcc3c80b1 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -7,10 +7,13 @@ # review a PR in an area. # # Rules are evaluated in this order, and the last match is used for auto-assignment. -* @azure/azure-sdn-members -/.github/ @azure/acn-admins -/cns/ @azure/acn-cns-reviewers -/cni/ @azure/acn-cni-reviewers -/dropgz/ @rbtr @camrynl @paulyufan2 @ashvindeodhar @thatmattlong -/npm/ @azure/acn-npm-reviewers -/zapai/ @rbtr @ZetaoZhuang +* @azure/azure-sdn-members +/.github/ @azure/acn-admins +/cns/ @azure/acn-cns-reviewers +/cni/ @azure/acn-cni-reviewers +/dropgz/ @rbtr @camrynl @paulyufan2 @ashvindeodhar @thatmattlong +/npm/ @azure/acn-npm-reviewers +/zapai/ @rbtr @ZetaoZhuang +/bpf-prog/ @camrynl +/azure-ip-masq-merger/ @QxBytes @santhoshmprabhu +/azure-iptables-monitor/ @QxBytes @santhoshmprabhu diff --git a/.github/dependabot.yaml b/.github/dependabot.yaml index 1e4d9a4c96..af0537a91f 100644 --- a/.github/dependabot.yaml +++ b/.github/dependabot.yaml @@ -5,8 +5,6 @@ updates: directory: "/" schedule: interval: "daily" - reviewers: - - "azure/azure-sdn-members" commit-message: prefix: "ci" labels: [ "ci", "dependencies" ] @@ -15,45 +13,83 @@ updates: directory: "/" schedule: interval: "daily" - reviewers: - - "azure/azure-sdn-members" commit-message: prefix: "ci" labels: [ "ci", "dependencies" ] open-pull-requests-limit: 10 + + +# Constrain Kubernetes dependencies to compatibility with default branch - v1.33-v1.35 - package-ecosystem: "gomod" directory: "/" schedule: interval: "daily" - reviewers: - - "azure/azure-sdn-members" commit-message: prefix: "deps" + labels: [ "dependencies" ] open-pull-requests-limit: 10 + ignore: + - dependency-name: "k8s.io/api" + versions: [">=0.35.0"] + - dependency-name: "k8s.io/apiextensions-apiserver" + versions: [">=0.35.0"] + - dependency-name: "k8s.io/apimachinery" + versions: [">=0.35.0"] + - dependency-name: "k8s.io/client-go" + versions: [">=0.35.0"] + - dependency-name: "k8s.io/kubelet" + versions: [">=0.35.0"] + - dependency-name: "k8s.io/kubectl" + versions: [">=0.35.0"] - package-ecosystem: "gomod" directory: "/azure-ipam" schedule: interval: "daily" - reviewers: - - "azure/azure-sdn-members" commit-message: prefix: "deps" + labels: [ "dependencies", "azure-ipam" ] open-pull-requests-limit: 10 + ignore: + - dependency-name: "k8s.io/api" + versions: [">=0.35.0"] + - dependency-name: "k8s.io/apiextensions-apiserver" + versions: [">=0.35.0"] + - dependency-name: "k8s.io/apimachinery" + versions: [">=0.35.0"] + - dependency-name: "k8s.io/client-go" + versions: 
[">=0.35.0"] + - dependency-name: "k8s.io/kubelet" + versions: [">=0.35.0"] + - dependency-name: "k8s.io/kubectl" + versions: [">=0.35.0"] - package-ecosystem: "gomod" directory: "/build/tools" schedule: interval: "daily" - reviewers: - - "azure/azure-sdn-members" commit-message: prefix: "deps" + labels: [ "dependencies" ] open-pull-requests-limit: 10 + ignore: + - dependency-name: "k8s.io/api" + versions: [">=0.35.0"] + - dependency-name: "k8s.io/apiextensions-apiserver" + versions: [">=0.35.0"] + - dependency-name: "k8s.io/apimachinery" + versions: [">=0.35.0"] + - dependency-name: "k8s.io/client-go" + versions: [">=0.35.0"] + - dependency-name: "k8s.io/kubelet" + versions: [">=0.35.0"] + - dependency-name: "k8s.io/kubectl" + versions: [">=0.35.0"] - package-ecosystem: "gomod" directory: "/dropgz" schedule: interval: "daily" commit-message: prefix: "deps" + labels: [ "dependencies", "dropgz" ] open-pull-requests-limit: 10 - package-ecosystem: "gomod" directory: "/zapai" @@ -61,4 +97,146 @@ updates: interval: "daily" commit-message: prefix: "deps" + labels: [ "dependencies" ] open-pull-requests-limit: 10 + + +# Constrain Kubernetes dependencies to compatibility with release/v1.6 branch - v1.30-v1.32 +- package-ecosystem: "gomod" + directory: "/" + schedule: + interval: "daily" + commit-message: + prefix: "deps" + labels: [ "dependencies", "release/1.6" ] + open-pull-requests-limit: 10 + target-branch: "release/v1.6" + ignore: + - dependency-name: "k8s.io/api" + versions: [">=0.32.0"] + - dependency-name: "k8s.io/apiextensions-apiserver" + versions: [">=0.32.0"] + - dependency-name: "k8s.io/apimachinery" + versions: [">=0.32.0"] + - dependency-name: "k8s.io/client-go" + versions: [">=0.32.0"] + - dependency-name: "k8s.io/kubelet" + versions: [">=0.32.0"] + - dependency-name: "k8s.io/kubectl" + versions: [">=0.32.0"] +- package-ecosystem: "gomod" + directory: "/azure-ipam" + schedule: + interval: "daily" + commit-message: + prefix: "deps" + labels: [ "dependencies", "azure-ipam", "release/1.6" ] + open-pull-requests-limit: 10 + target-branch: "release/v1.6" + ignore: + - dependency-name: "k8s.io/api" + versions: [">=0.32.0"] + - dependency-name: "k8s.io/apiextensions-apiserver" + versions: [">=0.32.0"] + - dependency-name: "k8s.io/apimachinery" + versions: [">=0.32.0"] + - dependency-name: "k8s.io/client-go" + versions: [">=0.32.0"] + - dependency-name: "k8s.io/kubelet" + versions: [">=0.32.0"] + - dependency-name: "k8s.io/kubectl" + versions: [">=0.32.0"] + + +# Constrain Kubernetes dependencies to compatibility with release/v1.5 branch - v1.28-v1.30 +- package-ecosystem: "gomod" + directory: "/" + schedule: + interval: "daily" + commit-message: + prefix: "deps" + labels: [ "dependencies", "release/1.5" ] + open-pull-requests-limit: 10 + target-branch: "release/v1.5" + ignore: + - dependency-name: "k8s.io/api" + versions: [">=0.30.0"] + - dependency-name: "k8s.io/apiextensions-apiserver" + versions: [">=0.30.0"] + - dependency-name: "k8s.io/apimachinery" + versions: [">=0.30.0"] + - dependency-name: "k8s.io/client-go" + versions: [">=0.30.0"] + - dependency-name: "k8s.io/kubelet" + versions: [">=0.30.0"] + - dependency-name: "k8s.io/kubectl" + versions: [">=0.30.0"] +- package-ecosystem: "gomod" + directory: "/azure-ipam" + schedule: + interval: "daily" + commit-message: + prefix: "deps" + labels: [ "dependencies", "azure-ipam", "release/1.5" ] + open-pull-requests-limit: 10 + target-branch: "release/v1.5" + ignore: + - dependency-name: "k8s.io/api" + versions: [">=0.30.0"] + - 
dependency-name: "k8s.io/apiextensions-apiserver" + versions: [">=0.30.0"] + - dependency-name: "k8s.io/apimachinery" + versions: [">=0.30.0"] + - dependency-name: "k8s.io/client-go" + versions: [">=0.30.0"] + - dependency-name: "k8s.io/kubelet" + versions: [">=0.30.0"] + - dependency-name: "k8s.io/kubectl" + versions: [">=0.30.0"] + + +# Constrain Kubernetes dependencies to compatibility with release/v1.4 branch - v1.25-v1.27 +- package-ecosystem: "gomod" + directory: "/" + schedule: + interval: "daily" + commit-message: + prefix: "deps" + labels: [ "dependencies", "release/1.4" ] + open-pull-requests-limit: 10 + target-branch: "release/v1.4" + ignore: + - dependency-name: "k8s.io/api" + versions: [">=0.27.0"] + - dependency-name: "k8s.io/apiextensions-apiserver" + versions: [">=0.27.0"] + - dependency-name: "k8s.io/apimachinery" + versions: [">=0.27.0"] + - dependency-name: "k8s.io/client-go" + versions: [">=0.27.0"] + - dependency-name: "k8s.io/kubelet" + versions: [">=0.27.0"] + - dependency-name: "k8s.io/kubectl" + versions: [">=0.27.0"] +- package-ecosystem: "gomod" + directory: "/azure-ipam" + schedule: + interval: "daily" + commit-message: + prefix: "deps" + labels: [ "dependencies", "azure-ipam", "release/1.4" ] + open-pull-requests-limit: 10 + target-branch: "release/v1.4" + ignore: + - dependency-name: "k8s.io/api" + versions: [">=0.27.0"] + - dependency-name: "k8s.io/apiextensions-apiserver" + versions: [">=0.27.0"] + - dependency-name: "k8s.io/apimachinery" + versions: [">=0.27.0"] + - dependency-name: "k8s.io/client-go" + versions: [">=0.27.0"] + - dependency-name: "k8s.io/kubelet" + versions: [">=0.27.0"] + - dependency-name: "k8s.io/kubectl" + versions: [">=0.27.0"] diff --git a/.github/workflows/baseimages.yaml b/.github/workflows/baseimages.yaml new file mode 100644 index 0000000000..6bcf13f477 --- /dev/null +++ b/.github/workflows/baseimages.yaml @@ -0,0 +1,32 @@ +name: 'Docker Base Images' +on: + workflow_dispatch: + pull_request: + types: + - opened + - reopened + - synchronize + - ready_for_review + merge_group: + types: + - checks_requested + +jobs: + render: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: '1.23' + - name: Render Dockerfiles + run: make dockerfiles + - name: Fail if base images are outdated + run: | + if [ -n "$(git status --porcelain)" ]; then + echo "Changes detected. Please run 'make dockerfiles' locally to update the base images." 
+ exit 1 + fi diff --git a/.github/workflows/codeql.yaml b/.github/workflows/codeql.yaml index 71ecbf5ff6..0137fe75de 100644 --- a/.github/workflows/codeql.yaml +++ b/.github/workflows/codeql.yaml @@ -2,12 +2,12 @@ name: "CodeQL" on: push: branches: - - master - - release/* + - master + - release/* pull_request: branches: - - master - - release/* + - master + - release/* types: - opened - reopened @@ -32,20 +32,21 @@ jobs: contents: read security-events: write steps: - - name: Setup go - uses: actions/setup-go@v5 - with: - go-version: '1.21' - check-latest: true - - name: Checkout repository - uses: actions/checkout@v4 - - name: Initialize CodeQL - uses: github/codeql-action/init@v3 - with: - languages: ${{ matrix.language }} - - name: Autobuild - uses: github/codeql-action/autobuild@v3 - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v3 - with: - category: "/language:${{matrix.language}}" + - name: Setup go + uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + - name: Checkout repository + uses: actions/checkout@v4 + - name: Initialize CodeQL + uses: github/codeql-action/init@v3 + with: + languages: ${{ matrix.language }} + queries: ./codeql/ + - name: Autobuild + uses: github/codeql-action/autobuild@v3 + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v3 + with: + category: "/language:${{matrix.language}}" diff --git a/.github/workflows/crdgen.yaml b/.github/workflows/crdgen.yaml index 781647c9c3..3686cf8c71 100644 --- a/.github/workflows/crdgen.yaml +++ b/.github/workflows/crdgen.yaml @@ -14,7 +14,7 @@ jobs: crdgen: strategy: matrix: - go-version: ['1.21'] + go-version: ['1.22', '1.23'] os: [ubuntu-latest] name: CRDs are Generated runs-on: ${{ matrix.os }} @@ -31,5 +31,9 @@ jobs: run: make -C crd/multitenantnetworkcontainer - name: Regenerate Multitenancy CRDs run: make -C crd/multitenancy + - name: Regenerate ClusterSubnetState CRD + run: make -C crd/clustersubnetstate + - name: Regenerate OverlayExtensionConfig CRD + run: make -C crd/overlayextensionconfig - name: Fail if the tree is dirty run: test -z "$(git status --porcelain)" diff --git a/.github/workflows/cyclonus-netpol-extended-nightly-test.yaml b/.github/workflows/cyclonus-netpol-extended-nightly-test.yaml index 1d86de9b88..2bf36f0bb2 100644 --- a/.github/workflows/cyclonus-netpol-extended-nightly-test.yaml +++ b/.github/workflows/cyclonus-netpol-extended-nightly-test.yaml @@ -26,14 +26,15 @@ jobs: - uses: actions/setup-go@v5 with: - go-version: "^1.21" + go-version: "^1.23" - name: Setup Kind - uses: engineerd/setup-kind@v0.5.0 + uses: helm/kind-action@v1 with: - version: "v0.11.1" + version: "v0.22.0" + kubectl_version: "v1.27.7" config: ./test/kind/kind.yaml - name: npm-kind + cluster_name: npm-kind - name: Check Kind run: | diff --git a/.github/workflows/cyclonus-netpol-test.yaml b/.github/workflows/cyclonus-netpol-test.yaml index 9cb93876ef..14811d4fa2 100644 --- a/.github/workflows/cyclonus-netpol-test.yaml +++ b/.github/workflows/cyclonus-netpol-test.yaml @@ -33,7 +33,7 @@ jobs: - uses: actions/setup-go@v5 with: - go-version: '^1.21' + go-version: '^1.23' - name: Setup Kind uses: helm/kind-action@v1 diff --git a/.github/workflows/golangci.yaml b/.github/workflows/golangci.yaml index 0a85a34c6d..59163383e6 100644 --- a/.github/workflows/golangci.yaml +++ b/.github/workflows/golangci.yaml @@ -15,7 +15,7 @@ jobs: strategy: fail-fast: false matrix: - go-version: ['1.21.x'] + go-version: ['1.22.x', '1.23.x'] os: [ubuntu-latest, windows-latest] name: Lint 
runs-on: ${{ matrix.os }} @@ -29,5 +29,5 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v6 with: - version: v1.55 - args: --new-from-rev=origin/master --config=.golangci.yml --timeout=25m + version: v1.61 + args: ${{ github.event_name == 'pull_request' && format('--new-from-rev=origin/{0}', github.base_ref) || '' }} --config=.golangci.yml --timeout=25m diff --git a/.github/workflows/stale.yaml b/.github/workflows/stale.yaml index 55c413c551..1ac719dc92 100644 --- a/.github/workflows/stale.yaml +++ b/.github/workflows/stale.yaml @@ -16,14 +16,15 @@ jobs: id: stale with: ascending: true - close-issue-message: 'Issue closed due to inactivity.' close-pr-message: 'Pull request closed due to inactivity.' - days-before-close: 7 days-before-stale: 14 + days-before-issue-close: -1 + days-before-pr-close: 7 delete-branch: true exempt-issue-labels: 'exempt-stale' + exempt-pr-labels: 'exempt-stale,dependencies,ci' operations-per-run: 100 - stale-issue-message: 'This issue is stale because it has been open for 2 weeks with no activity. Remove stale label or comment or this will be closed in 7 days' + stale-issue-message: 'This issue is stale because it has been open for 2 weeks with no activity. Remove stale label or comment to keep it open.' stale-pr-message: 'This pull request is stale because it has been open for 2 weeks with no activity. Remove stale label or comment or this will be closed in 7 days' - name: Print outputs run: echo ${{ join(steps.stale.outputs.*, ',') }} diff --git a/.gitignore b/.gitignore index b28e45c929..2558114dbc 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,5 @@ +vendor/ + # Binaries out/* output/* @@ -8,6 +10,7 @@ ipam-*.xml # Environment .vscode/* +**/*.sw? # Coverage *.out diff --git a/.golangci.yml b/.golangci.yml index b0d9e59c04..dbd199f36d 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,7 +1,6 @@ issues: max-same-issues: 0 max-issues-per-linter: 0 - new-from-rev: origin/master linters: presets: - bugs @@ -9,16 +8,12 @@ linters: - format - performance - unused - disable: - - maligned - - scopelint enable: - - exportloopref + - copyloopvar - goconst - gocritic - gocyclo - gofmt - - gomnd - goprintffuncname - gosimple - lll @@ -35,6 +30,7 @@ linters-settings: disabled-checks: - "hugeParam" govet: - check-shadowing: true + enable: + - shadow lll: line-length: 200 diff --git a/.pipelines/OneBranch.NonOfficial.CrossPlat.yaml b/.pipelines/OneBranch.NonOfficial.CrossPlat.yaml deleted file mode 100644 index 35f263ec6e..0000000000 --- a/.pipelines/OneBranch.NonOfficial.CrossPlat.yaml +++ /dev/null @@ -1,28 +0,0 @@ -trigger: none - -variables: - WindowsContainerImage: 'onebranch.azurecr.io/windows/ltsc2022/vse2022:latest' # for Windows jobs - LinuxContainerImage: 'mcr.microsoft.com/onebranch/cbl-mariner/build:2.0' # for Linux jobs - -resources: - repositories: - - repository: templates - type: git - name: OneBranch.Pipelines/GovernedTemplates - ref: refs/heads/main - -extends: - template: v2/OneBranch.NonOfficial.CrossPlat.yml@templates - parameters: - featureFlags: - WindowsHostVersion: 1ESWindows2022 - stages: - - stage: build - jobs: - - job: main - pool: - type: windows - variables: - ob_outputDirectory: '$(Build.SourcesDirectory)\out' - steps: [] - diff --git a/.pipelines/OneBranch.Official.CrossPlat.yaml b/.pipelines/OneBranch.Official.CrossPlat.yaml deleted file mode 100644 index e67c7df267..0000000000 --- a/.pipelines/OneBranch.Official.CrossPlat.yaml +++ /dev/null @@ -1,27 +0,0 @@ -trigger: none - -variables: - WindowsContainerImage: 
'onebranch.azurecr.io/windows/ltsc2022/vse2022:latest' # for Windows jobs - LinuxContainerImage: 'mcr.microsoft.com/onebranch/cbl-mariner/build:2.0' # for Linux jobs - -resources: - repositories: - - repository: templates - type: git - name: OneBranch.Pipelines/GovernedTemplates - ref: refs/heads/main - -extends: - template: v2/OneBranch.Official.CrossPlat.yml@templates - parameters: - featureFlags: - WindowsHostVersion: 1ESWindows2022 - stages: - - stage: build - jobs: - - job: main - pool: - type: windows - variables: - ob_outputDirectory: '$(Build.SourcesDirectory)\out' - steps: [] diff --git a/.pipelines/build/binaries.jobs.yaml b/.pipelines/build/binaries.jobs.yaml new file mode 100644 index 0000000000..1311f243d3 --- /dev/null +++ b/.pipelines/build/binaries.jobs.yaml @@ -0,0 +1,79 @@ +parameters: +- name: binaries + type: jobList + + +jobs: +- ${{ each job_data in parameters.binaries }}: + - ${{ if eq(job_data.templateContext.action, 'build') }}: + - job: binaries_${{ job_data.job }} + displayName: "Build Binary - ${{ job_data.displayName }} -" + strategy: ${{ job_data.strategy }} + pool: + type: linux + ${{ if eq(job_data.job, 'linux_arm64') }}: + hostArchitecture: arm64 + + variables: + ob_outputDirectory: $(Build.ArtifactStagingDirectory)/out + ob_artifactSuffix: _$(artifact) + ob_git_checkout: false + REPO_ROOT: $(Build.SourcesDirectory)/${{ job_data.templateContext.repositoryArtifact }} + ${{ if eq(job_data.job, 'linux_amd64') }}: + LinuxContainerImage: 'onebranch.azurecr.io/linux/ubuntu-2204:latest' + ARCH: amd64 + OS: linux + ${{ elseif eq(job_data.job, 'windows_amd64') }}: + LinuxContainerImage: 'onebranch.azurecr.io/linux/ubuntu-2204:latest' + ARCH: amd64 + OS: windows + ${{ elseif eq(job_data.job, 'linux_arm64') }}: + ob_enable_qemu: true + ARCH: arm64 + OS: linux + + steps: + - task: DownloadPipelineArtifact@2 + inputs: + targetPath: $(Build.SourcesDirectory)/${{ job_data.templateContext.repositoryArtifact }} + artifact: '${{ job_data.templateContext.repositoryArtifact }}' + + - template: binary.steps.yaml + parameters: + target: $(name) + os: $(OS) + arch: $(ARCH) + + + - ${{ elseif and(eq(job_data.templateContext.action, 'sign'), job_data.templateContext.isOfficial) }}: + - job: sign_${{ job_data.job }} + displayName: "Sign Binary - ${{ job_data.displayName }} -" + strategy: ${{ job_data.strategy }} + pool: + ${{ if eq(job_data.job, 'windows_amd64') }}: + type: windows + ${{ else }}: + type: linux + variables: + ob_outputDirectory: $(Build.SourcesDirectory) + ob_artifactSuffix: _$(artifact) + ob_git_checkout: false + steps: + - task: DownloadPipelineArtifact@2 + inputs: + targetPath: $(Build.SourcesDirectory) + artifact: '${{ job_data.templateContext.repositoryArtifact }}' + + - task: ExtractFiles@1 + inputs: + archiveFilePatterns: '**/*.?(tgz|tgz.gz|zip)' + destinationFolder: $(Build.SourcesDirectory) + cleanDestinationFolder: false + overwriteExistingFiles: true + + - task: onebranch.pipeline.signing@1 + inputs: + command: 'sign' + signing_profile: 'external_distribution' + files_to_sign: '**/*' + search_root: $(Build.SourcesDirectory) diff --git a/.pipelines/build/binary.steps.yaml b/.pipelines/build/binary.steps.yaml new file mode 100644 index 0000000000..4dce0c1132 --- /dev/null +++ b/.pipelines/build/binary.steps.yaml @@ -0,0 +1,72 @@ +parameters: +- name: target + type: string + +- name: os + type: string + +- name: arch + type: string + + +steps: +- task: GoTool@0 + inputs: + version: '$(GOVERSION)' + +- bash: | + # Ubuntu + if [[ -f /etc/debian_version ]];then 
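+      # /etc/debian_version is only present on Debian/Ubuntu hosts; the else
+      # branch below assumes an Azure Linux (Mariner) host and uses tdnf instead.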
+ sudo apt-get update -y + if [[ $GOARCH =~ amd64 ]]; then + sudo apt-get install -y llvm clang linux-libc-dev linux-headers-generic libbpf-dev libc6-dev nftables iproute2 gcc-multilib tree + for dir in /usr/include/x86_64-linux-gnu/*; do + sudo ln -sfn "$dir" /usr/include/$(basename "$dir") + done + + elif [[ $GOARCH =~ arm64 ]]; then + sudo apt-get install -y llvm clang linux-libc-dev linux-headers-generic libbpf-dev libc6-dev nftables iproute2 gcc-aarch64-linux-gnu tree + for dir in /usr/include/aarch64-linux-gnu/*; do + sudo ln -sfn "$dir" /usr/include/$(basename "$dir") + done + fi + # Mariner + else + sudo tdnf install -y llvm clang libbpf-devel nftables tree + for dir in /usr/include/aarch64-linux-gnu/*; do + if [[ -d $dir ]]; then + sudo ln -sfn "$dir" /usr/include/$(basename "$dir") + elif [[ -f "$dir" ]]; then + sudo ln -Tsfn "$dir" /usr/include/$(basename "$dir") + fi + done + fi + displayName: "Install Binary Pre-Reqs" + workingDirectory: $(ACN_DIR) + continueOnError: true + env: + GOOS: ${{ parameters.os }} + GOARCH: ${{ parameters.arch }} + +- bash: | + make "$TARGET" + displayName: "Build Binary - ${{ parameters.target }}" + workingDirectory: $(ACN_DIR) + env: + REPO_ROOT: $(ACN_DIR) + TARGET: ${{ parameters.target }} + GOOS: ${{ parameters.os }} + GOARCH: ${{ parameters.arch }} + +- script: | + SOURCE_DIR="./output" + TARGET_DIR="$BUILD_ARTIFACTSTAGINGDIRECTORY"/out + tree "$SOURCE_DIR" + + mkdir -p "$TARGET_DIR" + find "$SOURCE_DIR" -name '*.tgz*' -print -exec mv -t "$TARGET_DIR"/ {} + + find "$SOURCE_DIR" -name '*.zip' -print -exec mv -t "$TARGET_DIR"/ {} + + + tree "$TARGET_DIR" + displayName: "Prepare Artifacts" + workingDirectory: $(ACN_DIR) diff --git a/.pipelines/build/dockerfiles/azure-ip-masq-merger.Dockerfile b/.pipelines/build/dockerfiles/azure-ip-masq-merger.Dockerfile new file mode 100644 index 0000000000..733231cefe --- /dev/null +++ b/.pipelines/build/dockerfiles/azure-ip-masq-merger.Dockerfile @@ -0,0 +1,7 @@ +ARG ARCH + +FROM scratch AS linux +ARG ARTIFACT_DIR + +COPY ${ARTIFACT_DIR}/bin/azure-ip-masq-merger /azure-ip-masq-merger +ENTRYPOINT ["/azure-ip-masq-merger"] diff --git a/.pipelines/build/dockerfiles/azure-ipam.Dockerfile b/.pipelines/build/dockerfiles/azure-ipam.Dockerfile new file mode 100644 index 0000000000..dd4d32a4f2 --- /dev/null +++ b/.pipelines/build/dockerfiles/azure-ipam.Dockerfile @@ -0,0 +1,16 @@ +ARG ARCH + + +# skopeo inspect docker://mcr.microsoft.com/oss/kubernetes/windows-host-process-containers-base-image:v1.0.0 --format "{{.Name}}@{{.Digest}}" +FROM --platform=windows/${ARCH} mcr.microsoft.com/oss/kubernetes/windows-host-process-containers-base-image@sha256:b4c9637e032f667c52d1eccfa31ad8c63f1b035e8639f3f48a510536bf34032b as windows +ARG ARTIFACT_DIR . + +COPY ${ARTIFACT_DIR}/bin/dropgz.exe /dropgz.exe +ENTRYPOINT [ "/dropgz.exe" ] + + +FROM scratch AS linux +ARG ARTIFACT_DIR . 
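+# Note: this image ships dropgz, which embeds azure-ipam and its conflists as a
+# checksummed payload (assembled by .pipelines/build/scripts/dropgz.sh).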
+ +COPY ${ARTIFACT_DIR}/bin/dropgz /dropgz +ENTRYPOINT [ "/dropgz" ] diff --git a/.pipelines/build/dockerfiles/azure-iptables-monitor.Dockerfile b/.pipelines/build/dockerfiles/azure-iptables-monitor.Dockerfile new file mode 100644 index 0000000000..0fe8fd4c1c --- /dev/null +++ b/.pipelines/build/dockerfiles/azure-iptables-monitor.Dockerfile @@ -0,0 +1,18 @@ +ARG ARCH + +# mcr.microsoft.com/azurelinux/base/core:3.0 +FROM mcr.microsoft.com/azurelinux/base/core@sha256:9948138108a3d69f1dae62104599ac03132225c3b7a5ac57b85a214629c8567d AS mariner-core + +# mcr.microsoft.com/azurelinux/distroless/minimal:3.0 +FROM mcr.microsoft.com/azurelinux/distroless/minimal@sha256:0801b80a0927309572b9adc99bd1813bc680473175f6e8175cd4124d95dbd50c AS mariner-distroless + +FROM mariner-core AS iptables +RUN tdnf install -y iptables + +FROM mariner-distroless AS linux +ARG ARTIFACT_DIR +COPY --from=iptables /usr/sbin/*tables* /usr/sbin/ +COPY --from=iptables /usr/lib /usr/lib +COPY ${ARTIFACT_DIR}/bin/azure-iptables-monitor /azure-iptables-monitor + +ENTRYPOINT ["/azure-iptables-monitor"] diff --git a/.pipelines/build/dockerfiles/cni.Dockerfile b/.pipelines/build/dockerfiles/cni.Dockerfile new file mode 100644 index 0000000000..c4d38741b8 --- /dev/null +++ b/.pipelines/build/dockerfiles/cni.Dockerfile @@ -0,0 +1,17 @@ +# !! AUTOGENERATED - DO NOT EDIT !! +# SOURCE: .pipelines/build/dockerfiles/cni.Dockerfile.tmpl +ARG ARCH + +# mcr.microsoft.com/oss/kubernetes/windows-host-process-containers-base-image:v1.0.0 +FROM --platform=windows/${ARCH} mcr.microsoft.com/oss/kubernetes/windows-host-process-containers-base-image@sha256:b4c9637e032f667c52d1eccfa31ad8c63f1b035e8639f3f48a510536bf34032b AS windows +ARG ARTIFACT_DIR . + +COPY ${ARTIFACT_DIR}/bin/dropgz.exe /dropgz.exe +ENTRYPOINT [ "/dropgz.exe" ] + + +FROM scratch AS linux +ARG ARTIFACT_DIR . + +COPY ${ARTIFACT_DIR}/bin/dropgz /dropgz +ENTRYPOINT [ "/dropgz" ] diff --git a/.pipelines/build/dockerfiles/cni.Dockerfile.tmpl b/.pipelines/build/dockerfiles/cni.Dockerfile.tmpl new file mode 100644 index 0000000000..004ddb191c --- /dev/null +++ b/.pipelines/build/dockerfiles/cni.Dockerfile.tmpl @@ -0,0 +1,17 @@ +# {{.RENDER_MSG}} +# SOURCE: {{.SRC_PIPE}} +ARG ARCH + +# {{.WIN_HPC_IMG}} +FROM --platform=windows/${ARCH} {{.WIN_HPC_PIN}} AS windows +ARG ARTIFACT_DIR . + +COPY ${ARTIFACT_DIR}/bin/dropgz.exe /dropgz.exe +ENTRYPOINT [ "/dropgz.exe" ] + + +FROM scratch AS linux +ARG ARTIFACT_DIR . + +COPY ${ARTIFACT_DIR}/bin/dropgz /dropgz +ENTRYPOINT [ "/dropgz" ] diff --git a/.pipelines/build/dockerfiles/cns.Dockerfile b/.pipelines/build/dockerfiles/cns.Dockerfile new file mode 100644 index 0000000000..d6540d2bc6 --- /dev/null +++ b/.pipelines/build/dockerfiles/cns.Dockerfile @@ -0,0 +1,25 @@ +# !! AUTOGENERATED - DO NOT EDIT !! +# SOURCE: .pipelines/build/dockerfiles/cns.Dockerfile.tmpl +ARG ARCH + +# mcr.microsoft.com/oss/kubernetes/windows-host-process-containers-base-image:v1.0.0 +FROM --platform=windows/${ARCH} mcr.microsoft.com/oss/kubernetes/windows-host-process-containers-base-image@sha256:b4c9637e032f667c52d1eccfa31ad8c63f1b035e8639f3f48a510536bf34032b AS windows +ARG ARTIFACT_DIR . 
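+# The host-process-containers base image lets azure-cns.exe run directly on the
+# Windows host.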
+
+COPY ${ARTIFACT_DIR}/bin/azure-cns.exe /azure-cns.exe
+ENTRYPOINT ["azure-cns.exe"]
+EXPOSE 10090
+
+# mcr.microsoft.com/azurelinux/base/core:3.0
+FROM --platform=linux/${ARCH} mcr.microsoft.com/azurelinux/base/core@sha256:c09a4e011a092a45b5c46ac5633253eb1e1106df028912b89cbe225d9061ef0b AS build-helper
+RUN tdnf install -y iptables
+
+# mcr.microsoft.com/azurelinux/distroless/minimal:3.0
+FROM --platform=linux/${ARCH} mcr.microsoft.com/azurelinux/distroless/minimal@sha256:c37100f358ee19e62c60673c54fb43b83d43b2c305846e44b23b2e032e9caf30 AS linux
+ARG ARTIFACT_DIR .
+
+COPY --from=build-helper /usr/sbin/*tables* /usr/sbin/
+COPY --from=build-helper /usr/lib /usr/lib
+COPY ${ARTIFACT_DIR}/bin/azure-cns /usr/local/bin/azure-cns
+ENTRYPOINT [ "/usr/local/bin/azure-cns" ]
+EXPOSE 10090
diff --git a/.pipelines/build/dockerfiles/cns.Dockerfile.tmpl b/.pipelines/build/dockerfiles/cns.Dockerfile.tmpl
new file mode 100644
index 0000000000..fbd09997dc
--- /dev/null
+++ b/.pipelines/build/dockerfiles/cns.Dockerfile.tmpl
@@ -0,0 +1,25 @@
+# {{.RENDER_MSG}}
+# SOURCE: {{.SRC_PIPE}}
+ARG ARCH
+
+# {{.WIN_HPC_IMG}}
+FROM --platform=windows/${ARCH} {{.WIN_HPC_PIN}} AS windows
+ARG ARTIFACT_DIR .
+
+COPY ${ARTIFACT_DIR}/bin/azure-cns.exe /azure-cns.exe
+ENTRYPOINT ["azure-cns.exe"]
+EXPOSE 10090
+
+# {{.MARINER_CORE_IMG}}
+FROM --platform=linux/${ARCH} {{.MARINER_CORE_PIN}} AS build-helper
+RUN tdnf install -y iptables
+
+# {{.MARINER_DISTROLESS_IMG}}
+FROM --platform=linux/${ARCH} {{.MARINER_DISTROLESS_PIN}} AS linux
+ARG ARTIFACT_DIR .
+
+COPY --from=build-helper /usr/sbin/*tables* /usr/sbin/
+COPY --from=build-helper /usr/lib /usr/lib
+COPY ${ARTIFACT_DIR}/bin/azure-cns /usr/local/bin/azure-cns
+ENTRYPOINT [ "/usr/local/bin/azure-cns" ]
+EXPOSE 10090
diff --git a/.pipelines/build/dockerfiles/ipv6-hp-bpf.Dockerfile b/.pipelines/build/dockerfiles/ipv6-hp-bpf.Dockerfile
new file mode 100644
index 0000000000..045244f870
--- /dev/null
+++ b/.pipelines/build/dockerfiles/ipv6-hp-bpf.Dockerfile
@@ -0,0 +1,10 @@
+ARG ARCH
+
+
+FROM --platform=linux/${ARCH} mcr.microsoft.com/azurelinux/distroless/minimal:3.0 AS linux
+ARG ARTIFACT_DIR
+COPY ${ARTIFACT_DIR}/lib/* /lib
+COPY ${ARTIFACT_DIR}/bin/ipv6-hp-bpf /ipv6-hp-bpf
+COPY ${ARTIFACT_DIR}/bin/nft /usr/sbin/nft
+COPY ${ARTIFACT_DIR}/bin/ip /sbin/ip
+CMD ["/ipv6-hp-bpf"]
diff --git a/.pipelines/build/dockerfiles/npm.Dockerfile b/.pipelines/build/dockerfiles/npm.Dockerfile
new file mode 100644
index 0000000000..381d8862cb
--- /dev/null
+++ b/.pipelines/build/dockerfiles/npm.Dockerfile
@@ -0,0 +1,29 @@
+ARG ARCH
+
+
+# intermediate for win-ltsc2022
+FROM --platform=windows/${ARCH} mcr.microsoft.com/windows/servercore@sha256:45952938708fbde6ec0b5b94de68bcdec3f8c838be018536b1e9e5bd95e6b943 as windows
+ARG ARTIFACT_DIR
+
+COPY ${ARTIFACT_DIR}/files/kubeconfigtemplate.yaml kubeconfigtemplate.yaml
+COPY ${ARTIFACT_DIR}/scripts/setkubeconfigpath.ps1 setkubeconfigpath.ps1
+COPY ${ARTIFACT_DIR}/scripts/setkubeconfigpath-capz.ps1 setkubeconfigpath-capz.ps1
+COPY ${ARTIFACT_DIR}/bin/azure-npm.exe npm.exe
+
+CMD ["npm.exe", "start", "--kubeconfig=.\\kubeconfig"]
+
+
+FROM --platform=linux/${ARCH} mcr.microsoft.com/mirror/docker/library/ubuntu:24.04 as linux
+ARG ARTIFACT_DIR
+
+RUN apt-get update && apt-get install -y iptables ipset ca-certificates && apt-get autoremove -y && apt-get clean
+#RUN apt-get update && \
+#    apt-get install -y \
+#    linux-libc-dev \
+#    libc6-dev \
+#    libtasn1-6 \
+#    gnutls30 iptables ipset ca-certificates
+#RUN apt-get autoremove -y && apt-get clean
+
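+# azure-npm is prebuilt by .pipelines/build/scripts/npm.sh and injected via
+# ARTIFACT_DIR; nothing is compiled inside this image.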
+COPY ${ARTIFACT_DIR}/bin/azure-npm /usr/bin/azure-npm +ENTRYPOINT ["/usr/bin/azure-npm", "start"] diff --git a/.pipelines/build/generate-manifest.steps.yaml b/.pipelines/build/generate-manifest.steps.yaml new file mode 100644 index 0000000000..7065eed0bc --- /dev/null +++ b/.pipelines/build/generate-manifest.steps.yaml @@ -0,0 +1,23 @@ +parameters: +- name: platforms + type: object + default: [] + + +steps: +- bash: | + set -e + MANIFEST_DATA=$(echo "$IMAGE_PLATFORM_DATA" | \ + jq -r '.[] | + .args = [ (.platform | split("/")[0]), (.platform | split("/")[1]) ] | + .args = [ ("--os " + .args[0] ), ("--arch " + .args[1] ) ] | + if .osVersion then .args += ["--os-version " + .osVersion] else . end | + { image: .imageReference, annotate: (.args | join(" ")) }' | \ + jq -rcs) + echo >&2 "##vso[task.setvariable variable=MANIFEST_JSON;isOutput=true]$MANIFEST_DATA" + echo "$MANIFEST_DATA" | jq -r . + displayName: "Populate Image Manifest Data" + name: data + env: + IMAGE_PLATFORM_DATA: '${{ convertToJson(parameters.platforms) }}' + diff --git a/.pipelines/build/image.steps.yaml b/.pipelines/build/image.steps.yaml new file mode 100644 index 0000000000..e8403a6241 --- /dev/null +++ b/.pipelines/build/image.steps.yaml @@ -0,0 +1,85 @@ +parameters: +- name: arch + type: string + default: "" + +- name: name + type: string + default: "" + +- name: os + type: string + default: "" + +- name: build_tag + type: string + default: "" + +- name: archive_file + type: string + default: '$(name)-$(os)-$(platform)-$(Tag)' + +- name: source + type: string + default: drop_setup_env_source + +- name: extra_args + type: string + default: '' + + +- name: default_args + type: object + default: + - "--target $(os) " + - "--platform $(os)/$(arch) " + - "--provenance false " + +- name: common_build_args + type: object + default: + - "PLATFORM=$(os)/$(arch) " + - "ARCH=$(arch) " + - "OS=$(os) " + - "VERSION=$(Tag) " + + +steps: +- task: DownloadPipelineArtifact@2 + inputs: + targetPath: $(Build.SourcesDirectory)/dst/artifacts + artifact: ${{ parameters.source }} + +- task: onebranch.pipeline.containercontrol@1 + displayName: "Login to ACR" + inputs: + command: login + endpoint: $(ACR_ARM_SERVICE_CONNECTION) + +# Build and push the Docker image +- task: onebranch.pipeline.imagebuildinfo@1 + displayName: Image Build + retryCountOnTaskFailure: 3 + timeoutInMinutes: 30 + inputs: + endpoint: $(ACR_ARM_SERVICE_CONNECTION) + registry: $(ACR).azurecr.io + repositoryName: $(os)-$(arch)/${{ parameters.name }} + os: '${{ parameters.os }}' + buildkit: 1 + dockerFileRelPath: artifacts/Dockerfile + enable_network: true + enable_pull: true + build_tag: ${{ parameters.build_tag }} + enable_acr_push: true + saveImageToPath: images/$(os)-$(arch)/${{ parameters.archive_file }}.tar.gz + enabled_cache: false + #compress: true + #saveMetadataToPath: images/$(os)-$(arch)/metadata/${{ parameters.archive_file }}-metadata.json + #enable_isolated_acr_push: true + + # Docker Build Arguments + ${{ if parameters.common_build_args }}: + arguments: --build-arg ${{ join('--build-arg ', parameters.common_build_args) }} ${{ parameters.extra_args }} ${{ join(' ', parameters.default_args) }} + ${{ else }}: + arguments: ${{ parameters.extra_args }} ${{ join(' ', parameters.default_args) }} diff --git a/.pipelines/build/images.jobs.yaml b/.pipelines/build/images.jobs.yaml new file mode 100644 index 0000000000..9d107b0b03 --- /dev/null +++ b/.pipelines/build/images.jobs.yaml @@ -0,0 +1,147 @@ +parameters: +- name: images + type: jobList + + +jobs: +- ${{ each 
job_data in parameters.images }}: + - job: pkg_${{ job_data.job }} + displayName: "Prepare Image Package - ${{ job_data.displayName }} -" + ${{ if job_data.strategy }}: + strategy: ${{ job_data.strategy }} + ${{ if job_data.dependsOn }}: + dependsOn: ${{ job_data.dependsOn }} + pool: + type: linux + ${{ if eq(job_data.job, 'linux_arm64') }}: + hostArchitecture: arm64 + + variables: + ob_artifactSuffix: _$(name) + ob_git_checkout: false + # keep these variables concerned with instrumentation. + GEN_DIR: $(Build.SourcesDirectory)/temp + REPO_ROOT: $(Build.SourcesDirectory)/${{ job_data.templateContext.repositoryArtifact }} + OUT_DIR: $(Build.ArtifactStagingDirectory) + DROPGZ_VERSION: v0.0.12 + DEBUG: $[ coalesce(variables['System.Debug'], 'False') ] + ob_outputDirectory: $(Build.ArtifactStagingDirectory) + ${{ if eq(job_data.job, 'linux_amd64') }}: + DEBIAN_FRONTEND: noninteractive + LinuxContainerImage: 'onebranch.azurecr.io/linux/ubuntu-2404:latest' + #mcr.microsoft.com/mirror/docker/library/ubuntu:24.04' + #LinuxContainerImage: 'mcr.microsoft.com/onebranch/azurelinux/build:3.0' + OS: linux + ARCH: amd64 + ${{ elseif eq(job_data.job, 'windows_amd64') }}: + LinuxContainerImage: 'mcr.microsoft.com/onebranch/azurelinux/build:3.0' + ob_enable_qemu: true + OS: windows + ARCH: amd64 + ${{ elseif eq(job_data.job, 'linux_arm64') }}: + LinuxContainerImage: 'mcr.microsoft.com/onebranch/azurelinux/build:3.0' + OS: linux + ARCH: arm64 + GOARCH: arm64 + steps: + - task: DownloadPipelineArtifact@2 + inputs: + targetPath: $(REPO_ROOT) + artifact: '${{ job_data.templateContext.repositoryArtifact }}' + + - task: GoTool@0 + inputs: + version: '$(GOVERSION)' + + - task: ShellScript@2 + inputs: + scriptPath: $(REPO_ROOT)/${{ job_data.templateContext.buildScript }} + + - script: | + ls -la "$SOURCE" + cp "$SOURCE" "$DEST" + ls -la "$DEST" + env: + SOURCE: $(REPO_ROOT)/${{ job_data.templateContext.obDockerfile }} + DEST: $(OUT_DIR)/Dockerfile + + - task: onebranch.pipeline.signing@1 + inputs: + command: 'sign' + signing_profile: 'external_distribution' + files_to_sign: '**/*' + search_root: $(OUT_DIR) + + + - task: ShellScript@2 + displayName: "Package with DropGZ" + condition: and( + succeeded(), + eq(variables.packageWithDropGZ, 'True')) + inputs: + scriptPath: $(REPO_ROOT)/.pipelines/build/scripts/dropgz.sh + + - ${{ if not(contains(job_data.job, 'linux')) }}: + - task: onebranch.pipeline.signing@1 + condition: and( + succeeded(), + eq(variables.packageWithDropGZ, 'True')) + inputs: + command: 'sign' + signing_profile: 'external_distribution' + files_to_sign: '**/dropgz*' + search_root: $(OUT_DIR) + + # OneBranch artifacts are stored on a Windows machine which obliterates + # Linux file permissions. + # This task is added (along with ob_extract_root_artifact in jobs that + # download the artifact) to protect those file permissions from changing + # during image build time. 
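+    # Without it, Linux files in the artifact can come back with their
+    # permission bits (e.g. the executable bit) stripped.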
+ # + # See: https://eng.ms/docs/products/onebranch/build/containerbasedworkflow/dockerimagesandacr/preservefilepermissionsfordockerbuild + - script: | + tar cvf "$OUT_DIR"/root_artifact.tar --exclude=root_artifact.tar "$OUT_DIR" + displayName: "Zip to Preserve Linux File Permissions" + + + - job: images_${{ job_data.job }} + displayName: "Build Images - ${{ job_data.displayName }} -" + dependsOn: + - pkg_${{ job_data.job }} + strategy: ${{ job_data.strategy }} + pool: + os: linux + type: docker + ${{ if eq(job_data.job, 'linux_arm64') }}: + hostArchitecture: arm64 +# ${{ else }}: +# LinuxHostVersion: 'AzLinux3.0AMD64' + variables: + ob_outputDirectory: $(Build.ArtifactStagingDirectory) + ob_artifactSuffix: _$(name) + ob_git_checkout: false + ob_extract_root_artifact: true + ${{ if eq(job_data.job, 'linux_amd64') }}: + LinuxContainerImage: 'mcr.microsoft.com/onebranch/azurelinux/build:3.0' + ARCH: amd64 + OS: linux + ${{ elseif eq(job_data.job, 'windows_amd64') }}: + LinuxContainerImage: 'mcr.microsoft.com/onebranch/azurelinux/build:3.0' + ARCH: amd64 + OS: windows + ${{ elseif eq(job_data.job, 'linux_arm64') }}: + LinuxContainerImage: 'mcr.microsoft.com/onebranch/azurelinux/build:3.0' + ARCH: arm64 + OS: linux + GOARCH: arm64 + + steps: + - template: image.steps.yaml + parameters: + arch: $(ARCH) + os: $(OS) + name: $(name) + build_tag: $(imageTag) + extra_args: $(extraArgs) --build-arg ARTIFACT_DIR="/__w/1/a" + archive_file: $(archiveName)-$(OS)-$(ARCH)-$(archiveVersion) + source: drop_build_pkg_${{ job_data.job }}_$(name) diff --git a/.pipelines/build/manifests.jobs.yaml b/.pipelines/build/manifests.jobs.yaml new file mode 100644 index 0000000000..bcecbeb2ae --- /dev/null +++ b/.pipelines/build/manifests.jobs.yaml @@ -0,0 +1,66 @@ +parameters: +- name: generate + type: jobList + + +jobs: +- ${{ each job_data in parameters.generate }}: + - job: ${{ job_data.job }}_generate_manifest + displayName: "Generate Image Manifest - ${{ job_data.job }}" + pool: + type: linux + variables: + ob_outputDirectory: $(Build.SourcesDirectory)/out + ob_git_checkout: false + steps: + - template: /.pipelines/build/generate-manifest.steps.yaml + parameters: + platforms: ${{ job_data.templateContext.platforms }} + + - job: ${{ job_data.job }}_publish_manifest + displayName: "Publish Image Manifest - ${{ job_data.job }}" + dependsOn: + - ${{ job_data.job }}_generate_manifest + pool: + type: docker + os: linux + variables: + LinuxContainerImage: 'mcr.microsoft.com/onebranch/azurelinux/build:3.0' + ob_outputDirectory: $(Build.SourcesDirectory)/out + ob_git_checkout: false + + MANIFEST_JSON: $[ dependencies.${{ job_data.job }}_generate_manifest.outputs['data.MANIFEST_JSON'] ] + steps: + - template: /.pipelines/build/publish-manifest.steps.yaml + parameters: + image_repository: ${{ job_data.templateContext.name }} + image_tag: ${{ job_data.templateContext.image_tag }} + manifest_data: $(MANIFEST_JSON) + + - job: ${{ job_data.job }}_package_manifest + displayName: "Package Image Manifest - ${{ job_data.job }}" + dependsOn: + - ${{ job_data.job }}_publish_manifest + pool: + type: linux + variables: + LinuxContainerImage: 'mcr.microsoft.com/onebranch/azurelinux/build:3.0' + ob_outputDirectory: $(Build.SourcesDirectory)/out + ob_git_checkout: false + steps: + # Leverage new feature from https://github.com/oras-project/oras/issues/1366 in the future + # :v1 has no impact, but is required for oras + # Copy manifest from ACR + - bash: | + oras copy $(ACR).azurecr.io/$(IMAGE_REPO_PATH)/${{ job_data.templateContext.name 
}}:${{ job_data.templateContext.image_tag }} \ + --to-oci-layout ./${{ job_data.templateContext.image_tag }}_artifact:v1 + name: oras_copy + displayName: "Oras Copy" + + # Generate tarball + - bash: | + TARGET_DIR=$(ob_outputDirectory) + mkdir -p "$TARGET_DIR" + tar -cvzf ${TARGET_DIR}/${{ job_data.templateContext.image_tag }}_artifact.tar.gz ./${{ job_data.templateContext.image_tag }}_artifact + name: tarball + displayName: "Generate tarball" diff --git a/.pipelines/build/ob-prepare.steps.yaml b/.pipelines/build/ob-prepare.steps.yaml new file mode 100644 index 0000000000..863f92d246 --- /dev/null +++ b/.pipelines/build/ob-prepare.steps.yaml @@ -0,0 +1,99 @@ +steps: +- template: utils/rename-dockerfile-references.steps.yaml + parameters: + topic: "Linux - ipv6-hp-bpf" + replace_references: true + source_path: bpf-prog/ipv6-hp-bpf + target_path: bpf-prog/ipv6-hp-bpf + source_dockerfile: linux.Dockerfile + +# - template: utils/rename-dockerfile-references.steps.yaml +# parameters: +# topic: "Windows - npm" +# replace_references: true +# working_directory: $(ACN_DIR) +# source_path: npm +# target_path: npm-windows +# source_dockerfile: windows.Dockerfile + +# - template: utils/rename-dockerfile-references.steps.yaml +# parameters: +# topic: "Linux - npm" +# replace_references: true +# working_directory: $(ACN_DIR) +# source_path: npm +# target_path: npm +# source_dockerfile: linux.Dockerfile + +- bash: | + rm -rf .hooks .github + displayName: "Remove Unnecessary Dirs from Source" + workingDirectory: $(Build.SourcesDirectory)/azure-container-networking + +- task: CopyFiles@2 + displayName: "Add Repo to Container Artifacts" + inputs: + sourceFolder: $(Build.SourcesDirectory)/azure-container-networking + targetFolder: $(Build.ArtifactStagingDirectory) + +- script: | + STORAGE_ID=$(echo "${BUILD_BUILDNUMBER//./-}") + echo "##vso[task.setvariable variable=StorageID;isOutput=true]$STORAGE_ID" + echo "StorageID: $STORAGE_ID" + + COMMITID=$(git rev-parse --short HEAD) + COMMITID="$COMMITID"-"$(date "+%d%H%M")" + echo "##vso[task.setvariable variable=commitID;isOutput=true]$COMMITID" + echo "commitID: $COMMITID" + + TAG=$(make version) + echo "##vso[task.setvariable variable=Tag;isOutput=true]$TAG" + echo "Tag: $TAG" + + IMAGEREPOPATH="artifact/dd590928-4e04-48cb-9d3d-ee06c5f0e17f/$BUILD_TYPE" + echo "##vso[task.setvariable variable=imageRepositoryPath;isOutput=true]$IMAGEREPOPATH" + echo "imageRepositoryPath: $IMAGEREPOPATH" + + AZUREIPAMVERSION=$(make azure-ipam-version) + echo "##vso[task.setvariable variable=azureIpamVersion;isOutput=true]$AZUREIPAMVERSION" + echo "azureIpamVersion: $AZUREIPAMVERSION" + + AZUREIPMASQMERGERVERSION=$(make azure-ip-masq-merger-version) + echo "##vso[task.setvariable variable=azureIpMasqMergerVersion;isOutput=true]$AZUREIPMASQMERGERVERSION" + echo "azureIpMasqMergerVersion: $AZUREIPMASQMERGERVERSION" + + AZUREIPTABLESMONITORVERSION=$(make azure-iptables-monitor-version) + echo "##vso[task.setvariable variable=azureIptablesMonitorVersion;isOutput=true]$AZUREIPTABLESMONITORVERSION" + echo "azureIptablesMonitorVersion: $AZUREIPTABLESMONITORVERSION" + + CNIVERSION=$(make cni-version) + echo "##vso[task.setvariable variable=cniVersion;isOutput=true]$CNIVERSION" + echo "cniVersion: $CNIVERSION" + + CNSVERSION=$(make cns-version) + echo "##vso[task.setvariable variable=cnsVersion;isOutput=true]$CNSVERSION" + echo "cnsVersion: $CNSVERSION" + + IPV6HPBPFVERSION=$(make ipv6-hp-bpf-version) + echo "##vso[task.setvariable variable=ipv6HpBpfVersion;isOutput=true]$IPV6HPBPFVERSION" 
+ echo "ipv6HpBpfVersion: $IPV6HPBPFVERSION" + + NPMVERSION=$(make npm-version) + echo "##vso[task.setvariable variable=npmVersion;isOutput=true]$NPMVERSION" + echo "npmVersion: $NPMVERSION" + + cat /etc/os-release + uname -a + sudo chown -R $(whoami):$(whoami) . + go version + go env + which go + echo $PATH + echo "------" + echo $(Build.QueuedBy) + echo $(Build.Reason) # manual, PR, IndividualCI + echo $(Build.SourceBranch) + name: "EnvironmentalVariables" + displayName: "Set environmental variables" + condition: always() + workingDirectory: $(ACN_DIR) diff --git a/.pipelines/build/publish-manifest.steps.yaml b/.pipelines/build/publish-manifest.steps.yaml new file mode 100644 index 0000000000..cf074ec194 --- /dev/null +++ b/.pipelines/build/publish-manifest.steps.yaml @@ -0,0 +1,25 @@ +parameters: +- name: image_repository + type: string + +- name: image_tag + type: string + +- name: manifest_data + type: string + + +steps: +- task: onebranch.pipeline.containercontrol@1 + displayName: "Login to ACR" + inputs: + command: login + endpoint: $(ACR_ARM_SERVICE_CONNECTION) + +- task: onebranch.pipeline.imagebuildinfo@1 + inputs: + repositoryName: ${{ parameters.image_repository }} + registry: $(ACR).azurecr.io + build_tag: ${{ parameters.image_tag }} + manifest_push: true + manifest: ${{ parameters.manifest_data }} diff --git a/.pipelines/build/scripts/azure-ip-masq-merger.sh b/.pipelines/build/scripts/azure-ip-masq-merger.sh new file mode 100644 index 0000000000..03c2c4efbd --- /dev/null +++ b/.pipelines/build/scripts/azure-ip-masq-merger.sh @@ -0,0 +1,18 @@ +#!/bin/bash +set -eux + +[[ $OS =~ windows ]] && { echo "azure-ip-masq-merger is not supported on Windows"; exit 1; } +FILE_EXT='' + +export CGO_ENABLED=0 + +mkdir -p "$OUT_DIR"/bin +mkdir -p "$OUT_DIR"/files + +pushd "$REPO_ROOT"/azure-ip-masq-merger + GOOS="$OS" go build -v -a -trimpath \ + -o "$OUT_DIR"/bin/azure-ip-masq-merger"$FILE_EXT" \ + -ldflags "-X github.com/Azure/azure-container-networking/azure-ip-masq-merger/internal/buildinfo.Version=$AZURE_IP_MASQ_MERGER_VERSION -X main.version=$AZURE_IP_MASQ_MERGER_VERSION" \ + -gcflags="-dwarflocationlists=true" \ + . +popd diff --git a/.pipelines/build/scripts/azure-ipam.sh b/.pipelines/build/scripts/azure-ipam.sh new file mode 100644 index 0000000000..6bf2dfd96d --- /dev/null +++ b/.pipelines/build/scripts/azure-ipam.sh @@ -0,0 +1,19 @@ +#!/bin/bash +set -eux + +[[ $OS =~ windows ]] && FILE_EXT='.exe' || FILE_EXT='' + +export CGO_ENABLED=0 + +mkdir -p "$OUT_DIR"/bin +mkdir -p "$OUT_DIR"/files + +pushd "$REPO_ROOT"/azure-ipam + GOOS="$OS" go build -v -a -trimpath \ + -o "$OUT_DIR"/bin/azure-ipam"$FILE_EXT" \ + -ldflags "-X github.com/Azure/azure-container-networking/azure-ipam/internal/buildinfo.Version="$AZURE_IPAM_VERSION" -X main.version="$AZURE_IPAM_VERSION"" \ + -gcflags="-dwarflocationlists=true" \ + . 
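+  # The conflists staged under files/ are later bundled into the dropgz
+  # embedded payload (see .pipelines/build/scripts/dropgz.sh).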
+ + cp *.conflist "$OUT_DIR"/files/ +popd diff --git a/.pipelines/build/scripts/azure-iptables-monitor.sh b/.pipelines/build/scripts/azure-iptables-monitor.sh new file mode 100644 index 0000000000..5ef9daacb8 --- /dev/null +++ b/.pipelines/build/scripts/azure-iptables-monitor.sh @@ -0,0 +1,18 @@ +#!/bin/bash +set -eux + +[[ $OS =~ windows ]] && { echo "azure-iptables-monitor is not supported on Windows"; exit 1; } +FILE_EXT='' + +export CGO_ENABLED=0 + +mkdir -p "$OUT_DIR"/bin +mkdir -p "$OUT_DIR"/files + +pushd "$REPO_ROOT"/azure-iptables-monitor + GOOS="$OS" go build -v -a -trimpath \ + -o "$OUT_DIR"/bin/azure-iptables-monitor"$FILE_EXT" \ + -ldflags "-s -w -X github.com/Azure/azure-container-networking/azure-iptables-monitor/internal/buildinfo.Version=$AZURE_IPTABLES_MONITOR_VERSION -X main.version=$AZURE_IPTABLES_MONITOR_VERSION" \ + -gcflags="-dwarflocationlists=true" \ + . +popd diff --git a/.pipelines/build/scripts/cni.sh b/.pipelines/build/scripts/cni.sh new file mode 100644 index 0000000000..8d9c210e46 --- /dev/null +++ b/.pipelines/build/scripts/cni.sh @@ -0,0 +1,65 @@ +#!/bin/bash +set -eux + +[[ $OS =~ windows ]] && FILE_EXT='.exe' || FILE_EXT='' + +mkdir -p "$OUT_DIR"/files +mkdir -p "$OUT_DIR"/bin + +export CGO_ENABLED=0 + + +CNI_NET_DIR="$REPO_ROOT"/cni/network/plugin +pushd "$CNI_NET_DIR" + GOOS="$OS" go build -v -a -trimpath \ + -o "$OUT_DIR"/bin/azure-vnet"$FILE_EXT" \ + -ldflags "-X main.version="$CNI_VERSION"" \ + -gcflags="-dwarflocationlists=true" \ + ./main.go +popd + +STATELESS_CNI_BUILD_DIR="$REPO_ROOT"/cni/network/stateless +pushd "$STATELESS_CNI_BUILD_DIR" + GOOS="$OS" go build -v -a -trimpath \ + -o "$OUT_DIR"/bin/azure-vnet-stateless"$FILE_EXT" \ + -ldflags "-X main.version="$CNI_VERSION"" \ + -gcflags="-dwarflocationlists=true" \ + ./main.go +popd + +CNI_IPAM_DIR="$REPO_ROOT"/cni/ipam/plugin +pushd "$CNI_IPAM_DIR" + GOOS="$OS" go build -v -a -trimpath \ + -o "$OUT_DIR"/bin/azure-vnet-ipam"$FILE_EXT" \ + -ldflags "-X main.version="$CNI_VERSION"" \ + -gcflags="-dwarflocationlists=true" \ + ./main.go +popd + +CNI_IPAMV6_DIR="$REPO_ROOT"/cni/ipam/pluginv6 +pushd "$CNI_IPAMV6_DIR" + GOOS="$OS" go build -v -a -trimpath \ + -o "$OUT_DIR"/bin/azure-vnet-ipamv6"$FILE_EXT" \ + -ldflags "-X main.version="$CNI_VERSION"" \ + -gcflags="-dwarflocationlists=true" \ + ./main.go +popd + +CNI_TELEMETRY_DIR="$REPO_ROOT"/cni/telemetry/service +pushd "$CNI_TELEMETRY_DIR" + GOOS="$OS" go build -v -a -trimpath \ + -o "$OUT_DIR"/bin/azure-vnet-telemetry"$FILE_EXT" \ + -ldflags "-X main.version="$CNI_VERSION" -X "$CNI_AI_PATH"="$CNI_AI_ID"" \ + -gcflags="-dwarflocationlists=true" \ + ./telemetrymain.go +popd + +pushd "$REPO_ROOT"/cni + cp azure-$OS.conflist "$OUT_DIR"/files/azure.conflist + cp azure-$OS-swift.conflist "$OUT_DIR"/files/azure-swift.conflist + cp azure-linux-multitenancy-transparent-vlan.conflist "$OUT_DIR"/files/azure-multitenancy-transparent-vlan.conflist + cp azure-$OS-swift-overlay.conflist "$OUT_DIR"/files/azure-swift-overlay.conflist + cp azure-$OS-swift-overlay-dualstack.conflist "$OUT_DIR"/files/azure-swift-overlay-dualstack.conflist + cp azure-$OS-multitenancy.conflist "$OUT_DIR"/files/multitenancy.conflist + cp "$REPO_ROOT"/telemetry/azure-vnet-telemetry.config "$OUT_DIR"/files/azure-vnet-telemetry.config +popd diff --git a/.pipelines/build/scripts/cns.sh b/.pipelines/build/scripts/cns.sh new file mode 100644 index 0000000000..fbb4c4a221 --- /dev/null +++ b/.pipelines/build/scripts/cns.sh @@ -0,0 +1,21 @@ +#!/bin/bash +set -eux + +[[ $OS =~ windows ]] && 
FILE_EXT='.exe' || FILE_EXT='' + +export CGO_ENABLED=0 + +mkdir -p "$OUT_DIR"/files +mkdir -p "$OUT_DIR"/bin +mkdir -p "$OUT_DIR"/scripts + +pushd "$REPO_ROOT"/cns + GOOS="$OS" go build -v -a \ + -o "$OUT_DIR"/bin/azure-cns"$FILE_EXT" \ + -ldflags "-X main.version="$CNS_VERSION" -X "$CNS_AI_PATH"="$CNS_AI_ID"" \ + -gcflags="-dwarflocationlists=true" \ + service/*.go + cp kubeconfigtemplate.yaml "$OUT_DIR"/files/kubeconfigtemplate.yaml + cp configuration/cns_config.json "$OUT_DIR"/files/cns_config.json + cp ../npm/examples/windows/setkubeconfigpath.ps1 "$OUT_DIR"/scripts/setkubeconfigpath.ps1 +popd diff --git a/.pipelines/build/scripts/dropgz.sh b/.pipelines/build/scripts/dropgz.sh new file mode 100644 index 0000000000..711a0bbfc0 --- /dev/null +++ b/.pipelines/build/scripts/dropgz.sh @@ -0,0 +1,64 @@ +#!/bin/bash +set -eux + +function _remove_exe_extension() { + local file_path + file_path="${1}" + file_dir=$(dirname "$file_path") + file_dir=$(realpath "$file_dir") + file_basename=$(basename "$file_path" '.exe') + mv "$file_path" "$file_dir"/"$file_basename" +} +function files::remove_exe_extensions() { + local target_dir + target_dir="${1}" + + for file in $(find "$target_dir" -type f -name '*.exe'); do + _remove_exe_extension "$file" + done +} + +[[ $OS =~ windows ]] && FILE_EXT='.exe' || FILE_EXT='' + +export CGO_ENABLED=0 + +mkdir -p "$GEN_DIR" +mkdir -p "$OUT_DIR"/bin + +DROPGZ_BUILD_DIR=$(mktemp -d -p "$GEN_DIR") +PAYLOAD_DIR=$(mktemp -d -p "$GEN_DIR") +DROPGZ_VERSION="${DROPGZ_VERSION:-v0.0.12}" +DROPGZ_MOD_DOWNLOAD_PATH=""$ACN_PACKAGE_PATH"/dropgz@"$DROPGZ_VERSION"" +DROPGZ_MOD_DOWNLOAD_PATH=$(echo "$DROPGZ_MOD_DOWNLOAD_PATH" | tr '[:upper:]' '[:lower:]') + +mkdir -p "$DROPGZ_BUILD_DIR" + +echo >&2 "##[section]Construct DropGZ Embedded Payload" +pushd "$PAYLOAD_DIR" + [[ -d "$OUT_DIR"/files ]] && cp "$OUT_DIR"/files/* . || true + [[ -d "$OUT_DIR"/scripts ]] && cp "$OUT_DIR"/scripts/* . || true + [[ -d "$OUT_DIR"/bin ]] && cp "$OUT_DIR"/bin/* . || true + + [[ $OS =~ windows ]] && files::remove_exe_extensions . + + sha256sum * > sum.txt + gzip --verbose --best --recursive . + + for file in $(find . -name '*.gz'); do + mv "$file" "${file%%.gz}" + done +popd + +echo >&2 "##[section]Download DropGZ ($DROPGZ_VERSION)" +GOPATH="$DROPGZ_BUILD_DIR" \ + go mod download "$DROPGZ_MOD_DOWNLOAD_PATH" + +echo >&2 "##[section]Build DropGZ with Embedded Payload" +pushd "$DROPGZ_BUILD_DIR"/pkg/mod/"$DROPGZ_MOD_DOWNLOAD_PATH" + mv "$PAYLOAD_DIR"/* pkg/embed/fs/ + GOOS="$OS" go build -v -trimpath -a \ + -o "$OUT_DIR"/bin/dropgz"$FILE_EXT" \ + -ldflags "-X github.com/Azure/azure-container-networking/dropgz/internal/buildinfo.Version="$DROPGZ_VERSION"" \ + -gcflags="-dwarflocationlists=true" \ + main.go +popd diff --git a/.pipelines/build/scripts/ipv6-hp-bpf.sh b/.pipelines/build/scripts/ipv6-hp-bpf.sh new file mode 100644 index 0000000000..121ad88399 --- /dev/null +++ b/.pipelines/build/scripts/ipv6-hp-bpf.sh @@ -0,0 +1,164 @@ +#!/bin/bash +set -eux + +function findcp::shared_library() { + local filename + filename="${1}" + local copy_to + copy_to="${2}" + local search_dirs + search_dirs="${@:3}" + + for dir in $search_dirs; do + if [[ -d "$dir" ]]; then + if [[ "$filename" =~ ^.*\.so.*$ ]]; then + found=$(find "$dir" -name "$filename") + else + found=$(find "$dir" -name ""$filename".so*") + fi + + if [[ -n $found ]]; then + break; + fi + else + echo >&2 "##[debug]Not a directory. Skipping..." 
+ echo >&2 "##[debug]Dir: "$dir"" + fi + done + + echo -e >&2 "##[debug]Found: \n$found" + select=$(echo "$found" | head -n1) + + echo -e >&2 "##[debug]Selected: \n$select" + echo >&2 "##[debug]cp "$select" "$copy_to"" + cp "$select" "$copy_to" +} + + +[[ $OS =~ windows ]] && FILE_EXT='.exe' || FILE_EXT='' + +export CGO_ENABLED=0 +export C_INCLUDE_PATH=/usr/include/bpf + +mkdir -p "$OUT_DIR"/bin +mkdir -p "$OUT_DIR"/lib + +# Package up Needed C Files +if [[ -f /etc/debian_version ]];then + apt-get update -y + apt-get install -y --no-install-recommends llvm clang linux-libc-dev linux-headers-generic libbpf-dev libc6-dev nftables iproute2 + if [[ $ARCH =~ amd64 ]]; then + apt-get install -y --no-install-recommends gcc-multilib + + ARCH=x86_64-linux-gnu + cp /usr/lib/"$ARCH"/ld-linux-x86-64.so.2 "$OUT_DIR"/lib/ + + elif [[ $ARCH =~ arm64 ]]; then + apt-get install -y --no-install-recommends gcc-aarch64-linux-gnu + + ARCH=aarch64-linux-gnu + cp /usr/lib/"$ARCH"/ld-linux-aarch64.so.1 "$OUT_DIR"/lib/ + fi + + for dir in /usr/include/"$ARCH"/*; do + ln -sfn "$dir" /usr/include/$(basename "$dir") + done + + echo >&2 "##[group]lib $ARCH directory list" + ls -la /lib/"$ARCH" || true + echo >&2 "##[endgroup]" + echo >&2 "##[group]usr lib directory list" + ls -la /usr/lib || true + echo >&2 "##[endgroup]" + echo >&2 "##[group]usr lib $ARCH directory list" + ls -la /usr/lib/"$ARCH" || true + echo >&2 "##[endgroup]" + + # Copy Shared Library Files + ln -sfn /usr/include/"$ARCH"/asm /usr/include/asm + cp /lib/"$ARCH"/libnftables.so.1 "$OUT_DIR"/lib/ + cp /lib/"$ARCH"/libedit.so.2 "$OUT_DIR"/lib/ + cp /lib/"$ARCH"/libc.so.6 "$OUT_DIR"/lib/ + cp /lib/"$ARCH"/libmnl.so.0 "$OUT_DIR"/lib/ + cp /lib/"$ARCH"/libnftnl.so.11 "$OUT_DIR"/lib/ + cp /lib/"$ARCH"/libxtables.so.12 "$OUT_DIR"/lib/ + cp /lib/"$ARCH"/libjansson.so.4 "$OUT_DIR"/lib/ + cp /lib/"$ARCH"/libgmp.so.10 "$OUT_DIR"/lib/ + cp /lib/"$ARCH"/libtinfo.so.6 "$OUT_DIR"/lib/ + cp /lib/"$ARCH"/libbsd.so.0 "$OUT_DIR"/lib/ + cp /lib/"$ARCH"/libmd.so.0 "$OUT_DIR"/lib/ + + +# Mariner +else + tdnf install -y llvm clang libbpf-devel nftables gcc binutils iproute glibc + if [[ $ARCH =~ amd64 ]]; then + ARCH=x86_64-linux-gnu + if [[ -f '/usr/lib/ld-linux-x86-64.so.2' ]]; then + cp /usr/lib/ld-linux-x86-64.so.2 "$OUT_DIR"/lib/ + fi + elif [[ $ARCH =~ arm64 ]]; then + ARCH=aarch64-linux-gnu + #tdnf install -y glibc-devel.i386 + if [[ -f '/usr/lib/ld-linux-aarch64.so.1' ]]; then + cp /usr/lib/ld-linux-aarch64.so.1 "$OUT_DIR"/lib/ + fi + fi + for dir in /usr/include/"$ARCH"/*; do + if [[ -d $dir ]]; then + ln -sfn "$dir" /usr/include/$(basename "$dir") + elif [[ -f "$dir" ]]; then + ln -Tsfn "$dir" /usr/include/$(basename "$dir") + fi + done + + echo >&2 "##[group]usr include $ARCH directory list" + ls -la /usr/include/"$ARCH" || true + echo >&2 "##[endgroup]" + + echo >&2 "##[group]usr lib directory list" + ls -la /usr/lib || true + echo >&2 "##[endgroup]" + + echo >&2 "##[group]usr lib ldscripts directory list" + ls -la /usr/lib/ldscripts || true + echo >&2 "##[endgroup]" + + # Copy Shared Library Files + ln -sfn /usr/include/"$ARCH"/asm /usr/include/asm + cp /usr/lib/libnftables.so.1 "$OUT_DIR"/lib/ + cp /usr/lib/libedit.so.0 "$OUT_DIR"/lib/ + cp /usr/lib/libc.so.6 "$OUT_DIR"/lib/ + cp /usr/lib/libmnl.so.0 "$OUT_DIR"/lib/ + cp /usr/lib/libnftnl.so.11 "$OUT_DIR"/lib/ + cp /usr/lib/libxtables.so.12 "$OUT_DIR"/lib/ + cp /usr/lib/libjansson.so.4 "$OUT_DIR"/lib/ + cp /usr/lib/libgmp.so.10 "$OUT_DIR"/lib/ + cp /usr/lib/libtinfo.so.6 "$OUT_DIR"/lib/ + + cp 
/usr/lib/libbsd.so.0 "$OUT_DIR"/lib/ || tdnf install -y libbsd-devel
+    findcp::shared_library libbsd.so "$OUT_DIR"/lib/ /usr/lib /lib /lib32 /lib64
+    cp /usr/lib/libmd.so.0 "$OUT_DIR"/lib/ || tdnf install -y libmd-devel
+    findcp::shared_library libmd.so "$OUT_DIR"/lib/ /usr/lib /lib /lib32 /lib64
+fi
+
+
+# Add Needed Binaries
+cp /usr/sbin/nft "$OUT_DIR"/bin/nft"$FILE_EXT"
+cp /sbin/ip "$OUT_DIR"/bin/ip"$FILE_EXT"
+
+
+# Build IPv6 HP BPF
+pushd "$REPO_ROOT"/bpf-prog/ipv6-hp-bpf
+    cp ./cmd/ipv6-hp-bpf/*.go .
+
+    if [[ "$DEBUG" =~ ^[T|t]rue$ ]]; then
+        echo -e "\n#define DEBUG" >> ./include/helper.h
+    fi
+
+    go generate ./...
+    GOOS="$OS" go build -v -a -trimpath \
+        -o "$OUT_DIR"/bin/ipv6-hp-bpf"$FILE_EXT" \
+        -ldflags "-X main.version="$IPV6_HP_BPF_VERSION"" \
+        -gcflags="-dwarflocationlists=true" .
+popd
diff --git a/.pipelines/build/scripts/npm.sh b/.pipelines/build/scripts/npm.sh
new file mode 100644
index 0000000000..29e274b9f2
--- /dev/null
+++ b/.pipelines/build/scripts/npm.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+set -eux
+
+[[ $OS =~ windows ]] && FILE_EXT='.exe' || FILE_EXT=''
+
+export CGO_ENABLED=0
+
+mkdir -p "$OUT_DIR"/files
+mkdir -p "$OUT_DIR"/bin
+mkdir -p "$OUT_DIR"/scripts
+
+pushd "$REPO_ROOT"/npm
+    GOOS="$OS" go build -a -v -trimpath \
+        -o "$OUT_DIR"/bin/azure-npm"$FILE_EXT" \
+        -ldflags "-X main.version="$NPM_VERSION" -X "$NPM_AI_PATH"="$NPM_AI_ID"" \
+        -gcflags="-dwarflocationlists=true" \
+        ./cmd/*.go
+
+    cp ./examples/windows/kubeconfigtemplate.yaml "$OUT_DIR"/files/kubeconfigtemplate.yaml
+    cp ./examples/windows/setkubeconfigpath.ps1 "$OUT_DIR"/scripts/setkubeconfigpath.ps1
+    cp ./examples/windows/setkubeconfigpath-capz.ps1 "$OUT_DIR"/scripts/setkubeconfigpath-capz.ps1
+popd
diff --git a/.pipelines/build/utils/rename-dockerfile-references.steps.yaml b/.pipelines/build/utils/rename-dockerfile-references.steps.yaml
new file mode 100644
index 0000000000..0e9060a642
--- /dev/null
+++ b/.pipelines/build/utils/rename-dockerfile-references.steps.yaml
@@ -0,0 +1,171 @@
+################################################################################
+#################### Rename Dockerfile References Template #####################
+################################################################################
+#
+# Description:
+# OneBranch requires that dockerfiles used in their builds are named
+# 'Dockerfile'. That requirement motivated this template, which makes it easy
+# to create a new folder for each operating system required for the build.
+#
+# This template can also perform simple naming replacements for the updated
+# Dockerfile name within a specified directory.
+#
+# For improved clarity, all other top-level dockerfiles are removed from the
+# target - or replacement - directory.
+#
+# Parameters:
+# source_path (string) - The path (absolute or relative to $working_directory)
+# to the desired source folder. If the folder does not exist, a new one will
+# be created. Any matching files will be overwritten.
+#
+# source_dockerfile (string) - The file name of the source dockerfile.
+# Filename only.
+#
+# target_path (string) - The path (absolute or relative to $working_directory)
+# to the desired target folder. If the folder does not exist, a new one will
+# be created. Any matching files will be overwritten.
+#
+# target_dockerfile (string|default) - The new file name of the dockerfile.
+# Filename only. Will likely never need to be used.
+#
+# working_directory (string|default) - The directory in which to perform the
+# operations.
+#
+# replace_references (boolean|default) - If this is set to true, this module
+# will rename references to the moved Dockerfile to the new
+# $target_dockerfile. This is a dumb sed replace; set with care.
+#
+# replace_path (string|default) - The directory in which to search for
+# references to the old dockerfile name. Defaults to $target_path if not
+# provided.
+#
+# dockerfile_cleanup (boolean|default) - If this is set to true, this module
+# will remove any dockerfiles that are unrelated to the $target_dockerfile.
+#
+# topic (string|default) - Appends a provided topic string to the display
+# name. Need not be unique. Defaults to empty string.
+#
+# Outputs: None
+#
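+# Example (mirrors the invocation in .pipelines/build/ob-prepare.steps.yaml):
+#
+#   - template: utils/rename-dockerfile-references.steps.yaml
+#     parameters:
+#       topic: "Linux - ipv6-hp-bpf"
+#       replace_references: true
+#       source_path: bpf-prog/ipv6-hp-bpf
+#       target_path: bpf-prog/ipv6-hp-bpf
+#       source_dockerfile: linux.Dockerfile
+#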
+#
+# replace_references (boolean|default) - If this is set to true, this module
+# will rename references to the moved Dockerfile to the new
+# $target_dockerfile. This is a dumb sed replace; set with care.
+#
+# replace_path (string|default) - The directory to search for references to
+# the old dockerfile name. Defaults to $target_path if not provided.
+#
+# dockerfile_cleanup (boolean|default) - If this is set to true, this module
+# will remove any dockerfiles that are unrelated to the $target_dockerfile.
+#
+# topic (string|default) - Appends a provided topic string to the display
+# name. Need not be unique. Defaults to empty string.
+#
+# Outputs: None
+#
+################################################################################
+parameters:
+- name: source_path
+  type: string
+
+- name: source_dockerfile
+  type: string
+
+- name: target_path
+  type: string
+
+- name: target_dockerfile
+  type: string
+  default: 'Dockerfile'
+
+- name: working_directory
+  type: string
+  default: '$(Build.SourcesDirectory)'
+
+- name: replace_references
+  type: boolean
+  default: false
+
+- name: replace_path
+  type: string
+  default: ''
+
+- name: dockerfile_cleanup
+  type: boolean
+  default: true
+
+- name: topic
+  type: string
+  default: ''
+
+
+steps:
+
+- bash: |
+    set -e; # Safe despite the '-d' test below: it is guarded by 'if !', so a missing directory does not trip errexit.
+    [[ -n $SYSTEM_DEBUG ]] && [[ $SYSTEM_DEBUG =~ ^[Tt]rue$ ]] && set -x
+
+    if ! [[ -d "$TARGET_REL_PATH" ]]; then
+      echo >&2 "##[info]Target path ('$TARGET_REL_PATH') does not exist, copying source contents to new folder."
+      mkdir -p "$TARGET_REL_PATH"
+      cp -r "$SOURCE_REL_PATH"/* "$TARGET_REL_PATH"
+    else
+      echo >&2 "##[info]Target path ('$TARGET_REL_PATH') exists. Nothing to do."
+    fi
+    ls -la "$TARGET_REL_PATH"
+    ls -la "$SOURCE_REL_PATH"
+  workingDirectory: $(ACR_DIR)
+  env:
+    SOURCE_REL_PATH: ${{ parameters.source_path }}
+    TARGET_REL_PATH: ${{ parameters.target_path }}
+  ${{ if parameters.topic }}:
+    displayName: "Create OneBranch Docker Workspace - ${{ parameters.topic }}"
+  ${{ else }}:
+    displayName: "Create OneBranch Docker Workspace"
+
+
+- bash: |
+    set -e; [[ -n $SYSTEM_DEBUG ]] && [[ $SYSTEM_DEBUG =~ ^[Tt]rue$ ]] && set -x
+
+    echo >&2 "##[info]Renaming source dockerfile ('$SOURCE_DOCKERFILE') to desired name ('$TARGET_DOCKERFILE')."
+    mv "$SOURCE_REL_PATH"/"$SOURCE_DOCKERFILE" "$TARGET_REL_PATH"/"$TARGET_DOCKERFILE"
+  workingDirectory: $(ACR_DIR)
+  env:
+    SOURCE_REL_PATH: ${{ parameters.source_path }}
+    TARGET_REL_PATH: ${{ parameters.target_path }}
+    SOURCE_DOCKERFILE: ${{ parameters.source_dockerfile }}
+    TARGET_DOCKERFILE: ${{ parameters.target_dockerfile }}
+  ${{ if parameters.topic }}:
+    displayName: "Move Dockerfile to OneBranch Approved Naming Format - ${{ parameters.topic }}"
+  ${{ else }}:
+    displayName: "Move Dockerfile to OneBranch Approved Naming Format"
+
+
+- bash: |
+    set -e; [[ -n $SYSTEM_DEBUG ]] && [[ $SYSTEM_DEBUG =~ ^[Tt]rue$ ]] && set -x
+
+    count=$(grep -rl "$SOURCE_DOCKERFILE" "$REPLACE_PATH" | wc -l)
+    if (( $count > 0 )); then
+      FILE_REFERENCES=( $(grep -rl "$SOURCE_DOCKERFILE" "$REPLACE_PATH") )
+      echo >&2 "##[debug]Found '$count' files with references to '$SOURCE_DOCKERFILE'."
+      printf "%s\n" "${FILE_REFERENCES[@]}" | sed "s/^/$(printf '%*s' 4)/" >&2
+
+      echo >&2 "##[info]Replacing references to the old source dockerfile ('$SOURCE_DOCKERFILE') with the new target name ('$TARGET_DOCKERFILE')."
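+      # Plain substring swap via sed: metacharacters in the dockerfile names (e.g. '|', '&', '.') are not escaped, hence the "set with care" note in the header above.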
+ sed -i "s|$SOURCE_DOCKERFILE|$TARGET_DOCKERFILE|g" "${FILE_REFERENCES[@]}" + else + echo >&2 "##[info]No other dockerfiles found." + fi + workingDirectory: $(ACR_DIR) + condition: and(succeeded(), '${{ parameters.replace_references }}') + env: + REPLACE_PATH: ${{ coalesce(parameters.replace_path, parameters.target_path) }} + SOURCE_REL_PATH: ${{ parameters.source_path }} + TARGET_REL_PATH: ${{ parameters.target_path }} + SOURCE_DOCKERFILE: ${{ parameters.source_dockerfile }} + TARGET_DOCKERFILE: ${{ parameters.target_dockerfile }} + ${{ if parameters.topic }}: + displayName: "Replace References to Updated Dockerfile - ${{ parameters.topic }}" + ${{ else }}: + displayName: "Replace References to Updated Dockerfile" + + +- bash: | + set -e; [[ -n $SYSTEM_DEBUG ]] && [[ $SYSTEM_DEBUG =~ ^[T|t]rue$ ]] && set -x + + echo >&2 "##[info]Deleting top-level dockerfiles that are -not- the target ('$TARGET_DOCKERFILE')." + find "$TARGET_REL_PATH" -maxdepth 1 -type f -name '*Dockerfile' ! -wholename "$TARGET_REL_PATH"/"$TARGET_DOCKERFILE" -delete + workingDirectory: $(ACR_DIR) + condition: and(succeeded(), '${{ parameters.dockerfile_cleanup }}') + env: + SOURCE_REL_PATH: ${{ parameters.source_path }} + TARGET_REL_PATH: ${{ parameters.target_path }} + SOURCE_DOCKERFILE: ${{ parameters.source_dockerfile }} + TARGET_DOCKERFILE: ${{ parameters.target_dockerfile }} + ${{ if parameters.topic }}: + displayName: "Remove Extra Dockerfile References - ${{ parameters.topic }}" + ${{ else }}: + displayName: "Remove Extra Dockerfile References" diff --git a/.pipelines/cni/ado-automation/var-pipeline.yaml b/.pipelines/cni/ado-automation/var-pipeline.yaml new file mode 100644 index 0000000000..7c9546ef9b --- /dev/null +++ b/.pipelines/cni/ado-automation/var-pipeline.yaml @@ -0,0 +1,142 @@ +pr: none +trigger: none + +variables: +- group: ACN-CNI-Pipeline + +parameters: +- name: versions + displayName: K8s Versions + type: object + default: + - ver: '27' + LTS: true + - ver: '28' + LTS: true + - ver: '29' + LTS: true + - ver: '30' + LTS: false + - ver: '31' + LTS: false + - ver: '32' + LTS: false + # - ver: '33' + # LTS: 'false' + +stages: + - stage: setup + displayName: Variable Group Init + jobs: + - job: env + displayName: Setup + pool: + name: "$(BUILD_POOL_NAME_DEFAULT)" + steps: + - script: | + # To use the variables below, you must make the respective stage's dependsOn have - setup or it will not retain context of this stage + + echo "##vso[task.setvariable variable=commitID;isOutput=true]$(echo $(make revision)-$(date "+%d%H%M"))" + name: "EnvironmentalVariables" + displayName: "Set environmental variables" + condition: always() + - job: vgroup + displayName: View Variable Group + pool: + name: "$(BUILD_POOL_NAME_DEFAULT)" + steps: + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + az pipelines variable-group show --id $(CNI_VAR_GROUP) --org $(System.TeamFoundationCollectionUri) --project $(System.TeamProject) --debug + + displayName: "Configure Defaults and List" + condition: always() + env: + AZURE_DEVOPS_EXT_PAT: $(System.AccessToken) + + - bash: | + az pipelines variable-group variable update --id $(CNI_VAR_GROUP) --org $(System.TeamFoundationCollectionUri) --project $(System.TeamProject) --name CILIUM_IMAGE_REGISTRY --value "Changed it was" + displayName: "Change Variable" + env: + AZURE_DEVOPS_EXT_PAT: $(System.AccessToken) + + - bash: | + az pipelines 
variable-group show --id $(CNI_VAR_GROUP) --org $(System.TeamFoundationCollectionUri) --project $(System.TeamProject) --debug + displayName: "List Variable Group" + env: + AZURE_DEVOPS_EXT_PAT: $(System.AccessToken) + + + - stage: cluster + displayName: "Cluster Create" + dependsOn: + - setup + variables: + commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] + jobs: + - ${{ each version in parameters.versions }}: + - job: cluster${{ version.ver }} + displayName: 1.${{ version.ver }} Cluster Creation + pool: + name: "$(BUILD_POOL_NAME_DEFAULT)" + strategy: + matrix: + cniv1: + clusterType: cniv1-up + clusterName: cniv1 + cilium: + clusterType: overlay-cilium-up + clusterName: cilium + overlay: + clusterType: overlay-up + clusterName: overlay + steps: + - template: ../../templates/create-cluster-steps.yaml + parameters: + clusterType: $(clusterType) + clusterName: $(clusterName)-${{ version.ver }}-$(commitID) + k8sVersion: 1.${{ version.ver }} + vmSize: Standard_B2ms + vmSizeWin: Standard_B2ms + os: windows + region: $(REGION_AKS_CLUSTER_TEST) + LTS: ${{ version.LTS }} + + + - stage: clusterDelete + displayName: "Cluster Delete" + condition: always() + dependsOn: + - setup + - cluster + variables: + commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] + jobs: + - ${{ each version in parameters.versions }}: + - job: cluster${{ version.ver }} + displayName: 1.${{ version.ver }} Cluster Delete + pool: + name: "$(BUILD_POOL_NAME_DEFAULT)" + strategy: + matrix: + cniv1: + clusterName: cniv1 + cilium: + clusterName: cilium + overlay: + clusterName: overlay + steps: + - template: ../../templates/delete-cluster.yaml + parameters: + name: $(clusterName)-${{ version.ver }}-$(commitID) + clusterName: $(clusterName)-${{ version.ver }}-$(commitID) + region: $(REGION_AKS_CLUSTER_TEST) + sub: $(SUB_AZURE_NETWORK_AGENT_BUILD_VALIDATIONS) + svcConn: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + + diff --git a/.pipelines/cni/cilium/cilium-overlay-load-test-template.yaml b/.pipelines/cni/cilium/cilium-overlay-load-test-template.yaml index 38b7d98791..1f808d74e7 100644 --- a/.pipelines/cni/cilium/cilium-overlay-load-test-template.yaml +++ b/.pipelines/cni/cilium/cilium-overlay-load-test-template.yaml @@ -11,37 +11,39 @@ parameters: hubbleEnabled: false dualstackVersion: "" cni: "cilium" + upgradeScenario: "" # Condition confirms that: # Previous job has reported Succeeded. Previous job is currently setup which controls variable assignment and we are dependent on its success. # CONTROL_CNI either contains 'cniv1' or 'all'. 
It is not case sensitive stages: - - stage: create_${{ parameters.name }} - condition: and( succeeded(), and( or( contains(variables.CONTROL_CNI, 'cilium') , contains(variables.CONTROL_CNI, 'all') ), or( contains(variables.CONTROL_OS, 'linux'), contains(variables.CONTROL_OS, 'all') ) ) ) - variables: - ${{ if contains(parameters.clusterName, 'rdma') }}: - location: $(LOCATION_RDMA) - ${{ elseif eq(parameters.arch, 'arm64') }}: - location: $(LOCATION_ARM64) - ${{ else }}: - location: $(LOCATION_AMD64) - commitID: $[ stagedependencies.setup.env.outputs['SetEnvVars.commitID'] ] - dependsOn: - - setup - - build_images - displayName: "Create Cluster - ${{ parameters.clusterName }}" - jobs: - - job: create_aks_cluster_with_${{ parameters.name }} - pool: - name: "$(BUILD_POOL_NAME_DEFAULT)" - steps: - - template: ../load-test-templates/create-cluster-template.yaml - parameters: - clusterType: ${{ parameters.clusterType }} - clusterName: ${{ parameters.clusterName }}-$(commitID) - nodeCount: ${{ parameters.nodeCount }} - vmSize: ${{ parameters.vmSize }} - region: $(location) + - ${{if eq(parameters.upgradeScenario, false)}}: + - stage: create_${{ parameters.name }} + condition: and( succeeded(), and( or( contains(variables.CONTROL_CNI, 'cilium') , contains(variables.CONTROL_CNI, 'all') ), or( contains(variables.CONTROL_OS, 'linux'), contains(variables.CONTROL_OS, 'all') ) ) ) + variables: + ${{ if contains(parameters.clusterName, 'rdma') }}: + location: $(LOCATION_RDMA) + ${{ elseif eq(parameters.arch, 'arm64') }}: + location: $(LOCATION_ARM64) + ${{ else }}: + location: $(LOCATION_AMD64) + commitID: $[ stagedependencies.setup.env.outputs['SetEnvVars.commitID'] ] + dependsOn: + - setup + - build_images + displayName: "Create Cluster - ${{ parameters.clusterName }}" + jobs: + - job: create_aks_cluster_with_${{ parameters.name }} + pool: + name: "$(BUILD_POOL_NAME_DEFAULT)" + steps: + - template: ../load-test-templates/create-cluster-template.yaml + parameters: + clusterType: ${{ parameters.clusterType }} + clusterName: ${{ parameters.clusterName }}-$(commitID) + nodeCount: ${{ parameters.nodeCount }} + vmSize: ${{ parameters.vmSize }} + region: $(location) # Conditions for below E2E test scenarios confirm that: # Pipeline has not been canceled and that the previous job has reports anything other than failure(Succeeded, SuccededWithIssues, Skipped). Previous job is declared by dependsOn: @@ -60,7 +62,10 @@ stages: pool: name: "$(BUILD_POOL_NAME_DEFAULT)" dependsOn: - - create_${{ parameters.name }} + - ${{ if eq(parameters.upgradeScenario, false) }}: + - create_${{ parameters.name }} + - ${{ else }}: + - ${{ parameters.dependsOn }} - publish - setup displayName: "Cilium Test - ${{ parameters.name }}" @@ -91,7 +96,7 @@ stages: fi echo "install Cilium ${CILIUM_VERSION_TAG}" - export DIR=${CILIUM_VERSION_TAG%.*} + export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. -f1,2) echo "installing files from ${DIR}" echo "deploy Cilium ConfigMap" @@ -108,8 +113,9 @@ stages: if [ ! 
-z ${{ parameters.dualstackVersion }} ]; then echo "Use dualstack daemonset for Cilium" + export IPV6_IMAGE_REGISTRY=acnpublic.azurecr.io export IPV6_HP_BPF_VERSION=$(make ipv6-hp-bpf-version) - envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY},${IPV6_HP_BPF_VERSION}' < test/integration/manifests/cilium/v${DIR}/cilium-agent/templates/daemonset-dualstack.yaml | kubectl apply -f - + envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY},${IPV6_IMAGE_REGISTRY},${IPV6_HP_BPF_VERSION}' < test/integration/manifests/cilium/v${DIR}/cilium-agent/templates/daemonset-dualstack.yaml | kubectl apply -f - else envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/v${DIR}/cilium-agent/templates/daemonset.yaml | kubectl apply -f - fi @@ -117,9 +123,6 @@ stages: envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/v${DIR}/cilium-operator/templates/deployment.yaml | kubectl apply -f - kubectl get po -owide -A - echo "Deploy Azure-CNS" - sudo -E env "PATH=$PATH" make test-integration AZURE_IPAM_VERSION=$(make azure-ipam-version) CNS_VERSION=$(make cns-version) INSTALL_CNS=true INSTALL_OVERLAY=true CNS_IMAGE_REPO=$(CNS_IMAGE_REPO) - kubectl get po -owide -A - ${{if eq(parameters.hubbleEnabled, true)}}: - job: deploy_cilium_components displayName: Deploy Cilium with Hubble @@ -142,7 +145,7 @@ stages: echo "install Cilium onto Overlay Cluster with hubble enabled" export CILIUM_VERSION_TAG=${CILIUM_HUBBLE_VERSION_TAG} - export DIR=${CILIUM_VERSION_TAG%.*} + export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. -f1,2) echo "installing files from ${DIR}" kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config-hubble.yaml kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-agent/files @@ -151,9 +154,71 @@ stages: envsubst '${CILIUM_IMAGE_REGISTRY},${CILIUM_VERSION_TAG}' < test/integration/manifests/cilium/v${DIR}/cilium-operator/templates/deployment.yaml | kubectl apply -f - kubectl get po -owide -A - echo "Deploy Azure-CNS" - sudo -E env "PATH=$PATH" make test-integration AZURE_IPAM_VERSION=$(make azure-ipam-version) CNS_VERSION=$(make cns-version) INSTALL_CNS=true INSTALL_OVERLAY=true CNS_IMAGE_REPO=$(CNS_IMAGE_REPO) - kubectl get po -owide -A + - job: deploy_cns_and_ipam + displayName: "Deploy CNS and IPAM" + dependsOn: deploy_cilium_components + steps: + - task: AzureCLI@2 + displayName: "Install CNS and IPAM" + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -ex + az extension add --name aks-preview + make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }}-$(commitID) + ls -lah + pwd + kubectl cluster-info + kubectl get po -owide -A + if [ ${{parameters.upgradeScenario}} = "true" ] + then + echo "Upgrade scenario is true, using upgrade azure ipam and cns version from pipeline variables" + if [ -z "$UPGRADE_AZURE_IPAM_VERSION" ] + then + echo "UPGRADE_AZURE_IPAM_VERSION is not set, using default value" + IPAM=$(make azure-ipam-version) + else + IPAM=$(UPGRADE_AZURE_IPAM_VERSION) + echo "UPGRADE_AZURE_IPAM_VERSION is set to $IPAM" + fi + + if [ -z "$UPGRADE_CNS_VERSION" ] + then + echo "UPGRADE_CNS_VERSION is not set, using default value" + CNS=$(make cns-version) + else + CNS=$(UPGRADE_CNS_VERSION) + echo "UPGRADE_CNS_VERSION is set to $CNS" + fi + IPAM_IMAGE_REPO=$(UPGRADE_IPAM_IMAGE_REPO) + CNS_IMAGE_REPO=$(UPGRADE_CNS_IMAGE_REPO) 
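+              # NB: $(UPGRADE_IPAM_IMAGE_REPO) and $(UPGRADE_CNS_IMAGE_REPO) above are Azure DevOps macro expressions, expanded by the pipeline before this script runs; they are not bash command substitutions.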
+ echo Deploying with Azure Ipam version $IPAM from $IPAM_IMAGE_REPO and CNS version $CNS from $CNS_IMAGE_REPO + else + if [ -z "$TEST_AZURE_IPAM_VERSION" ] + then + echo "TEST_AZURE_IPAM_VERSION is not set, using default value" + IPAM=$(make azure-ipam-version) + else + IPAM=$(TEST_AZURE_IPAM_VERSION) + echo "TEST_AZURE_IPAM_VERSION is set to $IPAM" + fi + if [ -z "$TEST_CNS_VERSION" ] + then + echo "TEST_CNS_VERSION is not set, using default value" + CNS=$(make cns-version) + else + CNS=$(TEST_CNS_VERSION) + echo "TEST_CNS_VERSION is set to $CNS" + fi + fi + + echo "Deploy Azure-CNS" + sudo -E env "PATH=$PATH" make test-integration AZURE_IPAM_VERSION=${IPAM} CNS_VERSION=${CNS} INSTALL_CNS=true INSTALL_OVERLAY=true CNS_IMAGE_REPO=$(CNS_IMAGE_REPO) IPAM_IMAGE_REPO=$(IPAM_IMAGE_REPO) + kubectl get po -owide -A + kubectl get crd -A - job: deploy_pods condition: and( and( not(canceled()), not(failed()) ), or( contains(variables.CONTROL_SCENARIO, 'scaleTest') , contains(variables.CONTROL_SCENARIO, 'all') ) ) @@ -168,6 +233,8 @@ stages: iterations: ${ITERATIONS_CILIUM} nodeCount: ${{ parameters.nodeCount }} cni: cilium + ${{ if eq(parameters.upgradeScenario, 'true') }}: + logType: upgradeScaleTest - template: ../load-test-templates/validate-state-template.yaml parameters: clusterName: ${{ parameters.clusterName }}-$(commitID) @@ -183,6 +250,8 @@ stages: os: ${{ parameters.os }} cni: cilium region: $(location) + ${{ if eq(parameters.upgradeScenario, 'true') }}: + logType: upgradeRestartNode - template: ../load-test-templates/validate-state-template.yaml parameters: clusterName: ${{ parameters.clusterName }}-$(commitID) @@ -200,6 +269,8 @@ stages: scaleup: ${SCALEUP_CILIUM} nodeCount: ${{ parameters.nodeCount }} cni: ${{ parameters.cni }} + ${{ if eq(parameters.upgradeScenario, 'true') }}: + logType: upgradeRestartCNS - job: cni_tests displayName: "Cilium Test" dependsOn: restart_cns @@ -216,12 +287,7 @@ stages: make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }}-$(commitID) name: "GetCluster" displayName: "Get AKS Cluster" - - script: | - kubectl delete ns load-test - cilium connectivity test --connect-timeout 4s --request-timeout 30s --test '!pod-to-pod-encryption,!node-to-node-encryption' - retryCountOnTaskFailure: 6 - name: "CiliumConnectivityTests" - displayName: "Run Cilium Connectivity Tests" + - template: ../../templates/cilium-connectivity-tests.yaml - script: | cd hack/scripts chmod +x async-delete-test.sh @@ -231,6 +297,7 @@ stages: fi name: "testAsyncDelete" displayName: "Verify Async Delete when CNS is down" + - template: ../../templates/cilium-mtu-check.yaml - template: ../k8s-e2e/k8s-e2e-job-template.yaml parameters: sub: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) @@ -238,10 +305,13 @@ stages: os: ${{ parameters.os }} cni: cilium dependsOn: cni_tests - datapath: true dns: true portforward: true service: true + ${{ if eq(parameters.dualstackVersion, '') }}: + datapath: true + ${{ else }}: + dualstack: true - job: failedE2ELogs displayName: "Failure Logs" dependsOn: diff --git a/.pipelines/cni/cilium/cilium-scale-test.yaml b/.pipelines/cni/cilium/cilium-scale-test.yaml index 5691f858b5..523e71c2eb 100644 --- a/.pipelines/cni/cilium/cilium-scale-test.yaml +++ b/.pipelines/cni/cilium/cilium-scale-test.yaml @@ -2,6 +2,32 @@ pr: none trigger: none stages: + - stage: create_rg_cluster + displayName: "Create RG & Cluster" + jobs: + - job: create_rg_cluster + pool: + name: "$(BUILD_POOL_NAME_DEFAULT)" + steps: + - task: AzureCLI@2 + inputs: + azureSubscription: 
$(TEST_SUB_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -ex + az extension add --name aks-preview + + subscriptionId="$(az account list --query "[?isDefault].id" --output tsv)" + make -C ./hack/aks AZCLI=az ${CLUSTER_TYPE} CLUSTER=${CLUSTER} GROUP=${RESOURCE_GROUP} REGION=${LOCATION} NODE_COUNT=5 VM_SIZE=${VMSIZE} SUB=${subscriptionId} || { + echo "Failed to create Cluster" + exit 1 + } + ls -lah + pwd + kubectl cluster-info + kubectl get po -owide -A - stage: update_daemonset_versions displayName: "Update Cilium + CNS Version and Restart Nodes" jobs: @@ -16,24 +42,55 @@ stages: scriptType: "bash" addSpnToEnvironment: true inlineScript: | - az aks get-credentials --resource-group ${CLUSTER} --name ${CLUSTER} + az aks get-credentials --resource-group ${RESOURCE_GROUP} --name ${CLUSTER} echo "Redeploy all cilium components and update cilium version. Redeploy all to catch all changes between versions" - echo "deploy Cilium ConfigMap" - kubectl apply -f test/integration/manifests/cilium/cilium-config.yaml + pwd + echo "install Cilium ${CILIUM_VERSION_TAG}" - envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/daemonset.yaml | kubectl apply -f - - envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/deployment.yaml | kubectl apply -f - - kubectl apply -f test/integration/manifests/cilium/cilium-agent - kubectl apply -f test/integration/manifests/cilium/cilium-operator - echo "Keep CNS version up to date, grabbing pipeline parameter" - CNS_IMAGE=${CNS_IMAGE} - sed -i '/containers:/{n;n;s/\(image\).*/\1: '"${CNS_IMAGE//\//\\/}"'/}' test/integration/manifests/cns/daemonset.yaml - kubectl apply -f test/integration/manifests/cns/daemonset.yaml + export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. 
-f1,2) + echo "installing files from ${DIR}" + + echo "deploy Cilium ConfigMap" + if ${IS_DUALSTACK}; then + echo "Use dualstack configmap for Cilium" + kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config-dualstack.yaml + else + kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config.yaml + fi + + # Passes Cilium image to daemonset and deployment + kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-agent/files + kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-operator/files + + export CILIUM_VERSION_TAG=${CILIUM_VERSION_TAG} + export CILIUM_IMAGE_REGISTRY=${CILIUM_IMAGE_REGISTRY} + if ${IS_DUALSTACK}; then + echo "Use dualstack daemonset for Cilium" + export IPV6_IMAGE_REGISTRY=acnpublic.azurecr.io + envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY},${IPV6_IMAGE_REGISTRY},${IPV6_HP_BPF_VERSION}' < test/integration/manifests/cilium/v${DIR}/cilium-agent/templates/daemonset-dualstack.yaml | kubectl apply -f - + else + envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/v${DIR}/cilium-agent/templates/daemonset.yaml | kubectl apply -f - + fi + + envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/v${DIR}/cilium-operator/templates/deployment.yaml | kubectl apply -f - + kubectl get po -owide -A + + echo "Deploy Azure-CNS" + sudo -E env "PATH=$PATH" make test-load AZURE_IPAM_VERSION=v$(AZURE_IPAM_VERSION) CNS_ONLY=true CNS_VERSION=v$(CNS_VERSION) INSTALL_CNS=true INSTALL_OVERLAY=true CNS_IMAGE_REPO=$(CNS_IMAGE_REPO) + kubectl get po -owide -A + kubectl wait --for=condition=Ready pods --all --all-namespaces --timeout=10m || { + echo "Not all pods ready" + kubectl get po -owide -A + exit 1 + } + kubectl get po -owide -A + + echo "Restart Nodes" for val in $(az vmss list -g MC_${clusterName}_${clusterName}_$(REGION_AKS_CLUSTER_TEST) --query "[].name" -o tsv); do make -C ./hack/aks restart-vmss AZCLI=az CLUSTER=${clusterName} REGION=$(REGION_AKS_CLUSTER_TEST) VMSS_NAME=${val} done kubectl get node - kubectl get pod -A + kubectl get po -owide -A name: "UpdateCiliumandCNSVersion" displayName: "Update Cilium and CNS Version" - stage: scale_up_cluster @@ -51,11 +108,29 @@ stages: addSpnToEnvironment: true inlineScript: | set -ex - az aks get-credentials --resource-group ${CLUSTER} --name ${CLUSTER} - echo "Scaling up nodes" - az aks nodepool scale --name nodepool1 --cluster-name ${CLUSTER} --resource-group ${CLUSTER} --node-count ${NODE_COUNT_UP} + az aks get-credentials --resource-group ${RESOURCE_GROUP} --name ${CLUSTER} + current_node_count=$(az aks show --resource-group ${RESOURCE_GROUP} --name ${CLUSTER} --query agentPoolProfiles[].count -o tsv) + if [ $current_node_count -lt ${NODE_COUNT_UP} ]; then + increments=$(((${NODE_COUNT_UP} - current_node_count + 199) / 200)) # Calculate how many increments of 200 are needed, rounding up + for ((i=0; i 1.24 + # nodes -> procs + # flakeAttempts -> flake-attempts + # dryRun -> dry-run + + ./ginkgo --nodes=${{ parameters.processes }} \ + ./e2e.test -- \ + --num-nodes=2 \ + --provider=skeleton \ + --ginkgo.focus='${{ parameters.ginkgoFocus }}' \ + --ginkgo.skip="${{ parameters.ginkgoSkip }}$SKIP" \ + --ginkgo.flakeAttempts=${{ parameters.attempts }} \ + --ginkgo.v \ + --node-os-distro=${{ parameters.os }} \ + --kubeconfig=$HOME/.kube/config + + # Untaint Linux nodes once testing is complete + if ${{ lower(eq(parameters.os, 'windows')) }} + then + kubectl taint nodes -l 
kubernetes.azure.com/mode=system node-role.kubernetes.io/control-plane:NoSchedule- + fi + + # Untaint Windows nodes once testing is complete + if ${{ lower(eq(parameters.os, 'linux')) }} + then + kubectl taint nodes -l kubernetes.azure.com/mode=user node-role.kubernetes.io/control-plane:NoSchedule- + fi + name: ${{ parameters.name }} + displayName: k8s E2E - ${{ parameters.testName }} + retryCountOnTaskFailure: 5 diff --git a/.pipelines/cni/load-test-templates/create-cluster-template.yaml b/.pipelines/cni/load-test-templates/create-cluster-template.yaml index 006f2bc32d..9fb8d319ed 100644 --- a/.pipelines/cni/load-test-templates/create-cluster-template.yaml +++ b/.pipelines/cni/load-test-templates/create-cluster-template.yaml @@ -18,12 +18,16 @@ steps: addSpnToEnvironment: true inlineScript: | set -ex + if ! [ -z ${K8S_VERSION} ]; then + echo "Default k8s version, $(make -C ./hack/aks vars | grep K8S | cut -d'=' -f 2), is manually set to ${K8S_VERSION}" + export K8S_VER=${K8S_VERSION} + fi + make -C ./hack/aks azcfg AZCLI=az REGION=${{ parameters.region }} make -C ./hack/aks ${{ parameters.clusterType }} \ AZCLI=az REGION=${{ parameters.region }} SUB=$(SUB_AZURE_NETWORK_AGENT_BUILD_VALIDATIONS) \ CLUSTER=${{ parameters.clusterName }} NODE_COUNT=${{ parameters.nodeCount }} NODE_COUNT_WIN=${{ parameters.nodeCountWin }} \ VM_SIZE=${{ parameters.vmSize }} VM_SIZE_WIN=${{ parameters.vmSizeWin }} \ - WINDOWS_USERNAME=${WINDOWS_USERNAME} WINDOWS_PASSWORD=${WINDOWS_PASSWORD} \ OS_SKU=${{ parameters.osSKU }} OS_SKU_WIN=${{ parameters.osSkuWin }} OS=${{ parameters.os }} echo "Cluster successfully created" diff --git a/.pipelines/cni/load-test-templates/pod-deployment-template.yaml b/.pipelines/cni/load-test-templates/pod-deployment-template.yaml index 002b25ab36..d87c510914 100644 --- a/.pipelines/cni/load-test-templates/pod-deployment-template.yaml +++ b/.pipelines/cni/load-test-templates/pod-deployment-template.yaml @@ -6,6 +6,7 @@ parameters: nodeCount: 10 cni: "" jobName: "deploy_pods" + logType: "scaleTest" steps: - task: AzureCLI@2 @@ -26,7 +27,7 @@ steps: - template: ../../templates/log-template.yaml parameters: clusterName: ${{ parameters.clusterName }} - logType: scaleTest os: ${{ parameters.os }} cni: ${{ parameters.cni }} jobName: ${{ parameters.jobName }} + logType: ${{ parameters.logType }} diff --git a/.pipelines/cni/load-test-templates/restart-cns-template.yaml b/.pipelines/cni/load-test-templates/restart-cns-template.yaml index e0527c408f..f0d02cb1c7 100644 --- a/.pipelines/cni/load-test-templates/restart-cns-template.yaml +++ b/.pipelines/cni/load-test-templates/restart-cns-template.yaml @@ -5,6 +5,7 @@ parameters: nodeCount: 10 os: "" jobName: "restart_cns" + logType: "restartCNS" steps: - task: AzureCLI@2 @@ -38,7 +39,7 @@ steps: - template: ../../templates/log-template.yaml parameters: clusterName: ${{ parameters.clusterName }} - logType: restartCNS + logType: ${{ parameters.logType }} os: ${{ parameters.os }} cni: ${{ parameters.cni }} jobName: ${{ parameters.jobName }} diff --git a/.pipelines/cni/load-test-templates/restart-node-template.yaml b/.pipelines/cni/load-test-templates/restart-node-template.yaml index 2cdd4230f3..f07940a992 100644 --- a/.pipelines/cni/load-test-templates/restart-node-template.yaml +++ b/.pipelines/cni/load-test-templates/restart-node-template.yaml @@ -4,6 +4,7 @@ parameters: cni: "" jobName: "restart_nodes" region: "" + logType: "restartNode" steps: - task: AzureCLI@2 @@ -56,7 +57,7 @@ steps: - template: ../../templates/log-template.yaml parameters: 
clusterName: ${{ parameters.clusterName }} - logType: restartNode + logType: ${{ parameters.logType }} os: ${{ parameters.os }} cni: ${{ parameters.cni }} jobName: ${{ parameters.jobName }} diff --git a/.pipelines/cni/lsg/lsg-cni-intergration-template.yaml b/.pipelines/cni/lsg/lsg-cni-intergration-template.yaml index 73a399b8cb..a2f75e7190 100644 --- a/.pipelines/cni/lsg/lsg-cni-intergration-template.yaml +++ b/.pipelines/cni/lsg/lsg-cni-intergration-template.yaml @@ -95,7 +95,7 @@ stages: kubectl get po -owide -A echo "install Cilium ${CILIUM_VERSION_TAG}" - export DIR=${CILIUM_VERSION_TAG%.*} + export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. -f1,2) echo "installing files from ${DIR}" echo "deploy Cilium ConfigMap" @@ -176,7 +176,7 @@ stages: echo "Delete load-test Namespace" make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }}-$(commitID) kubectl get ns --no-headers | grep -v 'kube\|default' | awk '{print $1}' - delete=`kubectl get ns --no-headers | grep -v 'kube\|default' | awk '{print $1}'` + delete=`kubectl get ns --no-headers | grep -v 'kube\|default\|gatekeeper' | awk '{print $1}'` kubectl delete ns $delete kubectl cluster-info kubectl get po -owide -A @@ -187,24 +187,7 @@ stages: dependsOn: recover condition: and( succeeded(), ${{ contains(parameters.cni, 'cilium') }} ) steps: - - script: | - echo "install cilium CLI" - if [[ ${CILIUM_VERSION_TAG} =~ ^1.1[1-3].[0-9]{1,2} ]]; then - echo "Cilium Agent Version ${BASH_REMATCH[0]}" - CILIUM_CLI_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/cilium-cli/main/stable-v0.14.txt) - else - echo "Cilium Agent Version ${CILIUM_VERSION_TAG}" - CILIUM_CLI_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/cilium-cli/master/stable.txt) - fi - CLI_ARCH=amd64 - curl -L --fail --remote-name-all https://github.com/cilium/cilium-cli/releases/download/${CILIUM_CLI_VERSION}/cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum} - sha256sum --check cilium-linux-${CLI_ARCH}.tar.gz.sha256sum - sudo tar xzvfC cilium-linux-${CLI_ARCH}.tar.gz /usr/local/bin - rm cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum} - cilium status - cilium version - name: "InstallCiliumCli" - displayName: "Install Cilium CLI" + - template: ../../templates/cilium-cli.yaml - task: AzureCLI@2 inputs: azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) @@ -216,12 +199,7 @@ stages: make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }}-$(commitID) name: "GetCluster" displayName: "Get AKS Cluster" - - script: | - kubectl delete ns load-test - cilium connectivity test --connect-timeout 4s --request-timeout 30s --test '!pod-to-pod-encryption,!node-to-node-encryption' - retryCountOnTaskFailure: 6 - name: "CiliumConnectivityTests" - displayName: "Run Cilium Connectivity Tests" + - template: ../../templates/cilium-connectivity-tests.yaml - ${{ if contains(parameters.cni, 'cilium') }}: - template: ../k8s-e2e/k8s-e2e-job-template.yaml parameters: diff --git a/.pipelines/cni/lsg/pipeline.yaml b/.pipelines/cni/lsg/pipeline.yaml index 2001926583..73418f7b08 100644 --- a/.pipelines/cni/lsg/pipeline.yaml +++ b/.pipelines/cni/lsg/pipeline.yaml @@ -100,3 +100,5 @@ stages: name: $(name) clusterName: $(clusterName)-$(commitID) region: $(LOCATION) + sub: $(SUB_AZURE_NETWORK_AGENT_BUILD_VALIDATIONS) + svcConn: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) diff --git a/.pipelines/cni/pipeline.yaml b/.pipelines/cni/pipeline.yaml index d961052745..25b8caa62f 100644 --- a/.pipelines/cni/pipeline.yaml +++ b/.pipelines/cni/pipeline.yaml @@ -5,6 +5,10 
@@ trigger: - dropgz/* - azure-ipam/* - v* +parameters: + - name: upgradeScenario + type: boolean + default: false stages: - stage: setup @@ -20,11 +24,10 @@ stages: go version echo "##vso[task.setvariable variable=commitID;isOutput=true]$(echo $(make revision)-$(date "+%d%H%M"))" echo "##vso[task.setvariable variable=npmVersion;isOutput=true]$(make npm-version)" - echo "##vso[task.setvariable variable=cnsVersion;isOutput=true]$(CNS_VERSION)" name: "SetEnvVars" displayName: "Set Environment Variables" condition: always() - + - stage: build_images dependsOn: setup displayName: "Build Images" @@ -39,30 +42,26 @@ stages: arch: amd64 name: azure-ipam os: linux - azure_ipam_windows2019_amd64: + azure_ipam_windows_amd64: arch: amd64 name: azure-ipam os: windows - os_version: ltsc2019 - azure_ipam_windows2022_amd64: - arch: amd64 - name: azure-ipam - os: windows - os_version: ltsc2022 cni_linux_amd64: arch: amd64 name: cni os: linux - cni_windows2019_amd64: + cni_windows_amd64: arch: amd64 name: cni os: windows - os_version: ltsc2019 - cni_windows2022_amd64: + cns_linux_amd64: arch: amd64 - name: cni + name: cns + os: linux + cns_windows_amd64: + arch: amd64 + name: cns os: windows - os_version: ltsc2022 ipv6_hp_bpf_linux_amd64: arch: amd64 name: ipv6-hp-bpf @@ -71,47 +70,49 @@ stages: arch: amd64 name: npm os: linux - npm_windows2022_amd64: + npm_windows_amd64: arch: amd64 name: npm os: windows - os_version: ltsc2022 steps: - template: ../containers/container-template.yaml parameters: arch: $(arch) name: $(name) os: $(os) - os_version: $(os_version) - job: containerize_linux_arm64 displayName: Build Images pool: name: "$(BUILD_POOL_NAME_LINUX_ARM64)" strategy: matrix: - azure_ipam_linux_arm64: - arch: arm64 - name: azure-ipam - os: linux - cni_linux_arm64: - arch: arm64 - name: cni - os: linux - ipv6_hp_bpf_linux_arm64: - arch: arm64 - name: ipv6-hp-bpf - os: linux - npm_linux_arm64: - arch: arm64 - name: npm - os: linux + azure_ipam_linux_arm64: + arch: arm64 + name: azure-ipam + os: linux + cni_linux_arm64: + arch: arm64 + name: cni + os: linux + cns_linux_arm64: + arch: arm64 + name: cns + os: linux + ipv6_hp_bpf_linux_arm64: + arch: arm64 + name: ipv6-hp-bpf + os: linux + npm_linux_arm64: + arch: arm64 + name: npm + os: linux steps: - template: ../containers/container-template.yaml parameters: arch: $(arch) name: $(name) os: $(os) - + - stage: binaries displayName: Build Binaries dependsOn: setup @@ -160,14 +161,15 @@ stages: platforms: linux/amd64 linux/arm64 windows/amd64 cni: name: cni - os_versions: ltsc2019 ltsc2022 + platforms: linux/amd64 linux/arm64 windows/amd64 + cns: + name: cns platforms: linux/amd64 linux/arm64 windows/amd64 ipv6-hp-bpf: name: ipv6-hp-bpf platforms: linux/amd64 linux/arm64 npm: name: npm - os_versions: ltsc2022 platforms: linux/amd64 linux/arm64 windows/amd64 steps: - template: ../containers/manifest-template.yaml @@ -180,7 +182,7 @@ stages: - template: singletenancy/cniv1-template.yaml parameters: name: win22_cniv1 - clusterType: windows-cniv1-up + clusterType: cniv1-up clusterName: "win22-cniv1" nodeCount: ${NODE_COUNT_WINCLUSTER_SYSTEMPOOL} nodeCountWin: ${NODE_COUNT_WIN} @@ -225,27 +227,11 @@ stages: scaleup: ${SCALEUP_WIN} iterations: ${ITERATIONS_WIN} - - template: singletenancy/cniv2-template.yaml - parameters: - name: windows19_overlay - clusterType: overlay-byocni-up - clusterName: w19-amd-ov - nodeCount: ${NODE_COUNT_WINCLUSTER_SYSTEMPOOL} - nodeCountWin: ${NODE_COUNT_WIN} - vmSize: ${VM_SIZE_WINCLUSTER_SYSTEMPOOL} - vmSizeWin: ${VM_SIZE_WIN} - arch: 
amd64 - os: windows - os_version: 'ltsc2019' - osSkuWin: 'Windows2019' - scaleup: ${SCALEUP_WIN} - iterations: ${ITERATIONS_WIN} - ## Linux E2E - template: singletenancy/cniv1-template.yaml parameters: name: linux_cniv1 - clusterType: linux-cniv1-up + clusterType: cniv1-up clusterName: "linux-cniv1" nodeCount: ${NODE_COUNT_LINUX} vmSize: ${VM_SIZE} @@ -320,6 +306,7 @@ stages: vmSize: ${VM_SIZE_CILIUM} arch: amd64 cni: "cilium" + upgradeScenario: false - template: cilium/cilium-overlay-load-test-template.yaml parameters: @@ -331,6 +318,7 @@ stages: vmSize: ${VM_SIZE_CILIUM} arch: amd64 cni: "cilium" + upgradeScenario: false - template: cilium/cilium-overlay-load-test-template.yaml parameters: @@ -342,6 +330,7 @@ stages: arch: amd64 osSKU: AzureLinux cni: "cilium" + upgradeScenario: false - template: cilium/cilium-overlay-load-test-template.yaml parameters: @@ -352,6 +341,7 @@ stages: vmSize: Standard_D8ps_v5 arch: arm64 cni: "cilium" + upgradeScenario: false - template: cilium/cilium-overlay-load-test-template.yaml parameters: @@ -362,6 +352,7 @@ stages: vmSize: Standard_HC44-16rs arch: amd64 cni: "cilium" + upgradeScenario: false - template: cilium/cilium-overlay-load-test-template.yaml parameters: @@ -373,6 +364,7 @@ stages: arch: amd64 dualstackVersion: ${CILIUM_DUALSTACK_VERSION} cni: "cilium_dualstack" + upgradeScenario: false - template: cilium/cilium-overlay-load-test-template.yaml parameters: @@ -384,6 +376,7 @@ stages: arch: arm64 dualstackVersion: ${CILIUM_DUALSTACK_VERSION} cni: "cilium_dualstack" + upgradeScenario: false - template: cilium/cilium-overlay-load-test-template.yaml parameters: @@ -396,6 +389,7 @@ stages: osSKU: AzureLinux dualstackVersion: ${CILIUM_DUALSTACK_VERSION} cni: "cilium_dualstack" + upgradeScenario: false - template: cilium/cilium-overlay-load-test-template.yaml parameters: @@ -407,6 +401,125 @@ stages: arch: amd64 dualstackVersion: ${CILIUM_DUALSTACK_VERSION} cni: "cilium_dualstack" + upgradeScenario: false + + + ## If upgradeScenario is set, redeploy new IPAM version to existing clusters and run tests + - ${{if eq(parameters.upgradeScenario, true)}}: + - template: cilium/cilium-overlay-load-test-template.yaml + parameters: + name: cilium_overlay_upgrade + clusterType: overlay-byocni-nokubeproxy-up + clusterName: "cilium-over" + nodeCount: ${NODE_COUNT_CILIUM} + vmSize: ${VM_SIZE_CILIUM} + arch: amd64 + cni: "cilium" + upgradeScenario: true + dependsOn: cilium_overlay + + - template: cilium/cilium-overlay-load-test-template.yaml + parameters: + name: cilium_overlay_hubble_upgrade + clusterType: overlay-byocni-nokubeproxy-up + clusterName: "cil-over-hub" + hubbleEnabled: true + nodeCount: ${NODE_COUNT_CILIUM} + vmSize: ${VM_SIZE_CILIUM} + arch: amd64 + cni: "cilium" + upgradeScenario: true + dependsOn: cilium_overlay_hubble + + - template: cilium/cilium-overlay-load-test-template.yaml + parameters: + name: cilium_overlay_mariner_upgrade + clusterType: overlay-byocni-nokubeproxy-up + clusterName: "cil-over-mar" + nodeCount: ${NODE_COUNT_CILIUM} + vmSize: ${VM_SIZE_CILIUM} + arch: amd64 + osSKU: AzureLinux + cni: "cilium" + upgradeScenario: true + dependsOn: cilium_overlay_mariner + + - template: cilium/cilium-overlay-load-test-template.yaml + parameters: + name: cilium_overlay_arm_upgrade + clusterType: overlay-byocni-nokubeproxy-up + clusterName: "cil-over-arm" + nodeCount: ${NODE_COUNT_CILIUM} + vmSize: Standard_D8ps_v5 + arch: arm64 + cni: "cilium" + upgradeScenario: true + dependsOn: cilium_overlay_arm + + - template: 
cilium/cilium-overlay-load-test-template.yaml + parameters: + name: cilium_overlay_rdma_upgrade + clusterType: overlay-byocni-nokubeproxy-up + clusterName: "cil-over-rdma" + nodeCount: 2 + vmSize: Standard_HC44-16rs + arch: amd64 + cni: "cilium" + upgradeScenario: true + dependsOn: cilium_overlay_rdma + + - template: cilium/cilium-overlay-load-test-template.yaml + parameters: + name: cilium_overlay_ds_upgrade + clusterType: dualstack-byocni-nokubeproxy-up + clusterName: "cil-ds-ov" + nodeCount: ${NODE_COUNT_CILIUM} + vmSize: ${VM_SIZE_CILIUM} + arch: amd64 + dualstackVersion: ${CILIUM_DUALSTACK_VERSION} + cni: "cilium_dualstack" + upgradeScenario: true + dependsOn: cilium_overlay_ds + + - template: cilium/cilium-overlay-load-test-template.yaml + parameters: + name: cilium_ds_arm_upgrade + clusterType: dualstack-byocni-nokubeproxy-up + clusterName: "cil-ds-arm" + nodeCount: ${NODE_COUNT_CILIUM} + vmSize: Standard_D8ps_v5 + arch: arm64 + dualstackVersion: ${CILIUM_DUALSTACK_VERSION} + cni: "cilium_dualstack" + upgradeScenario: true + dependsOn: cilium_ds_arm + + - template: cilium/cilium-overlay-load-test-template.yaml + parameters: + name: cilium_ds_mariner_upgrade + clusterType: dualstack-byocni-nokubeproxy-up + clusterName: "cil-ds-mar" + nodeCount: ${NODE_COUNT_CILIUM} + vmSize: ${VM_SIZE_CILIUM} + arch: amd64 + osSKU: AzureLinux + dualstackVersion: ${CILIUM_DUALSTACK_VERSION} + cni: "cilium_dualstack" + upgradeScenario: true + dependsOn: cilium_ds_mariner + + - template: cilium/cilium-overlay-load-test-template.yaml + parameters: + name: cilium_ds_rdma_upgrade + clusterType: dualstack-byocni-nokubeproxy-up + clusterName: "cil-ds-rdma" + nodeCount: 2 + vmSize: Standard_HC44-16rs + arch: amd64 + dualstackVersion: ${CILIUM_DUALSTACK_VERSION} + cni: "cilium_dualstack" + upgradeScenario: true + dependsOn: cilium_ds_rdma - stage: delete_resources displayName: "Delete Resources" @@ -432,8 +545,17 @@ stages: - rdma_linux_overlay - windows_podsubnet_HNS - windows_overlay_HNS - - windows19_overlay_HNS - setup + - ${{if eq(parameters.upgradeScenario, true)}}: + - cilium_overlay_upgrade + - cilium_overlay_hubble_upgrade + - cilium_overlay_mariner_upgrade + - cilium_overlay_arm_upgrade + - cilium_overlay_rdma_upgrade + - cilium_overlay_ds_upgrade + - cilium_ds_arm_upgrade + - cilium_ds_mariner_upgrade + - cilium_ds_rdma_upgrade variables: commitID: $[ stagedependencies.setup.env.outputs['SetEnvVars.commitID'] ] jobs: @@ -497,9 +619,6 @@ stages: win-cniv2-overlay: name: windows_overlay clusterName: w22-over - windows19_overlay: - name: windows19_overlay - clusterName: w19-amd-ov steps: - task: AzureCLI@2 inputs: diff --git a/.pipelines/cni/singletenancy/cniv1-template.yaml b/.pipelines/cni/singletenancy/cniv1-template.yaml index c89394bccb..0001402dbc 100644 --- a/.pipelines/cni/singletenancy/cniv1-template.yaml +++ b/.pipelines/cni/singletenancy/cniv1-template.yaml @@ -57,6 +57,7 @@ stages: vmSizeWin: ${{ parameters.vmSizeWin }} region: $(location) osSKU: ${{ parameters.osSKU }} + os: ${{ parameters.os }} # If ensures that only windows template calls are compared against the below condition # Condition confirms that: @@ -203,7 +204,7 @@ stages: echo "Delete load-test Namespace" make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }}-$(commitID) kubectl get ns --no-headers | grep -v 'kube\|default' | awk '{print $1}' - delete=`kubectl get ns --no-headers | grep -v 'kube\|default' | awk '{print $1}'` + delete=`kubectl get ns --no-headers | grep -v 'kube\|default\|gatekeeper' | 
awk '{print $1}'` kubectl delete ns $delete kubectl cluster-info kubectl get po -owide -A @@ -315,7 +316,7 @@ stages: echo "Delete load-test Namespace" make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }}-$(commitID) kubectl get ns --no-headers | grep -v 'kube\|default' | awk '{print $1}' - delete=`kubectl get ns --no-headers | grep -v 'kube\|default' | awk '{print $1}'` + delete=`kubectl get ns --no-headers | grep -v 'kube\|default\|gatekeeper' | awk '{print $1}'` kubectl delete ns $delete kubectl cluster-info kubectl get po -owide -A diff --git a/.pipelines/cni/singletenancy/cniv2-template.yaml b/.pipelines/cni/singletenancy/cniv2-template.yaml index b965359b22..d040e2569b 100644 --- a/.pipelines/cni/singletenancy/cniv2-template.yaml +++ b/.pipelines/cni/singletenancy/cniv2-template.yaml @@ -258,7 +258,7 @@ stages: echo "Delete load-test Namespace" make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }}-$(commitID) kubectl get ns --no-headers | grep -v 'kube\|default' | awk '{print $1}' - delete=`kubectl get ns --no-headers | grep -v 'kube\|default' | awk '{print $1}'` + delete=`kubectl get ns --no-headers | grep -v 'kube\|default\|gatekeeper' | awk '{print $1}'` kubectl delete ns $delete kubectl cluster-info kubectl get po -owide -A diff --git a/.pipelines/containers/container-template.yaml b/.pipelines/containers/container-template.yaml index fcbf88d7f2..aaf54b8e8e 100644 --- a/.pipelines/containers/container-template.yaml +++ b/.pipelines/containers/container-template.yaml @@ -2,7 +2,6 @@ parameters: arch: "" name: "" os: "" - os_version: "" steps: - task: AzureCLI@2 @@ -17,7 +16,7 @@ steps: - script: | set -e if [ ${{ parameters.os }} = 'windows' ]; then export BUILDX_ACTION='--push'; fi - make ${{ parameters.name }}-image OS=${{ parameters.os }} ARCH=${{ parameters.arch }} OS_VERSION=${{ parameters.os_version }} + make ${{ parameters.name }}-image OS=${{ parameters.os }} ARCH=${{ parameters.arch }} name: image_build displayName: Image Build retryCountOnTaskFailure: 3 diff --git a/.pipelines/containers/manifest-template.yaml b/.pipelines/containers/manifest-template.yaml index bb41ea5bb1..3a6b386838 100644 --- a/.pipelines/containers/manifest-template.yaml +++ b/.pipelines/containers/manifest-template.yaml @@ -1,7 +1,6 @@ parameters: name: "" platforms: "" - os_versions: "" steps: @@ -16,7 +15,7 @@ steps: - script: | set -e - make ${{ parameters.name }}-manifest-build PLATFORMS="${{ parameters.platforms }}" OS_VERSIONS="${{ parameters.os_versions }}" + make ${{ parameters.name }}-manifest-build PLATFORMS="${{ parameters.platforms }}" name: manifest_build displayName: Manifest Build retryCountOnTaskFailure: 3 diff --git a/.pipelines/mdnc/azure-cns-cni-1.4.39.1.yaml b/.pipelines/mdnc/azure-cns-cni-1.4.39.1.yaml index 47ae2f3557..749c68a5a6 100644 --- a/.pipelines/mdnc/azure-cns-cni-1.4.39.1.yaml +++ b/.pipelines/mdnc/azure-cns-cni-1.4.39.1.yaml @@ -82,7 +82,7 @@ spec: operator: NotIn values: - virtual-kubelet - - key: beta.kubernetes.io/os + - key: kubernetes.io/os operator: In values: - linux diff --git a/.pipelines/mdnc/azure-cns-cni-1.5.28.yaml b/.pipelines/mdnc/azure-cns-cni-1.5.28.yaml index 3db8a46a3c..e6678c47de 100644 --- a/.pipelines/mdnc/azure-cns-cni-1.5.28.yaml +++ b/.pipelines/mdnc/azure-cns-cni-1.5.28.yaml @@ -82,7 +82,7 @@ spec: operator: NotIn values: - virtual-kubelet - - key: beta.kubernetes.io/os + - key: kubernetes.io/os operator: In values: - linux diff --git a/.pipelines/mdnc/azure-cns-cni-1.5.4.yaml 
b/.pipelines/mdnc/azure-cns-cni-1.5.4.yaml
index f33fbba69d..6a7729974a 100644
--- a/.pipelines/mdnc/azure-cns-cni-1.5.4.yaml
+++ b/.pipelines/mdnc/azure-cns-cni-1.5.4.yaml
@@ -82,7 +82,7 @@ spec:
                 operator: NotIn
                 values:
                 - virtual-kubelet
-              - key: beta.kubernetes.io/os
+              - key: kubernetes.io/os
                 operator: In
                 values:
                 - linux
diff --git a/.pipelines/mdnc/azure-cns-cni.yaml b/.pipelines/mdnc/azure-cns-cni.yaml
index 469f25c669..f3ecdac9c0 100644
--- a/.pipelines/mdnc/azure-cns-cni.yaml
+++ b/.pipelines/mdnc/azure-cns-cni.yaml
@@ -82,7 +82,7 @@ spec:
                 operator: NotIn
                 values:
                 - virtual-kubelet
-              - key: beta.kubernetes.io/os
+              - key: kubernetes.io/os
                 operator: In
                 values:
                 - linux
diff --git a/.pipelines/multitenancy/swiftv2-e2e.steps.yaml b/.pipelines/multitenancy/swiftv2-e2e.steps.yaml
new file mode 100644
index 0000000000..10a6a29982
--- /dev/null
+++ b/.pipelines/multitenancy/swiftv2-e2e.steps.yaml
@@ -0,0 +1,78 @@
+parameters:
+  name: ""
+  clusterName: ""
+  continueOnError: true
+
+steps:
+  - bash: |
+      go version
+      go env
+      mkdir -p '$(GOBIN)'
+      mkdir -p '$(GOPATH)/pkg'
+      mkdir -p '$(modulePath)'
+      echo '##vso[task.prependpath]$(GOBIN)'
+      echo '##vso[task.prependpath]$(GOROOT)/bin'
+    name: "GoEnv"
+    displayName: "Set up the Go environment"
+
+  - task: KubectlInstaller@0
+    inputs:
+      kubectlVersion: latest
+
+  - task: AzureCLI@2
+    inputs:
+      azureSubscription: $(ACN_TEST_SERVICE_CONNECTION)
+      scriptLocation: "inlineScript"
+      scriptType: "bash"
+      workingDirectory: $(ACN_DIR)
+      addSpnToEnvironment: true
+      inlineScript: |
+        set -e
+        make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }}
+        ls -lah
+        pwd
+        kubectl cluster-info
+        kubectl get po -owide -A
+        echo "Apply the pod network yaml to start the delegation"
+        less test/integration/manifests/swiftv2/podnetwork.yaml
+        envsubst '${SUBNET_TOKEN},${SUBNET_RESOURCE_ID},${VNET_GUID}' < test/integration/manifests/swiftv2/podnetwork.yaml | kubectl apply -f -
+        echo "Check the podnetwork yaml file"
+        less test/integration/manifests/swiftv2/podnetwork.yaml
+        kubectl get pn
+        kubectl describe pn
+        echo "Apply the pod network instance yaml to reserve IP"
+        kubectl apply -f test/integration/manifests/swiftv2/pni.yaml
+        kubectl get pni
+        kubectl describe pni
+        export NODE_NAME_0="$(kubectl get nodes -o json | jq -r .items[0].metadata.name)"
+        echo $NODE_NAME_0
+        echo "Start the first pod using the reserved IP"
+        envsubst '$NODE_NAME_0' < test/integration/manifests/swiftv2/mtpod0.yaml | kubectl apply -f -
+        export NODE_NAME_1="$(kubectl get nodes -o json | jq -r .items[1].metadata.name)"
+        echo $NODE_NAME_1
+        echo "Start another pod using the reserved IP"
+        envsubst '$NODE_NAME_1' < test/integration/manifests/swiftv2/mtpod1.yaml | kubectl apply -f -
+        sleep 2m
+        kubectl get pod -o wide -A
+        sleep 2m
+        echo "Check pods after 4 minutes"
+        kubectl get po -owide -A
+        kubectl describe pni
+    name: "start_swiftv2_pods"
+    displayName: "Start Swiftv2 Pods"
+    continueOnError: ${{ parameters.continueOnError }}
+    env:
+      SUBNET_TOKEN: $(SUBNET_TOKEN)
+
+  - script: |
+      set -e
+      kubectl get po -owide -A
+      cd test/integration/swiftv2
+      echo "Running TestSwiftv2PodToPod; the remaining scenarios will move here after migration from scripts."
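+      # -count=1 bypasses Go's cached test results; the build tags below gate which integration tests compile into the run.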
+ go test -count=1 swiftv2_test.go -timeout 3m -tags swiftv2 -run ^TestSwiftv2PodToPod$ -tags=swiftv2,integration -v + workingDirectory: $(ACN_DIR) + retryCountOnTaskFailure: 3 + name: "Swiftv2_Tests_future_version" + displayName: "Swiftv2 Tests through code" + continueOnError: ${{ parameters.continueOnError }} + diff --git a/.pipelines/networkobservability/pipeline.yaml b/.pipelines/networkobservability/pipeline.yaml index c42b989210..2f3283b8ee 100644 --- a/.pipelines/networkobservability/pipeline.yaml +++ b/.pipelines/networkobservability/pipeline.yaml @@ -77,7 +77,7 @@ stages: scriptType: "bash" addSpnToEnvironment: true inlineScript: | - export DIR=${CILIUM_VERSION_TAG%.*} + export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. -f1,2) echo "installing files from ${DIR}" kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config-hubble.yaml kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-agent/files @@ -113,14 +113,9 @@ stages: displayName: "Wait for all pods to be running" retryCountOnTaskFailure: 3 - - script: | - echo "Run Cilium Connectivity Tests" - cilium status - cilium connectivity test --connect-timeout 4s --request-timeout 30s --test '!pod-to-pod-encryption,!node-to-node-encryption' - retryCountOnTaskFailure: 3 - name: "ciliumConnectivityTests" - displayName: "Run Cilium Connectivity Tests" - enabled: true + - template: ../templates/cilium-connectivity-tests.yaml + parameters: + skipTests: '!pod-to-pod-encryption,!node-to-node-encryption,!check-log-errors,!to-fqdns' - script: | export DIR=${CILIUM_VERSION_TAG%.*} diff --git a/.pipelines/npm/npm-cni-integration-test.yaml b/.pipelines/npm/npm-cni-integration-test.yaml index f15404bedb..4660ced284 100644 --- a/.pipelines/npm/npm-cni-integration-test.yaml +++ b/.pipelines/npm/npm-cni-integration-test.yaml @@ -15,81 +15,81 @@ jobs: - job: npm_k8se2e displayName: "NPM k8s E2E" dependsOn: ${{ parameters.dependsOn }} - condition: and( and( not(canceled()), not(failed()) ), ${{ or(contains(parameters.os_version, '2022'), eq(parameters.os, 'linux') ) }} , or( contains(variables.CONTROL_SCENARIO, 'npm') , contains(variables.CONTROL_SCENARIO, 'all') ) ) + condition: and( and( not(canceled()), not(failed()) ), ${{ or(contains(parameters.os_version, '2022'), and( eq(parameters.os, 'linux'), not(contains(parameters.clusterName, 'linux-podsub')) ) ) }} , or( contains(variables.CONTROL_SCENARIO, 'npm') , contains(variables.CONTROL_SCENARIO, 'all') ) ) timeoutInMinutes: 180 # This is for testing windows, due to the 3m between the 14 tests -> results in 42m of wasted time pool: name: $(BUILD_POOL_NAME_DEFAULT) demands: - - agent.os -equals Linux - - Role -equals Build + - agent.os -equals Linux + - Role -equals Build steps: - ${{ if eq(parameters.os, 'linux') }}: - - task: AzureCLI@2 - displayName: "Deploy NPM to Test Cluster" - inputs: - azureSubscription: ${{ parameters.sub }} - scriptType: "bash" - scriptLocation: "inlineScript" - inlineScript: | - set -ex + - task: AzureCLI@2 + displayName: "Deploy NPM to Test Cluster" + inputs: + azureSubscription: ${{ parameters.sub }} + scriptType: "bash" + scriptLocation: "inlineScript" + inlineScript: | + set -ex - make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }} + make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }} - # deploy azure-npm - kubectl apply -f https://raw.githubusercontent.com/Azure/azure-container-networking/master/npm/azure-npm.yaml - kubectl set image daemonset/azure-npm -n kube-system 
azure-npm=$IMAGE_REGISTRY/azure-npm:${{ parameters.tag }} - kubectl rollout status -n kube-system daemonset/azure-npm + # deploy azure-npm + kubectl apply -f https://raw.githubusercontent.com/Azure/azure-container-networking/master/npm/azure-npm.yaml + kubectl set image daemonset/azure-npm -n kube-system azure-npm=$IMAGE_REGISTRY/azure-npm:${{ parameters.tag }} + kubectl rollout status -n kube-system daemonset/azure-npm - kubectl get po -n kube-system -owide -A + kubectl get po -n kube-system -owide -A - # FQDN=`az aks show -n $CLUSTER_NAME -g $CLUSTER_NAME --query fqdn -o tsv` - FQDN=`az aks show -g ${{ parameters.clusterName }} -n ${{ parameters.clusterName }} --query fqdn -o tsv` - echo $FQDN - echo "##vso[task.setvariable variable=FQDN]$FQDN" + # FQDN=`az aks show -n $CLUSTER_NAME -g $CLUSTER_NAME --query fqdn -o tsv` + FQDN=`az aks show -g ${{ parameters.clusterName }} -n ${{ parameters.clusterName }} --query fqdn -o tsv` + echo $FQDN + echo "##vso[task.setvariable variable=FQDN]$FQDN" - artifact=npm_k8s - echo $artifact/e2e.test - echo "##vso[task.setvariable variable=artifact]$artifact" + artifact=npm_k8s + echo $artifact/e2e.test + echo "##vso[task.setvariable variable=artifact]$artifact" - - download: current - artifact: npm_k8s + - download: current + artifact: npm_k8s - ${{ if eq(parameters.os, 'windows') }}: - - task: AzureCLI@2 - displayName: "Deploy Windows NPM to Test Cluster" - inputs: - azureSubscription: ${{ parameters.sub }} - scriptType: "bash" - scriptLocation: "inlineScript" - inlineScript: | - set -ex - - make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }} - - # deploy azure-npm-win - # Windows - kubectl apply -f npm/examples/windows/azure-npm.yaml - kubectl set image daemonset/azure-npm-win -n kube-system azure-npm=$IMAGE_REGISTRY/azure-npm:windows-amd64-ltsc2022-${{ parameters.tag }} - kubectl rollout status -n kube-system daemonset/azure-npm-win - - # konnectivity agent tends to fail after rollout. Give it time to recover - sleep 60 - # Taint Linux (system) nodes so windows tests do not run on them - kubectl taint nodes -l kubernetes.azure.com/mode=system node-role.kubernetes.io/control-plane:NoSchedule - - kubectl get po -n kube-system -owide -A - - # FQDN=`az aks show -n $CLUSTER_NAME -g $CLUSTER_NAME --query fqdn -o tsv` - FQDN=`az aks show -g ${{ parameters.clusterName }} -n ${{ parameters.clusterName }} --query fqdn -o tsv` - echo $FQDN - echo "##vso[task.setvariable variable=FQDN]$FQDN" - - artifact=npm_k8s_windows - echo $artifact/e2e.test - echo "##vso[task.setvariable variable=artifact]$artifact" - - - download: current - artifact: npm_k8s_windows + - task: AzureCLI@2 + displayName: "Deploy Windows NPM to Test Cluster" + inputs: + azureSubscription: ${{ parameters.sub }} + scriptType: "bash" + scriptLocation: "inlineScript" + inlineScript: | + set -ex + + make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }} + + # deploy azure-npm-win + # Windows + kubectl apply -f npm/examples/windows/azure-npm.yaml + kubectl set image daemonset/azure-npm-win -n kube-system azure-npm=$IMAGE_REGISTRY/azure-npm:windows-amd64-${{ parameters.tag }} + kubectl rollout status -n kube-system daemonset/azure-npm-win + + # konnectivity agent tends to fail after rollout. 
Give it time to recover + sleep 60 + # Taint Linux (system) nodes so windows tests do not run on them + kubectl taint nodes -l kubernetes.azure.com/mode=system node-role.kubernetes.io/control-plane:NoSchedule + + kubectl get po -n kube-system -owide -A + + # FQDN=`az aks show -n $CLUSTER_NAME -g $CLUSTER_NAME --query fqdn -o tsv` + FQDN=`az aks show -g ${{ parameters.clusterName }} -n ${{ parameters.clusterName }} --query fqdn -o tsv` + echo $FQDN + echo "##vso[task.setvariable variable=FQDN]$FQDN" + + artifact=npm_k8s_windows + echo $artifact/e2e.test + echo "##vso[task.setvariable variable=artifact]$artifact" + + - download: current + artifact: npm_k8s_windows - bash: | set -e @@ -126,12 +126,12 @@ jobs: continueOnError: ${{ parameters.continueOnError }} - ${{ if eq(parameters.os, 'windows') }}: - - bash: | - # Untaint Linux (system) nodes once testing is complete - kubectl taint nodes -l kubernetes.azure.com/mode=system node-role.kubernetes.io/control-plane:NoSchedule- + - bash: | + # Untaint Linux (system) nodes once testing is complete + kubectl taint nodes -l kubernetes.azure.com/mode=system node-role.kubernetes.io/control-plane:NoSchedule- - displayName: Untaint Linux Nodes - condition: always() + displayName: Untaint Linux Nodes + condition: always() - bash: | npmLogs=$(System.DefaultWorkingDirectory)/${{ parameters.clusterName }}_npmLogs_Attempt_#$(System.StageAttempt) diff --git a/.pipelines/npm/npm-conformance-tests-latest-release.yaml b/.pipelines/npm/npm-conformance-tests-latest-release.yaml index d1d1b2dcc8..458ac3d12e 100644 --- a/.pipelines/npm/npm-conformance-tests-latest-release.yaml +++ b/.pipelines/npm/npm-conformance-tests-latest-release.yaml @@ -179,7 +179,7 @@ jobs: az aks get-credentials -n $(AZURE_CLUSTER) -g $(RESOURCE_GROUP) --file ./kubeconfig ./kubectl --kubeconfig=./kubeconfig apply -f https://raw.githubusercontent.com/Azure/azure-container-networking/master/npm/examples/windows/azure-npm.yaml - ./kubectl --kubeconfig=./kubeconfig set image daemonset/azure-npm-win -n kube-system azure-npm=$IMAGE_REGISTRY/azure-npm:windows-amd64-ltsc2022-$(TAG) + ./kubectl --kubeconfig=./kubeconfig set image daemonset/azure-npm-win -n kube-system azure-npm=$IMAGE_REGISTRY/azure-npm:windows-amd64-$(TAG) else echo "Creating Linux Cluster"; @@ -409,7 +409,7 @@ jobs: # ./kubectl --kubeconfig=./kubeconfig apply -f https://raw.githubusercontent.com/Azure/azure-container-networking/master/npm/examples/windows/azure-npm.yaml # # swap azure-npm image with one built during run - # ./kubectl --kubeconfig=./kubeconfig set image daemonset/azure-npm-win -n kube-system azure-npm=$IMAGE_REGISTRY/azure-npm:windows-amd64-ltsc2022-$(TAG) + # ./kubectl --kubeconfig=./kubeconfig set image daemonset/azure-npm-win -n kube-system azure-npm=$IMAGE_REGISTRY/azure-npm:windows-amd64-$(TAG) # echo "sleeping 3 minutes to allow NPM pods to restart" # sleep 180 diff --git a/.pipelines/npm/npm-conformance-tests.yaml b/.pipelines/npm/npm-conformance-tests.yaml index 1c76e11890..ae68233dc0 100644 --- a/.pipelines/npm/npm-conformance-tests.yaml +++ b/.pipelines/npm/npm-conformance-tests.yaml @@ -74,18 +74,16 @@ stages: arch: amd64 name: npm os: linux - npm_windows2022_amd64: + npm_windows_amd64: arch: amd64 name: npm os: windows - os_version: ltsc2022 steps: - template: ../containers/container-template.yaml parameters: arch: $(arch) name: $(name) os: $(os) - os_version: $(os_version) - stage: Create_Cluster_and_Run_Test displayName: NPM Conformance @@ -110,8 +108,12 @@ stages: IS_STRESS_TEST: "false" 
v2-linux-stress: AZURE_CLUSTER: "conformance-v2-linux-stress" - PROFILE: "v2-background" + PROFILE: "v2-linux-stress" IS_STRESS_TEST: "true" + v2-place-first: + AZURE_CLUSTER: "conformance-v2-place-first" + PROFILE: "v2-place-first" + IS_STRESS_TEST: "false" pool: name: $(BUILD_POOL_NAME_DEFAULT) demands: @@ -121,6 +123,7 @@ stages: RESOURCE_GROUP: $[ stagedependencies.setup.setup.outputs['EnvironmentalVariables.RESOURCE_GROUP'] ] TAG: $[ stagedependencies.setup.setup.outputs['EnvironmentalVariables.TAG'] ] FQDN: empty + PUBLIC_IP_NAME: $(RESOURCE_GROUP)-$(PROFILE)-public-ip steps: - checkout: self @@ -135,6 +138,23 @@ stages: echo created RG $(RESOURCE_GROUP) in $(LOCATION) az version + - task: AzureCLI@2 + displayName: "Create public IP with a service tag" + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptType: "bash" + scriptLocation: "inlineScript" + inlineScript: | + az network public-ip create \ + --name $(PUBLIC_IP_NAME) \ + --resource-group $(RESOURCE_GROUP) \ + --allocation-method Static \ + --ip-tags 'FirstPartyUsage=/DelegatedNetworkControllerTest' \ + --location $(LOCATION) \ + --sku Standard \ + --tier Regional \ + --version IPv4 + - task: AzureCLI@2 displayName: "Deploy NPM to Test Cluster" inputs: @@ -148,6 +168,9 @@ stages: chmod +x kubectl echo Cluster $(AZURE_CLUSTER) echo Resource $(RESOURCE_GROUP) + echo Public IP $(PUBLIC_IP_NAME) + export PUBLIC_IP_ID=$(az network public-ip show -g $(RESOURCE_GROUP) -n $(PUBLIC_IP_NAME) --query id -o tsv) + echo Public IP ID $PUBLIC_IP_ID if [[ $(AZURE_CLUSTER) == *ws22 ]] # * is used for pattern matching then @@ -164,7 +187,8 @@ stages: --network-plugin azure \ --vm-set-type VirtualMachineScaleSets \ --node-vm-size Standard_D4s_v3 \ - --node-count 1 + --node-count 1 \ + --load-balancer-outbound-ips $PUBLIC_IP_ID if [ $? != 0 ] then @@ -204,14 +228,15 @@ stages: az aks get-credentials -n $(AZURE_CLUSTER) -g $(RESOURCE_GROUP) --file ./kubeconfig ./kubectl --kubeconfig=./kubeconfig apply -f $(Pipeline.Workspace)/s/npm/examples/windows/azure-npm.yaml - ./kubectl --kubeconfig=./kubeconfig set image daemonset/azure-npm-win -n kube-system azure-npm=$IMAGE_REGISTRY/azure-npm:windows-amd64-ltsc2022-$(TAG) + ./kubectl --kubeconfig=./kubeconfig set image daemonset/azure-npm-win -n kube-system azure-npm=$IMAGE_REGISTRY/azure-npm:windows-amd64-$(TAG) else echo "Creating Linux Cluster"; az aks create --no-ssh-key \ --resource-group $(RESOURCE_GROUP) \ --name $(AZURE_CLUSTER) \ - --network-plugin azure + --network-plugin azure \ + --load-balancer-outbound-ips $PUBLIC_IP_ID if [ $? != 0 ] then @@ -253,15 +278,23 @@ stages: set -e make -C ./hack/aks set-kubeconf AZCLI=az GROUP=$(RESOURCE_GROUP) CLUSTER=$(AZURE_CLUSTER) - # sig-release provides test suite tarball(s) per k8s release. Just need to provide k8s version "v1.xx.xx" - # pulling k8s version from AKS. 
- eval k8sVersion="v"$( az aks show -g $(RESOURCE_GROUP) -n $(AZURE_CLUSTER) --query "currentKubernetesVersion") - echo $k8sVersion - curl -L https://dl.k8s.io/$k8sVersion/kubernetes-test-linux-amd64.tar.gz -o ./kubernetes-test-linux-amd64.tar.gz - - # https://github.com/kubernetes/sig-release/blob/master/release-engineering/artifacts.md#content-of-kubernetes-test-system-archtargz-on-example-of-kubernetes-test-linux-amd64targz-directories-removed-from-list - # explictly unzip and strip directories from ginkgo and e2e.test - tar -xvzf kubernetes-test-linux-amd64.tar.gz --strip-components=3 kubernetes/test/bin/ginkgo kubernetes/test/bin/e2e.test + if [ $(PROFILE) == "v2-place-first" ]; then + git clone --depth=1 --branch=huntergregory/service-types https://github.com/huntergregory/network-policy-api.git + cd network-policy-api/cmd/policy-assistant + make policy-assistant + cd ../../.. + mv network-policy-api/cmd/policy-assistant/cmd/policy-assistant/policy-assistant . + else + # sig-release provides a test suite tarball per k8s release; we just need to supply the k8s version "v1.xx.xx", + # which we pull from AKS. + eval k8sVersion="v"$( az aks show -g $(RESOURCE_GROUP) -n $(AZURE_CLUSTER) --query "currentKubernetesVersion") + echo $k8sVersion + curl -L https://dl.k8s.io/$k8sVersion/kubernetes-test-linux-amd64.tar.gz -o ./kubernetes-test-linux-amd64.tar.gz + + # https://github.com/kubernetes/sig-release/blob/master/release-engineering/artifacts.md#content-of-kubernetes-test-system-archtargz-on-example-of-kubernetes-test-linux-amd64targz-directories-removed-from-list + # explicitly unzip and strip directories from ginkgo and e2e.test + tar -xvzf kubernetes-test-linux-amd64.tar.gz --strip-components=3 kubernetes/test/bin/ginkgo kubernetes/test/bin/e2e.test + fi displayName: "Setup Environment" @@ -282,8 +315,13 @@ stages: echo $FQDN runConformance () { - KUBERNETES_SERVICE_HOST="$FQDN" KUBERNETES_SERVICE_PORT=443 ./e2e.test --provider=local --ginkgo.focus="NetworkPolicy" --ginkgo.skip="SCTP" --kubeconfig=./kubeconfig - # there can't be a command after e2e.test because the exit code is important + if [ $(PROFILE) == "v2-place-first" ]; then + # 15 minute timeout for creating LoadBalancer with Azure-internal "external IPs" + ./policy-assistant generate --noisy=true --job-timeout-seconds=2 --pod-creation-timeout-seconds 900 --server-protocol TCP,UDP --ignore-loopback --include special-services --exclude cni-brings-source-pod-info-to-other-node + else + KUBERNETES_SERVICE_HOST="$FQDN" KUBERNETES_SERVICE_PORT=443 ./e2e.test --provider=local --ginkgo.focus="NetworkPolicy" --ginkgo.skip="SCTP" --kubeconfig=./kubeconfig + fi + # there can't be a command after e2e.test/policy-assistant because the exit code is important } runConformanceWindows () { @@ -515,7 +553,7 @@ stages: # ./kubectl --kubeconfig=./kubeconfig apply -f $(Pipeline.Workspace)/s/npm/examples/windows/azure-npm.yaml # # swap azure-npm image with one built during run - # ./kubectl --kubeconfig=./kubeconfig set image daemonset/azure-npm-win -n kube-system azure-npm=$IMAGE_REGISTRY/azure-npm:windows-amd64-ltsc2022-$(TAG) + # ./kubectl --kubeconfig=./kubeconfig set image daemonset/azure-npm-win -n kube-system azure-npm=$IMAGE_REGISTRY/azure-npm:windows-amd64-$(TAG) # echo "sleeping and waiting for NPM pods to be ready..." 
# sleep 1m diff --git a/.pipelines/npm/npm-cyc-win-tests-latest-release.yaml b/.pipelines/npm/npm-cyc-win-tests-latest-release.yaml index 8cf9d5a553..2cf620f6be 100644 --- a/.pipelines/npm/npm-cyc-win-tests-latest-release.yaml +++ b/.pipelines/npm/npm-cyc-win-tests-latest-release.yaml @@ -153,7 +153,7 @@ jobs: ./kubectl --kubeconfig=./kubeconfig apply -f https://raw.githubusercontent.com/Azure/azure-container-networking/master/npm/examples/windows/azure-npm.yaml # swap azure-npm image with one built during run - ./kubectl --kubeconfig=./kubeconfig set image daemonset/azure-npm-win -n kube-system azure-npm=$IMAGE_REGISTRY/azure-npm:windows-amd64-ltsc2022-$(TAG) + ./kubectl --kubeconfig=./kubeconfig set image daemonset/azure-npm-win -n kube-system azure-npm=$IMAGE_REGISTRY/azure-npm:windows-amd64-$(TAG) echo "sleeping 3 minutes to allow NPM pods to restart" sleep 180 diff --git a/.pipelines/npm/npm-scale-test.yaml b/.pipelines/npm/npm-scale-test.yaml index 7117e4997d..f297d27baf 100644 --- a/.pipelines/npm/npm-scale-test.yaml +++ b/.pipelines/npm/npm-scale-test.yaml @@ -67,18 +67,16 @@ jobs: arch: amd64 name: npm os: linux - npm_windows2022_amd64: + npm_windows_amd64: arch: amd64 name: npm os: windows - os_version: ltsc2022 steps: - template: ../containers/container-template.yaml parameters: arch: $(arch) name: $(name) os: $(os) - os_version: $(os_version) - job: Create_Cluster_and_Run_Test timeoutInMinutes: 360 @@ -141,6 +139,19 @@ jobs: echo "Creating resource group named $(RESOURCE_GROUP)" az group create --name $(RESOURCE_GROUP) -l $(LOCATION) -o table + export PUBLIC_IP_NAME=$(RESOURCE_GROUP)-$(PROFILE)-public-ip + echo "Creating public IP with a service tag named $PUBLIC_IP_NAME" + az network public-ip create \ + --name $PUBLIC_IP_NAME \ + --resource-group $(RESOURCE_GROUP) \ + --allocation-method Static \ + --ip-tags 'FirstPartyUsage=/DelegatedNetworkControllerTest' \ + --location $(LOCATION) \ + --sku Standard \ + --tier Regional \ + --version IPv4 + export PUBLIC_IP_ID=$(az network public-ip show -g $(RESOURCE_GROUP) -n $PUBLIC_IP_NAME --query id -o tsv) + export CLUSTER_NAME=$(RESOURCE_GROUP)-$(PROFILE) echo "Creating cluster named $CLUSTER_NAME" az aks create \ @@ -154,7 +165,8 @@ jobs: --node-vm-size Standard_D4s_v3 \ --node-count 1 \ --tier standard \ - --max-pods 100 + --max-pods 100 \ + --load-balancer-outbound-ips $PUBLIC_IP_ID echo "Getting credentials to $CLUSTER_NAME" az aks get-credentials -g $(RESOURCE_GROUP) -n $CLUSTER_NAME --overwrite-existing --file ./kubeconfig @@ -204,7 +216,7 @@ jobs: # swap azure-npm image with one built during run kubectl set image daemonset/azure-npm -n kube-system azure-npm=$IMAGE_REGISTRY/azure-npm:linux-amd64-$(TAG) - kubectl set image daemonset/azure-npm-win -n kube-system azure-npm=$IMAGE_REGISTRY/azure-npm:windows-amd64-ltsc2022-$(TAG) + kubectl set image daemonset/azure-npm-win -n kube-system azure-npm=$IMAGE_REGISTRY/azure-npm:windows-amd64-$(TAG) sleep 30s echo "waiting for NPM to start running..." 
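The npm conformance and scale hunks above all repeat one pattern: create a Standard-SKU public IP carrying a first-party service tag, resolve its ARM resource ID, and hand that ID to az aks create so the cluster's outbound load balancer egresses from the tagged address. A condensed sketch of the flow, assuming the resource group already exists; resource names here are placeholders, while the flags are the ones used in the hunks above:

# Create a static, Standard-SKU IPv4 address tagged for first-party usage.
RG=my-test-rg                # placeholder resource group
IP_NAME=$RG-public-ip
LOCATION=eastus              # placeholder region
az network public-ip create \
  --name $IP_NAME \
  --resource-group $RG \
  --allocation-method Static \
  --ip-tags 'FirstPartyUsage=/DelegatedNetworkControllerTest' \
  --location $LOCATION \
  --sku Standard \
  --tier Regional \
  --version IPv4

# --load-balancer-outbound-ips takes resource IDs, not names, so resolve the ID first.
PUBLIC_IP_ID=$(az network public-ip show -g $RG -n $IP_NAME --query id -o tsv)
az aks create --no-ssh-key \
  --resource-group $RG \
  --name $RG-cluster \
  --network-plugin azure \
  --load-balancer-outbound-ips $PUBLIC_IP_ID

The pipeline.yaml diff that follows removes two inlined test stages in favor of a shared templates/run-unit-tests.yaml. The shell those stages carried is easy to misread, so here is the same exit-code-preserving tee, annotated: the test's exit status travels out on fd 3 while its stdout is split between the console and go-junit-report, so the step fails exactly when the tests fail.

{ { { {
  sudo -E env "PATH=$PATH" make test-all;                      # run the tests
  echo $? >&3;                                                 # smuggle the exit code out on fd 3
} | tee >(build/tools/bin/go-junit-report > report.xml) >&4;   # split stdout: junit report + fd 4
} 3>&1;                                                        # expose fd 3 (the code) as stdout for the pipe
} | { read xs; exit $xs; }                                     # read the code and exit with it
} 4>&1                                                         # route the tee'd output to the real stdout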
diff --git a/.pipelines/pipeline.yaml b/.pipelines/pipeline.yaml index bc2935d92f..4ad03945aa 100644 --- a/.pipelines/pipeline.yaml +++ b/.pipelines/pipeline.yaml @@ -15,10 +15,18 @@ trigger: branches: include: - gh-readonly-queue/master/* + - gh-readonly-queue/release/* tags: include: - "*" +schedules: +- cron: "0 2 * * *" # Every day at 2am + displayName: "Official Nightly Pipeline" + branches: + include: ["master"] + always: true + stages: - stage: setup displayName: ACN @@ -49,98 +57,10 @@ stages: name: "EnvironmentalVariables" displayName: "Set environmental variables" condition: always() - - ${{ if contains(variables['Build.SourceBranch'], 'refs/pull') }}: - - stage: test - displayName: Test ACN - dependsOn: - - setup - jobs: - - job: test - displayName: Run Tests - variables: - STORAGE_ID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.StorageID'] ] - pool: - name: "$(BUILD_POOL_NAME_DEFAULT)" - steps: - - script: | - make tools - # run test, echo exit status code to fd 3, pipe output from test to tee, which splits output to stdout and go-junit-report (which converts test output to report.xml), stdout from tee is redirected to fd 4. Take output written to fd 3 (which is the exit code of test), redirect to stdout, pipe to read from stdout then exit with that status code. Read all output from fd 4 (output from tee) and write to top stdout - { { { { - sudo -E env "PATH=$PATH" make test-all; - echo $? >&3; - } | tee >(build/tools/bin/go-junit-report > report.xml) >&4; - } 3>&1; - } | { read xs; exit $xs; } - } 4>&1 - retryCountOnTaskFailure: 3 - name: "Test" - displayName: "Run Tests" - - - stage: test_windows - displayName: Test ACN Windows - dependsOn: - - setup - jobs: - - job: test - displayName: Run Tests - variables: - STORAGE_ID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.StorageID'] ] - pool: - name: "$(BUILD_POOL_NAME_DEFAULT_WINDOWS_ALT)" - steps: - - script: | - cd npm/ - go test ./... - retryCountOnTaskFailure: 3 - name: "TestWindows" - displayName: "Run Windows Tests" - - ${{ else }}: - - stage: test - displayName: Test ACN - dependsOn: - - setup - jobs: - - job: test - displayName: Run Tests - variables: - STORAGE_ID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.StorageID'] ] - pool: - name: "$(BUILD_POOL_NAME_DEFAULT)" - steps: - - script: | - make tools - # run test, echo exit status code to fd 3, pipe output from test to tee, which splits output to stdout and go-junit-report (which converts test output to report.xml), stdout from tee is redirected to fd 4. Take output written to fd 3 (which is the exit code of test), redirect to stdout, pipe to read from stdout then exit with that status code. Read all output from fd 4 (output from tee) and write to top stdout - { { { { - sudo -E env "PATH=$PATH" make test-all; - echo $? >&3; - } | tee >(build/tools/bin/go-junit-report > report.xml) >&4; - } 3>&1; - } | { read xs; exit $xs; } - } 4>&1 - retryCountOnTaskFailure: 3 - name: "Test" - displayName: "Run Tests" - - - stage: test_windows - displayName: Test ACN Windows - dependsOn: - - setup - jobs: - - job: test - displayName: Run Tests - variables: - STORAGE_ID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.StorageID'] ] - pool: - name: "$(BUILD_POOL_NAME_DEFAULT_WINDOWS_ALT)" - steps: - - script: | - cd npm/ - go test ./... 
- retryCountOnTaskFailure: 3 - name: "TestWindows" - displayName: "Run Windows Tests" + - template: templates/run-unit-tests.yaml + - ${{ if not(contains(variables['Build.SourceBranch'], 'refs/pull')) }}: - stage: binaries displayName: Build Binaries dependsOn: @@ -198,68 +118,34 @@ stages: arch: amd64 name: azure-ipam os: linux - azure_ipam_windows2019_amd64: + azure_ipam_windows: arch: amd64 name: azure-ipam os: windows - os_version: ltsc2019 - azure_ipam_windows2022_amd64: + azure_ip_masq_merger_linux_amd64: arch: amd64 - name: azure-ipam - os: windows - os_version: ltsc2022 - cni_linux_amd64: - arch: amd64 - name: cni + name: azure-ip-masq-merger os: linux - cni_windows2019_amd64: + azure_iptables_monitor_linux_amd64: arch: amd64 - name: cni - os: windows - os_version: ltsc2019 - cni_windows2022_amd64: - arch: amd64 - name: cni - os: windows - os_version: ltsc2022 - cni_windows2025_amd64: + name: azure-iptables-monitor + os: linux + cni_linux_amd64: arch: amd64 name: cni - os: windows - os_version: ltsc2025 - cni_dropgz_linux_amd64: - arch: amd64 - name: cni-dropgz os: linux - cni_dropgz_windows2019_amd64: + cni_windows_amd64: arch: amd64 - name: cni-dropgz - os: windows - os_version: ltsc2019 - cni_dropgz_windows2022_amd64: - arch: amd64 - name: cni-dropgz + name: cni os: windows - os_version: ltsc2022 cns_linux_amd64: arch: amd64 name: cns os: linux - cns_windows2019_amd64: - arch: amd64 - name: cns - os: windows - os_version: ltsc2019 - cns_windows2022_amd64: - arch: amd64 - name: cns - os: windows - os_version: ltsc2022 - cns_windows2025_amd64: + cns_windows_amd64: arch: amd64 name: cns os: windows - os_version: ltsc2025 ipv6_hp_bpf_linux_amd64: arch: amd64 name: ipv6-hp-bpf @@ -268,11 +154,10 @@ stages: arch: amd64 name: npm os: linux - npm_windows2022_amd64: + npm_windows_amd64: arch: amd64 name: npm os: windows - os_version: ltsc2022 steps: - template: containers/container-template.yaml parameters: @@ -290,13 +175,17 @@ stages: arch: arm64 name: azure-ipam os: linux - cni_linux_arm64: + azure_ip_masq_merger_linux_arm64: arch: arm64 - name: cni + name: azure-ip-masq-merger os: linux - cni_dropgz_linux_arm64: + azure_iptables_monitor_linux_arm64: arch: arm64 - name: cni-dropgz + name: azure-iptables-monitor + os: linux + cni_linux_arm64: + arch: arm64 + name: cni os: linux cns_linux_arm64: arch: arm64 @@ -332,45 +221,66 @@ stages: matrix: azure_ipam: name: azure-ipam - os_versions: ltsc2019 ltsc2022 platforms: linux/amd64 linux/arm64 windows/amd64 cni: name: cni - os_versions: ltsc2019 ltsc2022 ltsc2025 - platforms: linux/amd64 linux/arm64 windows/amd64 - cni_dropgz: - name: cni-dropgz - os_versions: ltsc2019 ltsc2022 platforms: linux/amd64 linux/arm64 windows/amd64 cns: name: cns - os_versions: ltsc2019 ltsc2022 ltsc2025 platforms: linux/amd64 linux/arm64 windows/amd64 ipv6_hp_bpf: name: ipv6-hp-bpf platforms: linux/amd64 linux/arm64 npm: name: npm - os_versions: ltsc2022 platforms: linux/amd64 linux/arm64 windows/amd64 + azure_ip_masq_merger: + name: azure-ip-masq-merger + platforms: linux/amd64 linux/arm64 + azure_iptables_monitor: + name: azure-iptables-monitor + platforms: linux/amd64 linux/arm64 steps: - template: containers/manifest-template.yaml parameters: name: $(name) - os_versions: $(os_versions) platforms: $(platforms) # Cilium Podsubnet E2E tests - template: singletenancy/cilium/cilium-e2e-job-template.yaml parameters: name: "cilium_e2e" - displayName: Cilium + displayName: Cilium Podsubnet clusterType: swift-byocni-nokubeproxy-up clusterName: "ciliume2e" vmSize: 
Standard_B2ms k8sVersion: "" dependsOn: "containerize" + # Cilium Podsubnet Vnet Scale E2E tests + - template: singletenancy/cilium/cilium-e2e-job-template.yaml + parameters: + name: "cilium_vnetscale_e2e" + displayName: Cilium Podsubnet Vnet Scale Ubuntu + os: linux + clusterType: vnetscale-swift-byocni-nokubeproxy-up + clusterName: "ciliumvscalee2e" + vmSize: Standard_B2ms + k8sVersion: "" + dependsOn: "containerize" + + + # Cilium Nodesubnet E2E tests + - template: singletenancy/cilium-nodesubnet/cilium-nodesubnet-e2e-job-template.yaml + parameters: + name: "cilium_nodesubnet_e2e" + displayName: Cilium NodeSubnet + clusterType: nodesubnet-byocni-nokubeproxy-up + clusterName: "cilndsubnete2e" + vmSize: Standard_B2s + k8sVersion: "" + dependsOn: "containerize" + # Cilium Overlay E2E tests - template: singletenancy/cilium-overlay/cilium-overlay-e2e-job-template.yaml parameters: @@ -409,14 +319,38 @@ stages: # Azure Overlay E2E tests - template: singletenancy/azure-cni-overlay/azure-cni-overlay-e2e-job-template.yaml parameters: - name: "azure_overlay_e2e" - displayName: Azure Overlay + name: "linux_azure_overlay_e2e" + displayName: Azure Overlay Linux os: linux clusterType: overlay-byocni-up - clusterName: "azovere2e" + clusterName: "linuxazovere2e" + vmSize: Standard_B2ms + k8sVersion: "" + dependsOn: "containerize" + scaleup: 100 + + - template: singletenancy/azure-cni-overlay/azure-cni-overlay-e2e-job-template.yaml + parameters: + name: "win_azure_overlay_e2e" + displayName: Azure Overlay Windows + os: windows + clusterType: overlay-byocni-up + clusterName: "winazovere2e" vmSize: Standard_B2ms k8sVersion: "" dependsOn: "containerize" + scaleup: 50 + + # Azure Overlay E2E Stateless CNI tests + - template: singletenancy/azure-cni-overlay-stateless/azure-cni-overlay-stateless-e2e-job-template.yaml + parameters: + name: "azure_overlay_stateless_e2e" + displayName: Azure Stateless CNI Overlay + os: windows + clusterType: overlay-byocni-up + clusterName: "statelesswin" + vmSize: Standard_B2ms + dependsOn: "containerize" # AKS Swift E2E tests - template: singletenancy/aks-swift/e2e-job-template.yaml @@ -439,7 +373,7 @@ stages: clusterType: vnetscale-swift-byocni-up clusterName: "vscaleswifte2e" vmSize: Standard_B2ms - k8sVersion: "1.28" + k8sVersion: "1.30" dependsOn: "containerize" # CNIv1 E2E tests @@ -449,7 +383,7 @@ stages: displayName: AKS Ubuntu 22 arch: "amd64" os: "linux" - clusterType: linux-cniv1-up + clusterType: cniv1-up clusterName: "ubuntu22e2e" vmSize: Standard_B2s k8sVersion: 1.25 @@ -461,8 +395,8 @@ stages: name: "aks_windows_22_e2e" displayName: AKS Windows 2022 arch: amd64 - os: windows - clusterType: windows-cniv1-up + os: "windows" + clusterType: cniv1-up clusterName: "win22e2e" vmSize: Standard_B2ms os_version: "ltsc2022" @@ -472,49 +406,50 @@ stages: # CNI dual stack overlay E2E tests - template: singletenancy/dualstack-overlay/dualstackoverlay-e2e-job-template.yaml parameters: - name: "dualstackoverlay_e2e" - displayName: AKS DualStack Overlay + name: "linux_dualstackoverlay_e2e" + displayName: AKS DualStack Overlay Linux os: linux clusterType: dualstack-overlay-byocni-up - clusterName: "dsovere2e" + clusterName: "linuxdsovere2e" vmSize: Standard_B2ms dependsOn: "containerize" + scaleup: 100 - # Swiftv2 E2E tests with multitenancy cluster start up - - template: multitenancy/swiftv2-e2e-job-template.yaml + - template: singletenancy/dualstack-overlay/dualstackoverlay-e2e-job-template.yaml parameters: - name: "swiftv2_e2e" - displayName: Swiftv2 Multitenancy - os: linux - 
clusterType: swiftv2-multitenancy-cluster-up - clusterName: "mtacluster" - nodePoolName: "mtapool" - vmSize: $(SWIFTV2_MT_CLUSTER_SKU) + name: "win_dualstackoverlay_e2e" + displayName: AKS DualStack Overlay Windows + os: windows + clusterType: dualstack-overlay-byocni-up + clusterName: "windsovere2e" + vmSize: Standard_B2ms dependsOn: "containerize" - dummyClusterName: "swiftv2dummy" - dummyClusterType: "swiftv2-dummy-cluster-up" - dummyClusterDisplayName: Swiftv2 Multitenancy Dummy Cluster + scaleup: 50 - stage: delete displayName: Delete Clusters condition: always() dependsOn: - setup - - azure_overlay_e2e + - linux_azure_overlay_e2e + - win_azure_overlay_e2e + - azure_overlay_stateless_e2e - aks_swift_e2e - cilium_e2e + - cilium_vnetscale_e2e + - cilium_nodesubnet_e2e - cilium_overlay_e2e - cilium_h_overlay_e2e - aks_ubuntu_22_linux_e2e - aks_swift_vnetscale_e2e - aks_windows_22_e2e - - dualstackoverlay_e2e + - linux_dualstackoverlay_e2e + - win_dualstackoverlay_e2e - cilium_dualstackoverlay_e2e - - swiftv2_e2e variables: commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] jobs: - - job: delete + - job: delete_build displayName: Delete Cluster pool: name: "$(BUILD_POOL_NAME_DEFAULT)" @@ -523,43 +458,68 @@ stages: cilium_e2e: name: cilium_e2e clusterName: "ciliume2e" + region: $(REGION_AKS_CLUSTER_TEST) + cilium_vnetscale_e2e: + name: cilium_vnetscale_e2e + clusterName: "ciliumvscalee2e" + region: $(REGION_AKS_CLUSTER_TEST) + cilium_nodesubnet_e2e: + name: cilium_nodesubnet_e2e + clusterName: "cilndsubnete2e" + region: $(REGION_AKS_CLUSTER_TEST) cilium_overlay_e2e: name: cilium_overlay_e2e clusterName: "cilovere2e" + region: $(REGION_AKS_CLUSTER_TEST) cilium_h_overlay_e2e: name: cilium_h_overlay_e2e clusterName: "cilwhleovere2e" - azure_overlay_e2e: - name: azure_overlay_e2e - clusterName: "azovere2e" + region: $(REGION_AKS_CLUSTER_TEST) + linux_azure_overlay_e2e: + name: linux_azure_overlay_e2e + clusterName: "linuxazovere2e" + region: $(REGION_AKS_CLUSTER_TEST) + windows_azure_overlay_e2e: + name: win_azure_overlay_e2e + clusterName: "winazovere2e" + region: $(REGION_AKS_CLUSTER_TEST) + azure_overlay_stateless_e2e: + name: azure_overlay_stateless_e2e + clusterName: "statelesswin" + region: $(REGION_AKS_CLUSTER_TEST) aks_swift_e2e: name: aks_swift_e2e clusterName: "swifte2e" + region: $(REGION_AKS_CLUSTER_TEST) aks_swift_vnetscale_e2e: name: aks_swift_vnetscale_e2e clusterName: "vscaleswifte2e" + region: $(REGION_AKS_CLUSTER_TEST) aks_ubuntu_22_linux_e2e: name: aks_ubuntu_22_linux_e2e clusterName: "ubuntu22e2e" + region: $(REGION_AKS_CLUSTER_TEST) aks_windows_22_e2e: name: aks_windows_22_e2e clusterName: "win22e2e" - dualstackoverlay_e2e: - name: dualstackoverlay_e2e - clusterName: "dsovere2e" + region: $(REGION_AKS_CLUSTER_TEST) + linux_dualstackoverlay_e2e: + name: linux_dualstackoverlay_e2e + clusterName: "linuxdsovere2e" + region: $(REGION_DUALSTACKOVERLAY_CLUSTER_TEST) + windows_dualstackoverlay_e2e: + name: windows_dualstackoverlay_e2e + clusterName: "windsovere2e" + region: $(REGION_DUALSTACKOVERLAY_CLUSTER_TEST) cilium_dualstackoverlay_e2e: name: cilium_dualstackoverlay_e2e clusterName: "cildsovere2e" - swiftv2_e2e: - name: swiftv2_e2e - clusterName: "mtcluster" - swiftv2_dummy_e2e: - name: swiftv2_dummy_e2e - clusterName: "swiftv2dummy" + region: $(REGION_DUALSTACKOVERLAY_CLUSTER_TEST) steps: - template: templates/delete-cluster.yaml parameters: name: $(name) clusterName: $(clusterName)-$(commitID) - region: $(REGION_AKS_CLUSTER_TEST) - + 
region: $(region) + sub: $(SUB_AZURE_NETWORK_AGENT_BUILD_VALIDATIONS) + svcConn: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) diff --git a/.pipelines/run-pipeline.yaml b/.pipelines/run-pipeline.yaml new file mode 100644 index 0000000000..f759f11e8f --- /dev/null +++ b/.pipelines/run-pipeline.yaml @@ -0,0 +1,576 @@ +stages: +- stage: setup + displayName: ACN + variables: + ACN_DIR: azure-container-networking + jobs: + - job: env + displayName: Setup + pool: + type: linux + variables: + ob_outputDirectory: $(Build.ArtifactStagingDirectory) + ob_artifactSuffix: _source + + ACR_DIR: $(Build.SourcesDirectory)/azure-container-networking + BUILD_TYPE: $(IMAGE_ACR_TYPE) + steps: + - checkout: azure-container-networking + - template: build/ob-prepare.steps.yaml + +- template: templates/run-unit-tests.stages.yaml + +- stage: build + displayName: "Build Project" + dependsOn: + - setup + - unittest + variables: + ACN_DIR: drop_setup_env_source + ACN_PACKAGE_PATH: github.com/Azure/azure-container-networking + CNI_AI_PATH: $(ACN_PACKAGE_PATH)/telemetry.aiMetadata + CNS_AI_PATH: $(ACN_PACKAGE_PATH)/cns/logger.aiMetadata + NPM_AI_PATH: $(ACN_PACKAGE_PATH)/npm.aiMetadata + + STORAGE_ID: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.StorageID'] ] + TAG: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.Tag'] ] + + IMAGE_REPO_PATH: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.imageRepositoryPath'] ] + AZURE_IPAM_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.azureIpamVersion'] ] + AZURE_IP_MASQ_MERGER_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.azureIpMasqMergerVersion'] ] + AZURE_IPTABLES_MONITOR_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.azureIptablesMonitorVersion'] ] + CNI_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.cniVersion'] ] + CNS_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.cnsVersion'] ] + IPV6_HP_BPF_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.ipv6HpBpfVersion'] ] + NPM_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.npmVersion'] ] + jobs: + - template: /.pipelines/build/images.jobs.yaml + parameters: + images: + - job: linux_amd64 + displayName: "Linux/AMD64" + templateContext: + repositoryArtifact: drop_setup_env_source + buildScript: .pipelines/build/scripts/$(name).sh + obDockerfile: .pipelines/build/dockerfiles/$(name).Dockerfile + strategy: + maxParallel: 5 + matrix: + azure_ipam: + name: azure-ipam + extraArgs: '' + archiveName: azure-ipam + archiveVersion: $(AZURE_IPAM_VERSION) + imageTag: $(Build.BuildNumber) + packageWithDropGZ: True + azure_ip_masq_merger: + name: azure-ip-masq-merger + extraArgs: '' + archiveName: azure-ip-masq-merger + archiveVersion: $(AZURE_IP_MASQ_MERGER_VERSION) + imageTag: $(Build.BuildNumber) + azure_iptables_monitor: + name: azure-iptables-monitor + extraArgs: '' + archiveName: azure-iptables-monitor + archiveVersion: $(AZURE_IPTABLES_MONITOR_VERSION) + imageTag: $(Build.BuildNumber) + cni: + name: cni + extraArgs: '--build-arg CNI_AI_PATH=$(CNI_AI_PATH) --build-arg CNI_AI_ID=$(CNI_AI_ID)' + archiveName: azure-cni + archiveVersion: $(CNI_VERSION) + imageTag: $(Build.BuildNumber) + packageWithDropGZ: True + cns: + name: cns + extraArgs: '--build-arg CNS_AI_PATH=$(CNS_AI_PATH) --build-arg CNS_AI_ID=$(CNS_AI_ID)' + archiveName: azure-cns + archiveVersion: $(CNS_VERSION) + imageTag: $(Build.BuildNumber) + ipv6_hp_bpf: + name: 
ipv6-hp-bpf + extraArgs: "--build-arg DEBUG=$(System.Debug)" + archiveName: ipv6-hp-bpf + archiveVersion: $(IPV6_HP_BPF_VERSION) + imageTag: $(Build.BuildNumber) + # npm: + # name: npm + # extraArgs: '--build-arg NPM_AI_PATH=$(NPM_AI_PATH) --build-arg NPM_AI_ID=$(NPM_AI_ID)' + # archiveName: azure-npm + # archiveVersion: $(NPM_VERSION) + # imageTag: $(Build.BuildNumber) + + - job: windows_amd64 + displayName: "Windows" + templateContext: + repositoryArtifact: drop_setup_env_source + buildScript: .pipelines/build/scripts/$(name).sh + obDockerfile: .pipelines/build/dockerfiles/$(name).Dockerfile + strategy: + maxParallel: 5 + matrix: + azure_ipam: + name: azure-ipam + extraArgs: '' + archiveName: azure-ipam + archiveVersion: $(AZURE_IPAM_VERSION) + imageTag: $(Build.BuildNumber) + packageWithDropGZ: True + cni: + name: cni + extraArgs: '--build-arg CNI_AI_PATH=$(CNI_AI_PATH) --build-arg CNI_AI_ID=$(CNI_AI_ID)' + archiveName: azure-cni + archiveVersion: $(CNI_VERSION) + imageTag: $(Build.BuildNumber) + packageWithDropGZ: True + cns: + name: cns + extraArgs: '--build-arg CNS_AI_PATH=$(CNS_AI_PATH) --build-arg CNS_AI_ID=$(CNS_AI_ID)' + archiveName: azure-cns + archiveVersion: $(CNS_VERSION) + imageTag: $(Build.BuildNumber) + # npm: + # name: npm + # extraArgs: '--build-arg NPM_AI_PATH=$(NPM_AI_PATH) --build-arg NPM_AI_ID=$(NPM_AI_ID)' + # archiveName: azure-npm + # archiveVersion: $(NPM_VERSION) + # imageTag: $(Build.BuildNumber) + + - job: linux_arm64 + displayName: "Linux/ARM64" + templateContext: + repositoryArtifact: drop_setup_env_source + buildScript: .pipelines/build/scripts/$(name).sh + obDockerfile: .pipelines/build/dockerfiles/$(name).Dockerfile + strategy: + maxParallel: 3 + matrix: + azure_ipam: + name: azure-ipam + archiveName: azure-ipam + archiveVersion: $(AZURE_IPAM_VERSION) + extraArgs: '' + imageTag: $(Build.BuildNumber) + packageWithDropGZ: True + azure_ip_masq_merger: + name: azure-ip-masq-merger + extraArgs: '' + archiveName: azure-ip-masq-merger + archiveVersion: $(AZURE_IP_MASQ_MERGER_VERSION) + imageTag: $(Build.BuildNumber) + azure_iptables_monitor: + name: azure-iptables-monitor + extraArgs: '' + archiveName: azure-iptables-monitor + archiveVersion: $(AZURE_IPTABLES_MONITOR_VERSION) + imageTag: $(Build.BuildNumber) + cni: + name: cni + extraArgs: '--build-arg CNI_AI_PATH=$(CNI_AI_PATH) --build-arg CNI_AI_ID=$(CNI_AI_ID)' + archiveName: azure-cni + archiveVersion: $(CNI_VERSION) + imageTag: $(Build.BuildNumber) + packageWithDropGZ: True + cns: + name: cns + extraArgs: '--build-arg CNS_AI_PATH=$(CNS_AI_PATH) --build-arg CNS_AI_ID=$(CNS_AI_ID)' + archiveName: azure-cns + archiveVersion: $(CNS_VERSION) + imageTag: $(Build.BuildNumber) + ipv6_hp_bpf: + name: ipv6-hp-bpf + extraArgs: "--build-arg DEBUG=$(System.Debug)" + archiveName: ipv6-hp-bpf + archiveVersion: $(IPV6_HP_BPF_VERSION) + imageTag: $(Build.BuildNumber) + # npm: + # name: npm + # extraArgs: '--build-arg NPM_AI_PATH=$(NPM_AI_PATH) --build-arg NPM_AI_ID=$(NPM_AI_ID)' + # archiveName: azure-npm + # archiveVersion: $(NPM_VERSION) + # imageTag: $(Build.BuildNumber) + + +- ${{ if not(contains(variables['Build.SourceBranch'], 'refs/pull')) }}: + - stage: manifests + displayName: "Image Manifests" + dependsOn: + - setup + - build + variables: + IMAGE_REPO_PATH: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.imageRepositoryPath'] ] + + AZURE_IPAM_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.azureIpamVersion'] ] + AZURE_IP_MASQ_MERGER_VERSION: $[ 
stageDependencies.setup.env.outputs['EnvironmentalVariables.azureIpMasqMergerVersion'] ] + AZURE_IPTABLES_MONITOR_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.azureIptablesMonitorVersion'] ] + CNI_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.cniVersion'] ] + CNS_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.cnsVersion'] ] + IPV6_HP_BPF_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.ipv6HpBpfVersion'] ] + NPM_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.npmVersion'] ] + + IPAM_LINUX_AMD64_REF: $(IMAGE_REPO_PATH)/linux-amd64/azure-ipam:$(Build.BuildNumber) + IPAM_LINUX_ARM64_REF: $(IMAGE_REPO_PATH)/linux-arm64/azure-ipam:$(Build.BuildNumber) + IPAM_WINDOWS_AMD64_REF: $(IMAGE_REPO_PATH)/windows-amd64/azure-ipam:$(Build.BuildNumber) + + IP_MASQ_MERGER_LINUX_AMD64_REF: $(IMAGE_REPO_PATH)/linux-amd64/azure-ip-masq-merger:$(Build.BuildNumber) + IP_MASQ_MERGER_LINUX_ARM64_REF: $(IMAGE_REPO_PATH)/linux-arm64/azure-ip-masq-merger:$(Build.BuildNumber) + + IPTABLES_MONITOR_LINUX_AMD64_REF: $(IMAGE_REPO_PATH)/linux-amd64/azure-iptables-monitor:$(Build.BuildNumber) + IPTABLES_MONITOR_LINUX_ARM64_REF: $(IMAGE_REPO_PATH)/linux-arm64/azure-iptables-monitor:$(Build.BuildNumber) + + CNI_LINUX_AMD64_REF: $(IMAGE_REPO_PATH)/linux-amd64/cni:$(Build.BuildNumber) + CNI_LINUX_ARM64_REF: $(IMAGE_REPO_PATH)/linux-arm64/cni:$(Build.BuildNumber) + CNI_WINDOWS_AMD64_REF: $(IMAGE_REPO_PATH)/windows-amd64/cni:$(Build.BuildNumber) + + CNS_LINUX_AMD64_REF: $(IMAGE_REPO_PATH)/linux-amd64/cns:$(Build.BuildNumber) + CNS_LINUX_ARM64_REF: $(IMAGE_REPO_PATH)/linux-arm64/cns:$(Build.BuildNumber) + CNS_WINDOWS_AMD64_REF: $(IMAGE_REPO_PATH)/windows-amd64/cns:$(Build.BuildNumber) + + IPV6_LINUX_AMD64_REF: $(IMAGE_REPO_PATH)/linux-amd64/ipv6-hp-bpf:$(Build.BuildNumber) + IPV6_LINUX_ARM64_REF: $(IMAGE_REPO_PATH)/linux-arm64/ipv6-hp-bpf:$(Build.BuildNumber) + + NPM_LINUX_AMD64_REF: $(IMAGE_REPO_PATH)/linux-amd64/npm:$(Build.BuildNumber) + NPM_LINUX_ARM64_REF: $(IMAGE_REPO_PATH)/linux-arm64/npm:$(Build.BuildNumber) + NPM_WINDOWS_AMD64_REF: $(IMAGE_REPO_PATH)/windows-amd64/npm:$(Build.BuildNumber) + jobs: + + - template: build/manifests.jobs.yaml + parameters: + generate: + - job: azure_ipam + templateContext: + name: azure-ipam + image_tag: $(AZURE_IPAM_VERSION) + platforms: + - platform: linux/amd64 + imageReference: $(IPAM_LINUX_AMD64_REF) + - platform: linux/arm64 + imageReference: $(IPAM_LINUX_ARM64_REF) + - platform: windows/amd64 + imageReference: $(IPAM_WINDOWS_AMD64_REF) + - job: azure_ip_masq_merger + templateContext: + name: azure-ip-masq-merger + image_tag: $(AZURE_IP_MASQ_MERGER_VERSION) + platforms: + - platform: linux/amd64 + imageReference: $(IP_MASQ_MERGER_LINUX_AMD64_REF) + - platform: linux/arm64 + imageReference: $(IP_MASQ_MERGER_LINUX_ARM64_REF) + - job: azure_iptables_monitor + templateContext: + name: azure-iptables-monitor + image_tag: $(AZURE_IPTABLES_MONITOR_VERSION) + platforms: + - platform: linux/amd64 + imageReference: $(IPTABLES_MONITOR_LINUX_AMD64_REF) + - platform: linux/arm64 + imageReference: $(IPTABLES_MONITOR_LINUX_ARM64_REF) + - job: cni + templateContext: + name: cni + image_tag: $(CNI_VERSION) + platforms: + - platform: linux/amd64 + imageReference: $(CNI_LINUX_AMD64_REF) + - platform: linux/arm64 + imageReference: $(CNI_LINUX_ARM64_REF) + - platform: windows/amd64 + imageReference: $(CNI_WINDOWS_AMD64_REF) + - job: cns + templateContext: + name: cns + image_tag: 
$(CNS_VERSION) + platforms: + - platform: linux/amd64 + imageReference: $(CNS_LINUX_AMD64_REF) + - platform: linux/arm64 + imageReference: $(CNS_LINUX_ARM64_REF) + - platform: windows/amd64 + imageReference: $(CNS_WINDOWS_AMD64_REF) + - job: ipv6_hp_bpf + templateContext: + name: ipv6-hp-bpf + image_tag: $(IPV6_HP_BPF_VERSION) + platforms: + - platform: linux/amd64 + imageReference: $(IPV6_LINUX_AMD64_REF) + - platform: linux/arm64 + imageReference: $(IPV6_LINUX_ARM64_REF) + # - job: npm + # templateContext: + # name: npm + # image_tag: $(NPM_VERSION) + # platforms: + # - platform: linux/amd64 + # imageReference: $(NPM_LINUX_AMD64_REF) + # - platform: linux/arm64 + # imageReference: $(NPM_LINUX_ARM64_REF) + # - platform: windows/amd64 + # imageReference: $(NPM_WINDOWS_AMD64_REF) + + + # Cilium Podsubnet E2E tests + - template: singletenancy/cilium/cilium-e2e.stages.yaml + parameters: + name: "cilium_e2e" + displayName: Cilium + clusterType: swift-byocni-nokubeproxy-up + clusterName: "ciliume2e" + vmSize: Standard_B2ms + k8sVersion: "" + dependsOn: manifests + + # Cilium Nodesubnet E2E tests + - template: singletenancy/cilium-nodesubnet/cilium-nodesubnet-e2e.stages.yaml + parameters: + name: "cilium_nodesubnet_e2e" + displayName: Cilium NodeSubnet + clusterType: nodesubnet-byocni-nokubeproxy-up + clusterName: "cilndsubnete2e" + vmSize: Standard_B2s + k8sVersion: "" + dependsOn: manifests + + # Cilium Overlay E2E tests + - template: singletenancy/cilium-overlay/cilium-overlay-e2e.stages.yaml + parameters: + name: "cilium_overlay_e2e" + displayName: Cilium on AKS Overlay + clusterType: overlay-byocni-nokubeproxy-up + clusterName: "cilovere2e" + vmSize: Standard_B2ms + k8sVersion: "" + dependsOn: manifests + + # Cilium Dualstack Overlay E2E tests + - template: singletenancy/cilium-dualstack-overlay/cilium-dualstackoverlay-e2e.stages.yaml + parameters: + name: "cilium_dualstackoverlay_e2e" + displayName: Cilium on AKS DualStack Overlay + os: linux + clusterType: dualstack-byocni-nokubeproxy-up + clusterName: "cildsovere2e" + vmSize: Standard_B2ms + k8sVersion: "" + dependsOn: manifests + + # Cilium Overlay with hubble E2E tests + - template: singletenancy/cilium-overlay-withhubble/cilium-overlay-e2e.stages.yaml + parameters: + name: "cilium_h_overlay_e2e" + displayName: Cilium on AKS Overlay with Hubble + clusterType: overlay-byocni-nokubeproxy-up + clusterName: "cilwhleovere2e" + vmSize: Standard_B2ms + k8sVersion: "" + dependsOn: manifests + testHubble: true + + # Azure Overlay E2E tests + - template: singletenancy/azure-cni-overlay/azure-cni-overlay-e2e.stages.yaml + parameters: + name: "linux_azure_overlay_e2e" + displayName: Azure Overlay Linux + os: linux + clusterType: overlay-byocni-up + clusterName: "linuxazovere2e" + vmSize: Standard_B2ms + k8sVersion: "" + dependsOn: manifests + scaleup: 100 + + - template: singletenancy/azure-cni-overlay/azure-cni-overlay-e2e.stages.yaml + parameters: + name: "win_azure_overlay_e2e" + displayName: Azure Overlay Windows + os: windows + clusterType: overlay-byocni-up + clusterName: "winazovere2e" + vmSize: Standard_B2ms + k8sVersion: "" + dependsOn: manifests + scaleup: 50 + + # Azure Overlay E2E Stateless CNI tests + - template: singletenancy/azure-cni-overlay-stateless/azure-cni-overlay-stateless-e2e.stages.yaml + parameters: + name: "azure_overlay_stateless_e2e" + displayName: Azure Stateless CNI Overlay + os: windows + clusterType: overlay-byocni-up + clusterName: "statelesswin" + vmSize: Standard_B2ms + dependsOn: manifests + + # AKS Swift E2E 
tests + - template: singletenancy/aks-swift/e2e.stages.yaml + parameters: + name: "aks_swift_e2e" + displayName: AKS Swift Ubuntu + os: linux + clusterType: swift-byocni-up + clusterName: "swifte2e" + vmSize: Standard_B2ms + k8sVersion: "" + dependsOn: manifests + + # AKS Swift Vnet Scale E2E tests + - template: singletenancy/aks-swift/e2e.stages.yaml + parameters: + name: "aks_swift_vnetscale_e2e" + displayName: AKS Swift Vnet Scale Ubuntu + os: linux + clusterType: vnetscale-swift-byocni-up + clusterName: "vscaleswifte2e" + vmSize: Standard_B2ms + k8sVersion: "1.30" + dependsOn: manifests + + # CNIv1 E2E tests + - template: singletenancy/aks/e2e.stages.yaml + parameters: + name: "aks_ubuntu_22_linux_e2e" + displayName: AKS Ubuntu 22 + arch: "amd64" + os: "linux" + clusterType: cniv1-up + clusterName: "ubuntu22e2e" + vmSize: Standard_B2s + k8sVersion: 1.25 + scaleup: 100 + dependsOn: manifests + + - template: singletenancy/aks/e2e.stages.yaml + parameters: + name: "aks_windows_22_e2e" + displayName: AKS Windows 2022 + arch: amd64 + os: "windows" + clusterType: cniv1-up + clusterName: "win22e2e" + vmSize: Standard_B2ms + os_version: "ltsc2022" + scaleup: 50 + dependsOn: manifests + + # CNI dual stack overlay E2E tests + - template: singletenancy/dualstack-overlay/dualstackoverlay-e2e.stages.yaml + parameters: + name: "linux_dualstackoverlay_e2e" + displayName: AKS DualStack Overlay Linux + os: linux + clusterType: dualstack-overlay-byocni-up + clusterName: "linuxdsovere2e" + vmSize: Standard_B2ms + dependsOn: manifests + scaleup: 100 + + - template: singletenancy/dualstack-overlay/dualstackoverlay-e2e.stages.yaml + parameters: + name: "win_dualstackoverlay_e2e" + displayName: AKS DualStack Overlay Windows + os: windows + clusterType: dualstack-overlay-byocni-up + clusterName: "windsovere2e" + vmSize: Standard_B2ms + dependsOn: manifests + scaleup: 50 + + + - stage: delete + displayName: Delete Clusters + condition: always() + dependsOn: + - setup + - linux_azure_overlay_e2e + - win_azure_overlay_e2e + - azure_overlay_stateless_e2e + - aks_swift_e2e + - cilium_e2e + - cilium_nodesubnet_e2e + - cilium_overlay_e2e + - cilium_h_overlay_e2e + - aks_ubuntu_22_linux_e2e + - aks_swift_vnetscale_e2e + - aks_windows_22_e2e + - linux_dualstackoverlay_e2e + - win_dualstackoverlay_e2e + - cilium_dualstackoverlay_e2e + variables: + commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] + jobs: + - job: delete_build + displayName: Delete Cluster + pool: + name: "$(BUILD_POOL_NAME_DEFAULT)" + isCustom: true + type: linux + strategy: + matrix: + cilium_e2e: + name: cilium_e2e + clusterName: "ciliume2e" + region: $(REGION_AKS_CLUSTER_TEST) + cilium_nodesubnet_e2e: + name: cilium_nodesubnet_e2e + clusterName: "cilndsubnete2e" + region: $(REGION_AKS_CLUSTER_TEST) + cilium_overlay_e2e: + name: cilium_overlay_e2e + clusterName: "cilovere2e" + region: $(REGION_AKS_CLUSTER_TEST) + cilium_h_overlay_e2e: + name: cilium_h_overlay_e2e + clusterName: "cilwhleovere2e" + region: $(REGION_AKS_CLUSTER_TEST) + linux_azure_overlay_e2e: + name: linux_azure_overlay_e2e + clusterName: "linuxazovere2e" + region: $(REGION_AKS_CLUSTER_TEST) + windows_azure_overlay_e2e: + name: win_azure_overlay_e2e + clusterName: "winazovere2e" + region: $(REGION_AKS_CLUSTER_TEST) + azure_overlay_stateless_e2e: + name: azure_overlay_stateless_e2e + clusterName: "statelesswin" + region: $(REGION_AKS_CLUSTER_TEST) + aks_swift_e2e: + name: aks_swift_e2e + clusterName: "swifte2e" + region: $(REGION_AKS_CLUSTER_TEST) + 
aks_swift_vnetscale_e2e: + name: aks_swift_vnetscale_e2e + clusterName: "vscaleswifte2e" + region: $(REGION_AKS_CLUSTER_TEST) + aks_ubuntu_22_linux_e2e: + name: aks_ubuntu_22_linux_e2e + clusterName: "ubuntu22e2e" + region: $(REGION_AKS_CLUSTER_TEST) + aks_windows_22_e2e: + name: aks_windows_22_e2e + clusterName: "win22e2e" + region: $(REGION_AKS_CLUSTER_TEST) + linux_dualstackoverlay_e2e: + name: linux_dualstackoverlay_e2e + clusterName: "linuxdsovere2e" + region: $(REGION_DUALSTACKOVERLAY_CLUSTER_TEST) + windows_dualstackoverlay_e2e: + name: windows_dualstackoverlay_e2e + clusterName: "windsovere2e" + region: $(REGION_DUALSTACKOVERLAY_CLUSTER_TEST) + cilium_dualstackoverlay_e2e: + name: cilium_dualstackoverlay_e2e + clusterName: "cildsovere2e" + region: $(REGION_DUALSTACKOVERLAY_CLUSTER_TEST) + steps: + - checkout: azure-container-networking + - template: templates/delete-cluster.steps.yaml + parameters: + name: $(name) + clusterName: $(clusterName)-$(commitID) + region: $(region) + sub: $(SUB_AZURE_NETWORK_AGENT_BUILD_VALIDATIONS) + svcConn: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) diff --git a/.pipelines/singletenancy/aks-swift/e2e-job-template.yaml b/.pipelines/singletenancy/aks-swift/e2e-job-template.yaml index 126b56d766..64b612da45 100644 --- a/.pipelines/singletenancy/aks-swift/e2e-job-template.yaml +++ b/.pipelines/singletenancy/aks-swift/e2e-job-template.yaml @@ -5,6 +5,7 @@ parameters: clusterName: "" vmSize: "" k8sVersion: "" + os: "" dependsOn: "" stages: @@ -27,6 +28,7 @@ stages: vmSize: ${{ parameters.vmSize }} k8sVersion: ${{ parameters.k8sVersion }} dependsOn: ${{ parameters.dependsOn }} + os: ${{ parameters.os }} region: $(REGION_AKS_CLUSTER_TEST) - stage: ${{ parameters.name }} diff --git a/.pipelines/singletenancy/aks-swift/e2e-step-template.yaml b/.pipelines/singletenancy/aks-swift/e2e-step-template.yaml index 1e5a923d4f..00cc69aed4 100644 --- a/.pipelines/singletenancy/aks-swift/e2e-step-template.yaml +++ b/.pipelines/singletenancy/aks-swift/e2e-step-template.yaml @@ -71,6 +71,7 @@ steps: scriptType: "bash" addSpnToEnvironment: true inlineScript: | + set -e cd test/integration/load # Scale Cluster Up/Down to confirm functioning CNS diff --git a/.pipelines/singletenancy/aks-swift/e2e.stages.yaml b/.pipelines/singletenancy/aks-swift/e2e.stages.yaml new file mode 100644 index 0000000000..5fcb7f3b87 --- /dev/null +++ b/.pipelines/singletenancy/aks-swift/e2e.stages.yaml @@ -0,0 +1,103 @@ +parameters: + name: "" + displayName: "" + clusterType: "" + clusterName: "" + vmSize: "" + k8sVersion: "" + os: "" + dependsOn: "" + +stages: + - stage: ${{ parameters.clusterName }} + displayName: Create Cluster - ${{ parameters.displayName }} + dependsOn: + - ${{ parameters.dependsOn }} + - setup + pool: + type: linux + isCustom: true + name: $(BUILD_POOL_NAME_DEFAULT) + variables: + commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] + jobs: + - template: ../../templates/create-cluster.jobs.yaml + parameters: + name: ${{ parameters.name }} + displayName: ${{ parameters.displayName }} + clusterType: ${{ parameters.clusterType }} + clusterName: ${{ parameters.clusterName }}-$(commitID) + vmSize: ${{ parameters.vmSize }} + k8sVersion: ${{ parameters.k8sVersion }} + dependsOn: ${{ parameters.dependsOn }} + os: ${{ parameters.os }} + region: $(REGION_AKS_CLUSTER_TEST) + + - stage: ${{ parameters.name }} + displayName: E2E - ${{ parameters.displayName }} + dependsOn: + - setup + - ${{ parameters.clusterName }} + variables: + TAG: $[ 
stagedependencies.setup.env.outputs['EnvironmentalVariables.Tag'] ] + CURRENT_VERSION: $[ stagedependencies.containerize.check_tag.outputs['CurrentTagManifests.currentTagManifests'] ] + commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] + GOPATH: "$(Agent.TempDirectory)/go" # Go workspace path + GOBIN: "$(GOPATH)/bin" # Go binaries path + modulePath: "$(GOPATH)/src/github.com/Azure/azure-container-networking" + + IMAGE_REPO_PATH: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.imageRepositoryPath'] ] + AZURE_IPAM_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.azureIpamVersion'] ] + CNI_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.cniVersion'] ] + CNS_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.cnsVersion'] ] + CNS_IMAGE_NAME_OVERRIDE: $(IMAGE_REPO_PATH)/cns + CNI_IMAGE_NAME_OVERRIDE: $(IMAGE_REPO_PATH)/cni + condition: and(succeeded(), eq(variables.TAG, variables.CURRENT_VERSION)) + jobs: + - job: ${{ parameters.name }} + displayName: Singletenancy AKS Swift Suite - (${{ parameters.name }}) + timeoutInMinutes: 120 + pool: + type: linux + isCustom: true + name: $(BUILD_POOL_NAME_DEFAULT) + demands: + - agent.os -equals Linux + - Role -equals $(CUSTOM_E2E_ROLE) + steps: + - checkout: azure-container-networking + - template: e2e.steps.yaml + parameters: + name: ${{ parameters.name }} + clusterName: ${{ parameters.clusterName }}-$(commitID) + scaleup: 100 + + - template: ../../cni/k8s-e2e/k8s-e2e.jobs.yaml + parameters: + sub: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + clusterName: ${{ parameters.clusterName }}-$(commitID) + os: ${{ parameters.os }} + dependsOn: ${{ parameters.name }} + datapath: true + dns: true + portforward: true + hostport: true + service: true + + - job: failedE2ELogs + displayName: "Failure Logs" + dependsOn: + - ${{ parameters.name }} + - cni_linux + condition: failed() + pool: + type: linux + variables: + ob_outputDirectory: $(Build.ArtifactStagingDirectory)/out + steps: + - checkout: azure-container-networking + - template: ../../templates/log.steps.yaml + parameters: + clusterName: ${{ parameters.clusterName }}-$(commitID) + os: ${{ parameters.os }} + cni: cniv2 diff --git a/.pipelines/singletenancy/aks-swift/e2e.steps.yaml b/.pipelines/singletenancy/aks-swift/e2e.steps.yaml new file mode 100644 index 0000000000..5c8e5db52d --- /dev/null +++ b/.pipelines/singletenancy/aks-swift/e2e.steps.yaml @@ -0,0 +1,100 @@ +parameters: + name: "" + clusterName: "" + scaleup: "" + +steps: + - bash: | + go version + go env + mkdir -p '$(GOBIN)' + mkdir -p '$(GOPATH)/pkg' + mkdir -p '$(modulePath)' + echo '##vso[task.prependpath]$(GOBIN)' + echo '##vso[task.prependpath]$(GOROOT)/bin' + name: "GoEnv" + displayName: "Set up the Go environment" + + - task: KubectlInstaller@0 + inputs: + kubectlVersion: latest + + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }} + name: "kubeconfig" + displayName: "Set Kubeconfig" + + - script: | + ls -lah + pwd + kubectl cluster-info + kubectl get po -owide -A + sudo -E env "PATH=$PATH" make test-load \ + SCALE_UP=32 OS_TYPE=linux CNI_TYPE=cniv2 VALIDATE_STATEFILE=true \ + INSTALL_CNS=true INSTALL_AZURE_VNET=true CLEANUP=true \ + CNS_VERSION=$(CNS_VERSION) CNI_VERSION=$(CNI_VERSION) \ + 
CNS_IMAGE_NAME_OVERRIDE=$(CNS_IMAGE_NAME_OVERRIDE) CNI_IMAGE_NAME_OVERRIDE=$(CNI_IMAGE_NAME_OVERRIDE) + retryCountOnTaskFailure: 3 + name: "aksswifte2e" + displayName: "Run AKS Swift E2E" + + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + kubectl get po -owide -A + clusterName=${{ parameters.clusterName }} + echo "Restarting nodes" + for val in $(az vmss list -g MC_${clusterName}_${clusterName}_$(REGION_AKS_CLUSTER_TEST) --query "[].name" -o tsv); do + make -C ./hack/aks restart-vmss AZCLI=az CLUSTER=${clusterName} REGION=$(REGION_AKS_CLUSTER_TEST) VMSS_NAME=${val} + done + displayName: "Restart Nodes" + + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + cd test/integration/load + + # Scale Cluster Up/Down to confirm functioning CNS + ITERATIONS=2 SCALE_UP=${{ parameters.scaleup }} OS_TYPE=linux go test -count 1 -timeout 30m -tags load -run ^TestLoad$ + kubectl get pods -owide -A + + cd ../../.. + echo "Validating Node Restart" + make test-validate-state OS_TYPE=linux RESTART_CASE=true CNI_TYPE=cniv2 + kubectl delete ns load-test + displayName: "Validate Node Restart" + retryCountOnTaskFailure: 3 + + - script: | + echo "Run Wireserver and Metadata Connectivity Tests" + bash test/network/wireserver_metadata_test.sh + retryCountOnTaskFailure: 3 + name: "WireserverMetadataConnectivityTests" + displayName: "Run Wireserver and Metadata Connectivity Tests" + + - script: | + cd hack/scripts + chmod +x async-delete-test.sh + ./async-delete-test.sh + if [ -n "$(kubectl -n kube-system get ds azure-cns | grep non-existing)" ]; then + kubectl -n kube-system patch daemonset azure-cns --type json -p='[{"op": "remove", "path": "/spec/template/spec/nodeSelector/non-existing"}]' + fi + name: "testAsyncDelete" + displayName: "Verify Async Delete when CNS is down" diff --git a/.pipelines/singletenancy/aks/e2e-job-template.yaml b/.pipelines/singletenancy/aks/e2e-job-template.yaml index 869ef28484..5a8c3c28b5 100644 --- a/.pipelines/singletenancy/aks/e2e-job-template.yaml +++ b/.pipelines/singletenancy/aks/e2e-job-template.yaml @@ -32,6 +32,7 @@ stages: vmSizeWin: ${{ parameters.vmSize }} # Matching linux vmSize k8sVersion: ${{ parameters.k8sVersion }} dependsOn: ${{ parameters.dependsOn }} + os: ${{ parameters.os }} region: $(REGION_AKS_CLUSTER_TEST) - stage: ${{ parameters.name }} @@ -50,6 +51,7 @@ stages: jobs: - job: ${{ parameters.name }} displayName: Singletenancy AKS - (${{ parameters.name }}) + timeoutInMinutes: 120 pool: name: $(BUILD_POOL_NAME_DEFAULT) demands: diff --git a/.pipelines/singletenancy/aks/e2e-step-template.yaml b/.pipelines/singletenancy/aks/e2e-step-template.yaml index 397bac27dd..ccb7df8492 100644 --- a/.pipelines/singletenancy/aks/e2e-step-template.yaml +++ b/.pipelines/singletenancy/aks/e2e-step-template.yaml @@ -64,6 +64,7 @@ steps: done displayName: "Restart Nodes" - script: | + set -e kubectl get pods -A -o wide echo "Deploying test pods" cd test/integration/load diff --git a/.pipelines/singletenancy/aks/e2e.stages.yaml b/.pipelines/singletenancy/aks/e2e.stages.yaml new file mode 100644 index 0000000000..0870a98dd6 --- /dev/null +++ b/.pipelines/singletenancy/aks/e2e.stages.yaml @@ -0,0 +1,108 @@ +parameters: + name: "" + displayName: "" + arch: "" + os: "" + clusterType: "" + 
clusterName: "" + vmSize: "" + k8sVersion: "" + os_version: "" + scaleup: "" + dependsOn: "" + +stages: + - stage: ${{ parameters.clusterName }} + displayName: Create Cluster - ${{ parameters.displayName }} + dependsOn: + - ${{ parameters.dependsOn }} + - setup + pool: + isCustom: true + type: linux + name: $(BUILD_POOL_NAME_DEFAULT) + variables: + commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] + jobs: + - template: ../../templates/create-cluster.jobs.yaml + parameters: + name: ${{ parameters.name }} + displayName: ${{ parameters.displayName }} + clusterType: ${{ parameters.clusterType }} + clusterName: ${{ parameters.clusterName }}-$(commitID) + vmSize: ${{ parameters.vmSize }} + vmSizeWin: ${{ parameters.vmSize }} # Matching linux vmSize + k8sVersion: ${{ parameters.k8sVersion }} + dependsOn: ${{ parameters.dependsOn }} + os: ${{ parameters.os }} + region: $(REGION_AKS_CLUSTER_TEST) + + - stage: ${{ parameters.name }} + displayName: E2E - ${{ parameters.displayName }} + variables: + GOPATH: "$(Agent.TempDirectory)/go" # Go workspace path + GOBIN: "$(GOPATH)/bin" # Go binaries path + modulePath: "$(GOPATH)/src/github.com/Azure/azure-container-networking" + commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] + + AZURE_REGISTRY: acnpublic.azurecr.io + IMAGE_REPO_PATH: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.imageRepositoryPath'] ] + AZURE_IPAM_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.azureIpamVersion'] ] + CNI_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.cniVersion'] ] + CNS_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.cnsVersion'] ] + CNS_IMAGE_NAME_OVERRIDE: $(IMAGE_REPO_PATH)/cns + CNI_IMAGE_NAME_OVERRIDE: $(IMAGE_REPO_PATH)/cni + dependsOn: + - setup + - ${{ parameters.clusterName }} + jobs: + - job: ${{ parameters.name }} + displayName: Singletenancy AKS - (${{ parameters.name }}) + timeoutInMinutes: 120 + pool: + name: $(BUILD_POOL_NAME_DEFAULT) + demands: + - agent.os -equals Linux + - Role -equals $(CUSTOM_E2E_ROLE) + isCustom: true + type: linux + steps: + - checkout: azure-container-networking + - template: e2e.steps.yaml + parameters: + name: ${{ parameters.name }} + clusterName: ${{ parameters.clusterName }}-$(commitID) + arch: ${{ parameters.arch }} + os: ${{ parameters.os }} + os_version: ${{ parameters.os_version }} + scaleup: ${{ parameters.scaleup }} + + - template: ../../cni/k8s-e2e/k8s-e2e.jobs.yaml + parameters: + sub: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + clusterName: ${{ parameters.clusterName }}-$(commitID) + os: ${{ parameters.os }} + datapath: true + dns: true + portforward: true + hybridWin: true + service: true + hostport: true + dependsOn: ${{ parameters.name }} + + - job: failedE2ELogs + displayName: "Failure Logs" + dependsOn: + - ${{ parameters.name }} + - cni_${{ parameters.os }} + condition: failed() + pool: + type: linux + variables: + ob_outputDirectory: $(Build.ArtifactStagingDirectory)/out + steps: + - template: ../../templates/log.steps.yaml + parameters: + clusterName: ${{ parameters.clusterName }}-$(commitID) + os: ${{ parameters.os }} + cni: cniv1 diff --git a/.pipelines/singletenancy/aks/e2e.steps.yaml b/.pipelines/singletenancy/aks/e2e.steps.yaml new file mode 100644 index 0000000000..691c9ad34e --- /dev/null +++ b/.pipelines/singletenancy/aks/e2e.steps.yaml @@ -0,0 +1,74 @@ +parameters: + name: "" + clusterName: "" + arch: "" + os: "" + os_version: "" + scaleup: "" 
+ +steps: + - bash: | + go version + go env + mkdir -p '$(GOBIN)' + mkdir -p '$(GOPATH)/pkg' + mkdir -p '$(modulePath)' + echo '##vso[task.prependpath]$(GOBIN)' + echo '##vso[task.prependpath]$(GOROOT)/bin' + name: "GoEnv" + displayName: "Set up the Go environment" + - task: KubectlInstaller@0 + inputs: + kubectlVersion: latest + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }} + echo "Upload CNI" + echo "Deploying on Linux nodes" + export CNI_IMAGE="$AZURE_REGISTRY"/"$CNI_IMAGE_NAME_OVERRIDE":"$CNI_VERSION" + echo "CNI image: $CNI_IMAGE" + if [ "${{parameters.os}}" == "windows" ]; then + envsubst '${CNI_IMAGE}' < ./test/integration/manifests/cni/cni-installer-v1.yaml | kubectl apply -f - + kubectl rollout status daemonset/azure-cni -n kube-system + echo "Deploying on windows nodes" + envsubst '${CNI_IMAGE}' < ./test/integration/manifests/cni/cni-installer-v1-windows.yaml | kubectl apply -f - + kubectl rollout status daemonset/azure-cni-windows -n kube-system + else + envsubst '${CNI_IMAGE}' < ./test/integration/manifests/cni/cni-installer-v1.yaml | kubectl apply -f - + kubectl rollout status daemonset/azure-cni -n kube-system + fi + name: "deployCNI" + displayName: "Deploy CNI" + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + clusterName=${{ parameters.clusterName }} + echo "Restarting nodes" + for val in $(az vmss list -g MC_${clusterName}_${clusterName}_$(REGION_AKS_CLUSTER_TEST) --query "[].name" -o tsv); do + make -C ./hack/aks restart-vmss AZCLI=az CLUSTER=${clusterName} REGION=$(REGION_AKS_CLUSTER_TEST) VMSS_NAME=${val} + done + displayName: "Restart Nodes" + - script: | + set -e + kubectl get pods -A -o wide + echo "Deploying test pods" + cd test/integration/load + ITERATIONS=2 SCALE_UP=${{ parameters.scaleup }} OS_TYPE=${{ parameters.os }} go test -count 1 -timeout 30m -tags load -run ^TestLoad$ + cd ../../.. 
+ + make test-validate-state OS_TYPE=${{ parameters.os }} CNI_TYPE=cniv1 + + kubectl delete ns load-test + displayName: "Validate State" + retryCountOnTaskFailure: 3 diff --git a/.pipelines/singletenancy/azure-cni-overlay-stateless/azure-cni-overlay-stateless-e2e-job-template.yaml b/.pipelines/singletenancy/azure-cni-overlay-stateless/azure-cni-overlay-stateless-e2e-job-template.yaml new file mode 100644 index 0000000000..3c955293d3 --- /dev/null +++ b/.pipelines/singletenancy/azure-cni-overlay-stateless/azure-cni-overlay-stateless-e2e-job-template.yaml @@ -0,0 +1,92 @@ +parameters: + name: "" + displayName: "" + clusterType: "" + clusterName: "" + vmSize: "" + k8sVersion: "" + dependsOn: "" + +stages: + - stage: ${{ parameters.clusterName }} + displayName: Create Cluster - ${{ parameters.displayName }} + dependsOn: + - ${{ parameters.dependsOn }} + - setup + pool: + name: $(BUILD_POOL_NAME_DEFAULT) + variables: + commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] + jobs: + - template: ../../templates/create-cluster.yaml + parameters: + name: ${{ parameters.name }} + displayName: ${{ parameters.displayName }} + clusterType: ${{ parameters.clusterType }} + clusterName: ${{ parameters.clusterName }}-$(commitID) + vmSize: ${{ parameters.vmSize }} + k8sVersion: ${{ parameters.k8sVersion }} + dependsOn: ${{ parameters.dependsOn }} + region: $(REGION_AKS_CLUSTER_TEST) + - template: ../../templates/add-windows-nodepool-job.yaml + parameters: + depend: ${{ parameters.name }} + clusterName: ${{ parameters.clusterName }}-$(commitID) + vmSize: ${{ parameters.vmSize }} + + - stage: ${{ parameters.name }} + displayName: E2E - ${{ parameters.displayName }} + dependsOn: + - setup + - publish + - ${{ parameters.clusterName }} + variables: + commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] + GOPATH: "$(Agent.TempDirectory)/go" # Go workspace path + GOBIN: "$(GOPATH)/bin" # Go binaries path + modulePath: "$(GOPATH)/src/github.com/Azure/azure-container-networking" + pool: + name: $(BUILD_POOL_NAME_DEFAULT) + jobs: + - job: ${{ parameters.name }}_windows + displayName: Azure Stateless CNI Overlay Test Suite | Windows - (${{ parameters.name }}) + timeoutInMinutes: 120 + pool: + name: $(BUILD_POOL_NAME_DEFAULT) + demands: + - agent.os -equals Linux + - Role -equals $(CUSTOM_E2E_ROLE) + steps: + - template: azure-cni-overlay-stateless-e2e-step-template.yaml + parameters: + name: ${{ parameters.name }} + clusterName: ${{ parameters.clusterName }}-$(commitID) + os: windows + vmSizeWin: ${{ parameters.vmSize }} + + - template: ../../cni/k8s-e2e/k8s-e2e-job-template.yaml + parameters: + sub: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + clusterName: ${{ parameters.clusterName }}-$(commitID) + os: windows + dependsOn: ${{ parameters.name }}_windows + datapath: true + dns: true + portforward: true + hostport: true + service: true + hybridWin: true + + - job: failedE2ELogs_windows + displayName: "Windows Failure Logs" + dependsOn: + - ${{ parameters.name }}_windows + - cni_windows + condition: in(dependencies.${{ parameters.name }}_windows.result, 'Failed') + steps: + - template: ../../templates/log-template.yaml + parameters: + clusterName: ${{ parameters.clusterName }}-$(commitID) + os: windows + cni: cniv2 + diff --git a/.pipelines/singletenancy/azure-cni-overlay-stateless/azure-cni-overlay-stateless-e2e-step-template.yaml b/.pipelines/singletenancy/azure-cni-overlay-stateless/azure-cni-overlay-stateless-e2e-step-template.yaml new file mode 100644 
index 0000000000..c5b565a42b --- /dev/null +++ b/.pipelines/singletenancy/azure-cni-overlay-stateless/azure-cni-overlay-stateless-e2e-step-template.yaml @@ -0,0 +1,93 @@ +parameters: + name: "" + clusterName: "" + os: "" + +steps: + - bash: | + go version + go env + mkdir -p '$(GOBIN)' + mkdir -p '$(GOPATH)/pkg' + mkdir -p '$(modulePath)' + echo '##vso[task.prependpath]$(GOBIN)' + echo '##vso[task.prependpath]$(GOROOT)/bin' + name: "GoEnv" + displayName: "Set up the Go environment" + + - task: KubectlInstaller@0 + inputs: + kubectlVersion: latest + + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }} + name: "kubeconfig" + displayName: "Set Kubeconfig" + + - script: | + nodeList=`kubectl get node -owide | grep Windows | awk '{print $1}'` + for node in $nodeList; do + taint=`kubectl describe node $node | grep Taints | awk '{print $2}'` + if [ $taint == "node.cloudprovider.kubernetes.io/uninitialized=true:NoSchedule" ]; then + kubectl taint nodes $node node.cloudprovider.kubernetes.io/uninitialized=true:NoSchedule- + fi + done + sudo -E env "PATH=$PATH" make test-load SCALE_UP=32 OS_TYPE=windows CNI_TYPE=stateless VALIDATE_STATEFILE=true INSTALL_CNS=true INSTALL_AZURE_VNET_STATELESS=true VALIDATE_V4OVERLAY=true CNS_VERSION=$(make cns-version) CNI_VERSION=$(make cni-version) CLEANUP=true + name: "WindowsOverlayControlPlaneScaleTests" + displayName: "Windows v4Overlay ControlPlane Scale Tests" + retryCountOnTaskFailure: 2 + + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + kubectl get po -owide -A + clusterName=${{ parameters.clusterName }} + echo "Restarting nodes" + for val in $(az vmss list -g MC_${clusterName}_${clusterName}_$(REGION_AKS_CLUSTER_TEST) --query "[].name" -o tsv); do + make -C ./hack/aks restart-vmss AZCLI=az CLUSTER=${clusterName} REGION=$(REGION_AKS_CLUSTER_TEST) VMSS_NAME=${val} + done + displayName: "Restart Nodes" + + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + cd test/integration/load + clusterName=${{ parameters.clusterName }} + make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${clusterName} + make -C ./hack/aks azcfg AZCLI=az REGION=$(REGION_AKS_CLUSTER_TEST) + kubectl get pods -owide -A + echo "Validating Node Restart" + CNI_TYPE=stateless RESTART_CASE=true go test -timeout 30m -tags load -run ^TestValidateState$ + displayName: "Validate Node Restart" + retryCountOnTaskFailure: 3 + + - script: | + echo "Run wireserver and metadata connectivity Tests" + bash test/network/wireserver_metadata_test.sh + retryCountOnTaskFailure: 3 + name: "WireserverMetadataConnectivityTests" + displayName: "Run Wireserver and Metadata Connectivity Tests" + + - script: | + echo "IPv4 Overlay DataPath Test" + cd test/integration/datapath + sudo -E env "PATH=$PATH" go test -count=1 datapath_windows_test.go -timeout 3m -tags connection -restartKubeproxy true -run ^TestDatapathWin$ + name: "WindowsV4OverlayDatapathTests" + displayName: "Windows v4Overlay Datapath Tests" + retryCountOnTaskFailure: 3 diff --git 
a/.pipelines/singletenancy/azure-cni-overlay-stateless/azure-cni-overlay-stateless-e2e.stages.yaml b/.pipelines/singletenancy/azure-cni-overlay-stateless/azure-cni-overlay-stateless-e2e.stages.yaml new file mode 100644 index 0000000000..6e30db6d2c --- /dev/null +++ b/.pipelines/singletenancy/azure-cni-overlay-stateless/azure-cni-overlay-stateless-e2e.stages.yaml @@ -0,0 +1,107 @@ +parameters: + name: "" + displayName: "" + clusterType: "" + clusterName: "" + vmSize: "" + k8sVersion: "" + dependsOn: "" + +stages: + - stage: ${{ parameters.clusterName }} + displayName: Create Cluster - ${{ parameters.displayName }} + dependsOn: + - ${{ parameters.dependsOn }} + - setup + pool: + isCustom: true + type: linux + name: $(BUILD_POOL_NAME_DEFAULT) + variables: + commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] + jobs: + - template: ../../templates/create-cluster.jobs.yaml + parameters: + name: ${{ parameters.name }} + displayName: ${{ parameters.displayName }} + clusterType: ${{ parameters.clusterType }} + clusterName: ${{ parameters.clusterName }}-$(commitID) + vmSize: ${{ parameters.vmSize }} + k8sVersion: ${{ parameters.k8sVersion }} + dependsOn: ${{ parameters.dependsOn }} + region: $(REGION_AKS_CLUSTER_TEST) + - template: ../../templates/add-windows-nodepool.jobs.yaml + parameters: + depend: ${{ parameters.name }} + clusterName: ${{ parameters.clusterName }}-$(commitID) + vmSize: ${{ parameters.vmSize }} + + - stage: ${{ parameters.name }} + displayName: E2E - ${{ parameters.displayName }} + dependsOn: + - setup + - ${{ parameters.clusterName }} + variables: + commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] + GOPATH: "$(Agent.TempDirectory)/go" # Go workspace path + GOBIN: "$(GOPATH)/bin" # Go binaries path + modulePath: "$(GOPATH)/src/github.com/Azure/azure-container-networking" + + IMAGE_REPO_PATH: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.imageRepositoryPath'] ] + AZURE_IPAM_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.azureIpamVersion'] ] + CNI_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.cniVersion'] ] + CNS_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.cnsVersion'] ] + IPAM_IMAGE_NAME_OVERRIDE: $(IMAGE_REPO_PATH)/azure-ipam + CNS_IMAGE_NAME_OVERRIDE: $(IMAGE_REPO_PATH)/cns + CNI_IMAGE_NAME_OVERRIDE: $(IMAGE_REPO_PATH)/cni + jobs: + - job: ${{ parameters.name }}_windows + displayName: Azure Stateless CNI Overlay Test Suite | Windows - (${{ parameters.name }}) + timeoutInMinutes: 120 + pool: + name: $(BUILD_POOL_NAME_DEFAULT) + demands: + - agent.os -equals Linux + - Role -equals $(CUSTOM_E2E_ROLE) + isCustom: true + type: linux + steps: + - checkout: azure-container-networking + - template: azure-cni-overlay-stateless-e2e.steps.yaml + parameters: + name: ${{ parameters.name }} + clusterName: ${{ parameters.clusterName }}-$(commitID) + os: windows + vmSizeWin: ${{ parameters.vmSize }} + + - template: ../../cni/k8s-e2e/k8s-e2e.jobs.yaml + parameters: + sub: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + clusterName: ${{ parameters.clusterName }}-$(commitID) + os: windows + dependsOn: ${{ parameters.name }}_windows + datapath: true + dns: true + portforward: true + hostport: true + service: true + hybridWin: true + + - job: failedE2ELogs_windows + displayName: "Windows Failure Logs" + dependsOn: + - ${{ parameters.name }}_windows + - cni_windows + condition: in(dependencies.${{ parameters.name }}_windows.result, 'Failed') 
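+    # Collects logs only if the E2E job above failed. Assumption: the pool 'type'/'isCustom' keys and ob_outputDirectory below follow the OneBranch pipeline schema; their definitions are not shown in this diff.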
+ pool: + type: linux + variables: + ob_outputDirectory: $(Build.ArtifactStagingDirectory)/out + steps: + - checkout: azure-container-networking + - template: ../../templates/log.steps.yaml + parameters: + clusterName: ${{ parameters.clusterName }}-$(commitID) + os: windows + cni: cniv2 + diff --git a/.pipelines/singletenancy/azure-cni-overlay-stateless/azure-cni-overlay-stateless-e2e.steps.yaml b/.pipelines/singletenancy/azure-cni-overlay-stateless/azure-cni-overlay-stateless-e2e.steps.yaml new file mode 100644 index 0000000000..59a455ed84 --- /dev/null +++ b/.pipelines/singletenancy/azure-cni-overlay-stateless/azure-cni-overlay-stateless-e2e.steps.yaml @@ -0,0 +1,99 @@ +parameters: + name: "" + clusterName: "" + os: "" + +steps: + - bash: | + go version + go env + mkdir -p '$(GOBIN)' + mkdir -p '$(GOPATH)/pkg' + mkdir -p '$(modulePath)' + echo '##vso[task.prependpath]$(GOBIN)' + echo '##vso[task.prependpath]$(GOROOT)/bin' + name: "GoEnv" + displayName: "Set up the Go environment" + + - task: KubectlInstaller@0 + inputs: + kubectlVersion: latest + + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }} + name: "kubeconfig" + displayName: "Set Kubeconfig" + + - script: | + nodeList=`kubectl get node -owide | grep Windows | awk '{print $1}'` + for node in $nodeList; do + taint=`kubectl describe node $node | grep Taints | awk '{print $2}'` + if [ "$taint" == "node.cloudprovider.kubernetes.io/uninitialized=true:NoSchedule" ]; then + kubectl taint nodes $node node.cloudprovider.kubernetes.io/uninitialized=true:NoSchedule- + fi + done + sudo -E env "PATH=$PATH" make test-load \ + SCALE_UP=32 OS_TYPE=windows CNI_TYPE=stateless VALIDATE_STATEFILE=true VALIDATE_V4OVERLAY=true \ + INSTALL_CNS=true INSTALL_AZURE_VNET_STATELESS=true CLEANUP=true \ + AZURE_IPAM_VERSION=$(AZURE_IPAM_VERSION) CNS_VERSION=$(CNS_VERSION) CNI_VERSION=$(CNI_VERSION) \ + IPAM_IMAGE_NAME_OVERRIDE=$(IPAM_IMAGE_NAME_OVERRIDE) CNS_IMAGE_NAME_OVERRIDE=$(CNS_IMAGE_NAME_OVERRIDE) \ + CNI_IMAGE_NAME_OVERRIDE=$(CNI_IMAGE_NAME_OVERRIDE) + name: "WindowsOverlayControlPlaneScaleTests" + displayName: "Windows v4Overlay ControlPlane Scale Tests" + retryCountOnTaskFailure: 2 + + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + kubectl get po -owide -A + clusterName=${{ parameters.clusterName }} + echo "Restarting nodes" + for val in $(az vmss list -g MC_${clusterName}_${clusterName}_$(REGION_AKS_CLUSTER_TEST) --query "[].name" -o tsv); do + make -C ./hack/aks restart-vmss AZCLI=az CLUSTER=${clusterName} REGION=$(REGION_AKS_CLUSTER_TEST) VMSS_NAME=${val} + done + displayName: "Restart Nodes" + + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + cd test/integration/load + clusterName=${{ parameters.clusterName }} + make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${clusterName} + make -C ./hack/aks azcfg AZCLI=az REGION=$(REGION_AKS_CLUSTER_TEST) + kubectl get pods -owide -A + echo "Validating Node Restart" + CNI_TYPE=stateless RESTART_CASE=true go test -timeout 30m -tags load -run ^TestValidateState$ + displayName: "Validate Node 
Restart" + retryCountOnTaskFailure: 3 + + - script: | + echo "Run wireserver and metadata connectivity Tests" + bash test/network/wireserver_metadata_test.sh + retryCountOnTaskFailure: 3 + name: "WireserverMetadataConnectivityTests" + displayName: "Run Wireserver and Metadata Connectivity Tests" + + - script: | + echo "IPv4 Overlay DataPath Test" + cd test/integration/datapath + sudo -E env "PATH=$PATH" go test -count=1 datapath_windows_test.go -timeout 3m -tags connection -restartKubeproxy true -run ^TestDatapathWin$ + name: "WindowsV4OverlayDatapathTests" + displayName: "Windows v4Overlay Datapath Tests" + retryCountOnTaskFailure: 3 diff --git a/.pipelines/singletenancy/azure-cni-overlay/azure-cni-overlay-e2e-job-template.yaml b/.pipelines/singletenancy/azure-cni-overlay/azure-cni-overlay-e2e-job-template.yaml index 2bc03a4726..62b1d0a6fa 100644 --- a/.pipelines/singletenancy/azure-cni-overlay/azure-cni-overlay-e2e-job-template.yaml +++ b/.pipelines/singletenancy/azure-cni-overlay/azure-cni-overlay-e2e-job-template.yaml @@ -6,6 +6,8 @@ parameters: vmSize: "" k8sVersion: "" dependsOn: "" + scaleup: "" + os: "" stages: - stage: ${{ parameters.clusterName }} @@ -25,8 +27,10 @@ stages: clusterType: ${{ parameters.clusterType }} clusterName: ${{ parameters.clusterName }}-$(commitID) vmSize: ${{ parameters.vmSize }} + vmSizeWin: ${{ parameters.vmSize }} k8sVersion: ${{ parameters.k8sVersion }} dependsOn: ${{ parameters.dependsOn }} + os: ${{ parameters.os }} region: $(REGION_AKS_CLUSTER_TEST) - stage: ${{ parameters.name }} @@ -43,8 +47,8 @@ stages: pool: name: $(BUILD_POOL_NAME_DEFAULT) jobs: - - job: ${{ parameters.name }}_linux - displayName: Azure CNI Overlay Test Suite | Linux - (${{ parameters.name }}) + - job: ${{ parameters.name }}_${{ parameters.os }} + displayName: Azure CNI Overlay Test Suite | ${{ parameters.os }} - (${{ parameters.name }}) timeoutInMinutes: 120 pool: name: $(BUILD_POOL_NAME_DEFAULT) @@ -56,101 +60,31 @@ stages: parameters: name: ${{ parameters.name }} clusterName: ${{ parameters.clusterName }}-$(commitID) - os: linux - scaleup: 100 - - - job: windows_nodepool - displayName: Add Windows Nodepool - dependsOn: ${{ parameters.name }}_linux - pool: - name: $(BUILD_POOL_NAME_DEFAULT) - demands: - - agent.os -equals Linux - - Role -equals $(CUSTOM_E2E_ROLE) - steps: - - task: AzureCLI@2 - inputs: - azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) - scriptLocation: "inlineScript" - scriptType: "bash" - addSpnToEnvironment: true - inlineScript: | - set -e - make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }}-$(commitID) - make -C ./hack/aks windows-nodepool-up AZCLI=az SUB=$(SUB_AZURE_NETWORK_AGENT_BUILD_VALIDATIONS) CLUSTER=${{ parameters.clusterName }}-$(commitID) VM_SIZE_WIN=${{ parameters.vmSize }} - echo "Windows node are successfully added to v4 Overlay Cluster" - kubectl cluster-info - kubectl get node -owide - kubectl get po -owide -A - name: "Add_Windows_Node" - displayName: "Add windows node on v4 overlay cluster" - - - - job: ${{ parameters.name }}_windows - displayName: Azure CNI Overlay Test Suite | Windows - (${{ parameters.name }}) - timeoutInMinutes: 120 - dependsOn: windows_nodepool - pool: - name: $(BUILD_POOL_NAME_DEFAULT) - demands: - - agent.os -equals Linux - - Role -equals $(CUSTOM_E2E_ROLE) - steps: - - template: azure-cni-overlay-e2e-step-template.yaml - parameters: - name: ${{ parameters.name }} - clusterName: ${{ parameters.clusterName }}-$(commitID) - os: windows - scaleup: 50 + os: ${{ parameters.os }} + 
scaleup: ${{ parameters.scaleup }} # 50 in windows or 100 in linux - template: ../../cni/k8s-e2e/k8s-e2e-job-template.yaml parameters: sub: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) clusterName: ${{ parameters.clusterName }}-$(commitID) os: ${{ parameters.os }} - dependsOn: ${{ parameters.name }}_windows + dependsOn: ${{ parameters.name }}_${{ parameters.os }} datapath: true dns: true portforward: true hostport: true service: true + hybridWin: ${{ eq(parameters.os, 'windows') }} - - template: ../../cni/k8s-e2e/k8s-e2e-job-template.yaml - parameters: - sub: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) - clusterName: ${{ parameters.clusterName }}-$(commitID) - os: windows - dependsOn: cni_${{ parameters.os }} - datapath: true - dns: true - portforward: true - hostport: true - service: true - hybridWin: true - - - job: failedE2ELogs_linux - displayName: "Linux Failure Logs" + - job: failedE2ELogs_${{ parameters.os }} + displayName: "${{ parameters.os }} Failure Logs" dependsOn: - - ${{ parameters.name }}_linux - - cni_linux - condition: in(dependencies.${{ parameters.name }}_linux.result, 'Failed') + - ${{ parameters.name }}_${{ parameters.os }} + - CNI_${{ parameters.os }} + condition: in(dependencies.${{ parameters.name }}_${{ parameters.os }}.result, 'Failed') steps: - template: ../../templates/log-template.yaml parameters: clusterName: ${{ parameters.clusterName }}-$(commitID) - os: linux + os: ${{ parameters.os }} cni: cniv2 - - - job: failedE2ELogs_windows - displayName: "Windows Failure Logs" - dependsOn: - - ${{ parameters.name }}_windows - - cni_windows - condition: in(dependencies.${{ parameters.name }}_windows.result, 'Failed') - steps: - - template: ../../templates/log-template.yaml - parameters: - clusterName: ${{ parameters.clusterName }}-$(commitID) - os: windows - cni: cniv2 - diff --git a/.pipelines/singletenancy/azure-cni-overlay/azure-cni-overlay-e2e-step-template.yaml b/.pipelines/singletenancy/azure-cni-overlay/azure-cni-overlay-e2e-step-template.yaml index 633c427e62..8d8de54f55 100644 --- a/.pipelines/singletenancy/azure-cni-overlay/azure-cni-overlay-e2e-step-template.yaml +++ b/.pipelines/singletenancy/azure-cni-overlay/azure-cni-overlay-e2e-step-template.yaml @@ -64,6 +64,7 @@ steps: scriptType: "bash" addSpnToEnvironment: true inlineScript: | + set -e cd test/integration/load # Scale Cluster Up/Down to confirm functioning CNS @@ -129,6 +130,7 @@ steps: scriptType: "bash" addSpnToEnvironment: true inlineScript: | + set -e cd test/integration/load # Scale Cluster Up/Down to confirm functioning CNS diff --git a/.pipelines/singletenancy/azure-cni-overlay/azure-cni-overlay-e2e.stages.yaml b/.pipelines/singletenancy/azure-cni-overlay/azure-cni-overlay-e2e.stages.yaml new file mode 100644 index 0000000000..17a2596fdf --- /dev/null +++ b/.pipelines/singletenancy/azure-cni-overlay/azure-cni-overlay-e2e.stages.yaml @@ -0,0 +1,105 @@ +parameters: + name: "" + displayName: "" + clusterType: "" + clusterName: "" + vmSize: "" + k8sVersion: "" + dependsOn: "" + scaleup: "" + os: "" + +stages: + - stage: ${{ parameters.clusterName }} + displayName: Create Cluster - ${{ parameters.displayName }} + dependsOn: + - ${{ parameters.dependsOn }} + - setup + pool: + isCustom: true + type: linux + name: $(BUILD_POOL_NAME_DEFAULT) + variables: + commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] + jobs: + - template: ../../templates/create-cluster.jobs.yaml + parameters: + name: ${{ parameters.name }} + displayName: ${{ parameters.displayName }} + clusterType: ${{ 
parameters.clusterType }} + clusterName: ${{ parameters.clusterName }}-$(commitID) + vmSize: ${{ parameters.vmSize }} + vmSizeWin: ${{ parameters.vmSize }} + k8sVersion: ${{ parameters.k8sVersion }} + dependsOn: ${{ parameters.dependsOn }} + os: ${{ parameters.os }} + region: $(REGION_AKS_CLUSTER_TEST) + + - stage: ${{ parameters.name }} + displayName: E2E - ${{ parameters.displayName }} + dependsOn: + - setup + - ${{ parameters.clusterName }} + variables: + commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] + GOPATH: "$(Agent.TempDirectory)/go" # Go workspace path + GOBIN: "$(GOPATH)/bin" # Go binaries path + modulePath: "$(GOPATH)/src/github.com/Azure/azure-container-networking" + + IMAGE_REPO_PATH: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.imageRepositoryPath'] ] + AZURE_IPAM_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.azureIpamVersion'] ] + CNI_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.cniVersion'] ] + CNS_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.cnsVersion'] ] + IPAM_IMAGE_NAME_OVERRIDE: $(IMAGE_REPO_PATH)/azure-ipam + CNS_IMAGE_NAME_OVERRIDE: $(IMAGE_REPO_PATH)/cns + CNI_IMAGE_NAME_OVERRIDE: $(IMAGE_REPO_PATH)/cni + jobs: + - job: ${{ parameters.name }}_${{ parameters.os }} + displayName: Azure CNI Overlay Test Suite | ${{ parameters.os }} - (${{ parameters.name }}) + timeoutInMinutes: 120 + pool: + isCustom: true + type: linux + name: $(BUILD_POOL_NAME_DEFAULT) + demands: + - agent.os -equals Linux + - Role -equals $(CUSTOM_E2E_ROLE) + steps: + - checkout: azure-container-networking + - template: azure-cni-overlay-e2e.steps.yaml + parameters: + name: ${{ parameters.name }} + clusterName: ${{ parameters.clusterName }}-$(commitID) + os: ${{ parameters.os }} + scaleup: ${{ parameters.scaleup }} # 50 in windows or 100 in linux + + - template: ../../cni/k8s-e2e/k8s-e2e.jobs.yaml + parameters: + sub: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + clusterName: ${{ parameters.clusterName }}-$(commitID) + os: ${{ parameters.os }} + dependsOn: ${{ parameters.name }}_${{ parameters.os }} + datapath: true + dns: true + portforward: true + hostport: true + service: true + hybridWin: ${{ eq(parameters.os, 'windows') }} + + - job: failedE2ELogs_${{ parameters.os }} + displayName: "${{ parameters.os }} Failure Logs" + dependsOn: + - ${{ parameters.name }}_${{ parameters.os }} + - CNI_${{ parameters.os }} + condition: in(dependencies.${{ parameters.name }}_${{ parameters.os }}.result, 'Failed') + pool: + type: linux + variables: + ob_outputDirectory: $(Build.ArtifactStagingDirectory)/out + steps: + - checkout: azure-container-networking + - template: ../../templates/log.steps.yaml + parameters: + clusterName: ${{ parameters.clusterName }}-$(commitID) + os: ${{ parameters.os }} + cni: cniv2 diff --git a/.pipelines/singletenancy/azure-cni-overlay/azure-cni-overlay-e2e.steps.yaml b/.pipelines/singletenancy/azure-cni-overlay/azure-cni-overlay-e2e.steps.yaml new file mode 100644 index 0000000000..2d5e4c3cb9 --- /dev/null +++ b/.pipelines/singletenancy/azure-cni-overlay/azure-cni-overlay-e2e.steps.yaml @@ -0,0 +1,153 @@ +parameters: + name: "" + clusterName: "" + os: "" + scaleup: "" + +steps: + - bash: | + go version + go env + mkdir -p '$(GOBIN)' + mkdir -p '$(GOPATH)/pkg' + mkdir -p '$(modulePath)' + echo '##vso[task.prependpath]$(GOBIN)' + echo '##vso[task.prependpath]$(GOROOT)/bin' + name: "GoEnv" + displayName: "Set up the Go environment" + + - task: 
KubectlInstaller@0 + inputs: + kubectlVersion: latest + + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }} + name: "kubeconfig" + displayName: "Set Kubeconfig" + - ${{ if eq(parameters.os, 'linux') }}: + - script: | + echo "Start Integration Tests on Overlay Cluster" + kubectl get po -owide -A + sudo -E env "PATH=$PATH" make test-load \ + SCALE_UP=32 OS_TYPE=linux CNI_TYPE=cniv2 VALIDATE_STATEFILE=true VALIDATE_V4OVERLAY=true \ + INSTALL_CNS=true INSTALL_AZURE_CNI_OVERLAY=true CLEANUP=true \ + AZURE_IPAM_VERSION=$(AZURE_IPAM_VERSION) CNS_VERSION=$(CNS_VERSION) CNI_VERSION=$(CNI_VERSION) \ + IPAM_IMAGE_NAME_OVERRIDE=$(IPAM_IMAGE_NAME_OVERRIDE) CNS_IMAGE_NAME_OVERRIDE=$(CNS_IMAGE_NAME_OVERRIDE) \ + CNI_IMAGE_NAME_OVERRIDE=$(CNI_IMAGE_NAME_OVERRIDE) + retryCountOnTaskFailure: 2 + name: "integrationTest" + displayName: "Run CNS Integration Tests on AKS Overlay" + + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + kubectl get po -owide -A + clusterName=${{ parameters.clusterName }} + echo "Restarting nodes" + for val in $(az vmss list -g MC_${clusterName}_${clusterName}_$(REGION_AKS_CLUSTER_TEST) --query "[].name" -o tsv); do + make -C ./hack/aks restart-vmss AZCLI=az CLUSTER=${clusterName} REGION=$(REGION_AKS_CLUSTER_TEST) VMSS_NAME=${val} + done + displayName: "Restart Nodes" + + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + cd test/integration/load + + # Scale Cluster Up/Down to confirm functioning CNS + ITERATIONS=2 SCALE_UP=${{ parameters.scaleup }} OS_TYPE=linux go test -count 1 -timeout 30m -tags load -run ^TestLoad$ + kubectl get pods -owide -A + + cd ../../.. 
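+          # RESTART_CASE=true re-validates the cniv2 state file against pods that survived the VMSS restarts, then removes the load-test namespace.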
+ echo "Validating Node Restart" + make test-validate-state OS_TYPE=linux RESTART_CASE=true CNI_TYPE=cniv2 + kubectl delete ns load-test + displayName: "Validate Node Restart" + retryCountOnTaskFailure: 3 + + - script: | + echo "Run wireserver and metadata connectivity Tests" + bash test/network/wireserver_metadata_test.sh + retryCountOnTaskFailure: 3 + name: "WireserverMetadataConnectivityTests" + displayName: "Run Wireserver and Metadata Connectivity Tests" + + - ${{ if eq(parameters.os, 'windows') }}: + - script: | + nodeList=`kubectl get node -owide | grep Windows | awk '{print $1}'` + for node in $nodeList; do + taint=`kubectl describe node $node | grep Taints | awk '{print $2}'` + if [ $taint == "node.cloudprovider.kubernetes.io/uninitialized=true:NoSchedule" ]; then + kubectl taint nodes $node node.cloudprovider.kubernetes.io/uninitialized=true:NoSchedule- + fi + done + sudo -E env "PATH=$PATH" make test-load \ + SCALE_UP=32 OS_TYPE=windows CNI_TYPE=cniv2 VALIDATE_STATEFILE=true VALIDATE_V4OVERLAY=true \ + INSTALL_CNS=true INSTALL_AZURE_CNI_OVERLAY=true CLEANUP=true \ + CNS_VERSION=$(CNS_VERSION) CNI_VERSION=$(CNI_VERSION) \ + CNS_IMAGE_NAME_OVERRIDE=$(CNS_IMAGE_NAME_OVERRIDE) CNI_IMAGE_NAME_OVERRIDE=$(CNI_IMAGE_NAME_OVERRIDE) + name: "WindowsOverlayControlPlaneScaleTests" + displayName: "Windows v4Overlay ControlPlane Scale Tests" + retryCountOnTaskFailure: 2 + + - script: | + echo "IPv4 Overlay DataPath Test" + cd test/integration/datapath + sudo -E env "PATH=$PATH" go test -count=1 datapath_windows_test.go -timeout 3m -tags connection -restartKubeproxy true -run ^TestDatapathWin$ + name: "WindowsV4OverlayDatapathTests" + displayName: "Windows v4Overlay Datapath Tests" + retryCountOnTaskFailure: 3 + + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + kubectl get po -owide -A + clusterName=${{ parameters.clusterName }} + echo "Restarting nodes" + for val in $(az vmss list -g MC_${clusterName}_${clusterName}_$(REGION_AKS_CLUSTER_TEST) --query "[].name" -o tsv); do + make -C ./hack/aks restart-vmss AZCLI=az CLUSTER=${clusterName} REGION=$(REGION_AKS_CLUSTER_TEST) VMSS_NAME=${val} + done + displayName: "Restart Nodes" + + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + cd test/integration/load + + # Scale Cluster Up/Down to confirm functioning CNS + ITERATIONS=2 SCALE_UP=${{ parameters.scaleup }} OS_TYPE=windows go test -count 1 -timeout 30m -tags load -run ^TestLoad$ + kubectl get pods -owide -A + + cd ../../.. 
+ echo "Validating Node Restart" + make test-validate-state OS_TYPE=windows RESTART_CASE=true CNI_TYPE=cniv2 + kubectl delete ns load-test + displayName: "Validate Node Restart" + retryCountOnTaskFailure: 3 diff --git a/.pipelines/singletenancy/cilium-dualstack-overlay/cilium-dualstackoverlay-e2e-job-template.yaml b/.pipelines/singletenancy/cilium-dualstack-overlay/cilium-dualstackoverlay-e2e-job-template.yaml index fc8ece1a3a..054d64e51c 100644 --- a/.pipelines/singletenancy/cilium-dualstack-overlay/cilium-dualstackoverlay-e2e-job-template.yaml +++ b/.pipelines/singletenancy/cilium-dualstack-overlay/cilium-dualstackoverlay-e2e-job-template.yaml @@ -40,11 +40,13 @@ stages: GOBIN: "$(GOPATH)/bin" # Go binaries path modulePath: "$(GOPATH)/src/github.com/Azure/azure-container-networking" commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] + IPV6_IMAGE_REGISTRY: acnpublic.azurecr.io pool: name: $(BUILD_POOL_NAME_DEFAULT) jobs: - job: ${{ parameters.name }} displayName: Cilium Dualstack Overlay Test Suite - (${{ parameters.name }}) + timeoutInMinutes: 120 pool: name: $(BUILD_POOL_NAME_DEFAULT) demands: diff --git a/.pipelines/singletenancy/cilium-dualstack-overlay/cilium-dualstackoverlay-e2e-step-template.yaml b/.pipelines/singletenancy/cilium-dualstack-overlay/cilium-dualstackoverlay-e2e-step-template.yaml index 2475d7f281..9ef039db57 100644 --- a/.pipelines/singletenancy/cilium-dualstack-overlay/cilium-dualstackoverlay-e2e-step-template.yaml +++ b/.pipelines/singletenancy/cilium-dualstack-overlay/cilium-dualstackoverlay-e2e-step-template.yaml @@ -34,7 +34,7 @@ steps: kubectl cluster-info kubectl get po -owide -A echo "install Cilium ${CILIUM_DUALSTACK_VERSION}" - export DIR=${CILIUM_DUALSTACK_VERSION%.*} + export DIR=$(echo ${CILIUM_DUALSTACK_VERSION#v} | cut -d. 
-f1,2) echo "installing files from ${DIR}" echo "deploy Cilium ConfigMap" kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config-dualstack.yaml @@ -45,7 +45,7 @@ steps: export CILIUM_VERSION_TAG=${CILIUM_DUALSTACK_VERSION} export IPV6_HP_BPF_VERSION=$(make ipv6-hp-bpf-version) echo "install Cilium ${CILIUM_DUALSTACK_VERSION} onto Overlay Cluster" - envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY},${IPV6_HP_BPF_VERSION}' < test/integration/manifests/cilium/v${DIR}/cilium-agent/templates/daemonset-dualstack.yaml | kubectl apply -f - + envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY},${IPV6_IMAGE_REGISTRY},${IPV6_HP_BPF_VERSION}' < test/integration/manifests/cilium/v${DIR}/cilium-agent/templates/daemonset-dualstack.yaml | kubectl apply -f - envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/v${DIR}/cilium-operator/templates/deployment.yaml | kubectl apply -f - kubectl get po -owide -A name: "installCilium" @@ -65,11 +65,12 @@ steps: echo "Waiting < 2 minutes for cilium to be ready" # Ensure Cilium is ready Xm\Xs cilium status --wait --wait-duration 2m + kubectl get crd -A retryCountOnTaskFailure: 3 name: "CiliumStatus" displayName: "Cilium Status" - - task: AzureCLI@1 + - task: AzureCLI@2 inputs: azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) scriptLocation: "inlineScript" @@ -85,13 +86,14 @@ steps: done displayName: "Restart Nodes" - - task: AzureCLI@1 + - task: AzureCLI@2 inputs: azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) scriptLocation: "inlineScript" scriptType: "bash" addSpnToEnvironment: true inlineScript: | + set -e cd test/integration/load # Scale Cluster Up/Down to confirm functioning CNS @@ -105,15 +107,14 @@ steps: displayName: "Validate Node Restart" retryCountOnTaskFailure: 3 + - template: ../../templates/cilium-connectivity-tests.yaml + - script: | - echo "Run Cilium Connectivity Tests" - cilium status - cilium connectivity test --connect-timeout 4s --request-timeout 30s --test '!pod-to-pod-encryption,!node-to-node-encryption,!no-unexpected-packet-drops' --force-deploy ns=`kubectl get ns | grep cilium-test | awk '{print $1}'` echo "##vso[task.setvariable variable=ciliumNamespace]$ns" retryCountOnTaskFailure: 3 - name: "ciliumConnectivityTests" - displayName: "Run Cilium Connectivity Tests" + name: "nsCapture" + displayName: "Capture Connectivity Test Namespace" - script: | set -e @@ -130,9 +131,6 @@ steps: - script: | echo "validate pod IP assignment and check systemd-networkd restart" kubectl get pod -owide -A - # Deleting echo-external-node deployment until cilium version matches TODO. https://github.com/cilium/cilium-cli/issues/67 is addressing the change. 
- # Saves 17 minutes - kubectl delete deploy -n $(ciliumNamespace) echo-external-node cd test/integration/load CNI_TYPE=cilium_dualstack go test -timeout 30m -tags load -run ^TestValidateState$ echo "delete cilium connectivity test resources and re-validate state" @@ -158,3 +156,5 @@ steps: fi name: "testAsyncDelete" displayName: "Verify Async Delete when CNS is down" + + - template: ../../templates/cilium-mtu-check.yaml diff --git a/.pipelines/singletenancy/cilium-dualstack-overlay/cilium-dualstackoverlay-e2e.stages.yaml b/.pipelines/singletenancy/cilium-dualstack-overlay/cilium-dualstackoverlay-e2e.stages.yaml new file mode 100644 index 0000000000..1e68afda7d --- /dev/null +++ b/.pipelines/singletenancy/cilium-dualstack-overlay/cilium-dualstackoverlay-e2e.stages.yaml @@ -0,0 +1,103 @@ +parameters: + name: "" + displayName: "" + clusterType: "" + clusterName: "" + vmSize: "" + k8sVersion: "" + dependsOn: "" + os: "linux" # consumed by the k8s-e2e and failure-log jobs below + +stages: + - stage: ${{ parameters.clusterName }} + displayName: Create Cluster - ${{ parameters.displayName }} + dependsOn: + - ${{ parameters.dependsOn }} + - setup + pool: + isCustom: true + type: linux + name: $(BUILD_POOL_NAME_DEFAULT) + variables: + commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] + jobs: + - template: ../../templates/create-cluster.jobs.yaml + parameters: + name: ${{ parameters.name }} + displayName: ${{ parameters.displayName }} + clusterType: ${{ parameters.clusterType }} + clusterName: ${{ parameters.clusterName }}-$(commitID) + vmSize: ${{ parameters.vmSize }} + k8sVersion: ${{ parameters.k8sVersion }} + dependsOn: ${{ parameters.dependsOn }} + region: $(REGION_DUALSTACKOVERLAY_CLUSTER_TEST) # Dualstack has a specific region requirement + + - stage: ${{ parameters.name }} + displayName: E2E - ${{ parameters.displayName }} + dependsOn: + - setup + - ${{ parameters.clusterName }} + variables: + GOPATH: "$(Agent.TempDirectory)/go" # Go workspace path + GOBIN: "$(GOPATH)/bin" # Go binaries path + modulePath: "$(GOPATH)/src/github.com/Azure/azure-container-networking" + commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] + + IPV6_IMAGE_REGISTRY: acnpublic.azurecr.io + IMAGE_REPO_PATH: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.imageRepositoryPath'] ] + AZURE_IPAM_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.azureIpamVersion'] ] + IPV6_HP_BPF_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.ipv6HpBpfVersion'] ] + CNI_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.cniVersion'] ] + CNS_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.cnsVersion'] ] + IPAM_IMAGE_NAME_OVERRIDE: $(IMAGE_REPO_PATH)/azure-ipam + CNS_IMAGE_NAME_OVERRIDE: $(IMAGE_REPO_PATH)/cns + CNI_IMAGE_NAME_OVERRIDE: $(IMAGE_REPO_PATH)/cni + IPV6_HP_BPF_IMAGE_REPO_PATH: $(IMAGE_REPO_PATH) + jobs: + - job: ${{ parameters.name }} + displayName: Cilium Dualstack Overlay Test Suite - (${{ parameters.name }}) + timeoutInMinutes: 120 + pool: + name: $(BUILD_POOL_NAME_DEFAULT) + demands: + - agent.os -equals Linux + - Role -equals $(CUSTOM_E2E_ROLE) + isCustom: true + type: linux + steps: + - checkout: azure-container-networking + - template: cilium-dualstackoverlay-e2e.steps.yaml + parameters: + name: ${{ parameters.name }} + clusterName: ${{ parameters.clusterName }}-$(commitID) + scaleup: 100 + + - template: ../../cni/k8s-e2e/k8s-e2e.jobs.yaml + parameters: + sub: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + clusterName: 
${{ parameters.clusterName }}-$(commitID) + os: ${{ parameters.os }} + cni: cilium + dependsOn: ${{ parameters.name }} + dualstack: true + dns: true + portforward: true + service: true + + - job: failedE2ELogs + displayName: "Failure Logs" + dependsOn: + - ${{ parameters.name }} + - cni_${{ parameters.os }} + condition: failed() + pool: + type: linux + variables: + ob_outputDirectory: $(Build.ArtifactStagingDirectory)/out + steps: + - checkout: azure-container-networking + - template: ../../templates/log.steps.yaml + parameters: + clusterName: ${{ parameters.clusterName }}-$(commitID) + os: ${{ parameters.os }} + cni: cilium diff --git a/.pipelines/singletenancy/cilium-dualstack-overlay/cilium-dualstackoverlay-e2e.steps.yaml b/.pipelines/singletenancy/cilium-dualstack-overlay/cilium-dualstackoverlay-e2e.steps.yaml new file mode 100644 index 0000000000..db342405e1 --- /dev/null +++ b/.pipelines/singletenancy/cilium-dualstack-overlay/cilium-dualstackoverlay-e2e.steps.yaml @@ -0,0 +1,172 @@ +parameters: + name: "" + clusterName: "" + scaleup: "" + +steps: + + - bash: | + go version + go env + mkdir -p '$(GOBIN)' + mkdir -p '$(GOPATH)/pkg' + mkdir -p '$(modulePath)' + echo '##vso[task.prependpath]$(GOBIN)' + echo '##vso[task.prependpath]$(GOROOT)/bin' + name: "GoEnv" + displayName: "Set up the Go environment" + + - task: KubectlInstaller@0 + inputs: + kubectlVersion: latest + + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }} + ls -lah + pwd + kubectl cluster-info + kubectl get po -owide -A + echo "install Cilium ${CILIUM_DUALSTACK_VERSION}" + export DIR=$(echo ${CILIUM_DUALSTACK_VERSION#v} | cut -d. 
-f1,2) + echo "installing files from ${DIR}" + echo "deploy Cilium ConfigMap" + kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config-dualstack.yaml + # Passes Cilium image to daemonset and deployment + kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-agent/files + kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-operator/files + + export CILIUM_VERSION_TAG=${CILIUM_DUALSTACK_VERSION} + echo "$CILIUM_VERSION_TAG" + [[ -z $IPV6_HP_BPF_VERSION ]] && IPV6_HP_BPF_VERSION=$(make ipv6-hp-bpf-version) + echo "$IPV6_HP_BPF_VERSION" + [[ -z $IPV6_IMAGE_REGISTRY ]] && IPV6_IMAGE_REGISTRY=acnpublic.azurecr.io + [[ -n $IPV6_HP_BPF_IMAGE_REPO_PATH ]] && IPV6_IMAGE_REGISTRY="$IPV6_IMAGE_REGISTRY"/"$IPV6_HP_BPF_IMAGE_REPO_PATH" + echo "$IPV6_IMAGE_REGISTRY" + echo "install Cilium ${CILIUM_DUALSTACK_VERSION} onto Overlay Cluster" + envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY},${IPV6_IMAGE_REGISTRY},${IPV6_HP_BPF_VERSION}' < test/integration/manifests/cilium/v${DIR}/cilium-agent/templates/daemonset-dualstack.yaml | kubectl apply -f - + envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/v${DIR}/cilium-operator/templates/deployment.yaml | kubectl apply -f - + kubectl get po -owide -A + name: "installCilium" + displayName: "Install Cilium on AKS Dualstack Overlay" + + - template: ../../templates/cilium-cli.yaml + + - script: | + echo "Start Azilium E2E Tests on Overlay Cluster" + sudo -E env "PATH=$PATH" make test-load \ + SCALE_UP=32 OS_TYPE=linux CNI_TYPE=cilium_dualstack VALIDATE_STATEFILE=true \ + INSTALL_CNS=true INSTALL_OVERLAY=true CLEANUP=true \ + AZURE_IPAM_VERSION=$(AZURE_IPAM_VERSION) CNS_VERSION=$(CNS_VERSION) \ + IPAM_IMAGE_NAME_OVERRIDE=$(IPAM_IMAGE_NAME_OVERRIDE) CNS_IMAGE_NAME_OVERRIDE=$(CNS_IMAGE_NAME_OVERRIDE) + retryCountOnTaskFailure: 3 + name: "aziliumTest" + displayName: "Run Azilium E2E on AKS Overlay" + + - script: | + kubectl get pods -A + echo "Waiting < 2 minutes for cilium to be ready" + # Ensure Cilium is ready Xm\Xs + cilium status --wait --wait-duration 2m + kubectl get crd -A + retryCountOnTaskFailure: 3 + name: "CiliumStatus" + displayName: "Cilium Status" + + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + kubectl get po -owide -A + clusterName=${{ parameters.clusterName }} + echo "Restarting nodes" + for val in $(az vmss list -g MC_${clusterName}_${clusterName}_$(REGION_AKS_CLUSTER_TEST) --query "[].name" -o tsv); do + make -C ./hack/aks restart-vmss AZCLI=az CLUSTER=${clusterName} REGION=$(REGION_AKS_CLUSTER_TEST) VMSS_NAME=${val} + done + displayName: "Restart Nodes" + + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + cd test/integration/load + + # Scale Cluster Up/Down to confirm functioning CNS + ITERATIONS=2 SCALE_UP=${{ parameters.scaleup }} OS_TYPE=linux go test -count 1 -timeout 30m -tags load -run ^TestLoad$ + kubectl get pods -owide -A + + cd ../../.. 
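+          # Dualstack variant: confirm the cilium_dualstack state file still matches running pods after the restarts, then delete the load-test namespace.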
+ echo "Validating Node Restart" + make test-validate-state OS_TYPE=linux RESTART_CASE=true CNI_TYPE=cilium_dualstack + kubectl delete ns load-test + displayName: "Validate Node Restart" + retryCountOnTaskFailure: 3 + + - template: ../../templates/cilium-connectivity-tests.yaml + + - script: | + ns=`kubectl get ns | grep cilium-test | awk '{print $1}'` + echo "##vso[task.setvariable variable=ciliumNamespace]$ns" + retryCountOnTaskFailure: 3 + name: "nsCapture" + displayName: "Capture Connectivity Test Namespace" + + - script: | + set -e + kubectl get po -owide -A + cd test/integration/datapath + echo "Dualstack Overlay Linux datapath IPv6 test" + go test -count=1 datapath_linux_test.go -timeout 3m -tags connection -run ^TestDatapathLinux$ -tags=connection,integration -isDualStack=true + echo "Dualstack Overlay Linux datapath IPv4 test" + go test -count=1 datapath_linux_test.go -timeout 3m -tags connection -run ^TestDatapathLinux$ -tags=connection,integration + retryCountOnTaskFailure: 3 + name: "DualStack_Overlay_Linux_Tests" + displayName: "DualStack Overlay Linux Tests" + + - script: | + echo "validate pod IP assignment and check systemd-networkd restart" + kubectl get pod -owide -A + # Deleting echo-external-node deployment until cilium version matches TODO. https://github.com/cilium/cilium-cli/issues/67 is addressing the change. + # Saves 17 minutes + kubectl delete deploy -n $(ciliumNamespace) echo-external-node + cd test/integration/load + CNI_TYPE=cilium_dualstack go test -timeout 30m -tags load -run ^TestValidateState$ + echo "delete cilium connectivity test resources and re-validate state" + kubectl delete ns $(ciliumNamespace) + kubectl get pod -owide -A + CNI_TYPE=cilium_dualstack go test -timeout 30m -tags load -run ^TestValidateState$ + name: "validatePods" + displayName: "Validate Pods" + + - script: | + echo "Run wireserver and metadata connectivity Tests" + bash test/network/wireserver_metadata_test.sh + retryCountOnTaskFailure: 3 + name: "WireserverMetadataConnectivityTests" + displayName: "Run Wireserver and Metadata Connectivity Tests" + + - script: | + cd hack/scripts + chmod +x async-delete-test.sh + ./async-delete-test.sh + if ! 
[ -z $(kubectl -n kube-system get ds azure-cns | grep non-existing) ]; then + kubectl -n kube-system patch daemonset azure-cns --type json -p='[{"op": "remove", "path": "/spec/template/spec/nodeSelector/non-existing"}]' + fi + name: "testAsyncDelete" + displayName: "Verify Async Delete when CNS is down" + + - template: ../../templates/cilium-mtu-check.yaml diff --git a/.pipelines/singletenancy/cilium-nodesubnet/cilium-nodesubnet-e2e-job-template.yaml b/.pipelines/singletenancy/cilium-nodesubnet/cilium-nodesubnet-e2e-job-template.yaml new file mode 100644 index 0000000000..5c2bd468ef --- /dev/null +++ b/.pipelines/singletenancy/cilium-nodesubnet/cilium-nodesubnet-e2e-job-template.yaml @@ -0,0 +1,91 @@ +parameters: + dependsOn: "" + name: "cilium_nodesubnet_e2e" + clusterType: "nodesubnet-byocni-nokubeproxy-up" + clusterName: "cilndsubnete2e" + vmSize: "" + os: "linux" + arch: "" + osSKU: Ubuntu + hubbleEnabled: false + dualstackVersion: "" + cni: "cilium" + +stages: + - stage: ${{ parameters.clusterName }} + displayName: Create Cluster - ${{ parameters.displayName }} + dependsOn: + - ${{ parameters.dependsOn }} + - setup + pool: + name: $(BUILD_POOL_NAME_DEFAULT) + variables: + commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] + jobs: + - template: ../../templates/create-cluster.yaml + parameters: + name: ${{ parameters.name }} + displayName: ${{ parameters.displayName }} + clusterType: ${{ parameters.clusterType }} + clusterName: ${{ parameters.clusterName }}-$(commitID) + vmSize: ${{ parameters.vmSize }} + region: $(REGION_AKS_CLUSTER_TEST) + + - stage: ${{ parameters.name }} + displayName: E2E - ${{ parameters.displayName }} + variables: + TAG: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.Tag'] ] + CURRENT_VERSION: $[ stagedependencies.containerize.check_tag.outputs['CurrentTagManifests.currentTagManifests'] ] + commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] + GOPATH: "$(Agent.TempDirectory)/go" # Go workspace path + GOBIN: "$(GOPATH)/bin" # Go binaries path + modulePath: "$(GOPATH)/src/github.com/Azure/azure-container-networking" + condition: and(succeeded(), eq(variables.TAG, variables.CURRENT_VERSION)) + dependsOn: + - setup + - publish + - ${{ parameters.clusterName }} + pool: + name: $(BUILD_POOL_NAME_DEFAULT) + jobs: + - job: ${{ parameters.name }} + displayName: Nodesubnet with Cilium - (${{ parameters.name }}) + timeoutInMinutes: 120 + pool: + name: $(BUILD_POOL_NAME_DEFAULT) + demands: + - agent.os -equals Linux + - Role -equals $(CUSTOM_E2E_ROLE) + steps: + - template: cilium-nodesubnet-e2e-step-template.yaml + parameters: + name: ${{ parameters.name }} + clusterName: ${{ parameters.clusterName }}-$(commitID) + arch: ${{ parameters.arch }} + os: ${{ parameters.os }} + scaleup: ${{ parameters.scaleup }} + + - template: ../../cni/k8s-e2e/k8s-e2e-job-template.yaml + parameters: + sub: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + clusterName: ${{ parameters.clusterName }}-$(commitID) + os: ${{ parameters.os }} + datapath: true + dns: true + cni: cilium + portforward: true + service: true + dependsOn: ${{ parameters.name }} + + - job: failedE2ELogs + displayName: "Failure Logs" + dependsOn: + - ${{ parameters.name }} + - cni_${{ parameters.os }} + condition: failed() + steps: + - template: ../../templates/log-template.yaml + parameters: + clusterName: ${{ parameters.clusterName }}-$(commitID) + os: ${{ parameters.os }} + cni: cilium diff --git 
a/.pipelines/singletenancy/cilium-nodesubnet/cilium-nodesubnet-e2e-step-template.yaml b/.pipelines/singletenancy/cilium-nodesubnet/cilium-nodesubnet-e2e-step-template.yaml new file mode 100644 index 0000000000..f3b488ade5 --- /dev/null +++ b/.pipelines/singletenancy/cilium-nodesubnet/cilium-nodesubnet-e2e-step-template.yaml @@ -0,0 +1,89 @@ +parameters: + name: "" + clusterName: "" + scaleup: "" + +steps: + - bash: | + echo $UID + sudo rm -rf $(System.DefaultWorkingDirectory)/* + displayName: "Set up OS environment" + + - checkout: self + + - bash: | + go version + go env + mkdir -p '$(GOBIN)' + mkdir -p '$(GOPATH)/pkg' + mkdir -p '$(modulePath)' + echo '##vso[task.prependpath]$(GOBIN)' + echo '##vso[task.prependpath]$(GOROOT)/bin' + name: "GoEnv" + displayName: "Set up the Go environment" + + - task: AzureCLI@2 + displayName: 'Update IP configs' + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + clusterName=${{ parameters.clusterName }} + SCALE_UP=${{ parameters.scaleup }} + if [ -z "$SCALE_UP" ]; then + SCALE_UP=32 + fi + SECONDARY_IP_COUNT=$((SCALE_UP * 2)) \ + RESOURCE_GROUP="MC_${clusterName}_${clusterName}_$(REGION_AKS_CLUSTER_TEST)" \ + go run $(Build.SourcesDirectory)/test/integration/cilium-nodesubnet/ipconfigupdate.go + + - task: KubectlInstaller@0 + inputs: + kubectlVersion: latest + + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }} + ls -lah + pwd + kubectl cluster-info + kubectl get po -owide -A + echo "install Cilium ${CILIUM_VERSION_TAG}" + export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. 
-f1,2) + echo "installing files from ${DIR}" + echo "deploy Cilium ConfigMap" + kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config.yaml + # Passes Cilium image to daemonset and deployment + kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-agent/files + kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-operator/files + + envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/v${DIR}/cilium-agent/templates/daemonset.yaml | kubectl apply -f - + envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/v${DIR}/cilium-operator/templates/deployment.yaml | kubectl apply -f - + kubectl get po -owide -A + kubectl get crd -A + name: "installCilium" + displayName: "Install Cilium" + + - template: ../../templates/cilium-cli.yaml + + - script: | + echo "Start Nodesubnet E2E Tests" + kubectl get po -owide -A + sudo -E env "PATH=$PATH" make test-load SCALE_UP=32 OS_TYPE=linux VALIDATE_STATEFILE=true INSTALL_CNS=true INSTALL_CNS_NODESUBNET=true AZURE_IPAM_VERSION=$(make azure-ipam-version) CNS_VERSION=$(make cns-version) CLEANUP=true + retryCountOnTaskFailure: 3 + name: "nodeSubnetE2ETests" + displayName: "Run NodeSubnet E2E" + + - template: ../../templates/cilium-tests.yaml + parameters: + clusterName: ${{ parameters.clusterName }} + scaleup: ${{ parameters.scaleup }} diff --git a/.pipelines/singletenancy/cilium-nodesubnet/cilium-nodesubnet-e2e.stages.yaml b/.pipelines/singletenancy/cilium-nodesubnet/cilium-nodesubnet-e2e.stages.yaml new file mode 100644 index 0000000000..67bb6c85b4 --- /dev/null +++ b/.pipelines/singletenancy/cilium-nodesubnet/cilium-nodesubnet-e2e.stages.yaml @@ -0,0 +1,107 @@ +parameters: + dependsOn: "" + name: "cilium_nodesubnet_e2e" + clusterType: "nodesubnet-byocni-nokubeproxy-up" + clusterName: "cilndsubnete2e" + vmSize: "" + os: "linux" + arch: "" + osSKU: Ubuntu + hubbleEnabled: false + dualstackVersion: "" + cni: "cilium" + +stages: + - stage: ${{ parameters.clusterName }} + displayName: Create Cluster - ${{ parameters.displayName }} + dependsOn: + - ${{ parameters.dependsOn }} + - setup + pool: + isCustom: true + type: linux + name: $(BUILD_POOL_NAME_DEFAULT) + variables: + commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] + jobs: + - template: ../../templates/create-cluster.jobs.yaml + parameters: + name: ${{ parameters.name }} + displayName: ${{ parameters.displayName }} + clusterType: ${{ parameters.clusterType }} + clusterName: ${{ parameters.clusterName }}-$(commitID) + vmSize: ${{ parameters.vmSize }} + region: $(REGION_AKS_CLUSTER_TEST) + + - stage: ${{ parameters.name }} + displayName: E2E - ${{ parameters.displayName }} + variables: + TAG: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.Tag'] ] + CURRENT_VERSION: $[ stagedependencies.containerize.check_tag.outputs['CurrentTagManifests.currentTagManifests'] ] + commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] + GOPATH: "$(Agent.TempDirectory)/go" # Go workspace path + GOBIN: "$(GOPATH)/bin" # Go binaries path + modulePath: "$(GOPATH)/src/github.com/Azure/azure-container-networking" + + IMAGE_REPO_PATH: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.imageRepositoryPath'] ] + AZURE_IPAM_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.azureIpamVersion'] ] + NPM_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.npmVersion'] ] + 
CNI_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.cniVersion'] ] + CNS_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.cnsVersion'] ] + IPAM_IMAGE_NAME_OVERRIDE: $(IMAGE_REPO_PATH)/azure-ipam + CNS_IMAGE_NAME_OVERRIDE: $(IMAGE_REPO_PATH)/cns + CNI_IMAGE_NAME_OVERRIDE: $(IMAGE_REPO_PATH)/cni + condition: and(succeeded(), eq(variables.TAG, variables.CURRENT_VERSION)) + dependsOn: + - setup + - ${{ parameters.clusterName }} + jobs: + - job: ${{ parameters.name }} + displayName: Nodesubnet with Cilium - (${{ parameters.name }}) + timeoutInMinutes: 120 + pool: + name: $(BUILD_POOL_NAME_DEFAULT) + demands: + - agent.os -equals Linux + - Role -equals $(CUSTOM_E2E_ROLE) + isCustom: true + type: linux + steps: + - checkout: azure-container-networking + - template: cilium-nodesubnet-e2e.steps.yaml + parameters: + name: ${{ parameters.name }} + clusterName: ${{ parameters.clusterName }}-$(commitID) + arch: ${{ parameters.arch }} + os: ${{ parameters.os }} + scaleup: ${{ parameters.scaleup }} + + - template: ../../cni/k8s-e2e/k8s-e2e.jobs.yaml + parameters: + sub: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + clusterName: ${{ parameters.clusterName }}-$(commitID) + os: ${{ parameters.os }} + datapath: true + dns: true + cni: cilium + portforward: true + service: true + dependsOn: ${{ parameters.name }} + + - job: failedE2ELogs + displayName: "Failure Logs" + dependsOn: + - ${{ parameters.name }} + - cni_${{ parameters.os }} + condition: failed() + pool: + type: linux + variables: + ob_outputDirectory: $(Build.ArtifactStagingDirectory)/out + steps: + - checkout: azure-container-networking + - template: ../../templates/log.steps.yaml + parameters: + clusterName: ${{ parameters.clusterName }}-$(commitID) + os: ${{ parameters.os }} + cni: cilium diff --git a/.pipelines/singletenancy/cilium-nodesubnet/cilium-nodesubnet-e2e.steps.yaml b/.pipelines/singletenancy/cilium-nodesubnet/cilium-nodesubnet-e2e.steps.yaml new file mode 100644 index 0000000000..a41c7f3353 --- /dev/null +++ b/.pipelines/singletenancy/cilium-nodesubnet/cilium-nodesubnet-e2e.steps.yaml @@ -0,0 +1,87 @@ +parameters: + name: "" + clusterName: "" + scaleup: "" + +steps: + + - bash: | + go version + go env + mkdir -p '$(GOBIN)' + mkdir -p '$(GOPATH)/pkg' + mkdir -p '$(modulePath)' + echo '##vso[task.prependpath]$(GOBIN)' + echo '##vso[task.prependpath]$(GOROOT)/bin' + name: "GoEnv" + displayName: "Set up the Go environment" + + - task: KubectlInstaller@0 + inputs: + kubectlVersion: latest + + - task: AzureCLI@2 + displayName: 'Update IP configs' + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + clusterName=${{ parameters.clusterName }} + SCALE_UP=${{ parameters.scaleup }} + if [ -z "$SCALE_UP" ]; then + SCALE_UP=32 + fi + SECONDARY_IP_COUNT=$((SCALE_UP * 2)) \ + RESOURCE_GROUP="MC_${clusterName}_${clusterName}_$(REGION_AKS_CLUSTER_TEST)" \ + go run $(Build.SourcesDirectory)/test/integration/cilium-nodesubnet/ipconfigupdate.go + + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }} + ls -lah + pwd + kubectl cluster-info + kubectl get po -owide -A + echo "install Cilium ${CILIUM_VERSION_TAG}" + export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. 
-f1,2) + echo "installing files from ${DIR}" + echo "deploy Cilium ConfigMap" + kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config.yaml + # Passes Cilium image to daemonset and deployment + kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-agent/files + kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-operator/files + + envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/v${DIR}/cilium-agent/templates/daemonset.yaml | kubectl apply -f - + envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/v${DIR}/cilium-operator/templates/deployment.yaml | kubectl apply -f - + kubectl get po -owide -A + kubectl get crd -A + name: "installCilium" + displayName: "Install Cilium" + + - template: ../../templates/cilium-cli.yaml + + - script: | + echo "Start Nodesubnet E2E Tests" + kubectl get po -owide -A + sudo -E env "PATH=$PATH" make test-load \ + SCALE_UP=32 OS_TYPE=linux VALIDATE_STATEFILE=true \ + INSTALL_CNS=true INSTALL_CNS_NODESUBNET=true CLEANUP=true \ + AZURE_IPAM_VERSION=$(AZURE_IPAM_VERSION) CNS_VERSION=$(CNS_VERSION) \ + IPAM_IMAGE_NAME_OVERRIDE=$(IPAM_IMAGE_NAME_OVERRIDE) CNS_IMAGE_NAME_OVERRIDE=$(CNS_IMAGE_NAME_OVERRIDE) + retryCountOnTaskFailure: 3 + name: "nodeSubnetE2ETests" + displayName: "Run NodeSubnet E2E" + + - template: ../../templates/cilium-tests.yaml + parameters: + clusterName: ${{ parameters.clusterName }} + scaleup: ${{ parameters.scaleup }} diff --git a/.pipelines/singletenancy/cilium-overlay-withhubble/cilium-overlay-e2e-step-template.yaml b/.pipelines/singletenancy/cilium-overlay-withhubble/cilium-overlay-e2e-step-template.yaml index 38dc6eb17d..3ef28746c8 100644 --- a/.pipelines/singletenancy/cilium-overlay-withhubble/cilium-overlay-e2e-step-template.yaml +++ b/.pipelines/singletenancy/cilium-overlay-withhubble/cilium-overlay-e2e-step-template.yaml @@ -38,7 +38,7 @@ steps: make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }} ls -lah export CILIUM_VERSION_TAG=${CILIUM_HUBBLE_VERSION_TAG} - export DIR=${CILIUM_VERSION_TAG%.*} + export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. 
-f1,2) echo "installing files from ${DIR}" kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config-hubble.yaml kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-agent/files @@ -69,6 +69,7 @@ steps: echo "Waiting < 2 minutes for cilium to be ready" # Ensure Cilium is ready Xm\Xs cilium status --wait --wait-duration 2m + kubectl get crd -A retryCountOnTaskFailure: 3 name: "CiliumStatus" displayName: "Cilium Status" @@ -96,6 +97,7 @@ steps: scriptType: "bash" addSpnToEnvironment: true inlineScript: | + set -e cd test/integration/load # Scale Cluster Up/Down to confirm functioning CNS @@ -109,21 +111,21 @@ steps: displayName: "Validate Node Restart" retryCountOnTaskFailure: 3 + - template: ../../templates/cilium-connectivity-tests.yaml + - script: | - echo "Run Cilium Connectivity Tests" - cilium status - cilium connectivity test --connect-timeout 4s --request-timeout 30s --test '!pod-to-pod-encryption,!node-to-node-encryption' --force-deploy ns=`kubectl get ns | grep cilium-test | awk '{print $1}'` echo "##vso[task.setvariable variable=ciliumNamespace]$ns" retryCountOnTaskFailure: 3 - name: "ciliumConnectivityTests" - displayName: "Run Cilium Connectivity Tests" + name: "nsCapture" + displayName: "Capture Connectivity Test Namespace" - ${{ if eq( parameters['testHubble'], true) }}: - script: | - echo "enable Hubble metrics server" + export CILIUM_VERSION_TAG=${CILIUM_HUBBLE_VERSION_TAG} + export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. -f1,2) kubectl apply -f test/integration/manifests/cilium/hubble/hubble-peer-svc.yaml - kubectl apply -f test/integration/manifests/cilium/v1.14.4/cilium-config/cilium-config-hubble.yaml + kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config-hubble.yaml kubectl rollout restart ds cilium -n kube-system echo "wait <3 minutes for pods to be ready after restart" kubectl rollout status ds cilium -n kube-system --timeout=3m @@ -135,22 +137,16 @@ steps: displayName: "Run Hubble Connectivity Tests" - script: | + set -e echo "validate pod IP assignment and check systemd-networkd restart" kubectl get pod -owide -A - # Deleting echo-external-node deployment until cilium version matches TODO. https://github.com/cilium/cilium-cli/issues/67 is addressing the change. 
- # Saves 17 minutes
- kubectl delete deploy -n $(ciliumNamespace) echo-external-node
 if [ "$CILIUM_VERSION_TAG" = "cilium-nightly-pipeline" ]; then
 echo "Check cilium identities in $(ciliumNamespace) namespace during nightly run"
 echo "expect the identities to be deleted when the namespace is deleted"
 kubectl get ciliumidentity | grep cilium-test
 fi
 make test-validate-state
- echo "delete cilium connectivity test resources and re-validate state"
- kubectl delete ns $(ciliumNamespace)
- kubectl get pod -owide -A
- make test-validate-state
 name: "validatePods"
 displayName: "Validate Pods"
@@ -198,6 +194,8 @@ steps:
 name: "testAsyncDelete"
 displayName: "Verify Async Delete when CNS is down"
+
+ - template: ../../templates/cilium-mtu-check.yaml
+
 - script: |
 ARTIFACT_DIR=$(Build.ArtifactStagingDirectory)/test-output/
 echo $ARTIFACT_DIR
diff --git a/.pipelines/singletenancy/cilium-overlay-withhubble/cilium-overlay-e2e.stages.yaml b/.pipelines/singletenancy/cilium-overlay-withhubble/cilium-overlay-e2e.stages.yaml
new file mode 100644
index 0000000000..51a9313372
--- /dev/null
+++ b/.pipelines/singletenancy/cilium-overlay-withhubble/cilium-overlay-e2e.stages.yaml
@@ -0,0 +1,102 @@
+parameters:
+ name: ""
+ displayName: ""
+ clusterType: ""
+ clusterName: ""
+ vmSize: ""
+ k8sVersion: ""
+ dependsOn: ""
+ os: "linux"
+ testHubble: false
+
+stages:
+ - stage: ${{ parameters.clusterName }}
+ displayName: Create Cluster - ${{ parameters.displayName }}
+ dependsOn:
+ - ${{ parameters.dependsOn }}
+ - setup
+ pool:
+ isCustom: true
+ type: linux
+ name: $(BUILD_POOL_NAME_DEFAULT)
+ variables:
+ commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ]
+ jobs:
+ - template: ../../templates/create-cluster.jobs.yaml
+ parameters:
+ name: ${{ parameters.name }}
+ displayName: ${{ parameters.displayName }}
+ clusterType: ${{ parameters.clusterType }}
+ clusterName: ${{ parameters.clusterName }}-$(commitID)
+ vmSize: ${{ parameters.vmSize }}
+ k8sVersion: ${{ parameters.k8sVersion }}
+ dependsOn: ${{ parameters.dependsOn }}
+ region: $(REGION_AKS_CLUSTER_TEST)
+
+ - stage: ${{ parameters.name }}
+ displayName: E2E - ${{ parameters.displayName }}
+ dependsOn:
+ - setup
+ - ${{ parameters.clusterName }}
+ variables:
+ commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ]
+ GOPATH: "$(Agent.TempDirectory)/go" # Go workspace path
+ GOBIN: "$(GOPATH)/bin" # Go binaries path
+ modulePath: "$(GOPATH)/src/github.com/Azure/azure-container-networking"
+
+ IMAGE_REPO_PATH: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.imageRepositoryPath'] ]
+ AZURE_IPAM_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.azureIpamVersion'] ]
+ CNI_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.cniVersion'] ]
+ CNS_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.cnsVersion'] ]
+ IPAM_IMAGE_NAME_OVERRIDE: $(IMAGE_REPO_PATH)/azure-ipam
+ CNS_IMAGE_NAME_OVERRIDE: $(IMAGE_REPO_PATH)/cns
+ CNI_IMAGE_NAME_OVERRIDE: $(IMAGE_REPO_PATH)/cni
+ jobs:
+ - job: ${{ parameters.name }}
+ displayName: Cilium Overlay Test Suite - (${{ parameters.name }})
+ timeoutInMinutes: 120
+ pool:
+ name: $(BUILD_POOL_NAME_DEFAULT)
+ demands:
+ - agent.os -equals Linux
+ - Role -equals $(CUSTOM_E2E_ROLE)
+ isCustom: true
+ type: linux
+ steps:
+ - checkout: azure-container-networking
+ - template: cilium-overlay-e2e.steps.yaml
+ parameters:
+ name: ${{ parameters.name }}
+ clusterName: ${{ parameters.clusterName
}}-$(commitID) + testHubble: ${{ parameters.testHubble }} + scaleup: 100 + + - template: ../../cni/k8s-e2e/k8s-e2e.jobs.yaml + parameters: + sub: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + clusterName: ${{ parameters.clusterName }}-$(commitID) + os: ${{ parameters.os }} + cni: cilium + dependsOn: ${{ parameters.name }} + datapath: true + dns: true + portforward: true + service: true + + - job: failedE2ELogs + displayName: "Failure Logs" + dependsOn: + - ${{ parameters.name }} + - cni_${{ parameters.os }} + condition: failed() + pool: + type: linux + variables: + ob_outputDirectory: $(Build.ArtifactStagingDirectory)/out + steps: + - checkout: azure-container-networking + - template: ../../templates/log.steps.yaml + parameters: + clusterName: ${{ parameters.clusterName }}-$(commitID) + os: ${{ parameters.os }} + cni: cilium diff --git a/.pipelines/singletenancy/cilium-overlay-withhubble/cilium-overlay-e2e.steps.yaml b/.pipelines/singletenancy/cilium-overlay-withhubble/cilium-overlay-e2e.steps.yaml new file mode 100644 index 0000000000..950fa5fe1b --- /dev/null +++ b/.pipelines/singletenancy/cilium-overlay-withhubble/cilium-overlay-e2e.steps.yaml @@ -0,0 +1,209 @@ +parameters: + name: "" + clusterName: "" + testHubble: false + scaleup: "" + +steps: + + - bash: | + go version + go env + mkdir -p '$(GOBIN)' + mkdir -p '$(GOPATH)/pkg' + mkdir -p '$(modulePath)' + echo '##vso[task.prependpath]$(GOBIN)' + echo '##vso[task.prependpath]$(GOROOT)/bin' + name: "GoEnv" + displayName: "Set up the Go environment" + + - task: KubectlInstaller@0 + inputs: + kubectlVersion: latest + + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }} + ls -lah + export CILIUM_VERSION_TAG=${CILIUM_HUBBLE_VERSION_TAG} + export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. 
-f1,2) + echo "installing files from ${DIR}" + kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config-hubble.yaml + kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-agent/files + kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-operator/files + envsubst '${CILIUM_IMAGE_REGISTRY},${CILIUM_VERSION_TAG}' < test/integration/manifests/cilium/v${DIR}/cilium-agent/templates/daemonset.yaml | kubectl apply -f - + envsubst '${CILIUM_IMAGE_REGISTRY},${CILIUM_VERSION_TAG}' < test/integration/manifests/cilium/v${DIR}/cilium-operator/templates/deployment.yaml | kubectl apply -f - + # Use different file directories for nightly and current cilium version + name: "installCilium" + displayName: "Install Cilium on AKS Overlay" + + - template: ../../templates/cilium-cli.yaml + + - script: | + echo "Start Azilium E2E Tests on Overlay Cluster" + if [ "$CILIUM_VERSION_TAG" = "cilium-nightly-pipeline" ] + then + CNS=$(CNS_VERSION) IPAM=$(AZURE_IPAM_VERSION) && echo "Running nightly" + else + CNS=$(make cns-version) IPAM=$(make azure-ipam-version) + fi + sudo -E env "PATH=$PATH" make test-load \ + SCALE_UP=32 OS_TYPE=linux VALIDATE_STATEFILE=true \ + INSTALL_CNS=true INSTALL_OVERLAY=true CLEANUP=true \ + AZURE_IPAM_VERSION=$(AZURE_IPAM_VERSION) CNS_VERSION=$(CNS_VERSION) \ + IPAM_IMAGE_NAME_OVERRIDE=$(IPAM_IMAGE_NAME_OVERRIDE) CNS_IMAGE_NAME_OVERRIDE=$(CNS_IMAGE_NAME_OVERRIDE) + retryCountOnTaskFailure: 3 + name: "aziliumTest" + displayName: "Run Azilium E2E on AKS Overlay" + + - script: | + kubectl get pods -A + echo "Waiting < 2 minutes for cilium to be ready" + # Ensure Cilium is ready Xm\Xs + cilium status --wait --wait-duration 2m + kubectl get crd -A + retryCountOnTaskFailure: 3 + name: "CiliumStatus" + displayName: "Cilium Status" + + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + kubectl get po -owide -A + clusterName=${{ parameters.clusterName }} + echo "Restarting nodes" + for val in $(az vmss list -g MC_${clusterName}_${clusterName}_$(REGION_AKS_CLUSTER_TEST) --query "[].name" -o tsv); do + make -C ./hack/aks restart-vmss AZCLI=az CLUSTER=${clusterName} REGION=$(REGION_AKS_CLUSTER_TEST) VMSS_NAME=${val} + done + displayName: "Restart Nodes" + + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + cd test/integration/load + + # Scale Cluster Up/Down to confirm functioning CNS + ITERATIONS=2 SCALE_UP=${{ parameters.scaleup }} OS_TYPE=linux go test -count 1 -timeout 30m -tags load -run ^TestLoad$ + kubectl get pods -owide -A + + cd ../../.. + echo "Validating Node Restart" + make test-validate-state OS_TYPE=linux RESTART_CASE=true + kubectl delete ns load-test + displayName: "Validate Node Restart" + retryCountOnTaskFailure: 3 + + - template: ../../templates/cilium-connectivity-tests.yaml + + - script: | + ns=`kubectl get ns | grep cilium-test | awk '{print $1}'` + echo "##vso[task.setvariable variable=ciliumNamespace]$ns" + retryCountOnTaskFailure: 3 + name: "nsCapture" + displayName: "Capture Connectivity Test Namespace" + + - ${{ if eq( parameters['testHubble'], true) }}: + - script: | + echo "enable Hubble metrics server" + export CILIUM_VERSION_TAG=${CILIUM_HUBBLE_VERSION_TAG} + export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. 
-f1,2)
+ kubectl apply -f test/integration/manifests/cilium/hubble/hubble-peer-svc.yaml
+ kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config-hubble.yaml
+ kubectl rollout restart ds cilium -n kube-system
+ echo "wait <3 minutes for pods to be ready after restart"
+ kubectl rollout status ds cilium -n kube-system --timeout=3m
+ kubectl get pods -Aowide
+ echo "verify Hubble metrics endpoint is usable"
+ go test ./test/integration/networkobservability -v -tags=networkobservability
+ retryCountOnTaskFailure: 3
+ name: "HubbleConnectivityTests"
+ displayName: "Run Hubble Connectivity Tests"
+
+ - script: |
+ set -e
+ echo "validate pod IP assignment and check systemd-networkd restart"
+ kubectl get pod -owide -A
+
+ if [ "$CILIUM_VERSION_TAG" = "cilium-nightly-pipeline" ]; then
+ echo "Check cilium identities in $(ciliumNamespace) namespace during nightly run"
+ echo "expect the identities to be deleted when the namespace is deleted"
+ kubectl get ciliumidentity | grep cilium-test
+ fi
+ make test-validate-state
+ echo "delete cilium connectivity test resources and re-validate state" # TODO Delete this and the next 4 lines if connectivity no longer has bug
+ kubectl delete ns $(ciliumNamespace)
+ kubectl get pod -owide -A
+ make test-validate-state
+ name: "validatePods"
+ displayName: "Validate Pods"
+
+ - script: |
+ if [ "$CILIUM_VERSION_TAG" = "cilium-nightly-pipeline" ]; then
+ kubectl get pod -owide -n $(ciliumNamespace)
+ echo "wait for pod and cilium identity deletion in cilium-test namespace"
+ while true; do
+ pods=$(kubectl get pods -n $(ciliumNamespace) --no-headers=true 2>/dev/null)
+ if [[ -z "$pods" ]]; then
+ echo "No pods found"
+ break
+ fi
+ sleep 2s
+ done
+ sleep 20s
+ echo "Verify cilium identities are deleted from cilium-test"
+ checkIdentity="$(kubectl get ciliumidentity -o json | grep cilium-test | jq -e 'length == 0')"
+ if [[ -n $checkIdentity ]]; then
+ echo "##[error]Cilium Identities still present in $(ciliumNamespace) namespace"
+ exit 1
+ else
+ printf -- "Identities deleted from $(ciliumNamespace) namespace\n"
+ fi
+ else
+ echo "skip cilium identities check for PR pipeline"
+ fi
+ name: "CiliumIdentities"
+ displayName: "Verify Cilium Identities Deletion"
+
+ - script: |
+ echo "Run wireserver and metadata connectivity Tests"
+ bash test/network/wireserver_metadata_test.sh
+ retryCountOnTaskFailure: 3
+ name: "WireserverMetadataConnectivityTests"
+ displayName: "Run Wireserver and Metadata Connectivity Tests"
+
+ - script: |
+ cd hack/scripts
+ chmod +x async-delete-test.sh
+ ./async-delete-test.sh
+ if !
[ -z $(kubectl -n kube-system get ds azure-cns | grep non-existing) ]; then + kubectl -n kube-system patch daemonset azure-cns --type json -p='[{"op": "remove", "path": "/spec/template/spec/nodeSelector/non-existing"}]' + fi + name: "testAsyncDelete" + displayName: "Verify Async Delete when CNS is down" + + - template: ../../templates/cilium-mtu-check.yaml + + - script: | + ARTIFACT_DIR=$(Build.ArtifactStagingDirectory)/test-output/ + echo $ARTIFACT_DIR + sudo rm -rf $ARTIFACT_DIR + sudo rm -rf test/integration/logs + name: "Cleanupartifactdir" + displayName: "Cleanup artifact dir" + condition: always() diff --git a/.pipelines/singletenancy/cilium-overlay/cilium-overlay-e2e-step-template.yaml b/.pipelines/singletenancy/cilium-overlay/cilium-overlay-e2e-step-template.yaml index c5c4c97021..c620cdfb29 100644 --- a/.pipelines/singletenancy/cilium-overlay/cilium-overlay-e2e-step-template.yaml +++ b/.pipelines/singletenancy/cilium-overlay/cilium-overlay-e2e-step-template.yaml @@ -2,6 +2,7 @@ parameters: name: "" clusterName: "" testHubble: false + testLRP: false scaleup: "" @@ -54,7 +55,7 @@ steps: kubectl apply -f test/integration/manifests/cilium/cilium${FILE_PATH}-operator else echo "install Cilium ${CILIUM_VERSION_TAG}" - export DIR=${CILIUM_VERSION_TAG%.*} + export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. -f1,2) echo "installing files from ${DIR}" echo "deploy Cilium ConfigMap" kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config.yaml @@ -91,9 +92,20 @@ steps: echo "Waiting < 2 minutes for cilium to be ready" # Ensure Cilium is ready Xm\Xs cilium status --wait --wait-duration 2m + kubectl get crd -A retryCountOnTaskFailure: 3 name: "CiliumStatus" displayName: "Cilium Status" + # Run LRP test after cns and config with lrp enabled config deployed + - ${{ if eq( parameters['testLRP'], true) }}: + - script: | + set -e + cd test/integration/lrp/ + go test ./lrp_test.go -v -tags "lrp" -count=1 -run ^TestLRP$ + kubectl get pods -Aowide + retryCountOnTaskFailure: 3 + name: "LRPTest" + displayName: "Run Cilium Local Redirect Policy Test" - task: AzureCLI@1 inputs: @@ -118,6 +130,7 @@ steps: scriptType: "bash" addSpnToEnvironment: true inlineScript: | + set -e cd test/integration/load # Scale Cluster Up/Down to confirm functioning CNS @@ -131,26 +144,23 @@ steps: displayName: "Validate Node Restart" retryCountOnTaskFailure: 3 + - template: ../../templates/cilium-connectivity-tests.yaml + - script: | - echo "Run Cilium Connectivity Tests" - cilium status - if [ "$CILIUM_VERSION_TAG" = "cilium-nightly-pipeline" ] - then - cilium connectivity test --connect-timeout 4s --request-timeout 30s --test '!pod-to-pod-encryption,!node-to-node-encryption,!check-log-errors' --force-deploy - else - cilium connectivity test --connect-timeout 4s --request-timeout 30s --test '!pod-to-pod-encryption,!node-to-node-encryption' --force-deploy - fi ns=`kubectl get ns | grep cilium-test | awk '{print $1}'` echo "##vso[task.setvariable variable=ciliumNamespace]$ns" retryCountOnTaskFailure: 3 - name: "ciliumConnectivityTests" - displayName: "Run Cilium Connectivity Tests" + name: "nsCapture" + displayName: "Capture Connectivity Test Namespace" - ${{ if eq( parameters['testHubble'], true) }}: - script: | echo "enable Hubble metrics server" + export CILIUM_VERSION_TAG=${CILIUM_HUBBLE_VERSION_TAG} + export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. 
-f1,2) + echo "installing files from ${DIR}" kubectl apply -f test/integration/manifests/cilium/hubble/hubble-peer-svc.yaml - kubectl apply -f test/integration/manifests/cilium/cilium-config-hubble.yaml + kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config-hubble.yaml kubectl rollout restart ds cilium -n kube-system echo "wait <3 minutes for pods to be ready after restart" kubectl rollout status ds cilium -n kube-system --timeout=3m @@ -162,11 +172,9 @@ steps: displayName: "Run Hubble Connectivity Tests" - script: | + set -e echo "validate pod IP assignment and check systemd-networkd restart" kubectl get pod -owide -A - # Deleting echo-external-node deployment until cilium version matches TODO. https://github.com/cilium/cilium-cli/issues/67 is addressing the change. - # Saves 17 minutes - kubectl delete deploy -n $(ciliumNamespace) echo-external-node if [ "$CILIUM_VERSION_TAG" = "cilium-nightly-pipeline" ]; then echo "Check cilium identities in $(ciliumNamespace) namepsace during nightly run" echo "expect the identities to be deleted when the namespace is deleted" @@ -207,19 +215,6 @@ steps: name: "CiliumIdentities" displayName: "Verify Cilium Identities Deletion" - - script: | - echo "validate pod IP assignment before CNS restart" - kubectl get pod -owide -A - make test-validate-state - echo "restart CNS" - kubectl rollout restart ds azure-cns -n kube-system - kubectl rollout status ds azure-cns -n kube-system - kubectl get pod -owide -A - echo "validate pods after CNS restart" - make test-validate-state - name: "restartCNS" - displayName: "Restart CNS and validate pods" - - script: | echo "Run wireserver and metadata connectivity Tests" bash test/network/wireserver_metadata_test.sh @@ -240,3 +235,5 @@ steps: fi name: "testAsyncDelete" displayName: "Verify Async Delete when CNS is down" + + - template: ../../templates/cilium-mtu-check.yaml diff --git a/.pipelines/singletenancy/cilium-overlay/cilium-overlay-e2e.stages.yaml b/.pipelines/singletenancy/cilium-overlay/cilium-overlay-e2e.stages.yaml new file mode 100644 index 0000000000..422ca60bf7 --- /dev/null +++ b/.pipelines/singletenancy/cilium-overlay/cilium-overlay-e2e.stages.yaml @@ -0,0 +1,100 @@ +parameters: + name: "" + displayName: "" + clusterType: "" + clusterName: "" + vmSize: "" + k8sVersion: "" + dependsOn: "" + os: "linux" + +stages: + - stage: ${{ parameters.clusterName }} + displayName: Create Cluster - ${{ parameters.displayName }} + dependsOn: + - ${{ parameters.dependsOn }} + - setup + pool: + isCustom: true + type: linux + name: $(BUILD_POOL_NAME_DEFAULT) + variables: + commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] + jobs: + - template: ../../templates/create-cluster.jobs.yaml + parameters: + name: ${{ parameters.name }} + displayName: ${{ parameters.displayName }} + clusterType: ${{ parameters.clusterType }} + clusterName: ${{ parameters.clusterName }}-$(commitID) + vmSize: ${{ parameters.vmSize }} + k8sVersion: ${{ parameters.k8sVersion }} + dependsOn: ${{ parameters.dependsOn }} + region: $(REGION_AKS_CLUSTER_TEST) + + - stage: ${{ parameters.name }} + displayName: E2E - ${{ parameters.displayName }} + dependsOn: + - setup + - ${{ parameters.clusterName }} + variables: + commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] + GOPATH: "$(Agent.TempDirectory)/go" # Go workspace path + GOBIN: "$(GOPATH)/bin" # Go binaries path + modulePath: "$(GOPATH)/src/github.com/Azure/azure-container-networking" + + 
IMAGE_REPO_PATH: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.imageRepositoryPath'] ] + AZURE_IPAM_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.azureIpamVersion'] ] + CNI_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.cniVersion'] ] + CNS_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.cnsVersion'] ] + IPAM_IMAGE_NAME_OVERRIDE: $(IMAGE_REPO_PATH)/azure-ipam + CNS_IMAGE_NAME_OVERRIDE: $(IMAGE_REPO_PATH)/cns + CNI_IMAGE_NAME_OVERRIDE: $(IMAGE_REPO_PATH)/cni + jobs: + - job: ${{ parameters.name }} + displayName: Cilium Overlay Test Suite - (${{ parameters.name }}) + timeoutInMinutes: 120 + pool: + isCustom: true + type: linux + name: $(BUILD_POOL_NAME_DEFAULT) + demands: + - agent.os -equals Linux + - Role -equals $(CUSTOM_E2E_ROLE) + steps: + - checkout: azure-container-networking + - template: cilium-overlay-e2e.steps.yaml + parameters: + name: ${{ parameters.name }} + clusterName: ${{ parameters.clusterName }}-$(commitID) + scaleup: 100 + + - template: ../../cni/k8s-e2e/k8s-e2e.jobs.yaml + parameters: + sub: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + clusterName: ${{ parameters.clusterName }}-$(commitID) + os: ${{ parameters.os }} + cni: cilium + dependsOn: ${{ parameters.name }} + datapath: true + dns: true + portforward: true + service: true + + - job: failedE2ELogs + displayName: "Failure Logs" + dependsOn: + - ${{ parameters.name }} + - cni_${{ parameters.os }} + condition: failed() + pool: + type: linux + variables: + ob_outputDirectory: $(Build.ArtifactStagingDirectory)/out + steps: + - checkout: azure-container-networking + - template: ../../templates/log.steps.yaml + parameters: + clusterName: ${{ parameters.clusterName }}-$(commitID) + os: ${{ parameters.os }} + cni: cilium diff --git a/.pipelines/singletenancy/cilium-overlay/cilium-overlay-e2e.steps.yaml b/.pipelines/singletenancy/cilium-overlay/cilium-overlay-e2e.steps.yaml new file mode 100644 index 0000000000..a7bcdb4aec --- /dev/null +++ b/.pipelines/singletenancy/cilium-overlay/cilium-overlay-e2e.steps.yaml @@ -0,0 +1,252 @@ +parameters: + name: "" + clusterName: "" + testHubble: false + testLRP: false + scaleup: "" + + +steps: + + - bash: | + go version + go env + mkdir -p '$(GOBIN)' + mkdir -p '$(GOPATH)/pkg' + mkdir -p '$(modulePath)' + echo '##vso[task.prependpath]$(GOBIN)' + echo '##vso[task.prependpath]$(GOROOT)/bin' + name: "GoEnv" + displayName: "Set up the Go environment" + + - task: KubectlInstaller@0 + inputs: + kubectlVersion: latest + + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }} + ls -lah + pwd + kubectl cluster-info + kubectl get po -owide -A + if [ "$CILIUM_VERSION_TAG" = "cilium-nightly-pipeline" ]; then + FILE_PATH=-nightly + echo "Running nightly" + echo "deploy Cilium ConfigMap" + kubectl apply -f test/integration/manifests/cilium/cilium${FILE_PATH}-config.yaml + # Passes Cilium image to daemonset and deployment + envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/daemonset.yaml | kubectl apply -f - + envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/deployment.yaml | kubectl apply -f - + # Use different file directories for nightly and current cilium version + kubectl apply -f 
test/integration/manifests/cilium/cilium${FILE_PATH}-agent + kubectl apply -f test/integration/manifests/cilium/cilium${FILE_PATH}-operator + else + echo "install Cilium ${CILIUM_VERSION_TAG}" + export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. -f1,2) + echo "installing files from ${DIR}" + echo "deploy Cilium ConfigMap" + kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config.yaml + # Passes Cilium image to daemonset and deployment + kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-agent/files + kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-operator/files + + envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/v${DIR}/cilium-agent/templates/daemonset.yaml | kubectl apply -f - + envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/v${DIR}/cilium-operator/templates/deployment.yaml | kubectl apply -f - + fi + + kubectl get po -owide -A + name: "installCilium" + displayName: "Install Cilium on AKS Overlay" + + - template: ../../templates/cilium-cli.yaml + + - script: | + echo "Start Azilium E2E Tests on Overlay Cluster" + if [ "$CILIUM_VERSION_TAG" = "cilium-nightly-pipeline" ] + then + CNS=$(CNS_VERSION) IPAM=$(AZURE_IPAM_VERSION) && echo "Running nightly" + else + CNS=$(make cns-version) IPAM=$(make azure-ipam-version) + fi + kubectl get po -owide -A + sudo -E env "PATH=$PATH" make test-load \ + SCALE_UP=32 OS_TYPE=linux VALIDATE_STATEFILE=true \ + INSTALL_CNS=true INSTALL_OVERLAY=true CLEANUP=true \ + AZURE_IPAM_VERSION=$(AZURE_IPAM_VERSION) CNS_VERSION=$(CNS_VERSION) \ + IPAM_IMAGE_NAME_OVERRIDE=$(IPAM_IMAGE_NAME_OVERRIDE) CNS_IMAGE_NAME_OVERRIDE=$(CNS_IMAGE_NAME_OVERRIDE) + retryCountOnTaskFailure: 3 + name: "aziliumTest" + displayName: "Run Azilium E2E on AKS Overlay" + + - script: | + kubectl get po -owide -A + echo "Waiting < 2 minutes for cilium to be ready" + # Ensure Cilium is ready Xm\Xs + cilium status --wait --wait-duration 2m + kubectl get crd -A + retryCountOnTaskFailure: 3 + name: "CiliumStatus" + displayName: "Cilium Status" + # Run LRP test after cns and config with lrp enabled config deployed + - ${{ if eq( parameters['testLRP'], true) }}: + - script: | + set -e + cd test/integration/lrp/ + go test ./lrp_test.go -v -tags "lrp" -count=1 -run ^TestLRP$ + kubectl get pods -Aowide + retryCountOnTaskFailure: 3 + name: "LRPTest" + displayName: "Run Cilium Local Redirect Policy Test" + + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + kubectl get po -owide -A + clusterName=${{ parameters.clusterName }} + echo "Restarting nodes" + for val in $(az vmss list -g MC_${clusterName}_${clusterName}_$(REGION_AKS_CLUSTER_TEST) --query "[].name" -o tsv); do + make -C ./hack/aks restart-vmss AZCLI=az CLUSTER=${clusterName} REGION=$(REGION_AKS_CLUSTER_TEST) VMSS_NAME=${val} + done + displayName: "Restart Nodes" + + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + cd test/integration/load + + # Scale Cluster Up/Down to confirm functioning CNS + ITERATIONS=2 SCALE_UP=${{ parameters.scaleup }} OS_TYPE=linux go test -count 1 -timeout 30m -tags load -run ^TestLoad$ + kubectl get pods -owide -A + + cd ../../.. 
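# A minimal sketch of running the scale test above by hand (assumes a
# kubeconfig already pointing at the target cluster; SCALE_UP=100 mirrors the
# value the stages template passes in):
#   cd test/integration/load
#   ITERATIONS=2 SCALE_UP=100 OS_TYPE=linux go test -count 1 -timeout 30m -tags load -run ^TestLoad$
#   cd ../../..
# ITERATIONS and SCALE_UP are plain environment variables read by the Go load
# test, so the same invocation works locally and in the pipeline.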
+ echo "Validating Node Restart" + make test-validate-state OS_TYPE=linux RESTART_CASE=true + kubectl delete ns load-test + displayName: "Validate Node Restart" + retryCountOnTaskFailure: 3 + + - template: ../../templates/cilium-connectivity-tests.yaml + + - script: | + ns=`kubectl get ns | grep cilium-test | awk '{print $1}'` + echo "##vso[task.setvariable variable=ciliumNamespace]$ns" + retryCountOnTaskFailure: 3 + name: "nsCapture" + displayName: "Capture Connectivity Test Namespace" + + - ${{ if eq( parameters['testHubble'], true) }}: + - script: | + echo "enable Hubble metrics server" + export CILIUM_VERSION_TAG=${CILIUM_HUBBLE_VERSION_TAG} + export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. -f1,2) + kubectl apply -f test/integration/manifests/cilium/hubble/hubble-peer-svc.yaml + kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config-hubble.yaml + kubectl rollout restart ds cilium -n kube-system + echo "wait <3 minutes for pods to be ready after restart" + kubectl rollout status ds cilium -n kube-system --timeout=3m + kubectl get pods -Aowide + echo "verify Hubble metrics endpoint is usable" + go test ./test/integration/networkobservability -count=1 -v -tags=networkobservability + retryCountOnTaskFailure: 3 + name: "HubbleConnectivityTests" + displayName: "Run Hubble Connectivity Tests" + + - script: | + set -e + echo "validate pod IP assignment and check systemd-networkd restart" + kubectl get pod -owide -A + if [ "$CILIUM_VERSION_TAG" = "cilium-nightly-pipeline" ]; then + echo "Check cilium identities in $(ciliumNamespace) namepsace during nightly run" + echo "expect the identities to be deleted when the namespace is deleted" + kubectl get ciliumidentity | grep cilium-test + fi + make test-validate-state + echo "delete cilium connectivity test resources and re-validate state" + kubectl delete ns $(ciliumNamespace) + kubectl get pod -owide -A + make test-validate-state + name: "validatePods" + displayName: "Validate Pods" + + - script: | + if [ "$CILIUM_VERSION_TAG" = "cilium-nightly-pipeline" ]; then + kubectl get pod -owide -n $(ciliumNamespace) + echo "wait for pod and cilium identity deletion in $(ciliumNamespace) namespace" + while true; do + pods=$(kubectl get pods -n $(ciliumNamespace) --no-headers=true 2>/dev/null) + if [[ -z "$pods" ]]; then + echo "No pods found" + break + fi + sleep 2s + done + sleep 20s + echo "Verify cilium identities are deleted from $(ciliumNamespace)" + checkIdentity="$(kubectl get ciliumidentity -o json | grep cilium-test | jq -e 'length == 0')" + if [[ -n $checkIdentity ]]; then + echo "##[error]Cilium Identities still present in $(ciliumNamespace) namespace" + exit 1 + else + printf -- "Identities deleted from $(ciliumNamespace) namespace\n" + fi + else + echo "skip cilium identities check for PR pipeline" + fi + name: "CiliumIdentities" + displayName: "Verify Cilium Identities Deletion" + + - script: | # TODO REMOVE THIS STEP, make test-load covers this + set -e + echo "validate pod IP assignment before CNS restart" + kubectl get pod -owide -A + make test-validate-state + echo "restart CNS" + kubectl rollout restart ds azure-cns -n kube-system + kubectl rollout status ds azure-cns -n kube-system + kubectl get pod -owide -A + echo "validate pods after CNS restart" + make test-validate-state + name: "restartCNS" + displayName: "Restart CNS and validate pods" + + - script: | + echo "Run wireserver and metadata connectivity Tests" + bash test/network/wireserver_metadata_test.sh + retryCountOnTaskFailure: 3 + name: 
"WireserverMetadataConnectivityTests" + displayName: "Run Wireserver and Metadata Connectivity Tests" + + - script: | + if [ "$CILIUM_VERSION_TAG" = "cilium-nightly-pipeline" ]; then + echo "Running nightly, skip async delete test" + else + cd hack/scripts + chmod +x async-delete-test.sh + ./async-delete-test.sh + if ! [ -z $(kubectl -n kube-system get ds azure-cns | grep non-existing) ]; then + kubectl -n kube-system patch daemonset azure-cns --type json -p='[{"op": "remove", "path": "/spec/template/spec/nodeSelector/non-existing"}]' + fi + fi + name: "testAsyncDelete" + displayName: "Verify Async Delete when CNS is down" + + - template: ../../templates/cilium-mtu-check.yaml + + diff --git a/.pipelines/singletenancy/cilium/cilium-e2e-step-template.yaml b/.pipelines/singletenancy/cilium/cilium-e2e-step-template.yaml index 7620006765..5284285541 100644 --- a/.pipelines/singletenancy/cilium/cilium-e2e-step-template.yaml +++ b/.pipelines/singletenancy/cilium/cilium-e2e-step-template.yaml @@ -40,7 +40,7 @@ steps: kubectl cluster-info kubectl get po -owide -A echo "install Cilium ${CILIUM_VERSION_TAG}" - export DIR=${CILIUM_VERSION_TAG%.*} + export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. -f1,2) echo "installing files from ${DIR}" echo "deploy Cilium ConfigMap" kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config.yaml @@ -64,88 +64,8 @@ steps: name: "aziliumTest" displayName: "Run Azilium E2E" - - script: | - kubectl get po -owide -A - echo "Waiting < 2 minutes for cilium to be ready" - # Ensure Cilium is ready Xm\Xs - cilium status --wait --wait-duration 2m - retryCountOnTaskFailure: 3 - name: "CiliumStatus" - displayName: "Cilium Status" - - - task: AzureCLI@1 - inputs: - azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) - scriptLocation: "inlineScript" - scriptType: "bash" - addSpnToEnvironment: true - inlineScript: | - set -e - kubectl get po -owide -A - clusterName=${{ parameters.clusterName }} - echo "Restarting nodes" - for val in $(az vmss list -g MC_${clusterName}_${clusterName}_$(REGION_AKS_CLUSTER_TEST) --query "[].name" -o tsv); do - make -C ./hack/aks restart-vmss AZCLI=az CLUSTER=${clusterName} REGION=$(REGION_AKS_CLUSTER_TEST) VMSS_NAME=${val} - done - displayName: "Restart Nodes" + - template: ../../templates/cilium-tests.yaml + parameters: + clusterName: ${{ parameters.clusterName }} + scaleup: ${{ parameters.scaleup }} - - task: AzureCLI@1 - inputs: - azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) - scriptLocation: "inlineScript" - scriptType: "bash" - addSpnToEnvironment: true - inlineScript: | - cd test/integration/load - - # Scale Cluster Up/Down to confirm functioning CNS - ITERATIONS=2 SCALE_UP=${{ parameters.scaleup }} OS_TYPE=linux go test -count 1 -timeout 30m -tags load -run ^TestLoad$ - kubectl get pods -owide -A - - cd ../../.. 
- echo "Validating Node Restart" - make test-validate-state OS_TYPE=linux RESTART_CASE=true - kubectl delete ns load-test - displayName: "Validate Node Restart" - retryCountOnTaskFailure: 3 - - - script: | - echo "Run Cilium Connectivity Tests" - cilium status - cilium connectivity test --connect-timeout 4s --request-timeout 30s --test '!pod-to-pod-encryption,!node-to-node-encryption' --force-deploy - ns=`kubectl get ns | grep cilium-test | awk '{print $1}'` - echo "##vso[task.setvariable variable=ciliumNamespace]$ns" - retryCountOnTaskFailure: 3 - name: "ciliumConnectivityTests" - displayName: "Run Cilium Connectivity Tests" - - - script: | - echo "validate pod IP assignment and check systemd-networkd restart" - kubectl get pod -owide -A - # Deleting echo-external-node deployment until cilium version matches TODO. https://github.com/cilium/cilium-cli/issues/67 is addressing the change. - # Saves 17 minutes - kubectl delete deploy -n $(ciliumNamespace) echo-external-node - make test-validate-state - echo "delete cilium connectivity test resources and re-validate state" - kubectl delete ns $(ciliumNamespace) - kubectl get pod -owide -A - make test-validate-state - name: "validatePods" - displayName: "Validate Pods" - - - script: | - echo "Run wireserver and metadata connectivity Tests" - bash test/network/wireserver_metadata_test.sh - retryCountOnTaskFailure: 3 - name: "WireserverMetadataConnectivityTests" - displayName: "Run Wireserver and Metadata Connectivity Tests" - - - script: | - cd hack/scripts - chmod +x async-delete-test.sh - ./async-delete-test.sh - if ! [ -z $(kubectl -n kube-system get ds azure-cns | grep non-existing) ]; then - kubectl -n kube-system patch daemonset azure-cns --type json -p='[{"op": "remove", "path": "/spec/template/spec/nodeSelector/non-existing"}]' - fi - name: "testAsyncDelete" - displayName: "Verify Async Delete when CNS is down" diff --git a/.pipelines/singletenancy/cilium/cilium-e2e.stages.yaml b/.pipelines/singletenancy/cilium/cilium-e2e.stages.yaml new file mode 100644 index 0000000000..b87e8bcc35 --- /dev/null +++ b/.pipelines/singletenancy/cilium/cilium-e2e.stages.yaml @@ -0,0 +1,101 @@ +parameters: + name: "" + displayName: "" + clusterType: "" + clusterName: "" + vmSize: "" + k8sVersion: "" + dependsOn: "" + os: "linux" + +stages: + - stage: ${{ parameters.clusterName }} + displayName: Create Cluster - ${{ parameters.displayName }} + dependsOn: + - ${{ parameters.dependsOn }} + - setup + variables: + commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] + jobs: + - template: ../../templates/create-cluster.jobs.yaml + parameters: + name: ${{ parameters.name }} + displayName: ${{ parameters.displayName }} + clusterType: ${{ parameters.clusterType }} + clusterName: ${{ parameters.clusterName }}-$(commitID) + vmSize: ${{ parameters.vmSize }} + k8sVersion: ${{ parameters.k8sVersion }} + dependsOn: ${{ parameters.dependsOn }} + region: $(REGION_AKS_CLUSTER_TEST) + + - stage: ${{ parameters.name }} + displayName: E2E - ${{ parameters.displayName }} + dependsOn: + - setup + - ${{ parameters.clusterName }} + variables: + TAG: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.Tag'] ] + CURRENT_VERSION: $[ stagedependencies.containerize.check_tag.outputs['CurrentTagManifests.currentTagManifests'] ] + commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] + GOPATH: "$(Agent.TempDirectory)/go" # Go workspace path + GOBIN: "$(GOPATH)/bin" # Go binaries path + modulePath: 
"$(GOPATH)/src/github.com/Azure/azure-container-networking" + + IMAGE_REPO_PATH: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.imageRepositoryPath'] ] + AZURE_IPAM_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.azureIpamVersion'] ] + CNI_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.cniVersion'] ] + CNS_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.cnsVersion'] ] + IPAM_IMAGE_NAME_OVERRIDE: $(IMAGE_REPO_PATH)/azure-ipam + CNS_IMAGE_NAME_OVERRIDE: $(IMAGE_REPO_PATH)/cns + CNI_IMAGE_NAME_OVERRIDE: $(IMAGE_REPO_PATH)/cni + condition: and(succeeded(), eq(variables.TAG, variables.CURRENT_VERSION)) + jobs: + - job: ${{ parameters.name }} + displayName: Cilium Test Suite - (${{ parameters.name }}) + timeoutInMinutes: 120 + pool: + name: $(BUILD_POOL_NAME_DEFAULT) + demands: + - agent.os -equals Linux + - Role -equals $(CUSTOM_E2E_ROLE) + isCustom: true + type: linux + steps: + - checkout: azure-container-networking + - template: cilium-e2e.steps.yaml + parameters: + name: ${{ parameters.name }} + clusterName: ${{ parameters.clusterName }}-$(commitID) + scaleup: 100 + + - template: ../../cni/k8s-e2e/k8s-e2e.jobs.yaml + parameters: + sub: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + clusterName: ${{ parameters.clusterName }}-$(commitID) + os: ${{ parameters.os }} + cni: cilium + dependsOn: ${{ parameters.name }} + datapath: true + dns: true + portforward: true + service: true + + - job: failedE2ELogs + displayName: "Failure Logs" + dependsOn: + - ${{ parameters.name }} + - cni_${{ parameters.os }} + condition: failed() + pool: + type: linux + isCustom: true + name: $(BUILD_POOL_NAME_DEFAULT) + variables: + ob_outputDirectory: $(Build.ArtifactStagingDirectory)/out + steps: + - checkout: azure-container-networking + - template: ../../templates/log.steps.yaml + parameters: + clusterName: ${{ parameters.clusterName }}-$(commitID) + os: ${{ parameters.os }} + cni: cilium diff --git a/.pipelines/singletenancy/cilium/cilium-e2e.steps.yaml b/.pipelines/singletenancy/cilium/cilium-e2e.steps.yaml new file mode 100644 index 0000000000..eb94724ef1 --- /dev/null +++ b/.pipelines/singletenancy/cilium/cilium-e2e.steps.yaml @@ -0,0 +1,70 @@ +parameters: + name: "" + clusterName: "" + scaleup: "" + +steps: + + - bash: | + go version + go env + mkdir -p '$(GOBIN)' + mkdir -p '$(GOPATH)/pkg' + mkdir -p '$(modulePath)' + echo '##vso[task.prependpath]$(GOBIN)' + echo '##vso[task.prependpath]$(GOROOT)/bin' + name: "GoEnv" + displayName: "Set up the Go environment" + + - task: KubectlInstaller@0 + inputs: + kubectlVersion: latest + + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }} + ls -lah + pwd + kubectl cluster-info + kubectl get po -owide -A + echo "install Cilium ${CILIUM_VERSION_TAG}" + export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. 
-f1,2) + echo "installing files from ${DIR}" + echo "deploy Cilium ConfigMap" + kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config.yaml + # Passes Cilium image to daemonset and deployment + kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-agent/files + kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-operator/files + + envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/v${DIR}/cilium-agent/templates/daemonset.yaml | kubectl apply -f - + envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/v${DIR}/cilium-operator/templates/deployment.yaml | kubectl apply -f - + kubectl get po -owide -A + name: "installCilium" + displayName: "Install Cilium" + + - template: ../../templates/cilium-cli.yaml + + - script: | + echo "Start Azilium E2E Tests" + kubectl get po -owide -A + sudo -E env "PATH=$PATH" make test-load \ + SCALE_UP=32 OS_TYPE=linux VALIDATE_STATEFILE=true \ + INSTALL_CNS=true INSTALL_AZILIUM=true CLEANUP=true \ + AZURE_IPAM_VERSION=$(AZURE_IPAM_VERSION) CNS_VERSION=$(CNS_VERSION) CNI_VERSION=$(CNI_VERSION) \ + IPAM_IMAGE_NAME_OVERRIDE=$(IPAM_IMAGE_NAME_OVERRIDE) CNS_IMAGE_NAME_OVERRIDE=$(CNS_IMAGE_NAME_OVERRIDE) \ + CNI_IMAGE_NAME_OVERRIDE=$(CNI_IMAGE_NAME_OVERRIDE) + retryCountOnTaskFailure: 3 + name: "aziliumTest" + displayName: "Run Azilium E2E" + + - template: ../../templates/cilium-tests.yaml + parameters: + clusterName: ${{ parameters.clusterName }} + scaleup: ${{ parameters.scaleup }} + diff --git a/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e-job-template.yaml b/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e-job-template.yaml index 8d4daa617e..5302998d16 100644 --- a/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e-job-template.yaml +++ b/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e-job-template.yaml @@ -6,6 +6,8 @@ parameters: vmSize: "" k8sVersion: "" dependsOn: "" + os: "" + scaleup: "" stages: - stage: ${{ parameters.clusterName }} @@ -25,12 +27,13 @@ stages: clusterType: ${{ parameters.clusterType }} clusterName: ${{ parameters.clusterName }}-$(commitID) vmSize: ${{ parameters.vmSize }} + vmSizeWin: ${{ parameters.vmSize }} k8sVersion: ${{ parameters.k8sVersion }} dependsOn: ${{ parameters.dependsOn }} + os: ${{ parameters.os }} region: $(REGION_DUALSTACKOVERLAY_CLUSTER_TEST) # Dualstack has a specific region requirement - stage: ${{ parameters.name }} - condition: and( succeeded(), not(eq(dependencies.dualstackoverlaye2e.result,'SucceededWithIssues')) ) # Cant use parameters in dependencies displayName: E2E - ${{ parameters.displayName }} dependsOn: - setup @@ -44,8 +47,8 @@ stages: pool: name: $(BUILD_POOL_NAME_DEFAULT) jobs: - - job: ${{ parameters.name }}_linux - displayName: DualStack Overlay Test Suite | Linux - (${{ parameters.name }}) + - job: ${{ parameters.name }}_${{ parameters.os }} + displayName: DualStack Overlay Test Suite | ${{ parameters.os }} - (${{ parameters.name }}) timeoutInMinutes: 120 pool: name: $(BUILD_POOL_NAME_DEFAULT) @@ -57,99 +60,31 @@ stages: parameters: name: ${{ parameters.name }} clusterName: ${{ parameters.clusterName }}-$(commitID) - os: linux - scaleup: 100 - - - job: windows_nodepool - displayName: Add Windows Nodepool - dependsOn: ${{ parameters.name }}_linux - pool: - name: $(BUILD_POOL_NAME_DEFAULT) - demands: - - agent.os -equals Linux - - Role -equals $(CUSTOM_E2E_ROLE) - steps: - - task: AzureCLI@2 - inputs: - 
azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) - scriptLocation: "inlineScript" - scriptType: "bash" - addSpnToEnvironment: true - inlineScript: | - set -e - make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }}-$(commitID) - make -C ./hack/aks windows-nodepool-up AZCLI=az SUB=$(SUB_AZURE_NETWORK_AGENT_BUILD_VALIDATIONS) CLUSTER=${{ parameters.clusterName }}-$(commitID) VM_SIZE_WIN=${{ parameters.vmSize }} - echo "Windows nodes have been successfully added to DualStack Overlay Cluster" - kubectl cluster-info - kubectl get node -owide - kubectl get po -owide -A - name: "Add_Windows_Node" - displayName: "Add windows node" - - - job: ${{ parameters.name }}_windows - displayName: DualStack Overlay Test Suite | Windows - (${{ parameters.name }}) - timeoutInMinutes: 120 - dependsOn: windows_nodepool - pool: - name: $(BUILD_POOL_NAME_DEFAULT) - demands: - - agent.os -equals Linux - - Role -equals $(CUSTOM_E2E_ROLE) - steps: - - template: dualstackoverlay-e2e-step-template.yaml - parameters: - name: ${{ parameters.name }} - clusterName: ${{ parameters.clusterName }}-$(commitID) - os: windows - scaleup: 50 + os: ${{ parameters.os }} + scaleup: ${{ parameters.scaleup }} # 50 in windows or 100 in linux - template: ../../cni/k8s-e2e/k8s-e2e-job-template.yaml parameters: sub: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) clusterName: ${{ parameters.clusterName }}-$(commitID) - os: linux - dependsOn: ${{ parameters.name }}_windows - dualstack: true + os: ${{ parameters.os }} + dependsOn: ${{ parameters.name }}_${{ parameters.os }} + dualstack: ${{ eq(parameters.os, 'linux') }} # RUN IN LINUX not WINDOWS Currently broken for scenario and blocking releases, HNS is investigating. Covered by go test in E2E step template dns: true portforward: true + service: ${{ eq(parameters.os, 'linux') }} # RUN IN LINUX NOT WINDOWS Currently broken for scenario and blocking releases, HNS is investigating. 
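# A minimal sketch of how the compile-time expressions above expand (assuming
# the template is instantiated with os: windows):
#   dualstack: ${{ eq(parameters.os, 'linux') }}    # -> dualstack: false
#   hybridWin: ${{ eq(parameters.os, 'windows') }}  # -> hybridWin: true
# ${{ }} is resolved when the pipeline is compiled, before any job runs, so
# only template parameters (not runtime variables) can appear inside it.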
hostport: true - service: true - - - template: ../../cni/k8s-e2e/k8s-e2e-job-template.yaml - parameters: - sub: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) - clusterName: ${{ parameters.clusterName }}-$(commitID) - os: windows - dependsOn: cni_linux - dualstack: true - dns: true - portforward: true - service: true - hostport: true - hybridWin: true - - - job: failedE2ELogs_linux - displayName: "Linux Failure Logs" - dependsOn: - - ${{ parameters.name }}_linux - - cni_linux - condition: in(dependencies.${{ parameters.name }}_linux.result, 'Failed') - steps: - - template: ../../templates/log-template.yaml - parameters: - clusterName: ${{ parameters.clusterName }}-$(commitID) - os: linux - cni: cniv2 + hybridWin: ${{ eq(parameters.os, 'windows') }} - - job: failedE2ELogs_windows - displayName: "Windows Failure Logs" + - job: failedE2ELogs_${{ parameters.os }} + displayName: "${{ parameters.os }} Failure Logs" dependsOn: - - ${{ parameters.name }}_windows - - cni_windows - condition: in(dependencies.${{ parameters.name }}_windows.result, 'Failed') + - ${{ parameters.name }}_${{ parameters.os }} + - CNI_${{ parameters.os }} + condition: in(dependencies.${{ parameters.name }}_${{ parameters.os }}.result, 'Failed') steps: - template: ../../templates/log-template.yaml parameters: clusterName: ${{ parameters.clusterName }}-$(commitID) - os: windows + os: ${{ parameters.os }} cni: cniv2 diff --git a/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e-step-template.yaml b/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e-step-template.yaml index 9dcbe186db..71a99f9362 100644 --- a/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e-step-template.yaml +++ b/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e-step-template.yaml @@ -77,6 +77,7 @@ steps: scriptType: "bash" addSpnToEnvironment: true inlineScript: | + set -e cd test/integration/load # Scale Cluster Up/Down to confirm functioning CNS @@ -112,37 +113,4 @@ steps: displayName: "Windows DualStack Overlay Datapath Tests" retryCountOnTaskFailure: 3 - - task: AzureCLI@1 - inputs: - azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) - scriptLocation: "inlineScript" - scriptType: "bash" - addSpnToEnvironment: true - inlineScript: | - set -e - clusterName=${{ parameters.clusterName }} - echo "Restarting nodes" - for val in $(az vmss list -g MC_${clusterName}_${clusterName}_$(REGION_DUALSTACKOVERLAY_CLUSTER_TEST) --query "[].name" -o tsv); do - make -C ./hack/aks restart-vmss AZCLI=az CLUSTER=${clusterName} REGION=$(REGION_DUALSTACKOVERLAY_CLUSTER_TEST) VMSS_NAME=${val} - done - displayName: "Restart Nodes" - - - task: AzureCLI@1 - inputs: - azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) - scriptLocation: "inlineScript" - scriptType: "bash" - addSpnToEnvironment: true - inlineScript: | - cd test/integration/load - - # Scale Cluster Up/Down to confirm functioning CNS - ITERATIONS=2 SCALE_UP=${{ parameters.scaleup }} OS_TYPE=windows go test -count 1 -timeout 30m -tags load -run ^TestLoad$ - kubectl get pods -owide -A - - cd ../../.. 
-          echo "Validating Node Restart"
-          make test-validate-state OS_TYPE=windows RESTART_CASE=true CNI_TYPE=cniv2
-          kubectl delete ns load-test
-        displayName: "Validate Node Restart"
-        retryCountOnTaskFailure: 3
+      # Windows node restart and validation tests removed due to flakiness
diff --git a/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e.stages.yaml b/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e.stages.yaml
new file mode 100644
index 0000000000..d5c34b4ef6
--- /dev/null
+++ b/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e.stages.yaml
@@ -0,0 +1,104 @@
+parameters:
+  name: ""
+  displayName: ""
+  clusterType: ""
+  clusterName: ""
+  vmSize: ""
+  k8sVersion: ""
+  dependsOn: ""
+  os: ""
+  scaleup: ""
+
+stages:
+  - stage: ${{ parameters.clusterName }}
+    displayName: Create Cluster - ${{ parameters.displayName }}
+    dependsOn:
+      - ${{ parameters.dependsOn }}
+      - setup
+    pool:
+      isCustom: true
+      type: linux
+      name: $(BUILD_POOL_NAME_DEFAULT)
+    variables:
+      commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ]
+    jobs:
+      - template: ../../templates/create-cluster.jobs.yaml
+        parameters:
+          name: ${{ parameters.name }}
+          displayName: ${{ parameters.displayName }}
+          clusterType: ${{ parameters.clusterType }}
+          clusterName: ${{ parameters.clusterName }}-$(commitID)
+          vmSize: ${{ parameters.vmSize }}
+          vmSizeWin: ${{ parameters.vmSize }}
+          k8sVersion: ${{ parameters.k8sVersion }}
+          dependsOn: ${{ parameters.dependsOn }}
+          os: ${{ parameters.os }}
+          region: $(REGION_DUALSTACKOVERLAY_CLUSTER_TEST) # Dualstack has a specific region requirement
+
+  - stage: ${{ parameters.name }}
+    displayName: E2E - ${{ parameters.displayName }}
+    dependsOn:
+      - setup
+      - ${{ parameters.clusterName }}
+    variables:
+      GOPATH: "$(Agent.TempDirectory)/go" # Go workspace path
+      GOBIN: "$(GOPATH)/bin" # Go binaries path
+      modulePath: "$(GOPATH)/src/github.com/Azure/azure-container-networking"
+      commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ]
+
+      IMAGE_REPO_PATH: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.imageRepositoryPath'] ]
+      AZURE_IPAM_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.azureIpamVersion'] ]
+      CNI_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.cniVersion'] ]
+      CNS_VERSION: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.cnsVersion'] ]
+      CNS_IMAGE_NAME_OVERRIDE: $(IMAGE_REPO_PATH)/cns
+      CNI_IMAGE_NAME_OVERRIDE: $(IMAGE_REPO_PATH)/cni
+    jobs:
+      - job: ${{ parameters.name }}_${{ parameters.os }}
+        displayName: DualStack Overlay Test Suite | ${{ parameters.os }} - (${{ parameters.name }})
+        timeoutInMinutes: 120
+        pool:
+          name: $(BUILD_POOL_NAME_DEFAULT)
+          demands:
+            - agent.os -equals Linux
+            - Role -equals $(CUSTOM_E2E_ROLE)
+          isCustom: true
+          type: linux
+        steps:
+          - checkout: azure-container-networking
+          - template: dualstackoverlay-e2e.steps.yaml
+            parameters:
+              name: ${{ parameters.name }}
+              clusterName: ${{ parameters.clusterName }}-$(commitID)
+              os: ${{ parameters.os }}
+              scaleup: ${{ parameters.scaleup }} # 50 in windows or 100 in linux
+
+      - template: ../../cni/k8s-e2e/k8s-e2e.jobs.yaml
+        parameters:
+          sub: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
+          clusterName: ${{ parameters.clusterName }}-$(commitID)
+          os: ${{ parameters.os }}
+          dependsOn: ${{ parameters.name }}_${{ parameters.os }}
+          dualstack: ${{ eq(parameters.os, 'linux') }} # RUN IN LINUX, NOT WINDOWS. Currently broken for this scenario and blocking releases; HNS is investigating. Covered by go test in the E2E step template.
+          dns: true
+          portforward: true
+          service: ${{ eq(parameters.os, 'linux') }} # RUN IN LINUX, NOT WINDOWS. Currently broken for this scenario and blocking releases; HNS is investigating.
+          hostport: true
+          hybridWin: ${{ eq(parameters.os, 'windows') }}
+
+      - job: failedE2ELogs_${{ parameters.os }}
+        displayName: "${{ parameters.os }} Failure Logs"
+        dependsOn:
+          - ${{ parameters.name }}_${{ parameters.os }}
+          - CNI_${{ parameters.os }}
+        condition: in(dependencies.${{ parameters.name }}_${{ parameters.os }}.result, 'Failed')
+        pool:
+          type: linux
+        variables:
+          ob_outputDirectory: $(Build.ArtifactStagingDirectory)/out
+        steps:
+          - checkout: azure-container-networking
+          - template: ../../templates/log.steps.yaml
+            parameters:
+              clusterName: ${{ parameters.clusterName }}-$(commitID)
+              os: ${{ parameters.os }}
+              cni: cniv2
diff --git a/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e.steps.yaml b/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e.steps.yaml
new file mode 100644
index 0000000000..14dccf8f1a
--- /dev/null
+++ b/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e.steps.yaml
@@ -0,0 +1,124 @@
+parameters:
+  name: ""
+  clusterName: ""
+  cni: "dualstack"
+  os: ""
+  scaleup: ""
+
+steps:
+  - bash: |
+      go version
+      go env
+      mkdir -p '$(GOBIN)'
+      mkdir -p '$(GOPATH)/pkg'
+      mkdir -p '$(modulePath)'
+      echo '##vso[task.prependpath]$(GOBIN)'
+      echo '##vso[task.prependpath]$(GOROOT)/bin'
+    name: "GoEnv"
+    displayName: "Set up the Go environment"
+
+  - task: KubectlInstaller@0
+    inputs:
+      kubectlVersion: latest
+
+  - task: AzureCLI@2
+    inputs:
+      azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
+      scriptLocation: "inlineScript"
+      scriptType: "bash"
+      addSpnToEnvironment: true
+      inlineScript: |
+        set -e
+        make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }}
+    name: "kubeconfig"
+    displayName: "Set Kubeconfig"
+
+  - ${{ if eq(parameters.os, 'linux') }}:
+      - script: |
+          kubectl cluster-info
+          kubectl get node
+          kubectl get po -owide -A
+          sudo -E env "PATH=$PATH" make test-load \
+            SCALE_UP=32 OS_TYPE=linux CNI_TYPE=dualstack VALIDATE_STATEFILE=true VALIDATE_DUALSTACK=true \
+            INSTALL_CNS=true INSTALL_DUALSTACK_OVERLAY=true CLEANUP=true \
+            CNS_VERSION=$(CNS_VERSION) CNI_VERSION=$(CNI_VERSION) \
+            CNS_IMAGE_NAME_OVERRIDE=$(CNS_IMAGE_NAME_OVERRIDE) CNI_IMAGE_NAME_OVERRIDE=$(CNI_IMAGE_NAME_OVERRIDE)
+        retryCountOnTaskFailure: 3
+        name: "integrationTest"
+        displayName: "Run CNS Integration Tests on AKS DualStack Overlay"
+
+      - script: |
+          set -e
+          kubectl get po -owide -A
+          cd test/integration/datapath
+          echo "Dualstack Overlay Linux datapath IPv6 test"
+          go test -count=1 datapath_linux_test.go -timeout 3m -tags connection -run ^TestDatapathLinux$ -tags=connection,integration -isDualStack=true
+          echo "Dualstack Overlay Linux datapath IPv4 test"
+          go test -count=1 datapath_linux_test.go -timeout 3m -tags connection -run ^TestDatapathLinux$ -tags=connection,integration
+        retryCountOnTaskFailure: 3
+        name: "DualStack_Overlay_Linux_Tests"
+        displayName: "DualStack Overlay Linux Tests"
+
+      - task: AzureCLI@2
+        inputs:
+          azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
+          scriptLocation: "inlineScript"
+          scriptType: "bash"
+          addSpnToEnvironment: true
+          inlineScript: |
+            set -e
+            clusterName=${{ parameters.clusterName }}
+            echo "Restarting nodes"
+            for val in $(az vmss list -g MC_${clusterName}_${clusterName}_$(REGION_DUALSTACKOVERLAY_CLUSTER_TEST) --query "[].name" -o tsv); do
+              make -C ./hack/aks restart-vmss AZCLI=az CLUSTER=${clusterName} REGION=$(REGION_DUALSTACKOVERLAY_CLUSTER_TEST) VMSS_NAME=${val}
+            done
+        displayName: "Restart Nodes"
+
+      - task: AzureCLI@2
+        inputs:
+          azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
+          scriptLocation: "inlineScript"
+          scriptType: "bash"
+          addSpnToEnvironment: true
+          inlineScript: |
+            set -e
+            cd test/integration/load
+
+            # Scale Cluster Up/Down to confirm functioning CNS
+            ITERATIONS=2 SCALE_UP=${{ parameters.scaleup }} OS_TYPE=linux go test -count 1 -timeout 30m -tags load -run ^TestLoad$
+            kubectl get pods -owide -A
+
+            cd ../../..
+            echo "Validating Node Restart"
+            make test-validate-state OS_TYPE=linux RESTART_CASE=true CNI_TYPE=dualstack
+            kubectl delete ns load-test
+        displayName: "Validate Node Restart"
+        retryCountOnTaskFailure: 3
+
+  - ${{ if eq(parameters.os, 'windows') }}:
+      - script: |
+          nodeList=`kubectl get node -owide | grep Windows | awk '{print $1}'`
+          for node in $nodeList; do
+            taint=`kubectl describe node $node | grep Taints | awk '{print $2}'`
+            if [ "$taint" == "node.cloudprovider.kubernetes.io/uninitialized=true:NoSchedule" ]; then
+              kubectl taint nodes $node node.cloudprovider.kubernetes.io/uninitialized=true:NoSchedule-
+            fi
+          done
+          sudo -E env "PATH=$PATH" make test-load \
+            SCALE_UP=32 OS_TYPE=windows CNI_TYPE=dualstack VALIDATE_STATEFILE=true VALIDATE_DUALSTACK=true \
+            INSTALL_CNS=true INSTALL_DUALSTACK_OVERLAY=true CLEANUP=true \
+            CNS_VERSION=$(CNS_VERSION) CNI_VERSION=$(CNI_VERSION) \
+            CNS_IMAGE_NAME_OVERRIDE=$(CNS_IMAGE_NAME_OVERRIDE) CNI_IMAGE_NAME_OVERRIDE=$(CNI_IMAGE_NAME_OVERRIDE)
+        name: "WindowsDualStackOverlayControlPlaneScaleTests"
+        displayName: "Windows DualStack Overlay ControlPlane Scale Tests"
+        retryCountOnTaskFailure: 3
+
+      - script: |
+          echo "DualStack Overlay DataPath Test"
+          cd test/integration/datapath
+          sudo -E env "PATH=$PATH" go test -count=1 datapath_windows_test.go -timeout 3m -tags connection -restartKubeproxy true -run ^TestDatapathWin$
+        name: "WindowsDualStackOverlayDatapathTests"
+        displayName: "Windows DualStack Overlay Datapath Tests"
+        retryCountOnTaskFailure: 3
+
+      # Windows node restart and validation tests removed due to flakiness
diff --git a/.pipelines/template.trigger.jobs.yaml b/.pipelines/template.trigger.jobs.yaml
new file mode 100644
index 0000000000..ce7ba6354c
--- /dev/null
+++ b/.pipelines/template.trigger.jobs.yaml
@@ -0,0 +1,176 @@
+parameters:
+- name: mainRepoName
+  type: string
+
+- name: mainRepoRef
+  type: string
+
+- name: mainRepoCommit
+  type: string
+
+- name: mainRepoType
+  type: string
+
+
+jobs:
+- job: trigger
+  displayName: Test ACN Pull Request Changes
+  # 4 hour timeout
+  timeoutInMinutes: 240
+  steps:
+  - checkout: azure-container-networking
+    clean: true
+
+  - bash: |
+      set -e
+      [[ -n $SYSTEM_DEBUG ]] && [[ $SYSTEM_DEBUG =~ $IS_TRUE ]] && set -x || set +x
+
+      # Verify Branch Name
+      if [[ $TMPL_REPO_REF =~ $ACCEPTED_REPO_REFS ]]; then
+        echo >&2 "##[info]Verification passed."
+      else
+        echo >&2 "##[error]Verification failed (ref: "$TMPL_REPO_REF")."
+        exit 1
+      fi
+
+      # Verify Repo Name
+      if [[ $TMPL_REPO_NAME =~ $ACCEPTED_REPO_NAME ]]; then
+        echo >&2 "##[info]Verification passed."
+      else
+        echo >&2 "##[error]Verification failed (name: "$TMPL_REPO_NAME")."
+        exit 1
+      fi
+
+      # Verify Repo Type
+      if [[ $TMPL_REPO_TYPE =~ $ACCEPTED_REPO_TYPE ]]; then
+        echo >&2 "##[info]Verification passed."
+      else
+        echo >&2 "##[error]Verification failed (type: "$TMPL_REPO_TYPE")."
+ exit 1 + fi + displayName: "[Check]Primary Template Extends Master NOT Changes Under Test" + env: + TMPL_REPO_REF: '${{ parameters.mainRepoRef }}' + TMPL_REPO_NAME: '${{ parameters.mainRepoName }}' + TMPL_REPO_TYPE: '${{ parameters.mainRepoType }}' + ACCEPTED_REPO_REFS: '^refs/heads/feature/ob-onboard-0$' + ACCEPTED_REPO_NAME: '^Azure/azure-container-networking$' + ACCEPTED_REPO_TYPE: '^github$' + + - bash: | + set -e + [[ -n $SYSTEM_DEBUG ]] && [[ $SYSTEM_DEBUG =~ $IS_TRUE ]] && set -x || set +x + + # Get Build Reason + ACN_BUILD_REASON=$(echo -n "$BUILD_REASON") + + # Get ACN Git Ref + ACN_BUILD_AZURE_ACN_GIT_REF="$BUILD_SOURCEBRANCH" + + # Get Queuer + ACN_BUILD_QUEUEDBY="$BUILD_QUEUEDBY" + + # Get Source Branch + ACN_BUILD_SOURCE_BRANCH="$BUILD_SOURCEBRANCH" + + # Get System PR Queue Variables + ACN_BUILD_EXTRAPARAMETERS=$(jq -n \ + --arg PRID "$SYSTEM_PULLREQUEST_PULLREQUESTID" \ + --arg PRNUM "$SYSTEM_PULLREQUEST_PULLREQUESTNUMBER" \ + --arg MERGEDAT "$SYSTEM_PULLREQUEST_MERGEDAT" \ + --arg SRCBRANCH "$SYSTEM_PULLREQUEST_SOURCEBRANCH" \ + --arg TARGETBRANCH "$SYSTEM_PULLREQUEST_TARGETBRANCH" \ + --arg TARGETBRANCHNAME "$SYSTEM_PULLREQUEST_TARGETBRANCHNAME" \ + --arg SRCREPOURI "$SYSTEM_PULLREQUEST_SOURCEREPOSITORYURI" \ + --arg SRCCOMMITID "$SYSTEM_PULLREQUEST_SOURCECOMMITID" \ + --arg ISFORK "$SYSTEM_PULLREQUEST_ISFORK" \ + '{ + "pullRequestId": $PRID, + "pullRequestNumber": $PRNUM, + "mergedAt": $MERGEDAT, + "sourceBranch": $SRCBRANCH, + "targetBranch": $TARGETBRANCH, + "targetBranchName": $TARGETBRANCHNAME, + "sourceRepositoryUri": $SRCREPOURI, + "sourceCommitID": $SRCCOMMITID, + "isFork": $ISFORK + }') + echo "$ACN_BUILD_EXTRAPARAMETERS" | jq . + + ACN_BUILD_PARAMETERS=$(jq -rcn \ + --arg REASON "$ACN_BUILD_REASON" \ + --arg REF "$ACN_BUILD_AZURE_ACN_GIT_REF" \ + --arg BRANCH "$ACN_BUILD_SOURCE_BRANCH" \ + --arg QUEUEDBY "$ACN_BUILD_QUEUEDBY" \ + --argjson EXTRA "$ACN_BUILD_EXTRAPARAMETERS" \ + '{ "reason": $REASON, "ref": $REF, "queuedBy": $QUEUEDBY, "sourceBranch": $BRANCH, "extra": $EXTRA }') + echo "$ACN_BUILD_PARAMETERS" | jq . + + ACN_BUILD_PARAMETERS="TriggerDetails: $ACN_BUILD_PARAMETERS" + + echo >&2 "Triggering Pull Request build for ${BUILD_SOURCEBRANCH}." + echo >&2 "##vso[task.setvariable variable=templateParameters]$ACN_BUILD_PARAMETERS" + displayName: Retrieve PR Source Details + + - task: TriggerBuild@4 + name: trigger + displayName: Trigger Compliant Build + # 3 hour timeout + timeoutInMinutes: 180 + inputs: + definitionIsInCurrentTeamProject: false + tfsServer: $(ADO_COMPLIANT_BUILD_PROJECT_URI) + teamProject: $(ADO_COMPLIANT_BUILD_ORG) + buildDefinition: $(ADO_COMPLIANT_PIPELINE_ID) + queueBuildForUserThatTriggeredBuild: true + useSameBranch: false + # master + branchToUse: feature/ob-onboard-0 + authenticationMethod: $(ADO_AUTH_METHOD) + password: $(ADO_AUTHORIZATION) + storeInEnvironmentVariable: true + waitForQueuedBuildsToFinish: true + treatPartiallySucceededBuildAsSuccessful: false + downloadBuildArtifacts: false + failTaskIfBuildsNotSuccessful: true + # Refresh every 10 min + # Seconds + waitForQueuedBuildsToFinishRefreshTime: 600 + ignoreSslCertificateErrors: false + templateParameters: $(templateParameters) + +## Report Build Results +# - task: GitHubComment@0 +# displayName: "Post PR Comment" +# condition: canceled() +# inputs: +# gitHubConnection: $(ADO_AUTHORIZATION) +# id: '$(System.PullRequest.PullRequestId)' +# comment: | +# 'The build (id: "$(TRIGGERED_BUILDID)") was canceled.' 
+# env: +# TRIGGERED_BUILDID: $(TriggeredBuildIds) +# +# - task: GitHubComment@0 +# displayName: "Post PR Comment" +# condition: failed() +# inputs: +# gitHubConnection: $(ADO_AUTHORIZATION) +# repositoryName: '$(Build.Repository.Name)' +# id: '$(System.PullRequest.PullRequestId)' +# comment: | +# 'The build (id: "$(TRIGGERED_BUILDID)") failed. Please verify your changes.' +# env: +# TRIGGERED_BUILDID: $(TriggeredBuildIds) +# +# - task: GitHubComment@0 +# displayName: "Post PR Comment" +# condition: succeeded() +# inputs: +# gitHubConnection: $(ADO_AUTHORIZATION) +# repositoryName: '$(Build.Repository.Name)' +# id: '$(System.PullRequest.PullRequestId)' +# comment: | +# 'The build (id: "$(TRIGGERED_BUILDID)") succeeded!' +# env: +# TRIGGERED_BUILDID: $(TriggeredBuildIds) diff --git a/.pipelines/templates/add-windows-nodepool-job.yaml b/.pipelines/templates/add-windows-nodepool-job.yaml new file mode 100644 index 0000000000..fe700371b4 --- /dev/null +++ b/.pipelines/templates/add-windows-nodepool-job.yaml @@ -0,0 +1,63 @@ +parameters: + depend: "" + clusterName: "" # unique identifier + vmSize: "" + +jobs: +- job: windows_nodepool + displayName: Add Windows Nodepool + dependsOn: ${{ parameters.depend }} + pool: + name: $(BUILD_POOL_NAME_DEFAULT) + demands: + - agent.os -equals Linux + - Role -equals $(CUSTOM_E2E_ROLE) + timeoutInMinutes: 30 + steps: + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + + windows_nodepool=$(az aks nodepool list \ + --resource-group ${{ parameters.clusterName }} \ + --cluster-name ${{ parameters.clusterName }} \ + --query "[?osType=='Windows']" \ + --output tsv) + + if [ -z "$windows_nodepool" ]; then + echo "No Windows node pool found in the AKS cluster." + + # wait for cluster to update + while true; do + cluster_state=$(az aks show \ + --name "${{ parameters.clusterName }}" \ + --resource-group "${{ parameters.clusterName }}" \ + --query provisioningState) + + if echo "$cluster_state" | grep -q "Updating"; then + echo "Cluster is updating. Sleeping for 30 seconds..." + sleep 30 + else + break + fi + done + # cluster state is always set and visible outside the loop + echo "Cluster state is: $cluster_state" + + make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }} + make -C ./hack/aks windows-nodepool-up AZCLI=az SUB=$(SUB_AZURE_NETWORK_AGENT_BUILD_VALIDATIONS) CLUSTER=${{ parameters.clusterName }} VM_SIZE_WIN=${{ parameters.vmSize }} + echo "Windows node was successfully added" + kubectl cluster-info + kubectl get node -owide + kubectl get po -owide -A + else + echo "Windows node pool already exists in the AKS cluster." 
+ fi + name: "Add_Windows_Node" + displayName: "Add windows node to cluster" + retryCountOnTaskFailure: 5 diff --git a/.pipelines/templates/add-windows-nodepool.jobs.yaml b/.pipelines/templates/add-windows-nodepool.jobs.yaml new file mode 100644 index 0000000000..27f32ec447 --- /dev/null +++ b/.pipelines/templates/add-windows-nodepool.jobs.yaml @@ -0,0 +1,66 @@ +parameters: + depend: "" + clusterName: "" # unique identifier + vmSize: "" + +jobs: +- job: windows_nodepool + displayName: Add Windows Nodepool + dependsOn: ${{ parameters.depend }} + pool: + isCustom: true + type: linux + name: $(BUILD_POOL_NAME_DEFAULT) + demands: + - agent.os -equals Linux + - Role -equals $(CUSTOM_E2E_ROLE) + timeoutInMinutes: 30 + steps: + - checkout: azure-container-networking + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + + windows_nodepool=$(az aks nodepool list \ + --resource-group ${{ parameters.clusterName }} \ + --cluster-name ${{ parameters.clusterName }} \ + --query "[?osType=='Windows']" \ + --output tsv) + + if [ -z "$windows_nodepool" ]; then + echo "No Windows node pool found in the AKS cluster." + + # wait for cluster to update + while true; do + cluster_state=$(az aks show \ + --name "${{ parameters.clusterName }}" \ + --resource-group "${{ parameters.clusterName }}" \ + --query provisioningState) + + if echo "$cluster_state" | grep -q "Updating"; then + echo "Cluster is updating. Sleeping for 30 seconds..." + sleep 30 + else + break + fi + done + # cluster state is always set and visible outside the loop + echo "Cluster state is: $cluster_state" + + make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }} + make -C ./hack/aks windows-nodepool-up AZCLI=az SUB=$(SUB_AZURE_NETWORK_AGENT_BUILD_VALIDATIONS) CLUSTER=${{ parameters.clusterName }} VM_SIZE_WIN=${{ parameters.vmSize }} + echo "Windows node was successfully added" + kubectl cluster-info + kubectl get node -owide + kubectl get po -owide -A + else + echo "Windows node pool already exists in the AKS cluster." + fi + name: "Add_Windows_Node" + displayName: "Add windows node to cluster" + retryCountOnTaskFailure: 5 diff --git a/.pipelines/templates/cilium-cli.yaml b/.pipelines/templates/cilium-cli.yaml index 0098ac758f..2c3034365d 100644 --- a/.pipelines/templates/cilium-cli.yaml +++ b/.pipelines/templates/cilium-cli.yaml @@ -1,10 +1,11 @@ steps: - script: | + set -e echo "install cilium CLI" - if [[ ${CILIUM_VERSION_TAG} =~ ^1.1[1-3].[0-9]{1,2} ]]; then + if [[ ${CILIUM_VERSION_TAG#v} =~ ^1.1[1-3].[0-9]{1,2}|1.1[1-3].[0-9]{1,2}-[0-9]{1,6} ]]; then echo "Cilium Agent Version ${BASH_REMATCH[0]}" CILIUM_CLI_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/cilium-cli/main/stable-v0.14.txt) - elif [[ ${CILIUM_VERSION_TAG} =~ ^1.14.[0-9]{1,2} ]]; then + elif [[ ${CILIUM_VERSION_TAG#v} =~ ^1.1[1-4].[0-9]{1,2}|1.1[1-4].[0-9]{1,2}-[0-9]{1,6} ]]; then echo "Cilium Agent Version ${BASH_REMATCH[0]}" CILIUM_CLI_VERSION=v0.15.22 else @@ -17,6 +18,9 @@ steps: sha256sum --check cilium-linux-${CLI_ARCH}.tar.gz.sha256sum sudo tar xzvfC cilium-linux-${CLI_ARCH}.tar.gz /usr/local/bin rm cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum} + + # We can ignore failures from cilium status as cilium agent will not be in ready status until CNS is installed. 
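+      # NOTE: `set +e` undoes the `set -e` at the top of this script, so the two commands
+      # below are best-effort and a not-yet-ready agent cannot fail the step.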
+ set +e cilium status cilium version name: "installCiliumCLI" diff --git a/.pipelines/templates/cilium-connectivity-tests.yaml b/.pipelines/templates/cilium-connectivity-tests.yaml new file mode 100644 index 0000000000..2baadca7b0 --- /dev/null +++ b/.pipelines/templates/cilium-connectivity-tests.yaml @@ -0,0 +1,13 @@ +parameters: + skipTests: '!pod-to-pod-encryption,!node-to-node-encryption,!check-log-errors,!no-unexpected-packet-drops,!to-fqdns' + +steps: + - script: | + if ! cilium connectivity test --connect-timeout 4s --request-timeout 30s --test ${{ parameters.skipTests }} --force-deploy + then + echo "--- Connectivity Tests failed! ---" + echo "--- Running Connectivity Tests with --debug ---" + cilium connectivity test --debug --connect-timeout 4s --request-timeout 30s --test ${{ parameters.skipTests }} --force-deploy + fi + name: "CiliumConnectivityTests" + displayName: "Run Cilium Connectivity Tests" diff --git a/.pipelines/templates/cilium-mtu-check.yaml b/.pipelines/templates/cilium-mtu-check.yaml new file mode 100644 index 0000000000..6cf1967493 --- /dev/null +++ b/.pipelines/templates/cilium-mtu-check.yaml @@ -0,0 +1,7 @@ +steps: + - script: | + cd hack/scripts + chmod +x cilium-mtu-validation.sh + ./cilium-mtu-validation.sh + name: "CiliumMTUValidation" + displayName: "Run Cilium MTU Validation" \ No newline at end of file diff --git a/.pipelines/templates/cilium-tests.yaml b/.pipelines/templates/cilium-tests.yaml new file mode 100644 index 0000000000..34c5cadcf7 --- /dev/null +++ b/.pipelines/templates/cilium-tests.yaml @@ -0,0 +1,76 @@ +steps: + - script: | + kubectl get po -owide -A + echo "Waiting < 2 minutes for cilium to be ready" + # Ensure Cilium is ready Xm\Xs + cilium status --wait --wait-duration 2m + kubectl get crd -A + retryCountOnTaskFailure: 3 + name: "CiliumStatus" + displayName: "Cilium Status" + + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + kubectl get po -owide -A + clusterName=${{ parameters.clusterName }} + echo "Restarting nodes" + for val in $(az vmss list -g MC_${clusterName}_${clusterName}_$(REGION_AKS_CLUSTER_TEST) --query "[].name" -o tsv); do + make -C ./hack/aks restart-vmss AZCLI=az CLUSTER=${clusterName} REGION=$(REGION_AKS_CLUSTER_TEST) VMSS_NAME=${val} + done + displayName: "Restart Nodes" + + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + cd test/integration/load + + # Scale Cluster Up/Down to confirm functioning CNS + ITERATIONS=2 SCALE_UP=${{ parameters.scaleup }} OS_TYPE=linux go test -count 1 -timeout 30m -tags load -run ^TestLoad$ + kubectl get pods -owide -A + + cd ../../.. 
+        echo "Validating Node Restart"
+        make test-validate-state OS_TYPE=linux RESTART_CASE=true
+        kubectl delete ns load-test
+    displayName: "Validate Node Restart"
+    retryCountOnTaskFailure: 3
+
+  - template: ./cilium-connectivity-tests.yaml
+
+  - script: |
+      set -e
+      echo "validate pod IP assignment and check systemd-networkd restart"
+      kubectl get pod -owide -A
+      make test-validate-state
+    name: "validatePods"
+    displayName: "Validate Pods"
+
+  - script: |
+      echo "Run wireserver and metadata connectivity Tests"
+      bash test/network/wireserver_metadata_test.sh
+    retryCountOnTaskFailure: 3
+    name: "WireserverMetadataConnectivityTests"
+    displayName: "Run Wireserver and Metadata Connectivity Tests"
+
+  - script: |
+      cd hack/scripts
+      chmod +x async-delete-test.sh
+      ./async-delete-test.sh
+      if ! [ -z $(kubectl -n kube-system get ds azure-cns | grep non-existing) ]; then
+        kubectl -n kube-system patch daemonset azure-cns --type json -p='[{"op": "remove", "path": "/spec/template/spec/nodeSelector/non-existing"}]'
+      fi
+    name: "testAsyncDelete"
+    displayName: "Verify Async Delete when CNS is down"
+
+  - template: ./cilium-mtu-check.yaml
diff --git a/.pipelines/templates/create-cluster-steps.yaml b/.pipelines/templates/create-cluster-steps.yaml
new file mode 100644
index 0000000000..5863b6a1f9
--- /dev/null
+++ b/.pipelines/templates/create-cluster-steps.yaml
@@ -0,0 +1,56 @@
+parameters:
+  name: ""
+  displayName: ""
+  clusterType: ""
+  clusterName: "" # Recommended to pass in unique identifier
+  vmSize: ""
+  vmSizeWin: ""
+  k8sVersion: ""
+  osSkuWin: "Windows2022" # Currently we only support Windows2022
+  dependsOn: ""
+  region: ""
+  os: linux
+  LTS: false # assumed default; referenced by the make invocation below
+
+
+steps:
+  - task: AzureCLI@2
+    inputs:
+      azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
+      scriptLocation: "inlineScript"
+      scriptType: "bash"
+      addSpnToEnvironment: true
+      inlineScript: |
+        set -e
+        echo "Check az version"
+        az version
+        if ${{ lower(contains(parameters.clusterType, 'dualstack')) }}
+        then
+          echo "Install az cli extension preview"
+          az extension add --name aks-preview
+          az extension update --name aks-preview
+        fi
+
+        if ! [ -z ${{ parameters.k8sVersion }} ]; then
+          echo "Set K8S_VER with ${{ parameters.k8sVersion }}"
+          export K8S_VER=${{ parameters.k8sVersion }}
+        fi
+
+        if !
[ -z ${K8S_VERSION} ]; then + echo "Default k8s version, $(make -C ./hack/aks vars | grep K8S | cut -d'=' -f 2), is manually set to ${K8S_VERSION}" + export K8S_VER=${K8S_VERSION} + fi + + mkdir -p ~/.kube/ + make -C ./hack/aks azcfg AZCLI=az REGION=${{ parameters.region }} + + make -C ./hack/aks ${{ parameters.clusterType }} \ + AZCLI=az REGION=${{ parameters.region }} SUB=$(SUB_AZURE_NETWORK_AGENT_BUILD_VALIDATIONS) \ + CLUSTER=${{ parameters.clusterName }} \ + VM_SIZE=${{ parameters.vmSize }} VM_SIZE_WIN=${{ parameters.vmSizeWin }} \ + OS_SKU_WIN=${{ parameters.osSkuWin }} OS=${{ parameters.os }} \ + LTS=${{ lower(parameters.LTS) }} + + echo "Cluster successfully created" + displayName: Cluster - ${{ parameters.clusterType }} + continueOnError: ${{ contains(parameters.clusterType, 'dualstack') }} diff --git a/.pipelines/templates/create-cluster.jobs.yaml b/.pipelines/templates/create-cluster.jobs.yaml new file mode 100644 index 0000000000..4ef2f19ee1 --- /dev/null +++ b/.pipelines/templates/create-cluster.jobs.yaml @@ -0,0 +1,60 @@ +parameters: + name: "" + displayName: "" + clusterType: "" + clusterName: "" # Recommended to pass in unique identifier + vmSize: "" + vmSizeWin: "" + k8sVersion: "" + osSkuWin: "Windows2022" # Currently we only support Windows2022 + dependsOn: "" + region: "" + os: linux + +jobs: + - job: ${{ parameters.name }} + displayName: Cluster - ${{ parameters.name }} + pool: + name: $(BUILD_POOL_NAME_DEFAULT) + isCustom: true + type: linux + steps: + - checkout: azure-container-networking + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + echo "Check az version" + az version + # if ${{ lower(contains(parameters.clusterType, 'dualstack')) }} + # then + # echo "Install az cli extension preview" + # az extension add --name aks-preview + # az extension update --name aks-preview + # fi + + + if ! [ -z ${K8S_VERSION} ]; then + echo "Default k8s version, $(make -C ./hack/aks vars | grep K8S | cut -d'=' -f 2), is manually set to ${K8S_VERSION}" + export K8S_VER=${K8S_VERSION} + fi + + mkdir -p ~/.kube/ + # make -C ./hack/aks azcfg AZCLI=az REGION=${{ parameters.region }} + + #Temp fix for azcli aks preview bug + az extension add --name aks-preview --version 14.0.0b3 + + make -C ./hack/aks ${{ parameters.clusterType }} \ + AZCLI=az REGION=${{ parameters.region }} SUB=$(SUB_AZURE_NETWORK_AGENT_BUILD_VALIDATIONS) \ + CLUSTER=${{ parameters.clusterName }} \ + VM_SIZE=${{ parameters.vmSize }} VM_SIZE_WIN=${{ parameters.vmSizeWin }} \ + OS_SKU_WIN=${{ parameters.osSkuWin }} OS=${{ parameters.os }} + + echo "Cluster successfully created" + displayName: Cluster - ${{ parameters.clusterType }} + continueOnError: ${{ contains(parameters.clusterType, 'dualstack') }} diff --git a/.pipelines/templates/create-cluster.yaml b/.pipelines/templates/create-cluster.yaml index 534c486a2d..60573922a3 100644 --- a/.pipelines/templates/create-cluster.yaml +++ b/.pipelines/templates/create-cluster.yaml @@ -31,6 +31,12 @@ jobs: az extension add --name aks-preview az extension update --name aks-preview fi + + if ! 
[ -z ${K8S_VERSION} ]; then
+            echo "Default k8s version, $(make -C ./hack/aks vars | grep K8S | cut -d'=' -f 2), is manually set to ${K8S_VERSION}"
+            export K8S_VER=${K8S_VERSION}
+          fi
+
           mkdir -p ~/.kube/
           make -C ./hack/aks azcfg AZCLI=az REGION=${{ parameters.region }}
@@ -38,8 +44,7 @@ jobs:
             AZCLI=az REGION=${{ parameters.region }} SUB=$(SUB_AZURE_NETWORK_AGENT_BUILD_VALIDATIONS) \
             CLUSTER=${{ parameters.clusterName }} \
             VM_SIZE=${{ parameters.vmSize }} VM_SIZE_WIN=${{ parameters.vmSizeWin }} \
-            OS_SKU_WIN=${{ parameters.osSkuWin }} OS=${{parameters.os}} \
-            WINDOWS_USERNAME=${WINDOWS_USERNAME} WINDOWS_PASSWORD=${WINDOWS_PASSWORD}
+            OS_SKU_WIN=${{ parameters.osSkuWin }} OS=${{ parameters.os }}
 
           echo "Cluster successfully created"
         displayName: Cluster - ${{ parameters.clusterType }}
diff --git a/.pipelines/templates/create-multitenant-cluster.steps.yaml b/.pipelines/templates/create-multitenant-cluster.steps.yaml
new file mode 100644
index 0000000000..27bbcb9047
--- /dev/null
+++ b/.pipelines/templates/create-multitenant-cluster.steps.yaml
@@ -0,0 +1,52 @@
+parameters:
+- name: region
+  type: string
+
+- name: project_select
+  values:
+  - cni
+
+- name: project_config
+  default:
+  - swift-byocni-nokubeproxy-up
+  - overlay-byocni-nokubeproxy-up
+  - dualstack-byocni-nokubeproxy-up
+  - overlay-byocni-nokubeproxy-up
+  - overlay-byocni-up
+  - swift-byocni-up
+  - vnetscale-swift-byocni-up
+  - linux-cniv1-up
+  - windows-cniv1-up
+  - dualstack-overlay-byocni-up
+  - swiftv2-multitenancy-cluster-up
+
+steps:
+- task: AzureCLI@2
+  inputs:
+    azureSubscription: $(ACN_TEST_SERVICE_CONNECTION)
+    scriptLocation: "inlineScript"
+    scriptType: "bash"
+    workingDirectory: $(ACN_DIR)
+    addSpnToEnvironment: true
+    inlineScript: |
+      set -e
+      echo "Check az version"
+      az version
+      if ${{ lower(contains(parameters.clusterType, 'dualstack')) }}
+      then
+        echo "Install az cli extension preview"
+        az extension add --name aks-preview
+        az extension update --name aks-preview
+      fi
+      mkdir -p ~/.kube/
+      make -C ./hack/aks azcfg AZCLI=az REGION=${{ parameters.region }}
+
+      make -C ./hack/aks ${{ parameters.clusterType }} \
+        AZCLI=az REGION=${{ parameters.region }} SUB=$(SUB_AZURE_NETWORK_AGENT_TEST) \
+        CLUSTER=${{ parameters.clusterName }} \
+        VM_SIZE=${{ parameters.vmSize }} OS=${{ parameters.os }}
+
+      echo "Cluster successfully created"
+    displayName: Multitenant Cluster - ${{ parameters.clusterType }}
+    continueOnError: ${{ parameters.continueOnError }}
+
diff --git a/.pipelines/templates/delete-cluster.steps.yaml b/.pipelines/templates/delete-cluster.steps.yaml
new file mode 100644
index 0000000000..516b782167
--- /dev/null
+++ b/.pipelines/templates/delete-cluster.steps.yaml
@@ -0,0 +1,25 @@
+parameters:
+  name: ""
+  clusterName: ""
+  region: ""
+  svcConn: "" # required by azureSubscription below
+  sub: "" # required by the `make down` invocation below
+
+steps:
+  - task: AzureCLI@2
+    inputs:
+      azureSubscription: ${{ parameters.svcConn }}
+      scriptLocation: "inlineScript"
+      scriptType: "bash"
+      addSpnToEnvironment: true
+      inlineScript: |
+        echo "Deleting cluster"
+        # make -C ./hack/aks azcfg AZCLI=az REGION=${{ parameters.region }}
+        # Temp fix for azcli aks preview bug
+        az extension add --name aks-preview --version 14.0.0b3
+        make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }}
+        make -C ./hack/aks down AZCLI=az REGION=${{ parameters.region }} SUB=${{ parameters.sub }} CLUSTER=${{ parameters.clusterName }}
+        echo "Cluster and resources down"
+    name: delete
+    displayName: Delete - ${{ parameters.name }}
+    condition: always()
diff --git a/.pipelines/templates/delete-cluster.yaml
b/.pipelines/templates/delete-cluster.yaml index bcb84be00a..66e0b62d49 100644 --- a/.pipelines/templates/delete-cluster.yaml +++ b/.pipelines/templates/delete-cluster.yaml @@ -6,7 +6,7 @@ parameters: steps: - task: AzureCLI@2 inputs: - azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + azureSubscription: ${{ parameters.svcConn }} scriptLocation: "inlineScript" scriptType: "bash" addSpnToEnvironment: true @@ -14,7 +14,8 @@ steps: echo "Deleting cluster" make -C ./hack/aks azcfg AZCLI=az REGION=${{ parameters.region }} make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }} - make -C ./hack/aks down AZCLI=az REGION=${{ parameters.region }} SUB=$(SUB_AZURE_NETWORK_AGENT_BUILD_VALIDATIONS) CLUSTER=${{ parameters.clusterName }} + make -C ./hack/aks down AZCLI=az REGION=${{ parameters.region }} SUB=${{ parameters.sub }} CLUSTER=${{ parameters.clusterName }} echo "Cluster and resources down" name: delete displayName: Delete - ${{ parameters.name }} + condition: always() diff --git a/.pipelines/templates/k8s-yaml-test.yaml b/.pipelines/templates/k8s-yaml-test.yaml new file mode 100644 index 0000000000..5045c141e9 --- /dev/null +++ b/.pipelines/templates/k8s-yaml-test.yaml @@ -0,0 +1,34 @@ +parameters: + clusterName: "" + +steps: + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }} + deployments="cilium-operator" + daemonsets="cilium|azure-cns|ip-masq" + + exit=0 + for ds in $(kubectl get ds -n kube-system | grep -E $daemonsets | awk '{print $1}'); do + if ! kubectl rollout restart ds -n kube-system $ds --warnings-as-errors=true; then + exit=1 + fi + done + + for deploy in $(kubectl get deploy -n kube-system | grep -E $deployments | awk '{print $1}'); do + if ! kubectl rollout restart deploy -n kube-system $deploy --warnings-as-errors=true; then + exit=1 + fi + done + + if [ ${exit} == 1 ]; then + echo "Warnings within maintained daemonsets/deployment need to be resolved." + exit 1 + fi + name: "k8syaml" + displayName: "Check k8s YAML" diff --git a/.pipelines/templates/log-check-template.yaml b/.pipelines/templates/log-check-template.yaml new file mode 100644 index 0000000000..f7bed4d6fd --- /dev/null +++ b/.pipelines/templates/log-check-template.yaml @@ -0,0 +1,23 @@ +parameters: + clusterName: "" + podLabel: "" + logGrep: "" + +steps: + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }} + + if ! [ -z "$(kubectl logs -n kube-system -l ${{ parameters.podLabel }} --tail=-1 | grep ${{ parameters.logGrep }})" ]; then + kubectl logs -n kube-system -l ${{ parameters.podLabel }} --tail=-1 | grep ${{ parameters.logGrep }} + echo "Logs found with ${{ parameters.logGrep }}" + exit 1 + fi + # Leaving "name:" blank as this template could be called multiple times in a single job with the same parameters. 
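+    # (Azure DevOps requires a step's name to be unique within a job; displayName carries no such constraint.)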
+    displayName: "Check pod with ${{ parameters.podLabel }} label for ${{ parameters.logGrep }}"
+    condition: always()
diff --git a/.pipelines/templates/log-template.yaml b/.pipelines/templates/log-template.yaml
index fee969889f..17a9d25ab7 100644
--- a/.pipelines/templates/log-template.yaml
+++ b/.pipelines/templates/log-template.yaml
@@ -270,6 +270,6 @@ steps:
   - publish: $(System.DefaultWorkingDirectory)/${{ parameters.clusterName }}_${{ parameters.logType }}_Attempt_#$(System.StageAttempt)
     condition: always()
-    artifact: ${{ parameters.clusterName }}_${{ parameters.os }}${{ parameters.jobName }}_Attempt_#$(System.StageAttempt)
+    artifact: ${{ parameters.clusterName }}_${{ parameters.logType }}_${{ parameters.os }}${{ parameters.jobName }}_Attempt_#$(System.StageAttempt)
     name: acnLogs_${{ parameters.logType }}
     displayName: Publish Cluster logs
diff --git a/.pipelines/templates/log.steps.yaml b/.pipelines/templates/log.steps.yaml
new file mode 100644
index 0000000000..5451fe218b
--- /dev/null
+++ b/.pipelines/templates/log.steps.yaml
@@ -0,0 +1,279 @@
+# -- Captures --
+# CNS, CNI, and Cilium Logs
+# CNS, CNI, and Cilium State files
+# Daemonset and Deployment Images
+# Node Status
+# kube-system namespace logs
+# Non-ready pods on failure
+# -- Controlled by --
+# CNI and OS | ${{ parameters.cni }} and ${{ parameters.os }}
+# CNS ConfigMap | "ManageEndpointState"
+# -- Generates --
+# Logs on a per-node basis
+# Outputs a singular unique artifact per template call | ${{ parameters.clusterName }}_${{ parameters.jobName }}_Attempt_#$(System.StageAttempt)
+# Each artifact is divided into sub-directories
+# -- Intent --
+# Provide thorough debugging information to understand why CNI test scenarios are failing without having to blindly reproduce
+
+parameters:
+  clusterName: ""
+  logType: "failure"
+  os: ""
+  cni: ""
+  jobName: "FailedE2ELogs"
+
+steps:
+  - task: KubectlInstaller@0
+    inputs:
+      kubectlVersion: latest
+
+  - task: AzureCLI@2
+    inputs:
+      azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
+      scriptLocation: "inlineScript"
+      scriptType: "bash"
+      addSpnToEnvironment: true
+      inlineScript: |
+        make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }}
+
+        acnLogs=$(System.DefaultWorkingDirectory)/${{ parameters.clusterName }}_${{ parameters.logType }}_Attempt_#$(System.StageAttempt)
+        mkdir -p $acnLogs
+        echo "Root Directory created: $acnLogs"
+        echo "##vso[task.setvariable variable=acnLogs]$acnLogs"
+
+        kubectl get pods -n kube-system -owide
+        podList=`kubectl get pods -n kube-system --no-headers | awk '{print $1}'`
+        mkdir -p $acnLogs/kube-system
+        echo "Directory created: $acnLogs/kube-system"
+        for pod in $podList; do
+          kubectl logs -n kube-system $pod > $acnLogs/kube-system/$pod-logs.txt
+          echo "$acnLogs/kube-system/$pod-logs.txt"
+        done
+    displayName: Kube-System Logs
+    condition: always()
+    continueOnError: true # Tends to fail after node restart due to pods still restarting. This should not block other tests or logs from running.
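+  # The ##vso[task.setvariable] logging command above publishes $(acnLogs), which the log-collection steps below reuse as their root output directory.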
+
+  - bash: |
+      kubectl describe nodes
+    displayName: Node Status
+    condition: always()
+
+  - bash: |
+      kubectl get ds -A -owide
+      echo "Capture daemonset images being used"
+      dsList=`kubectl get ds -A | grep kube-system | awk '{print $2}'`
+      for ds in $dsList; do
+        echo "$ds"
+        kubectl describe ds -n kube-system $ds | grep Image
+      done
+    displayName: Daemonset Images
+    condition: always()
+
+  - bash: |
+      kubectl get deploy -A -owide
+      echo "Capture deployment images being used"
+      deployList=`kubectl get deploy -A | grep kube-system | awk '{print $2}'`
+      for deploy in $deployList; do
+        echo "$deploy"
+        kubectl describe deploy -n kube-system $deploy | grep Image
+      done
+    displayName: Deployment Images
+    condition: always()
+
+  - ${{ if eq(parameters.logType, 'failure') }}:
+      - bash: |
+          kubectl get pods -n kube-system -o custom-columns=NAME:.metadata.name,STATUS:.status.phase,NODE:.spec.nodeName
+          podList=`kubectl get pods -n kube-system -o custom-columns=NAME:.metadata.name,STATUS:.status.phase,NODE:.spec.nodeName --no-headers | grep -v Running | awk '{print $1}'`
+          array=($podList)
+
+          if [ -z ${array[0]} ]; then
+            echo "There are no kube-system pods in a non-ready state."
+          else
+            mkdir -p $acnLogs/${{ parameters.os }}non-ready
+            echo "Directory created: $acnLogs/${{ parameters.os }}non-ready"
+            echo "Capturing failed pods"
+            for pod in $podList; do
+              kubectl describe pod -n kube-system $pod > $acnLogs/${{ parameters.os }}non-ready/$pod.txt
+              echo "$acnLogs/${{ parameters.os }}non-ready/$pod.txt"
+            done
+          fi
+        displayName: Failure Logs
+        condition: always()
+
+  - ${{ if eq(parameters.os, 'linux') }}:
+      - bash: |
+          echo "Ensure that privileged pod exists on each node"
+          kubectl apply -f test/integration/manifests/load/privileged-daemonset.yaml
+          kubectl rollout status ds -n kube-system privileged-daemonset
+
+          echo "------ Log work ------"
+          kubectl get pods -n kube-system -l os=linux,app=privileged-daemonset -owide
+          echo "Capture logs from each linux node. Files located in var/log/*."
+          podList=`kubectl get pods -n kube-system -l os=linux,app=privileged-daemonset -owide --no-headers | awk '{print $1}'`
+          for pod in $podList; do
+            index=0
+            files=(`kubectl exec -i -n kube-system $pod -- find ./var/log -maxdepth 2 -name "azure-*" -type f`)
+            fileBase=(`kubectl exec -i -n kube-system $pod -- find ./var/log -maxdepth 2 -name "azure-*" -type f -printf "%f\n"`)
+
+            node=`kubectl get pod -n kube-system $pod -o custom-columns=NODE:.spec.nodeName,NAME:.metadata.name --no-headers | awk '{print $1}'`
+            mkdir -p $(acnLogs)/"$node"_logs/log-output/
+            echo "Directory created: $(acnLogs)/"$node"_logs/"
+
+            for file in ${files[*]}; do
+              kubectl exec -i -n kube-system $pod -- cat $file > $(acnLogs)/"$node"_logs/log-output/${fileBase[$index]}
+              echo "Azure-*.log, ${fileBase[$index]}, captured: $(acnLogs)/"$node"_logs/log-output/${fileBase[$index]}"
+              ((index++))
+            done
+            if [ ${{ parameters.cni }} = 'cilium' ]; then
+              file="cilium-cni.log"
+              kubectl exec -i -n kube-system $pod -- cat var/log/$file > $(acnLogs)/"$node"_logs/log-output/$file
+              echo "Cilium log, $file, captured: $(acnLogs)/"$node"_logs/log-output/$file"
+            fi
+          done
+
+          if ! [ ${{ parameters.cni }} = 'cilium' ]; then
+            echo "------ Privileged work ------"
+            kubectl get pods -n kube-system -l os=linux,app=privileged-daemonset -owide
+            echo "Capture State Files from privileged pods"
+            for pod in $podList; do
+              node=`kubectl get pod -n kube-system $pod -o custom-columns=NODE:.spec.nodeName,NAME:.metadata.name --no-headers | awk '{print $1}'`
+              mkdir -p $(acnLogs)/"$node"_logs/privileged-output/
+              echo "Directory created: $(acnLogs)/"$node"_logs/privileged-output/"
+
+              file="azure-vnet.json"
+              kubectl exec -i -n kube-system $pod -- cat /var/run/$file > $(acnLogs)/"$node"_logs/privileged-output/$file
+              echo "CNI State, $file, captured: $(acnLogs)/"$node"_logs/privileged-output/$file"
+              if [ ${{ parameters.cni }} = 'cniv1' ]; then
+                file="azure-vnet-ipam.json"
+                kubectl exec -i -n kube-system $pod -- cat /var/run/$file > $(acnLogs)/"$node"_logs/privileged-output/$file
+                echo "CNIv1 IPAM, $file, captured: $(acnLogs)/"$node"_logs/privileged-output/$file"
+              fi
+            done
+          fi
+
+          if [ ${{ parameters.cni }} = 'cilium' ] || [ ${{ parameters.cni }} = 'cniv2' ]; then
+            echo "------ CNS work ------"
+
+
+            kubectl get pods -n kube-system -l k8s-app=azure-cns
+            echo "Capture State Files from CNS pods"
+            cnsPods=`kubectl get pods -n kube-system -l k8s-app=azure-cns --no-headers | awk '{print $1}'`
+            for pod in $cnsPods; do
+              managed=`kubectl exec -i -n kube-system $pod -- cat etc/azure-cns/cns_config.json | jq .ManageEndpointState`
+              node=`kubectl get pod -n kube-system $pod -o custom-columns=NODE:.spec.nodeName,NAME:.metadata.name --no-headers | awk '{print $1}'`
+              mkdir -p $(acnLogs)/"$node"_logs/CNS-output/
+              echo "Directory created: $(acnLogs)/"$node"_logs/CNS-output/"
+
+              file="cnsCache.txt"
+              kubectl exec -i -n kube-system $pod -- curl localhost:10090/debug/ipaddresses -d {\"IPConfigStateFilter\":[\"Assigned\"]} > $(acnLogs)/"$node"_logs/CNS-output/$file
+              echo "CNS cache, $file, captured: $(acnLogs)/"$node"_logs/CNS-output/$file"
+
+              file="azure-cns.json"
+              kubectl exec -i -n kube-system $pod -- cat /var/lib/azure-network/$file > $(acnLogs)/"$node"_logs/CNS-output/$file
+              echo "CNS State, $file, captured: $(acnLogs)/"$node"_logs/CNS-output/$file"
+              if [ $managed = "true" ]; then
+                file="azure-endpoints.json"
+                kubectl exec -i -n kube-system $pod -- cat /var/run/azure-cns/$file > $(acnLogs)/"$node"_logs/CNS-output/$file
+                echo "CNS Managed State, $file, captured: $(acnLogs)/"$node"_logs/CNS-output/$file"
+              fi
+            done
+          fi
+
+          if [ ${{ parameters.cni }} = 'cilium' ]; then
+            echo "------ Cilium work ------"
+            kubectl get pods -n kube-system -l k8s-app=cilium
+            echo "Capture State Files from Cilium pods"
+            ciliumPods=`kubectl get pods -n kube-system -l k8s-app=cilium --no-headers | awk '{print $1}'`
+            for pod in $ciliumPods; do
+              node=`kubectl get pod -n kube-system $pod -o custom-columns=NODE:.spec.nodeName,NAME:.metadata.name --no-headers | awk '{print $1}'`
+              mkdir -p $(acnLogs)/"$node"_logs/Cilium-output/
+              echo "Directory created: $(acnLogs)/"$node"_logs/Cilium-output/"
+
+              file="cilium-endpoint.json"
+              kubectl exec -i -n kube-system $pod -- cilium endpoint list -o json > $(acnLogs)/"$node"_logs/Cilium-output/$file
+              echo "Cilium, $file, captured: $(acnLogs)/"$node"_logs/Cilium-output/$file"
+            done
+          fi
+        displayName: Linux Logs
+        condition: always()
+
+  - ${{ if eq(parameters.os, 'windows') }}:
+      - bash: |
+          echo "Ensure that privileged pod exists on each node"
+          kubectl apply -f test/integration/manifests/load/privileged-daemonset-windows.yaml
+          kubectl rollout status ds -n kube-system privileged-daemonset
+
+          echo "------ Log work ------"
+          kubectl get pods -n kube-system -l os=windows,app=privileged-daemonset -owide
+          echo "Capture logs from each windows node. Files located in \k"
+          podList=`kubectl get pods -n kube-system -l os=windows,app=privileged-daemonset -owide --no-headers | awk '{print $1}'`
+          for pod in $podList; do
+            files=`kubectl exec -i -n kube-system $pod -- powershell "ls ../../k/azure*.log*" | grep azure | awk '{print $6}'`
+            node=`kubectl get pod -n kube-system $pod -o custom-columns=NODE:.spec.nodeName,NAME:.metadata.name --no-headers | awk '{print $1}'`
+            mkdir -p $(acnLogs)/"$node"_logs/log-output/
+            echo "Directory created: $(acnLogs)/"$node"_logs/log-output/"
+
+            for file in $files; do
+              kubectl exec -i -n kube-system $pod -- powershell "cat ../../k/$file" > $(acnLogs)/"$node"_logs/log-output/$file
+              echo "Azure-*.log, $file, captured: $(acnLogs)/"$node"_logs/log-output/$file"
+            done
+            if [ ${{ parameters.cni }} = 'cniv2' ]; then
+              file="azure-cns.log"
+              kubectl exec -i -n kube-system $pod -- cat k/azurecns/$file > $(acnLogs)/"$node"_logs/log-output/$file
+              echo "CNS Log, $file, captured: $(acnLogs)/"$node"_logs/log-output/$file"
+            fi
+          done
+
+          echo "------ Privileged work ------"
+          kubectl get pods -n kube-system -l os=windows,app=privileged-daemonset -owide
+          echo "Capture State Files from privileged pods"
+          for pod in $podList; do
+            node=`kubectl get pod -n kube-system $pod -o custom-columns=NODE:.spec.nodeName,NAME:.metadata.name --no-headers | awk '{print $1}'`
+            mkdir -p $(acnLogs)/"$node"_logs/privileged-output/
+            echo "Directory created: $(acnLogs)/"$node"_logs/privileged-output/"
+
+            file="azure-vnet.json"
+            kubectl exec -i -n kube-system $pod -- powershell cat ../../k/$file > $(acnLogs)/"$node"_logs/privileged-output/$file
+            echo "CNI State, $file, captured: $(acnLogs)/"$node"_logs/privileged-output/$file"
+            if [ ${{ parameters.cni }} = 'cniv1' ]; then
+              file="azure-vnet-ipam.json"
+              kubectl exec -i -n kube-system $pod -- powershell cat ../../k/$file > $(acnLogs)/"$node"_logs/privileged-output/$file
+              echo "CNI IPAM, $file, captured: $(acnLogs)/"$node"_logs/privileged-output/$file"
+            fi
+          done
+
+          if [ ${{ parameters.cni }} = 'cniv2' ]; then
+            echo "------ CNS work ------"
+
+
+            kubectl get pods -n kube-system -l k8s-app=azure-cns-win --no-headers
+            echo "Capture State Files from CNS pods"
+            cnsPods=`kubectl get pods -n kube-system -l k8s-app=azure-cns-win --no-headers | awk '{print $1}'`
+            for pod in $cnsPods; do
+              managed=`kubectl exec -i -n kube-system $pod -- powershell cat etc/azure-cns/cns_config.json | jq .ManageEndpointState`
+              node=`kubectl get pod -n kube-system $pod -o custom-columns=NODE:.spec.nodeName,NAME:.metadata.name --no-headers | awk '{print $1}'`
+              mkdir -p $(acnLogs)/"$node"_logs/CNS-output/
+              echo "Directory created: $(acnLogs)/"$node"_logs/CNS-output/"
+
+              file="cnsCache.txt"
+              kubectl exec -i -n kube-system $pod -- powershell 'Invoke-WebRequest -Uri 127.0.0.1:10090/debug/ipaddresses -Method Post -ContentType application/x-www-form-urlencoded -Body "{`"IPConfigStateFilter`":[`"Assigned`"]}" -UseBasicParsing | Select-Object -Expand Content' > $(acnLogs)/"$node"_logs/CNS-output/$file
+              echo "CNS cache, $file, captured: $(acnLogs)/"$node"_logs/CNS-output/$file"
+
+              file="azure-cns.json"
+              kubectl exec -i -n kube-system $pod -- powershell cat k/azurecns/azure-cns.json > $(acnLogs)/"$node"_logs/CNS-output/$file
+              echo "CNS State, $file, captured: $(acnLogs)/"$node"_logs/CNS-output/$file"
+              if [ $managed = "true" ]; then
+                file="azure-endpoints.json"
+                kubectl exec -i -n kube-system $pod -- cat k/azurecns/$file > $(acnLogs)/"$node"_logs/CNS-output/$file
+                echo "CNS Managed State, $file, captured: $(acnLogs)/"$node"_logs/CNS-output/$file"
+              fi
+            done
+          fi
+        displayName: Windows Logs
+        condition: always()
+
+  - task: CopyFiles@2
+    displayName: "Add Logs to Artifacts Dir"
+    inputs:
+      sourceFolder: $(System.DefaultWorkingDirectory)/${{ parameters.clusterName }}_${{ parameters.logType }}_Attempt_#$(System.StageAttempt)
+      targetFolder: $(Build.ArtifactStagingDirectory)/out
diff --git a/.pipelines/templates/run-unit-tests.stages.yaml b/.pipelines/templates/run-unit-tests.stages.yaml
new file mode 100644
index 0000000000..79754e01ad
--- /dev/null
+++ b/.pipelines/templates/run-unit-tests.stages.yaml
@@ -0,0 +1,386 @@
+stages:
+- stage: unittest
+  displayName: "Unit Tests"
+  dependsOn:
+  - setup
+  variables:
+    STORAGE_ID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.StorageID'] ]
+    ACN_DIR: azure-container-networking
+  jobs:
+  - job: linux
+    displayName: "Run All"
+    pool:
+      type: linux
+      isCustom: true
+      name: "$(BUILD_POOL_NAME_DEFAULT)"
+    variables:
+      ob_outputDirectory: $(Build.ArtifactStagingDirectory)/linux-unittest
+
+      REPORT_DIR: $(Build.ArtifactStagingDirectory)/linux-unittest
+      REPORT_XML: $(Build.ArtifactStagingDirectory)/linux-unittest/report.xml
+      COVERAGE_OUT: $(Build.ArtifactStagingDirectory)/linux-unittest/linux-coverage.out
+    steps:
+    - checkout: azure-container-networking
+
+    - task: GoTool@0
+      inputs:
+        version: '$(GOVERSION)'
+
+    - script: |
+        set -e
+        BIN_INSTALL_DIR=$(realpath bin)
+        GOBIN="$BIN_INSTALL_DIR" go install github.com/jstemmer/go-junit-report/v2@latest
+        JUNIT_REPORT_BIN="$BIN_INSTALL_DIR/go-junit-report"
+
+        mkdir -p "$REPORT_DIR"
+        touch "$REPORT_XML"
+        make tools
+
+        # run test, echo exit status code to fd 3, pipe output from test to tee, which splits output to stdout and go-junit-report (which converts test output to report.xml),
+        # stdout from tee is redirected to fd 4. Take output written to fd 3 (which is the exit code of test), redirect to stdout, pipe to read from stdout then exit with that status code.
+        # Read all output from fd 4 (output from tee) and write to stdout
+        { { { {
+        sudo -E env "PATH=$PATH" make test-all;
+        echo $? >&3;
+        } | tee >($JUNIT_REPORT_BIN > "$REPORT_XML") >&4;
+        } 3>&1;
+        } | { read xs; exit $xs; }
+        } 4>&1
+
+        ls -la "$REPORT_DIR"
+      retryCountOnTaskFailure: 3
+      displayName: "Run Unit Tests - Linux"
+      #workingDirectory: $(ACN_DIR)
+
+#    - script: |
+#        BIN_INSTALL_DIR=$(realpath bin)
+#        GOBIN="$BIN_INSTALL_DIR" go install github.com/axw/gocov/gocov@latest
+#        GOBIN="$BIN_INSTALL_DIR" go install github.com/AlekSi/gocov-xml@latest
+#
+#        GOCOV_BIN="$BIN_INSTALL_DIR/gocov"
+#        GOCOV_XML_BIN="$BIN_INSTALL_DIR/gocov-xml"
+#
+#        $GOCOV_BIN convert "$COVERAGE_OUT" > "$REPORT_DIR"/linux-coverage.gocov.json
+#        $GOCOV_XML_BIN < "$REPORT_DIR"/linux-coverage.gocov.json > "$REPORT_DIR"/linux-coverage.gocov.xml
+#
+#    - task: UsePythonVersion@0
+#      retryCountOnTaskFailure: 3
+#      inputs:
+#        versionSpec: '3.x' # string. Required. Version spec. Default: 3.x.
+# addToPath: true +# +# - task: PythonScript@0 +# displayName: "Generate Test Reporting" +# name: report +# inputs: +# arguments: $(Build.SourcesDirectory) +# scriptSource: 'inline' +# script: | +# import os +# import zlib +# +# def output_var(var_name, is_output, var_value): +# os.environ[var_name] = var_value +# print(f"##vso[task.setvariable variable={var_name};isOutput={is_output}]{var_value}") +# +# def encode_and_compress(file_path): +# with open(file_path, 'rb') as file: +# compressed_data = zlib.compress(file.read(), level=9) +# return compressed_data.hex() +# +# report_dir = os.environ['REPORT_DIR'] +# report_dir = os.path.realpath(report_dir) +# +# convert_vars = [ +# { 'var_name': 'LINUX_GOCOV_OUT', 'file_path': f'{report_dir}/linux-coverage.out' }, +# { 'var_name': 'LINUX_COVERAGE_OUT_XML', 'file_path': f'{report_dir}/linux-coverage.gocov.xml' }, +# { 'var_name': 'LINUX_COVERAGE_OUT_JSON', 'file_path': f'{report_dir}/linux-coverage.gocov.json' }, +# { 'var_name': 'LINUX_JUNIT_XML', 'file_path': f'{report_dir}/report.xml' } +# ] +# +# for item in convert_vars: +# val = encode_and_compress(item['file_path']) +# output_var(item['var_name'], True, val) + + + - job: windows + displayName: "Run Tests - Windows" + pool: + isCustom: true + type: windows + name: "$(BUILD_POOL_NAME_DEFAULT_WINDOWS_ALT)" + variables: + ob_outputDirectory: $(Build.ArtifactStagingDirectory)/windows-unittest + + INPUT_TEST_MODULES: './npm/... ./cni/... ./platform/...' + REPORT_DIR: $(Build.ArtifactStagingDirectory)/windows-unittest + REPORT_XML: report.xml + GOCOV_OUT: windows-gocov.out + COVERAGE_OUT: windows-coverage.out + steps: + - checkout: azure-container-networking + + - task: GoTool@0 + inputs: + version: '$(GOVERSION)' + + - task: UsePythonVersion@0 + retryCountOnTaskFailure: 3 + inputs: + versionSpec: '3.x' # string. Required. Version spec. Default: 3.x. 
+ addToPath: true + + - task: PythonScript@0 + displayName: "Run Unit Tests - Windows" + retryCountOnTaskFailure: 3 + inputs: + scriptSource: 'inline' + arguments: $(Build.SourcesDirectory) $(INPUT_TEST_MODULES) + script: | + import os + import subprocess + import sys + + # Set environment variables and directories + cwd = sys.argv[1] + cwd = os.path.realpath(cwd) + gotest_packages = sys.argv[2] + bin_install_dir = os.path.join(cwd, 'bin') + os.environ['GOBIN'] = bin_install_dir + + report_dir = os.environ['REPORT_DIR'] + report_dir = os.path.realpath(report_dir) + + log_file = os.path.join(report_dir, 'test.stdout.log') + + coverage_file = os.environ['COVERAGE_OUT'] + coverage_out = os.path.join(report_dir, coverage_file) + + gocover_file = os.environ['GOCOV_OUT'] + gocover_out = os.path.join(report_dir, gocover_file) + + junit_file = os.environ['REPORT_XML'] + junit_xml = os.path.join(report_dir, junit_file) + + # Install the go-junit-report tool + subprocess.check_call(['go', 'install', 'github.com/jstemmer/go-junit-report/v2@latest']) + junit_report_bin = os.path.join(bin_install_dir, 'go-junit-report') + + # Create report directory and touch report XML file + os.makedirs(report_dir, exist_ok=True) + + # Run make tools + subprocess.run('make tools', shell=True, text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + + # Function to run the test and capture output + def run_test(packages_to_test): + go_pkgs = ' '.join(packages_to_test) if isinstance(packages_to_test, list) else packages_to_test + + cmd_gotest = f"go test -timeout 30m -covermode atomic -coverprofile={coverage_out} {go_pkgs}" + cmd_junitreport = f'{junit_report_bin} -set-exit-code -in {log_file} -out {junit_xml} -iocopy' + cmd_gocover = f'go tool cover -func={coverage_out}' + + print(cmd_gotest) + gotest_process = subprocess.run(cmd_gotest, shell=True, stdout=open(log_file, 'w'), text=True, cwd=cwd) + + print(cmd_junitreport) + junit_process = subprocess.run(cmd_junitreport, shell=True, stdout=sys.stdout, stderr=sys.stderr, text=True, cwd=cwd) + + print(cmd_gocover) + gocover_process = subprocess.run(cmd_gocover, shell=True, text=True, stdout=open(gocover_out, "w"), cwd=cwd) + + return gotest_process.returncode + + # Run the test function + return_code = run_test(gotest_packages) + + # List report directory contents again + print(os.listdir(report_dir)) + + sys.exit(return_code) + +# - task: PythonScript@0 +# displayName: "Generate Test Reporting" +# name: report +# inputs: +# arguments: $(Build.SourcesDirectory) +# scriptSource: 'inline' +# script: | +# import os +# import subprocess +# import sys +# import zlib +# +# # Define the necessary variables +# cwd = sys.argv[1] +# cwd = os.path.realpath(cwd) +# bin_install_dir = os.path.join(cwd, 'bin') +# os.environ['GOBIN'] = bin_install_dir +# +# report_dir = os.environ['REPORT_DIR'] +# report_dir = os.path.realpath(report_dir) +# +# coverage_file = os.environ['COVERAGE_OUT'] +# coverage_out = os.path.join(report_dir, coverage_file) +# coverage_json = os.path.join(report_dir, 'windows-coverage.json') +# coverage_xml = os.path.join(report_dir, 'windows-coverage.xml') +# +# gocover_file = os.environ['GOCOV_OUT'] +# gocover_out = os.path.join(report_dir, gocover_file) +# +# junit_file = os.environ['REPORT_XML'] +# junit_xml = os.path.join(report_dir, junit_file) +# +# # Install gocov and gocov-xml +# subprocess.run(['go', 'install', 'github.com/axw/gocov/gocov@latest'], check=True) +# subprocess.run(['go', 'install', 'github.com/AlekSi/gocov-xml@latest'], check=True) +# +# 
# Define the paths to the installed binaries +# gocov_bin = os.path.join(bin_install_dir, 'gocov') +# gocov_xml_bin = os.path.join(bin_install_dir, 'gocov-xml') +# +# # Create the report directory if it doesn't exist +# os.makedirs(report_dir, exist_ok=True) +# +# # Convert coverage output to JSON +# with open(coverage_json, 'w') as json_file: +# subprocess.run([gocov_bin, 'convert', coverage_out], stdout=json_file, check=True) +# +# # create coverage xml +# with open(coverage_xml, 'w') as xml_file: +# with open(coverage_json, 'r') as json_file: +# subprocess.run([gocov_xml_bin], stdin=json_file, stdout=xml_file, check=True) +# +# +# def output_var(var_name, is_output, var_value): +# os.environ[var_name] = var_value +# print(f"##vso[task.setvariable variable={var_name};isOutput={is_output}]{var_value}") +# +# def encode_and_compress(file_path): +# with open(file_path, 'rb') as file: +# compressed_data = zlib.compress(file.read(), level=9) +# return compressed_data.hex() +# +# +# # coverage json +# coverage_json_content = encode_and_compress(coverage_json) +# output_var('COVERAGE_OUT_JSON', True, coverage_json_content) +# +# # coverage xml +# coverage_xml_content = encode_and_compress(coverage_xml) +# output_var('COVERAGE_OUT_XML', True, coverage_xml_content) +# +# # go cover +# gocover_out_content = encode_and_compress(gocover_out) +# output_var('GOCOV_OUT', True, gocover_out_content) +# +# # junit xml +# junit_xml_content = encode_and_compress(junit_xml) +# output_var('JUNIT_XML', True, junit_xml_content) +# +# sys.exit() + + +# - job: coverage +# displayName: "Check Test Coverage" +# condition: always() +# dependsOn: +# - windows +# - linux +# pool: +# type: windows +# variables: +# ob_outputDirectory: $(Build.ArtifactStagingDirectory)/out +# +# WINDOWS_JUNIT_XML: $[ dependencies.windows.outputs['report.JUNIT_XML'] ] +# WINDOWS_COVERAGE_OUT_XML: $[ dependencies.windows.outputs['report.COVERAGE_OUT_XML'] ] +# WINDOWS_COVERAGE_OUT_JSON: $[ dependencies.windows.outputs['report.COVERAGE_OUT_JSON'] ] +# WINDOWS_GOCOV_OUT: $[ dependencies.windows.outputs['report.GOCOV_OUT'] ] +# +# LINUX_COVERAGE_OUT_JSON: $[ dependencies.linux.outputs['report.COVERAGE_OUT_JSON'] ] +# LINUX_COVERAGE_OUT_XML: $[ dependencies.linux.outputs['report.COVERAGE_OUT_XML'] ] +# LINUX_GOCOV_OUT: $[ dependencies.linux.outputs['report.GOCOV_OUT_XML'] ] +# LINUX_JUNIT_XML: $[ dependencies.linux.outputs['report.JUNIT_XML'] ] +# steps: +# - task: UsePythonVersion@0 +# retryCountOnTaskFailure: 3 +# inputs: +# versionSpec: '3.x' # string. Required. Version spec. Default: 3.x. 
+# addToPath: true +# +# - task: PythonScript@0 +# displayName: "Write Test Output Artifacts" +# retryCountOnTaskFailure: 3 +# inputs: +# scriptSource: 'inline' +# arguments: $(Build.ArtifactStagingDirectory)/out +# script: | +# import os +# import sys +# import zlib +# +# # Define the necessary variables +# cwd = sys.argv[1] +# cwd = os.path.realpath(cwd) +# +# report_dir = os.environ['REPORT_DIR'] +# report_dir = os.path.realpath(report_dir) +# +# # Create the report directory if it doesn't exist +# os.makedirs(report_dir, exist_ok=True) +# +# def decompress_and_decode(compressed_string): +# encoded_string = zlib.decompress(compressed_string) +# return encoded_string +# +# convert_vars = [ +# { var_name: 'LINUX_GOCOV_OUT', file_path: f'{report_dir}/coverage-all.out' }, +# { var_name: 'LINUX_COVERAGE_OUT_XML', file_path: f'{report_dir}/coverage-all.xml' }, +# { var_name: 'LINUX_COVERAGE_OUT_JSON', file_path: f'{report_dir}/coverage-all.json' }, +# { var_name: 'LINUX_JUNIT_XML', file_path: f'{report_dir}/coverage-all.junit.xml' }, +# { var_name: 'WINDOWS_COVERAGE_OUT_XML', file_path: f'{report_dir}/windows-coverage.xml' }, +# { var_name: 'WINDOWS_COVERAGE_OUT_JSON', file_path: f'{report_dir}/windows-coverage.json' }, +# { var_name: 'WINDOWS_GOCOV_OUT', file_path: f'{report_dir}/windows-coverage.out' }, +# { var_name: 'WINDOWS_JUNIT_XML', file_path: f'{report_dir}/windows-coverage.junit.xml' } +# ] +# for item in convert_vars: +# with open(item['file_path'], 'wb') as file_io: +# print(f'Retrieving variable value from env var {item["var_name"]}') +# var_value = bytes.fromhex(os.environ[item['var_name']]) +# print(f'Decompressing data and writing variable value to file {item["file_path"]}') +# file_io.write(decompress_and_decode(var_value)) +# +# sys.exit() +# +# - task: PublishTestResults@2 +# displayName: "Publish Test Results" +# inputs: +# testResultsFormat: 'JUnit' +# testResultsFiles: # string. Required. Test results files. Default: **/TEST-*.xml. +# searchFolder: $(Build.ArtifactStagingDirectory)/out/**/*.junit.xml +# failTaskOnFailedTests: true +# failTaskOnMissingResultsFile: false +# #testRunTitle: # Name of the test runs +# # boolean. Merge test results. Default: false. +# mergeTestResults: true +# #failTaskOnFailureToPublishResults: false # boolean. Fail if there is failure in publishing test results. Default: false. +# # Advanced +# #buildPlatform: windows/amd64 +# #buildConfiguration: # string. Alias: configuration. Build Configuration. 
+#        publishRunAttachments: true
+#
+#    - task: PublishCodeCoverageResults@2
+#      displayName: "Publish Code Coverage Report"
+#      inputs:
+#        summaryFileLocation: $(Build.ArtifactStagingDirectory)/out/*
+#
+#    - task: BuildQualityChecks@8
+#      displayName: "Check Code Coverage Regression"
+#      inputs:
+#        checkCoverage: true
+#        coverageFailOption: "build"
+#        coverageType: "lines"
+#        fallbackOnPRTargetBranch: false
+#        baseBranchRef: "master"
+#        allowCoverageVariance: true
+#        coverageVariance: 0.25
diff --git a/.pipelines/templates/run-unit-tests.yaml b/.pipelines/templates/run-unit-tests.yaml
new file mode 100644
index 0000000000..553c3f3276
--- /dev/null
+++ b/.pipelines/templates/run-unit-tests.yaml
@@ -0,0 +1,118 @@
+stages:
+  - stage: test
+    displayName: Test ACN
+    dependsOn:
+      - setup
+    jobs:
+      - job: test
+        displayName: Run Tests
+        variables:
+          STORAGE_ID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.StorageID'] ]
+        pool:
+          name: "$(BUILD_POOL_NAME_DEFAULT)"
+        steps:
+          - script: |
+              set -e
+              make tools
+
+              # run test, echo exit status code to fd 3, pipe output from test to tee, which splits output to stdout and go-junit-report (which converts test output to report.xml),
+              # stdout from tee is redirected to fd 4. Take output written to fd 3 (which is the exit code of test), redirect to stdout, pipe to read from stdout then exit with that status code.
+              # Read all output from fd 4 (output from tee) and write to stdout
+              { { { {
+              sudo -E env "PATH=$PATH" make test-all;
+              echo $? >&3;
+              } | tee >(build/tools/bin/go-junit-report > report.xml) >&4;
+              } 3>&1;
+              } | { read xs; exit $xs; }
+              } 4>&1
+
+              # combine coverage from multiple modules
+              (echo "mode: atomic"; tail -q -n +2 coverage-*.out) > coverage.cover
+              mv coverage.cover linux-coverage.out
+            retryCountOnTaskFailure: 3
+            name: "Test"
+            displayName: "Run Tests"
+          - task: PublishPipelineArtifact@1
+            inputs:
+              targetPath: 'linux-coverage.out'
+              artifactName: 'linux-coverage'
+
+  - stage: test_windows
+    displayName: Test ACN Windows
+    dependsOn:
+      - setup
+    jobs:
+      - job: test
+        displayName: Run Tests
+        variables:
+          STORAGE_ID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.StorageID'] ]
+        pool:
+          name: "$(BUILD_POOL_NAME_DEFAULT_WINDOWS_ALT)"
+        steps:
+          # Only run one go test per script
+          - script: |
+              cd azure-container-networking/
+              go test -timeout 30m -covermode atomic -coverprofile=windows-coverage.out ./npm/... ./cni/... ./platform/...
+ go tool cover -func=windows-coverage.out + retryCountOnTaskFailure: 3 + name: "TestWindows" + displayName: "Run Windows Tests" + - task: PublishPipelineArtifact@1 + inputs: + targetPath: 'windows-coverage.out' + artifactName: 'windows-coverage' + + # Looking for PRs from merge queue + # Expected output refs/heads/gh-readonly-queue/master/pr-3780-f0a96a1 + - ${{ if contains(variables['Build.SourceBranch'], 'master/pr') }}: + - stage: code_coverage + displayName: Code Coverage Check + dependsOn: + - test + jobs: + - job: coverage + displayName: Check Coverage + pool: + name: "$(BUILD_POOL_NAME_DEFAULT)" + steps: + - task: DownloadPipelineArtifact@2 + inputs: + artifact: 'linux-coverage' + path: './' + - bash: | + # use go work to include multiple modules or gocov will omit results from those modules + make workspace + + make tools + sudo ln -s $(pwd)/build/tools/bin/gocov /usr/local/bin/gocov + sudo ln -s $(pwd)/build/tools/bin/gocov-xml /usr/local/bin/gocov-xml + + GOOS=linux gocov convert linux-coverage.out > linux-coverage.json + GOOS=linux gocov-xml < linux-coverage.json > linux-coverage.xml + + # TODO: Add windows coverage back in once PublishCodeCoverageResults v2 works with BuildQualityChecks + + mkdir coverage + + mv linux-coverage.xml coverage/ + name: "Coverage" + displayName: "Generate Coverage Report" + condition: always() + + - task: PublishCodeCoverageResults@1 + displayName: "Publish Code Coverage Report" + condition: always() + inputs: + codeCoverageTool: 'Cobertura' + summaryFileLocation: coverage/linux-coverage.xml + - task: BuildQualityChecks@8 + displayName: "Check Code Coverage Regression" + condition: always() + inputs: + checkCoverage: true + coverageFailOption: "build" + coverageType: "lines" + fallbackOnPRTargetBranch: false + baseBranchRef: "master" + allowCoverageVariance: true + coverageVariance: 0.25 diff --git a/.pipelines/trigger.yaml b/.pipelines/trigger.yaml new file mode 100644 index 0000000000..8bed06043a --- /dev/null +++ b/.pipelines/trigger.yaml @@ -0,0 +1,52 @@ +trigger: none +#pr: +# branches: +# include: +# - master +# - release/* +# paths: +# exclude: +# - ".devcontainer" +# - ".hooks" +# - ".vscode" +# - ".github" +# - docs +# +# +#trigger: +# branches: +# include: +# - gh-readonly-queue/master/* +# tags: +# include: +# - "*" + + +resources: + repositories: + - repository: azure-container-networking + type: github + name: Azure/azure-container-networking + endpoint: 'Azure-ACN RO Service Connection' + ref: refs/heads/feature/ob-onboard-0 + + +variables: + REPO_REF: $[ resources.repositories['azure-container-networking'].ref ] + REPO_COMMIT: $[ resources.repositories['azure-container-networking'].version ] + REPO_NAME: $[ resources.repositories['azure-container-networking'].name ] + REPO_TYPE: $[ resources.repositories['azure-container-networking'].type ] + CHANGESET_COMMIT: $[ resources.repositories['self'].version ] + + +pool: + vmImage: ubuntu-latest + + +extends: + template: /.pipelines/template.trigger.jobs.yaml@azure-container-networking + parameters: + mainRepoRef: $(REPO_REF) + mainRepoCommit: $(REPO_COMMIT) + mainRepoName: $(REPO_NAME) + mainRepoType: $(REPO_TYPE) diff --git a/Dockerfile.cnm b/Dockerfile.cnm deleted file mode 100644 index 98e978be29..0000000000 --- a/Dockerfile.cnm +++ /dev/null @@ -1,14 +0,0 @@ -FROM mcr.microsoft.com/oss/mirror/docker.io/library/ubuntu:18.04 -ARG CNM_BUILD_DIR - -# Install dependencies. -RUN apt-get update && apt-get install -y ebtables - -# Create plugins directory. 
-RUN mkdir -p /run/docker/plugins - -# Install plugin. -COPY $CNM_BUILD_DIR/azure-vnet-plugin /usr/bin -WORKDIR /usr/bin - -CMD ["/usr/bin/azure-vnet-plugin"] diff --git a/Makefile b/Makefile index 4de3cb05dd..2da9e50b71 100644 --- a/Makefile +++ b/Makefile @@ -20,9 +20,6 @@ GOOS ?= $(shell go env GOOS) GOARCH ?= $(shell go env GOARCH) GOOSES ?= "linux windows" # To override at the cli do: GOOSES="\"darwin bsd\"" GOARCHES ?= "amd64 arm64" # To override at the cli do: GOARCHES="\"ppc64 mips\"" -ltsc2019 = "10.0.17763.4010" -ltsc2022 = "10.0.20348.643" -ltsc2025 = "10.0.26244.5000" # Windows specific extensions # set these based on the GOOS, not the OS @@ -33,21 +30,24 @@ EXE_EXT = .exe endif # Interrogate the git repo and set some variables -REPO_ROOT = $(shell git rev-parse --show-toplevel) -REVISION ?= $(shell git rev-parse --short HEAD) -ACN_VERSION ?= $(shell git describe --exclude "azure-ipam*" --exclude "dropgz*" --exclude "zapai*" --exclude "ipv6-hp-bpf*" --tags --always) -IPV6_HP_BPF_VERSION ?= $(notdir $(shell git describe --match "ipv6-hp-bpf*" --tags --always)) -AZURE_IPAM_VERSION ?= $(notdir $(shell git describe --match "azure-ipam*" --tags --always)) -CNI_VERSION ?= $(ACN_VERSION) -CNI_DROPGZ_VERSION ?= $(notdir $(shell git describe --match "dropgz*" --tags --always)) -CNS_VERSION ?= $(ACN_VERSION) -NPM_VERSION ?= $(ACN_VERSION) -ZAPAI_VERSION ?= $(notdir $(shell git describe --match "zapai*" --tags --always)) +REPO_ROOT ?= $(shell git rev-parse --show-toplevel) +REVISION ?= $(shell git rev-parse --short HEAD) +ACN_VERSION ?= $(shell git describe --exclude "azure-iptables-monitor*" --exclude "azure-ip-masq-merger*" --exclude "azure-ipam*" --exclude "dropgz*" --exclude "zapai*" --exclude "ipv6-hp-bpf*" --tags --always) +IPV6_HP_BPF_VERSION ?= $(notdir $(shell git describe --match "ipv6-hp-bpf*" --tags --always)) +AZURE_IPAM_VERSION ?= $(notdir $(shell git describe --match "azure-ipam*" --tags --always)) +AZURE_IP_MASQ_MERGER_VERSION ?= $(notdir $(shell git describe --match "azure-ip-masq-merger*" --tags --always)) +AZURE_IPTABLES_MONITOR_VERSION ?= $(notdir $(shell git describe --match "azure-iptables-monitor*" --tags --always)) +CNI_VERSION ?= $(ACN_VERSION) +CNS_VERSION ?= $(ACN_VERSION) +NPM_VERSION ?= $(ACN_VERSION) +ZAPAI_VERSION ?= $(notdir $(shell git describe --match "zapai*" --tags --always)) # Build directories. 
AZURE_IPAM_DIR = $(REPO_ROOT)/azure-ipam +AZURE_IP_MASQ_MERGER_DIR = $(REPO_ROOT)/azure-ip-masq-merger +AZURE_IPTABLES_MONITOR_DIR = $(REPO_ROOT)/azure-iptables-monitor IPV6_HP_BPF_DIR = $(REPO_ROOT)/bpf-prog/ipv6-hp-bpf -CNM_DIR = $(REPO_ROOT)/cnm/plugin + CNI_NET_DIR = $(REPO_ROOT)/cni/network/plugin CNI_IPAM_DIR = $(REPO_ROOT)/cni/ipam/plugin STATELESS_CNI_NET_DIR = $(REPO_ROOT)/cni/network/stateless @@ -59,9 +59,11 @@ NPM_DIR = $(REPO_ROOT)/npm/cmd OUTPUT_DIR = $(REPO_ROOT)/output BUILD_DIR = $(OUTPUT_DIR)/$(GOOS)_$(GOARCH) AZURE_IPAM_BUILD_DIR = $(BUILD_DIR)/azure-ipam +AZURE_IP_MASQ_MERGER_BUILD_DIR = $(BUILD_DIR)/azure-ip-masq-merger +AZURE_IPTABLES_MONITOR_BUILD_DIR = $(BUILD_DIR)/azure-iptables-monitor IPV6_HP_BPF_BUILD_DIR = $(BUILD_DIR)/bpf-prog/ipv6-hp-bpf IMAGE_DIR = $(OUTPUT_DIR)/images -CNM_BUILD_DIR = $(BUILD_DIR)/cnm + CNI_BUILD_DIR = $(BUILD_DIR)/cni ACNCLI_BUILD_DIR = $(BUILD_DIR)/acncli STATELESS_CNI_BUILD_DIR = $(CNI_BUILD_DIR)/stateless @@ -91,6 +93,7 @@ GOFUMPT := $(TOOLS_BIN_DIR)/gofumpt GOLANGCI_LINT := $(TOOLS_BIN_DIR)/golangci-lint GO_JUNIT_REPORT := $(TOOLS_BIN_DIR)/go-junit-report MOCKGEN := $(TOOLS_BIN_DIR)/mockgen +RENDERKIT := $(TOOLS_BIN_DIR)/renderkit # Archive file names. ACNCLI_ARCHIVE_NAME = acncli-$(GOOS)-$(GOARCH)-$(ACN_VERSION).$(ARCHIVE_EXT) @@ -101,22 +104,19 @@ CNI_SWIFT_ARCHIVE_NAME = azure-vnet-cni-swift-$(GOOS)-$(GOARCH)-$(CNI_VERSION).$ CNI_OVERLAY_ARCHIVE_NAME = azure-vnet-cni-overlay-$(GOOS)-$(GOARCH)-$(CNI_VERSION).$(ARCHIVE_EXT) CNI_BAREMETAL_ARCHIVE_NAME = azure-vnet-cni-baremetal-$(GOOS)-$(GOARCH)-$(CNI_VERSION).$(ARCHIVE_EXT) CNI_DUALSTACK_ARCHIVE_NAME = azure-vnet-cni-overlay-dualstack-$(GOOS)-$(GOARCH)-$(CNI_VERSION).$(ARCHIVE_EXT) -CNM_ARCHIVE_NAME = azure-vnet-cnm-$(GOOS)-$(GOARCH)-$(ACN_VERSION).$(ARCHIVE_EXT) + CNS_ARCHIVE_NAME = azure-cns-$(GOOS)-$(GOARCH)-$(CNS_VERSION).$(ARCHIVE_EXT) NPM_ARCHIVE_NAME = azure-npm-$(GOOS)-$(GOARCH)-$(NPM_VERSION).$(ARCHIVE_EXT) AZURE_IPAM_ARCHIVE_NAME = azure-ipam-$(GOOS)-$(GOARCH)-$(AZURE_IPAM_VERSION).$(ARCHIVE_EXT) +AZURE_IP_MASQ_MERGER_ARCHIVE_NAME = azure-ip-masq-merger-$(GOOS)-$(GOARCH)-$(AZURE_IP_MASQ_MERGER_VERSION).$(ARCHIVE_EXT) +AZURE_IPTABLES_MONITOR_ARCHIVE_NAME = azure-iptables-monitor-$(GOOS)-$(GOARCH)-$(AZURE_IPTABLES_MONITOR_VERSION).$(ARCHIVE_EXT) IPV6_HP_BPF_ARCHIVE_NAME = ipv6-hp-bpf-$(GOOS)-$(GOARCH)-$(IPV6_HP_BPF_VERSION).$(ARCHIVE_EXT) # Image info file names. CNI_IMAGE_INFO_FILE = azure-cni-$(CNI_VERSION).txt -CNI_DROPGZ_IMAGE_INFO_FILE = cni-dropgz-$(CNI_DROPGZ_VERSION).txt CNS_IMAGE_INFO_FILE = azure-cns-$(CNS_VERSION).txt NPM_IMAGE_INFO_FILE = azure-npm-$(NPM_VERSION).txt -# Docker libnetwork (CNM) plugin v2 image parameters. 
-CNM_PLUGIN_IMAGE ?= microsoft/azure-vnet-plugin -CNM_PLUGIN_ROOTFS = azure-vnet-plugin-rootfs - # Default target all-binaries-platforms: ## Make all platform binaries @for goos in "$(GOOSES)"; do \ @@ -127,8 +127,8 @@ all-binaries-platforms: ## Make all platform binaries # OS specific binaries/images ifeq ($(GOOS),linux) -all-binaries: acncli azure-cni-plugin azure-cns azure-npm azure-ipam ipv6-hp-bpf -all-images: npm-image cns-image cni-manager-image ipv6-hp-bpf-image +all-binaries: acncli azure-cni-plugin azure-cns azure-npm azure-ipam azure-ip-masq-merger azure-iptables-monitor ipv6-hp-bpf +all-images: npm-image cns-image cni-manager-image azure-ip-masq-merger-image azure-iptables-monitor-image ipv6-hp-bpf-image else all-binaries: azure-cni-plugin azure-cns azure-npm all-images: @@ -136,13 +136,14 @@ all-images: endif # Shorthand target names for convenience. -azure-cnm-plugin: cnm-binary cnm-archive azure-cni-plugin: azure-vnet-binary azure-vnet-stateless-binary azure-vnet-ipam-binary azure-vnet-ipamv6-binary azure-vnet-telemetry-binary cni-archive azure-cns: azure-cns-binary cns-archive acncli: acncli-binary acncli-archive azure-npm: azure-npm-binary npm-archive azure-ipam: azure-ipam-binary azure-ipam-archive ipv6-hp-bpf: ipv6-hp-bpf-binary ipv6-hp-bpf-archive +azure-ip-masq-merger: azure-ip-masq-merger-binary azure-ip-masq-merger-archive +azure-iptables-monitor: azure-iptables-monitor-binary azure-iptables-monitor-archive ##@ Versioning @@ -158,15 +159,18 @@ acncli-version: version azure-ipam-version: ## prints the azure-ipam version @echo $(AZURE_IPAM_VERSION) +azure-ip-masq-merger-version: ## prints the azure-ip-masq-merger version + @echo $(AZURE_IP_MASQ_MERGER_VERSION) + +azure-iptables-monitor-version: ## prints the azure-iptables-monitor version + @echo $(AZURE_IPTABLES_MONITOR_VERSION) + ipv6-hp-bpf-version: ## prints the ipv6-hp-bpf version @echo $(IPV6_HP_BPF_VERSION) cni-version: ## prints the cni version @echo $(CNI_VERSION) -cni-dropgz-version: ## prints the cni-dropgz version - @echo $(CNI_DROPGZ_VERSION) - cns-version: @echo $(CNS_VERSION) @@ -197,10 +201,6 @@ else ifeq ($(GOARCH),arm64) for dir in /usr/include/aarch64-linux-gnu/*; do sudo ln -sfn "$$dir" /usr/include/$$(basename "$$dir"); done endif -# Build the Azure CNM binary. -cnm-binary: - cd $(CNM_DIR) && CGO_ENABLED=0 go build -v -o $(CNM_BUILD_DIR)/azure-vnet-plugin$(EXE_EXT) -ldflags "-X main.version=$(ACN_VERSION)" -gcflags="-dwarflocationlists=true" - # Build the Azure CNI network binary. azure-vnet-binary: cd $(CNI_NET_DIR) && CGO_ENABLED=0 go build -v -o $(CNI_BUILD_DIR)/azure-vnet$(EXE_EXT) -ldflags "-X main.version=$(CNI_VERSION)" -gcflags="-dwarflocationlists=true" @@ -234,6 +234,14 @@ azure-npm-binary: cd $(CNI_TELEMETRY_DIR) && CGO_ENABLED=0 go build -v -o $(NPM_BUILD_DIR)/azure-vnet-telemetry$(EXE_EXT) -ldflags "-X main.version=$(NPM_VERSION)" -gcflags="-dwarflocationlists=true" cd $(NPM_DIR) && CGO_ENABLED=0 go build -v -o $(NPM_BUILD_DIR)/azure-npm$(EXE_EXT) -ldflags "-X main.version=$(NPM_VERSION) -X $(NPM_AI_PATH)=$(NPM_AI_ID)" -gcflags="-dwarflocationlists=true" +# Build the azure-ip-masq-merger binary. +azure-ip-masq-merger-binary: + cd $(AZURE_IP_MASQ_MERGER_DIR) && CGO_ENABLED=0 go build -v -o $(AZURE_IP_MASQ_MERGER_BUILD_DIR)/azure-ip-masq-merger$(EXE_EXT) -ldflags "-X main.version=$(AZURE_IP_MASQ_MERGER_VERSION)" -gcflags="-dwarflocationlists=true" + +# Build the azure-iptables-monitor binary. 
+azure-iptables-monitor-binary: + cd $(AZURE_IPTABLES_MONITOR_DIR) && CGO_ENABLED=0 go build -v -o $(AZURE_IPTABLES_MONITOR_BUILD_DIR)/azure-iptables-monitor$(EXE_EXT) -ldflags "-X main.version=$(AZURE_IPTABLES_MONITOR_VERSION)" -gcflags="-dwarflocationlists=true" + ##@ Containers ## Common variables for all containers. @@ -272,25 +280,27 @@ CONTAINER_TRANSPORT = docker endif ## Image name definitions. -ACNCLI_IMAGE = acncli -AZURE_IPAM_IMAGE = azure-ipam -IPV6_HP_BPF_IMAGE = ipv6-hp-bpf -CNI_IMAGE = azure-cni -CNI_DROPGZ_IMAGE = cni-dropgz -CNS_IMAGE = azure-cns -NPM_IMAGE = azure-npm +ACNCLI_IMAGE = acncli +AZURE_IPAM_IMAGE = azure-ipam +IPV6_HP_BPF_IMAGE = ipv6-hp-bpf +CNI_IMAGE = azure-cni +CNS_IMAGE = azure-cns +NPM_IMAGE = azure-npm +AZURE_IP_MASQ_MERGER_IMAGE = azure-ip-masq-merger +AZURE_IPTABLES_MONITOR_IMAGE = azure-iptables-monitor ## Image platform tags. -ACNCLI_PLATFORM_TAG ?= $(subst /,-,$(PLATFORM))$(if $(OS_VERSION),-$(OS_VERSION),)-$(ACN_VERSION) -AZURE_IPAM_PLATFORM_TAG ?= $(subst /,-,$(PLATFORM))$(if $(OS_VERSION),-$(OS_VERSION),)-$(AZURE_IPAM_VERSION) -AZURE_IPAM_WINDOWS_PLATFORM_TAG ?= $(subst /,-,$(PLATFORM))$(if $(OS_VERSION),-$(OS_VERSION),)-$(AZURE_IPAM_VERSION)-$(OS_SKU_WIN) -IPV6_HP_BPF_IMAGE_PLATFORM_TAG ?= $(subst /,-,$(PLATFORM))$(if $(OS_VERSION),-$(OS_VERSION),)-$(IPV6_HP_BPF_VERSION) -CNI_PLATFORM_TAG ?= $(subst /,-,$(PLATFORM))$(if $(OS_VERSION),-$(OS_VERSION),)-$(CNI_VERSION) -CNI_WINDOWS_PLATFORM_TAG ?= $(subst /,-,$(PLATFORM))$(if $(OS_VERSION),-$(OS_VERSION),)-$(CNI_VERSION)-$(OS_SKU_WIN) -CNI_DROPGZ_PLATFORM_TAG ?= $(subst /,-,$(PLATFORM))$(if $(OS_VERSION),-$(OS_VERSION),)-$(CNI_DROPGZ_VERSION) -CNS_PLATFORM_TAG ?= $(subst /,-,$(PLATFORM))$(if $(OS_VERSION),-$(OS_VERSION),)-$(CNS_VERSION) -CNS_WINDOWS_PLATFORM_TAG ?= $(subst /,-,$(PLATFORM))$(if $(OS_VERSION),-$(OS_VERSION),)-$(CNS_VERSION)-$(OS_SKU_WIN) -NPM_PLATFORM_TAG ?= $(subst /,-,$(PLATFORM))$(if $(OS_VERSION),-$(OS_VERSION),)-$(NPM_VERSION) +ACNCLI_PLATFORM_TAG ?= $(subst /,-,$(PLATFORM))-$(ACN_VERSION) +AZURE_IPAM_PLATFORM_TAG ?= $(subst /,-,$(PLATFORM))-$(AZURE_IPAM_VERSION) +AZURE_IPAM_WINDOWS_PLATFORM_TAG ?= $(subst /,-,$(PLATFORM))-$(AZURE_IPAM_VERSION)-$(OS_SKU_WIN) +IPV6_HP_BPF_IMAGE_PLATFORM_TAG ?= $(subst /,-,$(PLATFORM))-$(IPV6_HP_BPF_VERSION) +CNI_PLATFORM_TAG ?= $(subst /,-,$(PLATFORM))-$(CNI_VERSION) +CNI_WINDOWS_PLATFORM_TAG ?= $(subst /,-,$(PLATFORM))-$(CNI_VERSION)-$(OS_SKU_WIN) +CNS_PLATFORM_TAG ?= $(subst /,-,$(PLATFORM))-$(CNS_VERSION) +CNS_WINDOWS_PLATFORM_TAG ?= $(subst /,-,$(PLATFORM))-$(CNS_VERSION)-$(OS_SKU_WIN) +NPM_PLATFORM_TAG ?= $(subst /,-,$(PLATFORM))-$(NPM_VERSION) +AZURE_IP_MASQ_MERGER_PLATFORM_TAG ?= $(subst /,-,$(PLATFORM))-$(AZURE_IP_MASQ_MERGER_VERSION) +AZURE_IPTABLES_MONITOR_PLATFORM_TAG ?= $(subst /,-,$(PLATFORM))-$(AZURE_IPTABLES_MONITOR_VERSION) qemu-user-static: ## Set up the host to run qemu multiplatform container builds. @@ -303,7 +313,6 @@ container-buildah: # util target to build container images using buildah. do not buildah bud \ --build-arg ARCH=$(ARCH) \ --build-arg OS=$(OS) \ - --build-arg OS_VERSION=$(OS_VERSION) \ --build-arg PLATFORM=$(PLATFORM) \ --build-arg VERSION=$(TAG) \ $(EXTRA_BUILD_ARGS) \ @@ -316,12 +325,11 @@ container-buildah: # util target to build container images using buildah. do not buildah push $(IMAGE_REGISTRY)/$(IMAGE):$(TAG) container-docker: # util target to build container images using docker buildx. do not invoke directly. 
- docker buildx create --use --platform $(PLATFORM) + docker buildx create --use --driver-opt image=mcr.microsoft.com/oss/v2/moby/buildkit:v0.16.0-2 --platform $(PLATFORM) docker buildx build \ $(BUILDX_ACTION) \ --build-arg ARCH=$(ARCH) \ --build-arg OS=$(OS) \ - --build-arg OS_VERSION=$(OS_VERSION) \ --build-arg PLATFORM=$(PLATFORM) \ --build-arg VERSION=$(TAG) \ $(EXTRA_BUILD_ARGS) \ @@ -335,7 +343,6 @@ container: # util target to build container images. do not invoke directly. $(MAKE) container-$(CONTAINER_BUILDER) \ ARCH=$(ARCH) \ OS=$(OS) \ - OS_VERSION=$(OS_VERSION) \ PLATFORM=$(PLATFORM) \ TAG=$(TAG) \ TARGET=$(TARGET) @@ -392,8 +399,7 @@ azure-ipam-image: ## build azure-ipam container image. TAG=$(AZURE_IPAM_PLATFORM_TAG) \ TARGET=$(OS) \ OS=$(OS) \ - ARCH=$(ARCH) \ - OS_VERSION=$(OS_VERSION) + ARCH=$(ARCH) azure-ipam-image-push: ## push azure-ipam container image. $(MAKE) container-push \ @@ -405,6 +411,60 @@ azure-ipam-image-pull: ## pull azure-ipam container image. IMAGE=$(AZURE_IPAM_IMAGE) \ TAG=$(AZURE_IPAM_PLATFORM_TAG) +# azure-ip-masq-merger +azure-ip-masq-merger-image-name: # util target to print the azure-ip-masq-merger image name. + @echo $(AZURE_IP_MASQ_MERGER_IMAGE) + +azure-ip-masq-merger-image-name-and-tag: # util target to print the azure-ip-masq-merger image name and tag. + @echo $(IMAGE_REGISTRY)/$(AZURE_IP_MASQ_MERGER_IMAGE):$(AZURE_IP_MASQ_MERGER_PLATFORM_TAG) + +azure-ip-masq-merger-image: ## build azure-ip-masq-merger container image. + $(MAKE) container \ + DOCKERFILE=azure-ip-masq-merger/Dockerfile \ + IMAGE=$(AZURE_IP_MASQ_MERGER_IMAGE) \ + PLATFORM=$(PLATFORM) \ + TAG=$(AZURE_IP_MASQ_MERGER_PLATFORM_TAG) \ + TARGET=$(OS) \ + OS=$(OS) \ + ARCH=$(ARCH) + +azure-ip-masq-merger-image-push: ## push azure-ip-masq-merger container image. + $(MAKE) container-push \ + IMAGE=$(AZURE_IP_MASQ_MERGER_IMAGE) \ + TAG=$(AZURE_IP_MASQ_MERGER_PLATFORM_TAG) + +azure-ip-masq-merger-image-pull: ## pull azure-ip-masq-merger container image. + $(MAKE) container-pull \ + IMAGE=$(AZURE_IP_MASQ_MERGER_IMAGE) \ + TAG=$(AZURE_IP_MASQ_MERGER_PLATFORM_TAG) + +# azure-iptables-monitor +azure-iptables-monitor-image-name: # util target to print the azure-iptables-monitor image name. + @echo $(AZURE_IPTABLES_MONITOR_IMAGE) + +azure-iptables-monitor-image-name-and-tag: # util target to print the azure-iptables-monitor image name and tag. + @echo $(IMAGE_REGISTRY)/$(AZURE_IPTABLES_MONITOR_IMAGE):$(AZURE_IPTABLES_MONITOR_PLATFORM_TAG) + +azure-iptables-monitor-image: ## build azure-iptables-monitor container image. + $(MAKE) container \ + DOCKERFILE=azure-iptables-monitor/Dockerfile \ + IMAGE=$(AZURE_IPTABLES_MONITOR_IMAGE) \ + PLATFORM=$(PLATFORM) \ + TAG=$(AZURE_IPTABLES_MONITOR_PLATFORM_TAG) \ + TARGET=$(OS) \ + OS=$(OS) \ + ARCH=$(ARCH) + +azure-iptables-monitor-image-push: ## push azure-iptables-monitor container image. + $(MAKE) container-push \ + IMAGE=$(AZURE_IPTABLES_MONITOR_IMAGE) \ + TAG=$(AZURE_IPTABLES_MONITOR_PLATFORM_TAG) + +azure-iptables-monitor-image-pull: ## pull azure-iptables-monitor container image. + $(MAKE) container-pull \ + IMAGE=$(AZURE_IPTABLES_MONITOR_IMAGE) \ + TAG=$(AZURE_IPTABLES_MONITOR_PLATFORM_TAG) + # ipv6-hp-bpf ipv6-hp-bpf-image-name: # util target to print the ipv6-hp-bpf image name. @@ -417,13 +477,12 @@ ipv6-hp-bpf-image: ## build ipv6-hp-bpf container image. 
$(MAKE) container \ DOCKERFILE=bpf-prog/ipv6-hp-bpf/$(OS).Dockerfile \ IMAGE=$(IPV6_HP_BPF_IMAGE) \ - EXTRA_BUILD_ARGS='--build-arg OS=$(OS) --build-arg ARCH=$(ARCH) --build-arg OS_VERSION=$(OS_VERSION) --build-arg DEBUG=$(DEBUG)'\ + EXTRA_BUILD_ARGS='--build-arg OS=$(OS) --build-arg ARCH=$(ARCH) --build-arg DEBUG=$(DEBUG)'\ PLATFORM=$(PLATFORM) \ TAG=$(IPV6_HP_BPF_IMAGE_PLATFORM_TAG) \ TARGET=$(OS) \ OS=$(OS) \ - ARCH=$(ARCH) \ - OS_VERSION=$(OS_VERSION) + ARCH=$(ARCH) ipv6-hp-bpf-image-push: ## push ipv6-hp-bpf container image. $(MAKE) container-push \ @@ -452,7 +511,7 @@ cni-image: ## build cni container image. TARGET=$(OS) \ OS=$(OS) \ ARCH=$(ARCH) \ - OS_VERSION=$(OS_VERSION) + EXTRA_BUILD_ARGS='--build-arg CNI_AI_PATH=$(CNI_AI_PATH) --build-arg CNI_AI_ID=$(CNI_AI_ID)' cni-image-push: ## push cni container image. $(MAKE) container-push \ @@ -465,32 +524,6 @@ cni-image-pull: ## pull cni container image. TAG=$(CNI_PLATFORM_TAG) -# cni-dropgz - -cni-dropgz-image-name: # util target to print the CNI dropgz image name. - @echo $(CNI_DROPGZ_IMAGE) - -cni-dropgz-image-name-and-tag: # util target to print the CNI dropgz image name and tag. - @echo $(IMAGE_REGISTRY)/$(CNI_DROPGZ_IMAGE):$(CNI_DROPGZ_PLATFORM_TAG) - -cni-dropgz-image: ## build cni-dropgz container image. - $(MAKE) container \ - DOCKERFILE=dropgz/build/$(OS).Dockerfile \ - IMAGE=$(CNI_DROPGZ_IMAGE) \ - TAG=$(CNI_DROPGZ_PLATFORM_TAG) \ - TARGET=$(OS) - -cni-dropgz-image-push: ## push cni-dropgz container image. - $(MAKE) container-push \ - IMAGE=$(CNI_DROPGZ_IMAGE) \ - TAG=$(CNI_DROPGZ_PLATFORM_TAG) - -cni-dropgz-image-pull: ## pull cni-dropgz container image. - $(MAKE) container-pull \ - IMAGE=$(CNI_DROPGZ_IMAGE) \ - TAG=$(CNI_DROPGZ_PLATFORM_TAG) - - # cns cns-image-name: # util target to print the CNS image name @@ -508,8 +541,7 @@ cns-image: ## build cns container image. TAG=$(CNS_PLATFORM_TAG) \ TARGET=$(OS) \ OS=$(OS) \ - ARCH=$(ARCH) \ - OS_VERSION=$(OS_VERSION) + ARCH=$(ARCH) cns-image-push: ## push cns container image. $(MAKE) container-push \ @@ -538,8 +570,7 @@ npm-image: ## build the npm container image. TAG=$(NPM_PLATFORM_TAG) \ TARGET=$(OS) \ OS=$(OS) \ - ARCH=$(ARCH) \ - OS_VERSION=$(OS_VERSION) + ARCH=$(ARCH) npm-image-push: ## push npm container image. $(MAKE) container-push \ @@ -551,40 +582,6 @@ npm-image-pull: ## pull cns container image. IMAGE=$(NPM_IMAGE) \ TAG=$(NPM_PLATFORM_TAG) - -## Legacy - -# Build the Azure CNM plugin image, installable with "docker plugin install". -azure-cnm-plugin-image: azure-cnm-plugin ## build the azure-cnm plugin container image. - docker images -q $(CNM_PLUGIN_ROOTFS):$(ACN_VERSION) > cid - docker build --no-cache \ - -f Dockerfile.cnm \ - -t $(CNM_PLUGIN_ROOTFS):$(ACN_VERSION) \ - --build-arg CNM_BUILD_DIR=$(CNM_BUILD_DIR) \ - . - $(eval CID := `cat cid`) - docker rmi $(CID) || true - - # Create a container using the image and export its rootfs. - docker create $(CNM_PLUGIN_ROOTFS):$(ACN_VERSION) > cid - $(eval CID := `cat cid`) - $(MKDIR) $(OUTPUT_DIR)/$(CID)/rootfs - docker export $(CID) | tar -x -C $(OUTPUT_DIR)/$(CID)/rootfs - docker rm -vf $(CID) - - # Copy the plugin configuration and set ownership. - cp cnm/config.json $(OUTPUT_DIR)/$(CID) - chgrp -R docker $(OUTPUT_DIR)/$(CID) - - # Create the plugin. - docker plugin rm $(CNM_PLUGIN_IMAGE):$(ACN_VERSION) || true - docker plugin create $(CNM_PLUGIN_IMAGE):$(ACN_VERSION) $(OUTPUT_DIR)/$(CID) - - # Cleanup temporary files. 
- rm -rf $(OUTPUT_DIR)/$(CID) - rm cid - - ## Reusable targets for building multiplat container image manifests. IMAGE_ARCHIVE_DIR ?= $(shell pwd) @@ -593,16 +590,14 @@ manifest-create: $(CONTAINER_BUILDER) manifest create $(IMAGE_REGISTRY)/$(IMAGE):$(TAG) manifest-add: - $(CONTAINER_BUILDER) manifest add --os=$(OS) --os-version=$($(OS_VERSION)) $(IMAGE_REGISTRY)/$(IMAGE):$(TAG) docker://$(IMAGE_REGISTRY)/$(IMAGE):$(subst /,-,$(PLATFORM))$(if $(OS_VERSION),-$(OS_VERSION),)-$(TAG) + $(CONTAINER_BUILDER) manifest add --os=$(OS) $(IMAGE_REGISTRY)/$(IMAGE):$(TAG) docker://$(IMAGE_REGISTRY)/$(IMAGE):$(subst /,-,$(PLATFORM))-$(TAG) manifest-build: # util target to compose multiarch container manifests from platform specific images. $(MAKE) manifest-create $(foreach PLATFORM,$(PLATFORMS),\ $(if $(filter $(PLATFORM),windows/amd64),\ - $(foreach OS_VERSION,$(OS_VERSIONS),\ - $(MAKE) manifest-add CONTAINER_BUILDER=$(CONTAINER_BUILDER) OS=windows OS_VERSION=$(OS_VERSION) PLATFORM=$(PLATFORM);\ - ),\ - $(MAKE) manifest-add PLATFORM=$(PLATFORM);\ + $(MAKE) manifest-add CONTAINER_BUILDER=$(CONTAINER_BUILDER) OS=windows OS_VERSION=$(OS_VERSION) PLATFORM=$(PLATFORM);,\ + $(MAKE) manifest-add PLATFORM=$(PLATFORM);\ )\ )\ @@ -634,8 +629,7 @@ azure-ipam-manifest-build: ## build azure-ipam multiplat container manifest. $(MAKE) manifest-build \ PLATFORMS="$(PLATFORMS)" \ IMAGE=$(AZURE_IPAM_IMAGE) \ - TAG=$(AZURE_IPAM_VERSION) \ - OS_VERSIONS="$(OS_VERSIONS)" + TAG=$(AZURE_IPAM_VERSION) azure-ipam-manifest-push: ## push azure-ipam multiplat container manifest $(MAKE) manifest-push \ @@ -647,12 +641,43 @@ azure-ipam-skopeo-archive: ## export tar archive of azure-ipam multiplat contain IMAGE=$(AZURE_IPAM_IMAGE) \ TAG=$(AZURE_IPAM_VERSION) +azure-ip-masq-merger-manifest-build: ## build azure-ip-masq-merger multiplat container manifest. + $(MAKE) manifest-build \ + PLATFORMS="$(PLATFORMS)" \ + IMAGE=$(AZURE_IP_MASQ_MERGER_IMAGE) \ + TAG=$(AZURE_IP_MASQ_MERGER_VERSION) + +azure-ip-masq-merger-manifest-push: ## push azure-ip-masq-merger multiplat container manifest + $(MAKE) manifest-push \ + IMAGE=$(AZURE_IP_MASQ_MERGER_IMAGE) \ + TAG=$(AZURE_IP_MASQ_MERGER_VERSION) + +azure-ip-masq-merger-skopeo-archive: ## export tar archive of azure-ip-masq-merger multiplat container manifest. + $(MAKE) manifest-skopeo-archive \ + IMAGE=$(AZURE_IP_MASQ_MERGER_IMAGE) \ + TAG=$(AZURE_IP_MASQ_MERGER_VERSION) + +azure-iptables-monitor-manifest-build: ## build azure-iptables-monitor multiplat container manifest. + $(MAKE) manifest-build \ + PLATFORMS="$(PLATFORMS)" \ + IMAGE=$(AZURE_IPTABLES_MONITOR_IMAGE) \ + TAG=$(AZURE_IPTABLES_MONITOR_VERSION) + +azure-iptables-monitor-manifest-push: ## push azure-iptables-monitor multiplat container manifest + $(MAKE) manifest-push \ + IMAGE=$(AZURE_IPTABLES_MONITOR_IMAGE) \ + TAG=$(AZURE_IPTABLES_MONITOR_VERSION) + +azure-iptables-monitor-skopeo-archive: ## export tar archive of azure-iptables-monitor multiplat container manifest. + $(MAKE) manifest-skopeo-archive \ + IMAGE=$(AZURE_IPTABLES_MONITOR_IMAGE) \ + TAG=$(AZURE_IPTABLES_MONITOR_VERSION) + ipv6-hp-bpf-manifest-build: ## build ipv6-hp-bpf multiplat container manifest. $(MAKE) manifest-build \ PLATFORMS="$(PLATFORMS)" \ IMAGE=$(IPV6_HP_BPF_IMAGE) \ - TAG=$(IPV6_HP_BPF_VERSION) \ - OS_VERSIONS="$(OS_VERSIONS)" + TAG=$(IPV6_HP_BPF_VERSION) ipv6-hp-bpf-manifest-push: ## push ipv6-hp-bpf multiplat container manifest $(MAKE) manifest-push \ @@ -668,8 +693,7 @@ cni-manifest-build: ## build cni multiplat container manifest. 
$(MAKE) manifest-build \ PLATFORMS="$(PLATFORMS)" \ IMAGE=$(CNI_IMAGE) \ - TAG=$(CNI_VERSION) \ - OS_VERSIONS="$(OS_VERSIONS)" + TAG=$(CNI_VERSION) cni-manifest-push: ## push cni multiplat container manifest $(MAKE) manifest-push \ @@ -681,29 +705,11 @@ cni-skopeo-archive: ## export tar archive of cni multiplat container manifest. IMAGE=$(CNI_IMAGE) \ TAG=$(CNI_VERSION) -cni-dropgz-manifest-build: ## build cni-dropgz multiplat container manifest. - $(MAKE) manifest-build \ - PLATFORMS="$(PLATFORMS)" \ - IMAGE=$(CNI_DROPGZ_IMAGE) \ - TAG=$(CNI_DROPGZ_VERSION) \ - OS_VERSIONS="$(OS_VERSIONS)" - -cni-dropgz-manifest-push: ## push cni-dropgz multiplat container manifest - $(MAKE) manifest-push \ - IMAGE=$(CNI_DROPGZ_IMAGE) \ - TAG=$(CNI_DROPGZ_VERSION) - -cni-dropgz-skopeo-archive: ## export tar archive of cni-dropgz multiplat container manifest. - $(MAKE) manifest-skopeo-archive \ - IMAGE=$(CNI_DROPGZ_IMAGE) \ - TAG=$(CNI_DROPGZ_VERSION) - cns-manifest-build: ## build azure-cns multiplat container manifest. $(MAKE) manifest-build \ PLATFORMS="$(PLATFORMS)" \ IMAGE=$(CNS_IMAGE) \ - TAG=$(CNS_VERSION) \ - OS_VERSIONS="$(OS_VERSIONS)" + TAG=$(CNS_VERSION) cns-manifest-push: ## push cns multiplat container manifest $(MAKE) manifest-push \ @@ -719,8 +725,7 @@ npm-manifest-build: ## build azure-npm multiplat container manifest. $(MAKE) manifest-build \ PLATFORMS="$(PLATFORMS)" \ IMAGE=$(NPM_IMAGE) \ - TAG=$(NPM_VERSION) \ - OS_VERSIONS="$(OS_VERSIONS)" + TAG=$(NPM_VERSION) npm-manifest-push: ## push multiplat container manifest $(MAKE) manifest-push \ @@ -737,7 +742,7 @@ npm-skopeo-archive: ## export tar archive of multiplat container manifest. # Create a CNI archive for the target platform. .PHONY: cni-archive -cni-archive: azure-vnet-binary azure-vnet-ipam-binary azure-vnet-ipamv6-binary azure-vnet-telemetry-binary +cni-archive: azure-vnet-binary azure-vnet-stateless-binary azure-vnet-ipam-binary azure-vnet-ipamv6-binary azure-vnet-telemetry-binary $(MKDIR) $(CNI_BUILD_DIR) cp cni/azure-$(GOOS).conflist $(CNI_BUILD_DIR)/10-azure.conflist cp telemetry/azure-vnet-telemetry.config $(CNI_BUILD_DIR)/azure-vnet-telemetry.config @@ -791,11 +796,6 @@ ifeq ($(GOOS),windows) cd $(CNI_BAREMETAL_BUILD_DIR) && $(ARCHIVE_CMD) $(CNI_BAREMETAL_ARCHIVE_NAME) azure-vnet$(EXE_EXT) 10-azure.conflist endif -# Create a CNM archive for the target platform. -.PHONY: cnm-archive -cnm-archive: cnm-binary - cd $(CNM_BUILD_DIR) && $(ARCHIVE_CMD) $(CNM_ARCHIVE_NAME) azure-vnet-plugin$(EXE_EXT) - # Create a cli archive for the target platform. .PHONY: acncli-archive acncli-archive: acncli-binary @@ -813,9 +813,7 @@ cns-archive: azure-cns-binary # Create a NPM archive for the target platform. Only Linux is supported for now. .PHONY: npm-archive npm-archive: azure-npm-binary -ifeq ($(GOOS),linux) cd $(NPM_BUILD_DIR) && $(ARCHIVE_CMD) $(NPM_ARCHIVE_NAME) azure-npm$(EXE_EXT) -endif # Create a azure-ipam archive for the target platform. .PHONY: azure-ipam-archive @@ -825,6 +823,22 @@ ifeq ($(GOOS),linux) cd $(AZURE_IPAM_BUILD_DIR) && $(ARCHIVE_CMD) $(AZURE_IPAM_ARCHIVE_NAME) azure-ipam$(EXE_EXT) endif +# Create a azure-ip-masq-merger archive for the target platform. +.PHONY: azure-ip-masq-merger-archive +azure-ip-masq-merger-archive: azure-ip-masq-merger-binary +ifeq ($(GOOS),linux) + $(MKDIR) $(AZURE_IP_MASQ_MERGER_BUILD_DIR) + cd $(AZURE_IP_MASQ_MERGER_BUILD_DIR) && $(ARCHIVE_CMD) $(AZURE_IP_MASQ_MERGER_ARCHIVE_NAME) azure-ip-masq-merger$(EXE_EXT) +endif + +# Create a azure-iptables-monitor archive for the target platform. 
+.PHONY: azure-iptables-monitor-archive +azure-iptables-monitor-archive: azure-iptables-monitor-binary +ifeq ($(GOOS),linux) + $(MKDIR) $(AZURE_IPTABLES_MONITOR_BUILD_DIR) + cd $(AZURE_IPTABLES_MONITOR_BUILD_DIR) && $(ARCHIVE_CMD) $(AZURE_IPTABLES_MONITOR_ARCHIVE_NAME) azure-iptables-monitor$(EXE_EXT) +endif + # Create a ipv6-hp-bpf archive for the target platform. .PHONY: ipv6-hp-bpf-archive ipv6-hp-bpf-archive: ipv6-hp-bpf-binary @@ -860,6 +874,8 @@ workspace: ## Set up the Go workspace. go work init go work use . go work use ./azure-ipam + go work use ./azure-ip-masq-merger + go work use ./azure-iptables-monitor go work use ./build/tools go work use ./dropgz go work use ./zapai @@ -872,11 +888,11 @@ RESTART_CASE ?= false # CNI type is a key to direct the types of state validation done on a cluster. CNI_TYPE ?= cilium -# COVER_FILTER omits folders with all files tagged with one of 'unit', '!ignore_uncovered', or '!ignore_autogenerated' -test-all: ## run all unit tests. - @$(eval COVER_FILTER=`go list --tags ignore_uncovered,ignore_autogenerated $(COVER_PKG)/... | tr '\n' ','`) - @echo Test coverpkg: $(COVER_FILTER) - go test -mod=readonly -buildvcs=false -tags "unit" --skip 'TestE2E*' -coverpkg=$(COVER_FILTER) -race -covermode atomic -coverprofile=coverage.out $(COVER_PKG)/... +test-all: test-azure-ipam test-azure-ip-masq-merger test-azure-iptables-monitor test-main ## run all unit tests. + +test-main: + go test -mod=readonly -buildvcs=false -tags "unit" --skip 'TestE2E*' -race -covermode atomic -coverprofile=coverage-main.out $(COVER_PKG)/... + go tool cover -func=coverage-main.out test-integration: ## run all integration tests. AZURE_IPAM_VERSION=$(AZURE_IPAM_VERSION) \ @@ -888,7 +904,7 @@ test-load: ## run all load tests AZURE_IPAM_VERSION=$(AZURE_IPAM_VERSION) \ CNI_VERSION=$(CNI_VERSION) CNS_VERSION=$(CNS_VERSION) \ - go test -timeout 30m -race -tags=load ./test/integration/load... -v + go test -timeout 40m -race -tags=load ./test/integration/load... -v test-validate-state: cd test/integration/load && go test -mod=readonly -count=1 -timeout 30m -tags load --skip 'TestE2E*' -run ^TestValidateState @@ -907,7 +923,13 @@ test-extended-cyclonus: ## run the cyclonus test for npm. cd .. 
test-azure-ipam: ## run the unit test for azure-ipam - cd $(AZURE_IPAM_DIR) && go test + cd $(AZURE_IPAM_DIR) && go test -race -covermode atomic -coverprofile=../coverage-azure-ipam.out && go tool cover -func=../coverage-azure-ipam.out + +test-azure-ip-masq-merger: ## run the unit test for azure-ip-masq-merger + cd $(AZURE_IP_MASQ_MERGER_DIR) && go test -race -covermode atomic -coverprofile=../coverage-azure-ip-masq-merger.out && go tool cover -func=../coverage-azure-ip-masq-merger.out + +test-azure-iptables-monitor: ## run the unit test for azure-iptables-monitor + cd $(AZURE_IPTABLES_MONITOR_DIR) && go test -race -covermode atomic -coverprofile=../coverage-azure-iptables-monitor.out && go tool cover -func=../coverage-azure-iptables-monitor.out kind: kind create cluster --config ./test/kind/kind.yaml @@ -924,6 +946,11 @@ test-k8se2e-only: ## Run k8s network conformance test, use TYPE=basic for only d ##@ Utilities +dockerfiles: tools ## Render all Dockerfile templates with current state of world + @make -f build/images.mk render PATH=cns + @make -f build/images.mk render PATH=cni + + $(REPO_ROOT)/.git/hooks/pre-push: @ln -s $(REPO_ROOT)/.hooks/pre-push $(REPO_ROOT)/.git/hooks/ @echo installed pre-push hook @@ -983,10 +1010,15 @@ $(MOCKGEN): $(TOOLS_DIR)/go.mod mockgen: $(MOCKGEN) ## Build mockgen +$(RENDERKIT): $(TOOLS_DIR)/go.mod + cd $(TOOLS_DIR); go mod download; go build -o bin/renderkit github.com/orellazri/renderkit + +renderkit: $(RENDERKIT) ## Build renderkit + clean-tools: rm -r build/tools/bin -tools: acncli gocov gocov-xml go-junit-report golangci-lint gofumpt protoc ## Build bins for build tools +tools: acncli gocov gocov-xml go-junit-report golangci-lint gofumpt protoc renderkit ## Build bins for build tools ##@ Help diff --git a/README.md b/README.md index 04041eac2f..75630fdbbb 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,6 @@ This repository contains container networking services and plugins for Linux and Windows containers running on Azure: * [Azure CNI network and IPAM plugins](docs/cni.md) for Kubernetes. -* [Azure CNM (libnetwork) network and IPAM plugins](docs/cnm.md) for Docker Engine. **(MAINTENANCE MODE)** * [Azure NPM - Kubernetes Network Policy Manager](docs/npm.md) (Linux and (preview) Windows Server 2022) The `azure-vnet` network plugins connect containers to your [Azure VNET](https://docs.microsoft.com/en-us/azure/virtual-network/virtual-networks-overview), to take advantage of Azure SDN capabilities. The `azure-vnet-ipam` IPAM plugins provide address management functionality for container IP addresses allocated from Azure VNET address space. 
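Aside: the `test-all` pipeline stage above stitches the per-module cover profiles together with `(echo "mode: atomic"; tail -q -n +2 coverage-*.out) > coverage.cover`. For readers unfamiliar with Go's textual cover-profile format (each file begins with a single `mode:` header line, followed by one line per covered block), a rough standalone Go equivalent of that one-liner is sketched below. It is illustrative only and not part of this change; the `mergeProfiles` helper and the file names are invented for the sketch, and it assumes all profiles were produced with the same `-covermode`.

```go
package main

import (
	"bufio"
	"fmt"
	"log"
	"os"
	"path/filepath"
	"strings"
)

// mergeProfiles emulates `(echo "mode: atomic"; tail -q -n +2 coverage-*.out)`:
// write a single mode header, then every profile line except each file's own header.
func mergeProfiles(pattern, outPath string) error {
	files, err := filepath.Glob(pattern)
	if err != nil {
		return err
	}
	out, err := os.Create(outPath)
	if err != nil {
		return err
	}
	defer out.Close()

	w := bufio.NewWriter(out)
	defer w.Flush()
	fmt.Fprintln(w, "mode: atomic")

	for _, name := range files {
		f, err := os.Open(name)
		if err != nil {
			return err
		}
		s := bufio.NewScanner(f)
		for s.Scan() {
			if line := s.Text(); !strings.HasPrefix(line, "mode:") {
				fmt.Fprintln(w, line) // keep coverage lines, drop per-file headers
			}
		}
		f.Close()
		if err := s.Err(); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	if err := mergeProfiles("coverage-*.out", "coverage.cover"); err != nil {
		log.Fatal(err)
	}
}
```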
diff --git a/aitelemetry/api.go b/aitelemetry/api.go index 9808d0a59d..f10bdbd35a 100644 --- a/aitelemetry/api.go +++ b/aitelemetry/api.go @@ -5,11 +5,13 @@ import ( "github.com/Azure/azure-container-networking/common" "github.com/microsoft/ApplicationInsights-Go/appinsights" + "github.com/microsoft/ApplicationInsights-Go/appinsights/contracts" ) // Application trace/log structure type Report struct { Message string + Level contracts.SeverityLevel Context string AppVersion string CustomDimensions map[string]string diff --git a/aitelemetry/connection_string_parser.go b/aitelemetry/connection_string_parser.go new file mode 100644 index 0000000000..b4e7223a22 --- /dev/null +++ b/aitelemetry/connection_string_parser.go @@ -0,0 +1,52 @@ +package aitelemetry + +import ( + "strings" + + "github.com/pkg/errors" +) + +type connectionVars struct { + instrumentationKey string + ingestionURL string +} + +func (c *connectionVars) String() string { + return "InstrumentationKey=" + c.instrumentationKey + ";IngestionEndpoint=" + c.ingestionURL +} + +func parseConnectionString(connectionString string) (*connectionVars, error) { + connectionVars := &connectionVars{} + + if connectionString == "" { + return nil, errors.New("connection string cannot be empty") + } + + pairs := strings.Split(connectionString, ";") + for _, pair := range pairs { + kv := strings.SplitN(pair, "=", 2) + if len(kv) != 2 { + return nil, errors.Errorf("invalid connection string format: %s", pair) + } + key, value := strings.TrimSpace(kv[0]), strings.TrimSpace(kv[1]) + + if key == "" { + return nil, errors.Errorf("key in connection string cannot be empty") + } + + switch strings.ToLower(key) { + case "instrumentationkey": + connectionVars.instrumentationKey = value + case "ingestionendpoint": + if value != "" { + connectionVars.ingestionURL = value + "v2.1/track" + } + } + } + + if connectionVars.instrumentationKey == "" || connectionVars.ingestionURL == "" { + return nil, errors.Errorf("missing required fields in connection string: %s", connectionVars) + } + + return connectionVars, nil +} diff --git a/aitelemetry/connection_string_parser_test.go b/aitelemetry/connection_string_parser_test.go new file mode 100644 index 0000000000..adac67cd06 --- /dev/null +++ b/aitelemetry/connection_string_parser_test.go @@ -0,0 +1,66 @@ +package aitelemetry + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +const connectionString = "InstrumentationKey=0000-0000-0000-0000-0000;IngestionEndpoint=https://ingestion.endpoint.com/;LiveEndpoint=https://live.endpoint.com/;ApplicationId=1111-1111-1111-1111-1111" + +func TestParseConnectionString(t *testing.T) { + tests := []struct { + name string + connectionString string + want *connectionVars + wantErr bool + }{ + { + name: "Valid connection string and instrumentation key", + connectionString: connectionString, + want: &connectionVars{ + instrumentationKey: "0000-0000-0000-0000-0000", + ingestionURL: "https://ingestion.endpoint.com/v2.1/track", + }, + wantErr: false, + }, + { + name: "Invalid connection string format", + connectionString: "InvalidConnectionString", + want: nil, + wantErr: true, + }, + { + name: "Valid instrumentation key with missing ingestion endpoint", + connectionString: "InstrumentationKey=0000-0000-0000-0000-0000;IngestionEndpoint=", + want: nil, + wantErr: true, + }, + { + name: "Missing instrumentation key with valid ingestion endpoint", + connectionString: "InstrumentationKey=;IngestionEndpoint=https://ingestion.endpoint.com/", + want: nil, + wantErr: true, + 
}, + { + name: "Empty connection string", + connectionString: "", + want: nil, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := parseConnectionString(tt.connectionString) + if tt.wantErr { + require.Error(t, err, "Expected error but got none") + } else { + require.NoError(t, err, "Expected no error but got one") + require.NotNil(t, got, "Expected a non-nil result") + require.Equal(t, tt.want.instrumentationKey, got.instrumentationKey, "Instrumentation Key does not match") + require.Equal(t, tt.want.ingestionURL, got.ingestionURL, "Ingestion URL does not match") + } + }) + } +} diff --git a/aitelemetry/telemetrywrapper.go b/aitelemetry/telemetrywrapper.go index 36c59c212a..20dde0baa2 100644 --- a/aitelemetry/telemetrywrapper.go +++ b/aitelemetry/telemetrywrapper.go @@ -3,6 +3,7 @@ package aitelemetry import ( "fmt" "os" + "path/filepath" "runtime" "time" @@ -11,6 +12,8 @@ import ( "github.com/Azure/azure-container-networking/processlock" "github.com/Azure/azure-container-networking/store" "github.com/microsoft/ApplicationInsights-Go/appinsights" + "github.com/microsoft/ApplicationInsights-Go/appinsights/contracts" + "github.com/pkg/errors" ) const ( @@ -27,6 +30,7 @@ const ( azurePublicCloudStr = "AzurePublicCloud" hostNameKey = "hostname" defaultTimeout = 10 + maxCloseTimeoutInSeconds = 30 defaultBatchIntervalInSecs = 15 defaultBatchSizeInBytes = 32768 defaultGetEnvRetryCount = 5 @@ -34,6 +38,19 @@ const ( defaultRefreshTimeoutInSecs = 10 ) +var MetadataFile = filepath.Join(os.TempDir(), "azuremetadata.json") + +type Level = contracts.SeverityLevel + +const ( + DebugLevel Level = contracts.Verbose + InfoLevel Level = contracts.Information + WarnLevel Level = contracts.Warning + ErrorLevel Level = contracts.Error + PanicLevel Level = contracts.Critical + FatalLevel Level = contracts.Critical +) + var debugMode bool func setAIConfigDefaults(config *AIConfig) { @@ -85,7 +102,7 @@ func getMetadata(th *telemetryHandle) { // check if metadata in memory otherwise initiate wireserver request for { - metadata, err = common.GetHostMetadata(metadataFile) + metadata, err = common.GetHostMetadata(MetadataFile) if err == nil || th.disableMetadataRefreshThread { break } @@ -104,14 +121,14 @@ func getMetadata(th *telemetryHandle) { th.metadata = metadata th.rwmutex.Unlock() - lockclient, err := processlock.NewFileLock(metadataFile + store.LockExtension) + lockclient, err := processlock.NewFileLock(MetadataFile + store.LockExtension) if err != nil { log.Printf("Error initializing file lock:%v", err) return } // Save metadata retrieved from wireserver to a file - kvs, err := store.NewJsonFileStore(metadataFile, lockclient, nil) + kvs, err := store.NewJsonFileStore(MetadataFile, lockclient, nil) if err != nil { debugLog("[AppInsights] Error initializing kvs store: %v", err) return @@ -121,7 +138,7 @@ func getMetadata(th *telemetryHandle) { log.Errorf("getMetadata: Not able to acquire lock:%v", err) return } - metadataErr := common.SaveHostMetadata(th.metadata, metadataFile) + metadataErr := common.SaveHostMetadata(th.metadata, MetadataFile) err = kvs.Unlock() if err != nil { log.Errorf("getMetadata: Not able to release lock:%v", err) @@ -145,7 +162,7 @@ func isPublicEnvironment(url string, retryCount, waitTimeInSecs int) (bool, erro return true, nil } else if err == nil { debugLog("[AppInsights] This is not azure public cloud:%s", cloudName) - return false, fmt.Errorf("Not an azure public cloud: %s", cloudName) + return false, errors.Errorf("not an 
azure public cloud: %s", cloudName)
 	}
 
 	debugLog("GetAzureCloud returned err :%v", err)
@@ -198,11 +215,51 @@ func NewAITelemetry(
 	return th, nil
 }
 
+// NewWithConnectionString creates a telemetry handle with a user-specified appinsights connection string.
+func NewWithConnectionString(connectionString string, aiConfig AIConfig) (TelemetryHandle, error) {
+	debugMode = aiConfig.DebugMode
+
+	if connectionString == "" {
+		debugLog("Empty connection string")
+		return nil, errors.New("AI connection string is empty")
+	}
+
+	setAIConfigDefaults(&aiConfig)
+
+	connectionVars, err := parseConnectionString(connectionString)
+	if err != nil {
+		debugLog("Error parsing connection string: %v", err)
+		return nil, err
+	}
+
+	telemetryConfig := appinsights.NewTelemetryConfiguration(connectionVars.instrumentationKey)
+	telemetryConfig.EndpointUrl = connectionVars.ingestionURL
+	telemetryConfig.MaxBatchSize = aiConfig.BatchSize
+	telemetryConfig.MaxBatchInterval = time.Duration(aiConfig.BatchInterval) * time.Second
+
+	th := &telemetryHandle{
+		client:                       appinsights.NewTelemetryClientFromConfig(telemetryConfig),
+		appName:                      aiConfig.AppName,
+		appVersion:                   aiConfig.AppVersion,
+		diagListener:                 messageListener(),
+		disableMetadataRefreshThread: aiConfig.DisableMetadataRefreshThread,
+		refreshTimeout:               aiConfig.RefreshTimeout,
+	}
+
+	if th.disableMetadataRefreshThread {
+		getMetadata(th)
+	} else {
+		go getMetadata(th)
+	}
+
+	return th, nil
+}
+
 // TrackLog function sends report (trace) to appinsights resource. It overrides few of the existing columns with app information
 // and for rest it uses custom dimesion
 func (th *telemetryHandle) TrackLog(report Report) {
 	// Initialize new trace message
-	trace := appinsights.NewTraceTelemetry(report.Message, appinsights.Warning)
+	trace := appinsights.NewTraceTelemetry(report.Message, report.Level)
 
 	// will be empty if cns used as telemetry service for cni
 	if th.appVersion == "" {
@@ -330,8 +387,35 @@ func (th *telemetryHandle) Close(timeout int) {
 		timeout = defaultTimeout
 	}
 
+	// max wait is the greater of the timeout and maxCloseTimeoutInSeconds
+	maxWaitTimeInSeconds := timeout
+	if maxWaitTimeInSeconds < maxCloseTimeoutInSeconds {
+		maxWaitTimeInSeconds = maxCloseTimeoutInSeconds
+	}
+
 	// wait for items to be sent otherwise timeout
-	<-th.client.Channel().Close(time.Duration(timeout) * time.Second)
+	// similar to the example in the appinsights-go repo: https://github.com/microsoft/ApplicationInsights-Go#shutdown
+	timer := time.NewTimer(time.Duration(maxWaitTimeInSeconds) * time.Second)
+	defer timer.Stop()
+	select {
+	case <-th.client.Channel().Close(time.Duration(timeout) * time.Second):
+		// timeout specified for retries.
+
+		// If we got here, then all telemetry was submitted
+		// successfully, and we can proceed to exiting.
+
+	case <-timer.C:
+		// absolute timeout. This covers any
+		// previous telemetry submission that may not have
+		// completed before Close was called.
+
+		// There are a number of reasons we could have
+		// reached here. We gave it a go, but telemetry
+		// submission failed somewhere. Perhaps old events
+		// were still retrying, or perhaps we're throttled.
+		// Either way, we don't want to wait around for it
+		// to complete, so let's just exit.
+ } // Remove diganostic message listener if th.diagListener != nil { diff --git a/aitelemetry/telemetrywrapper_linux.go b/aitelemetry/telemetrywrapper_linux.go deleted file mode 100644 index 17bf74fc7c..0000000000 --- a/aitelemetry/telemetrywrapper_linux.go +++ /dev/null @@ -1,5 +0,0 @@ -package aitelemetry - -const ( - metadataFile = "/tmp/azuremetadata.json" -) diff --git a/aitelemetry/telemetrywrapper_test.go b/aitelemetry/telemetrywrapper_test.go index 2ccb175217..363f9b90a8 100644 --- a/aitelemetry/telemetrywrapper_test.go +++ b/aitelemetry/telemetrywrapper_test.go @@ -15,7 +15,7 @@ import ( ) var ( - th TelemetryHandle + aiConfig AIConfig hostAgentUrl = "localhost:3501" getCloudResponse = "AzurePublicCloud" httpURL = "http://" + hostAgentUrl @@ -54,6 +54,18 @@ func TestMain(m *testing.M) { return } + aiConfig = AIConfig{ + AppName: "testapp", + AppVersion: "v1.0.26", + BatchSize: 4096, + BatchInterval: 2, + RefreshTimeout: 10, + GetEnvRetryCount: 1, + GetEnvRetryWaitTimeInSecs: 2, + DebugMode: true, + DisableMetadataRefreshThread: true, + } + exitCode := m.Run() if runtime.GOOS == "linux" { @@ -75,45 +87,50 @@ func handleGetCloud(w http.ResponseWriter, req *http.Request) { w.Write([]byte(getCloudResponse)) } +func initTelemetry(_ *testing.T) (th1, th2 TelemetryHandle) { + th1, err1 := NewAITelemetry(httpURL, "00ca2a73-c8d6-4929-a0c2-cf84545ec225", aiConfig) + if err1 != nil { + fmt.Printf("Error initializing AI telemetry: %v", err1) + } + + th2, err2 := NewWithConnectionString(connectionString, aiConfig) + if err2 != nil { + fmt.Printf("Error initializing AI telemetry with connection string: %v", err2) + } + + return +} + func TestEmptyAIKey(t *testing.T) { var err error - aiConfig := AIConfig{ - AppName: "testapp", - AppVersion: "v1.0.26", - BatchSize: 4096, - BatchInterval: 2, - RefreshTimeout: 10, - DebugMode: true, - DisableMetadataRefreshThread: true, - } _, err = NewAITelemetry(httpURL, "", aiConfig) if err == nil { - t.Errorf("Error intializing AI telemetry:%v", err) + t.Errorf("Error initializing AI telemetry:%v", err) + } + + _, err = NewWithConnectionString("", aiConfig) + if err == nil { + t.Errorf("Error initializing AI telemetry with connection string:%v", err) } } func TestNewAITelemetry(t *testing.T) { var err error - aiConfig := AIConfig{ - AppName: "testapp", - AppVersion: "v1.0.26", - BatchSize: 4096, - BatchInterval: 2, - RefreshTimeout: 10, - GetEnvRetryCount: 1, - GetEnvRetryWaitTimeInSecs: 2, - DebugMode: true, - DisableMetadataRefreshThread: true, + th1, th2 := initTelemetry(t) + if th1 == nil { + t.Errorf("Error initializing AI telemetry: %v", err) } - th, err = NewAITelemetry(httpURL, "00ca2a73-c8d6-4929-a0c2-cf84545ec225", aiConfig) - if th == nil { - t.Errorf("Error intializing AI telemetry: %v", err) + + if th2 == nil { + t.Errorf("Error initializing AI telemetry with connection string: %v", err) } } func TestTrackMetric(t *testing.T) { + th1, th2 := initTelemetry(t) + metric := Metric{ Name: "test", Value: 1.0, @@ -121,10 +138,13 @@ func TestTrackMetric(t *testing.T) { } metric.CustomDimensions["dim1"] = "col1" - th.TrackMetric(metric) + th1.TrackMetric(metric) + th2.TrackMetric(metric) } func TestTrackLog(t *testing.T) { + th1, th2 := initTelemetry(t) + report := Report{ Message: "test", Context: "10a", @@ -132,10 +152,13 @@ func TestTrackLog(t *testing.T) { } report.CustomDimensions["dim1"] = "col1" - th.TrackLog(report) + th1.TrackLog(report) + th2.TrackLog(report) } func TestTrackEvent(t *testing.T) { + th1, th2 := initTelemetry(t) + event := Event{ 
EventName: "testEvent", ResourceID: "SomeResourceId", @@ -144,35 +167,20 @@ func TestTrackEvent(t *testing.T) { event.Properties["P1"] = "V1" event.Properties["P2"] = "V2" - th.TrackEvent(event) + th1.TrackEvent(event) + th2.TrackEvent(event) } func TestFlush(t *testing.T) { - th.Flush() -} + th1, th2 := initTelemetry(t) -func TestClose(t *testing.T) { - th.Close(10) + th1.Flush() + th2.Flush() } -func TestClosewithoutSend(t *testing.T) { - var err error - - aiConfig := AIConfig{ - AppName: "testapp", - AppVersion: "v1.0.26", - BatchSize: 4096, - BatchInterval: 2, - DisableMetadataRefreshThread: true, - RefreshTimeout: 10, - GetEnvRetryCount: 1, - GetEnvRetryWaitTimeInSecs: 2, - } - - thtest, err := NewAITelemetry(httpURL, "00ca2a73-c8d6-4929-a0c2-cf84545ec225", aiConfig) - if thtest == nil { - t.Errorf("Error intializing AI telemetry:%v", err) - } +func TestClose(t *testing.T) { + th1, th2 := initTelemetry(t) - thtest.Close(10) + th1.Close(10) + th2.Close(10) } diff --git a/aitelemetry/telemetrywrapper_windows.go b/aitelemetry/telemetrywrapper_windows.go deleted file mode 100644 index cd49357705..0000000000 --- a/aitelemetry/telemetrywrapper_windows.go +++ /dev/null @@ -1,8 +0,0 @@ -package aitelemetry - -import ( - "os" - "path/filepath" -) - -var metadataFile = filepath.FromSlash(os.Getenv("TEMP")) + "\\azuremetadata.json" diff --git a/azure-ip-masq-merger/.golangci.yml b/azure-ip-masq-merger/.golangci.yml new file mode 100644 index 0000000000..ccad1f382b --- /dev/null +++ b/azure-ip-masq-merger/.golangci.yml @@ -0,0 +1,40 @@ +issues: + max-same-issues: 0 + max-issues-per-linter: 0 + new-from-rev: origin/master +linters: + presets: + - bugs + - error + - format + - performance + - unused + disable: + - maligned + - scopelint + enable: + - exportloopref + - goconst + - gocritic + - gocyclo + - gofmt + - goprintffuncname + - gosimple + - lll + - misspell + - nakedret + - promlinter + - revive +linters-settings: + gocritic: + enabled-tags: + - "diagnostic" + - "style" + - "performance" + disabled-checks: + - "hugeParam" + govet: + enable: + - shadow + lll: + line-length: 200 diff --git a/azure-ip-masq-merger/Dockerfile b/azure-ip-masq-merger/Dockerfile new file mode 100644 index 0000000000..fc0592ae69 --- /dev/null +++ b/azure-ip-masq-merger/Dockerfile @@ -0,0 +1,16 @@ +ARG ARCH +ARG OS + +# skopeo inspect docker://mcr.microsoft.com/oss/go/microsoft/golang:1.23.2-azurelinux3.0 --format "{{.Name}}@{{.Digest}}" +FROM --platform=linux/${ARCH} mcr.microsoft.com/oss/go/microsoft/golang@sha256:f1f0cbd464ae4cd9d41176d47f1f9fe16a6965425871f817587314e3a04576ec AS go + +FROM go AS azure-ip-masq-merger +ARG OS +ARG VERSION +WORKDIR /azure-ip-masq-merger +COPY ./azure-ip-masq-merger . +RUN GOOS=$OS CGO_ENABLED=0 go build -a -o /go/bin/ip-masq-merger -trimpath -ldflags "-X main.version="$VERSION"" -gcflags="-dwarflocationlists=true" . + +FROM scratch AS linux +COPY --from=azure-ip-masq-merger /go/bin/ip-masq-merger azure-ip-masq-merger +ENTRYPOINT [ "/azure-ip-masq-merger" ] diff --git a/azure-ip-masq-merger/README.md b/azure-ip-masq-merger/README.md new file mode 100644 index 0000000000..fd10afd70a --- /dev/null +++ b/azure-ip-masq-merger/README.md @@ -0,0 +1,69 @@ +# azure-ip-masq-merger + +`azure-ip-masq-merger` is a utility for merging multiple ip-masq-agent configuration files into a single, valid configuration for use in Kubernetes clusters. 
+
+## Description
+
+The goal of this program is to periodically scan a directory for configuration fragments (YAML or JSON files whose names start with `ip-masq`), validate and merge them, and write the resulting configuration to a target directory for consumption. This makes it possible to combine non-masquerade CIDRs and related options across multiple files, for example when one ip-masq config is managed by the cloud provider and another is supplied by the user.
+
+## Usage
+
+Follow the steps below to build and run the program:
+
+1. Build the binary using `make`:
+   ```bash
+   make azure-ip-masq-merger
+   ```
+   or build an image:
+   ```bash
+   make azure-ip-masq-merger-image
+   ```
+
+2. Deploy or copy the binary to your node(s).
+
+3. Prepare your configuration fragments in the input directory (see below for defaults). Each file should be named with the prefix `ip-masq` and contain valid YAML or JSON for the ip-masq-agent config.
+
+4. Start the program with:
+   ```bash
+   ./azure-ip-masq-merger --input=/etc/config/ --output=/etc/merged-config/
+   ```
+   - The `--input` flag specifies the directory to scan for config fragments. Default: `/etc/config/`
+   - The `--output` flag specifies where to write the merged config. Default: `/etc/merged-config/`
+
+5. The merged configuration is written to the output directory as `ip-masq-agent`. If no valid configs are found, any existing merged config is removed.
+
+## Manual Testing
+
+You can test the merger locally by creating sample config files in the input directory and running the binary against it.
+
+## Configuration File Format
+
+Each config fragment should be a YAML or JSON file that may have the following fields:
+```yaml
+nonMasqueradeCIDRs:
+  - 10.0.0.0/8
+  - 192.168.0.0/16
+masqLinkLocal: true
+masqLinkLocalIPv6: false
+```
+- `nonMasqueradeCIDRs`: List of CIDRs that should not be masqueraded. Appended across configs.
+- `masqLinkLocal`: Boolean to enable/disable masquerading of link-local addresses. OR'd across configs.
+- `masqLinkLocalIPv6`: Boolean to enable/disable masquerading of IPv6 link-local addresses. OR'd across configs.
+
+## Debugging
+
+Logs are output to standard error. Increase verbosity with the `-v` flag:
+```bash
+./azure-ip-masq-merger -v 2
+```
+
+## Development
+
+To run tests:
+```bash
+go test ./...
+``` +or at the repository level: +```bash +make test-azure-ip-masq-merger +``` diff --git a/azure-ip-masq-merger/go.mod b/azure-ip-masq-merger/go.mod new file mode 100644 index 0000000000..4c59ce1a8e --- /dev/null +++ b/azure-ip-masq-merger/go.mod @@ -0,0 +1,47 @@ +module github.com/Azure/azure-container-networking/azure-ip-masq-merger + +go 1.23.0 + +require ( + github.com/stretchr/testify v1.9.0 + gopkg.in/yaml.v2 v2.4.0 + k8s.io/apimachinery v0.31.3 + k8s.io/component-base v0.31.3 + k8s.io/klog/v2 v2.130.1 +) + +require ( + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_golang v1.19.1 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + github.com/spf13/cobra v1.8.1 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/x448/float16 v0.8.4 // indirect + golang.org/x/net v0.38.0 // indirect + golang.org/x/sys v0.31.0 // indirect + golang.org/x/text v0.23.0 // indirect + google.golang.org/protobuf v1.34.2 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect +) diff --git a/azure-ip-masq-merger/go.sum b/azure-ip-masq-merger/go.sum new file mode 100644 index 0000000000..0435b8a605 --- /dev/null +++ b/azure-ip-masq-merger/go.sum @@ -0,0 +1,130 @@ +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
+github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= 
+github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= +golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/apimachinery v0.31.3 h1:6l0WhcYgasZ/wk9ktLq5vLaoXJJr5ts6lkaQzgeYPq4= +k8s.io/apimachinery v0.31.3/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/component-base v0.31.3 h1:DMCXXVx546Rfvhj+3cOm2EUxhS+EyztH423j+8sOwhQ= +k8s.io/component-base v0.31.3/go.mod h1:xME6BHfUOafRgT0rGVBGl7TuSg8Z9/deT7qq6w7qjIU= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 h1:MDF6h2H/h4tbzmtIKTuctcwZmY0tY9mD9fNT47QO6HI= +k8s.io/utils v0.0.0-20240921022957-49e7df575cb6/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 
h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
+sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
+sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
+sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
diff --git a/azure-ip-masq-merger/ip_masq_merger.go b/azure-ip-masq-merger/ip_masq_merger.go
new file mode 100644
index 0000000000..ea35a39104
--- /dev/null
+++ b/azure-ip-masq-merger/ip_masq_merger.go
@@ -0,0 +1,277 @@
+package main
+
+import (
+	utiljson "encoding/json"
+	"errors"
+	"flag"
+	"fmt"
+	"io/fs"
+	"net"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"gopkg.in/yaml.v2"
+	utilyaml "k8s.io/apimachinery/pkg/util/yaml"
+	"k8s.io/component-base/logs"
+	"k8s.io/component-base/version/verflag"
+	"k8s.io/klog/v2"
+)
+
+// version is populated by make during build.
+var version string
+
+var (
+	// directory scanned for yaml or json config fragments
+	configPath = flag.String("input", "/etc/config/", `Name of the directory with configs to merge`)
+	// merged config written to this directory
+	outputPath = flag.String("output", "/etc/merged-config/", `Name of the directory to output the merged config`)
+	// errors
+	errAlignment = errors.New("ip not aligned to CIDR block")
+)
+
+const (
+	// config files in this path must start with this to be read
+	configFilePrefix = "ip-masq"
+	// error formats
+	cidrParseErrFmt = "CIDR %q could not be parsed: %w"
+	cidrAlignErrFmt = "CIDR %q is not aligned to a CIDR block, ip: %q network: %q: %w"
+)
+
+type FileSystem interface {
+	ReadFile(name string) ([]byte, error)
+	WriteFile(name string, data []byte, perm fs.FileMode) error
+	ReadDir(dirname string) ([]fs.DirEntry, error)
+	DeleteFile(name string) error
+}
+
+type OSFileSystem struct{}
+
+func (OSFileSystem) ReadFile(name string) ([]byte, error) {
+	return os.ReadFile(name) // nolint
+}
+
+func (OSFileSystem) WriteFile(name string, data []byte, perm fs.FileMode) error {
+	return os.WriteFile(name, data, perm) // nolint
+}
+
+func (OSFileSystem) ReadDir(dirname string) ([]fs.DirEntry, error) {
+	return os.ReadDir(dirname) // nolint
+}
+
+func (OSFileSystem) DeleteFile(name string) error {
+	return os.Remove(name) // nolint
+}
+
+// how often, in seconds, the config fragments are re-read and re-merged
+var resyncInterval = flag.Int("resync-interval", 60, "How often to refresh the config (in seconds)")
+
+// MasqConfig object
+type MasqConfig struct {
+	NonMasqueradeCIDRs []string `json:"nonMasqueradeCIDRs"`
+	MasqLinkLocal      bool     `json:"masqLinkLocal"`
+	MasqLinkLocalIPv6  bool     `json:"masqLinkLocalIPv6"`
+}
+
+// EmptyMasqConfig returns a MasqConfig with empty values
+func EmptyMasqConfig() *MasqConfig {
+	return &MasqConfig{
+		NonMasqueradeCIDRs: make([]string, 0),
+		MasqLinkLocal:      false,
+		MasqLinkLocalIPv6:  false,
+	}
+}
+
+// MasqDaemon object
+type MasqDaemon struct {
+	config *MasqConfig
+}
+
+// NewMasqDaemon returns a MasqDaemon with default values
+func NewMasqDaemon(c *MasqConfig) *MasqDaemon {
+	return &MasqDaemon{
+		config: c,
+	}
+}
+
+func main() {
+	klog.InitFlags(nil)
+
+	flag.Parse()
+
+	c := EmptyMasqConfig()
+
+	logs.InitLogs()
+	defer logs.FlushLogs()
+
+	klog.Infof("Version: %s", version)
+
+	verflag.PrintAndExitIfRequested()
+
+	m := NewMasqDaemon(c)
+	err := m.Run()
+	if err != nil {
+		klog.Fatalf("the daemon encountered an error: %v", err)
+	}
+}
+
+func (m *MasqDaemon) Run() error {
+	// Periodically resync
+	for {
+		// resync config
+		err := m.osMergeConfig()
+		if err != nil {
+			return fmt.Errorf("error merging configuration: %w", err)
+		}
+
+		time.Sleep(time.Duration(*resyncInterval) * time.Second)
+	}
+}
+
+func (m *MasqDaemon) osMergeConfig() error {
+	var fs FileSystem = OSFileSystem{}
+	return m.mergeConfig(fs)
+}
+
+// mergeConfig merges every valid fragment under configPath into a single config and
+// writes it to outputPath; with no valid fragments, any existing merged config is removed.
+// Errors if a fragment is found but cannot be parsed.
+func (m *MasqDaemon) mergeConfig(fileSys FileSystem) error {
+	var err error
+	c := EmptyMasqConfig()
+	defer func() {
+		if err == nil {
+			json, marshalErr := utiljson.Marshal(c)
+			if marshalErr == nil {
+				klog.V(2).Infof("using config: %s", string(json))
+			} else {
+				klog.V(2).Info("could not marshal final config")
+			}
+		}
+	}()
+
+	files, err := fileSys.ReadDir(*configPath)
+	if err != nil {
+		return fmt.Errorf("failed to read config directory, error: %w", err)
+	}
+
+	var configAdded bool
+	for _, file := range files {
+		if !strings.HasPrefix(file.Name(), configFilePrefix) {
+			continue
+		}
+		var yaml []byte
+		var json []byte
+
+		klog.V(2).Infof("syncing config file %q at %q", file.Name(), *configPath)
+		yaml, err = fileSys.ReadFile(filepath.Join(*configPath, file.Name()))
+		if err != nil {
+			return fmt.Errorf("failed to read config file %q, error: %w", file.Name(), err)
+		}
+
+		json, err = utilyaml.ToJSON(yaml)
+		if err != nil {
+			return fmt.Errorf("failed to convert config file %q to JSON, error: %w", file.Name(), err)
+		}
+
+		var newConfig MasqConfig
+		err = utiljson.Unmarshal(json, &newConfig)
+		if err != nil {
+			return fmt.Errorf("failed to unmarshal config file %q, error: %w", file.Name(), err)
+		}
+
+		err = newConfig.validate()
+		if err != nil {
+			return fmt.Errorf("config file %q is invalid: %w", file.Name(), err)
+		}
+		c.merge(&newConfig)
+
+		configAdded = true
+	}
+
+	mergedPath := filepath.Join(*outputPath, "ip-masq-agent")
+
+	if !configAdded {
+		// no valid config files found to merge -- remove any existing merged config file so ip masq agent uses defaults
+		// the default config map is different from an empty config map
+		klog.V(2).Infof("no valid config files found at %q, removing existing config map", *configPath)
+		err = fileSys.DeleteFile(mergedPath)
+		if err != nil && !errors.Is(err, fs.ErrNotExist) {
+			return fmt.Errorf("failed to remove existing config file: %w", err)
+		}
+		return nil
+	}
+
+	// apply new config
+	m.config = c
+
+	out, err := yaml.Marshal(m.config)
+	if err != nil {
+		return fmt.Errorf("failed to marshal merged config to YAML: %w", err)
+	}
+
+	err = fileSys.WriteFile(mergedPath, out, 0o644)
+	if err != nil {
+		return fmt.Errorf("failed to write merged config: %w", err)
+	}
+
+	return nil
+}
+
+func (c *MasqConfig) validate() error {
+	// check CIDRs are valid
+	for _, cidr := range c.NonMasqueradeCIDRs {
+		err := validateCIDR(cidr)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// merge combines the existing MasqConfig with newConfig. The bools are OR'd together.
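+// CIDR lists are unioned with duplicates removed (see mergeCIDRs); once any
+// fragment sets a bool to true, the merged value stays true, so a later
+// fragment can never unset what an earlier one enabled.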
+func (c *MasqConfig) merge(newConfig *MasqConfig) {
+	if newConfig == nil {
+		return
+	}
+
+	if len(newConfig.NonMasqueradeCIDRs) > 0 {
+		c.NonMasqueradeCIDRs = mergeCIDRs(c.NonMasqueradeCIDRs, newConfig.NonMasqueradeCIDRs)
+	}
+
+	c.MasqLinkLocal = c.MasqLinkLocal || newConfig.MasqLinkLocal
+	c.MasqLinkLocalIPv6 = c.MasqLinkLocalIPv6 || newConfig.MasqLinkLocalIPv6
+}
+
+// mergeCIDRs merges two slices of CIDRs into one, ignoring duplicates
+func mergeCIDRs(cidrs1, cidrs2 []string) []string {
+	cidrsSet := map[string]struct{}{}
+
+	for _, cidr := range cidrs1 {
+		cidrsSet[cidr] = struct{}{}
+	}
+
+	for _, cidr := range cidrs2 {
+		cidrsSet[cidr] = struct{}{}
+	}
+
+	cidrsList := []string{}
+	for cidr := range cidrsSet {
+		cidrsList = append(cidrsList, cidr)
+	}
+
+	return cidrsList
+}
+
+func validateCIDR(cidr string) error {
+	// parse test
+	ip, ipnet, err := net.ParseCIDR(cidr)
+	if err != nil {
+		return fmt.Errorf(cidrParseErrFmt, cidr, err)
+	}
+	// alignment test
+	if !ip.Equal(ipnet.IP) {
+		return fmt.Errorf(cidrAlignErrFmt, cidr, ip, ipnet.String(), errAlignment)
+	}
+	return nil
+}
diff --git a/azure-ip-masq-merger/ip_masq_merger_test.go b/azure-ip-masq-merger/ip_masq_merger_test.go
new file mode 100644
index 0000000000..8dc1860c2b
--- /dev/null
+++ b/azure-ip-masq-merger/ip_masq_merger_test.go
@@ -0,0 +1,321 @@
+package main
+
+import (
+	"io/fs"
+	"path/filepath"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+	"gopkg.in/yaml.v2"
+)
+
+const (
+	errInvalidConfig = "expected error for invalid config file, got nil"
+	errUnexpected    = "unexpected error: %v"
+)
+
+type mockFile struct {
+	data []byte
+	mode fs.FileMode
+}
+
+type mockFS struct {
+	files map[string]mockFile
+	dirs  map[string][]string // directory to file names
+}
+
+func newMockFS() *mockFS {
+	return &mockFS{
+		files: make(map[string]mockFile),
+		dirs:  make(map[string][]string),
+	}
+}
+
+func (m *mockFS) ReadFile(path string) ([]byte, error) {
+	f, ok := m.files[path]
+	if !ok {
+		return nil, fs.ErrNotExist
+	}
+	return f.data, nil
+}
+
+// WriteFile creates a file and directory entry for our mock
+func (m *mockFS) WriteFile(path string, data []byte, perm fs.FileMode) error {
+	m.files[path] = mockFile{data: data, mode: perm}
+	dir := filepath.Dir(path)
+	m.dirs[dir] = append(m.dirs[dir], filepath.Base(path))
+	return nil
+}
+
+func (m *mockFS) ReadDir(dirname string) ([]fs.DirEntry, error) {
+	entries := []fs.DirEntry{}
+	for _, fname := range m.dirs[dirname] {
+		entries = append(entries, mockDirEntry{name: fname})
+	}
+	return entries, nil
+}
+
+func (m *mockFS) DeleteFile(path string) error {
+	if _, ok := m.files[path]; !ok {
+		return fs.ErrNotExist
+	}
+	delete(m.files, path)
+	return nil
+}
+
+type mockDirEntry struct{ name string }
+
+func (m mockDirEntry) Name() string               { return m.name }
+func (m mockDirEntry) IsDir() bool                { return false }
+func (m mockDirEntry) Type() fs.FileMode          { return 0 }
+func (m mockDirEntry) Info() (fs.FileInfo, error) { return nil, nil }
+
+func TestMergeConfig(t *testing.T) {
+	type file struct {
+		name string
+		data string
+	}
+	type want struct {
+		config     *MasqConfig
+		expectFile bool
+		expectErr  bool
+	}
+	tests := []struct {
+		name  string
+		files []file
+		want  want
+	}{
+		{
+			name:  "no config files",
+			files: nil,
+			want: want{
+				config:     nil,
+				expectFile: false,
+				expectErr:  false,
+			},
+		},
+		{
+			name: "one valid config",
+			files: []file{
+				{
+					name: "ip-masq-foo.yaml",
+					data: `{"nonMasqueradeCIDRs":["10.0.0.0/8"],"masqLinkLocal":true,"masqLinkLocalIPv6":false}`,
+				},
+ }, + want: want{ + config: &MasqConfig{ + NonMasqueradeCIDRs: []string{"10.0.0.0/8"}, + MasqLinkLocal: true, + MasqLinkLocalIPv6: false, + }, + expectFile: true, + expectErr: false, + }, + }, + { + name: "two valid configs merged", + files: []file{ + { + name: "ip-masq-a.yaml", + data: `{"nonMasqueradeCIDRs":["10.0.0.0/8"],"masqLinkLocal":false,"masqLinkLocalIPv6":true}`, + }, + { + name: "ip-masq-b.yaml", + data: `{"nonMasqueradeCIDRs":["192.168.0.0/16"],"masqLinkLocal":true,"masqLinkLocalIPv6":false}`, + }, + }, + want: want{ + config: &MasqConfig{ + NonMasqueradeCIDRs: []string{"10.0.0.0/8", "192.168.0.0/16"}, + MasqLinkLocal: true, + MasqLinkLocalIPv6: true, + }, + expectFile: true, + expectErr: false, + }, + }, + { + name: "two valid configs merged yaml", + files: []file{ + { + name: "ip-masq-a.yaml", + data: `nonMasqueradeCIDRs: ["10.0.0.0/8"] +masqLinkLocal: false +masqLinkLocalIPv6: true`, + }, + { + name: "ip-masq-b.yaml", + data: `nonMasqueradeCIDRs: ["192.168.0.0/16"] +masqLinkLocal: true +masqLinkLocalIPv6: false`, + }, + }, + want: want{ + config: &MasqConfig{ + NonMasqueradeCIDRs: []string{"10.0.0.0/8", "192.168.0.0/16"}, + MasqLinkLocal: true, + MasqLinkLocalIPv6: true, + }, + expectFile: true, + expectErr: false, + }, + }, + { + name: "invalid config file", + files: []file{ + { + name: "ip-masq-bad.yaml", + data: "not valid yaml", + }, + }, + want: want{ + config: nil, + expectFile: false, + expectErr: true, + }, + }, + { + name: "valid and invalid config files", + files: []file{ + { + name: "ip-masq-good.yaml", + data: `{"nonMasqueradeCIDRs":["10.0.0.0/8"],"masqLinkLocal":true,"masqLinkLocalIPv6":false}`, + }, + { + name: "ip-masq-bad.yaml", + data: "not valid yaml", + }, + }, + want: want{ + config: nil, + expectFile: false, + expectErr: true, + }, + }, + { + name: "misaligned cidr invalid config file", + files: []file{ + { + name: "ip-masq-bad.yaml", + data: `{"nonMasqueradeCIDRs":["10.0.0.4/8"],"masqLinkLocal":true,"masqLinkLocalIPv6":false}`, + }, + }, + want: want{ + config: nil, + expectFile: false, + expectErr: true, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fs := newMockFS() + var configFiles []string + for _, f := range tt.files { + full := filepath.Join(*configPath, f.name) + fs.files[full] = mockFile{data: []byte(f.data)} + configFiles = append(configFiles, f.name) + } + fs.dirs[*configPath] = configFiles + + daemon := &MasqDaemon{} + err := daemon.mergeConfig(fs) + if tt.want.expectErr { + require.Error(t, err, errInvalidConfig) + return + } + require.NoError(t, err, errUnexpected, err) + + mergedPath := filepath.Join(*outputPath, "ip-masq-agent") + mergedFile, ok := fs.files[mergedPath] + if tt.want.expectFile { + require.True(t, ok, "expected merged config file at %q", mergedPath) + var got MasqConfig + require.NoError(t, yaml.Unmarshal(mergedFile.data, &got)) + require.True(t, cidrSetEqual(got.NonMasqueradeCIDRs, tt.want.config.NonMasqueradeCIDRs), "unexpected merged CIDRs: got %v, want %v", got.NonMasqueradeCIDRs, tt.want.config.NonMasqueradeCIDRs) + require.Equal(t, tt.want.config.MasqLinkLocal, got.MasqLinkLocal, "unexpected MasqLinkLocal") + require.Equal(t, tt.want.config.MasqLinkLocalIPv6, got.MasqLinkLocalIPv6, "unexpected MasqLinkLocalIPv6") + } else { + require.False(t, ok, "expected no merged config file, but found one") + } + }) + } +} + +// cidrSetEqual checks if the two string slices have the same elements regardless of ordering +func cidrSetEqual(a, b []string) bool { + if len(a) != len(b) { + return 
false + } + set := make(map[string]struct{}, len(a)) + for _, v := range a { + set[v] = struct{}{} + } + for _, v := range b { + if _, ok := set[v]; !ok { + return false + } + } + return true +} + +func TestMergeCIDRs(t *testing.T) { + a := []string{"10.0.0.0/8", "192.168.1.0/24"} + b := []string{"192.168.1.0/24", "172.16.0.0/12"} + got := mergeCIDRs(a, b) + want := map[string]struct{}{ + "10.0.0.0/8": {}, + "192.168.1.0/24": {}, + "172.16.0.0/12": {}, + } + require.Equal(t, len(want), len(got), "expected %d, got %d", len(want), len(got)) + for _, cidr := range got { + _, ok := want[cidr] + require.True(t, ok, "unexpected CIDR: %s", cidr) + } +} + +func TestValidateCIDR(t *testing.T) { + valid := []string{"10.0.0.0/8", "192.168.1.0/24", "2001:db8::/32"} + for _, cidr := range valid { + require.NoError(t, validateCIDR(cidr), "expected valid for %q", cidr) + } + invalid := []string{"10.0.0.1/8", "notacidr", "10.0.0.0/33"} + for _, cidr := range invalid { + require.Error(t, validateCIDR(cidr), "expected error for %q", cidr) + } +} + +func TestMergeConfigAddAndRemove(t *testing.T) { + fs := newMockFS() + cfgA := `{"nonMasqueradeCIDRs":["10.0.0.0/8"],"masqLinkLocal":false,"masqLinkLocalIPv6":true}` + cfgB := `{"nonMasqueradeCIDRs":["192.168.0.0/16"],"masqLinkLocal":true,"masqLinkLocalIPv6":false}` + fs.files[filepath.Join(*configPath, "ip-masq-a.yaml")] = mockFile{data: []byte(cfgA)} + fs.files[filepath.Join(*configPath, "ip-masq-b.yaml")] = mockFile{data: []byte(cfgB)} + fs.dirs[*configPath] = []string{"ip-masq-a.yaml", "ip-masq-b.yaml"} + + daemon := &MasqDaemon{} + // merge with both configs present + err := daemon.mergeConfig(fs) + require.NoError(t, err) + mergedPath := filepath.Join(*outputPath, "ip-masq-agent") + mergedFile, ok := fs.files[mergedPath] + require.True(t, ok, "expected merged config file at %q", mergedPath) + var got MasqConfig + require.NoError(t, yaml.Unmarshal(mergedFile.data, &got)) + require.True(t, cidrSetEqual(got.NonMasqueradeCIDRs, []string{"10.0.0.0/8", "192.168.0.0/16"}), "unexpected merged CIDRs: %v", got.NonMasqueradeCIDRs) + require.True(t, got.MasqLinkLocal, "expected MasqLinkLocal true") + require.True(t, got.MasqLinkLocalIPv6, "expected MasqLinkLocalIPv6 true") + + // remove both config files + delete(fs.files, filepath.Join(*configPath, "ip-masq-a.yaml")) + delete(fs.files, filepath.Join(*configPath, "ip-masq-b.yaml")) + fs.dirs[*configPath] = []string{} + + // merge again, should remove merged config file + err = daemon.mergeConfig(fs) + require.NoError(t, err) + _, ok = fs.files[mergedPath] + require.False(t, ok, "expected merged config file to be deleted, but it still exists") +} diff --git a/azure-ipam/Dockerfile b/azure-ipam/Dockerfile index a6e53cc7cb..50a91413fb 100644 --- a/azure-ipam/Dockerfile +++ b/azure-ipam/Dockerfile @@ -3,11 +3,11 @@ ARG DROPGZ_VERSION=v0.0.12 ARG OS_VERSION ARG OS -# skopeo inspect docker://mcr.microsoft.com/oss/go/microsoft/golang:1.22-cbl-mariner2.0 --format "{{.Name}}@{{.Digest}}" -FROM --platform=linux/${ARCH} mcr.microsoft.com/oss/go/microsoft/golang@sha256:c062e5e23f2d172a8fd590adcd171499af7005cae344a36284255f26e5ce4f8a AS go +# skopeo inspect docker://mcr.microsoft.com/oss/go/microsoft/golang:1.23.2-azurelinux3.0 --format "{{.Name}}@{{.Digest}}" +FROM --platform=linux/${ARCH} mcr.microsoft.com/oss/go/microsoft/golang@sha256:f1f0cbd464ae4cd9d41176d47f1f9fe16a6965425871f817587314e3a04576ec AS go -# skopeo inspect docker://mcr.microsoft.com/cbl-mariner/base/core:2.0 --format "{{.Name}}@{{.Digest}}" -FROM 
--platform=linux/${ARCH} mcr.microsoft.com/cbl-mariner/base/core@sha256:a490e0b0869dc570ae29782c2bc17643aaaad1be102aca83ce0b96e0d0d2d328 AS mariner-core +# skopeo inspect docker://mcr.microsoft.com/azurelinux/base/core:3.0 --format "{{.Name}}@{{.Digest}}" +FROM --platform=linux/${ARCH} mcr.microsoft.com/azurelinux/base/core@sha256:b46476be0b5c9691ad20f78871819950c01433bdfad81d72c61618f4a6202b25 AS mariner-core FROM go AS azure-ipam ARG OS @@ -37,15 +37,7 @@ FROM scratch AS linux COPY --from=dropgz /go/bin/dropgz dropgz ENTRYPOINT [ "/dropgz" ] -# skopeo inspect --override-os windows docker://mcr.microsoft.com/windows/nanoserver:ltsc2019 --format "{{.Name}}@{{.Digest}}" -FROM mcr.microsoft.com/windows/nanoserver@sha256:7f6649348a11655e3576463fd6d55c29248f97405f8e643cab2409009339f520 AS ltsc2019 - -# skopeo inspect --override-os windows docker://mcr.microsoft.com/windows/nanoserver:ltsc2022 --format "{{.Name}}@{{.Digest}}" -FROM mcr.microsoft.com/windows/nanoserver@sha256:244113e50a678a25a63930780f9ccafd22e1a37aa9e3d93295e4cebf0f170a11 AS ltsc2022 - -# skopeo inspect --override-os windows docker://mcr.microsoft.com/windows/nanoserver:ltsc2025 --format "{{.Name}}@{{.Digest}}" ## 2025 isn't tagged yet -FROM mcr.microsoft.com/windows/nanoserver/insider@sha256:67e0ab7f3a79cd73be4a18bae24659c03b294aed0dbeaa624feb3810931f0bd2 AS ltsc2025 - -FROM ${OS_VERSION} AS windows +# skopeo inspect docker://mcr.microsoft.com/oss/kubernetes/windows-host-process-containers-base-image:v1.0.0 --format "{{.Name}}@{{.Digest}}" +FROM mcr.microsoft.com/oss/kubernetes/windows-host-process-containers-base-image@sha256:b4c9637e032f667c52d1eccfa31ad8c63f1b035e8639f3f48a510536bf34032b as windows COPY --from=dropgz /go/bin/dropgz dropgz.exe ENTRYPOINT [ "/dropgz.exe" ] diff --git a/azure-ipam/go.mod b/azure-ipam/go.mod index 90b8481c5d..ca180da258 100644 --- a/azure-ipam/go.mod +++ b/azure-ipam/go.mod @@ -1,51 +1,52 @@ module github.com/Azure/azure-container-networking/azure-ipam -go 1.21 +go 1.23.2 require ( - github.com/Azure/azure-container-networking v1.5.21 - github.com/containernetworking/cni v1.2.3 - github.com/containernetworking/plugins v1.5.1 + github.com/Azure/azure-container-networking v1.7.0 + github.com/containernetworking/cni v1.3.0 + github.com/containernetworking/plugins v1.6.2 github.com/pkg/errors v0.9.1 - github.com/stretchr/testify v1.9.0 + github.com/stretchr/testify v1.10.0 go.uber.org/zap v1.27.0 gopkg.in/natefinch/lumberjack.v2 v2.2.1 ) require ( code.cloudfoundry.org/clock v1.1.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets v0.12.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect - github.com/Masterminds/semver v1.5.0 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect - github.com/Microsoft/hcsshim v0.12.3 // indirect + github.com/Microsoft/hcsshim v0.13.0 // indirect github.com/avast/retry-go/v3 v3.1.1 // indirect + github.com/avast/retry-go/v4 v4.6.1 // indirect 
github.com/beorn7/perks v1.0.1 // indirect github.com/billgraziano/dpapi v0.5.0 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/containerd/cgroups/v3 v3.0.2 // indirect - github.com/containerd/errdefs v0.1.0 // indirect - github.com/coreos/go-iptables v0.7.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/containerd/cgroups/v3 v3.0.3 // indirect + github.com/containerd/errdefs v0.3.0 // indirect + github.com/containerd/errdefs/pkg v0.3.0 // indirect + github.com/containerd/typeurl/v2 v2.2.0 // indirect + github.com/coreos/go-iptables v0.8.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/emicklei/go-restful/v3 v3.11.0 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/go-logr/logr v1.4.1 // indirect - github.com/go-openapi/jsonpointer v0.20.0 // indirect - github.com/go-openapi/jsonreference v0.20.2 // indirect - github.com/go-openapi/swag v0.22.4 // indirect + github.com/emicklei/go-restful/v3 v3.11.2 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-openapi/jsonpointer v0.20.2 // indirect + github.com/go-openapi/jsonreference v0.20.4 // indirect + github.com/go-openapi/swag v0.22.7 // indirect github.com/gofrs/uuid v4.4.0+incompatible // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-jwt/jwt/v5 v5.2.1 // indirect + github.com/golang-jwt/jwt/v5 v5.2.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/imdario/mergo v0.3.16 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kylelemons/godebug v1.1.0 // indirect @@ -57,38 +58,36 @@ require ( github.com/patrickmn/go-cache v2.1.0+incompatible // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_golang v1.18.0 // indirect - github.com/prometheus/client_model v0.5.0 // indirect - github.com/prometheus/common v0.46.0 // indirect - github.com/prometheus/procfs v0.12.0 // indirect + github.com/prometheus/client_golang v1.22.0 // indirect + github.com/prometheus/client_model v0.6.2 // indirect + github.com/prometheus/common v0.64.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/sirupsen/logrus v1.9.3 // indirect - github.com/spf13/pflag v1.0.5 // indirect - github.com/vishvananda/netns v0.0.4 // indirect + github.com/vishvananda/netns v0.0.5 // indirect go.opencensus.io v0.24.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.24.0 // indirect - golang.org/x/exp v0.0.0-20231006140011-7918f672742d // indirect - golang.org/x/net v0.26.0 // indirect - golang.org/x/oauth2 v0.16.0 // indirect - golang.org/x/sync v0.7.0 // indirect - golang.org/x/sys v0.21.0 // indirect - golang.org/x/term v0.21.0 // indirect - golang.org/x/text v0.16.0 // indirect - golang.org/x/time v0.5.0 // indirect - google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect - google.golang.org/grpc v1.62.0 // indirect - 
google.golang.org/protobuf v1.33.0 // indirect + golang.org/x/crypto v0.39.0 // indirect + golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect + golang.org/x/net v0.41.0 // indirect + golang.org/x/oauth2 v0.30.0 // indirect + golang.org/x/sync v0.15.0 // indirect + golang.org/x/sys v0.33.0 // indirect + golang.org/x/term v0.32.0 // indirect + golang.org/x/text v0.26.0 // indirect + golang.org/x/time v0.12.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 // indirect + google.golang.org/grpc v1.73.0 // indirect + google.golang.org/protobuf v1.36.6 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.29.0 // indirect - k8s.io/apimachinery v0.29.0 // indirect - k8s.io/client-go v0.29.0 // indirect - k8s.io/klog/v2 v2.120.1 // indirect - k8s.io/kube-openapi v0.0.0-20231214164306-ab13479f8bf8 // indirect - k8s.io/utils v0.0.0-20231127182322-b307cd553661 // indirect - sigs.k8s.io/controller-runtime v0.16.3 // indirect + k8s.io/api v0.30.7 // indirect + k8s.io/apimachinery v0.30.7 // indirect + k8s.io/client-go v0.30.7 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect + k8s.io/utils v0.0.0-20240310230437-4693a0247e57 // indirect + sigs.k8s.io/controller-runtime v0.18.4 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect sigs.k8s.io/yaml v1.4.0 // indirect diff --git a/azure-ipam/go.sum b/azure-ipam/go.sum index dbe7f37165..dbe69daa11 100644 --- a/azure-ipam/go.sum +++ b/azure-ipam/go.sum @@ -2,77 +2,84 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= code.cloudfoundry.org/clock v1.1.0 h1:XLzC6W3Ah/Y7ht1rmZ6+QfPdt1iGWEAAtIZXgiaj57c= code.cloudfoundry.org/clock v1.1.0/go.mod h1:yA3fxddT9RINQL2XHS7PS+OXxKCGhfrZmlNUCIM6AKo= -github.com/Azure/azure-container-networking v1.5.21 h1:/4VzLPuuzG6smnApvp26lBLITvkf0Gz9JTj6YTb9ERc= -github.com/Azure/azure-container-networking v1.5.21/go.mod h1:T3I+cXT7xCla+o1y/lrBxhmNZfdSSXIcrYXBCqy4rS0= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 h1:U2rTu3Ef+7w9FHKIAXM6ZyqF3UOWJZ12zIm8zECAFfg= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 h1:jBQA3cKT4L2rWMpgE7Yt3Hwh2aUj8KXjIGLxjHeYNNo= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg= +github.com/Azure/azure-container-networking v1.7.0 h1:8AzGE1K7SM9Aqk0esza8MCAP3RblTviiUWvJKXFaaCc= +github.com/Azure/azure-container-networking v1.7.0/go.mod h1:DzZTuVyd1tIwTjuSUyEeDwdZf5VNrHdqO7Gy9slxgqA= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 h1:Gt0j3wceWMwPmiazCa8MzMA0MfhmPIz0Qp0FJ6qcM0U= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0/go.mod h1:Ot/6aikWnKWi4l9QB7qVSwa8iMphQNqkWALMoNT3rzM= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.0 h1:j8BorDEigD8UFOSZQiSqAMOOleyQOOQPnUAwV+Ls1gA= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.0/go.mod 
h1:JdM5psgjfBf5fo2uWOZhflPWyDBZ/O/CNAH9CtsuZE4= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 h1:FPKJS1T+clwv+OLGt13a8UjqeRuh0O4SJ3lUriThc+4= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1/go.mod h1:j2chePtV91HrC22tGoRX3sGY42uF13WzmmV80/OdVAA= github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets v0.12.0 h1:xnO4sFyG8UH2fElBkcqLTOZsAajvKfnSlgBBW8dXYjw= github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets v0.12.0/go.mod h1:XD3DIOOVgBCO03OleB1fHjgktVRFxlT++KwKgIOewdM= github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 h1:FbH3BbSb4bvGluTesZZ+ttN/MDsnMmQP36OSnDuSXqw= github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1/go.mod h1:9V2j0jn9jDEkCkv8w/bKTNppX/d0FVA1ud77xCIP4KA= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= +github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= +github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs= +github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= -github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= -github.com/Microsoft/hcsshim v0.12.3 h1:LS9NXqXhMoqNCplK1ApmVSfB4UnVLRDWRapB6EIlxE0= -github.com/Microsoft/hcsshim v0.12.3/go.mod h1:Iyl1WVpZzr+UkzjekHZbV8o5Z9ZkxNGx6CtY2Qg/JVQ= +github.com/Microsoft/hcsshim v0.13.0 h1:/BcXOiS6Qi7N9XqUcv27vkIuVOkBEcWstd2pMlWSeaA= +github.com/Microsoft/hcsshim v0.13.0/go.mod h1:9KWJ/8DgU+QzYGupX4tzMhRQE8h6w90lH6HAaclpEok= github.com/avast/retry-go/v3 v3.1.1 h1:49Scxf4v8PmiQ/nY0aY3p0hDueqSmc7++cBbtiDGu2g= github.com/avast/retry-go/v3 v3.1.1/go.mod h1:6cXRK369RpzFL3UQGqIUp9Q7GDrams+KsYWrfNA1/nQ= +github.com/avast/retry-go/v4 v4.6.1 h1:VkOLRubHdisGrHnTu89g08aQEWEgRU7LVEop3GbIcMk= +github.com/avast/retry-go/v4 v4.6.1/go.mod h1:V6oF8njAwxJ5gRo1Q7Cxab24xs5NCWZBeaHHBklR8mA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/billgraziano/dpapi v0.5.0 h1:pcxA17vyjbDqYuxCFZbgL9tYIk2xgbRZjRaIbATwh+8= github.com/billgraziano/dpapi v0.5.0/go.mod h1:lmEcZjRfLCSbUTsRu8V2ti6Q17MvnKn3N9gQqzDdTh0= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= 
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/containerd/cgroups/v3 v3.0.2 h1:f5WFqIVSgo5IZmtTT3qVBo6TzI1ON6sycSBKkymb9L0= -github.com/containerd/cgroups/v3 v3.0.2/go.mod h1:JUgITrzdFqp42uI2ryGA+ge0ap/nxzYgkGmIcetmErE= -github.com/containerd/errdefs v0.1.0 h1:m0wCRBiu1WJT/Fr+iOoQHMQS/eP5myQ8lCv4Dz5ZURM= -github.com/containerd/errdefs v0.1.0/go.mod h1:YgWiiHtLmSeBrvpw+UfPijzbLaB77mEG1WwJTDETIV0= -github.com/containernetworking/cni v1.2.3 h1:hhOcjNVUQTnzdRJ6alC5XF+wd9mfGIUaj8FuJbEslXM= -github.com/containernetworking/cni v1.2.3/go.mod h1:DuLgF+aPd3DzcTQTtp/Nvl1Kim23oFKdm2okJzBQA5M= -github.com/containernetworking/plugins v1.5.1 h1:T5ji+LPYjjgW0QM+KyrigZbLsZ8jaX+E5J/EcKOE4gQ= -github.com/containernetworking/plugins v1.5.1/go.mod h1:MIQfgMayGuHYs0XdNudf31cLLAC+i242hNm6KuDGqCM= -github.com/coreos/go-iptables v0.7.0 h1:XWM3V+MPRr5/q51NuWSgU0fqMad64Zyxs8ZUoMsamr8= -github.com/coreos/go-iptables v0.7.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0= +github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0= +github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4= +github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= +github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= +github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= +github.com/containerd/typeurl/v2 v2.2.0 h1:6NBDbQzr7I5LHgp34xAXYF5DOTQDn05X58lsPEmzLso= +github.com/containerd/typeurl/v2 v2.2.0/go.mod h1:8XOOxnyatxSWuG8OfsZXVnAF4iZfedjS/8UHSPJnX4g= +github.com/containernetworking/cni v1.3.0 h1:v6EpN8RznAZj9765HhXQrtXgX+ECGebEYEmnuFjskwo= +github.com/containernetworking/cni v1.3.0/go.mod h1:Bs8glZjjFfGPHMw6hQu82RUgEPNGEaBb9KS5KtNMnJ4= +github.com/containernetworking/plugins v1.6.2 h1:pqP8Mq923TLyef5g97XfJ/xpDeVek4yF8A4mzy9Tc4U= +github.com/containernetworking/plugins v1.6.2/go.mod h1:SP5UG3jDO9LtmfbBJdP+nl3A1atOtbj2MBOYsnaxy64= +github.com/coreos/go-iptables v0.8.0 h1:MPc2P89IhuVpLI7ETL/2tx3XZ61VeICZjYqDEgNsPRc= +github.com/coreos/go-iptables v0.8.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= -github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= 
+github.com/emicklei/go-restful/v3 v3.11.2 h1:1onLa9DcsMYO9P+CXaL0dStDqQ2EHHXLiz+BtnqkLAU= +github.com/emicklei/go-restful/v3 v3.11.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= -github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch/v5 v5.7.0 h1:nJqP7uwL84RJInrohHfW0Fx3awjbm8qZeFv0nW9SYGc= -github.com/evanphx/json-patch/v5 v5.7.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI= +github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= -github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ= -github.com/go-openapi/jsonpointer v0.20.0/go.mod h1:6PGzBjjIIumbLYysB73Klnms1mwnU4G3YHOECG3CedA= -github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= -github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= -github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q= +github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= +github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= +github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= +github.com/go-openapi/swag v0.22.7 h1:JWrc1uc/P9cSomxfnsFSVWoE1FW6bNbrVPmpQYpCcR8= +github.com/go-openapi/swag v0.22.7/go.mod h1:Gl91UqO+btAM0plGGxHqJcQZ1ZTy6jbmridBTsDy8A0= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 
h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= @@ -81,8 +88,8 @@ github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1 github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= -github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8= +github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= @@ -99,10 +106,8 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -111,29 +116,29 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod 
h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg= -github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= -github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU= +github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -159,11 +164,11 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= -github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= +github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg= +github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= -github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= +github.com/onsi/gomega v1.36.0 h1:Pb12RlruUtj4XUuPUqeEWc6j5DkVVVA49Uf6YLfC95Y= +github.com/onsi/gomega v1.36.0/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= github.com/patrickmn/go-cache 
v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= @@ -173,21 +178,23 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= -github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= +github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= +github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= -github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= -github.com/prometheus/common v0.46.0 h1:doXzt5ybi1HBKpsZOL0sSkaNHJJqkyfEWZGGqqScV0Y= -github.com/prometheus/common v0.46.0/go.mod h1:Tp0qkxpb9Jsg54QMe+EAmqXkSV7Evdy1BTn+g2pa/hQ= -github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= -github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4= +github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/redis/go-redis/v9 v9.8.0 h1:q3nRvjrlge/6UD7eTu/DSg2uYiU2mCL0G/uzBWqhicI= +github.com/redis/go-redis/v9 v9.8.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -196,14 +203,13 @@ 
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= -github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= -github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= +github.com/vishvananda/netns v0.0.5 h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zdEY= +github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -215,18 +221,16 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= +golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= -golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod 
v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -237,47 +241,36 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= +golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= -golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= +golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys 
v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= -golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= +golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= +golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= +golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= +golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= +golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= +golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -286,29 +279,26 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc= +golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= -google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 h1:AjyfHzEPEFp/NpvfN5g+KDla3EMojjhRVZc1i7cj+oM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 h1:e0AIkUUhxyBKh6ssZNrAMeqhA7RKUj42346d1y02i2g= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.62.0 h1:HQKZ/fa1bXkX1oFOvSjmZEUL8wLSaZTjCcLAlmZRtdk= -google.golang.org/grpc v1.62.0/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= +google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok= +google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -318,10 +308,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.36.6 
h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= @@ -342,20 +330,20 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.29.0 h1:NiCdQMY1QOp1H8lfRyeEf8eOwV6+0xA6XEE44ohDX2A= -k8s.io/api v0.29.0/go.mod h1:sdVmXoz2Bo/cb77Pxi71IPTSErEW32xa4aXwKH7gfBA= -k8s.io/apimachinery v0.29.0 h1:+ACVktwyicPz0oc6MTMLwa2Pw3ouLAfAon1wPLtG48o= -k8s.io/apimachinery v0.29.0/go.mod h1:eVBxQ/cwiJxH58eK/jd/vAk4mrxmVlnpBH5J2GbMeis= -k8s.io/client-go v0.29.0 h1:KmlDtFcrdUzOYrBhXHgKw5ycWzc3ryPX5mQe0SkG3y8= -k8s.io/client-go v0.29.0/go.mod h1:yLkXH4HKMAywcrD82KMSmfYg2DlE8mepPR4JGSo5n38= -k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= -k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20231214164306-ab13479f8bf8 h1:yHNkNuLjht7iq95pO9QmbjOWCguvn8mDe3lT78nqPkw= -k8s.io/kube-openapi v0.0.0-20231214164306-ab13479f8bf8/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= -k8s.io/utils v0.0.0-20231127182322-b307cd553661 h1:FepOBzJ0GXm8t0su67ln2wAZjbQ6RxQGZDnzuLcrUTI= -k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/controller-runtime v0.16.3 h1:2TuvuokmfXvDUamSx1SuAOO3eTyye+47mJCigwG62c4= -sigs.k8s.io/controller-runtime v0.16.3/go.mod h1:j7bialYoSn142nv9sCOJmQgDXQXxnroFU4VnX/brVJ0= +k8s.io/api v0.30.7 h1:wB2eHI+IptVYsz5WsAQpI6+Dqi3+11wEWBqIh4fh980= +k8s.io/api v0.30.7/go.mod h1:bR0EwbmhYmJvUoeza7ZzBUmYCrVXccQ9JOdfv0BxhH0= +k8s.io/apimachinery v0.30.7 h1:CoQFxvzPFKwU1eJGN/8LgM3ZJBC3hKgvwGqRrL43uIY= +k8s.io/apimachinery v0.30.7/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= +k8s.io/client-go v0.30.7 h1:DQRfuGWxDzxPEyyiTE/fxzAsZcj2p9sbc5671njR52w= +k8s.io/client-go v0.30.7/go.mod h1:oED9+njB91ExCc4BNPAotniB7WH1ig7CmiBx5pVA1yw= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/utils v0.0.0-20240310230437-4693a0247e57 h1:gbqbevonBh57eILzModw6mrkbwM0gQBEuevE/AaBsHY= +k8s.io/utils v0.0.0-20240310230437-4693a0247e57/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHvm5BZw= +sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 
h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
diff --git a/azure-iptables-monitor/Dockerfile b/azure-iptables-monitor/Dockerfile
new file mode 100644
index 0000000000..eb6c6be056
--- /dev/null
+++ b/azure-iptables-monitor/Dockerfile
@@ -0,0 +1,28 @@
+ARG ARCH
+
+# mcr.microsoft.com/azurelinux/base/core:3.0
+FROM mcr.microsoft.com/azurelinux/base/core@sha256:9948138108a3d69f1dae62104599ac03132225c3b7a5ac57b85a214629c8567d AS mariner-core
+
+# mcr.microsoft.com/azurelinux/distroless/minimal:3.0
+FROM mcr.microsoft.com/azurelinux/distroless/minimal@sha256:0801b80a0927309572b9adc99bd1813bc680473175f6e8175cd4124d95dbd50c AS mariner-distroless
+
+# skopeo inspect docker://mcr.microsoft.com/oss/go/microsoft/golang:1.23.2-azurelinux3.0 --format "{{.Name}}@{{.Digest}}"
+FROM --platform=linux/${ARCH} mcr.microsoft.com/oss/go/microsoft/golang@sha256:f1f0cbd464ae4cd9d41176d47f1f9fe16a6965425871f817587314e3a04576ec AS go
+
+
+FROM go AS azure-iptables-monitor
+ARG OS
+ARG VERSION
+WORKDIR /azure-iptables-monitor
+COPY ./azure-iptables-monitor .
+RUN GOOS=$OS CGO_ENABLED=0 go build -a -o /go/bin/iptables-monitor -trimpath -ldflags "-s -w -X main.version="$VERSION"" -gcflags="-dwarflocationlists=true" .
+
+FROM mariner-core AS iptables
+RUN tdnf install -y iptables
+
+FROM mariner-distroless AS linux
+COPY --from=iptables /usr/sbin/*tables* /usr/sbin/
+COPY --from=iptables /usr/lib /usr/lib
+COPY --from=azure-iptables-monitor /go/bin/iptables-monitor azure-iptables-monitor
+
+ENTRYPOINT ["/azure-iptables-monitor"]
diff --git a/azure-iptables-monitor/README.md b/azure-iptables-monitor/README.md
new file mode 100644
index 0000000000..48aa3b060b
--- /dev/null
+++ b/azure-iptables-monitor/README.md
@@ -0,0 +1,64 @@
+# azure-iptables-monitor
+
+`azure-iptables-monitor` is a utility for monitoring iptables rules on Kubernetes nodes and labeling a ciliumnode resource based on whether the corresponding node contains user-defined iptables rules.
+
+## Description
+
+The goal of this program is to periodically scan iptables rules across all tables (nat, mangle, filter, raw, security) and determine if any rules exist that don't match expected patterns. When unexpected rules are found, the ciliumnode resource is labeled to indicate the presence of user-defined iptables rules.
+
+## Usage
+
+Follow the steps below to build and run the program:
+
+1. Build the binary using `make`:
+   ```bash
+   make azure-iptables-monitor
+   ```
+   or build an image:
+   ```bash
+   make azure-iptables-monitor-image
+   ```
+
+2. Deploy or copy the binary to your node(s).
+
+3. Prepare your allowed pattern files in the input directory. Each file should be named after an iptables table (`nat`, `mangle`, `filter`, `raw`, `security`) or `global` and contain regex patterns that match expected iptables rules. You may want to mount a ConfigMap for this purpose.
+
+4. Start the program with:
+   ```bash
+   ./azure-iptables-monitor --input=/etc/config/ --interval=300
+   ```
+   - The `--input` flag specifies the directory containing allowed regex pattern files. Default: `/etc/config/`
+   - The `--interval` flag specifies how often to check iptables rules, in seconds. Default: `300`
+   - The `--events` flag enables Kubernetes event creation for rule violations. Default: `false`
+   - The program must run in a Kubernetes environment, and the `NODE_NAME` environment variable must be set to the name of the current node.
+
+5.
The program will set the `user-iptables-rules` label to `true` on the specified ciliumnode resource if unexpected rules are found, or `false` if all rules match expected patterns. Proper RBAC is required for patching (patch for ciliumnodes, create for events, get for nodes). + + +## Pattern File Format + +Each pattern file should contain one regex pattern per line: +``` +^-A INPUT -i lo -j ACCEPT$ +^-A FORWARD -j DOCKER.* +^-A POSTROUTING -s 10\.0\.0\.0/8 -j MASQUERADE$ +``` + +- `global`: Patterns that can match rules in any iptables table +- `nat`, `mangle`, `filter`, `raw`, `security`: Patterns specific to each iptables table +- Empty lines are ignored +- Each line should be a valid Go regex pattern + +## Debugging + +Logs are output to standard error. Increase verbosity with the `-v` flag: +```bash +./azure-iptables-monitor -v 3 +``` + +## Development + +To run tests at the repository level: +```bash +make test-azure-iptables-monitor +``` diff --git a/azure-iptables-monitor/go.mod b/azure-iptables-monitor/go.mod new file mode 100644 index 0000000000..04f8a416f0 --- /dev/null +++ b/azure-iptables-monitor/go.mod @@ -0,0 +1,62 @@ +module github.com/Azure/azure-container-networking/azure-iptables-monitor + +go 1.23.0 + +require ( + github.com/coreos/go-iptables v0.8.0 + github.com/stretchr/testify v1.9.0 + k8s.io/api v0.31.3 + k8s.io/apimachinery v0.31.3 + k8s.io/client-go v0.31.3 + k8s.io/component-base v0.31.3 + k8s.io/klog/v2 v2.130.1 +) + +require ( + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.4 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_golang v1.19.1 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + github.com/spf13/cobra v1.8.1 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/x448/float16 v0.8.4 // indirect + golang.org/x/net v0.38.0 // indirect + golang.org/x/oauth2 v0.27.0 // indirect + golang.org/x/sys v0.31.0 // indirect + golang.org/x/term v0.30.0 // indirect + golang.org/x/text v0.23.0 // indirect + golang.org/x/time v0.3.0 // indirect + google.golang.org/protobuf v1.34.2 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + 
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect + k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect +) diff --git a/azure-iptables-monitor/go.sum b/azure-iptables-monitor/go.sum new file mode 100644 index 0000000000..561bcf8e39 --- /dev/null +++ b/azure-iptables-monitor/go.sum @@ -0,0 +1,182 @@ +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/coreos/go-iptables v0.8.0 h1:MPc2P89IhuVpLI7ETL/2tx3XZ61VeICZjYqDEgNsPRc= +github.com/coreos/go-iptables v0.8.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/protobuf v1.5.4 
h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM= +github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= 
+github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= +github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= +github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto 
v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= +golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= +golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/api v0.31.3 h1:umzm5o8lFbdN/hIXbrK9oRpOproJO62CV1zqxXrLgk8= +k8s.io/api v0.31.3/go.mod h1:UJrkIp9pnMOI9K2nlL6vwpxRzzEX5sWgn8kGQe92kCE= +k8s.io/apimachinery v0.31.3 h1:6l0WhcYgasZ/wk9ktLq5vLaoXJJr5ts6lkaQzgeYPq4= +k8s.io/apimachinery v0.31.3/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/client-go v0.31.3 h1:CAlZuM+PH2cm+86LOBemaJI/lQ5linJ6UFxKX/SoG+4= +k8s.io/client-go v0.31.3/go.mod h1:2CgjPUTpv3fE5dNygAr2NcM8nhHzXvxB8KL5gYc3kJs= +k8s.io/component-base v0.31.3 h1:DMCXXVx546Rfvhj+3cOm2EUxhS+EyztH423j+8sOwhQ= +k8s.io/component-base v0.31.3/go.mod h1:xME6BHfUOafRgT0rGVBGl7TuSg8Z9/deT7qq6w7qjIU= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 h1:MDF6h2H/h4tbzmtIKTuctcwZmY0tY9mD9fNT47QO6HI= +k8s.io/utils v0.0.0-20240921022957-49e7df575cb6/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/azure-iptables-monitor/iptables_monitor.go b/azure-iptables-monitor/iptables_monitor.go new file mode 100644 index 0000000000..9ccdc07acf --- /dev/null +++ b/azure-iptables-monitor/iptables_monitor.go @@ -0,0 +1,296 @@ +package main + +import ( + "bufio" + "context" + "flag" + "fmt" + "os" + "path/filepath" + "regexp" + "time" + + goiptables "github.com/coreos/go-iptables/iptables" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + 
"k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/component-base/logs" + "k8s.io/component-base/version/verflag" + "k8s.io/klog/v2" +) + +// Version is populated by make during build. +var version string + +var ( + configPath = flag.String("input", "/etc/config/", "Name of the directory with the allowed regex files") + checkInterval = flag.Int("interval", 300, "How often to check iptables rules (in seconds)") + sendEvents = flag.Bool("events", false, "Whether to send node events if unexpected iptables rules are detected") +) + +const label = "user-iptables-rules" + +type FileLineReader interface { + Read(filename string) ([]string, error) +} + +type OSFileLineReader struct{} + +// Read opens the file and reads each line into a new string, returning the contents as a slice of strings +// Empty lines are skipped +func (OSFileLineReader) Read(filename string) ([]string, error) { + file, err := os.Open(filename) + if err != nil { + return nil, fmt.Errorf("failed to open file %s: %w", filename, err) + } + defer file.Close() + + var lines []string + scanner := bufio.NewScanner(file) + for scanner.Scan() { + line := scanner.Text() + // Skip empty lines + if line != "" { + lines = append(lines, line) + } + } + + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("failed to scan file %s: %w", filename, err) + } + + return lines, nil +} + +// patchLabel sets a specified label to a certain value on a ciliumnode resource by patching it +// Requires proper rbac +func patchLabel(clientset dynamic.Interface, labelValue bool, nodeName string) error { + gvr := schema.GroupVersionResource{ + Group: "cilium.io", + Version: "v2", + Resource: "ciliumnodes", + } + + patch := []byte(fmt.Sprintf(`{ + "metadata": { + "labels": { + "%s": "%v" + } + } + }`, label, labelValue)) + + _, err := clientset.Resource(gvr). 
+ Patch(context.TODO(), nodeName, types.MergePatchType, patch, metav1.PatchOptions{}) + if err != nil { + return fmt.Errorf("failed to patch %s with label %s=%v: %w", nodeName, label, labelValue, err) + } + return nil +} + +// createNodeEvent creates a Kubernetes event for the specified node +func createNodeEvent(clientset *kubernetes.Clientset, nodeName, reason, message, eventType string) error { + node, err := clientset.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get node UID for %s: %w", nodeName, err) + } + + now := metav1.NewTime(time.Now()) + + event := &corev1.Event{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s.%d", nodeName, now.Unix()), + Namespace: "default", + }, + InvolvedObject: corev1.ObjectReference{ + Kind: "Node", + Name: nodeName, + UID: node.UID, // required for event to show up in node describe + APIVersion: "v1", + }, + Reason: reason, + Message: message, + Type: eventType, + FirstTimestamp: now, + LastTimestamp: now, + Count: 1, + Source: corev1.EventSource{ + Component: "azure-iptables-monitor", + }, + } + _, err = clientset.CoreV1().Events("default").Create( + context.TODO(), + event, + metav1.CreateOptions{}, + ) + if err != nil { + return fmt.Errorf("failed to create event for node %s: %w", nodeName, err) + } + + klog.V(2).Infof("Created event for node %s: %s - %s", nodeName, reason, message) + return nil +} + +type IPTablesClient interface { + ListChains(table string) ([]string, error) + List(table, chain string) ([]string, error) +} + +// GetRules returns all rules as a slice of strings for the specified tableName +func GetRules(client IPTablesClient, tableName string) ([]string, error) { + var allRules []string + chains, err := client.ListChains(tableName) + if err != nil { + return nil, fmt.Errorf("failed to list chains for table %s: %w", tableName, err) + } + + for _, chain := range chains { + rules, err := client.List(tableName, chain) + if err != nil { + return nil, fmt.Errorf("failed to list rules for table %s chain %s: %w", tableName, chain, err) + } + allRules = append(allRules, rules...) 
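+		// each entry is in iptables-save syntax (e.g. "-A CHAIN ..."), the same
+		// format the allowed regex patterns in the config files are written against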
+ } + + return allRules, nil +} + +// hasUnexpectedRules checks if any rules in currentRules don't match any of the allowedPatterns +// Returns true if there are unexpected rules, false if all rules match expected patterns +func hasUnexpectedRules(currentRules, allowedPatterns []string) bool { + foundUnexpectedRules := false + + // compile regex patterns + compiledPatterns := make([]*regexp.Regexp, 0, len(allowedPatterns)) + for _, pattern := range allowedPatterns { + compiled, err := regexp.Compile(pattern) + if err != nil { + klog.Errorf("Error compiling regex pattern '%s': %v", pattern, err) + continue + } + compiledPatterns = append(compiledPatterns, compiled) + } + + // check each rule to see if it matches any allowed pattern + for _, rule := range currentRules { + ruleMatched := false + for _, pattern := range compiledPatterns { + if pattern.MatchString(rule) { + klog.V(3).Infof("MATCHED: '%s' -> pattern: '%s'", rule, pattern.String()) + ruleMatched = true + break + } + } + if !ruleMatched { + klog.Infof("Unexpected rule: %s", rule) + foundUnexpectedRules = true + // continue to iterate over remaining rules to identify all unexpected rules + } + } + + return foundUnexpectedRules +} + +// nodeHasUserIPTablesRules returns true if the node has iptables rules that do not match the regex +// specified in the rule's respective table: nat, mangle, filter, raw, or security +// The global file's regexes can match to a rule in any table +func nodeHasUserIPTablesRules(fileReader FileLineReader, iptablesClient IPTablesClient) bool { + tables := []string{"nat", "mangle", "filter", "raw", "security"} + + globalPatterns, err := fileReader.Read(filepath.Join(*configPath, "global")) + if err != nil { + globalPatterns = []string{} + klog.V(2).Infof("No global patterns file found, using empty patterns") + } + + userIPTablesRules := false + + for _, table := range tables { + rules, err := GetRules(iptablesClient, table) + if err != nil { + klog.Errorf("failed to get rules for table %s: %v", table, err) + continue + } + + var referencePatterns []string + referencePatterns, err = fileReader.Read(filepath.Join(*configPath, table)) + if err != nil { + referencePatterns = []string{} + klog.V(2).Infof("No reference patterns file found for table %s", table) + } + + referencePatterns = append(referencePatterns, globalPatterns...) 
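+		// referencePatterns now holds this table's patterns plus the global ones;
+		// a rule is flagged only if it matches none of the combined set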
+ + klog.V(3).Infof("===== %s =====", table) + if hasUnexpectedRules(rules, referencePatterns) { + klog.Infof("Unexpected rules detected in table %s", table) + userIPTablesRules = true + } + } + + return userIPTablesRules +} + +func main() { + klog.InitFlags(nil) + flag.Parse() + + logs.InitLogs() + defer logs.FlushLogs() + + klog.Infof("Version: %s", version) + verflag.PrintAndExitIfRequested() + + config, err := rest.InClusterConfig() + if err != nil { + klog.Fatalf("failed to create in-cluster config: %v", err) + } + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + klog.Fatalf("failed to create kubernetes clientset: %v", err) + } + dynamicClient, err := dynamic.NewForConfig(config) + if err != nil { + klog.Fatalf("failed to create dynamic client: %v", err) + } + + var iptablesClient IPTablesClient + iptablesClient, err = goiptables.New() + if err != nil { + klog.Fatalf("failed to create iptables client: %v", err) + } + + // get current node name from environment variable + currentNodeName := os.Getenv("NODE_NAME") + if currentNodeName == "" { + klog.Fatalf("NODE_NAME environment variable not set") + } + + klog.Infof("Starting iptables monitor for node: %s", currentNodeName) + + var fileReader FileLineReader = OSFileLineReader{} + + for { + userIPTablesRulesFound := nodeHasUserIPTablesRules(fileReader, iptablesClient) + + // update label based on whether user iptables rules were found + err = patchLabel(dynamicClient, userIPTablesRulesFound, currentNodeName) + if err != nil { + klog.Errorf("failed to patch label: %v", err) + } else { + klog.V(2).Infof("Successfully updated label for %s: %s=%v", currentNodeName, label, userIPTablesRulesFound) + } + + if *sendEvents && userIPTablesRulesFound { + err = createNodeEvent(clientset, currentNodeName, "UnexpectedIPTablesRules", "Node has unexpected iptables rules", corev1.EventTypeWarning) + if err != nil { + klog.Errorf("failed to create event: %v", err) + } + } + + time.Sleep(time.Duration(*checkInterval) * time.Second) + } +} diff --git a/azure-iptables-monitor/iptables_monitor_test.go b/azure-iptables-monitor/iptables_monitor_test.go new file mode 100644 index 0000000000..2ebbfa27ab --- /dev/null +++ b/azure-iptables-monitor/iptables_monitor_test.go @@ -0,0 +1,224 @@ +package main + +import ( + "errors" + "fmt" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" +) + +type MockFileLineReader struct { + files map[string][]string +} + +func NewMockFileLineReader() *MockFileLineReader { + return &MockFileLineReader{ + files: make(map[string][]string), + } +} + +var ErrFileNotFound = errors.New("file not found") + +func (m *MockFileLineReader) Read(filename string) ([]string, error) { + if lines, exists := m.files[filename]; exists { + return lines, nil + } + return nil, fmt.Errorf("reading file %q: %w", filename, ErrFileNotFound) +} + +// MockIPTablesClient implements IPTablesClient for testing +type MockIPTablesClient struct { + // rules is organized as: table -> chain -> []rules + rules map[string]map[string][]string +} + +// NewMockIPTablesClient creates a new mock client with empty rules +func NewMockIPTablesClient() *MockIPTablesClient { + return &MockIPTablesClient{ + rules: make(map[string]map[string][]string), + } +} + +// ListChains returns all chain names for the given table +func (m *MockIPTablesClient) ListChains(table string) ([]string, error) { + chains, exists := m.rules[table] + if !exists { + return []string{}, nil + } + + chainNames := make([]string, 0, len(chains)) + for chainName := 
range chains { + chainNames = append(chainNames, chainName) + } + return chainNames, nil +} + +// List returns all rules for the given table and chain +func (m *MockIPTablesClient) List(table, chain string) ([]string, error) { + tableChains, exists := m.rules[table] + if !exists { + return []string{}, nil + } + + rules, exists := tableChains[chain] + if !exists { + return []string{}, nil + } + + return rules, nil +} + +func TestHasUnexpectedRules(t *testing.T) { + testCases := []struct { + name string + currentRules []string + allowedPatterns []string + expected bool // true if we expect one of our rules to not match our allowedPatterns + }{ + { + name: "no rules, no patterns", + currentRules: []string{}, + allowedPatterns: []string{}, + expected: false, + }, + { + name: "all rules match patterns", + currentRules: []string{"ACCEPT all -- anywhere anywhere", "DROP all -- 192.168.1.0/24 anywhere"}, + allowedPatterns: []string{ + "^ACCEPT.*anywhere.*anywhere$", + "^DROP.*192\\.168\\..*", + }, + expected: false, + }, + { + name: "some rules don't match patterns", + currentRules: []string{"ACCEPT all -- anywhere anywhere", "CUSTOM_RULE something unexpected"}, + allowedPatterns: []string{ + "^ACCEPT.*anywhere.*anywhere$", + }, + expected: true, + }, + { + name: "no patterns provided, rules exist", + currentRules: []string{"ACCEPT all -- anywhere anywhere"}, + allowedPatterns: []string{}, + expected: true, + }, + { + name: "invalid regex pattern", + currentRules: []string{"ACCEPT all -- anywhere anywhere"}, + allowedPatterns: []string{ + "^ACCEPT.*anywhere.*anywhere$", + "[invalid regex", // This will fail to compile + }, + expected: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := hasUnexpectedRules(tc.currentRules, tc.allowedPatterns) + require.Equal(t, tc.expected, result, "hasUnexpectedRules result mismatch") + }) + } +} + +func TestNodeHasUserIPTablesRules(t *testing.T) { + testCases := []struct { + name string + files map[string][]string + rules map[string]map[string][]string + expected bool + description string + }{ + { + name: "no unexpected rules", + files: map[string][]string{ + filepath.Join("/etc/config/", "global"): { // nolint + "^-A.*INPUT.*lo.*", + }, + filepath.Join("/etc/config/", "nat"): { // nolint + "^-A.*MASQUERADE.*", + }, + }, + rules: map[string]map[string][]string{ + "nat": { + "POSTROUTING": { + "-A POSTROUTING -s 10.0.0.0/8 -j MASQUERADE", + }, + "INPUT": { + "-A INPUT -i lo -j ACCEPT", + }, + }, + "filter": { + "INPUT": { + "-A INPUT -i lo -j ACCEPT", + }, + }, + }, + expected: false, + description: "all rules match expected patterns", + }, + { + name: "has unexpected rules", + files: map[string][]string{ + filepath.Join("/etc/config/", "nat"): { // nolint + "^-A.*CUSTOM_CHAIN.*", + }, + }, + rules: map[string]map[string][]string{ + "nat": { + "CUSTOM_CHAIN": { + "-A CUSTOM_CHAIN -j DROP", + }, + }, + "filter": { + "CUSTOM_CHAIN": { + "-A CUSTOM_CHAIN -j DROP", // This won't match any pattern + }, + }, + }, + expected: true, + description: "unexpected custom rule found", + }, + { + name: "no pattern files exist", + files: map[string][]string{}, + rules: map[string]map[string][]string{ + "nat": { + "POSTROUTING": { + "-A POSTROUTING -j MASQUERADE", + }, + }, + }, + expected: true, + description: "no patterns means all rules are unexpected", + }, + { + name: "empty iptables rules", + files: map[string][]string{ + filepath.Join("/etc/config/", "global"): { // nolint + "^-A.*ACCEPT.*", + }, + }, + rules: 
map[string]map[string][]string{}, + expected: false, + description: "no rules means no unexpected rules", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + fileReader := NewMockFileLineReader() + iptablesClient := NewMockIPTablesClient() + + fileReader.files = tc.files + iptablesClient.rules = tc.rules + + result := nodeHasUserIPTablesRules(fileReader, iptablesClient) + require.Equal(t, tc.expected, result, tc.description) + }) + } +} diff --git a/bpf-prog/ipv6-hp-bpf/README.md b/bpf-prog/ipv6-hp-bpf/README.md index 808a568413..c32dae1d5b 100644 --- a/bpf-prog/ipv6-hp-bpf/README.md +++ b/bpf-prog/ipv6-hp-bpf/README.md @@ -4,9 +4,17 @@ ## Description -The goal of this bpf program is to fix the issue described [here](https://github.com/cilium/cilium/issues/31326). It includes both egress and ingress TC programs. These programs are meant to replace the nftable rules since they don't work on cilium clusters. +The goal of this bpf program is to fix the issue described [here](https://github.com/cilium/cilium/issues/31326). It includes both egress and ingress TC programs. These programs are meant to replace the nftable rules since they don't work on cilium clusters. The egress bpf code converts the destination IPv6 of the packet from global unicast to link local, and ingress converts the source IPv6 from link local to global unicast. +## Dependencies + +Leverage the below make recipe to install the required libraries. + + ```bash + make ipv6-hp-bpf-lib + ``` + ## Usage Follow the steps below to compile the program and install it onto your node: @@ -20,9 +28,9 @@ Follow the steps below to compile the program and install it onto your node: 3. Remove the nftable rules for ipv6 with the following commands: ```bash - nft delete chain ip6 azureSLBProbe postrouting - nft delete chain ip6 azureSLBProbe prerouting - nft -n list table ip6 azureSLBProbe + nft delete chain ip6 azureSLBProbe postrouting + nft delete chain ip6 azureSLBProbe prerouting + nft -n list table ip6 azureSLBProbe ``` 4. Start the program with: @@ -43,7 +51,7 @@ To copy to the node you need to create a node-shell instance kubectl cp egress.o nsenter-xxxxx: ``` -Since this is for cilium clusters, cilium already creates a qdisc on eth0 of type clsact (which allows both ingress and egress filters to be attached). If cilium is not installed, you would have to create the qdisc on your own by doing the following: +Since this is for cilium clusters, cilium already creates a qdisc on eth0 of type clsact (which allows both ingress and egress filters to be attached). 
If cilium is not installed, you would have to create the qdisc on your own by doing the following: ```bash tc qdisc add dev eth0 clsact ``` @@ -56,4 +64,4 @@ tc filter add dev eth0 egress prio 1 bpf da obj egress.o sec classifier ## Verify the filter is attached ```bash tc filter show dev eth0 egress -``` \ No newline at end of file +``` diff --git a/bpf-prog/ipv6-hp-bpf/go.mod b/bpf-prog/ipv6-hp-bpf/go.mod index e6dd75a0d6..4de0b87fad 100644 --- a/bpf-prog/ipv6-hp-bpf/go.mod +++ b/bpf-prog/ipv6-hp-bpf/go.mod @@ -1,6 +1,8 @@ module github.com/Azure/azure-container-networking/bpf-prog/ipv6-hp-bpf -go 1.21.6 +go 1.23 + +toolchain go1.23.2 require ( github.com/cilium/ebpf v0.15.0 diff --git a/bpf-prog/ipv6-hp-bpf/linux.Dockerfile b/bpf-prog/ipv6-hp-bpf/linux.Dockerfile index 1b3ddfa863..ab19e3de35 100644 --- a/bpf-prog/ipv6-hp-bpf/linux.Dockerfile +++ b/bpf-prog/ipv6-hp-bpf/linux.Dockerfile @@ -1,4 +1,6 @@ -FROM mcr.microsoft.com/oss/go/microsoft/golang:1.22 AS builder +ARG ARCH +# skopeo inspect docker://mcr.microsoft.com/oss/go/microsoft/golang:1.23.2 --format "{{.Name}}@{{.Digest}}" +FROM --platform=linux/${ARCH} mcr.microsoft.com/oss/go/microsoft/golang@sha256:86c5b00bbed2a6e7157052d78bf4b45c0bf26545ed6e8fd7dbad51ac9415f534 AS builder ARG VERSION ARG DEBUG ARG OS diff --git a/build/images.mk b/build/images.mk new file mode 100644 index 0000000000..76fa71682e --- /dev/null +++ b/build/images.mk @@ -0,0 +1,30 @@ +# Source images +export GO_IMG ?= mcr.microsoft.com/oss/go/microsoft/golang:1.23-azurelinux3.0 +export MARINER_CORE_IMG ?= mcr.microsoft.com/azurelinux/base/core:3.0 +export MARINER_DISTROLESS_IMG ?= mcr.microsoft.com/azurelinux/distroless/minimal:3.0 +export WIN_HPC_IMG ?= mcr.microsoft.com/oss/kubernetes/windows-host-process-containers-base-image:v1.0.0 + + +# Pinned SHA images +export GO_PIN ?= $(shell skopeo inspect docker://${GO_IMG} --format "{{.Name}}@{{.Digest}}") +export MARINER_CORE_PIN ?= $(shell skopeo inspect docker://${MARINER_CORE_IMG} --format "{{.Name}}@{{.Digest}}") +export MARINER_DISTROLESS_PIN ?= $(shell skopeo inspect docker://${MARINER_DISTROLESS_IMG} --format "{{.Name}}@{{.Digest}}") +export WIN_HPC_PIN ?= $(shell skopeo inspect --override-os windows docker://${WIN_HPC_IMG} --format "{{.Name}}@{{.Digest}}") + +export RENDER_MSG ?= "!! AUTOGENERATED - DO NOT EDIT !!" 
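+# NOTE: PATH is expected to be set by the caller to the component directory being rendered (it is not the shell PATH).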
+export SRC ?= ${PATH}/Dockerfile.tmpl +export DEST ?= ${PATH}/Dockerfile + +export PIPE_PATH ?= .pipelines/build/dockerfiles +export SRC_PIPE ?= ${PIPE_PATH}/${PATH}.Dockerfile.tmpl +export DEST_PIPE ?= ${PIPE_PATH}/${PATH}.Dockerfile + +print: + @echo ${GO_PIN} + @echo ${MARINER_CORE_PIN} + @echo ${MARINER_DISTROLESS_PIN} + @echo ${WIN_HPC_PIN} + +render: + build/tools/bin/renderkit -f ${SRC} --ds env:// > ${DEST} + build/tools/bin/renderkit -f ${SRC_PIPE} --ds env:// > ${DEST_PIPE} diff --git a/build/tools/go.mod b/build/tools/go.mod index 6648c280b4..b7e2378b15 100644 --- a/build/tools/go.mod +++ b/build/tools/go.mod @@ -1,66 +1,79 @@ module github.com/Azure/azure-container-networking/build/tools -go 1.21 +go 1.23.2 require ( - github.com/AlekSi/gocov-xml v1.1.0 - github.com/axw/gocov v1.1.0 + github.com/AlekSi/gocov-xml v1.2.0 + github.com/axw/gocov v1.2.1 github.com/golang/mock v1.6.0 - github.com/golangci/golangci-lint v1.59.1 + github.com/golangci/golangci-lint v1.63.4 github.com/jstemmer/go-junit-report v1.0.0 + github.com/orellazri/renderkit v0.6.3 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1 - google.golang.org/protobuf v1.34.2 - mvdan.cc/gofumpt v0.6.0 - sigs.k8s.io/controller-tools v0.13.0 + google.golang.org/protobuf v1.36.5 + mvdan.cc/gofumpt v0.7.0 + sigs.k8s.io/controller-tools v0.16.3 ) require ( 4d63.com/gocheckcompilerdirectives v1.2.1 // indirect 4d63.com/gochecknoglobals v0.2.1 // indirect - github.com/4meepo/tagalign v1.3.4 // indirect - github.com/Abirdcfly/dupword v0.0.14 // indirect - github.com/Antonboom/errname v0.1.13 // indirect - github.com/Antonboom/nilnil v0.1.9 // indirect - github.com/Antonboom/testifylint v1.3.1 // indirect - github.com/BurntSushi/toml v1.4.0 // indirect - github.com/Crocmagnon/fatcontext v0.2.2 // indirect + dario.cat/mergo v1.0.1 // indirect + github.com/4meepo/tagalign v1.4.1 // indirect + github.com/Abirdcfly/dupword v0.1.3 // indirect + github.com/Antonboom/errname v1.0.0 // indirect + github.com/Antonboom/nilnil v1.0.1 // indirect + github.com/Antonboom/testifylint v1.5.2 // indirect + github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c // indirect + github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53 // indirect + github.com/CloudyKit/jet/v6 v6.2.0 // indirect + github.com/Crocmagnon/fatcontext v0.5.3 // indirect github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect - github.com/GaijinEntertainment/go-exhaustruct/v3 v3.2.0 // indirect - github.com/Masterminds/semver/v3 v3.2.1 // indirect + github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.0 // indirect + github.com/Masterminds/goutils v1.1.1 // indirect + github.com/Masterminds/semver/v3 v3.3.1 // indirect + github.com/Masterminds/sprig/v3 v3.3.0 // indirect github.com/OpenPeeDeeP/depguard/v2 v2.2.0 // indirect - github.com/alecthomas/go-check-sumtype v0.1.4 // indirect - github.com/alexkohler/nakedret/v2 v2.0.4 // indirect + github.com/a8m/envsubst v1.4.2 // indirect + github.com/alecthomas/go-check-sumtype v0.3.1 // indirect + github.com/alexkohler/nakedret/v2 v2.0.5 // indirect github.com/alexkohler/prealloc v1.0.0 // indirect github.com/alingse/asasalint v0.0.11 // indirect + github.com/alingse/nilnesserr v0.1.1 // indirect github.com/ashanbrown/forbidigo v1.6.0 // indirect - github.com/ashanbrown/makezero v1.1.1 // indirect + github.com/ashanbrown/makezero v1.2.0 // indirect + github.com/aymerick/raymond v2.0.2+incompatible // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bkielbasa/cyclop v1.2.1 
// indirect + github.com/bkielbasa/cyclop v1.2.3 // indirect github.com/blizzy78/varnamelen v0.8.0 // indirect - github.com/bombsimon/wsl/v4 v4.2.1 // indirect - github.com/breml/bidichk v0.2.7 // indirect - github.com/breml/errchkjson v0.3.6 // indirect - github.com/butuzov/ireturn v0.3.0 // indirect - github.com/butuzov/mirror v1.2.0 // indirect + github.com/bombsimon/wsl/v4 v4.5.0 // indirect + github.com/breml/bidichk v0.3.2 // indirect + github.com/breml/errchkjson v0.4.0 // indirect + github.com/butuzov/ireturn v0.3.1 // indirect + github.com/butuzov/mirror v1.3.0 // indirect github.com/catenacyber/perfsprint v0.7.1 // indirect + github.com/cbroglie/mustache v1.4.0 // indirect github.com/ccojocar/zxcvbn-go v1.0.2 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/charithe/durationcheck v0.0.10 // indirect github.com/chavacava/garif v0.1.0 // indirect - github.com/ckaznocha/intrange v0.1.2 // indirect - github.com/curioswitch/go-reassign v0.2.0 // indirect - github.com/daixiang0/gci v0.13.4 // indirect + github.com/ckaznocha/intrange v0.3.0 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect + github.com/curioswitch/go-reassign v0.3.0 // indirect + github.com/daixiang0/gci v0.13.5 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/denis-tingaikin/go-header v0.5.0 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect github.com/ettle/strcase v0.2.0 // indirect - github.com/fatih/color v1.17.0 // indirect + github.com/fatih/color v1.18.0 // indirect github.com/fatih/structtag v1.2.0 // indirect github.com/firefart/nonamedreturns v1.0.5 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/fzipp/gocyclo v0.6.0 // indirect - github.com/ghostiam/protogetter v0.3.6 // indirect - github.com/go-critic/go-critic v0.11.4 // indirect - github.com/go-logr/logr v1.4.1 // indirect + github.com/ghostiam/protogetter v0.3.8 // indirect + github.com/go-critic/go-critic v0.11.5 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/go-toolsmith/astcast v1.1.0 // indirect github.com/go-toolsmith/astcopy v1.1.0 // indirect github.com/go-toolsmith/astequal v1.2.0 // indirect @@ -68,145 +81,164 @@ require ( github.com/go-toolsmith/astp v1.1.0 // indirect github.com/go-toolsmith/strparse v1.1.0 // indirect github.com/go-toolsmith/typep v1.1.0 // indirect - github.com/go-viper/mapstructure/v2 v2.0.0 // indirect - github.com/go-xmlfmt/xmlfmt v1.1.2 // indirect - github.com/gobuffalo/flect v1.0.2 // indirect + github.com/go-viper/mapstructure/v2 v2.3.0 // indirect + github.com/go-xmlfmt/xmlfmt v1.1.3 // indirect + github.com/gobuffalo/flect v1.0.3 // indirect github.com/gobwas/glob v0.2.3 // indirect - github.com/gofrs/flock v0.8.1 // indirect + github.com/gofrs/flock v0.12.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect - github.com/golangci/gofmt v0.0.0-20231019111953-be8c47862aaa // indirect + github.com/golangci/go-printf-func-name v0.1.0 // indirect + github.com/golangci/gofmt v0.0.0-20241223200906-057b0627d9b9 // indirect github.com/golangci/misspell v0.6.0 // indirect - github.com/golangci/modinfo v0.3.4 // indirect github.com/golangci/plugin-module-register v0.1.1 // indirect github.com/golangci/revgrep v0.5.3 // indirect github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed // indirect 
github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect + github.com/google/uuid v1.6.0 // indirect github.com/gordonklaus/ineffassign v0.1.0 // indirect + github.com/goreleaser/fileglob v1.3.0 // indirect github.com/gostaticanalysis/analysisutil v0.7.1 // indirect github.com/gostaticanalysis/comment v1.4.2 // indirect github.com/gostaticanalysis/forcetypeassert v0.1.0 // indirect github.com/gostaticanalysis/nilerr v0.1.1 // indirect + github.com/hashicorp/go-envparse v0.1.0 // indirect + github.com/hashicorp/go-immutable-radix/v2 v2.1.0 // indirect github.com/hashicorp/go-version v1.7.0 // indirect + github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/hexops/gotextdiff v1.0.3 // indirect + github.com/huandu/xstrings v1.5.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jgautheron/goconst v1.7.1 // indirect github.com/jingyugao/rowserrcheck v1.1.1 // indirect - github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect - github.com/jjti/go-spancheck v0.6.1 // indirect + github.com/jjti/go-spancheck v0.6.4 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/julz/importas v0.1.0 // indirect + github.com/julz/importas v0.2.0 // indirect github.com/karamaru-alpha/copyloopvar v1.1.0 // indirect - github.com/kisielk/errcheck v1.7.0 // indirect + github.com/kisielk/errcheck v1.8.0 // indirect github.com/kkHAIKE/contextcheck v1.1.5 // indirect github.com/kulti/thelper v0.6.3 // indirect github.com/kunwardeep/paralleltest v1.0.10 // indirect github.com/kyoh86/exportloopref v0.1.11 // indirect - github.com/lasiar/canonicalheader v1.1.1 // indirect - github.com/ldez/gomoddirectives v0.2.4 // indirect - github.com/ldez/tagliatelle v0.5.0 // indirect + github.com/lasiar/canonicalheader v1.1.2 // indirect + github.com/ldez/exptostd v0.3.1 // indirect + github.com/ldez/gomoddirectives v0.6.0 // indirect + github.com/ldez/grignotin v0.7.0 // indirect + github.com/ldez/tagliatelle v0.7.1 // indirect + github.com/ldez/usetesting v0.4.2 // indirect github.com/leonklingele/grouper v1.1.2 // indirect - github.com/lufeee/execinquery v1.2.1 // indirect github.com/macabu/inamedparam v0.1.3 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/maratori/testableexamples v1.0.0 // indirect github.com/maratori/testpackage v1.1.1 // indirect - github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26 // indirect + github.com/matoous/godox v0.0.0-20240105082147-c5b5e0e7c0c0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mattn/go-runewidth v0.0.15 // indirect - github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect - github.com/mgechev/revive v1.3.7 // indirect + github.com/mattn/go-runewidth v0.0.16 // indirect + github.com/mgechev/revive v1.5.1 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/moricho/tparallel v0.3.1 // indirect + github.com/moricho/tparallel v0.3.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/nakabonne/nestif v0.3.1 // indirect + 
github.com/nikolalohinski/gonja/v2 v2.3.3 // indirect github.com/nishanths/exhaustive v0.12.0 // indirect github.com/nishanths/predeclared v0.2.2 // indirect - github.com/nunnatsa/ginkgolinter v0.16.2 // indirect + github.com/nunnatsa/ginkgolinter v0.18.4 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect - github.com/pelletier/go-toml/v2 v2.2.2 // indirect + github.com/pelletier/go-toml/v2 v2.2.3 // indirect + github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/polyfloyd/go-errorlint v1.5.2 // indirect - github.com/prometheus/client_golang v1.17.0 // indirect - github.com/prometheus/client_model v0.5.0 // indirect - github.com/prometheus/common v0.45.0 // indirect - github.com/prometheus/procfs v0.12.0 // indirect - github.com/quasilyte/go-ruleguard v0.4.2 // indirect + github.com/polyfloyd/go-errorlint v1.7.0 // indirect + github.com/prometheus/client_golang v1.20.4 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.60.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + github.com/quasilyte/go-ruleguard v0.4.3-0.20240823090925-0fe6f58b47b1 // indirect github.com/quasilyte/go-ruleguard/dsl v0.3.22 // indirect github.com/quasilyte/gogrep v0.5.0 // indirect github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect - github.com/rivo/uniseg v0.4.4 // indirect - github.com/ryancurrah/gomodguard v1.3.2 // indirect + github.com/raeperd/recvcheck v0.2.0 // indirect + github.com/rivo/uniseg v0.4.7 // indirect + github.com/rogpeppe/go-internal v1.13.1 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/ryancurrah/gomodguard v1.3.5 // indirect github.com/ryanrolds/sqlclosecheck v0.5.1 // indirect - github.com/sagikazarmark/locafero v0.4.0 // indirect + github.com/sagikazarmark/locafero v0.6.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect - github.com/sanposhiho/wastedassign/v2 v2.0.7 // indirect - github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 // indirect + github.com/sanposhiho/wastedassign/v2 v2.1.0 // indirect + github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 // indirect github.com/sashamelentyev/interfacebloat v1.1.0 // indirect - github.com/sashamelentyev/usestdlibvars v1.26.0 // indirect - github.com/securego/gosec/v2 v2.20.1-0.20240525090044-5f0084eb01a9 // indirect + github.com/sashamelentyev/usestdlibvars v1.28.0 // indirect + github.com/securego/gosec/v2 v2.21.4 // indirect github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect + github.com/shopspring/decimal v1.4.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/sivchari/containedctx v1.0.3 // indirect - github.com/sivchari/tenv v1.7.1 // indirect - github.com/sonatard/noctx v0.0.2 // indirect + github.com/sivchari/tenv v1.12.1 // indirect + github.com/sonatard/noctx v0.1.0 // indirect github.com/sourcegraph/conc v0.3.0 // indirect github.com/sourcegraph/go-diff v0.7.0 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.6.0 // indirect - github.com/spf13/cobra v1.8.0 // indirect + github.com/spf13/cast v1.7.1 // indirect + github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/spf13/viper v1.18.1 // indirect + github.com/spf13/viper v1.19.0 // indirect github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect - 
github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect + github.com/stbenjam/no-sprintf-host-port v0.2.0 // indirect github.com/stretchr/objx v0.5.2 // indirect - github.com/stretchr/testify v1.9.0 // indirect + github.com/stretchr/testify v1.10.0 // indirect github.com/subosito/gotenv v1.6.0 // indirect - github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c // indirect - github.com/tdakkota/asciicheck v0.2.0 // indirect - github.com/tetafro/godot v1.4.16 // indirect - github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 // indirect - github.com/timonwong/loggercheck v0.9.4 // indirect - github.com/tomarrell/wrapcheck/v2 v2.8.3 // indirect + github.com/tdakkota/asciicheck v0.3.0 // indirect + github.com/tetafro/godot v1.4.20 // indirect + github.com/timakin/bodyclose v0.0.0-20241017074812-ed6a65f985e3 // indirect + github.com/timonwong/loggercheck v0.10.1 // indirect + github.com/tomarrell/wrapcheck/v2 v2.10.0 // indirect github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect - github.com/ultraware/funlen v0.1.0 // indirect - github.com/ultraware/whitespace v0.1.1 // indirect - github.com/uudashr/gocognit v1.1.2 // indirect + github.com/ultraware/funlen v0.2.0 // indirect + github.com/ultraware/whitespace v0.2.0 // indirect + github.com/urfave/cli/v2 v2.27.5 // indirect + github.com/uudashr/gocognit v1.2.0 // indirect + github.com/uudashr/iface v1.3.0 // indirect + github.com/x448/float16 v0.8.4 // indirect github.com/xen0n/gosmopolitan v1.2.2 // indirect + github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect github.com/yagipy/maintidx v1.0.0 // indirect github.com/yeya24/promlinter v0.3.0 // indirect github.com/ykadowak/zerologlint v0.1.5 // indirect gitlab.com/bosi/decorder v0.4.2 // indirect - go-simpler.org/musttag v0.12.2 // indirect - go-simpler.org/sloglint v0.7.1 // indirect - go.uber.org/automaxprocs v1.5.3 // indirect + go-simpler.org/musttag v0.13.0 // indirect + go-simpler.org/sloglint v0.7.2 // indirect + go.uber.org/automaxprocs v1.6.0 // indirect go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.26.0 // indirect - golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc // indirect - golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f // indirect - golang.org/x/mod v0.18.0 // indirect - golang.org/x/net v0.26.0 // indirect - golang.org/x/sync v0.7.0 // indirect - golang.org/x/sys v0.21.0 // indirect - golang.org/x/text v0.16.0 // indirect - golang.org/x/tools v0.22.0 // indirect + go.uber.org/zap v1.27.0 // indirect + golang.org/x/crypto v0.36.0 // indirect + golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67 // indirect + golang.org/x/exp/typeparams v0.0.0-20241108190413-2d47ceb2692f // indirect + golang.org/x/mod v0.22.0 // indirect + golang.org/x/net v0.38.0 // indirect + golang.org/x/sync v0.12.0 // indirect + golang.org/x/sys v0.31.0 // indirect + golang.org/x/text v0.23.0 // indirect + golang.org/x/tools v0.28.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - honnef.co/go/tools v0.4.7 // indirect - k8s.io/api v0.29.0 // indirect - k8s.io/apiextensions-apiserver v0.29.0 // indirect - k8s.io/apimachinery v0.29.0 // indirect - k8s.io/klog/v2 v2.110.1 // indirect - k8s.io/utils v0.0.0-20231127182322-b307cd553661 // indirect - mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f // indirect + honnef.co/go/tools v0.5.1 // indirect + k8s.io/api v0.31.1 // indirect + k8s.io/apiextensions-apiserver 
v0.31.1 // indirect + k8s.io/apimachinery v0.31.1 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 // indirect + mvdan.cc/unparam v0.0.0-20240917084806-57a3b4290ba3 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect sigs.k8s.io/yaml v1.4.0 // indirect diff --git a/build/tools/go.sum b/build/tools/go.sum index 952f971bd6..c2889ad58f 100644 --- a/build/tools/go.sum +++ b/build/tools/go.sum @@ -2,91 +2,120 @@ 4d63.com/gocheckcompilerdirectives v1.2.1/go.mod h1:yjDJSxmDTtIHHCqX0ufRYZDL6vQtMG7tJdKVeWwsqvs= 4d63.com/gochecknoglobals v0.2.1 h1:1eiorGsgHOFOuoOiJDy2psSrQbRdIHrlge0IJIkUgDc= 4d63.com/gochecknoglobals v0.2.1/go.mod h1:KRE8wtJB3CXCsb1xy421JfTHIIbmT3U5ruxw2Qu8fSU= -github.com/4meepo/tagalign v1.3.4 h1:P51VcvBnf04YkHzjfclN6BbsopfJR5rxs1n+5zHt+w8= -github.com/4meepo/tagalign v1.3.4/go.mod h1:M+pnkHH2vG8+qhE5bVc/zeP7HS/j910Fwa9TUSyZVI0= -github.com/Abirdcfly/dupword v0.0.14 h1:3U4ulkc8EUo+CaT105/GJ1BQwtgyj6+VaBVbAX11Ba8= -github.com/Abirdcfly/dupword v0.0.14/go.mod h1:VKDAbxdY8YbKUByLGg8EETzYSuC4crm9WwI6Y3S0cLI= -github.com/AlekSi/gocov-xml v1.1.0 h1:iElWGi7s/MuL8/d8WDtI2fOAsN3ap9x8nK5RrAhaDng= -github.com/AlekSi/gocov-xml v1.1.0/go.mod h1:g1dRVOCHjKkMtlPfW6BokJ/qxoeZ1uPNAK7A/ii3CUo= -github.com/Antonboom/errname v0.1.13 h1:JHICqsewj/fNckzrfVSe+T33svwQxmjC+1ntDsHOVvM= -github.com/Antonboom/errname v0.1.13/go.mod h1:uWyefRYRN54lBg6HseYCFhs6Qjcy41Y3Jl/dVhA87Ns= -github.com/Antonboom/nilnil v0.1.9 h1:eKFMejSxPSA9eLSensFmjW2XTgTwJMjZ8hUHtV4s/SQ= -github.com/Antonboom/nilnil v0.1.9/go.mod h1:iGe2rYwCq5/Me1khrysB4nwI7swQvjclR8/YRPl5ihQ= -github.com/Antonboom/testifylint v1.3.1 h1:Uam4q1Q+2b6H7gvk9RQFw6jyVDdpzIirFOOrbs14eG4= -github.com/Antonboom/testifylint v1.3.1/go.mod h1:NV0hTlteCkViPW9mSR4wEMfwp+Hs1T3dY60bkvSfhpM= -github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= -github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= -github.com/Crocmagnon/fatcontext v0.2.2 h1:OrFlsDdOj9hW/oBEJBNSuH7QWf+E9WPVHw+x52bXVbk= -github.com/Crocmagnon/fatcontext v0.2.2/go.mod h1:WSn/c/+MMNiD8Pri0ahRj0o9jVpeowzavOQplBJw6u0= +dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= +dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +github.com/4meepo/tagalign v1.4.1 h1:GYTu2FaPGOGb/xJalcqHeD4il5BiCywyEYZOA55P6J4= +github.com/4meepo/tagalign v1.4.1/go.mod h1:2H9Yu6sZ67hmuraFgfZkNcg5Py9Ch/Om9l2K/2W1qS4= +github.com/Abirdcfly/dupword v0.1.3 h1:9Pa1NuAsZvpFPi9Pqkd93I7LIYRURj+A//dFd5tgBeE= +github.com/Abirdcfly/dupword v0.1.3/go.mod h1:8VbB2t7e10KRNdwTVoxdBaxla6avbhGzb8sCTygUMhw= +github.com/AlekSi/gocov-xml v1.2.0 h1:TGx+qVSgBn655Ejv8b2mgPIGvIa8ZxGFSPXuWf0J3Vw= +github.com/AlekSi/gocov-xml v1.2.0/go.mod h1:g1dRVOCHjKkMtlPfW6BokJ/qxoeZ1uPNAK7A/ii3CUo= +github.com/Antonboom/errname v1.0.0 h1:oJOOWR07vS1kRusl6YRSlat7HFnb3mSfMl6sDMRoTBA= +github.com/Antonboom/errname v1.0.0/go.mod h1:gMOBFzK/vrTiXN9Oh+HFs+e6Ndl0eTFbtsRTSRdXyGI= +github.com/Antonboom/nilnil v1.0.1 h1:C3Tkm0KUxgfO4Duk3PM+ztPncTFlOf0b2qadmS0s4xs= +github.com/Antonboom/nilnil v1.0.1/go.mod h1:CH7pW2JsRNFgEh8B2UaPZTEPhCMuFowP/e8Udp9Nnb0= +github.com/Antonboom/testifylint v1.5.2 h1:4s3Xhuv5AvdIgbd8wOOEeo0uZG7PbDKQyKY5lGoQazk= +github.com/Antonboom/testifylint v1.5.2/go.mod h1:vxy8VJ0bc6NavlYqjZfmp6EfqXMtBgQ4+mhCojwC1P8= +github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c 
h1:pxW6RcqyfI9/kWtOwnv/G+AzdKuy2ZrqINhenH4HyNs= +github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53 h1:sR+/8Yb4slttB4vD+b9btVEnWgL3Q00OBTzVT8B9C0c= +github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno= +github.com/CloudyKit/jet/v6 v6.2.0 h1:EpcZ6SR9n28BUGtNJSvlBqf90IpjeFr36Tizxhn/oME= +github.com/CloudyKit/jet/v6 v6.2.0/go.mod h1:d3ypHeIRNo2+XyqnGA8s+aphtcVpjP5hPwP/Lzo7Ro4= +github.com/Crocmagnon/fatcontext v0.5.3 h1:zCh/wjc9oyeF+Gmp+V60wetm8ph2tlsxocgg/J0hOps= +github.com/Crocmagnon/fatcontext v0.5.3/go.mod h1:XoCQYY1J+XTfyv74qLXvNw4xFunr3L1wkopIIKG7wGM= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= -github.com/GaijinEntertainment/go-exhaustruct/v3 v3.2.0 h1:sATXp1x6/axKxz2Gjxv8MALP0bXaNRfQinEwyfMcx8c= -github.com/GaijinEntertainment/go-exhaustruct/v3 v3.2.0/go.mod h1:Nl76DrGNJTA1KJ0LePKBw/vznBX1EHbAZX8mwjR82nI= -github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= -github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.0 h1:/fTUt5vmbkAcMBt4YQiuC23cV0kEsN1MVMNqeOW43cU= +github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.0/go.mod h1:ONJg5sxcbsdQQ4pOW8TGdTidT2TMAUy/2Xhr8mrYaao= +github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= +github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4= +github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs= +github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0= github.com/OpenPeeDeeP/depguard/v2 v2.2.0 h1:vDfG60vDtIuf0MEOhmLlLLSzqaRM8EMcgJPdp74zmpA= github.com/OpenPeeDeeP/depguard/v2 v2.2.0/go.mod h1:CIzddKRvLBC4Au5aYP/i3nyaWQ+ClszLIuVocRiCYFQ= -github.com/alecthomas/assert/v2 v2.2.2 h1:Z/iVC0xZfWTaFNE6bA3z07T86hd45Xe2eLt6WVy2bbk= -github.com/alecthomas/assert/v2 v2.2.2/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ= -github.com/alecthomas/go-check-sumtype v0.1.4 h1:WCvlB3l5Vq5dZQTFmodqL2g68uHiSwwlWcT5a2FGK0c= -github.com/alecthomas/go-check-sumtype v0.1.4/go.mod h1:WyYPfhfkdhyrdaligV6svFopZV8Lqdzn5pyVBaV6jhQ= -github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk= -github.com/alecthomas/repr v0.2.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= -github.com/alexkohler/nakedret/v2 v2.0.4 h1:yZuKmjqGi0pSmjGpOC016LtPJysIL0WEUiaXW5SUnNg= -github.com/alexkohler/nakedret/v2 v2.0.4/go.mod h1:bF5i0zF2Wo2o4X4USt9ntUWve6JbFv02Ff4vlkmS/VU= +github.com/a8m/envsubst v1.4.2 h1:4yWIHXOLEJHQEFd4UjrWDrYeYlV7ncFWJOCBRLOZHQg= +github.com/a8m/envsubst v1.4.2/go.mod h1:MVUTQNGQ3tsjOOtKCNd+fl8RzhsXcDvvAEzkhGtlsbY= +github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8vS6K3D0= 
+github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k= +github.com/alecthomas/go-check-sumtype v0.3.1 h1:u9aUvbGINJxLVXiFvHUlPEaD7VDULsrxJb4Aq31NLkU= +github.com/alecthomas/go-check-sumtype v0.3.1/go.mod h1:A8TSiN3UPRw3laIgWEUOHHLPa6/r9MtoigdlP5h3K/E= +github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc= +github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= +github.com/alexkohler/nakedret/v2 v2.0.5 h1:fP5qLgtwbx9EJE8dGEERT02YwS8En4r9nnZ71RK+EVU= +github.com/alexkohler/nakedret/v2 v2.0.5/go.mod h1:bF5i0zF2Wo2o4X4USt9ntUWve6JbFv02Ff4vlkmS/VU= github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw= github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= github.com/alingse/asasalint v0.0.11 h1:SFwnQXJ49Kx/1GghOFz1XGqHYKp21Kq1nHad/0WQRnw= github.com/alingse/asasalint v0.0.11/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I= +github.com/alingse/nilnesserr v0.1.1 h1:7cYuJewpy9jFNMEA72Q1+3Nm3zKHzg+Q28D5f2bBFUA= +github.com/alingse/nilnesserr v0.1.1/go.mod h1:1xJPrXonEtX7wyTq8Dytns5P2hNzoWymVUIaKm4HNFg= github.com/ashanbrown/forbidigo v1.6.0 h1:D3aewfM37Yb3pxHujIPSpTf6oQk9sc9WZi8gerOIVIY= github.com/ashanbrown/forbidigo v1.6.0/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU= -github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s= -github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI= -github.com/axw/gocov v1.1.0 h1:y5U1krExoJDlb/kNtzxyZQmNRprFOFCutWbNjcQvmVM= +github.com/ashanbrown/makezero v1.2.0 h1:/2Lp1bypdmK9wDIq7uWBlDF1iMUpIIS4A+pF6C9IEUU= +github.com/ashanbrown/makezero v1.2.0/go.mod h1:dxlPhHbDMC6N6xICzFBSK+4njQDdK8euNO0qjQMtGY4= github.com/axw/gocov v1.1.0/go.mod h1:H9G4tivgdN3pYSSVrTFBr6kGDCmAkgbJhtxFzAvgcdw= +github.com/axw/gocov v1.2.1 h1:bqtQDBC2tQWcPzTYIVxK0EDCfNRLwsk4NZ0+GB4hX8Q= +github.com/axw/gocov v1.2.1/go.mod h1:l11/vZBBKfQEE+42jF47myjDrRZHM+hR+XgGjI6FopU= +github.com/aymerick/raymond v2.0.2+incompatible h1:VEp3GpgdAnv9B2GFyTvqgcKvY+mfKMjPOA3SbKLtnU0= +github.com/aymerick/raymond v2.0.2+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bkielbasa/cyclop v1.2.1 h1:AeF71HZDob1P2/pRm1so9cd1alZnrpyc4q2uP2l0gJY= -github.com/bkielbasa/cyclop v1.2.1/go.mod h1:K/dT/M0FPAiYjBgQGau7tz+3TMh4FWAEqlMhzFWCrgM= +github.com/bkielbasa/cyclop v1.2.3 h1:faIVMIGDIANuGPWH031CZJTi2ymOQBULs9H21HSMa5w= +github.com/bkielbasa/cyclop v1.2.3/go.mod h1:kHTwA9Q0uZqOADdupvcFJQtp/ksSnytRMe8ztxG8Fuo= github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M= github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k= -github.com/bombsimon/wsl/v4 v4.2.1 h1:Cxg6u+XDWff75SIFFmNsqnIOgob+Q9hG6y/ioKbRFiM= -github.com/bombsimon/wsl/v4 v4.2.1/go.mod h1:Xu/kDxGZTofQcDGCtQe9KCzhHphIe0fDuyWTxER9Feo= -github.com/breml/bidichk v0.2.7 h1:dAkKQPLl/Qrk7hnP6P+E0xOodrq8Us7+U0o4UBOAlQY= -github.com/breml/bidichk v0.2.7/go.mod h1:YodjipAGI9fGcYM7II6wFvGhdMYsC5pHDlGzqvEW3tQ= -github.com/breml/errchkjson v0.3.6 h1:VLhVkqSBH96AvXEyclMR37rZslRrY2kcyq+31HCsVrA= -github.com/breml/errchkjson v0.3.6/go.mod h1:jhSDoFheAF2RSDOlCfhHO9KqhZgAYLyvHe7bRCX8f/U= -github.com/butuzov/ireturn v0.3.0 h1:hTjMqWw3y5JC3kpnC5vXmFJAWI/m31jaCYQqzkS6PL0= 
-github.com/butuzov/ireturn v0.3.0/go.mod h1:A09nIiwiqzN/IoVo9ogpa0Hzi9fex1kd9PSD6edP5ZA= -github.com/butuzov/mirror v1.2.0 h1:9YVK1qIjNspaqWutSv8gsge2e/Xpq1eqEkslEUHy5cs= -github.com/butuzov/mirror v1.2.0/go.mod h1:DqZZDtzm42wIAIyHXeN8W/qb1EPlb9Qn/if9icBOpdQ= +github.com/bombsimon/wsl/v4 v4.5.0 h1:iZRsEvDdyhd2La0FVi5k6tYehpOR/R7qIUjmKk7N74A= +github.com/bombsimon/wsl/v4 v4.5.0/go.mod h1:NOQ3aLF4nD7N5YPXMruR6ZXDOAqLoM0GEpLwTdvmOSc= +github.com/breml/bidichk v0.3.2 h1:xV4flJ9V5xWTqxL+/PMFF6dtJPvZLPsyixAoPe8BGJs= +github.com/breml/bidichk v0.3.2/go.mod h1:VzFLBxuYtT23z5+iVkamXO386OB+/sVwZOpIj6zXGos= +github.com/breml/errchkjson v0.4.0 h1:gftf6uWZMtIa/Is3XJgibewBm2ksAQSY/kABDNFTAdk= +github.com/breml/errchkjson v0.4.0/go.mod h1:AuBOSTHyLSaaAFlWsRSuRBIroCh3eh7ZHh5YeelDIk8= +github.com/butuzov/ireturn v0.3.1 h1:mFgbEI6m+9W8oP/oDdfA34dLisRFCj2G6o/yiI1yZrY= +github.com/butuzov/ireturn v0.3.1/go.mod h1:ZfRp+E7eJLC0NQmk1Nrm1LOrn/gQlOykv+cVPdiXH5M= +github.com/butuzov/mirror v1.3.0 h1:HdWCXzmwlQHdVhwvsfBb2Au0r3HyINry3bDWLYXiKoc= +github.com/butuzov/mirror v1.3.0/go.mod h1:AEij0Z8YMALaq4yQj9CPPVYOyJQyiexpQEQgihajRfI= +github.com/caarlos0/testfs v0.4.4 h1:3PHvzHi5Lt+g332CiShwS8ogTgS3HjrmzZxCm6JCDr8= +github.com/caarlos0/testfs v0.4.4/go.mod h1:bRN55zgG4XCUVVHZCeU+/Tz1Q6AxEJOEJTliBy+1DMk= github.com/catenacyber/perfsprint v0.7.1 h1:PGW5G/Kxn+YrN04cRAZKC+ZuvlVwolYMrIyyTJ/rMmc= github.com/catenacyber/perfsprint v0.7.1/go.mod h1:/wclWYompEyjUD2FuIIDVKNkqz7IgBIWXIH3V0Zol50= +github.com/cbroglie/mustache v1.4.0 h1:Azg0dVhxTml5me+7PsZ7WPrQq1Gkf3WApcHMjMprYoU= +github.com/cbroglie/mustache v1.4.0/go.mod h1:SS1FTIghy0sjse4DUVGV1k/40B1qE1XkD9DtDsHo9iM= github.com/ccojocar/zxcvbn-go v1.0.2 h1:na/czXU8RrhXO4EZme6eQJLR4PzcGsahsBOAwU6I3Vg= github.com/ccojocar/zxcvbn-go v1.0.2/go.mod h1:g1qkXtUSvHP8lhHp5GrSmTz6uWALGRMQdw6Qnz/hi60= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/charithe/durationcheck v0.0.10 h1:wgw73BiocdBDQPik+zcEoBG/ob8uyBHf2iyoHGPf5w4= github.com/charithe/durationcheck v0.0.10/go.mod h1:bCWXb7gYRysD1CU3C+u4ceO49LoGOY1C1L6uouGNreQ= github.com/chavacava/garif v0.1.0 h1:2JHa3hbYf5D9dsgseMKAmc/MZ109otzgNFk5s87H9Pc= github.com/chavacava/garif v0.1.0/go.mod h1:XMyYCkEL58DF0oyW4qDjjnPWONs2HBqYKI+UIPD+Gww= -github.com/ckaznocha/intrange v0.1.2 h1:3Y4JAxcMntgb/wABQ6e8Q8leMd26JbX2790lIss9MTI= -github.com/ckaznocha/intrange v0.1.2/go.mod h1:RWffCw/vKBwHeOEwWdCikAtY0q4gGt8VhJZEEA5n+RE= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDUstnC9DIo= -github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc= -github.com/daixiang0/gci v0.13.4 h1:61UGkmpoAcxHM2hhNkZEf5SzwQtWJXTSws7jaPyqwlw= -github.com/daixiang0/gci v0.13.4/go.mod h1:12etP2OniiIdP4q+kjUGrC/rUagga7ODbqsom5Eo5Yk= +github.com/ckaznocha/intrange v0.3.0 h1:VqnxtK32pxgkhJgYQEeOArVidIPg+ahLP7WBOXZd5ZY= +github.com/ckaznocha/intrange v0.3.0/go.mod h1:+I/o2d2A1FBHgGELbGxzIcyd3/9l9DuwjM8FsbSS3Lo= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0= 
+github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/curioswitch/go-reassign v0.3.0 h1:dh3kpQHuADL3cobV/sSGETA8DOv457dwl+fbBAhrQPs= +github.com/curioswitch/go-reassign v0.3.0/go.mod h1:nApPCCTtqLJN/s8HfItCcKV0jIPwluBOvZP+dsJGA88= +github.com/daixiang0/gci v0.13.5 h1:kThgmH1yBmZSBCh1EJVxQ7JsHpm5Oms0AMed/0LaH4c= +github.com/daixiang0/gci v0.13.5/go.mod h1:12etP2OniiIdP4q+kjUGrC/rUagga7ODbqsom5Eo5Yk= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/denis-tingaikin/go-header v0.5.0 h1:SRdnP5ZKvcO9KKRP1KJrhFR3RrlGuD+42t4429eC9k8= github.com/denis-tingaikin/go-header v0.5.0/go.mod h1:mMenU5bWrok6Wl2UsZjy+1okegmwQ3UgWl4V1D8gjlY= +github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI= +github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/ettle/strcase v0.2.0 h1:fGNiVF21fHXpX1niBgk0aROov1LagYsOwV/xqKDKR/Q= github.com/ettle/strcase v0.2.0/go.mod h1:DajmHElDSaX76ITe3/VHVyMin4LWSJN5Z909Wp+ED1A= -github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= -github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/firefart/nonamedreturns v1.0.5 h1:tM+Me2ZaXs8tfdDw3X6DOX++wMCOqzYUho6tUTYIdRA= @@ -95,16 +124,18 @@ github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHk github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo= github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= -github.com/ghostiam/protogetter v0.3.6 h1:R7qEWaSgFCsy20yYHNIJsU9ZOb8TziSRRxuAOTVKeOk= -github.com/ghostiam/protogetter v0.3.6/go.mod h1:7lpeDnEJ1ZjL/YtyoN99ljO4z0pd3H0d18/t2dPBxHw= -github.com/go-critic/go-critic v0.11.4 h1:O7kGOCx0NDIni4czrkRIXTnit0mkyKOCePh3My6OyEU= -github.com/go-critic/go-critic v0.11.4/go.mod h1:2QAdo4iuLik5S9YG0rT4wcZ8QxwHYkrr6/2MWAiv/vc= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-task/slim-sprig 
v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/ghostiam/protogetter v0.3.8 h1:LYcXbYvybUyTIxN2Mj9h6rHrDZBDwZloPoKctWrFyJY= +github.com/ghostiam/protogetter v0.3.8/go.mod h1:WZ0nw9pfzsgxuRsPOFQomgDVSWtDLJRfQJEhsGbmQMA= +github.com/go-critic/go-critic v0.11.5 h1:TkDTOn5v7EEngMxu8KbuFqFR43USaaH8XRJLz1jhVYA= +github.com/go-critic/go-critic v0.11.5/go.mod h1:wu6U7ny9PiaHaZHcvMDmdysMqvDem162Rh3zWTrqk8M= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI= +github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/go-toolsmith/astcast v1.1.0 h1:+JN9xZV1A+Re+95pgnMgDboWNVnIMMQXwfBwLRPgSC8= @@ -126,30 +157,30 @@ github.com/go-toolsmith/strparse v1.1.0 h1:GAioeZUK9TGxnLS+qfdqNbA4z0SSm5zVNtCQi github.com/go-toolsmith/strparse v1.1.0/go.mod h1:7ksGy58fsaQkGQlY8WVoBFNyEPMGuJin1rfoPS4lBSQ= github.com/go-toolsmith/typep v1.1.0 h1:fIRYDyF+JywLfqzyhdiHzRop/GQDxxNhLGQ6gFUNHus= github.com/go-toolsmith/typep v1.1.0/go.mod h1:fVIw+7zjdsMxDA3ITWnH1yOiw1rnTQKCsF/sk2H/qig= -github.com/go-viper/mapstructure/v2 v2.0.0 h1:dhn8MZ1gZ0mzeodTG3jt5Vj/o87xZKuNAprG2mQfMfc= -github.com/go-viper/mapstructure/v2 v2.0.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= -github.com/go-xmlfmt/xmlfmt v1.1.2 h1:Nea7b4icn8s57fTx1M5AI4qQT5HEM3rVUO8MuE6g80U= -github.com/go-xmlfmt/xmlfmt v1.1.2/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= -github.com/gobuffalo/flect v1.0.2 h1:eqjPGSo2WmjgY2XlpGwo2NXgL3RucAKo4k4qQMNA5sA= -github.com/gobuffalo/flect v1.0.2/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs= +github.com/go-viper/mapstructure/v2 v2.3.0 h1:27XbWsHIqhbdR5TIC911OfYvgSaW93HM+dX7970Q7jk= +github.com/go-viper/mapstructure/v2 v2.3.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-xmlfmt/xmlfmt v1.1.3 h1:t8Ey3Uy7jDSEisW2K3somuMKIpzktkWptA0iFCnRUWY= +github.com/go-xmlfmt/xmlfmt v1.1.3/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= +github.com/gobuffalo/flect v1.0.3 h1:xeWBM2nui+qnVvNM4S3foBhCAL2XgPU+a7FdpelbTq4= +github.com/gobuffalo/flect v1.0.3/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= -github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E= +github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM= github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod 
h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= -github.com/golangci/gofmt v0.0.0-20231019111953-be8c47862aaa h1:L0Zq43Px2HrLroRKEgfCsQLMJUkjskJBB1kd1Zjcvvc= -github.com/golangci/gofmt v0.0.0-20231019111953-be8c47862aaa/go.mod h1:Pm5KhLPA8gSnQwrQ6ukebRcapGb/BG9iUkdaiCcGHJM= -github.com/golangci/golangci-lint v1.59.1 h1:CRRLu1JbhK5avLABFJ/OHVSQ0Ie5c4ulsOId1h3TTks= -github.com/golangci/golangci-lint v1.59.1/go.mod h1:jX5Oif4C7P0j9++YB2MMJmoNrb01NJ8ITqKWNLewThg= +github.com/golangci/go-printf-func-name v0.1.0 h1:dVokQP+NMTO7jwO4bwsRwLWeudOVUPPyAKJuzv8pEJU= +github.com/golangci/go-printf-func-name v0.1.0/go.mod h1:wqhWFH5mUdJQhweRnldEywnR5021wTdZSNgwYceV14s= +github.com/golangci/gofmt v0.0.0-20241223200906-057b0627d9b9 h1:t5wybL6RtO83VwoMOb7U/Peqe3gGKQlPIC66wXmnkvM= +github.com/golangci/gofmt v0.0.0-20241223200906-057b0627d9b9/go.mod h1:Ag3L7sh7E28qAp/5xnpMMTuGYqxLZoSaEHZDkZB1RgU= +github.com/golangci/golangci-lint v1.63.4 h1:bJQFQ3hSfUto597dkL7ipDzOxsGEpiWdLiZ359OWOBI= +github.com/golangci/golangci-lint v1.63.4/go.mod h1:Hx0B7Lg5/NXbaOHem8+KU+ZUIzMI6zNj/7tFwdnn10I= github.com/golangci/misspell v0.6.0 h1:JCle2HUTNWirNlDIAUO44hUsKhOFqGPoC4LZxlaSXDs= github.com/golangci/misspell v0.6.0/go.mod h1:keMNyY6R9isGaSAu+4Q8NMBwMPkh15Gtc8UCVoDtAWo= -github.com/golangci/modinfo v0.3.4 h1:oU5huX3fbxqQXdfspamej74DFX0kyGLkw1ppvXoJ8GA= -github.com/golangci/modinfo v0.3.4/go.mod h1:wytF1M5xl9u0ij8YSvhkEVPP3M5Mc7XLl1pxH3B2aUM= github.com/golangci/plugin-module-register v0.1.1 h1:TCmesur25LnyJkpsVrupv1Cdzo+2f7zX0H6Jkw1Ol6c= github.com/golangci/plugin-module-register v0.1.1/go.mod h1:TTpqoB6KkwOJMV8u7+NyXMrkwwESJLOkfl9TxR1DGFc= github.com/golangci/revgrep v0.5.3 h1:3tL7c1XBMtWHHqVpS5ChmiAAoe4PF/d5+ULzV9sLAzs= @@ -167,10 +198,14 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg= -github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= +github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 h1:5iH8iuqE5apketRbSFBy+X1V0o+l+8NF1avt4HWl7cA= +github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gordonklaus/ineffassign v0.1.0 h1:y2Gd/9I7MdY1oEIt+n+rowjBNDcLQq3RsH5hwJd0f9s= github.com/gordonklaus/ineffassign v0.1.0/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0= +github.com/goreleaser/fileglob v1.3.0 h1:/X6J7U8lbDpQtBvGcwwPS6OpzkNVlVEsFUVRx9+k+7I= +github.com/goreleaser/fileglob v1.3.0/go.mod h1:Jx6BoXv3mbYkEzwm9THo7xbr5egkAraxkGorbJb4RxU= github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk= github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc= github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado= @@ -181,39 +216,49 @@ github.com/gostaticanalysis/forcetypeassert v0.1.0/go.mod h1:qZEedyP/sY1lTGV1uJ3 github.com/gostaticanalysis/nilerr v0.1.1 h1:ThE+hJP0fEp4zWLkWHWcRyI2Od0p7DlgYG3Uqrmrcpk= 
github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A= github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M= -github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY= -github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU= +github.com/gostaticanalysis/testutil v0.5.0 h1:Dq4wT1DdTwTGCQQv3rl3IvD5Ld0E6HiY+3Zh0sUGqw8= +github.com/gostaticanalysis/testutil v0.5.0/go.mod h1:OLQSbuM6zw2EvCcXTz1lVq5unyoNft372msDY0nY5Hs= +github.com/hashicorp/go-envparse v0.1.0 h1:bE++6bhIsNCPLvgDZkYqo3nA+/PFI51pkrHdmPSDFPY= +github.com/hashicorp/go-envparse v0.1.0/go.mod h1:OHheN1GoygLlAkTlXLXvAdnXdZxy8JUweQ1rAXx1xnc= +github.com/hashicorp/go-immutable-radix/v2 v2.1.0 h1:CUW5RYIcysz+D3B+l1mDeXrQ7fUvGGCwJfdASSzbrfo= +github.com/hashicorp/go-immutable-radix/v2 v2.1.0/go.mod h1:hgdqLXA4f6NIjRVisM1TJ9aOJVNRqKZj+xDGF6m7PBw= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= +github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= +github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jgautheron/goconst v1.7.1 h1:VpdAG7Ca7yvvJk5n8dMwQhfEZJh95kl/Hl9S1OI5Jkk= github.com/jgautheron/goconst v1.7.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs= github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= -github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48= -github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= -github.com/jjti/go-spancheck v0.6.1 h1:ZK/wE5Kyi1VX3PJpUO2oEgeoI4FWOUm7Shb2Gbv5obI= -github.com/jjti/go-spancheck v0.6.1/go.mod h1:vF1QkOO159prdo6mHRxak2CpzDpHAfKiPUDP/NeRnX8= +github.com/jjti/go-spancheck v0.6.4 h1:Tl7gQpYf4/TMU7AT84MN83/6PutY21Nb9fuQjFTpRRc= +github.com/jjti/go-spancheck v0.6.4/go.mod h1:yAEYdKJ2lRkDA8g7X+oKUHXOWVAXSBJRv04OhF+QUjk= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v1.0.0 
h1:8X1gzZpR+nVQLAht+L/foqOeX2l9DTZoaIPbEQHxsds= github.com/jstemmer/go-junit-report v1.0.0/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/julz/importas v0.1.0 h1:F78HnrsjY3cR7j0etXy5+TU1Zuy7Xt08X/1aJnH5xXY= -github.com/julz/importas v0.1.0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= +github.com/julz/importas v0.2.0 h1:y+MJN/UdL63QbFJHws9BVC5RpA2iq0kpjrFajTGivjQ= +github.com/julz/importas v0.2.0/go.mod h1:pThlt589EnCYtMnmhmRYY/qn9lCf/frPOK+WMx3xiJY= github.com/karamaru-alpha/copyloopvar v1.1.0 h1:x7gNyKcC2vRBO1H2Mks5u1VxQtYvFiym7fCjIP8RPos= github.com/karamaru-alpha/copyloopvar v1.1.0/go.mod h1:u7CIfztblY0jZLOQZgH3oYsJzpC2A7S6u/lfgSXHy0k= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/errcheck v1.7.0 h1:+SbscKmWJ5mOK/bO1zS60F5I9WwZDWOfRsC4RwfwRV0= -github.com/kisielk/errcheck v1.7.0/go.mod h1:1kLL+jV4e+CFfueBmI1dSK2ADDyQnlrnrY/FqKluHJQ= +github.com/kisielk/errcheck v1.8.0 h1:ZX/URYa7ilESY19ik/vBmCn6zdGQLxACwjAcWbHlYlg= +github.com/kisielk/errcheck v1.8.0/go.mod h1:1kLL+jV4e+CFfueBmI1dSK2ADDyQnlrnrY/FqKluHJQ= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkHAIKE/contextcheck v1.1.5 h1:CdnJh63tcDe53vG+RebdpdXJTc9atMgGqdx8LXxiilg= github.com/kkHAIKE/contextcheck v1.1.5/go.mod h1:O930cpht4xb1YQpK+1+AgoM3mFsvxr7uyFptcnWTYUA= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -224,16 +269,20 @@ github.com/kunwardeep/paralleltest v1.0.10 h1:wrodoaKYzS2mdNVnc4/w31YaXFtsc21PCT github.com/kunwardeep/paralleltest v1.0.10/go.mod h1:2C7s65hONVqY7Q5Efj5aLzRCNLjw2h4eMc9EcypGjcY= github.com/kyoh86/exportloopref v0.1.11 h1:1Z0bcmTypkL3Q4k+IDHMWTcnCliEZcaPiIe0/ymEyhQ= github.com/kyoh86/exportloopref v0.1.11/go.mod h1:qkV4UF1zGl6EkF1ox8L5t9SwyeBAZ3qLMd6up458uqA= -github.com/lasiar/canonicalheader v1.1.1 h1:wC+dY9ZfiqiPwAexUApFush/csSPXeIi4QqyxXmng8I= -github.com/lasiar/canonicalheader v1.1.1/go.mod h1:cXkb3Dlk6XXy+8MVQnF23CYKWlyA7kfQhSw2CcZtZb0= -github.com/ldez/gomoddirectives v0.2.4 h1:j3YjBIjEBbqZ0NKtBNzr8rtMHTOrLPeiwTkfUJZ3alg= -github.com/ldez/gomoddirectives v0.2.4/go.mod h1:oWu9i62VcQDYp9EQ0ONTfqLNh+mDLWWDO+SO0qSQw5g= -github.com/ldez/tagliatelle v0.5.0 h1:epgfuYt9v0CG3fms0pEgIMNPuFf/LpPIfjk4kyqSioo= -github.com/ldez/tagliatelle v0.5.0/go.mod h1:rj1HmWiL1MiKQuOONhd09iySTEkUuE/8+5jtPYz9xa4= +github.com/lasiar/canonicalheader v1.1.2 h1:vZ5uqwvDbyJCnMhmFYimgMZnJMjwljN5VGY0VKbMXb4= +github.com/lasiar/canonicalheader v1.1.2/go.mod h1:qJCeLFS0G/QlLQ506T+Fk/fWMa2VmBUiEI2cuMK4djI= +github.com/ldez/exptostd v0.3.1 h1:90yWWoAKMFHeovTK8uzBms9Ppp8Du/xQ20DRO26Ymrw= +github.com/ldez/exptostd v0.3.1/go.mod h1:iZBRYaUmcW5jwCR3KROEZ1KivQQp6PHXbDPk9hqJKCQ= +github.com/ldez/gomoddirectives v0.6.0 h1:Jyf1ZdTeiIB4dd+2n4qw+g4aI9IJ6JyfOZ8BityWvnA= +github.com/ldez/gomoddirectives v0.6.0/go.mod h1:TuwOGYoPAoENDWQpe8DMqEm5nIfjrxZXmxX/CExWyZ4= +github.com/ldez/grignotin v0.7.0 h1:vh0dI32WhHaq6LLPZ38g7WxXuZ1+RzyrJ7iPG9JMa8c= +github.com/ldez/grignotin v0.7.0/go.mod h1:uaVTr0SoZ1KBii33c47O1M8Jp3OP3YDwhZCmzT9GHEk= +github.com/ldez/tagliatelle v0.7.1 h1:bTgKjjc2sQcsgPiT902+aadvMjCeMHrY7ly2XKFORIk= +github.com/ldez/tagliatelle 
v0.7.1/go.mod h1:3zjxUpsNB2aEZScWiZTHrAXOl1x25t3cRmzfK1mlo2I= +github.com/ldez/usetesting v0.4.2 h1:J2WwbrFGk3wx4cZwSMiCQQ00kjGR0+tuuyW0Lqm4lwA= +github.com/ldez/usetesting v0.4.2/go.mod h1:eEs46T3PpQ+9RgN9VjpY6qWdiw2/QmfiDeWmdZdrjIQ= github.com/leonklingele/grouper v1.1.2 h1:o1ARBDLOmmasUaNDesWqWCIFH3u7hoFlM84YrjT3mIY= github.com/leonklingele/grouper v1.1.2/go.mod h1:6D0M/HVkhs2yRKRFZUoGjeDy7EZTfFBE9gl4kjmIGkA= -github.com/lufeee/execinquery v1.2.1 h1:hf0Ems4SHcUGBxpGN7Jz78z1ppVkP/837ZlETPCEtOM= -github.com/lufeee/execinquery v1.2.1/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM= github.com/macabu/inamedparam v0.1.3 h1:2tk/phHkMlEL/1GNe/Yf6kkR/hkcUdAEY3L0hjYV1Mk= github.com/macabu/inamedparam v0.1.3/go.mod h1:93FLICAIk/quk7eaPPQvbzihUdn/QkGDwIZEoLtpH6I= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= @@ -242,8 +291,8 @@ github.com/maratori/testableexamples v1.0.0 h1:dU5alXRrD8WKSjOUnmJZuzdxWOEQ57+7s github.com/maratori/testableexamples v1.0.0/go.mod h1:4rhjL1n20TUTT4vdh3RDqSizKLyXp7K2u6HgraZCGzE= github.com/maratori/testpackage v1.1.1 h1:S58XVV5AD7HADMmD0fNnziNHqKvSdDuEKdPD1rNTU04= github.com/maratori/testpackage v1.1.1/go.mod h1:s4gRK/ym6AMrqpOa/kEbQTV4Q4jb7WeLZzVhVVVOQMc= -github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26 h1:gWg6ZQ4JhDfJPqlo2srm/LN17lpybq15AryXIRcWYLE= -github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= +github.com/matoous/godox v0.0.0-20240105082147-c5b5e0e7c0c0 h1:Ny7cm4KSWceJLYyI1sm+aFIVDWSGXLcOJ0O0UaS5wdU= +github.com/matoous/godox v0.0.0-20240105082147-c5b5e0e7c0c0/go.mod h1:jgE/3fUXiTurkdHOLT5WEkThTSuE7yxHv5iWPa80afs= github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE= github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= @@ -252,41 +301,49 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= -github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= -github.com/mgechev/revive v1.3.7 h1:502QY0vQGe9KtYJ9FpxMz9rL+Fc/P13CI5POL4uHCcE= -github.com/mgechev/revive v1.3.7/go.mod h1:RJ16jUbF0OWC3co/+XTxmFNgEpUPwnnA0BRllX2aDNA= +github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mgechev/revive v1.5.1 h1:hE+QPeq0/wIzJwOphdVyUJ82njdd8Khp4fUIHGZHW3M= +github.com/mgechev/revive v1.5.1/go.mod h1:lC9AhkJIBs5zwx8wkudyHrU+IJkrEKmpCmGMnIJPk4o= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod 
h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/moricho/tparallel v0.3.1 h1:fQKD4U1wRMAYNngDonW5XupoB/ZGJHdpzrWqgyg9krA= -github.com/moricho/tparallel v0.3.1/go.mod h1:leENX2cUv7Sv2qDgdi0D0fCftN8fRC67Bcn8pqzeYNI= +github.com/moricho/tparallel v0.3.2 h1:odr8aZVFA3NZrNybggMkYO3rgPRcqjeQUlBBFVxKHTI= +github.com/moricho/tparallel v0.3.2/go.mod h1:OQ+K3b4Ln3l2TZveGCywybl68glfLEwFGqvnjok8b+U= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/nakabonne/nestif v0.3.1 h1:wm28nZjhQY5HyYPx+weN3Q65k6ilSBxDb8v5S81B81U= github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE= +github.com/nikolalohinski/gonja/v2 v2.3.3 h1:5cTcmz0i/DwJl67US8Rvnb4OkBXB5V5OWd5IIAPPkXw= +github.com/nikolalohinski/gonja/v2 v2.3.3/go.mod h1:8KC3RlefxnOaY5P4rH5erdwV0/owS83U615cSnDLYFs= github.com/nishanths/exhaustive v0.12.0 h1:vIY9sALmw6T/yxiASewa4TQcFsVYZQQRUQJhKRf3Swg= github.com/nishanths/exhaustive v0.12.0/go.mod h1:mEZ95wPIZW+x8kC4TgC+9YCUgiST7ecevsVDTgc2obs= github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk= github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c= -github.com/nunnatsa/ginkgolinter v0.16.2 h1:8iLqHIZvN4fTLDC0Ke9tbSZVcyVHoBs0HIbnVSxfHJk= -github.com/nunnatsa/ginkgolinter v0.16.2/go.mod h1:4tWRinDN1FeJgU+iJANW/kz7xKN5nYRAOfJDQUS9dOQ= +github.com/nunnatsa/ginkgolinter v0.18.4 h1:zmX4KUR+6fk/vhUFt8DOP6KwznekhkmVSzzVJve2vyM= +github.com/nunnatsa/ginkgolinter v0.18.4/go.mod h1:AMEane4QQ6JwFz5GgjI5xLUM9S/CylO+UyM97fN2iBI= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.17.3 h1:oJcvKpIb7/8uLpDDtnQuf18xVnwKp8DTD7DQ6gTd/MU= -github.com/onsi/ginkgo/v2 v2.17.3/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc= -github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= -github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= +github.com/onsi/ginkgo/v2 v2.20.2 
h1:7NVCeyIWROIAheY21RLS+3j2bb52W0W82tkberYytp4= +github.com/onsi/ginkgo/v2 v2.20.2/go.mod h1:K9gyxPIlb+aIvnZ8bd9Ak+YP18w3APlR+5coaZoE2ag= +github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= +github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/orellazri/renderkit v0.6.3 h1:2FADTZPgPHB7gobSH3tHMD24Zt+O7CW3Oq09yuvSEx0= +github.com/orellazri/renderkit v0.6.3/go.mod h1:kwgCCgVNwv4Z2WaremVrxDyrMPmAaxVrwBRLb6VOoig= github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w= @@ -294,25 +351,27 @@ github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJ github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= -github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= -github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= +github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= +github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/polyfloyd/go-errorlint v1.5.2 h1:SJhVik3Umsjh7mte1vE0fVZ5T1gznasQG3PV7U5xFdA= -github.com/polyfloyd/go-errorlint v1.5.2/go.mod h1:sH1QC1pxxi0fFecsVIzBmxtrgd9IF/SkJpA6wqyKAJs= +github.com/polyfloyd/go-errorlint v1.7.0 h1:Zp6lzCK4hpBDj8y8a237YK4EPrMXQWvOe3nGoH4pFrU= +github.com/polyfloyd/go-errorlint v1.7.0/go.mod h1:dGWKu85mGHnegQ2SWpEybFityCg3j7ZbwsVUxAOk9gY= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= -github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= -github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= -github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= -github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= -github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= -github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= -github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= -github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= -github.com/quasilyte/go-ruleguard v0.4.2 h1:htXcXDK6/rO12kiTHKfHuqR4kr3Y4M0J0rOL6CH/BYs= -github.com/quasilyte/go-ruleguard v0.4.2/go.mod h1:GJLgqsLeo4qgavUoL8JeGFNS7qcisx3awV/w9eWTmNI= +github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= 
+github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA= +github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/quasilyte/go-ruleguard v0.4.3-0.20240823090925-0fe6f58b47b1 h1:+Wl/0aFp0hpuHM3H//KMft64WQ1yX9LdJY64Qm/gFCo= +github.com/quasilyte/go-ruleguard v0.4.3-0.20240823090925-0fe6f58b47b1/go.mod h1:GJLgqsLeo4qgavUoL8JeGFNS7qcisx3awV/w9eWTmNI= github.com/quasilyte/go-ruleguard/dsl v0.3.22 h1:wd8zkOhSNr+I+8Qeciml08ivDt1pSXe60+5DqOpCjPE= github.com/quasilyte/go-ruleguard/dsl v0.3.22/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= github.com/quasilyte/gogrep v0.5.0 h1:eTKODPXbI8ffJMN+W2aE0+oL0z/nh8/5eNdiO34SOAo= @@ -321,60 +380,65 @@ github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 h1:TCg2WBOl github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 h1:M8mH9eK4OUR4lu7Gd+PU1fV2/qnDNfzT635KRSObncs= github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ= +github.com/raeperd/recvcheck v0.2.0 h1:GnU+NsbiCqdC2XX5+vMZzP+jAJC5fht7rcVTAhX74UI= +github.com/raeperd/recvcheck v0.2.0/go.mod h1:n04eYkwIR0JbgD73wT8wL4JjPC3wm0nFtzBnWNocnYU= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= -github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryancurrah/gomodguard v1.3.2 h1:CuG27ulzEB1Gu5Dk5gP8PFxSOZ3ptSdP5iI/3IXxM18= -github.com/ryancurrah/gomodguard v1.3.2/go.mod h1:LqdemiFomEjcxOqirbQCb3JFvSxH2JUYMerTFd3sF2o= +github.com/ryancurrah/gomodguard v1.3.5 h1:cShyguSwUEeC0jS7ylOiG/idnd1TpJ1LfHGpV3oJmPU= +github.com/ryancurrah/gomodguard v1.3.5/go.mod h1:MXlEPQRxgfPQa62O8wzK3Ozbkv9Rkqr+wKjSxTdsNJE= github.com/ryanrolds/sqlclosecheck v0.5.1 h1:dibWW826u0P8jNLsLN+En7+RqWWTYrjCB9fJfSfdyCU= github.com/ryanrolds/sqlclosecheck v0.5.1/go.mod h1:2g3dUjoS6AL4huFdv6wn55WpLIDjY7ZgUR4J8HOO/XQ= -github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= -github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= 
+github.com/sagikazarmark/locafero v0.6.0 h1:ON7AQg37yzcRPU69mt7gwhFEBwxI6P9T4Qu3N51bwOk= +github.com/sagikazarmark/locafero v0.6.0/go.mod h1:77OmuIc6VTraTXKXIs/uvUxKGUXjE1GbemJYHqdNjX0= github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= -github.com/sanposhiho/wastedassign/v2 v2.0.7 h1:J+6nrY4VW+gC9xFzUc+XjPD3g3wF3je/NsJFwFK7Uxc= -github.com/sanposhiho/wastedassign/v2 v2.0.7/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= -github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 h1:lZUw3E0/J3roVtGQ+SCrUrg3ON6NgVqpn3+iol9aGu4= -github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY= +github.com/sanposhiho/wastedassign/v2 v2.1.0 h1:crurBF7fJKIORrV85u9UUpePDYGWnwvv3+A96WvwXT0= +github.com/sanposhiho/wastedassign/v2 v2.1.0/go.mod h1:+oSmSC+9bQ+VUAxA66nBb0Z7N8CK7mscKTDYC6aIek4= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 h1:PKK9DyHxif4LZo+uQSgXNqs0jj5+xZwwfKHgph2lxBw= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.1/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU= github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tMEOsumirXcOJqAw= github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ= -github.com/sashamelentyev/usestdlibvars v1.26.0 h1:LONR2hNVKxRmzIrZR0PhSF3mhCAzvnr+DcUiHgREfXE= -github.com/sashamelentyev/usestdlibvars v1.26.0/go.mod h1:9nl0jgOfHKWNFS43Ojw0i7aRoS4j6EBye3YBhmAIRF8= -github.com/securego/gosec/v2 v2.20.1-0.20240525090044-5f0084eb01a9 h1:rnO6Zp1YMQwv8AyxzuwsVohljJgp4L0ZqiCgtACsPsc= -github.com/securego/gosec/v2 v2.20.1-0.20240525090044-5f0084eb01a9/go.mod h1:dg7lPlu/xK/Ut9SedURCoZbVCR4yC7fM65DtH9/CDHs= +github.com/sashamelentyev/usestdlibvars v1.28.0 h1:jZnudE2zKCtYlGzLVreNp5pmCdOxXUzwsMDBkR21cyQ= +github.com/sashamelentyev/usestdlibvars v1.28.0/go.mod h1:9nl0jgOfHKWNFS43Ojw0i7aRoS4j6EBye3YBhmAIRF8= +github.com/securego/gosec/v2 v2.21.4 h1:Le8MSj0PDmOnHJgUATjD96PaXRvCpKC+DGJvwyy0Mlk= +github.com/securego/gosec/v2 v2.21.4/go.mod h1:Jtb/MwRQfRxCXyCm1rfM1BEiiiTfUOdyzzAhlr6lUTA= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/sivchari/containedctx v1.0.3 h1:x+etemjbsh2fB5ewm5FeLNi5bUjK0V8n0RB+Wwfd0XE= github.com/sivchari/containedctx v1.0.3/go.mod h1:c1RDvCbnJLtH4lLcYD/GqwiBSSf4F5Qk0xld2rBqzJ4= -github.com/sivchari/tenv v1.7.1 h1:PSpuD4bu6fSmtWMxSGWcvqUUgIn7k3yOJhOIzVWn8Ak= -github.com/sivchari/tenv v1.7.1/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg= -github.com/sonatard/noctx v0.0.2 h1:L7Dz4De2zDQhW8S0t+KUjY0MAQJd6SgVwhzNIc4ok00= -github.com/sonatard/noctx v0.0.2/go.mod h1:kzFz+CzWSjQ2OzIm46uJZoXuBpa2+0y3T36U18dWqIo= +github.com/sivchari/tenv v1.12.1 
h1:+E0QzjktdnExv/wwsnnyk4oqZBUfuh89YMQT1cyuvSY= +github.com/sivchari/tenv v1.12.1/go.mod h1:1LjSOUCc25snIr5n3DtGGrENhX3LuWefcplwVGC24mw= +github.com/sonatard/noctx v0.1.0 h1:JjqOc2WN16ISWAjAk8M5ej0RfExEXtkEyExl2hLW+OM= +github.com/sonatard/noctx v0.1.0/go.mod h1:0RvBxqY8D4j9cTTTWE8ylt2vqj2EPI8fHmrxHdsaZ2c= github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= github.com/sourcegraph/go-diff v0.7.0 h1:9uLlrd5T46OXs5qpp8L/MTltk0zikUGi0sNNyCpA8G0= github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= -github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= -github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= -github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.18.1 h1:rmuU42rScKWlhhJDyXZRKJQHXFX02chSVW1IvkPGiVM= -github.com/spf13/viper v1.18.1/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= +github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= +github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0= github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= -github.com/stbenjam/no-sprintf-host-port v0.1.1 h1:tYugd/yrm1O0dV+ThCbaKZh195Dfm07ysF0U6JQXczc= -github.com/stbenjam/no-sprintf-host-port v0.1.1/go.mod h1:TLhvtIvONRzdmkFiio4O8LHsN9N74I+PhRquPsxpL0I= +github.com/stbenjam/no-sprintf-host-port v0.2.0 h1:i8pxvGrt1+4G0czLr/WnmyH7zbZ8Bg8etvARQ1rpyl4= +github.com/stbenjam/no-sprintf-host-port v0.2.0/go.mod h1:eL0bQ9PasS0hsyTyfTjjG+E80QIyPnBVQbYZyv20Jfk= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -388,36 +452,42 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c h1:+aPplBwWcHBo6q9xrfWdMrT9o4kltkmmvpemgIjep/8= -github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c/go.mod h1:SbErYREK7xXdsRiigaQiQkI9McGRzYMvlKYaP3Nimdk= -github.com/tdakkota/asciicheck v0.2.0 h1:o8jvnUANo0qXtnslk2d3nMKTFNlOnJjRrNcj0j9qkHM= -github.com/tdakkota/asciicheck v0.2.0/go.mod h1:Qb7Y9EgjCLJGup51gDHFzbI08/gbGhL/UVhYIPWG2rg= +github.com/tdakkota/asciicheck v0.3.0 h1:LqDGgZdholxZMaJgpM6b0U9CFIjDCbFdUF00bDnBKOQ= +github.com/tdakkota/asciicheck v0.3.0/go.mod h1:KoJKXuX/Z/lt6XzLo8WMBfQGzak0SrAKZlvRr4tg8Ac= github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA= github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0= github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag= github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY= -github.com/tetafro/godot v1.4.16 h1:4ChfhveiNLk4NveAZ9Pu2AN8QZ2nkUGFuadM9lrr5D0= -github.com/tetafro/godot v1.4.16/go.mod h1:2oVxTBSftRTh4+MVfUaUXR6bn2GDXCaMcOG4Dk3rfio= -github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 h1:quvGphlmUVU+nhpFa4gg4yJyTRJ13reZMDHrKwYw53M= -github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966/go.mod h1:27bSVNWSBOHm+qRp1T9qzaIpsWEP6TbUnei/43HK+PQ= -github.com/timonwong/loggercheck v0.9.4 h1:HKKhqrjcVj8sxL7K77beXh0adEm6DLjV/QOGeMXEVi4= -github.com/timonwong/loggercheck v0.9.4/go.mod h1:caz4zlPcgvpEkXgVnAJGowHAMW2NwHaNlpS8xDbVhTg= -github.com/tomarrell/wrapcheck/v2 v2.8.3 h1:5ov+Cbhlgi7s/a42BprYoxsr73CbdMUTzE3bRDFASUs= -github.com/tomarrell/wrapcheck/v2 v2.8.3/go.mod h1:g9vNIyhb5/9TQgumxQyOEqDHsmGYcGsVMOx/xGkqdMo= +github.com/tetafro/godot v1.4.20 h1:z/p8Ek55UdNvzt4TFn2zx2KscpW4rWqcnUrdmvWJj7E= +github.com/tetafro/godot v1.4.20/go.mod h1:2oVxTBSftRTh4+MVfUaUXR6bn2GDXCaMcOG4Dk3rfio= +github.com/timakin/bodyclose v0.0.0-20241017074812-ed6a65f985e3 h1:y4mJRFlM6fUyPhoXuFg/Yu02fg/nIPFMOY8tOqppoFg= +github.com/timakin/bodyclose v0.0.0-20241017074812-ed6a65f985e3/go.mod h1:mkjARE7Yr8qU23YcGMSALbIxTQ9r9QBVahQOBRfU460= +github.com/timonwong/loggercheck v0.10.1 h1:uVZYClxQFpw55eh+PIoqM7uAOHMrhVcDoWDery9R8Lg= +github.com/timonwong/loggercheck v0.10.1/go.mod h1:HEAWU8djynujaAVX7QI65Myb8qgfcZ1uKbdpg3ZzKl8= +github.com/tomarrell/wrapcheck/v2 v2.10.0 h1:SzRCryzy4IrAH7bVGG4cK40tNUhmVmMDuJujy4XwYDg= +github.com/tomarrell/wrapcheck/v2 v2.10.0/go.mod h1:g9vNIyhb5/9TQgumxQyOEqDHsmGYcGsVMOx/xGkqdMo= github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+yU8u1Zw= github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= -github.com/ultraware/funlen v0.1.0 h1:BuqclbkY6pO+cvxoq7OsktIXZpgBSkYTQtmwhAK81vI= -github.com/ultraware/funlen v0.1.0/go.mod h1:XJqmOQja6DpxarLj6Jj1U7JuoS8PvL4nEqDaQhy22p4= -github.com/ultraware/whitespace v0.1.1 h1:bTPOGejYFulW3PkcrqkeQwOd6NKOOXvmGD9bo/Gk8VQ= -github.com/ultraware/whitespace v0.1.1/go.mod h1:XcP1RLD81eV4BW8UhQlpaR+SDc2givTvyI8a586WjW8= -github.com/uudashr/gocognit v1.1.2 h1:l6BAEKJqQH2UpKAPKdMfZf5kE4W/2xk8pfU1OVLvniI= -github.com/uudashr/gocognit v1.1.2/go.mod h1:aAVdLURqcanke8h3vg35BC++eseDm66Z7KmchI5et4k= +github.com/ultraware/funlen v0.2.0 h1:gCHmCn+d2/1SemTdYMiKLAHFYxTYz7z9VIDRaTGyLkI= 
+github.com/ultraware/funlen v0.2.0/go.mod h1:ZE0q4TsJ8T1SQcjmkhN/w+MceuatI6pBFSxxyteHIJA= +github.com/ultraware/whitespace v0.2.0 h1:TYowo2m9Nfj1baEQBjuHzvMRbp19i+RCcRYrSWoFa+g= +github.com/ultraware/whitespace v0.2.0/go.mod h1:XcP1RLD81eV4BW8UhQlpaR+SDc2givTvyI8a586WjW8= +github.com/urfave/cli/v2 v2.27.5 h1:WoHEJLdsXr6dDWoJgMq/CboDmyY/8HMMH1fTECbih+w= +github.com/urfave/cli/v2 v2.27.5/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ= +github.com/uudashr/gocognit v1.2.0 h1:3BU9aMr1xbhPlvJLSydKwdLN3tEUUrzPSSM8S4hDYRA= +github.com/uudashr/gocognit v1.2.0/go.mod h1:k/DdKPI6XBZO1q7HgoV2juESI2/Ofj9AcHPZhBBdrTU= +github.com/uudashr/iface v1.3.0 h1:zwPch0fs9tdh9BmL5kcgSpvnObV+yHjO4JjVBl8IA10= +github.com/uudashr/iface v1.3.0/go.mod h1:4QvspiRd3JLPAEXBQ9AiZpLbJlrWWgRChOKDJEuQTdg= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xen0n/gosmopolitan v1.2.2 h1:/p2KTnMzwRexIW8GlKawsTWOxn7UHA+jCMF/V8HHtvU= github.com/xen0n/gosmopolitan v1.2.2/go.mod h1:7XX7Mj61uLYrj0qmeN0zi7XDon9JRAEhYQqAPLVNTeg= +github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4= +github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM= github.com/yagipy/maintidx v1.0.0 h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM= github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk= github.com/yeya24/promlinter v0.3.0 h1:JVDbMp08lVCP7Y6NP3qHroGAO6z2yGKQtS5JsjqtoFs= @@ -435,41 +505,45 @@ gitlab.com/bosi/decorder v0.4.2 h1:qbQaV3zgwnBZ4zPMhGLW4KZe7A7NwxEhJx39R3shffo= gitlab.com/bosi/decorder v0.4.2/go.mod h1:muuhHoaJkA9QLcYHq4Mj8FJUwDZ+EirSHRiaTcTf6T8= go-simpler.org/assert v0.9.0 h1:PfpmcSvL7yAnWyChSjOz6Sp6m9j5lyK8Ok9pEL31YkQ= go-simpler.org/assert v0.9.0/go.mod h1:74Eqh5eI6vCK6Y5l3PI8ZYFXG4Sa+tkr70OIPJAUr28= -go-simpler.org/musttag v0.12.2 h1:J7lRc2ysXOq7eM8rwaTYnNrHd5JwjppzB6mScysB2Cs= -go-simpler.org/musttag v0.12.2/go.mod h1:uN1DVIasMTQKk6XSik7yrJoEysGtR2GRqvWnI9S7TYM= -go-simpler.org/sloglint v0.7.1 h1:qlGLiqHbN5islOxjeLXoPtUdZXb669RW+BDQ+xOSNoU= -go-simpler.org/sloglint v0.7.1/go.mod h1:OlaVDRh/FKKd4X4sIMbsz8st97vomydceL146Fthh/c= -go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= -go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= -go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= -go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= +go-simpler.org/musttag v0.13.0 h1:Q/YAW0AHvaoaIbsPj3bvEI5/QFP7w696IMUpnKXQfCE= +go-simpler.org/musttag v0.13.0/go.mod h1:FTzIGeK6OkKlUDVpj0iQUXZLUO1Js9+mvykDQy9C5yM= +go-simpler.org/sloglint v0.7.2 h1:Wc9Em/Zeuu7JYpl+oKoYOsQSy2X560aVueCW/m6IijY= +go-simpler.org/sloglint v0.7.2/go.mod h1:US+9C80ppl7VsThQclkM7BkCHQAzuz8kHLsW3ppuluo= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= -go.uber.org/zap v1.26.0/go.mod 
h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc h1:ao2WRsKSzW6KuUY9IWPwWahcHCgR0s52IfwutMfEbdM= -golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= +golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67 h1:1UoZQm6f0P/ZO0w1Ri+f+ifG/gXhegadRdwBIXEFWDo= +golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= -golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f h1:phY1HzDcf18Aq9A8KkmRtY9WvOFIxN8wgfvy6Zm1DV8= -golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20241108190413-2d47ceb2692f h1:WTyX8eCCyfdqiPYkRGm0MqElSfYFH3yR1+rl/mct9sA= +golang.org/x/exp/typeparams v0.0.0-20241108190413-2d47ceb2692f/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= -golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod 
h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -480,12 +554,15 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= +golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -493,14 +570,15 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= +golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -509,23 +587,27 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -534,13 +616,14 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text 
v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= @@ -549,34 +632,33 @@ golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= -golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= -golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= +golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= +golang.org/x/tools 
v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 h1:1GBuWVLM/KMVUv1t1En5Gs+gFZCNd360GGb4sSxtrhU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 h1:9+tzLLstTlPTRyJTh+ah5wIMsBW5c4tQwGTN3thOW9Y= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo= +google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1 h1:F29+wU6Ee6qgu9TddPgooOdaqsxTMunOoj8KA5yuS5A= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1/go.mod h1:5KF+wpkbTSbGcR9zteSqZV6fqFOWBl4Yde8En8MryZA= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= +google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= @@ -593,24 +675,24 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.4.7 h1:9MDAWxMoSnB6QoSqiVr7P5mtkT9pOc1kSxchzPCnqJs= -honnef.co/go/tools v0.4.7/go.mod h1:+rnGS1THNh8zMwnd2oVOTL9QF6vmfyG6ZXBULae2uc0= -k8s.io/api v0.29.0 h1:NiCdQMY1QOp1H8lfRyeEf8eOwV6+0xA6XEE44ohDX2A= -k8s.io/api v0.29.0/go.mod h1:sdVmXoz2Bo/cb77Pxi71IPTSErEW32xa4aXwKH7gfBA= -k8s.io/apiextensions-apiserver v0.29.0 h1:0VuspFG7Hj+SxyF/Z/2T0uFbI5gb5LRgEyUVE3Q4lV0= -k8s.io/apiextensions-apiserver v0.29.0/go.mod h1:TKmpy3bTS0mr9pylH0nOt/QzQRrW7/h7yLdRForMZwc= -k8s.io/apimachinery v0.29.0 h1:+ACVktwyicPz0oc6MTMLwa2Pw3ouLAfAon1wPLtG48o= -k8s.io/apimachinery v0.29.0/go.mod h1:eVBxQ/cwiJxH58eK/jd/vAk4mrxmVlnpBH5J2GbMeis= -k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= -k8s.io/klog/v2 
v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= -k8s.io/utils v0.0.0-20231127182322-b307cd553661 h1:FepOBzJ0GXm8t0su67ln2wAZjbQ6RxQGZDnzuLcrUTI= -k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -mvdan.cc/gofumpt v0.6.0 h1:G3QvahNDmpD+Aek/bNOLrFR2XC6ZAdo62dZu65gmwGo= -mvdan.cc/gofumpt v0.6.0/go.mod h1:4L0wf+kgIPZtcCWXynNS2e6bhmj73umwnuXSZarixzA= -mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f h1:lMpcwN6GxNbWtbpI1+xzFLSW8XzX0u72NttUGVFjO3U= -mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f/go.mod h1:RSLa7mKKCNeTTMHBw5Hsy2rfJmd6O2ivt9Dw9ZqCQpQ= -sigs.k8s.io/controller-tools v0.13.0 h1:NfrvuZ4bxyolhDBt/rCZhDnx3M2hzlhgo5n3Iv2RykI= -sigs.k8s.io/controller-tools v0.13.0/go.mod h1:5vw3En2NazbejQGCeWKRrE7q4P+CW8/klfVqP8QZkgA= +honnef.co/go/tools v0.5.1 h1:4bH5o3b5ZULQ4UrBmP+63W9r7qIkqJClEA9ko5YKx+I= +honnef.co/go/tools v0.5.1/go.mod h1:e9irvo83WDG9/irijV44wr3tbhcFeRnfpVlRqVwpzMs= +k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU= +k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI= +k8s.io/apiextensions-apiserver v0.31.1 h1:L+hwULvXx+nvTYX/MKM3kKMZyei+UiSXQWciX/N6E40= +k8s.io/apiextensions-apiserver v0.31.1/go.mod h1:tWMPR3sgW+jsl2xm9v7lAyRF1rYEK71i9G5dRtkknoQ= +k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U= +k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 h1:MDF6h2H/h4tbzmtIKTuctcwZmY0tY9mD9fNT47QO6HI= +k8s.io/utils v0.0.0-20240921022957-49e7df575cb6/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +mvdan.cc/gofumpt v0.7.0 h1:bg91ttqXmi9y2xawvkuMXyvAA/1ZGJqYAEGjXuP0JXU= +mvdan.cc/gofumpt v0.7.0/go.mod h1:txVFJy/Sc/mvaycET54pV8SW8gWxTlUuGHVEcncmNUo= +mvdan.cc/unparam v0.0.0-20240917084806-57a3b4290ba3 h1:YkmTN1n5U60NM02j7TCSWRlW3fqNiuXe/eVXf0dLFN8= +mvdan.cc/unparam v0.0.0-20240917084806-57a3b4290ba3/go.mod h1:z5yboO1sP1Q9pcfvS597TpfbNXQjphDlkCJHzt13ybc= +sigs.k8s.io/controller-tools v0.16.3 h1:z48C5/d4jCVQQvtiSBL5MYyZ3EO2eFIOXrIKMgHVhFY= +sigs.k8s.io/controller-tools v0.16.3/go.mod h1:AEj6k+w1kYpLZv2einOH3mj52ips4W/6FUjnB5tkJGs= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= diff --git a/build/tools/tools.go b/build/tools/tools.go index 1c8e2a57a0..b6279e4381 100644 --- a/build/tools/tools.go +++ b/build/tools/tools.go @@ -6,6 +6,7 @@ import ( _ "github.com/golang/mock/mockgen" _ "github.com/golangci/golangci-lint/cmd/golangci-lint" _ "github.com/jstemmer/go-junit-report" + _ "github.com/orellazri/renderkit" _ "google.golang.org/grpc/cmd/protoc-gen-go-grpc" _ "google.golang.org/protobuf/cmd/protoc-gen-go" _ "mvdan.cc/gofumpt" diff --git a/cni/Dockerfile b/cni/Dockerfile index 97cbdc77ee..b35b2ff9f6 100644 --- a/cni/Dockerfile +++ b/cni/Dockerfile @@ -1,21 +1,25 @@ +# !! AUTOGENERATED - DO NOT EDIT !! 
+# SOURCE: cni/Dockerfile.tmpl ARG ARCH ARG DROPGZ_VERSION=v0.0.12 ARG OS_VERSION ARG OS -# skopeo inspect docker://mcr.microsoft.com/oss/go/microsoft/golang:1.22-cbl-mariner2.0 --format "{{.Name}}@{{.Digest}}" -FROM --platform=linux/${ARCH} mcr.microsoft.com/oss/go/microsoft/golang@sha256:c062e5e23f2d172a8fd590adcd171499af7005cae344a36284255f26e5ce4f8a AS go +# mcr.microsoft.com/oss/go/microsoft/golang:1.23-azurelinux3.0 +FROM --platform=linux/${ARCH} mcr.microsoft.com/oss/go/microsoft/golang@sha256:7d33a8015c48c06e97ecd5139181594b550d4d4d6a9d7fb19083192541213753 AS go -# skopeo inspect docker://mcr.microsoft.com/cbl-mariner/base/core:2.0 --format "{{.Name}}@{{.Digest}}" -FROM --platform=linux/${ARCH} mcr.microsoft.com/cbl-mariner/base/core@sha256:a490e0b0869dc570ae29782c2bc17643aaaad1be102aca83ce0b96e0d0d2d328 AS mariner-core +# mcr.microsoft.com/azurelinux/base/core:3.0 +FROM --platform=linux/${ARCH} mcr.microsoft.com/azurelinux/base/core@sha256:c09a4e011a092a45b5c46ac5633253eb1e1106df028912b89cbe225d9061ef0b AS mariner-core FROM go AS azure-vnet ARG OS ARG VERSION +ARG CNI_AI_PATH +ARG CNI_AI_ID WORKDIR /azure-container-networking COPY . . RUN GOOS=$OS CGO_ENABLED=0 go build -a -o /go/bin/azure-vnet -trimpath -ldflags "-X main.version="$VERSION"" -gcflags="-dwarflocationlists=true" cni/network/plugin/main.go -RUN GOOS=$OS CGO_ENABLED=0 go build -a -o /go/bin/azure-vnet-telemetry -trimpath -ldflags "-X main.version="$VERSION"" -gcflags="-dwarflocationlists=true" cni/telemetry/service/telemetrymain.go +RUN GOOS=$OS CGO_ENABLED=0 go build -a -o /go/bin/azure-vnet-telemetry -trimpath -ldflags "-X main.version="$VERSION" -X "$CNI_AI_PATH"="$CNI_AI_ID"" -gcflags="-dwarflocationlists=true" cni/telemetry/service/telemetrymain.go RUN GOOS=$OS CGO_ENABLED=0 go build -a -o /go/bin/azure-vnet-ipam -trimpath -ldflags "-X main.version="$VERSION"" -gcflags="-dwarflocationlists=true" cni/ipam/plugin/main.go RUN GOOS=$OS CGO_ENABLED=0 go build -a -o /go/bin/azure-vnet-stateless -trimpath -ldflags "-X main.version="$VERSION"" -gcflags="-dwarflocationlists=true" cni/network/stateless/main.go @@ -25,7 +29,7 @@ WORKDIR /payload COPY --from=azure-vnet /go/bin/* /payload/ COPY --from=azure-vnet /azure-container-networking/cni/azure-$OS.conflist /payload/azure.conflist COPY --from=azure-vnet /azure-container-networking/cni/azure-$OS-swift.conflist /payload/azure-swift.conflist -COPY --from=azure-vnet /azure-container-networking/cni/azure-$OS-multitenancy-transparent-vlan.conflist /payload/azure-multitenancy-transparent-vlan.conflist +COPY --from=azure-vnet /azure-container-networking/cni/azure-linux-multitenancy-transparent-vlan.conflist /payload/azure-multitenancy-transparent-vlan.conflist COPY --from=azure-vnet /azure-container-networking/cni/azure-$OS-swift-overlay.conflist /payload/azure-swift-overlay.conflist COPY --from=azure-vnet /azure-container-networking/cni/azure-$OS-swift-overlay-dualstack.conflist /payload/azure-swift-overlay-dualstack.conflist COPY --from=azure-vnet /azure-container-networking/cni/azure-$OS-multitenancy.conflist /payload/azure-multitenancy.conflist @@ -49,15 +53,9 @@ FROM scratch AS linux COPY --from=dropgz /go/bin/dropgz dropgz ENTRYPOINT [ "/dropgz" ] -# skopeo inspect --override-os windows docker://mcr.microsoft.com/windows/nanoserver:ltsc2019 --format "{{.Name}}@{{.Digest}}" -FROM mcr.microsoft.com/windows/nanoserver@sha256:7f6649348a11655e3576463fd6d55c29248f97405f8e643cab2409009339f520 AS ltsc2019 +# 
mcr.microsoft.com/oss/kubernetes/windows-host-process-containers-base-image:v1.0.0 +FROM --platform=windows/${ARCH} mcr.microsoft.com/oss/kubernetes/windows-host-process-containers-base-image@sha256:b4c9637e032f667c52d1eccfa31ad8c63f1b035e8639f3f48a510536bf34032b as hpc -# skopeo inspect --override-os windows docker://mcr.microsoft.com/windows/nanoserver:ltsc2022 --format "{{.Name}}@{{.Digest}}" -FROM mcr.microsoft.com/windows/nanoserver@sha256:244113e50a678a25a63930780f9ccafd22e1a37aa9e3d93295e4cebf0f170a11 AS ltsc2022 - -# skopeo inspect --override-os windows docker://mcr.microsoft.com/windows/nanoserver:ltsc2025 --format "{{.Name}}@{{.Digest}}" ## 2025 isn't tagged yet -FROM mcr.microsoft.com/windows/nanoserver/insider@sha256:67e0ab7f3a79cd73be4a18bae24659c03b294aed0dbeaa624feb3810931f0bd2 AS ltsc2025 - -FROM ${OS_VERSION} as windows +FROM hpc as windows COPY --from=dropgz /go/bin/dropgz dropgz.exe ENTRYPOINT [ "/dropgz.exe" ] diff --git a/cni/Dockerfile.tmpl b/cni/Dockerfile.tmpl new file mode 100644 index 0000000000..c0e04b39dd --- /dev/null +++ b/cni/Dockerfile.tmpl @@ -0,0 +1,61 @@ +# {{.RENDER_MSG}} +# SOURCE: {{.SRC}} +ARG ARCH +ARG DROPGZ_VERSION=v0.0.12 +ARG OS_VERSION +ARG OS + +# {{.GO_IMG}} +FROM --platform=linux/${ARCH} {{.GO_PIN}} AS go + +# {{.MARINER_CORE_IMG}} +FROM --platform=linux/${ARCH} {{.MARINER_CORE_PIN}} AS mariner-core + +FROM go AS azure-vnet +ARG OS +ARG VERSION +ARG CNI_AI_PATH +ARG CNI_AI_ID +WORKDIR /azure-container-networking +COPY . . +RUN GOOS=$OS CGO_ENABLED=0 go build -a -o /go/bin/azure-vnet -trimpath -ldflags "-X main.version="$VERSION"" -gcflags="-dwarflocationlists=true" cni/network/plugin/main.go +RUN GOOS=$OS CGO_ENABLED=0 go build -a -o /go/bin/azure-vnet-telemetry -trimpath -ldflags "-X main.version="$VERSION" -X "$CNI_AI_PATH"="$CNI_AI_ID"" -gcflags="-dwarflocationlists=true" cni/telemetry/service/telemetrymain.go +RUN GOOS=$OS CGO_ENABLED=0 go build -a -o /go/bin/azure-vnet-ipam -trimpath -ldflags "-X main.version="$VERSION"" -gcflags="-dwarflocationlists=true" cni/ipam/plugin/main.go +RUN GOOS=$OS CGO_ENABLED=0 go build -a -o /go/bin/azure-vnet-stateless -trimpath -ldflags "-X main.version="$VERSION"" -gcflags="-dwarflocationlists=true" cni/network/stateless/main.go + +FROM mariner-core AS compressor +ARG OS +WORKDIR /payload +COPY --from=azure-vnet /go/bin/* /payload/ +COPY --from=azure-vnet /azure-container-networking/cni/azure-$OS.conflist /payload/azure.conflist +COPY --from=azure-vnet /azure-container-networking/cni/azure-$OS-swift.conflist /payload/azure-swift.conflist +COPY --from=azure-vnet /azure-container-networking/cni/azure-linux-multitenancy-transparent-vlan.conflist /payload/azure-multitenancy-transparent-vlan.conflist +COPY --from=azure-vnet /azure-container-networking/cni/azure-$OS-swift-overlay.conflist /payload/azure-swift-overlay.conflist +COPY --from=azure-vnet /azure-container-networking/cni/azure-$OS-swift-overlay-dualstack.conflist /payload/azure-swift-overlay-dualstack.conflist +COPY --from=azure-vnet /azure-container-networking/cni/azure-$OS-multitenancy.conflist /payload/azure-multitenancy.conflist +COPY --from=azure-vnet /azure-container-networking/telemetry/azure-vnet-telemetry.config /payload/azure-vnet-telemetry.config +RUN cd /payload && sha256sum * > sum.txt +RUN gzip --verbose --best --recursive /payload && for f in /payload/*.gz; do mv -- "$f" "${f%%.gz}"; done + +FROM go AS dropgz +ARG DROPGZ_VERSION +ARG OS +ARG VERSION +RUN go mod download github.com/azure/azure-container-networking/dropgz@$DROPGZ_VERSION 
+WORKDIR /go/pkg/mod/github.com/azure/azure-container-networking/dropgz\@$DROPGZ_VERSION +COPY --from=compressor /payload/* pkg/embed/fs/ +RUN GOOS=$OS CGO_ENABLED=0 go build -a -o /go/bin/dropgz -trimpath -ldflags "-X github.com/Azure/azure-container-networking/dropgz/internal/buildinfo.Version="$VERSION"" -gcflags="-dwarflocationlists=true" main.go + +FROM scratch AS bins +COPY --from=azure-vnet /go/bin/* / + +FROM scratch AS linux +COPY --from=dropgz /go/bin/dropgz dropgz +ENTRYPOINT [ "/dropgz" ] + +# {{.WIN_HPC_IMG}} +FROM --platform=windows/${ARCH} {{.WIN_HPC_PIN}} as hpc + +FROM hpc as windows +COPY --from=dropgz /go/bin/dropgz dropgz.exe +ENTRYPOINT [ "/dropgz.exe" ] diff --git a/cni/azure-windows-multitenancy-transparent-vlan.conflist b/cni/azure-windows-multitenancy-transparent-vlan.conflist deleted file mode 100644 index 21031acab3..0000000000 --- a/cni/azure-windows-multitenancy-transparent-vlan.conflist +++ /dev/null @@ -1,52 +0,0 @@ -{ - "cniVersion": "0.3.0", - "name": "azure", - "plugins": [ - { - "type": "azure-vnet", - "mode": "transparent-vlan", - "bridge": "azure0", - "multiTenancy":true, - "enableSnatOnHost":true, - "enableExactMatchForPodName": true, - "capabilities": { - "portMappings": true - }, - "ipam": { - "type": "azure-cns" - }, - "dns": { - "Nameservers": [ - "10.0.0.10", - "168.63.129.16" - ], - "Search": [ - "svc.cluster.local" - ] - }, - "AdditionalArgs": [ - { - "Name": "EndpointPolicy", - "Value": { - "Type": "OutBoundNAT", - "ExceptionList": [ - "10.240.0.0/16", - "10.0.0.0/8" - ] - } - }, - { - "Name": "EndpointPolicy", - "Value": { - "Type": "ROUTE", - "DestinationPrefix": "10.0.0.0/8", - "NeedEncap": true - } - } - ], - "windowsSettings": { - "hnsTimeoutDurationInSeconds" : 120 - } - } - ] -} \ No newline at end of file diff --git a/cni/azure-windows-multitenancy.conflist b/cni/azure-windows-multitenancy.conflist index 008fa04c18..2c4c4b0fda 100644 --- a/cni/azure-windows-multitenancy.conflist +++ b/cni/azure-windows-multitenancy.conflist @@ -4,8 +4,6 @@ "plugins": [ { "type": "azure-vnet", - "mode": "bridge", - "bridge": "azure0", "multiTenancy":true, "enableSnatOnHost":true, "enableExactMatchForPodName": true, diff --git a/cni/azure-windows-swift-overlay-dualstack.conflist b/cni/azure-windows-swift-overlay-dualstack.conflist index cd5003c8d1..88c8cfc483 100644 --- a/cni/azure-windows-swift-overlay-dualstack.conflist +++ b/cni/azure-windows-swift-overlay-dualstack.conflist @@ -5,8 +5,6 @@ "plugins": [ { "type": "azure-vnet", - "mode": "bridge", - "bridge": "azure0", "capabilities": { "portMappings": true, "dns": true diff --git a/cni/azure-windows-swift-overlay.conflist b/cni/azure-windows-swift-overlay.conflist index cd5003c8d1..88c8cfc483 100644 --- a/cni/azure-windows-swift-overlay.conflist +++ b/cni/azure-windows-swift-overlay.conflist @@ -5,8 +5,6 @@ "plugins": [ { "type": "azure-vnet", - "mode": "bridge", - "bridge": "azure0", "capabilities": { "portMappings": true, "dns": true diff --git a/cni/azure-windows-swift.conflist b/cni/azure-windows-swift.conflist index 8f9fafee37..f386859381 100644 --- a/cni/azure-windows-swift.conflist +++ b/cni/azure-windows-swift.conflist @@ -5,8 +5,6 @@ "plugins": [ { "type": "azure-vnet", - "mode": "bridge", - "bridge": "azure0", "executionMode": "v4swift", "capabilities": { "portMappings": true, diff --git a/cni/azure-windows.conflist b/cni/azure-windows.conflist index 201871e68b..5f275dfc84 100644 --- a/cni/azure-windows.conflist +++ b/cni/azure-windows.conflist @@ -5,8 +5,6 @@ "plugins": [ { "type": 
"azure-vnet", - "mode": "bridge", - "bridge": "azure0", "capabilities": { "portMappings": true, "dns": true @@ -45,4 +43,4 @@ ] } ] -} \ No newline at end of file +} diff --git a/cni/log/logger.go b/cni/log/logger.go index 1f47a4ec99..cf66b15a6d 100644 --- a/cni/log/logger.go +++ b/cni/log/logger.go @@ -17,7 +17,7 @@ var ( const ( maxLogFileSizeInMb = 5 maxLogFileCount = 8 - etwCNIEventName = "Azure-CNI" + etwCNIEventName = "AzureCNI" loggingLevel = zapcore.DebugLevel ) diff --git a/cni/log/logger_error.go b/cni/log/logger_error.go new file mode 100644 index 0000000000..4186cb1985 --- /dev/null +++ b/cni/log/logger_error.go @@ -0,0 +1,45 @@ +package log + +import ( + "errors" + "fmt" + "io" +) + +type ErrorWithoutStackTrace struct { + error +} + +func (l *ErrorWithoutStackTrace) Error() string { + if l.error == nil { + return "" + } + return l.error.Error() +} + +func (l *ErrorWithoutStackTrace) Format(s fmt.State, verb rune) { + // if the error is nil, nothing should happen + if l.error == nil { + return + } + v := verb + // replace uses of %v with %s + if v == 'v' { + v = 's' + } + // if the error implements formatter (which it should) + var formatter fmt.Formatter + if errors.As(l.error, &formatter) { + formatter.Format(s, v) + } else { + _, _ = io.WriteString(s, l.error.Error()) + } +} + +func (l *ErrorWithoutStackTrace) Unwrap() error { + return l.error +} + +func NewErrorWithoutStackTrace(err error) *ErrorWithoutStackTrace { + return &ErrorWithoutStackTrace{err} +} diff --git a/cni/log/logger_test.go b/cni/log/logger_test.go new file mode 100644 index 0000000000..db422aa415 --- /dev/null +++ b/cni/log/logger_test.go @@ -0,0 +1,55 @@ +package log + +import ( + "bytes" + "fmt" + "testing" + + "github.com/pkg/errors" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +var errInternal = errors.New("internal error") + +func TestLoggerError(t *testing.T) { + require := require.New(t) //nolint:gocritic + + var buf bytes.Buffer + + // Create a zap core that writes logs to the buffer + core := zapcore.NewCore( + zapcore.NewJSONEncoder(zapcore.EncoderConfig{}), + zapcore.AddSync(&buf), + zapcore.DebugLevel, + ) + logger := zap.New(core) + + wrappedError := errors.Wrap(errInternal, "wrapped message") + errorNoStack := NewErrorWithoutStackTrace(wrappedError) + + logger.Info("Error", zap.Error(wrappedError)) + require.Contains(buf.String(), "errorVerbose") + fmt.Println(buf.String()) + buf.Reset() + + // Error verbose field should be omitted from the error without stack trace error + logger.Info("ErrorWithoutStackTrace", zap.Error(errorNoStack)) + require.NotContains(buf.String(), "errorVerbose") + require.Contains(buf.String(), "wrapped message") + require.Contains(buf.String(), "internal error") + fmt.Println(buf.String()) + buf.Reset() + + // Even if the embedded error is nil, the error should still display an empty string and not panic + logger.Info("ErrorWithoutStackTrace nil internal error", zap.Error(NewErrorWithoutStackTrace(nil))) + require.Contains(buf.String(), "\"error\":\"\"") + fmt.Println(buf.String()) + buf.Reset() + + // should be able to unwrap the error without a stack trace + require.ErrorIs(errorNoStack, errInternal) + // Even if the embedded error is nil, should function properly + require.NotErrorIs(&ErrorWithoutStackTrace{error: nil}, errorNoStack) +} diff --git a/cni/log/logger_windows.go b/cni/log/logger_windows.go index 2ac46204f0..7b7586f77f 100644 --- a/cni/log/logger_windows.go +++ b/cni/log/logger_windows.go @@ -26,7 +26,7 
diff --git a/cni/log/logger_windows.go b/cni/log/logger_windows.go
index 2ac46204f0..7b7586f77f 100644
--- a/cni/log/logger_windows.go
+++ b/cni/log/logger_windows.go
@@ -26,7 +26,7 @@ func etwCore(loggingLevel zapcore.Level) (zapcore.Core, error) {
 	encoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
 	jsonEncoder := zapcore.NewJSONEncoder(encoderConfig)
 
-	etwcore, err := zapetw.NewETWCore(etwCNIEventName, jsonEncoder, loggingLevel)
+	etwcore, _, err := zapetw.New("ACN-Monitoring", etwCNIEventName, jsonEncoder, loggingLevel)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to create ETW core")
 	}
diff --git a/cni/network/common.go b/cni/network/common.go
index e76d864f4a..260579326a 100644
--- a/cni/network/common.go
+++ b/cni/network/common.go
@@ -4,26 +4,14 @@ import (
 	"encoding/json"
 	"io"
 	"os"
-	"reflect"
 
 	"github.com/Azure/azure-container-networking/cni"
-	"github.com/Azure/azure-container-networking/telemetry"
 	"github.com/containernetworking/cni/pkg/skel"
 	cniTypes "github.com/containernetworking/cni/pkg/types"
 	"github.com/pkg/errors"
 	"go.uber.org/zap"
 )
 
-// send error report to hostnetagent if CNI encounters any error.
-func ReportPluginError(reportManager *telemetry.ReportManager, tb *telemetry.TelemetryBuffer, err error) {
-	logger.Error("Report plugin error")
-	reflect.ValueOf(reportManager.Report).Elem().FieldByName("ErrorMessage").SetString(err.Error())
-
-	if err := reportManager.SendReport(tb); err != nil {
-		logger.Error("SendReport failed", zap.Error(err))
-	}
-}
-
 func validateConfig(jsonBytes []byte) error {
 	var conf struct {
 		Name string `json:"name"`
diff --git a/cni/network/invoker.go b/cni/network/invoker.go
index 907aeca5d7..9e766d020c 100644
--- a/cni/network/invoker.go
+++ b/cni/network/invoker.go
@@ -39,3 +39,13 @@ func (ipamAddResult IPAMAddResult) PrettyString() string {
 	}
 	return pStr
 }
+
+// shallow copy options from one map to a new options map
+func (ipamAddConfig IPAMAddConfig) shallowCopyIpamAddConfigOptions() map[string]interface{} {
+	res := map[string]interface{}{}
+	for k, v := range ipamAddConfig.options {
+		// only support primitive type
+		res[k] = v
+	}
+	return res
+}
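A reviewer's note on `shallowCopyIpamAddConfigOptions` above: it duplicates only top-level keys, which is exactly right for the primitive option values its comment anticipates, but any nested map or slice value would still be shared with the original. A standalone sketch of that distinction (the `shallowCopy` helper and sample keys here are illustrative, not from the repo):

```go
package main

import "fmt"

// shallowCopy mirrors the semantics of shallowCopyIpamAddConfigOptions:
// top-level keys are duplicated, but values are copied by plain assignment.
func shallowCopy(options map[string]interface{}) map[string]interface{} {
	res := map[string]interface{}{}
	for k, v := range options {
		res[k] = v
	}
	return res
}

func main() {
	options := map[string]interface{}{
		"mode":   "transparent-vlan",               // primitive value: independent after copy
		"labels": map[string]string{"team": "acn"}, // reference value: shared after copy
	}
	copied := shallowCopy(options)

	copied["mode"] = "bridge"
	fmt.Println(options["mode"]) // "transparent-vlan" - original unchanged

	copied["labels"].(map[string]string)["team"] = "other"
	fmt.Println(options["labels"]) // map[team:other] - mutation visible in both maps
}
```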
endpointPolicies: response.PodIPInfo[i].EndpointPolicies, } logger.Info("Received info for pod", @@ -169,7 +172,7 @@ func (invoker *CNSIPAMInvoker) Add(addConfig IPAMAddConfig) (IPAMAddResult, erro // Do we want to leverage this lint skip in other places of our code? key := invoker.getInterfaceInfoKey(info.nicType, info.macAddress) switch info.nicType { - case cns.NodeNetworkInterfaceFrontendNIC, cns.NodeNetworkInterfaceAccelnetFrontendNIC: + case cns.NodeNetworkInterfaceFrontendNIC: // only handling single v4 PodIPInfo for NodeNetworkInterfaceFrontendNIC and AccelnetNIC at the moment, will have to update once v6 gets added if !info.skipDefaultRoutes { numInterfacesWithDefaultRoutes++ @@ -321,8 +324,9 @@ func (invoker *CNSIPAMInvoker) Delete(address *net.IPNet, nwCfg *cni.NetworkConf if errors.As(err, &connectionErr) { addErr := fsnotify.AddFile(ipConfigs.PodInterfaceID, args.ContainerID, watcherPath) if addErr != nil { - logger.Error("Failed to add file to watcher", zap.String("podInterfaceID", ipConfigs.PodInterfaceID), zap.String("containerID", args.ContainerID), zap.Error(addErr)) - return errors.Wrap(addErr, fmt.Sprintf("failed to add file to watcher with containerID %s and podInterfaceID %s", args.ContainerID, ipConfigs.PodInterfaceID)) + logger.Error("Failed to add file to watcher (unsupported api path)", + zap.String("podInterfaceID", ipConfigs.PodInterfaceID), zap.String("containerID", args.ContainerID), zap.Error(log.NewErrorWithoutStackTrace(addErr))) + return errors.Wrap(addErr, fmt.Sprintf("failed to add file to watcher with containerID %s and podInterfaceID %s (unsupported api path)", args.ContainerID, ipConfigs.PodInterfaceID)) } } else { logger.Error("Failed to release IP address from CNS using ReleaseIPAddress ", @@ -336,7 +340,8 @@ func (invoker *CNSIPAMInvoker) Delete(address *net.IPNet, nwCfg *cni.NetworkConf if errors.As(err, &connectionErr) { addErr := fsnotify.AddFile(ipConfigs.PodInterfaceID, args.ContainerID, watcherPath) if addErr != nil { - logger.Error("Failed to add file to watcher", zap.String("podInterfaceID", ipConfigs.PodInterfaceID), zap.String("containerID", args.ContainerID), zap.Error(addErr)) + logger.Error("Failed to add file to watcher", zap.String("podInterfaceID", ipConfigs.PodInterfaceID), zap.String("containerID", args.ContainerID), + zap.Error(log.NewErrorWithoutStackTrace(addErr))) return errors.Wrap(addErr, fmt.Sprintf("failed to add file to watcher with containerID %s and podInterfaceID %s", args.ContainerID, ipConfigs.PodInterfaceID)) } } else { @@ -442,6 +447,7 @@ func configureDefaultAddResult(info *IPResultInfo, addConfig *IPAMAddConfig, add Gw: ncgw, }) } + // if we have multiple infra ip result infos, we effectively append routes and ip configs to that same interface info each time // the host subnet prefix (in ipv4 or ipv6) will always refer to the same interface regardless of which ip result info we look at addResult.interfaceInfo[key] = network.InterfaceInfo{ @@ -450,6 +456,7 @@ func configureDefaultAddResult(info *IPResultInfo, addConfig *IPAMAddConfig, add IPConfigs: ipConfigs, Routes: resRoute, HostSubnetPrefix: *hostIPNet, + EndpointPolicies: info.endpointPolicies, } } @@ -525,7 +532,7 @@ func addBackendNICToResult(info *IPResultInfo, addResult *IPAMAddResult, key str } func (invoker *CNSIPAMInvoker) getInterfaceInfoKey(nicType cns.NICType, macAddress string) string { - if nicType == cns.NodeNetworkInterfaceFrontendNIC || nicType == cns.BackendNIC || nicType == cns.NodeNetworkInterfaceAccelnetFrontendNIC { + if nicType == 
cns.NodeNetworkInterfaceFrontendNIC || nicType == cns.BackendNIC { return macAddress } return string(nicType) diff --git a/cni/network/invoker_cns_test.go b/cni/network/invoker_cns_test.go index 8a75f645be..b28798cc28 100644 --- a/cni/network/invoker_cns_test.go +++ b/cni/network/invoker_cns_test.go @@ -12,6 +12,7 @@ import ( "github.com/Azure/azure-container-networking/cns" "github.com/Azure/azure-container-networking/iptables" "github.com/Azure/azure-container-networking/network" + "github.com/Azure/azure-container-networking/network/policy" cniSkel "github.com/containernetworking/cni/pkg/skel" "github.com/stretchr/testify/require" ) @@ -521,14 +522,38 @@ func TestCNSIPAMInvoker_Add(t *testing.T) { hostSubnetPrefix *net.IPNet options map[string]interface{} } + valueOut := []byte(`{ + "Type": "ACL", + "Action": "Block", + "Direction": "Out", + "Priority": 10000 + }`) + valueIn := []byte(`{ + "Type": "ACL", + "Action": "Block", + "Direction": "In", + "Priority": 10000 + }`) + + expectedEndpointPolicies := []policy.Policy{ + { + Type: policy.EndpointPolicy, + Data: valueOut, + }, + { + Type: policy.EndpointPolicy, + Data: valueIn, + }, + } tests := []struct { - name string - fields fields - args args - wantDefaultResult network.InterfaceInfo - wantMultitenantResult network.InterfaceInfo - wantErr bool + name string + fields fields + args args + wantDefaultDenyEndpoints bool + wantDefaultResult network.InterfaceInfo + wantMultitenantResult network.InterfaceInfo + wantErr bool }{ { name: "Test happy CNI add", @@ -559,7 +584,8 @@ func TestCNSIPAMInvoker_Add(t *testing.T) { PrimaryIP: "10.0.0.1", Subnet: "10.0.0.0/24", }, - NICType: cns.InfraNIC, + NICType: cns.InfraNIC, + EndpointPolicies: expectedEndpointPolicies, }, }, Response: cns.Response{ @@ -588,6 +614,7 @@ func TestCNSIPAMInvoker_Add(t *testing.T) { Gateway: net.ParseIP("10.0.0.1"), }, }, + EndpointPolicies: expectedEndpointPolicies, Routes: []network.RouteInfo{ { Dst: network.Ipv4DefaultRouteDstPrefix, @@ -597,7 +624,8 @@ func TestCNSIPAMInvoker_Add(t *testing.T) { NICType: cns.InfraNIC, HostSubnetPrefix: *parseCIDR("10.0.0.0/24"), }, - wantErr: false, + wantDefaultDenyEndpoints: true, + wantErr: false, }, { name: "Test CNI add with pod ip info empty nictype", @@ -665,7 +693,8 @@ func TestCNSIPAMInvoker_Add(t *testing.T) { NICType: cns.InfraNIC, HostSubnetPrefix: *parseCIDR("10.0.0.0/24"), }, - wantErr: false, + wantDefaultDenyEndpoints: false, + wantErr: false, }, { name: "Test happy CNI add for both ipv4 and ipv6", @@ -696,7 +725,8 @@ func TestCNSIPAMInvoker_Add(t *testing.T) { PrimaryIP: "10.0.0.1", Subnet: "10.0.0.0/24", }, - NICType: cns.InfraNIC, + NICType: cns.InfraNIC, + EndpointPolicies: expectedEndpointPolicies, }, { PodIPConfig: cns.IPSubnet{ @@ -716,7 +746,8 @@ func TestCNSIPAMInvoker_Add(t *testing.T) { PrimaryIP: "fe80::1234:5678:9abc", Subnet: "fd11:1234::/112", }, - NICType: cns.InfraNIC, + NICType: cns.InfraNIC, + EndpointPolicies: expectedEndpointPolicies, }, }, Response: cns.Response{ @@ -749,6 +780,7 @@ func TestCNSIPAMInvoker_Add(t *testing.T) { Gateway: net.ParseIP("fe80::1234:5678:9abc"), }, }, + EndpointPolicies: expectedEndpointPolicies, Routes: []network.RouteInfo{ { Dst: network.Ipv4DefaultRouteDstPrefix, @@ -762,7 +794,8 @@ func TestCNSIPAMInvoker_Add(t *testing.T) { NICType: cns.InfraNIC, HostSubnetPrefix: *parseCIDR("fd11:1234::/112"), }, - wantErr: false, + wantDefaultDenyEndpoints: true, + wantErr: false, }, { name: "fail to request IP addresses from cns", @@ -773,12 +806,24 @@ func 
TestCNSIPAMInvoker_Add(t *testing.T) { require: require, requestIPs: requestIPsHandler{ ipconfigArgument: getTestIPConfigsRequest(), - result: nil, - err: errors.New("failed error from CNS"), //nolint "error for ut" + result: &cns.IPConfigsResponse{ + PodIPInfo: []cns.PodIpInfo{ + { + EndpointPolicies: expectedEndpointPolicies, + }, + }, + Response: cns.Response{ + ReturnCode: 0, + Message: "", + }, + }, + err: errors.New("failed error from CNS"), //nolint "error for ut" + }, }, }, - wantErr: true, + wantDefaultDenyEndpoints: false, + wantErr: true, }, } for _, tt := range tests { @@ -794,6 +839,7 @@ func TestCNSIPAMInvoker_Add(t *testing.T) { } ipamAddResult, err := invoker.Add(IPAMAddConfig{nwCfg: tt.args.nwCfg, args: tt.args.args, options: tt.args.options}) if tt.wantErr { + require.Equalf([]policy.Policy(nil), ipamAddResult.interfaceInfo[string(cns.InfraNIC)].EndpointPolicies, "There was an error requesting IP addresses from cns") require.Error(err) } else { require.NoError(err) @@ -809,6 +855,11 @@ func TestCNSIPAMInvoker_Add(t *testing.T) { } if ifInfo.NICType == cns.InfraNIC { require.Equalf(tt.wantDefaultResult, ifInfo, "incorrect default response") + if tt.wantDefaultDenyEndpoints { + require.Equalf(expectedEndpointPolicies, ifInfo.EndpointPolicies, "Correct default deny ACL") + } else { + require.Equalf([]policy.Policy(nil), ifInfo.EndpointPolicies, "Correct default deny ACL") + } } } }) @@ -1446,8 +1497,6 @@ func Test_getInterfaceInfoKey(t *testing.T) { require.Equal("", inv.getInterfaceInfoKey(cns.NodeNetworkInterfaceFrontendNIC, "")) require.Equal(dummyMAC, inv.getInterfaceInfoKey(cns.BackendNIC, dummyMAC)) require.Equal("", inv.getInterfaceInfoKey(cns.BackendNIC, "")) - require.Equal(dummyMAC, inv.getInterfaceInfoKey(cns.NodeNetworkInterfaceAccelnetFrontendNIC, dummyMAC)) - require.Equal("", inv.getInterfaceInfoKey(cns.NodeNetworkInterfaceAccelnetFrontendNIC, "")) } func TestCNSIPAMInvoker_Add_SwiftV2(t *testing.T) { @@ -1459,9 +1508,6 @@ func TestCNSIPAMInvoker_Add_SwiftV2(t *testing.T) { ibMacAddress := "bc:9a:78:56:34:12" ibParsedMacAddress, _ := net.ParseMAC(ibMacAddress) - accelnetAddress := "ab:cd:ef:12:34:56" - accelnetParsedMacAddress, _ := net.ParseMAC(accelnetAddress) - pnpID := "PCI\\VEN_15B3&DEV_101C&SUBSYS_000715B3&REV_00\\5&8c5acce&0&0" type fields struct { @@ -1621,7 +1667,7 @@ func TestCNSIPAMInvoker_Add_SwiftV2(t *testing.T) { wantErr: false, }, { - name: "Test happy CNI add with InfraNIC + AccelnetNIC interfaces", + name: "Test happy CNI add with InfraNIC + DelegatedNIC + BackendNIC interfaces", fields: fields{ podName: testPodInfo.PodName, podNamespace: testPodInfo.PodNamespace, @@ -1666,10 +1712,15 @@ func TestCNSIPAMInvoker_Add_SwiftV2(t *testing.T) { PrimaryIP: "20.0.0.2", Subnet: "20.0.0.1/24", }, - NICType: cns.NodeNetworkInterfaceAccelnetFrontendNIC, - MacAddress: accelnetAddress, + NICType: cns.NodeNetworkInterfaceFrontendNIC, + MacAddress: macAddress, SkipDefaultRoutes: false, }, + { + MacAddress: ibMacAddress, + NICType: cns.BackendNIC, + PnPID: pnpID, + }, }, Response: cns.Response{ ReturnCode: 0, @@ -1708,21 +1759,159 @@ func TestCNSIPAMInvoker_Add_SwiftV2(t *testing.T) { HostSubnetPrefix: *parseCIDR("10.0.0.0/24"), }, wantSecondaryInterfacesInfo: map[string]network.InterfaceInfo{ - accelnetAddress: { + macAddress: { IPConfigs: []*network.IPConfig{ { Address: *getCIDRNotationForAddress("20.1.1.10/24"), }, }, Routes: []network.RouteInfo{}, - NICType: cns.NodeNetworkInterfaceAccelnetFrontendNIC, - MacAddress: accelnetParsedMacAddress, + NICType: 
cns.NodeNetworkInterfaceFrontendNIC, + MacAddress: parsedMacAddress, + }, + ibMacAddress: { + NICType: cns.BackendNIC, + MacAddress: ibParsedMacAddress, + PnPID: pnpID, }, }, wantErr: false, }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + invoker := &CNSIPAMInvoker{ + podName: tt.fields.podName, + podNamespace: tt.fields.podNamespace, + cnsClient: tt.fields.cnsClient, + } + ipamAddResult, err := invoker.Add(IPAMAddConfig{nwCfg: tt.args.nwCfg, args: tt.args.args, options: tt.args.options}) + if tt.wantErr { + require.Error(err) + } else { + require.NoError(err) + } + + for _, ifInfo := range ipamAddResult.interfaceInfo { + if ifInfo.NICType == cns.InfraNIC { + fmt.Printf("want:%+v\nrest:%+v\n", tt.wantDefaultResult, ifInfo) + require.Equalf(tt.wantDefaultResult, ifInfo, "incorrect ipv4 response") + } + + if ifInfo.NICType == cns.BackendNIC { + fmt.Printf("want:%+v\nrest:%+v\n", tt.wantSecondaryInterfacesInfo, ipamAddResult.interfaceInfo[ibMacAddress]) + require.EqualValues(tt.wantSecondaryInterfacesInfo[ibMacAddress], ipamAddResult.interfaceInfo[ibMacAddress], "incorrect response for IB") + } + + if ifInfo.NICType == cns.NodeNetworkInterfaceFrontendNIC { + fmt.Printf("want:%+v\nrest:%+v\n", tt.wantSecondaryInterfacesInfo[macAddress], ipamAddResult.interfaceInfo[macAddress]) + require.EqualValues(tt.wantSecondaryInterfacesInfo[macAddress], ipamAddResult.interfaceInfo[macAddress], "incorrect response for Delegated") + } + } + }) + } +} + +func TestShallowCopyIpamAddConfigOptions(t *testing.T) { + opts := IPAMAddConfig{ + // mock different types of map value + options: map[string]interface{}{ + network.SNATIPKey: "10", + dockerNetworkOption: "20", + "intType": 10, + "floatType": 0.51, + "byteType": byte('A'), + }, + } + + // shallow copy all ipamAddConfig options + res := opts.shallowCopyIpamAddConfigOptions() + require.Equal(t, opts.options, res) + + // modified copied res and make sure original opts is not changed + newSNATIPKeyValue := "100" + newDockerNetworkOptionValue := "200" + + res[network.SNATIPKey] = newSNATIPKeyValue + res[dockerNetworkOption] = newDockerNetworkOptionValue + + expectedOpts := map[string]interface{}{ + network.SNATIPKey: newSNATIPKeyValue, + dockerNetworkOption: newDockerNetworkOptionValue, + "intType": 10, + "floatType": 0.51, + "byteType": byte('A'), + } + require.Equal(t, expectedOpts, res) + + // make sure original object is equal to expected opts after copied res is changed + expectedOriginalOpts := map[string]interface{}{ + network.SNATIPKey: "10", + dockerNetworkOption: "20", + "intType": 10, + "floatType": 0.51, + "byteType": byte('A'), + } + require.Equal(t, expectedOriginalOpts, opts.options) + + // shallow copy empty opts and make sure it does not break anything + emptyOpts := IPAMAddConfig{ + options: map[string]interface{}{}, + } + emptyRes := emptyOpts.shallowCopyIpamAddConfigOptions() + require.Equal(t, emptyOpts.options, emptyRes) + + // shallow copy null opts and make sure it does not break anything + nullOpts := IPAMAddConfig{ + options: nil, + } + nullRes := nullOpts.shallowCopyIpamAddConfigOptions() + require.Equal(t, map[string]interface{}{}, nullRes) +} + +// Test addBackendNICToResult() and configureSecondaryAddResult() to update secondary interfaces to cni Result +func TestAddNICsToCNIResult(t *testing.T) { + require := require.New(t) //nolint further usage of require without passing t + + macAddress := "bc:9a:78:56:34:12" + newMacAddress := "bc:9a:78:56:34:45" + newParsedMacAddress, _ := 
net.ParseMAC(newMacAddress) + newNCGatewayIPAddress := "40.0.0.1" + + pnpID := "PCI\\VEN_15B3&DEV_101C&SUBSYS_000715B3&REV_00\\5&8c5acce&0&0" + newPnpID := "PCI\\VEN_15B3&DEV_101C&SUBSYS_000715B3&REV_00\\5&8c5acce&0&1" + + newPodIPConfig := &cns.IPSubnet{ + IPAddress: "30.1.1.10", + PrefixLength: 24, + } + + newIP, newIPNet, _ := newPodIPConfig.GetIPNet() + + type fields struct { + podName string + podNamespace string + cnsClient cnsclient + } + + type args struct { + nwCfg *cni.NetworkConfig + args *cniSkel.CmdArgs + hostSubnetPrefix *net.IPNet + options map[string]interface{} + info IPResultInfo + podIPConfig *cns.IPSubnet + } + + tests := []struct { + name string + fields fields + args args + wantSecondaryInterfacesInfo map[string]network.InterfaceInfo + }{ { - name: "Test happy CNI add with InfraNIC + DelegatedNIC + BackendNIC interfaces", + name: "add new backendNIC to cni Result", fields: fields{ podName: testPodInfo.PodName, podNamespace: testPodInfo.PodNamespace, @@ -1754,25 +1943,10 @@ func TestCNSIPAMInvoker_Add_SwiftV2(t *testing.T) { PrimaryIP: "10.0.0.1", Subnet: "10.0.0.0/24", }, - NICType: cns.InfraNIC, - SkipDefaultRoutes: true, - }, - { - PodIPConfig: cns.IPSubnet{ - IPAddress: "20.1.1.10", - PrefixLength: 24, - }, - HostPrimaryIPInfo: cns.HostIPInfo{ - Gateway: "20.0.0.1", - PrimaryIP: "20.0.0.2", - Subnet: "20.0.0.1/24", - }, - NICType: cns.NodeNetworkInterfaceFrontendNIC, - MacAddress: macAddress, - SkipDefaultRoutes: false, + NICType: cns.InfraNIC, }, { - MacAddress: ibMacAddress, + MacAddress: macAddress, NICType: cns.BackendNIC, PnPID: pnpID, }, @@ -1795,45 +1969,23 @@ func TestCNSIPAMInvoker_Add_SwiftV2(t *testing.T) { }, hostSubnetPrefix: getCIDRNotationForAddress("10.0.0.1/24"), options: map[string]interface{}{}, - }, - wantDefaultResult: network.InterfaceInfo{ - IPConfigs: []*network.IPConfig{ - { - Address: *getCIDRNotationForAddress("10.0.1.10/24"), - Gateway: net.ParseIP("10.0.0.1"), - }, - }, - Routes: []network.RouteInfo{ - { - Dst: network.Ipv4DefaultRouteDstPrefix, - Gw: net.ParseIP("10.0.0.1"), - }, + // update new pnpID, macAddress + info: IPResultInfo{ + pnpID: newPnpID, + macAddress: newMacAddress, + nicType: cns.BackendNIC, }, - NICType: cns.InfraNIC, - SkipDefaultRoutes: true, - HostSubnetPrefix: *parseCIDR("10.0.0.0/24"), }, wantSecondaryInterfacesInfo: map[string]network.InterfaceInfo{ macAddress: { - IPConfigs: []*network.IPConfig{ - { - Address: *getCIDRNotationForAddress("20.1.1.10/24"), - }, - }, - Routes: []network.RouteInfo{}, - NICType: cns.NodeNetworkInterfaceFrontendNIC, - MacAddress: parsedMacAddress, - }, - ibMacAddress: { + MacAddress: newParsedMacAddress, + PnPID: newPnpID, NICType: cns.BackendNIC, - MacAddress: ibParsedMacAddress, - PnPID: pnpID, }, }, - wantErr: false, }, { - name: "Test happy CNI add with InfraNIC + AccelnetNIC + BackendNIC interfaces", + name: "add new delegatedVMNIC to cni Result", fields: fields{ podName: testPodInfo.PodName, podNamespace: testPodInfo.PodNamespace, @@ -1870,22 +2022,16 @@ func TestCNSIPAMInvoker_Add_SwiftV2(t *testing.T) { }, { PodIPConfig: cns.IPSubnet{ - IPAddress: "30.1.1.10", + IPAddress: "20.1.1.10", PrefixLength: 24, }, HostPrimaryIPInfo: cns.HostIPInfo{ - Gateway: "30.0.0.1", - PrimaryIP: "30.0.0.2", - Subnet: "30.0.0.1/24", + Gateway: "20.0.0.1", + PrimaryIP: "20.0.0.2", + Subnet: "20.0.0.1/24", }, - NICType: cns.NodeNetworkInterfaceAccelnetFrontendNIC, - MacAddress: accelnetAddress, - SkipDefaultRoutes: false, - }, - { - MacAddress: ibMacAddress, - NICType: cns.BackendNIC, - PnPID: 
pnpID, + NICType: cns.NodeNetworkInterfaceFrontendNIC, + MacAddress: macAddress, }, }, Response: cns.Response{ @@ -1906,45 +2052,114 @@ func TestCNSIPAMInvoker_Add_SwiftV2(t *testing.T) { }, hostSubnetPrefix: getCIDRNotationForAddress("10.0.0.1/24"), options: map[string]interface{}{}, - }, - wantDefaultResult: network.InterfaceInfo{ - IPConfigs: []*network.IPConfig{ - { - Address: *getCIDRNotationForAddress("10.0.1.10/24"), - Gateway: net.ParseIP("10.0.0.1"), - }, - }, - Routes: []network.RouteInfo{ - { - Dst: network.Ipv4DefaultRouteDstPrefix, - Gw: net.ParseIP("10.0.0.1"), - }, + // update podIPConfig + podIPConfig: newPodIPConfig, + // update new mac address and ncGatewayIPAddress + info: IPResultInfo{ + macAddress: newMacAddress, + nicType: cns.NodeNetworkInterfaceFrontendNIC, + ncGatewayIPAddress: newNCGatewayIPAddress, }, - NICType: cns.InfraNIC, - SkipDefaultRoutes: true, - HostSubnetPrefix: *parseCIDR("10.0.0.0/24"), }, wantSecondaryInterfacesInfo: map[string]network.InterfaceInfo{ - accelnetAddress: { + macAddress: { + MacAddress: newParsedMacAddress, + NICType: cns.NodeNetworkInterfaceFrontendNIC, IPConfigs: []*network.IPConfig{ { - Address: *getCIDRNotationForAddress("30.1.1.10/24"), + Address: net.IPNet{ + IP: newIP, + Mask: newIPNet.Mask, + }, + Gateway: net.ParseIP(newNCGatewayIPAddress), }, }, - Routes: []network.RouteInfo{}, - NICType: cns.NodeNetworkInterfaceAccelnetFrontendNIC, - MacAddress: accelnetParsedMacAddress, - }, - ibMacAddress: { - NICType: cns.BackendNIC, - MacAddress: ibParsedMacAddress, - PnPID: pnpID, + Routes: []network.RouteInfo{}, }, }, - wantErr: false, }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + invoker := &CNSIPAMInvoker{ + podName: tt.fields.podName, + podNamespace: tt.fields.podNamespace, + cnsClient: tt.fields.cnsClient, + } + ipamAddResult, err := invoker.Add(IPAMAddConfig{nwCfg: tt.args.nwCfg, args: tt.args.args, options: tt.args.options}) + if err != nil { + t.Fatalf("Failed to create ipamAddResult due to error: %v", err) + } + + for _, ifInfo := range ipamAddResult.interfaceInfo { + if ifInfo.NICType == cns.BackendNIC { + // add new backendNIC info to cni Result + err := addBackendNICToResult(&tt.args.info, &ipamAddResult, macAddress) + if err != nil { + t.Fatalf("Failed to add backend NIC to cni Result due to error %v", err) + } + fmt.Printf("want:%+v\nrest:%+v\n", tt.wantSecondaryInterfacesInfo, ipamAddResult.interfaceInfo[macAddress]) + require.EqualValues(tt.wantSecondaryInterfacesInfo[macAddress], ipamAddResult.interfaceInfo[macAddress], "incorrect response for IB") + } + if ifInfo.NICType == cns.NodeNetworkInterfaceFrontendNIC { + // add new secondaryInterfaceNIC to cni Result + err := configureSecondaryAddResult(&tt.args.info, &ipamAddResult, tt.args.podIPConfig, macAddress) + if err != nil { + t.Fatalf("Failed to add secondary interface NIC %s to cni Result due to error %v", ifInfo.NICType, err) + } + fmt.Printf("want:%+v\nrest:%+v\n", tt.wantSecondaryInterfacesInfo, ipamAddResult.interfaceInfo[macAddress]) + require.EqualValues(tt.wantSecondaryInterfacesInfo[macAddress], ipamAddResult.interfaceInfo[macAddress], "incorrect response for delegatedVMNIC") + } + } + }) + } +} + +// Test to add multiple IB NICs to make sure CNI receives all correct IB info from CNS +func TestMultipleIBNICsToResult(t *testing.T) { + require := require.New(t) //nolint further usage of require without passing t + + firstMacAddress := "bc:9a:78:56:34:12" + firstParsedMacAddress, _ := net.ParseMAC(firstMacAddress) + + 
secondMacAddress := "bc:9a:78:56:34:13" + secondParsedMacAddress, _ := net.ParseMAC(secondMacAddress) + + thirdMacAddress := "bc:9a:78:56:34:14" + thirdParsedMacAddress, _ := net.ParseMAC(thirdMacAddress) + + macAddressList := []string{firstMacAddress, secondMacAddress, thirdMacAddress} + + firstPnpID := "PCI\\VEN_15B3&DEV_101C&SUBSYS_000715B3&REV_00\\5&8c5acce&0&0" + firstNewPnpID := "PCI\\VEN_15B3&DEV_101C&SUBSYS_000715B3&REV_00\\5&8c5acce&0&1" + secondPnpID := "PCI\\VEN_15B3&DEV_101C&SUBSYS_000715B3&REV_00\\6&8c5acce&0&1" + secondNewPnpID := "PCI\\VEN_15B3&DEV_101C&SUBSYS_000715B3&REV_00\\6&8c5acce&0&2" + thirdPnpID := "PCI\\VEN_15B3&DEV_101C&SUBSYS_000715B3&REV_00\\7&8c5acce&0&2" + thirdNewPnpID := "PCI\\VEN_15B3&DEV_101C&SUBSYS_000715B3&REV_00\\7&8c5acce&0&3" + + type fields struct { + podName string + podNamespace string + cnsClient cnsclient + } + + type args struct { + nwCfg *cni.NetworkConfig + args *cniSkel.CmdArgs + hostSubnetPrefix *net.IPNet + options map[string]interface{} + info []IPResultInfo + } + + tests := []struct { + name string + fields fields + args args + wantSecondaryInterfacesInfo map[string]network.InterfaceInfo + }{ { - name: "Test unhappy CNI add with InfraNIC + AccelnetNIC + BackendNIC interfaces", + name: "add three backendNIC to cni Result", fields: fields{ podName: testPodInfo.PodName, podNamespace: testPodInfo.PodNamespace, @@ -1976,27 +2191,22 @@ func TestCNSIPAMInvoker_Add_SwiftV2(t *testing.T) { PrimaryIP: "10.0.0.1", Subnet: "10.0.0.0/24", }, - NICType: cns.InfraNIC, - SkipDefaultRoutes: false, + NICType: cns.InfraNIC, }, { - PodIPConfig: cns.IPSubnet{ - IPAddress: "30.1.1.10", - PrefixLength: 24, - }, - HostPrimaryIPInfo: cns.HostIPInfo{ - Gateway: "30.0.0.1", - PrimaryIP: "30.0.0.2", - Subnet: "30.0.0.1/24", - }, - NICType: cns.NodeNetworkInterfaceAccelnetFrontendNIC, - MacAddress: "invalid mac", - SkipDefaultRoutes: false, + MacAddress: firstMacAddress, + NICType: cns.BackendNIC, + PnPID: firstPnpID, }, { - MacAddress: ibMacAddress, + MacAddress: secondMacAddress, + NICType: cns.BackendNIC, + PnPID: secondPnpID, + }, + { + MacAddress: thirdMacAddress, NICType: cns.BackendNIC, - PnPID: "invalid pnpID", + PnPID: thirdPnpID, }, }, Response: cns.Response{ @@ -2017,42 +2227,41 @@ func TestCNSIPAMInvoker_Add_SwiftV2(t *testing.T) { }, hostSubnetPrefix: getCIDRNotationForAddress("10.0.0.1/24"), options: map[string]interface{}{}, - }, - wantDefaultResult: network.InterfaceInfo{ - IPConfigs: []*network.IPConfig{ + info: []IPResultInfo{ { - Address: *getCIDRNotationForAddress("10.0.1.10/24"), - Gateway: net.ParseIP("10.0.0.1"), + pnpID: firstNewPnpID, // update pnp ID + macAddress: firstMacAddress, + nicType: cns.BackendNIC, }, - }, - Routes: []network.RouteInfo{ { - Dst: network.Ipv4DefaultRouteDstPrefix, - Gw: net.ParseIP("10.0.0.1"), + pnpID: secondNewPnpID, // update pnp ID + macAddress: secondMacAddress, + nicType: cns.BackendNIC, + }, + { + pnpID: thirdNewPnpID, // update pnp ID + macAddress: thirdMacAddress, + nicType: cns.BackendNIC, }, }, - NICType: cns.InfraNIC, - SkipDefaultRoutes: true, - HostSubnetPrefix: *parseCIDR("10.0.0.0/24"), }, wantSecondaryInterfacesInfo: map[string]network.InterfaceInfo{ - accelnetAddress: { - IPConfigs: []*network.IPConfig{ - { - Address: *getCIDRNotationForAddress("30.1.1.10/24"), - }, - }, - Routes: []network.RouteInfo{}, - NICType: cns.NodeNetworkInterfaceAccelnetFrontendNIC, - MacAddress: accelnetParsedMacAddress, + firstMacAddress: { + MacAddress: firstParsedMacAddress, + PnPID: firstNewPnpID, + NICType: 
cns.BackendNIC, }, - ibMacAddress: { + secondMacAddress: { + MacAddress: secondParsedMacAddress, + PnPID: secondNewPnpID, + NICType: cns.BackendNIC, + }, + thirdMacAddress: { + MacAddress: thirdParsedMacAddress, + PnPID: thirdNewPnpID, NICType: cns.BackendNIC, - MacAddress: ibParsedMacAddress, - PnPID: pnpID, }, }, - wantErr: true, }, } for _, tt := range tests { @@ -2064,32 +2273,27 @@ func TestCNSIPAMInvoker_Add_SwiftV2(t *testing.T) { cnsClient: tt.fields.cnsClient, } ipamAddResult, err := invoker.Add(IPAMAddConfig{nwCfg: tt.args.nwCfg, args: tt.args.args, options: tt.args.options}) - if tt.wantErr { - require.Error(err) - } else { - require.NoError(err) + if err != nil { + t.Fatalf("Failed to create ipamAddResult due to error: %v", err) } - for _, ifInfo := range ipamAddResult.interfaceInfo { - if ifInfo.NICType == cns.InfraNIC { - fmt.Printf("want:%+v\nrest:%+v\n", tt.wantDefaultResult, ifInfo) - require.Equalf(tt.wantDefaultResult, ifInfo, "incorrect ipv4 response") - } - - if ifInfo.NICType == cns.BackendNIC { - fmt.Printf("want:%+v\nrest:%+v\n", tt.wantSecondaryInterfacesInfo, ipamAddResult.interfaceInfo[ibMacAddress]) - require.EqualValues(tt.wantSecondaryInterfacesInfo[ibMacAddress], ipamAddResult.interfaceInfo[ibMacAddress], "incorrect response for IB") - } - - if ifInfo.NICType == cns.NodeNetworkInterfaceFrontendNIC { - fmt.Printf("want:%+v\nrest:%+v\n", tt.wantSecondaryInterfacesInfo[macAddress], ipamAddResult.interfaceInfo[macAddress]) - require.EqualValues(tt.wantSecondaryInterfacesInfo[macAddress], ipamAddResult.interfaceInfo[macAddress], "incorrect response for Delegated") - } + // add three new backendNICs info to cni Result + err = addBackendNICToResult(&tt.args.info[0], &ipamAddResult, firstMacAddress) + if err != nil { + t.Fatalf("Failed to add first backend NIC to cni Result due to error %v", err) + } + err = addBackendNICToResult(&tt.args.info[1], &ipamAddResult, secondMacAddress) + if err != nil { + t.Fatalf("Failed to add second backend NIC to cni Result due to error %v", err) + } + err = addBackendNICToResult(&tt.args.info[2], &ipamAddResult, thirdMacAddress) + if err != nil { + t.Fatalf("Failed to add third backend NIC to cni Result due to error %v", err) + } - if ifInfo.NICType == cns.NodeNetworkInterfaceAccelnetFrontendNIC { - fmt.Printf("want:%+v\nrest:%+v\n", tt.wantSecondaryInterfacesInfo[accelnetAddress], ipamAddResult.interfaceInfo[accelnetAddress]) - require.EqualValues(tt.wantSecondaryInterfacesInfo[accelnetAddress], ipamAddResult.interfaceInfo[accelnetAddress], "incorrect response for Accelnet") - } + for _, macAddress := range macAddressList { + t.Logf("want:%+v\nrest:%+v\n", tt.wantSecondaryInterfacesInfo[macAddress], ipamAddResult.interfaceInfo[macAddress]) + require.EqualValues(tt.wantSecondaryInterfacesInfo[macAddress], ipamAddResult.interfaceInfo[macAddress], "incorrect response for IB") } }) } diff --git a/cni/network/invoker_mock.go b/cni/network/invoker_mock.go index 8961288769..7e6c57ed66 100644 --- a/cni/network/invoker_mock.go +++ b/cni/network/invoker_mock.go @@ -22,7 +22,6 @@ var ( errV6 = errors.New("v6 Fail") errDelegatedVMNIC = errors.New("NodeNetworkInterfaceFrontendNIC fail") errDeleteIpam = errors.New("delete fail") - errAccelnetVMNIC = errors.New("accelnetNIC fail") ) type MockIpamInvoker struct { @@ -31,29 +30,28 @@ type MockIpamInvoker struct { v6Fail bool delegatedVMNIC bool delegatedVMNICFail bool - accelnetNIC bool - accelnetNICFail bool ipMap map[string]bool - customReturn map[string]network.InterfaceInfo + add func(opt 
IPAMAddConfig) (ipamAddResult IPAMAddResult, err error) } -func NewMockIpamInvoker(ipv6, v4Fail, v6Fail, delegatedVMNIC, delegatedVMNICFail, accelnetNIC, accelnetNICFail bool) *MockIpamInvoker { +func NewMockIpamInvoker(ipv6, v4Fail, v6Fail, delegatedVMNIC, delegatedVMNICFail bool) *MockIpamInvoker { return &MockIpamInvoker{ isIPv6: ipv6, v4Fail: v4Fail, v6Fail: v6Fail, delegatedVMNIC: delegatedVMNIC, delegatedVMNICFail: delegatedVMNICFail, - accelnetNIC: accelnetNIC, - accelnetNICFail: accelnetNICFail, ipMap: make(map[string]bool), } } func NewCustomMockIpamInvoker(customReturn map[string]network.InterfaceInfo) *MockIpamInvoker { return &MockIpamInvoker{ - customReturn: customReturn, - + add: func(_ IPAMAddConfig) (ipamAddResult IPAMAddResult, err error) { + ipamAddResult = IPAMAddResult{interfaceInfo: make(map[string]network.InterfaceInfo)} + ipamAddResult.interfaceInfo = customReturn + return ipamAddResult, nil + }, ipMap: make(map[string]bool), } } @@ -117,23 +115,8 @@ func (invoker *MockIpamInvoker) Add(opt IPAMAddConfig) (ipamAddResult IPAMAddRes } } - if invoker.accelnetNIC { - if invoker.accelnetNICFail { - return IPAMAddResult{}, errAccelnetVMNIC - } - - ipStr := "30.30.30.30/32" - _, ipnet, _ := net.ParseCIDR(ipStr) - ipRes = append(ipRes, &network.IPConfig{Address: *ipnet}) - ipamAddResult.interfaceInfo[string(cns.InfraNIC)] = network.InterfaceInfo{ - IPConfigs: ipRes, - NICType: cns.NodeNetworkInterfaceAccelnetFrontendNIC, - } - } - - if invoker.customReturn != nil { - ipamAddResult.interfaceInfo = invoker.customReturn - return ipamAddResult, nil + if invoker.add != nil { + return invoker.add(opt) } return ipamAddResult, nil diff --git a/cni/network/multitenancy_mock.go b/cni/network/multitenancy_mock.go index 9c63279091..13107a3357 100644 --- a/cni/network/multitenancy_mock.go +++ b/cni/network/multitenancy_mock.go @@ -4,7 +4,6 @@ import ( "context" "errors" "net" - "runtime" "strconv" "github.com/Azure/azure-container-networking/cni" @@ -14,7 +13,8 @@ import ( ) type MockMultitenancy struct { - fail bool + fail bool + cnsResponses []*cns.GetNetworkContainerResponse } const ( @@ -26,9 +26,10 @@ const ( var errMockMulAdd = errors.New("multitenancy fail") -func NewMockMultitenancy(fail bool) *MockMultitenancy { +func NewMockMultitenancy(fail bool, cnsResponses []*cns.GetNetworkContainerResponse) *MockMultitenancy { return &MockMultitenancy{ - fail: fail, + fail: fail, + cnsResponses: cnsResponses, } } @@ -56,31 +57,9 @@ func (m *MockMultitenancy) GetNetworkContainer( return nil, net.IPNet{}, errMockMulAdd } - cnsResponse := &cns.GetNetworkContainerResponse{ - IPConfiguration: cns.IPConfiguration{ - IPSubnet: cns.IPSubnet{ - IPAddress: "192.168.0.4", - PrefixLength: ipPrefixLen, - }, - GatewayIPAddress: "192.168.0.1", - }, - LocalIPConfiguration: cns.IPConfiguration{ - IPSubnet: cns.IPSubnet{ - IPAddress: "169.254.0.4", - PrefixLength: localIPPrefixLen, - }, - GatewayIPAddress: "169.254.0.1", - }, - - PrimaryInterfaceIdentifier: "10.240.0.4/24", - MultiTenancyInfo: cns.MultiTenancyInfo{ - EncapType: cns.Vlan, - ID: 1, - }, - } - _, ipnet, _ := net.ParseCIDR(cnsResponse.PrimaryInterfaceIdentifier) + _, ipnet, _ := net.ParseCIDR(m.cnsResponses[0].PrimaryInterfaceIdentifier) - return cnsResponse, *ipnet, nil + return m.cnsResponses[0], *ipnet, nil } func (m *MockMultitenancy) GetAllNetworkContainers( @@ -97,64 +76,13 @@ func (m *MockMultitenancy) GetAllNetworkContainers( var cnsResponses []cns.GetNetworkContainerResponse var ipNets []net.IPNet - cnsResponseOne := 
&cns.GetNetworkContainerResponse{ - IPConfiguration: cns.IPConfiguration{ - IPSubnet: cns.IPSubnet{ - IPAddress: "20.0.0.10", - PrefixLength: ipPrefixLen, - }, - GatewayIPAddress: "20.0.0.1", - }, - LocalIPConfiguration: cns.IPConfiguration{ - IPSubnet: cns.IPSubnet{ - IPAddress: "168.254.0.4", - PrefixLength: localIPPrefixLen, - }, - GatewayIPAddress: "168.254.0.1", - }, - - PrimaryInterfaceIdentifier: "20.240.0.4/24", - MultiTenancyInfo: cns.MultiTenancyInfo{ - EncapType: cns.Vlan, - ID: multiTenancyVlan1, - }, - } - - // TODO: add dual nic test cases for windows - if runtime.GOOS == "windows" { - cnsResponseTwo := &cns.GetNetworkContainerResponse{ - IPConfiguration: cns.IPConfiguration{ - IPSubnet: cns.IPSubnet{ - IPAddress: "10.0.0.10", - PrefixLength: ipPrefixLen, - }, - GatewayIPAddress: "10.0.0.1", - }, - LocalIPConfiguration: cns.IPConfiguration{ - IPSubnet: cns.IPSubnet{ - IPAddress: "169.254.0.4", - PrefixLength: localIPPrefixLen, - }, - GatewayIPAddress: "169.254.0.1", - }, - - PrimaryInterfaceIdentifier: "10.240.0.4/24", - MultiTenancyInfo: cns.MultiTenancyInfo{ - EncapType: cns.Vlan, - ID: multiTenancyVlan2, - }, - } + for _, cnsResp := range m.cnsResponses { + _, ipNet, _ := net.ParseCIDR(cnsResp.PrimaryInterfaceIdentifier) - _, secondIPnet, _ := net.ParseCIDR(cnsResponseTwo.PrimaryInterfaceIdentifier) - ipNets = append(ipNets, *secondIPnet) - cnsResponses = append(cnsResponses, *cnsResponseTwo) + ipNets = append(ipNets, *ipNet) + cnsResponses = append(cnsResponses, *cnsResp) } - _, firstIPnet, _ := net.ParseCIDR(cnsResponseOne.PrimaryInterfaceIdentifier) - - ipNets = append(ipNets, *firstIPnet) - cnsResponses = append(cnsResponses, *cnsResponseOne) - ipamResult := IPAMAddResult{} ipamResult.interfaceInfo = make(map[string]network.InterfaceInfo) diff --git a/cni/network/multitenancy_test.go b/cni/network/multitenancy_test.go index 9e43535484..b6dbb9e19a 100644 --- a/cni/network/multitenancy_test.go +++ b/cni/network/multitenancy_test.go @@ -287,7 +287,7 @@ func TestCleanupMultitenancyResources(t *testing.T) { }, infraIPNet: &cniTypesCurr.Result{}, plugin: &NetPlugin{ - ipamInvoker: NewMockIpamInvoker(false, false, false, false, false, false, false), + ipamInvoker: NewMockIpamInvoker(false, false, false, false, false), }, }, expected: args{ @@ -298,7 +298,7 @@ func TestCleanupMultitenancyResources(t *testing.T) { }, infraIPNet: &cniTypesCurr.Result{}, plugin: &NetPlugin{ - ipamInvoker: NewMockIpamInvoker(false, false, false, false, false, false, false), + ipamInvoker: NewMockIpamInvoker(false, false, false, false, false), }, }, }, @@ -346,6 +346,10 @@ func TestGetMultiTenancyCNIResult(t *testing.T) { GatewayIPAddress: "10.1.0.1", }, }, + MultiTenancyInfo: cns.MultiTenancyInfo{ + EncapType: "1", // vlanID 1 + ID: 1, + }, } ncResponseTwo := cns.GetNetworkContainerResponse{ @@ -377,6 +381,10 @@ func TestGetMultiTenancyCNIResult(t *testing.T) { GatewayIPAddress: "20.1.0.1", }, }, + MultiTenancyInfo: cns.MultiTenancyInfo{ + EncapType: "2", // vlanID 2 + ID: 2, + }, } ncResponses = append(ncResponses, ncResponseOne, ncResponseTwo) @@ -413,7 +421,7 @@ func TestGetMultiTenancyCNIResult(t *testing.T) { IPAM: cni.IPAM{Type: "azure-vnet-ipam"}, }, plugin: &NetPlugin{ - ipamInvoker: NewMockIpamInvoker(false, false, false, false, false, false, false), + ipamInvoker: NewMockIpamInvoker(false, false, false, false, false), multitenancyClient: &Multitenancy{ netioshim: &mockNetIOShim{}, cnsclient: &MockCNSClient{ @@ -484,6 +492,10 @@ func TestGetMultiTenancyCNIResult(t *testing.T) { 
GatewayIPAddress: "10.1.0.1", }, }, + MultiTenancyInfo: cns.MultiTenancyInfo{ + EncapType: "1", + ID: 1, + }, }, want2: &cns.GetNetworkContainerResponse{ PrimaryInterfaceIdentifier: "20.0.0.0/16", @@ -514,6 +526,10 @@ func TestGetMultiTenancyCNIResult(t *testing.T) { GatewayIPAddress: "20.1.0.1", }, }, + MultiTenancyInfo: cns.MultiTenancyInfo{ + EncapType: "2", + ID: 2, + }, }, want3: *getCIDRNotationForAddress("10.0.0.0/16"), want4: &cniTypesCurr.Result{ @@ -629,7 +645,7 @@ func TestGetMultiTenancyCNIResultUnsupportedAPI(t *testing.T) { IPAM: cni.IPAM{Type: "azure-vnet-ipam"}, }, plugin: &NetPlugin{ - ipamInvoker: NewMockIpamInvoker(false, false, false, false, false, false, false), + ipamInvoker: NewMockIpamInvoker(false, false, false, false, false), multitenancyClient: &Multitenancy{ netioshim: &mockNetIOShim{}, cnsclient: &MockCNSClient{ @@ -769,7 +785,7 @@ func TestGetMultiTenancyCNIResultNotFound(t *testing.T) { IPAM: cni.IPAM{Type: "azure-vnet-ipam"}, }, plugin: &NetPlugin{ - ipamInvoker: NewMockIpamInvoker(false, false, false, false, false, false, false), + ipamInvoker: NewMockIpamInvoker(false, false, false, false, false), multitenancyClient: &Multitenancy{ netioshim: &mockNetIOShim{}, cnsclient: &MockCNSClient{ @@ -804,7 +820,7 @@ func TestGetMultiTenancyCNIResultNotFound(t *testing.T) { IPAM: cni.IPAM{Type: "azure-vnet-ipam"}, }, plugin: &NetPlugin{ - ipamInvoker: NewMockIpamInvoker(false, false, false, false, false, false, false), + ipamInvoker: NewMockIpamInvoker(false, false, false, false, false), multitenancyClient: &Multitenancy{ netioshim: &mockNetIOShim{}, cnsclient: &MockCNSClient{ diff --git a/cni/network/network.go b/cni/network/network.go index dc9a2a127d..a09db694c0 100644 --- a/cni/network/network.go +++ b/cni/network/network.go @@ -8,18 +8,19 @@ import ( "encoding/json" "fmt" "net" - "os" "regexp" "strconv" "time" - "github.com/Azure/azure-container-networking/aitelemetry" "github.com/Azure/azure-container-networking/cni" "github.com/Azure/azure-container-networking/cni/api" + "github.com/Azure/azure-container-networking/cni/log" "github.com/Azure/azure-container-networking/cni/util" "github.com/Azure/azure-container-networking/cns" cnscli "github.com/Azure/azure-container-networking/cns/client" + "github.com/Azure/azure-container-networking/cns/fsnotify" "github.com/Azure/azure-container-networking/common" + "github.com/Azure/azure-container-networking/dhcp" "github.com/Azure/azure-container-networking/iptables" "github.com/Azure/azure-container-networking/netio" "github.com/Azure/azure-container-networking/netlink" @@ -37,7 +38,10 @@ import ( ) // matches if the string fully consists of zero or more alphanumeric, dots, dashes, parentheses, spaces, or underscores -var allowedInput = regexp.MustCompile(`^[a-zA-Z0-9._\-\(\) ]*$`) +var ( + allowedInput = regexp.MustCompile(`^[a-zA-Z0-9._\-\(\) ]*$`) + telemetryClient = telemetry.AIClient +) const ( dockerNetworkOption = "com.docker.network.generic" @@ -77,8 +81,6 @@ type NetPlugin struct { *cni.Plugin nm network.NetworkManager ipamInvoker IPAMInvoker - report *telemetry.CNIReport - tb *telemetry.TelemetryBuffer nnsClient NnsClient multitenancyClient MultitenancyClient netClient InterfaceGetter @@ -106,6 +108,7 @@ type NnsClient interface { // client for getting interface type InterfaceGetter interface { GetNetworkInterfaces() ([]net.Interface, error) + GetNetworkInterfaceAddrs(iface *net.Interface) ([]net.Addr, error) } // snatConfiguration contains a bool that determines whether CNI enables snat on host and snat for 
dns @@ -128,7 +131,7 @@ func NewPlugin(name string, nl := netlink.NewNetlink() // Setup network manager. - nm, err := network.NewNetworkManager(nl, platform.NewExecClient(logger), &netio.NetIO{}, network.NewNamespaceClient(), iptables.NewClient()) + nm, err := network.NewNetworkManager(nl, platform.NewExecClient(logger), &netio.NetIO{}, network.NewNamespaceClient(), iptables.NewClient(), dhcp.New(logger)) if err != nil { return nil, err } @@ -144,11 +147,6 @@ func NewPlugin(name string, }, nil } -func (plugin *NetPlugin) SetCNIReport(report *telemetry.CNIReport, tb *telemetry.TelemetryBuffer) { - plugin.report = report - plugin.tb = tb -} - // Starts the plugin. func (plugin *NetPlugin) Start(config *common.PluginConfig) error { // Initialize base plugin. @@ -175,13 +173,6 @@ func (plugin *NetPlugin) Start(config *common.PluginConfig) error { return nil } -func sendEvent(plugin *NetPlugin, msg string) { - eventMsg := fmt.Sprintf("[%d] %s", os.Getpid(), msg) - plugin.report.Version = plugin.Version - plugin.report.EventMessage = eventMsg - telemetry.SendCNIEvent(plugin.tb, plugin.report) -} - func (plugin *NetPlugin) GetAllEndpointState(networkid string) (*api.AzureCNIState, error) { st := api.AzureCNIState{ ContainerInterfaces: make(map[string]api.PodNetworkInterfaceInfo), @@ -254,7 +245,7 @@ func (plugin *NetPlugin) findMasterInterfaceBySubnet(nwCfg *cni.NetworkConfig, s } var ipnets []string for _, iface := range interfaces { - addrs, _ := iface.Addrs() + addrs, _ := plugin.netClient.GetNetworkInterfaceAddrs(&iface) //nolint for _, addr := range addrs { _, ipnet, err := net.ParseCIDR(addr.String()) if err != nil { @@ -303,35 +294,11 @@ func (plugin *NetPlugin) getPodInfo(args string) (name, ns string, err error) { return k8sPodName, k8sNamespace, nil } -func SetCustomDimensions(cniMetric *telemetry.AIMetric, nwCfg *cni.NetworkConfig, err error) { - if cniMetric == nil { - logger.Error("Unable to set custom dimension. 
Report is nil") - return - } - - if err != nil { - cniMetric.Metric.CustomDimensions[telemetry.StatusStr] = telemetry.FailedStr - } else { - cniMetric.Metric.CustomDimensions[telemetry.StatusStr] = telemetry.SucceededStr - } - - if nwCfg != nil { - if nwCfg.MultiTenancy { - cniMetric.Metric.CustomDimensions[telemetry.CNIModeStr] = telemetry.MultiTenancyStr - } else { - cniMetric.Metric.CustomDimensions[telemetry.CNIModeStr] = telemetry.SingleTenancyStr - } - - cniMetric.Metric.CustomDimensions[telemetry.CNINetworkModeStr] = nwCfg.Mode - } -} - -func (plugin *NetPlugin) setCNIReportDetails(nwCfg *cni.NetworkConfig, opType, msg string) { - plugin.report.OperationType = opType - plugin.report.SubContext = fmt.Sprintf("%+v", nwCfg) - plugin.report.EventMessage = msg - plugin.report.BridgeDetails.NetworkMode = nwCfg.Mode - plugin.report.InterfaceDetails.SecondaryCAUsedCount = plugin.nm.GetNumberOfEndpoints("", nwCfg.Name) +func (plugin *NetPlugin) setCNIReportDetails(containerID, opType, msg string) { + telemetryClient.Settings().OperationType = opType + telemetryClient.Settings().SubContext = containerID + telemetryClient.Settings().EventMessage = msg + telemetryClient.Settings().Version = plugin.Version } func addNatIPV6SubnetInfo(nwCfg *cni.NetworkConfig, @@ -357,7 +324,6 @@ func (plugin *NetPlugin) addIpamInvoker(ipamAddConfig IPAMAddConfig) (IPAMAddRes if err != nil { return IPAMAddResult{}, errors.Wrap(err, "failed to add ipam invoker") } - sendEvent(plugin, fmt.Sprintf("Allocated IPAddress from ipam interface: %+v", ipamAddResult.PrettyString())) return ipamAddResult, nil } @@ -389,12 +355,10 @@ func (plugin *NetPlugin) Add(args *cniSkel.CmdArgs) error { enableInfraVnet bool enableSnatForDNS bool k8sPodName string - cniMetric telemetry.AIMetric epInfos []*network.EndpointInfo ) startTime := time.Now() - logger.Info("Processing ADD command", zap.String("containerId", args.ContainerID), zap.String("netNS", args.Netns), @@ -402,8 +366,6 @@ func (plugin *NetPlugin) Add(args *cniSkel.CmdArgs) error { zap.Any("args", args.Args), zap.String("path", args.Path), zap.ByteString("stdinData", args.StdinData)) - sendEvent(plugin, fmt.Sprintf("[cni-net] Processing ADD command with args {ContainerID:%v Netns:%v IfName:%v Args:%v Path:%v StdinData:%s}.", - args.ContainerID, args.Netns, args.IfName, args.Args, args.Path, args.StdinData)) // Parse network configuration from stdin. nwCfg, err := cni.ParseNetworkConfig(args.StdinData) @@ -417,20 +379,20 @@ func (plugin *NetPlugin) Add(args *cniSkel.CmdArgs) error { return err } + // Parse Pod arguments. + k8sPodName, k8sNamespace, err := plugin.getPodInfo(args.Args) + if err != nil { + return err + } + telemetryClient.Settings().ContainerName = k8sPodName + ":" + k8sNamespace + + plugin.setCNIReportDetails(args.ContainerID, CNI_ADD, "") + telemetryClient.SendEvent(fmt.Sprintf("[cni-net] Processing ADD command with args {ContainerID:%v Netns:%v IfName:%v Args:%v Path:%v StdinData:%s}.", + args.ContainerID, args.Netns, args.IfName, args.Args, args.Path, args.StdinData)) + iptables.DisableIPTableLock = nwCfg.DisableIPTableLock - plugin.setCNIReportDetails(nwCfg, CNI_ADD, "") defer func() { - operationTimeMs := time.Since(startTime).Milliseconds() - cniMetric.Metric = aitelemetry.Metric{ - Name: telemetry.CNIAddTimeMetricStr, - Value: float64(operationTimeMs), - AppVersion: plugin.Version, - CustomDimensions: make(map[string]string), - } - SetCustomDimensions(&cniMetric, nwCfg, err) - telemetry.SendCNIMetric(&cniMetric, plugin.tb) - // Add Interfaces to result. 
// previously we had a default interface info to select which interface info was the one to be returned from cni add cniResult := &cniTypesCurr.Result{} @@ -474,18 +436,15 @@ func (plugin *NetPlugin) Add(args *cniSkel.CmdArgs) error { logger.Info("ADD command completed for", zap.String("pod", k8sPodName), zap.Any("IPs", cniResult.IPs), - zap.Error(err)) - }() + zap.Error(log.NewErrorWithoutStackTrace(err))) - ipamAddResult = IPAMAddResult{interfaceInfo: make(map[string]network.InterfaceInfo)} + telemetryClient.SendEvent(fmt.Sprintf("ADD command completed with [ipamAddResult]: %s [epInfos]: %s [error]: %v ", ipamAddResult.PrettyString(), network.FormatSliceOfPointersToString(epInfos), err)) - // Parse Pod arguments. - k8sPodName, k8sNamespace, err := plugin.getPodInfo(args.Args) - if err != nil { - return err - } + operationTimeMs := time.Since(startTime).Milliseconds() + telemetryClient.SendMetric(telemetry.CNIAddTimeMetricStr, float64(operationTimeMs), make(map[string]string)) + }() - plugin.report.ContainerName = k8sPodName + ":" + k8sNamespace + ipamAddResult = IPAMAddResult{interfaceInfo: make(map[string]network.InterfaceInfo)} k8sContainerID := args.ContainerID if len(k8sContainerID) == 0 { @@ -538,7 +497,7 @@ func (plugin *NetPlugin) Add(args *cniSkel.CmdArgs) error { // triggered only in swift v1 multitenancy // dual nic multitenancy -> two interface infos // multitenancy (swift v1) -> one interface info - plugin.report.Context = "AzureCNIMultitenancy" + telemetryClient.Settings().Context = "AzureCNIMultitenancy" plugin.multitenancyClient.Init(cnsClient, AzureNetIOShim{}) // Temporary if block to determining whether we disable SNAT on host (for multi-tenant scenario only) @@ -580,15 +539,9 @@ func (plugin *NetPlugin) Add(args *cniSkel.CmdArgs) error { if err != nil { return fmt.Errorf("IPAM Invoker Add failed with error: %w", err) } - - // TODO: This proably needs to be changed as we return all interfaces... 
- sendEvent(plugin, fmt.Sprintf("Allocated IPAddress from ipam DefaultInterface: %+v, SecondaryInterfaces: %+v", ipamAddResult.interfaceInfo[ifIndex], ipamAddResult.interfaceInfo))
}
policies := cni.GetPoliciesFromNwCfg(nwCfg.AdditionalArgs)
- // moved to addIpamInvoker
- // sendEvent(plugin, fmt.Sprintf("Allocated IPAddress from ipam interface: %+v", ipamAddResult.PrettyString()))
-
defer func() { //nolint:gocritic
if err != nil {
// for swift v1 multi-tenancies scenario, CNI is not supposed to invoke CNS for cleaning Ips
@@ -606,8 +559,10 @@
infraSeen := false
endpointIndex := 1
+
for key := range ipamAddResult.interfaceInfo {
ifInfo := ipamAddResult.interfaceInfo[key]
+ logger.Info("Processing interfaceInfo:", zap.Any("ifInfo", ifInfo))
natInfo := getNATInfo(nwCfg, options[network.SNATIPKey], enableSnatForDNS)
networkID, _ := plugin.getNetworkID(args.Netns, &ifInfo, nwCfg)
@@ -674,8 +629,6 @@
if err != nil {
return errors.Wrap(err, "failed to create endpoint") // behavior can change if you don't assign to err prior to returning
}
- // telemetry added
- sendEvent(plugin, fmt.Sprintf("CNI ADD Process succeeded for interfaces: %v", ipamAddResult.PrettyString()))
return nil
}
@@ -683,11 +636,13 @@ func (plugin *NetPlugin) findMasterInterface(opt *createEpInfoOpt) string {
switch opt.ifInfo.NICType {
case cns.InfraNIC:
return plugin.findMasterInterfaceBySubnet(opt.ipamAddConfig.nwCfg, &opt.ifInfo.HostSubnetPrefix)
- case cns.NodeNetworkInterfaceFrontendNIC, cns.NodeNetworkInterfaceAccelnetFrontendNIC:
+ case cns.NodeNetworkInterfaceFrontendNIC:
return plugin.findInterfaceByMAC(opt.ifInfo.MacAddress.String())
- case cns.BackendNIC: // TODO: how to find interface with IB NIC by mac address
- opt.ifInfo.Name = ibInterfacePrefix + strconv.Itoa(opt.endpointIndex)
- return opt.ifInfo.Name
+ case cns.BackendNIC:
+ // if windows swiftv2 has the right network drivers, there will be an NDIS interface while the VFs are mounted
+ // when the VF is dismounted, this interface will go away
+ // return a unique interface name to containerd
+ return ibInterfacePrefix + strconv.Itoa(opt.endpointIndex)
default:
return ""
}
@@ -772,7 +727,7 @@ func (plugin *NetPlugin) createEpInfo(opt *createEpInfoOpt) (*network.EndpointIn
BridgeName: opt.ipamAddConfig.nwCfg.Bridge,
NetworkPolicies: networkPolicies, // nw and ep policies separated to avoid possible conflicts
NetNs: opt.ipamAddConfig.args.Netns,
- Options: opt.ipamAddConfig.options,
+ Options: opt.ipamAddConfig.shallowCopyIpamAddConfigOptions(),
DisableHairpinOnHostInterface: opt.ipamAddConfig.nwCfg.DisableHairpinOnHostInterface,
IsIPv6Enabled: opt.ipv6Enabled, // present infra only
@@ -826,6 +781,10 @@ func (plugin *NetPlugin) createEpInfo(opt *createEpInfoOpt) (*network.EndpointIn
// create endpoint policies by appending to network policies
// the value passed into NetworkPolicies should be unaffected since we reassign here
opt.policies = append(opt.policies, endpointPolicies...)
+
+ // appends endpoint policies specific to this interface
+ opt.policies = append(opt.policies, opt.ifInfo.EndpointPolicies...)
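+ // opt.ifInfo.EndpointPolicies carries the per-interface policies that CNS
+ // returned in PodIPInfo.EndpointPolicies; the unit tests model these as a
+ // default-deny ACL pair, e.g.
+ //   {"Type": "ACL", "Action": "Block", "Direction": "Out", "Priority": 10000}
+ // plus the matching "Direction": "In" rule. Appending them after the
+ // network-derived endpoint policies means the assignment below carries both.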
+ endpointInfo.EndpointPolicies = opt.policies // add even more endpoint policies epPolicies, err := getPoliciesFromRuntimeCfg(opt.nwCfg, opt.ipamAddResult.ipv6Enabled) // not specific to delegated or infra @@ -933,7 +892,7 @@ func (plugin *NetPlugin) Get(args *cniSkel.CmdArgs) error { } logger.Info("GET command completed", zap.Any("result", result), - zap.Error(err)) + zap.Error(log.NewErrorWithoutStackTrace(err))) }() // Parse network configuration from stdin. @@ -1004,11 +963,8 @@ func (plugin *NetPlugin) Delete(args *cniSkel.CmdArgs) error { k8sNamespace string networkID string nwInfo network.EndpointInfo - cniMetric telemetry.AIMetric ) - startTime := time.Now() - logger.Info("Processing DEL command", zap.String("containerId", args.ContainerID), zap.String("netNS", args.Netns), @@ -1016,13 +972,14 @@ func (plugin *NetPlugin) Delete(args *cniSkel.CmdArgs) error { zap.Any("args", args.Args), zap.String("path", args.Path), zap.ByteString("stdinData", args.StdinData)) - sendEvent(plugin, fmt.Sprintf("[cni-net] Processing DEL command with args {ContainerID:%v Netns:%v IfName:%v Args:%v Path:%v, StdinData:%s}.", - args.ContainerID, args.Netns, args.IfName, args.Args, args.Path, args.StdinData)) defer func() { logger.Info("DEL command completed", zap.String("pod", k8sPodName), - zap.Error(err)) + zap.Error(log.NewErrorWithoutStackTrace(err))) + telemetryClient.SendEvent(fmt.Sprintf("DEL command completed: [podname]: %s [namespace]: %s [error]: %v", k8sPodName, k8sNamespace, err)) + operationTimeMs := time.Since(startTime).Milliseconds() + telemetryClient.SendMetric(telemetry.CNIDelTimeMetricStr, float64(operationTimeMs), make(map[string]string)) }() // Parse network configuration from stdin. @@ -1040,30 +997,18 @@ func (plugin *NetPlugin) Delete(args *cniSkel.CmdArgs) error { if k8sPodName, k8sNamespace, err = plugin.getPodInfo(args.Args); err != nil { logger.Error("Failed to get POD info", zap.Error(err)) } + telemetryClient.Settings().ContainerName = k8sPodName + ":" + k8sNamespace - plugin.setCNIReportDetails(nwCfg, CNI_DEL, "") - plugin.report.ContainerName = k8sPodName + ":" + k8sNamespace + plugin.setCNIReportDetails(args.ContainerID, CNI_DEL, "") + telemetryClient.SendEvent(fmt.Sprintf("[cni-net] Processing DEL command with args {ContainerID:%v Netns:%v IfName:%v Args:%v Path:%v, StdinData:%s}.", + args.ContainerID, args.Netns, args.IfName, args.Args, args.Path, args.StdinData)) iptables.DisableIPTableLock = nwCfg.DisableIPTableLock - sendMetricFunc := func() { - operationTimeMs := time.Since(startTime).Milliseconds() - cniMetric.Metric = aitelemetry.Metric{ - Name: telemetry.CNIDelTimeMetricStr, - Value: float64(operationTimeMs), - AppVersion: plugin.Version, - CustomDimensions: make(map[string]string), - } - SetCustomDimensions(&cniMetric, nwCfg, err) - telemetry.SendCNIMetric(&cniMetric, plugin.tb) - } - platformInit(nwCfg) logger.Info("Execution mode", zap.String("mode", nwCfg.ExecutionMode)) if nwCfg.ExecutionMode == string(util.Baremetal) { - // schedule send metric before attempting delete - defer sendMetricFunc() _, err = plugin.nnsClient.DeleteContainerNetworking(context.Background(), k8sPodName, args.Netns) if err != nil { return fmt.Errorf("nnsClient.DeleteContainerNetworking failed with err %w", err) @@ -1125,21 +1070,40 @@ func (plugin *NetPlugin) Delete(args *cniSkel.CmdArgs) error { // network ID is passed in and used only for migration // otherwise, in stateless, we don't need the network id for deletion epInfos, err = plugin.nm.GetEndpointState(networkID, 
args.ContainerID)
+ // if stateless CNI fails to get the endpoint from CNS for any reason other than Endpoint Not found
+ if err != nil {
+ if errors.Is(err, network.ErrConnectionFailure) {
+ logger.Info("failed to connect to CNS", zap.String("containerID", args.ContainerID), zap.Error(err))
+ addErr := fsnotify.AddFile(args.ContainerID, args.ContainerID, watcherPath)
+ logger.Info("add containerid file for async delete", zap.String("containerID", args.ContainerID), zap.Error(addErr))
+ if addErr != nil {
+ logger.Error("failed to add file to watcher", zap.String("containerID", args.ContainerID), zap.Error(addErr))
+ return errors.Wrap(addErr, fmt.Sprintf("failed to add file to watcher with containerID %s", args.ContainerID))
+ }
+ return nil
+ }
+ if errors.Is(err, network.ErrEndpointStateNotFound) {
+ logger.Info("Endpoint Not found", zap.String("containerID", args.ContainerID), zap.Error(err))
+ return nil
+ }
+ logger.Error("Get Endpoint State API returned error", zap.String("containerID", args.ContainerID), zap.Error(err))
+ return plugin.RetriableError(fmt.Errorf("failed to delete endpoint: %w", err))
+ }
} else {
epInfos = plugin.nm.GetEndpointInfosFromContainerID(args.ContainerID)
}
// for when the endpoint is not created, but the ips are already allocated (only works if single network, single infra)
- // stateless cni won't have this issue
+ // this block is not applied to stateless CNI
if len(epInfos) == 0 {
endpointID := plugin.nm.GetEndpointID(args.ContainerID, args.IfName)
if !nwCfg.MultiTenancy {
- logger.Error("Failed to query endpoint",
+ logger.Warn("Could not query endpoint",
zap.String("endpoint", endpointID),
zap.Error(err))
- logger.Error("Release ip by ContainerID (endpoint not found)",
+
+ logger.Warn("Release ip by ContainerID (endpoint not found)",
zap.String("containerID", args.ContainerID))
- sendEvent(plugin, fmt.Sprintf("Release ip by ContainerID (endpoint not found):%v", args.ContainerID))
if err = plugin.ipamInvoker.Delete(nil, nwCfg, args, nwInfo.Options); err != nil {
return plugin.RetriableError(fmt.Errorf("failed to release address(no endpoint): %w", err))
}
@@ -1164,18 +1128,16 @@ func (plugin *NetPlugin) Delete(args *cniSkel.CmdArgs) error {
logger.Info("Deleting the endpoints from the ipam")
// delete endpoint state in cns and in statefile
for _, epInfo := range epInfos {
- // schedule send metric before attempting delete
- defer sendMetricFunc() //nolint:gocritic
logger.Info("Deleting endpoint",
zap.String("endpointID", epInfo.EndpointID))
- sendEvent(plugin, fmt.Sprintf("Deleting endpoint:%v", epInfo.EndpointID))
+ telemetryClient.SendEvent("Deleting endpoint: " + epInfo.EndpointID)
if !nwCfg.MultiTenancy && (epInfo.NICType == cns.InfraNIC || epInfo.NICType == "") {
// Delegated/secondary nic ips are statically allocated so we don't need to release
// Call into IPAM plugin to release the endpoint's addresses.
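// Each address on the endpoint (IPv4 and, for dual-stack pods, IPv6) is
// released individually; a failure is wrapped in RetriableError below so the
// container runtime retries the DEL instead of leaking the allocation.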
for i := range epInfo.IPAddresses { logger.Info("Release ip", zap.String("ip", epInfo.IPAddresses[i].IP.String())) - sendEvent(plugin, fmt.Sprintf("Release ip:%s", epInfo.IPAddresses[i].IP.String())) + telemetryClient.SendEvent(fmt.Sprintf("Release ip: %s container id: %s endpoint id: %s", epInfo.IPAddresses[i].IP.String(), args.ContainerID, epInfo.EndpointID)) err = plugin.ipamInvoker.Delete(&epInfo.IPAddresses[i], nwCfg, args, nwInfo.Options) if err != nil { return plugin.RetriableError(fmt.Errorf("failed to release address: %w", err)) @@ -1195,7 +1157,6 @@ func (plugin *NetPlugin) Delete(args *cniSkel.CmdArgs) error { if err != nil { return plugin.RetriableError(fmt.Errorf("failed to save state: %w", err)) } - sendEvent(plugin, fmt.Sprintf("CNI DEL succeeded : Released ip %+v podname %v namespace %v", nwCfg.IPAM.Address, k8sPodName, k8sNamespace)) return err } @@ -1211,11 +1172,8 @@ func (plugin *NetPlugin) Update(args *cniSkel.CmdArgs) error { podCfg *cni.K8SPodEnvArgs orchestratorContext []byte targetNetworkConfig *cns.GetNetworkContainerResponse - cniMetric telemetry.AIMetric ) - startTime := time.Now() - logger.Info("Processing UPDATE command", zap.String("netns", args.Netns), zap.String("args", args.Args), @@ -1235,19 +1193,9 @@ func (plugin *NetPlugin) Update(args *cniSkel.CmdArgs) error { logger.Info("Read network configuration", zap.Any("config", nwCfg)) iptables.DisableIPTableLock = nwCfg.DisableIPTableLock - plugin.setCNIReportDetails(nwCfg, CNI_UPDATE, "") + plugin.setCNIReportDetails(args.ContainerID, CNI_UPDATE, "") defer func() { - operationTimeMs := time.Since(startTime).Milliseconds() - cniMetric.Metric = aitelemetry.Metric{ - Name: telemetry.CNIUpdateTimeMetricStr, - Value: float64(operationTimeMs), - AppVersion: plugin.Version, - CustomDimensions: make(map[string]string), - } - SetCustomDimensions(&cniMetric, nwCfg, err) - telemetry.SendCNIMetric(&cniMetric, plugin.tb) - if result == nil { result = &cniTypesCurr.Result{} } @@ -1266,7 +1214,7 @@ func (plugin *NetPlugin) Update(args *cniSkel.CmdArgs) error { logger.Info("UPDATE command completed", zap.Any("result", result), - zap.Error(err)) + zap.Error(log.NewErrorWithoutStackTrace(err))) }() // Parse Pod arguments. 
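// Note on the zap.Error(log.NewErrorWithoutStackTrace(err)) pattern used in the
// ADD/GET/DEL/UPDATE completion logs above: the wrapper rewrites Format's 'v'
// verb to 's', so the %+v rendering collapses to the plain message chain; since
// that matches Error(), zap omits the multi-line "errorVerbose" field. A
// minimal sketch (the logger and wrapped error here are assumed, not from this
// change):
//
//	wrapped := errors.Wrap(io.EOF, "read failed")
//	logger.Info("op done", zap.Error(log.NewErrorWithoutStackTrace(wrapped)))
//	// emits {"error":"read failed: EOF"} with no errorVerbose key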
@@ -1394,7 +1342,7 @@ func (plugin *NetPlugin) Update(args *cniSkel.CmdArgs) error { } msg := fmt.Sprintf("CNI UPDATE succeeded : Updated %+v podname %v namespace %v", targetNetworkConfig, k8sPodName, k8sNamespace) - plugin.setCNIReportDetails(nwCfg, CNI_UPDATE, msg) + plugin.setCNIReportDetails(args.ContainerID, CNI_UPDATE, msg) return nil } diff --git a/cni/network/network_linux.go b/cni/network/network_linux.go index 95c145e42b..2b090523c0 100644 --- a/cni/network/network_linux.go +++ b/cni/network/network_linux.go @@ -9,6 +9,7 @@ import ( "github.com/Azure/azure-container-networking/network" "github.com/Azure/azure-container-networking/network/policy" cniTypesCurr "github.com/containernetworking/cni/pkg/types/100" + "go.uber.org/zap" ) const ( @@ -37,13 +38,15 @@ func addSnatForDNS(gwIPString string, epInfo *network.EndpointInfo, result *netw func setNetworkOptions(cnsNwConfig *cns.GetNetworkContainerResponse, nwInfo *network.EndpointInfo) { if cnsNwConfig != nil && cnsNwConfig.MultiTenancyInfo.ID != 0 { logger.Info("Setting Network Options") - vlanMap := make(map[string]interface{}) - vlanMap[network.VlanIDKey] = strconv.Itoa(cnsNwConfig.MultiTenancyInfo.ID) - vlanMap[network.SnatBridgeIPKey] = cnsNwConfig.LocalIPConfiguration.GatewayIPAddress + "/" + strconv.Itoa(int(cnsNwConfig.LocalIPConfiguration.IPSubnet.PrefixLength)) - nwInfo.Options[dockerNetworkOption] = vlanMap + optionsMap := make(map[string]interface{}) + optionsMap[network.VlanIDKey] = strconv.Itoa(cnsNwConfig.MultiTenancyInfo.ID) + optionsMap[network.SnatBridgeIPKey] = cnsNwConfig.LocalIPConfiguration.GatewayIPAddress + "/" + strconv.Itoa(int(cnsNwConfig.LocalIPConfiguration.IPSubnet.PrefixLength)) + logger.Info("Add vlanIDKey and SnatBridgeIPKey to optionsMap", zap.String("vlanIDKey", network.VlanIDKey), zap.String("SnatBridgeIPKey", network.SnatBridgeIPKey)) + nwInfo.Options[dockerNetworkOption] = optionsMap } } +// setEndpointOptions updates the epInfo Data fields: allow host-to-NC and NC-to-host communication, and the network container ID func setEndpointOptions(cnsNwConfig *cns.GetNetworkContainerResponse, epInfo *network.EndpointInfo, vethName string) { if cnsNwConfig != nil && cnsNwConfig.MultiTenancyInfo.ID != 0 { logger.Info("Setting Endpoint Options") diff --git a/cni/network/network_linux_test.go b/cni/network/network_linux_test.go index b0fda74548..97304569b3 100644 --- a/cni/network/network_linux_test.go +++ b/cni/network/network_linux_test.go @@ -4,10 +4,17 @@ package network import ( + "fmt" + "net" + "regexp" "testing" + "github.com/Azure/azure-container-networking/cni" "github.com/Azure/azure-container-networking/cns" "github.com/Azure/azure-container-networking/network" + "github.com/Azure/azure-container-networking/platform" + cniSkel "github.com/containernetworking/cni/pkg/skel" + "github.com/containernetworking/cni/pkg/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -160,3 +167,383 @@ func TestAddSnatForDns(t *testing.T) { }) } } + +// linux swiftv2 example +func GetTestCNSResponseSecondaryLinux(macAddress string) map[string]network.InterfaceInfo { + parsedMAC, _ := net.ParseMAC(macAddress) + return map[string]network.InterfaceInfo{ + string(cns.InfraNIC): { + IPConfigs: []*network.IPConfig{ + { + Address: *getCIDRNotationForAddress("20.241.0.35/16"), + Gateway: net.ParseIP("20.241.0.35"), // actual scenario doesn't have a gateway + }, + }, + Routes: []network.RouteInfo{ + { + Dst: *getCIDRNotationForAddress("169.254.2.1/16"), + Gw: net.ParseIP("10.244.2.1"), + }, + { + Dst: 
*getCIDRNotationForAddress("0.0.0.0/32"), + Gw: net.ParseIP("169.254.2.1"), + }, + }, + NICType: cns.InfraNIC, + SkipDefaultRoutes: true, + HostSubnetPrefix: *getCIDRNotationForAddress("10.224.0.0/16"), + }, + macAddress: { + MacAddress: parsedMAC, + IPConfigs: []*network.IPConfig{ + { + Address: *getCIDRNotationForAddress("10.241.0.35/32"), + Gateway: net.ParseIP("10.241.0.35"), // actual scenario doesn't have a gateway + }, + }, + Routes: []network.RouteInfo{ + { + Dst: *getCIDRNotationForAddress("169.254.2.1/32"), + Gw: net.ParseIP("10.244.2.1"), + }, + { + Dst: *getCIDRNotationForAddress("0.0.0.0/0"), + Gw: net.ParseIP("169.254.2.1"), + }, + }, + NICType: cns.NodeNetworkInterfaceFrontendNIC, + SkipDefaultRoutes: false, + }, + } +} + +// Happy path scenario for add and delete +func TestPluginLinuxAdd(t *testing.T) { + resources := GetTestResources() + mulNwCfg := cni.NetworkConfig{ + CNIVersion: "0.3.0", + Name: "mulnet", + MultiTenancy: true, + EnableExactMatchForPodName: true, + Master: "eth0", + } + nwCfg := cni.NetworkConfig{ + CNIVersion: "0.3.0", + Name: "net", + MultiTenancy: false, + EnableExactMatchForPodName: true, + // test auto finding master interface + DNS: types.DNS{ + Nameservers: []string{ + "ns1", "ns2", + }, + Domain: "myDomain", + }, + } + macAddress := "60:45:bd76:f6:44" + parsedMACAddress, _ := net.ParseMAC(macAddress) + type endpointEntry struct { + epInfo *network.EndpointInfo + epIDRegex string + } + + tests := []struct { + name string + plugin *NetPlugin + args *cniSkel.CmdArgs + want []endpointEntry + match func(*network.EndpointInfo, *network.EndpointInfo) bool + }{ + { + // in swiftv1 linux multitenancy, we only get 1 response from cns at a time + name: "Add Happy Path Swiftv1 Multitenancy", + plugin: &NetPlugin{ + Plugin: resources.Plugin, + nm: network.NewMockNetworkmanager(network.NewMockEndpointClient(nil)), + multitenancyClient: NewMockMultitenancy(false, []*cns.GetNetworkContainerResponse{GetTestCNSResponse3()}), + }, + args: &cniSkel.CmdArgs{ + StdinData: mulNwCfg.Serialize(), + ContainerID: "test-container", + Netns: "bc526fae-4ba0-4e80-bc90-ad721e5850bf", + Args: fmt.Sprintf("K8S_POD_NAME=%v;K8S_POD_NAMESPACE=%v", "test-pod", "test-pod-ns"), + IfName: eth0IfName, + }, + match: func(ei1, ei2 *network.EndpointInfo) bool { + return ei1.NetworkContainerID == ei2.NetworkContainerID + }, + want: []endpointEntry{ + // should match with GetTestCNSResponse3 + { + epInfo: &network.EndpointInfo{ + ContainerID: "test-container", + Data: map[string]interface{}{ + "VlanID": 1, // Vlan ID used here + "localIP": "168.254.0.4/17", + "snatBridgeIP": "168.254.0.1/17", + "vethname": "mulnettest-containereth0", + }, + Routes: []network.RouteInfo{ + { + Dst: *parseCIDR("192.168.0.4/24"), + Gw: net.ParseIP("192.168.0.1"), + // interface to use is NOT propagated to ep info + }, + }, + AllowInboundFromHostToNC: true, + EnableSnatOnHost: true, + EnableMultiTenancy: true, + EnableSnatForDns: true, + PODName: "test-pod", + PODNameSpace: "test-pod-ns", + NICType: cns.InfraNIC, + MasterIfName: eth0IfName, + NetworkContainerID: "Swift_74b34111-6e92-49ee-a82a-8881c850ce0e", + NetworkID: "mulnet", + NetNsPath: "bc526fae-4ba0-4e80-bc90-ad721e5850bf", + NetNs: "bc526fae-4ba0-4e80-bc90-ad721e5850bf", + HostSubnetPrefix: "20.240.0.0/24", + Options: map[string]interface{}{ + dockerNetworkOption: map[string]interface{}{ + "VlanID": "1", // doesn't seem to be used in linux + "snatBridgeIP": "168.254.0.1/17", + }, + }, + // matches with cns ip configuration + IPAddresses: []net.IPNet{ + { 
+ IP: net.ParseIP("20.0.0.10"), + Mask: getIPNetWithString("20.0.0.10/24").Mask, + }, + }, + NATInfo: nil, + // ip config pod ip + mask(s) from cns > interface info > subnet info + Subnets: []network.SubnetInfo{ + { + Family: platform.AfINET, + // matches cns ip configuration (20.0.0.1/24 == 20.0.0.0/24) + Prefix: *getIPNetWithString("20.0.0.0/24"), + // matches cns ip configuration gateway ip address + Gateway: net.ParseIP("20.0.0.1"), + }, + }, + }, + epIDRegex: `test-con-eth0`, + }, + }, + }, + { + // Based on a live swiftv2 linux cluster's cns invoker response + name: "Add Happy Path Swiftv2", + plugin: &NetPlugin{ + Plugin: resources.Plugin, + nm: network.NewMockNetworkmanager(network.NewMockEndpointClient(nil)), + ipamInvoker: &MockIpamInvoker{ + add: func(opt IPAMAddConfig) (ipamAddResult IPAMAddResult, err error) { + ipamAddResult = IPAMAddResult{interfaceInfo: make(map[string]network.InterfaceInfo)} + ipamAddResult.interfaceInfo = GetTestCNSResponseSecondaryLinux(macAddress) + opt.options["testflag"] = "copy" + return ipamAddResult, nil + }, + ipMap: make(map[string]bool), + }, + netClient: &InterfaceGetterMock{ + // used in secondary find master interface + interfaces: []net.Interface{ + { + Name: "secondary", + HardwareAddr: parsedMACAddress, + }, + { + Name: "primary", + HardwareAddr: net.HardwareAddr{}, + }, + }, + // used in primary find master interface + interfaceAddrs: map[string][]net.Addr{ + "primary": { + // match with the host subnet prefix to know that this ip belongs to the host + getCIDRNotationForAddress("10.224.0.0/16"), + }, + }, + }, + }, + args: &cniSkel.CmdArgs{ + StdinData: nwCfg.Serialize(), + ContainerID: "test-container", + Netns: "bc526fae-4ba0-4e80-bc90-ad721e5850bf", + Args: fmt.Sprintf("K8S_POD_NAME=%v;K8S_POD_NAMESPACE=%v", "test-pod", "test-pod-ns"), + IfName: eth0IfName, + }, + match: func(ei1, ei2 *network.EndpointInfo) bool { + return ei1.NICType == ei2.NICType + }, + want: []endpointEntry{ + // should match infra + { + epInfo: &network.EndpointInfo{ + ContainerID: "test-container", + Data: map[string]interface{}{ + "vethname": "nettest-containereth0", + }, + Routes: []network.RouteInfo{ + { + Dst: *getCIDRNotationForAddress("169.254.2.1/16"), + Gw: net.ParseIP("10.244.2.1"), + }, + { + Dst: *getCIDRNotationForAddress("0.0.0.0/32"), + Gw: net.ParseIP("169.254.2.1"), + }, + }, + PODName: "test-pod", + PODNameSpace: "test-pod-ns", + NICType: cns.InfraNIC, + SkipDefaultRoutes: true, + MasterIfName: "primary", + NetworkID: "net", + NetNsPath: "bc526fae-4ba0-4e80-bc90-ad721e5850bf", + NetNs: "bc526fae-4ba0-4e80-bc90-ad721e5850bf", + HostSubnetPrefix: "10.224.0.0/16", + EndpointDNS: network.DNSInfo{ + Servers: []string{ + "ns1", "ns2", + }, + Suffix: "myDomain", + }, + Options: map[string]interface{}{ + "testflag": "copy", + }, + // matches with cns ip configuration + IPAddresses: []net.IPNet{ + { + IP: net.ParseIP("20.241.0.35"), + Mask: getIPNetWithString("20.241.0.35/16").Mask, + }, + }, + NATInfo: nil, + // ip config pod ip + mask(s) from cns > interface info > subnet info + Subnets: []network.SubnetInfo{ + { + Family: platform.AfINET, + // matches cns ip configuration (20.241.0.0/16 == 20.241.0.35/16) + Prefix: *getIPNetWithString("20.241.0.0/16"), + // matches cns ip configuration gateway ip address + Gateway: net.ParseIP("20.241.0.35"), + }, + }, + }, + epIDRegex: `.*`, + }, + // should match secondary + { + epInfo: &network.EndpointInfo{ + MacAddress: parsedMACAddress, + ContainerID: "test-container", + Data: map[string]interface{}{ + 
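// [Editor's note — hypothetical naming sketch, inferred only from these fixtures] the expected
// veth name concatenates the network name, container ID, and ifName: "net"+"test-container"+"eth0"
// here, and "mulnet"+"test-container"+"eth0" in the multitenancy case above, i.e. roughly
//
//	vethName := nwCfg.Name + args.ContainerID + args.IfName // "nettest-containereth0"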
"vethname": "nettest-containereth0", + }, + Routes: []network.RouteInfo{ + { + Dst: *getCIDRNotationForAddress("169.254.2.1/32"), + Gw: net.ParseIP("10.244.2.1"), + }, + { + Dst: *getCIDRNotationForAddress("0.0.0.0/0"), + Gw: net.ParseIP("169.254.2.1"), + }, + }, + PODName: "test-pod", + PODNameSpace: "test-pod-ns", + NICType: cns.NodeNetworkInterfaceFrontendNIC, + SkipDefaultRoutes: false, + MasterIfName: "secondary", + NetworkID: "net", + NetNsPath: "bc526fae-4ba0-4e80-bc90-ad721e5850bf", + NetNs: "bc526fae-4ba0-4e80-bc90-ad721e5850bf", + HostSubnetPrefix: "", + EndpointDNS: network.DNSInfo{ + Servers: []string{ + "ns1", "ns2", + }, + Suffix: "myDomain", + }, + Options: map[string]interface{}{ + "testflag": "copy", + }, + // matches with cns ip configuration + IPAddresses: []net.IPNet{ + { + IP: net.ParseIP("10.241.0.35"), + Mask: getIPNetWithString("10.241.0.35/32").Mask, + }, + }, + NATInfo: nil, + // ip config pod ip + mask(s) from cns > interface info > subnet info + Subnets: []network.SubnetInfo{ + { + Family: platform.AfINET, + Prefix: *getIPNetWithString("10.241.0.35/32"), + // matches cns ip configuration gateway ip address + Gateway: net.ParseIP("10.241.0.35"), + }, + }, + }, + epIDRegex: `.*`, + }, + }, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + err := tt.plugin.Add(tt.args) + require.NoError(t, err) + allEndpoints, _ := tt.plugin.nm.GetAllEndpoints("") + require.Len(t, allEndpoints, len(tt.want)) + + // compare contents + for _, wantedEndpointEntry := range tt.want { + epID := "none" + for _, endpointInfo := range allEndpoints { + if !tt.match(wantedEndpointEntry.epInfo, endpointInfo) { + continue + } + // save the endpoint id before removing it + epID = endpointInfo.EndpointID + require.Regexp(t, regexp.MustCompile(wantedEndpointEntry.epIDRegex), epID) + + // omit endpoint id and ifname fields as they are nondeterministic + endpointInfo.EndpointID = "" + endpointInfo.IfName = "" + + require.Equal(t, wantedEndpointEntry.epInfo, endpointInfo) + } + if epID == "none" { + t.Fail() + } + err = tt.plugin.nm.DeleteEndpoint("", epID, nil) + require.NoError(t, err) + } + + // confirm separate entities + // that is, if one is modified, the other should not be modified + epInfos := []*network.EndpointInfo{} + for _, val := range allEndpoints { + epInfos = append(epInfos, val) + } + if len(epInfos) > 1 { + epInfo1 := epInfos[0] + epInfo2 := epInfos[1] + epInfo1.Data["dummy"] = "dummy value" + epInfo1.Options["dummy"] = "another dummy value" + require.NotEqual(t, epInfo1.Data, epInfo2.Data) + require.NotEqual(t, epInfo1.Options, epInfo2.Options) + } + + // ensure deleted + require.Empty(t, allEndpoints) + }) + } +} diff --git a/cni/network/network_test.go b/cni/network/network_test.go index 430fd6ea57..bb123ede04 100644 --- a/cni/network/network_test.go +++ b/cni/network/network_test.go @@ -5,6 +5,7 @@ import ( "net" "os" "runtime" + "strconv" "testing" "github.com/Azure/azure-container-networking/cni" @@ -16,7 +17,6 @@ import ( "github.com/Azure/azure-container-networking/network/networkutils" "github.com/Azure/azure-container-networking/network/policy" "github.com/Azure/azure-container-networking/nns" - "github.com/Azure/azure-container-networking/telemetry" cniSkel "github.com/containernetworking/cni/pkg/skel" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -75,10 +75,9 @@ func GetTestResources() *NetPlugin { config := &common.PluginConfig{} grpcClient := &nns.MockGrpcClient{} plugin, _ := 
NewPlugin(pluginName, config, grpcClient, &Multitenancy{}) - plugin.report = &telemetry.CNIReport{} mockNetworkManager := acnnetwork.NewMockNetworkmanager(acnnetwork.NewMockEndpointClient(nil)) plugin.nm = mockNetworkManager - plugin.ipamInvoker = NewMockIpamInvoker(isIPv6, false, false, false, false, false, false) + plugin.ipamInvoker = NewMockIpamInvoker(isIPv6, false, false, false, false) return plugin } @@ -394,9 +393,9 @@ func TestIpamAddFail(t *testing.T) { for i, method := range tt.methods { fmt.Println("method", method, "wanterr", tt.wantErr[i]) if tt.wantErr[i] { - plugin.ipamInvoker = NewMockIpamInvoker(false, true, false, false, false, false, false) + plugin.ipamInvoker = NewMockIpamInvoker(false, true, false, false, false) } else { - plugin.ipamInvoker = NewMockIpamInvoker(false, false, false, false, false, false, false) + plugin.ipamInvoker = NewMockIpamInvoker(false, false, false, false, false) } if tt.wantEndpointErr { @@ -459,7 +458,7 @@ func TestIpamDeleteFail(t *testing.T) { err := plugin.Add(tt.args) require.NoError(t, err) - plugin.ipamInvoker = NewMockIpamInvoker(false, true, false, false, false, false, false) + plugin.ipamInvoker = NewMockIpamInvoker(false, true, false, false, false) err = plugin.Delete(args) if tt.wantErr { require.Error(t, err) @@ -489,9 +488,7 @@ func TestAddDualStack(t *testing.T) { plugin: &NetPlugin{ Plugin: cniPlugin, nm: acnnetwork.NewMockNetworkmanager(acnnetwork.NewMockEndpointClient(nil)), - ipamInvoker: NewMockIpamInvoker(true, false, false, false, false, false, false), - report: &telemetry.CNIReport{}, - tb: &telemetry.TelemetryBuffer{}, + ipamInvoker: NewMockIpamInvoker(true, false, false, false, false), }, wantErr: false, }, @@ -500,9 +497,7 @@ func TestAddDualStack(t *testing.T) { plugin: &NetPlugin{ Plugin: cniPlugin, nm: acnnetwork.NewMockNetworkmanager(acnnetwork.NewMockEndpointClient(nil)), - ipamInvoker: NewMockIpamInvoker(true, false, true, false, false, false, false), - report: &telemetry.CNIReport{}, - tb: &telemetry.TelemetryBuffer{}, + ipamInvoker: NewMockIpamInvoker(true, false, true, false, false), }, wantErr: true, }, @@ -546,9 +541,7 @@ func TestPluginGet(t *testing.T) { plugin: &NetPlugin{ Plugin: plugin, nm: acnnetwork.NewMockNetworkmanager(acnnetwork.NewMockEndpointClient(nil)), - ipamInvoker: NewMockIpamInvoker(false, false, false, false, false, false, false), - report: &telemetry.CNIReport{}, - tb: &telemetry.TelemetryBuffer{}, + ipamInvoker: NewMockIpamInvoker(false, false, false, false, false), }, wantErr: false, }, @@ -558,9 +551,7 @@ func TestPluginGet(t *testing.T) { plugin: &NetPlugin{ Plugin: plugin, nm: acnnetwork.NewMockNetworkmanager(acnnetwork.NewMockEndpointClient(nil)), - ipamInvoker: NewMockIpamInvoker(false, false, false, false, false, false, false), - report: &telemetry.CNIReport{}, - tb: &telemetry.TelemetryBuffer{}, + ipamInvoker: NewMockIpamInvoker(false, false, false, false, false), }, wantErr: true, wantErrMsg: "Network not found", @@ -571,9 +562,7 @@ func TestPluginGet(t *testing.T) { plugin: &NetPlugin{ Plugin: plugin, nm: acnnetwork.NewMockNetworkmanager(acnnetwork.NewMockEndpointClient(nil)), - ipamInvoker: NewMockIpamInvoker(false, false, false, false, false, false, false), - report: &telemetry.CNIReport{}, - tb: &telemetry.TelemetryBuffer{}, + ipamInvoker: NewMockIpamInvoker(false, false, false, false, false), }, wantErr: true, wantErrMsg: "Endpoint not found", @@ -609,6 +598,121 @@ func TestPluginGet(t *testing.T) { /* Multitenancy scenarios */ +// For use with GetNetworkContainer +func 
GetTestCNSResponse0() *cns.GetNetworkContainerResponse { + return &cns.GetNetworkContainerResponse{ + IPConfiguration: cns.IPConfiguration{ + IPSubnet: cns.IPSubnet{ + IPAddress: "192.168.0.4", + PrefixLength: ipPrefixLen, + }, + GatewayIPAddress: "192.168.0.1", + }, + LocalIPConfiguration: cns.IPConfiguration{ + IPSubnet: cns.IPSubnet{ + IPAddress: "169.254.0.4", + PrefixLength: localIPPrefixLen, + }, + GatewayIPAddress: "169.254.0.1", + }, + + PrimaryInterfaceIdentifier: "10.240.0.4/24", + MultiTenancyInfo: cns.MultiTenancyInfo{ + EncapType: cns.Vlan, + ID: 1, + }, + } +} + +// For use with GetAllNetworkContainers +func GetTestCNSResponse1() *cns.GetNetworkContainerResponse { + return &cns.GetNetworkContainerResponse{ + IPConfiguration: cns.IPConfiguration{ + IPSubnet: cns.IPSubnet{ + IPAddress: "20.0.0.10", + PrefixLength: ipPrefixLen, + }, + GatewayIPAddress: "20.0.0.1", + }, + LocalIPConfiguration: cns.IPConfiguration{ + IPSubnet: cns.IPSubnet{ + IPAddress: "168.254.0.4", + PrefixLength: localIPPrefixLen, + }, + GatewayIPAddress: "168.254.0.1", + }, + + PrimaryInterfaceIdentifier: "20.240.0.4/24", + MultiTenancyInfo: cns.MultiTenancyInfo{ + EncapType: cns.Vlan, + ID: multiTenancyVlan1, + }, + } +} + +// For use with GetAllNetworkContainers in windows dualnic +func GetTestCNSResponse2() *cns.GetNetworkContainerResponse { + return &cns.GetNetworkContainerResponse{ + IPConfiguration: cns.IPConfiguration{ + IPSubnet: cns.IPSubnet{ + IPAddress: "10.0.0.10", + PrefixLength: ipPrefixLen, + }, + GatewayIPAddress: "10.0.0.1", + }, + LocalIPConfiguration: cns.IPConfiguration{ + IPSubnet: cns.IPSubnet{ + IPAddress: "169.254.0.4", + PrefixLength: localIPPrefixLen, + }, + GatewayIPAddress: "169.254.0.1", + }, + + PrimaryInterfaceIdentifier: "10.240.0.4/24", + MultiTenancyInfo: cns.MultiTenancyInfo{ + EncapType: cns.Vlan, + ID: multiTenancyVlan2, + }, + } +} + +// For use with GetAllNetworkContainers in linux multitenancy +func GetTestCNSResponse3() *cns.GetNetworkContainerResponse { + return &cns.GetNetworkContainerResponse{ + NetworkContainerID: "Swift_74b34111-6e92-49ee-a82a-8881c850ce0e", + IPConfiguration: cns.IPConfiguration{ + IPSubnet: cns.IPSubnet{ + IPAddress: "20.0.0.10", + PrefixLength: ipPrefixLen, + }, + DNSServers: []string{ + "168.63.129.16", + }, + GatewayIPAddress: "20.0.0.1", + }, + Routes: []cns.Route{ + // dummy route + { + IPAddress: "192.168.0.4/24", + GatewayIPAddress: "192.168.0.1", + }, + }, + MultiTenancyInfo: cns.MultiTenancyInfo{ + EncapType: cns.Vlan, + ID: multiTenancyVlan1, + }, + PrimaryInterfaceIdentifier: "20.240.0.4/24", + LocalIPConfiguration: cns.IPConfiguration{ + IPSubnet: cns.IPSubnet{ + IPAddress: "168.254.0.4", + PrefixLength: localIPPrefixLen, + }, + GatewayIPAddress: "168.254.0.1", + }, + AllowHostToNCCommunication: true, + AllowNCToHostCommunication: false, + } +} // Test Multitenancy Add func TestPluginMultitenancyAdd(t *testing.T) { @@ -634,15 +738,13 @@ func TestPluginMultitenancyAdd(t *testing.T) { plugin: &NetPlugin{ Plugin: plugin, nm: acnnetwork.NewMockNetworkmanager(acnnetwork.NewMockEndpointClient(nil)), - tb: &telemetry.TelemetryBuffer{}, - report: &telemetry.CNIReport{}, - multitenancyClient: NewMockMultitenancy(false), + multitenancyClient: NewMockMultitenancy(false, []*cns.GetNetworkContainerResponse{GetTestCNSResponse1()}), }, args: &cniSkel.CmdArgs{ StdinData: localNwCfg.Serialize(), ContainerID: "test-container", - Netns: "test-container", + Netns: "bc526fae-4ba0-4e80-bc90-ad721e5850bf", Args: 
fmt.Sprintf("K8S_POD_NAME=%v;K8S_POD_NAMESPACE=%v", "test-pod", "test-pod-ns"), IfName: eth0IfName, }, @@ -653,9 +755,7 @@ func TestPluginMultitenancyAdd(t *testing.T) { plugin: &NetPlugin{ Plugin: plugin, nm: acnnetwork.NewMockNetworkmanager(acnnetwork.NewMockEndpointClient(nil)), - tb: &telemetry.TelemetryBuffer{}, - report: &telemetry.CNIReport{}, - multitenancyClient: NewMockMultitenancy(true), + multitenancyClient: NewMockMultitenancy(true, []*cns.GetNetworkContainerResponse{GetTestCNSResponse1()}), }, args: &cniSkel.CmdArgs{ StdinData: localNwCfg.Serialize(), @@ -679,6 +779,7 @@ func TestPluginMultitenancyAdd(t *testing.T) { } else { require.NoError(t, err) endpoints, _ := tt.plugin.nm.GetAllEndpoints(localNwCfg.Name) + require.Condition(t, assert.Comparison(func() bool { return len(endpoints) == 1 })) } }) @@ -687,7 +788,7 @@ func TestPluginMultitenancyAdd(t *testing.T) { func TestPluginMultitenancyDelete(t *testing.T) { plugin := GetTestResources() - plugin.multitenancyClient = NewMockMultitenancy(false) + plugin.multitenancyClient = NewMockMultitenancy(false, []*cns.GetNetworkContainerResponse{GetTestCNSResponse1()}) localNwCfg := cni.NetworkConfig{ CNIVersion: "0.3.0", Name: "mulnet", @@ -788,8 +889,6 @@ func TestPluginBaremetalAdd(t *testing.T) { plugin: &NetPlugin{ Plugin: plugin, nm: acnnetwork.NewMockNetworkmanager(acnnetwork.NewMockEndpointClient(nil)), - tb: &telemetry.TelemetryBuffer{}, - report: &telemetry.CNIReport{}, nnsClient: &nns.MockGrpcClient{}, }, args: &cniSkel.CmdArgs{ @@ -806,8 +905,6 @@ func TestPluginBaremetalAdd(t *testing.T) { plugin: &NetPlugin{ Plugin: plugin, nm: acnnetwork.NewMockNetworkmanager(acnnetwork.NewMockEndpointClient(nil)), - tb: &telemetry.TelemetryBuffer{}, - report: &telemetry.CNIReport{}, nnsClient: &nns.MockGrpcClient{Fail: true}, }, args: &cniSkel.CmdArgs{ @@ -1157,8 +1254,9 @@ func TestGetPodSubnetNatInfo(t *testing.T) { } type InterfaceGetterMock struct { - interfaces []net.Interface - err error + interfaces []net.Interface + interfaceAddrs map[string][]net.Addr // key is interfaceName, value is one interface's CIDRs(IPs+Masks) + err error } func (n *InterfaceGetterMock) GetNetworkInterfaces() ([]net.Interface, error) { @@ -1168,6 +1266,21 @@ func (n *InterfaceGetterMock) GetNetworkInterfaces() ([]net.Interface, error) { return n.interfaces, nil } +func (n *InterfaceGetterMock) GetNetworkInterfaceAddrs(iface *net.Interface) ([]net.Addr, error) { + if n.err != nil { + return nil, n.err + } + + // actual net.Addr invokes syscall; here just create a mocked net.Addr{} + netAddrs := []net.Addr{} + for _, intf := range n.interfaces { + if iface.Name == intf.Name { + return n.interfaceAddrs[iface.Name], nil + } + } + return netAddrs, nil +} + func TestPluginSwiftV2Add(t *testing.T) { plugin, _ := cni.NewPlugin("name", "0.3.0") @@ -1199,9 +1312,7 @@ func TestPluginSwiftV2Add(t *testing.T) { plugin: &NetPlugin{ Plugin: plugin, nm: acnnetwork.NewMockNetworkmanager(acnnetwork.NewMockEndpointClient(nil)), - ipamInvoker: NewMockIpamInvoker(false, false, false, true, false, true, false), - report: &telemetry.CNIReport{}, - tb: &telemetry.TelemetryBuffer{}, + ipamInvoker: NewMockIpamInvoker(false, false, false, true, false), netClient: &InterfaceGetterMock{ interfaces: []net.Interface{ {Name: "eth0"}, @@ -1216,9 +1327,7 @@ func TestPluginSwiftV2Add(t *testing.T) { plugin: &NetPlugin{ Plugin: plugin, nm: acnnetwork.NewMockNetworkmanager(acnnetwork.NewMockEndpointClient(nil)), - ipamInvoker: NewMockIpamInvoker(false, false, false, true, true, true, 
true), - report: &telemetry.CNIReport{}, - tb: &telemetry.TelemetryBuffer{}, + ipamInvoker: NewMockIpamInvoker(false, false, false, true, true), netClient: &InterfaceGetterMock{ interfaces: []net.Interface{ {Name: "eth0"}, @@ -1240,9 +1349,7 @@ func TestPluginSwiftV2Add(t *testing.T) { return nil })), - ipamInvoker: NewMockIpamInvoker(false, false, false, true, false, false, false), - report: &telemetry.CNIReport{}, - tb: &telemetry.TelemetryBuffer{}, + ipamInvoker: NewMockIpamInvoker(false, false, false, true, false), netClient: &InterfaceGetterMock{ interfaces: []net.Interface{ {Name: "eth0"}, @@ -1253,54 +1360,12 @@ func TestPluginSwiftV2Add(t *testing.T) { wantErr: true, wantErrMsg: "failed to create endpoint: MockEndpointClient Error : AddEndpoints Delegated VM NIC failed", }, - { - name: "SwiftV2 EndpointClient Add fail with AccelnetNIC", - plugin: &NetPlugin{ - Plugin: plugin, - nm: acnnetwork.NewMockNetworkmanager(acnnetwork.NewMockEndpointClient(func(ep *acnnetwork.EndpointInfo) error { - if ep.NICType == cns.NodeNetworkInterfaceAccelnetFrontendNIC { - return acnnetwork.NewErrorMockEndpointClient("AddEndpoints Accelnet VM NIC failed") //nolint:wrapcheck // ignore wrapping for test - } - - return nil - })), - ipamInvoker: NewMockIpamInvoker(false, false, false, false, false, true, false), - report: &telemetry.CNIReport{}, - tb: &telemetry.TelemetryBuffer{}, - netClient: &InterfaceGetterMock{ - interfaces: []net.Interface{ - {Name: "eth0"}, - }, - }, - }, - args: args, - wantErr: true, - wantErrMsg: "failed to create endpoint: MockEndpointClient Error : AddEndpoints Accelnet VM NIC failed", - }, { name: "SwiftV2 Find Interface By MAC Address Fail with delegated VM NIC", plugin: &NetPlugin{ Plugin: plugin, nm: acnnetwork.NewMockNetworkmanager(acnnetwork.NewMockEndpointClient(nil)), - ipamInvoker: NewMockIpamInvoker(false, false, false, true, false, false, false), - report: &telemetry.CNIReport{}, - tb: &telemetry.TelemetryBuffer{}, - netClient: &InterfaceGetterMock{ - interfaces: []net.Interface{}, - }, - }, - args: args, - wantErr: true, - wantErrMsg: "Failed to find the master interface", - }, - { - name: "SwiftV2 Find Interface By MAC Address Fail with Accelnet VM NIC", - plugin: &NetPlugin{ - Plugin: plugin, - nm: acnnetwork.NewMockNetworkmanager(acnnetwork.NewMockEndpointClient(nil)), - ipamInvoker: NewMockIpamInvoker(false, false, false, false, false, true, false), - report: &telemetry.CNIReport{}, - tb: &telemetry.TelemetryBuffer{}, + ipamInvoker: NewMockIpamInvoker(false, false, false, true, false), netClient: &InterfaceGetterMock{ interfaces: []net.Interface{}, }, @@ -1314,9 +1379,7 @@ func TestPluginSwiftV2Add(t *testing.T) { plugin: &NetPlugin{ Plugin: plugin, nm: acnnetwork.NewMockNetworkmanager(acnnetwork.NewMockEndpointClient(nil)), - ipamInvoker: NewMockIpamInvoker(false, false, false, false, false, false, false), - report: &telemetry.CNIReport{}, - tb: &telemetry.TelemetryBuffer{}, + ipamInvoker: NewMockIpamInvoker(false, false, false, false, false), netClient: &InterfaceGetterMock{ interfaces: []net.Interface{}, }, @@ -1399,33 +1462,6 @@ func TestPluginSwiftV2MultipleAddDelete(t *testing.T) { NICType: cns.NodeNetworkInterfaceFrontendNIC, }, }), - report: &telemetry.CNIReport{}, - tb: &telemetry.TelemetryBuffer{}, - netClient: &InterfaceGetterMock{ - interfaces: []net.Interface{ - {Name: "eth0"}, - }, - }, - }, - args: args, - wantErr: false, - wantNumEps: 2, - }, - { - name: "SwiftV2 Add Delegated and Accelnet", - plugin: &NetPlugin{ - Plugin: plugin, - nm: 
acnnetwork.NewMockNetworkmanager(acnnetwork.NewMockEndpointClient(nil)), - ipamInvoker: NewCustomMockIpamInvoker(map[string]acnnetwork.InterfaceInfo{ - "eth1": { - NICType: cns.NodeNetworkInterfaceAccelnetFrontendNIC, - }, - "eth0": { - NICType: cns.NodeNetworkInterfaceFrontendNIC, - }, - }), - report: &telemetry.CNIReport{}, - tb: &telemetry.TelemetryBuffer{}, netClient: &InterfaceGetterMock{ interfaces: []net.Interface{ {Name: "eth0"}, @@ -1436,34 +1472,6 @@ func TestPluginSwiftV2MultipleAddDelete(t *testing.T) { wantErr: false, wantNumEps: 2, }, - { - name: "SwiftV2 Add Infra and Delegated and Accelnet", - plugin: &NetPlugin{ - Plugin: plugin, - nm: acnnetwork.NewMockNetworkmanager(acnnetwork.NewMockEndpointClient(nil)), - ipamInvoker: NewCustomMockIpamInvoker(map[string]acnnetwork.InterfaceInfo{ - "eth0": { - NICType: cns.InfraNIC, - }, - "eth1": { - NICType: cns.NodeNetworkInterfaceAccelnetFrontendNIC, - }, - "eth2": { - NICType: cns.NodeNetworkInterfaceFrontendNIC, - }, - }), - report: &telemetry.CNIReport{}, - tb: &telemetry.TelemetryBuffer{}, - netClient: &InterfaceGetterMock{ - interfaces: []net.Interface{ - {Name: "eth0"}, - }, - }, - }, - args: args, - wantErr: false, - wantNumEps: 3, - }, { name: "SwiftV2 Add Infra and InfiniteBand", plugin: &NetPlugin{ @@ -1477,8 +1485,6 @@ func TestPluginSwiftV2MultipleAddDelete(t *testing.T) { NICType: cns.BackendNIC, }, }), - report: &telemetry.CNIReport{}, - tb: &telemetry.TelemetryBuffer{}, netClient: &InterfaceGetterMock{ interfaces: []net.Interface{ {Name: "eth0"}, @@ -1502,33 +1508,6 @@ func TestPluginSwiftV2MultipleAddDelete(t *testing.T) { NICType: cns.NodeNetworkInterfaceFrontendNIC, }, }), - report: &telemetry.CNIReport{}, - tb: &telemetry.TelemetryBuffer{}, - netClient: &InterfaceGetterMock{ - interfaces: []net.Interface{ - {Name: "eth0"}, - }, - }, - }, - args: args, - wantErr: false, - wantNumEps: 2, - }, - { - name: "SwiftV2 Add Two Accelnet", - plugin: &NetPlugin{ - Plugin: plugin, - nm: acnnetwork.NewMockNetworkmanager(acnnetwork.NewMockEndpointClient(nil)), - ipamInvoker: NewCustomMockIpamInvoker(map[string]acnnetwork.InterfaceInfo{ - "eth1": { - NICType: cns.NodeNetworkInterfaceAccelnetFrontendNIC, - }, - "eth2": { - NICType: cns.NodeNetworkInterfaceAccelnetFrontendNIC, - }, - }), - report: &telemetry.CNIReport{}, - tb: &telemetry.TelemetryBuffer{}, netClient: &InterfaceGetterMock{ interfaces: []net.Interface{ {Name: "eth0"}, @@ -1560,8 +1539,6 @@ func TestPluginSwiftV2MultipleAddDelete(t *testing.T) { NICType: cns.NodeNetworkInterfaceFrontendNIC, }, }), - report: &telemetry.CNIReport{}, - tb: &telemetry.TelemetryBuffer{}, netClient: &InterfaceGetterMock{ interfaces: []net.Interface{ {Name: "eth0"}, @@ -1573,70 +1550,6 @@ func TestPluginSwiftV2MultipleAddDelete(t *testing.T) { wantErr: true, wantErrMsg: "failed to create endpoint: MockEndpointClient Error : AddEndpoints Delegated VM NIC failed", }, - { - name: "SwiftV2 Partial Add fail with Accelnet VM NIC", - plugin: &NetPlugin{ - Plugin: plugin, - nm: acnnetwork.NewMockNetworkmanager(acnnetwork.NewMockEndpointClient(func(ep *acnnetwork.EndpointInfo) error { - if ep.NICType == cns.NodeNetworkInterfaceAccelnetFrontendNIC { - return acnnetwork.NewErrorMockEndpointClient("AddEndpoints Accelnet VM NIC failed") //nolint:wrapcheck // ignore wrapping for test - } - - return nil - })), - ipamInvoker: NewCustomMockIpamInvoker(map[string]acnnetwork.InterfaceInfo{ - "eth0": { - NICType: cns.NodeNetworkInterfaceFrontendNIC, - }, - "eth1": { - NICType: 
cns.NodeNetworkInterfaceAccelnetFrontendNIC, - }, - }), - report: &telemetry.CNIReport{}, - tb: &telemetry.TelemetryBuffer{}, - netClient: &InterfaceGetterMock{ - interfaces: []net.Interface{ - {Name: "eth0"}, - }, - }, - }, - args: args, - wantNumEps: 0, - wantErr: true, - wantErrMsg: "failed to create endpoint: MockEndpointClient Error : AddEndpoints Accelnet VM NIC failed", - }, - { - name: "SwiftV2 Partial Add fail with Infra+Accelnet VM NIC", - plugin: &NetPlugin{ - Plugin: plugin, - nm: acnnetwork.NewMockNetworkmanager(acnnetwork.NewMockEndpointClient(func(ep *acnnetwork.EndpointInfo) error { - if ep.NICType == cns.NodeNetworkInterfaceAccelnetFrontendNIC { - return acnnetwork.NewErrorMockEndpointClient("AddEndpoints Accelnet VM NIC failed") //nolint:wrapcheck // ignore wrapping for test - } - - return nil - })), - ipamInvoker: NewCustomMockIpamInvoker(map[string]acnnetwork.InterfaceInfo{ - "eth0": { - NICType: cns.InfraNIC, - }, - "eth1": { - NICType: cns.NodeNetworkInterfaceAccelnetFrontendNIC, - }, - }), - report: &telemetry.CNIReport{}, - tb: &telemetry.TelemetryBuffer{}, - netClient: &InterfaceGetterMock{ - interfaces: []net.Interface{ - {Name: "eth0"}, - }, - }, - }, - args: args, - wantNumEps: 0, - wantErr: true, - wantErrMsg: "failed to create endpoint: MockEndpointClient Error : AddEndpoints Accelnet VM NIC failed", - }, } for _, tt := range tests { @@ -1668,6 +1581,205 @@ func TestPluginSwiftV2MultipleAddDelete(t *testing.T) { } } +// test findMasterInterface with different NIC types +func TestFindMasterInterface(t *testing.T) { + plugin, _ := cni.NewPlugin("name", "0.3.0") + endpointIndex := 1 + macAddress := "12:34:56:78:90:ab" + + tests := []struct { + name string + endpointOpt createEpInfoOpt + plugin *NetPlugin + nwCfg *cni.NetworkConfig + want string // expected master interface name + wantErr bool + }{ + { + name: "Find master interface by infraNIC with a master interfaceName in swiftv1 path", + plugin: &NetPlugin{ + Plugin: plugin, + netClient: &InterfaceGetterMock{ + interfaces: []net.Interface{ + { + Name: "eth0", + }, + }, + }, + }, + endpointOpt: createEpInfoOpt{ + ipamAddConfig: &IPAMAddConfig{ + nwCfg: &cni.NetworkConfig{ + Master: "eth0", // return this master interface name + }, + }, + ifInfo: &acnnetwork.InterfaceInfo{ + NICType: cns.InfraNIC, + HostSubnetPrefix: net.IPNet{ + IP: net.ParseIP("10.255.0.0"), + Mask: net.CIDRMask(24, 32), + }, + }, + }, + want: "eth0", + wantErr: false, + }, + { + name: "Find master interface by one infraNIC", + plugin: &NetPlugin{ + Plugin: plugin, + netClient: &InterfaceGetterMock{ + interfaces: []net.Interface{ + { + Index: 0, + Name: "eth0", + }, + }, + interfaceAddrs: map[string][]net.Addr{ + "eth0": { + &net.IPNet{ + IP: net.IPv4(10, 255, 0, 1), + Mask: net.IPv4Mask(255, 255, 255, 0), + }, + &net.IPNet{ + IP: net.IPv4(192, 168, 0, 1), + Mask: net.IPv4Mask(255, 255, 255, 0), + }, + }, + }, + }, + }, + endpointOpt: createEpInfoOpt{ + ipamAddConfig: &IPAMAddConfig{ + nwCfg: &cni.NetworkConfig{ + Master: "", + }, + }, + ifInfo: &acnnetwork.InterfaceInfo{ + NICType: cns.InfraNIC, + HostSubnetPrefix: net.IPNet{ + IP: net.ParseIP("10.255.0.0"), + Mask: net.CIDRMask(24, 32), + }, + }, + }, + want: "eth0", + wantErr: false, + }, + { + name: "Find master interface from multiple infraNIC interfaces", + plugin: &NetPlugin{ + Plugin: plugin, + netClient: &InterfaceGetterMock{ + interfaces: []net.Interface{ + { + Index: 0, + Name: "eth0", + }, + { + Index: 1, + Name: "eth1", + }, + }, + interfaceAddrs: map[string][]net.Addr{ + "eth0": 
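// [Editor's aside — hypothetical sketch of the selection logic exercised here, inferred from
// these fixtures] for an InfraNIC with no explicit Master, the plugin appears to pick the
// interface owning an address inside the endpoint's HostSubnetPrefix (10.255.0.0/24 below
// selects eth0), along these lines:
//
//	ifaces, _ := plugin.netClient.GetNetworkInterfaces()
//	for _, iface := range ifaces {
//		addrs, _ := plugin.netClient.GetNetworkInterfaceAddrs(&iface)
//		for _, a := range addrs {
//			if ipnet, ok := a.(*net.IPNet); ok && hostSubnet.Contains(ipnet.IP) {
//				return iface.Name // hostSubnet = ifInfo.HostSubnetPrefix
//			}
//		}
//	}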
{ + &net.IPNet{ + IP: net.IPv4(10, 255, 0, 1), + Mask: net.IPv4Mask(255, 255, 255, 0), + }, + &net.IPNet{ + IP: net.IPv4(192, 168, 0, 1), + Mask: net.IPv4Mask(255, 255, 255, 0), + }, + }, + "eth1": { + &net.IPNet{ + IP: net.IPv4(20, 255, 0, 1), + Mask: net.IPv4Mask(255, 255, 255, 0), + }, + &net.IPNet{ + IP: net.IPv4(30, 255, 0, 1), + Mask: net.IPv4Mask(255, 255, 255, 0), + }, + }, + }, + }, + }, + endpointOpt: createEpInfoOpt{ + ipamAddConfig: &IPAMAddConfig{ + nwCfg: &cni.NetworkConfig{ + Master: "", + }, + }, + ifInfo: &acnnetwork.InterfaceInfo{ + NICType: cns.InfraNIC, + HostSubnetPrefix: net.IPNet{ + IP: net.ParseIP("20.255.0.0"), + Mask: net.CIDRMask(24, 32), + }, + }, + }, + want: "eth1", + wantErr: false, + }, + { + name: "Find master interface by delegatedVMNIC", + plugin: &NetPlugin{ + Plugin: plugin, + netClient: &InterfaceGetterMock{ + interfaces: []net.Interface{ + { + Name: "eth1", + HardwareAddr: net.HardwareAddr(macAddress), + }, + }, + }, + }, + endpointOpt: createEpInfoOpt{ + ifInfo: &acnnetwork.InterfaceInfo{ + NICType: cns.NodeNetworkInterfaceFrontendNIC, + MacAddress: net.HardwareAddr(macAddress), + }, + }, + want: "eth1", + wantErr: false, + }, + { + name: "Find master interface by backend NIC", + endpointOpt: createEpInfoOpt{ + endpointIndex: endpointIndex, + ifInfo: &acnnetwork.InterfaceInfo{ + NICType: cns.BackendNIC, + MacAddress: net.HardwareAddr(macAddress), + }, + }, + want: ibInterfacePrefix + strconv.Itoa(endpointIndex), + wantErr: false, + }, + { + name: "Find master interface by invalid NIC type", + endpointOpt: createEpInfoOpt{ + endpointIndex: endpointIndex, + ifInfo: &acnnetwork.InterfaceInfo{ + NICType: "invalidType", + MacAddress: net.HardwareAddr(macAddress), + }, + }, + want: "", // default interface name is "" + wantErr: false, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + masterInterface := tt.plugin.findMasterInterface(&tt.endpointOpt) + t.Logf("masterInterface is %s\n", masterInterface) + require.Equal(t, tt.want, masterInterface) + }) + } +} + func TestValidateArgs(t *testing.T) { p, _ := cni.NewPlugin("name", "0.3.0") plugin := &NetPlugin{ diff --git a/cni/network/network_windows.go b/cni/network/network_windows.go index d78865e5ac..f7d2e5defb 100644 --- a/cni/network/network_windows.go +++ b/cni/network/network_windows.go @@ -41,9 +41,10 @@ func addSnatForDNS(_ string, _ *network.EndpointInfo, _ *network.InterfaceInfo) func setNetworkOptions(cnsNwConfig *cns.GetNetworkContainerResponse, nwInfo *network.EndpointInfo) { if cnsNwConfig != nil && cnsNwConfig.MultiTenancyInfo.ID != 0 { logger.Info("Setting Network Options") - vlanMap := make(map[string]interface{}) - vlanMap[network.VlanIDKey] = strconv.Itoa(cnsNwConfig.MultiTenancyInfo.ID) - nwInfo.Options[dockerNetworkOption] = vlanMap + optionsMap := make(map[string]interface{}) + optionsMap[network.VlanIDKey] = strconv.Itoa(cnsNwConfig.MultiTenancyInfo.ID) + logger.Info("Add vlanIDKey to optionsMap", zap.String("vlanIDKey", network.VlanIDKey)) + nwInfo.Options[dockerNetworkOption] = optionsMap } } @@ -73,8 +74,7 @@ func (plugin *NetPlugin) getNetworkName(netNs string, interfaceInfo *network.Int determineWinVer() // Swiftv2 L1VH Network Name swiftv2NetworkNamePrefix := "azure-" - if interfaceInfo != nil && (interfaceInfo.NICType == cns.NodeNetworkInterfaceFrontendNIC || interfaceInfo.NICType == cns.BackendNIC || - interfaceInfo.NICType == cns.NodeNetworkInterfaceAccelnetFrontendNIC) { + if interfaceInfo != nil && (interfaceInfo.NICType == 
cns.NodeNetworkInterfaceFrontendNIC || interfaceInfo.NICType == cns.BackendNIC) { logger.Info("swiftv2", zap.String("network name", interfaceInfo.MacAddress.String())) return swiftv2NetworkNamePrefix + interfaceInfo.MacAddress.String(), nil } diff --git a/cni/network/network_windows_test.go b/cni/network/network_windows_test.go index 5898c0b0c9..8c47d739ca 100644 --- a/cni/network/network_windows_test.go +++ b/cni/network/network_windows_test.go @@ -7,6 +7,7 @@ import ( "encoding/json" "fmt" "net" + "regexp" "testing" "github.com/Azure/azure-container-networking/cni" @@ -14,9 +15,9 @@ import ( "github.com/Azure/azure-container-networking/network" "github.com/Azure/azure-container-networking/network/hnswrapper" "github.com/Azure/azure-container-networking/network/policy" - "github.com/Azure/azure-container-networking/telemetry" + "github.com/Azure/azure-container-networking/platform" hnsv2 "github.com/Microsoft/hcsshim/hcn" - "github.com/containernetworking/cni/pkg/skel" + cniSkel "github.com/containernetworking/cni/pkg/skel" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -69,78 +70,6 @@ func TestAddWithRunTimeNetPolicies(t *testing.T) { } } -func TestPluginSecondAddSamePodWindows(t *testing.T) { - plugin, _ := cni.NewPlugin("name", "0.3.0") - - tests := []struct { - name string - methods []string - cniArgs skel.CmdArgs - plugin *NetPlugin - wantErr bool - wantErrMsg string - }{ - { - name: "CNI consecutive add already hot attached", - methods: []string{"ADD", "ADD"}, - cniArgs: skel.CmdArgs{ - ContainerID: "test1-container", - Netns: "test1-container", - StdinData: nwCfg.Serialize(), - Args: fmt.Sprintf("K8S_POD_NAME=%v;K8S_POD_NAMESPACE=%v", "container1", "container1-ns"), - IfName: eth0IfName, - }, - plugin: &NetPlugin{ - Plugin: plugin, - nm: network.NewMockNetworkmanager(network.NewMockEndpointClient(nil)), - ipamInvoker: NewMockIpamInvoker(false, false, false, false, false, false, false), - report: &telemetry.CNIReport{}, - tb: &telemetry.TelemetryBuffer{}, - }, - wantErr: false, - }, - { - name: "CNI consecutive add not hot attached", - methods: []string{"ADD", "ADD"}, - cniArgs: skel.CmdArgs{ - ContainerID: "test1-container", - Netns: "test1-container", - StdinData: nwCfg.Serialize(), - Args: fmt.Sprintf("K8S_POD_NAME=%v;K8S_POD_NAMESPACE=%v", "container1", "container1-ns"), - IfName: eth0IfName, - }, - plugin: &NetPlugin{ - Plugin: plugin, - nm: network.NewMockNetworkmanager(network.NewMockEndpointClient(nil)), - ipamInvoker: NewMockIpamInvoker(false, false, false, false, false, false, false), - report: &telemetry.CNIReport{}, - tb: &telemetry.TelemetryBuffer{}, - }, - wantErr: false, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - var err error - for _, method := range tt.methods { - if method == "ADD" { - err = tt.plugin.Add(&tt.cniArgs) - } - } - - if tt.wantErr { - require.Error(t, err) - } else { - require.NoError(t, err) - endpoints, _ := tt.plugin.nm.GetAllEndpoints(nwCfg.Name) - require.Condition(t, assert.Comparison(func() bool { return len(endpoints) == 1 }), "Expected 2 but got %v", len(endpoints)) - } - }) - } -} - func TestSetNetworkOptions(t *testing.T) { tests := []struct { name string @@ -478,9 +407,7 @@ func TestGetNetworkNameFromCNS(t *testing.T) { plugin: &NetPlugin{ Plugin: plugin, nm: network.NewMockNetworkmanager(network.NewMockEndpointClient(nil)), - ipamInvoker: NewMockIpamInvoker(false, false, false, false, false, false, false), - report: &telemetry.CNIReport{}, - tb: 
&telemetry.TelemetryBuffer{}, + ipamInvoker: NewMockIpamInvoker(false, false, false, false, false), }, netNs: "net", nwCfg: &cni.NetworkConfig{ @@ -511,9 +438,7 @@ func TestGetNetworkNameFromCNS(t *testing.T) { plugin: &NetPlugin{ Plugin: plugin, nm: network.NewMockNetworkmanager(network.NewMockEndpointClient(nil)), - ipamInvoker: NewMockIpamInvoker(false, false, false, false, false, false, false), - report: &telemetry.CNIReport{}, - tb: &telemetry.TelemetryBuffer{}, + ipamInvoker: NewMockIpamInvoker(false, false, false, false, false), }, netNs: "net", nwCfg: &cni.NetworkConfig{ @@ -544,9 +469,7 @@ func TestGetNetworkNameFromCNS(t *testing.T) { plugin: &NetPlugin{ Plugin: plugin, nm: network.NewMockNetworkmanager(network.NewMockEndpointClient(nil)), - ipamInvoker: NewMockIpamInvoker(false, false, false, false, false, false, false), - report: &telemetry.CNIReport{}, - tb: &telemetry.TelemetryBuffer{}, + ipamInvoker: NewMockIpamInvoker(false, false, false, false, false), }, netNs: "net", nwCfg: &cni.NetworkConfig{ @@ -577,9 +500,7 @@ func TestGetNetworkNameFromCNS(t *testing.T) { plugin: &NetPlugin{ Plugin: plugin, nm: network.NewMockNetworkmanager(network.NewMockEndpointClient(nil)), - ipamInvoker: NewMockIpamInvoker(false, false, false, false, false, false, false), - report: &telemetry.CNIReport{}, - tb: &telemetry.TelemetryBuffer{}, + ipamInvoker: NewMockIpamInvoker(false, false, false, false, false), }, netNs: "", nwCfg: &cni.NetworkConfig{ @@ -610,9 +531,7 @@ func TestGetNetworkNameFromCNS(t *testing.T) { plugin: &NetPlugin{ Plugin: plugin, nm: network.NewMockNetworkmanager(network.NewMockEndpointClient(nil)), - ipamInvoker: NewMockIpamInvoker(false, false, false, false, false, false, false), - report: &telemetry.CNIReport{}, - tb: &telemetry.TelemetryBuffer{}, + ipamInvoker: NewMockIpamInvoker(false, false, false, false, false), }, netNs: "azure", nwCfg: &cni.NetworkConfig{ @@ -663,7 +582,7 @@ func TestGetNetworkNameSwiftv2FromCNS(t *testing.T) { netNs string nwCfg *cni.NetworkConfig interfaceInfo *network.InterfaceInfo - want net.HardwareAddr + want string wantErr bool }{ { @@ -671,9 +590,7 @@ func TestGetNetworkNameSwiftv2FromCNS(t *testing.T) { plugin: &NetPlugin{ Plugin: plugin, nm: network.NewMockNetworkmanager(network.NewMockEndpointClient(nil)), - ipamInvoker: NewMockIpamInvoker(false, false, false, true, false, false, false), - report: &telemetry.CNIReport{}, - tb: &telemetry.TelemetryBuffer{}, + ipamInvoker: NewMockIpamInvoker(false, false, false, true, false), }, netNs: "azure", nwCfg: &cni.NetworkConfig{ @@ -685,7 +602,7 @@ func TestGetNetworkNameSwiftv2FromCNS(t *testing.T) { MacAddress: parsedMacAddress, NICType: cns.NodeNetworkInterfaceFrontendNIC, }, - want: parsedMacAddress, + want: swiftv2NetworkNamePrefix + parsedMacAddress.String(), wantErr: false, }, { @@ -693,9 +610,7 @@ func TestGetNetworkNameSwiftv2FromCNS(t *testing.T) { plugin: &NetPlugin{ Plugin: plugin, nm: network.NewMockNetworkmanager(network.NewMockEndpointClient(nil)), - ipamInvoker: NewMockIpamInvoker(false, false, false, true, false, false, false), - report: &telemetry.CNIReport{}, - tb: &telemetry.TelemetryBuffer{}, + ipamInvoker: NewMockIpamInvoker(false, false, false, true, false), }, netNs: "azure", nwCfg: &cni.NetworkConfig{ @@ -707,17 +622,31 @@ func TestGetNetworkNameSwiftv2FromCNS(t *testing.T) { MacAddress: parsedMacAddress, NICType: cns.BackendNIC, }, - want: parsedMacAddress, + want: swiftv2NetworkNamePrefix + parsedMacAddress.String(), wantErr: false, }, { - name: "Get Network Name from CNS 
for swiftv2 AccelnetNIC", + name: "Unhappy path: Get Network Name from CNS for swiftv2 AccelnetNIC with empty interfaceInfo", plugin: &NetPlugin{ Plugin: plugin, nm: network.NewMockNetworkmanager(network.NewMockEndpointClient(nil)), - ipamInvoker: NewMockIpamInvoker(false, false, false, false, false, true, false), - report: &telemetry.CNIReport{}, - tb: &telemetry.TelemetryBuffer{}, + ipamInvoker: NewMockIpamInvoker(false, false, false, false, false), + }, + netNs: "azure", + nwCfg: &cni.NetworkConfig{ + CNIVersion: "0.3.0", + MultiTenancy: false, + }, + interfaceInfo: &network.InterfaceInfo{}, // return empty network name with empty interfaceInfo + want: "", + wantErr: false, + }, + { + name: "Unhappy path: Get Network Name from CNS for swiftv2 AccelnetNIC with invalid nicType", + plugin: &NetPlugin{ + Plugin: plugin, + nm: network.NewMockNetworkmanager(network.NewMockEndpointClient(nil)), + ipamInvoker: NewMockIpamInvoker(false, false, false, false, false), }, netNs: "azure", nwCfg: &cni.NetworkConfig{ @@ -727,9 +656,9 @@ func TestGetNetworkNameSwiftv2FromCNS(t *testing.T) { interfaceInfo: &network.InterfaceInfo{ Name: "swiftv2L1VHAccelnetInterface", MacAddress: parsedMacAddress, - NICType: cns.NodeNetworkInterfaceAccelnetFrontendNIC, - }, - want: parsedMacAddress, + NICType: "invalidNICType", + }, // return empty network name with invalid nic type + want: "", wantErr: false, }, } @@ -738,23 +667,678 @@ func TestGetNetworkNameSwiftv2FromCNS(t *testing.T) { tt := tt t.Run(tt.name, func(t *testing.T) { t.Log(tt.interfaceInfo) + // compare networkNamess networkName, err := tt.plugin.getNetworkName(tt.netNs, tt.interfaceInfo, tt.nwCfg) if tt.wantErr { require.Error(t, err) } else { - expectedMacAddress := swiftv2NetworkNamePrefix + tt.want.String() - require.NoError(t, err) - require.Equal(t, expectedMacAddress, networkName) + require.Equal(t, tt.want, networkName) } + // compare networkIDs networkID, err := tt.plugin.getNetworkID(tt.netNs, tt.interfaceInfo, tt.nwCfg) if tt.wantErr { require.Error(t, err) } else { - expectedMacAddress := swiftv2NetworkNamePrefix + tt.want.String() + require.Equal(t, tt.want, networkID) + } + }) + } +} + +// Test Multitenancy Windows Add (Dualnic) +func TestPluginMultitenancyWindowsAdd(t *testing.T) { + plugin, _ := cni.NewPlugin("test", "0.3.0") + + localNwCfg := cni.NetworkConfig{ + CNIVersion: "0.3.0", + Name: "mulnet", + MultiTenancy: true, + EnableExactMatchForPodName: true, + Master: "eth0", + } + + tests := []struct { + name string + plugin *NetPlugin + args *cniSkel.CmdArgs + wantErr bool + wantErrMsg string + }{ + { + name: "Add Happy path", + plugin: &NetPlugin{ + Plugin: plugin, + nm: network.NewMockNetworkmanager(network.NewMockEndpointClient(nil)), + multitenancyClient: NewMockMultitenancy(false, []*cns.GetNetworkContainerResponse{GetTestCNSResponse1(), GetTestCNSResponse2()}), + }, + + args: &cniSkel.CmdArgs{ + StdinData: localNwCfg.Serialize(), + ContainerID: "test-container", + Netns: "bc526fae-4ba0-4e80-bc90-ad721e5850bf", + Args: fmt.Sprintf("K8S_POD_NAME=%v;K8S_POD_NAMESPACE=%v", "test-pod", "test-pod-ns"), + IfName: eth0IfName, + }, + wantErr: false, + }, + { + name: "Add Fail", + plugin: &NetPlugin{ + Plugin: plugin, + nm: network.NewMockNetworkmanager(network.NewMockEndpointClient(nil)), + multitenancyClient: NewMockMultitenancy(true, []*cns.GetNetworkContainerResponse{GetTestCNSResponse1(), GetTestCNSResponse2()}), + }, + args: &cniSkel.CmdArgs{ + StdinData: localNwCfg.Serialize(), + ContainerID: "test-container", + Netns: 
"test-container", + Args: fmt.Sprintf("K8S_POD_NAME=%v;K8S_POD_NAMESPACE=%v", "test-pod", "test-pod-ns"), + IfName: eth0IfName, + }, + wantErr: true, + wantErrMsg: errMockMulAdd.Error(), + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + err := tt.plugin.Add(tt.args) + if tt.wantErr { + require.Error(t, err) + assert.Contains(t, err.Error(), tt.wantErrMsg, "Expected %v but got %+v", tt.wantErrMsg, err.Error()) + } else { require.NoError(t, err) - require.Equal(t, expectedMacAddress, networkID) + endpoints, _ := tt.plugin.nm.GetAllEndpoints(localNwCfg.Name) + // an extra cns response is added in windows multitenancy to test dualnic + require.Condition(t, assert.Comparison(func() bool { return len(endpoints) == 2 })) } }) } } + +func TestPluginMultitenancyWindowsDelete(t *testing.T) { + plugin := GetTestResources() + plugin.multitenancyClient = NewMockMultitenancy(false, []*cns.GetNetworkContainerResponse{GetTestCNSResponse1(), GetTestCNSResponse2()}) + localNwCfg := cni.NetworkConfig{ + CNIVersion: "0.3.0", + Name: "mulnet", + MultiTenancy: true, + EnableExactMatchForPodName: true, + Master: "eth0", + } + + happyArgs := &cniSkel.CmdArgs{ + StdinData: localNwCfg.Serialize(), + ContainerID: "test-container", + Netns: "test-container", + Args: fmt.Sprintf("K8S_POD_NAME=%v;K8S_POD_NAMESPACE=%v", "test-pod", "test-pod-ns"), + IfName: eth0IfName, + } + + tests := []struct { + name string + methods []string + args *cniSkel.CmdArgs + delArgs *cniSkel.CmdArgs + wantErr bool + wantErrMsg string + }{ + { + name: "Multitenancy delete success", + methods: []string{CNI_ADD, CNI_DEL}, + args: happyArgs, + delArgs: happyArgs, + wantErr: false, + }, + { + name: "Multitenancy delete net not found", + methods: []string{CNI_ADD, CNI_DEL}, + args: happyArgs, + delArgs: &cniSkel.CmdArgs{ + StdinData: (&cni.NetworkConfig{ + CNIVersion: "0.3.0", + Name: "othernet", + MultiTenancy: true, + EnableExactMatchForPodName: true, + Master: "eth0", + }).Serialize(), + ContainerID: "test-container", + Netns: "test-container", + Args: fmt.Sprintf("K8S_POD_NAME=%v;K8S_POD_NAMESPACE=%v", "test-pod", "test-pod-ns"), + IfName: eth0IfName, + }, + wantErr: false, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + var err error + for _, method := range tt.methods { + if method == CNI_ADD { + err = plugin.Add(tt.args) + } else if method == CNI_DEL { + err = plugin.Delete(tt.delArgs) + } + } + if tt.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + endpoints, _ := plugin.nm.GetAllEndpoints(localNwCfg.Name) + require.Condition(t, assert.Comparison(func() bool { return len(endpoints) == 0 })) + } + }) + } +} + +// windows swiftv2 example +func GetTestCNSResponseSecondaryWindows(macAddress string) map[string]network.InterfaceInfo { + parsedMAC, _ := net.ParseMAC(macAddress) + return map[string]network.InterfaceInfo{ + string(cns.InfraNIC): { + IPConfigs: []*network.IPConfig{ + { + Address: *getCIDRNotationForAddress("10.244.2.107/16"), + Gateway: net.ParseIP("10.244.2.1"), + }, + }, + Routes: []network.RouteInfo{ + { + Dst: *getCIDRNotationForAddress("1.1.1.1/24"), + Gw: net.ParseIP("10.244.2.1"), + }, + }, + SkipDefaultRoutes: true, + NICType: cns.InfraNIC, + HostSubnetPrefix: *getCIDRNotationForAddress("20.224.0.0/16"), + EndpointPolicies: []policy.Policy{ + { + Type: policy.EndpointPolicy, + Data: GetRawACLPolicy(), + }, + }, + }, + macAddress: { + MacAddress: parsedMAC, + IPConfigs: []*network.IPConfig{ + { + Address: 
*getCIDRNotationForAddress("10.241.0.21/16"), + Gateway: net.ParseIP("10.241.0.1"), + }, + }, + Routes: []network.RouteInfo{ + { + // just to ensure we don't overwrite if we had more routes + Dst: *getCIDRNotationForAddress("2.2.2.2/24"), + Gw: net.ParseIP("99.244.2.1"), + }, + }, + NICType: cns.NodeNetworkInterfaceFrontendNIC, + EndpointPolicies: []policy.Policy{ + { + Type: policy.EndpointPolicy, + Data: GetRawOutBoundNATPolicy(), + }, + }, + }, + } +} + +func GetRawACLPolicy() (ret json.RawMessage) { + var data map[string]interface{} + formatted := []byte(`{ + "Type": "ACL", + "Protocols": "6", + "Action": "Block", + "Direction": "Out", + "RemoteAddresses": "168.63.129.16/32", + "RemotePorts": "80", + "Priority": 200, + "RuleType": "Switch" + }`) + json.Unmarshal(formatted, &data) // nolint + minified, _ := json.Marshal(data) // nolint + ret = json.RawMessage(minified) + return ret +} + +func GetRawOutBoundNATPolicy() (ret json.RawMessage) { + var data map[string]interface{} + formatted := []byte(`{ + "Type": "OutBoundNAT", + "ExceptionList": [ + "10.224.0.0/16" + ] + }`) + json.Unmarshal(formatted, &data) // nolint + minified, _ := json.Marshal(data) // nolint + ret = json.RawMessage(minified) + return ret +} + +// Happy path scenario for add and delete +func TestPluginWindowsAdd(t *testing.T) { + resources := GetTestResources() + localNwCfg := cni.NetworkConfig{ + CNIVersion: "0.3.0", + Name: "mulnet", + MultiTenancy: true, + EnableExactMatchForPodName: true, + Master: "eth0", + // these are added to test that policies propagate to endpoint info + AdditionalArgs: []cni.KVPair{ + { + Name: "EndpointPolicy", + Value: GetRawOutBoundNATPolicy(), + }, + { + Name: "EndpointPolicy", + Value: GetRawACLPolicy(), + }, + }, + WindowsSettings: cni.WindowsSettings{ // included to test functionality + EnableLoopbackDSR: true, + }, + } + nwCfg := cni.NetworkConfig{ + CNIVersion: "0.3.0", + Name: "net", + MultiTenancy: false, + EnableExactMatchForPodName: true, + } + macAddress := "60:45:bd:76:f6:44" + parsedMACAddress, _ := net.ParseMAC(macAddress) + + type endpointEntry struct { + epInfo *network.EndpointInfo + epIDRegex string + } + + tests := []struct { + name string + plugin *NetPlugin + args *cniSkel.CmdArgs + want []endpointEntry + match func(*network.EndpointInfo, *network.EndpointInfo) bool + }{ + { + name: "Add Happy Path Dual NIC", + plugin: &NetPlugin{ + Plugin: resources.Plugin, + nm: network.NewMockNetworkmanager(network.NewMockEndpointClient(nil)), + multitenancyClient: NewMockMultitenancy(false, []*cns.GetNetworkContainerResponse{GetTestCNSResponse1(), GetTestCNSResponse2()}), + }, + args: &cniSkel.CmdArgs{ + StdinData: localNwCfg.Serialize(), + ContainerID: "test-container", + Netns: "bc526fae-4ba0-4e80-bc90-ad721e5850bf", + Args: fmt.Sprintf("K8S_POD_NAME=%v;K8S_POD_NAMESPACE=%v", "test-pod", "test-pod-ns"), + IfName: eth0IfName, + }, + match: func(ei1, ei2 *network.EndpointInfo) bool { + return ei1.NetworkID == ei2.NetworkID + }, + want: []endpointEntry{ + // should match with GetTestCNSResponse1 + { + epInfo: &network.EndpointInfo{ + ContainerID: "test-container", + Data: map[string]interface{}{ + "cnetAddressSpace": []string(nil), + }, + Routes: []network.RouteInfo{}, + EnableSnatOnHost: true, + EnableMultiTenancy: true, + EnableSnatForDns: true, + PODName: "test-pod", + PODNameSpace: "test-pod-ns", + NICType: cns.InfraNIC, + MasterIfName: eth0IfName, + NetworkID: "mulnet-vlan1-20-0-0-0_24", + NetNsPath: "bc526fae-4ba0-4e80-bc90-ad721e5850bf", + NetNs: 
"bc526fae-4ba0-4e80-bc90-ad721e5850bf", + HostSubnetPrefix: "20.240.0.0/24", + Options: map[string]interface{}{ + dockerNetworkOption: map[string]interface{}{ + "VlanID": "1", + }, + }, + // matches with cns ip configuration + IPAddresses: []net.IPNet{ + { + IP: net.ParseIP("20.0.0.10"), + Mask: getIPNetWithString("20.0.0.10/24").Mask, + }, + }, + // LocalIPConfiguration doesn't seem used in windows + // Constant, in windows, NAT Info comes from + // options > ipamAddConfig > + // cns invoker may populate network.SNATIPKey with the default response received > + // getNATInfo (with nwCfg) > adds nat info based on condition + // typically adds azure dns (168.63.129.16) + NATInfo: []policy.NATInfo{ + { + Destinations: []string{"168.63.129.16"}, + }, + }, + // ip config pod ip + mask(s) from cns > interface info > subnet info + Subnets: []network.SubnetInfo{ + { + Family: platform.AfINET, + // matches cns ip configuration (20.0.0.1/24 == 20.0.0.0/24) + Prefix: *getIPNetWithString("20.0.0.0/24"), + // matches cns ip configuration gateway ip address + Gateway: net.ParseIP("20.0.0.1"), + }, + }, + EndpointPolicies: []policy.Policy{ + { + Type: policy.EndpointPolicy, + Data: GetRawOutBoundNATPolicy(), + }, + { + Type: policy.EndpointPolicy, + Data: GetRawACLPolicy(), + }, + { + Type: policy.EndpointPolicy, + // if enabled we create a loopback dsr policy based on the cns ip config + Data: json.RawMessage(`{"Type":"LoopbackDSR","IPAddress":"20.0.0.10"}`), + }, + }, + NetworkPolicies: []policy.Policy{ + { + Type: policy.EndpointPolicy, + Data: GetRawOutBoundNATPolicy(), + }, + { + Type: policy.EndpointPolicy, + Data: GetRawACLPolicy(), + }, + }, + }, + epIDRegex: `.*`, + }, + // should match with GetTestCNSResponse2 + { + epInfo: &network.EndpointInfo{ + ContainerID: "test-container", + Data: map[string]interface{}{ + "cnetAddressSpace": []string(nil), + }, + Routes: []network.RouteInfo{}, + EnableSnatOnHost: true, + EnableMultiTenancy: true, + EnableSnatForDns: true, + PODName: "test-pod", + PODNameSpace: "test-pod-ns", + NICType: cns.InfraNIC, + MasterIfName: eth0IfName, + NetworkID: "mulnet-vlan2-10-0-0-0_24", + NetNsPath: "bc526fae-4ba0-4e80-bc90-ad721e5850bf", + NetNs: "bc526fae-4ba0-4e80-bc90-ad721e5850bf", + HostSubnetPrefix: "10.240.0.0/24", + Options: map[string]interface{}{ + dockerNetworkOption: map[string]interface{}{ + "VlanID": "2", + }, + }, + IPAddresses: []net.IPNet{ + { + IP: net.ParseIP("10.0.0.10"), + Mask: getIPNetWithString("10.0.0.10/24").Mask, + }, + }, + NATInfo: []policy.NATInfo{ + { + Destinations: []string{"168.63.129.16"}, + }, + }, + Subnets: []network.SubnetInfo{ + { + Family: platform.AfINET, + Prefix: *getIPNetWithString("10.0.0.0/24"), + Gateway: net.ParseIP("10.0.0.1"), + }, + }, + EndpointPolicies: []policy.Policy{ + { + Type: policy.EndpointPolicy, + Data: GetRawOutBoundNATPolicy(), + }, + { + Type: policy.EndpointPolicy, + Data: GetRawACLPolicy(), + }, + { + Type: policy.EndpointPolicy, + Data: json.RawMessage(`{"Type":"LoopbackDSR","IPAddress":"10.0.0.10"}`), + }, + }, + NetworkPolicies: []policy.Policy{ + { + Type: policy.EndpointPolicy, + Data: GetRawOutBoundNATPolicy(), + }, + { + Type: policy.EndpointPolicy, + Data: GetRawACLPolicy(), + }, + }, + }, + epIDRegex: `.*`, + }, + }, + }, + { + // Based on a live swiftv2 windows cluster's (infra + delegated) cns invoker response + name: "Add Happy Path Swiftv2", + plugin: &NetPlugin{ + Plugin: resources.Plugin, + nm: network.NewMockNetworkmanager(network.NewMockEndpointClient(nil)), + + ipamInvoker: 
NewCustomMockIpamInvoker(GetTestCNSResponseSecondaryWindows(macAddress)), + netClient: &InterfaceGetterMock{ + // used in secondary find master interface + interfaces: []net.Interface{ + { + Name: "secondary", + HardwareAddr: parsedMACAddress, + }, + { + Name: "primary", + HardwareAddr: net.HardwareAddr{}, + }, + }, + // used in primary find master interface + interfaceAddrs: map[string][]net.Addr{ + "primary": { + // match with the host subnet prefix to know that this ip belongs to the host + getCIDRNotationForAddress("20.224.0.0/16"), + }, + }, + }, + }, + args: &cniSkel.CmdArgs{ + StdinData: nwCfg.Serialize(), + ContainerID: "test-container", + Netns: "bc526fae-4ba0-4e80-bc90-ad721e5850bf", + Args: fmt.Sprintf("K8S_POD_NAME=%v;K8S_POD_NAMESPACE=%v", "test-pod", "test-pod-ns"), + IfName: eth0IfName, + }, + match: func(ei1, ei2 *network.EndpointInfo) bool { + return ei1.NICType == ei2.NICType + }, + want: []endpointEntry{ + // should match infra + { + epInfo: &network.EndpointInfo{ + ContainerID: "test-container", + Data: map[string]interface{}{}, + Routes: []network.RouteInfo{ + { + Dst: *getCIDRNotationForAddress("1.1.1.1/24"), + Gw: net.ParseIP("10.244.2.1"), + }, + }, + PODName: "test-pod", + PODNameSpace: "test-pod-ns", + NICType: cns.InfraNIC, + SkipDefaultRoutes: true, + MasterIfName: "primary", + NetworkID: "net", + NetNsPath: "bc526fae-4ba0-4e80-bc90-ad721e5850bf", + NetNs: "bc526fae-4ba0-4e80-bc90-ad721e5850bf", + HostSubnetPrefix: "20.224.0.0/16", + Options: map[string]interface{}{}, + // matches with cns ip configuration + IPAddresses: []net.IPNet{ + { + IP: net.ParseIP("10.244.2.107"), + Mask: getIPNetWithString("10.244.2.107/16").Mask, + }, + }, + NATInfo: nil, + // ip config pod ip + mask(s) from cns > interface info > subnet info + Subnets: []network.SubnetInfo{ + { + Family: platform.AfINET, + Prefix: *getIPNetWithString("10.244.0.0/16"), + // matches cns ip configuration gateway ip address + Gateway: net.ParseIP("10.244.2.1"), + }, + }, + EndpointPolicies: []policy.Policy{ + { + Type: policy.EndpointPolicy, + Data: GetRawACLPolicy(), + }, + }, + }, + epIDRegex: `.*`, + }, + // should match secondary + { + epInfo: &network.EndpointInfo{ + MacAddress: parsedMACAddress, + ContainerID: "test-container", + Data: map[string]interface{}{}, + Routes: []network.RouteInfo{ + { + // just to ensure we don't overwrite if we had more routes + Dst: *getCIDRNotationForAddress("2.2.2.2/24"), + Gw: net.ParseIP("99.244.2.1"), + }, + }, + PODName: "test-pod", + PODNameSpace: "test-pod-ns", + NICType: cns.NodeNetworkInterfaceFrontendNIC, + SkipDefaultRoutes: false, + MasterIfName: "secondary", + NetworkID: "azure-" + macAddress, + NetNsPath: "bc526fae-4ba0-4e80-bc90-ad721e5850bf", + NetNs: "bc526fae-4ba0-4e80-bc90-ad721e5850bf", + HostSubnetPrefix: "", + Options: map[string]interface{}{}, + // matches with cns ip configuration + IPAddresses: []net.IPNet{ + { + IP: net.ParseIP("10.241.0.21"), + Mask: getIPNetWithString("10.241.0.21/16").Mask, + }, + }, + NATInfo: nil, + // ip config pod ip + mask(s) from cns > interface info > subnet info + Subnets: []network.SubnetInfo{ + { + Family: platform.AfINET, + Prefix: *getIPNetWithString("10.241.0.21/16"), + // matches cns ip configuration gateway ip address + Gateway: net.ParseIP("10.241.0.1"), + }, + }, + EndpointPolicies: []policy.Policy{ + { + Type: policy.EndpointPolicy, + Data: GetRawOutBoundNATPolicy(), + }, + }, + }, + epIDRegex: `.*`, + }, + }, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + err := 
tt.plugin.Add(tt.args) + require.NoError(t, err) + allEndpoints, _ := tt.plugin.nm.GetAllEndpoints("") + require.Len(t, allEndpoints, len(tt.want)) + for _, wantedEndpointEntry := range tt.want { + epID := "none" + for _, endpointInfo := range allEndpoints { + if !tt.match(wantedEndpointEntry.epInfo, endpointInfo) { + continue + } + // save the endpoint id before removing it + epID = endpointInfo.EndpointID + require.Regexp(t, regexp.MustCompile(wantedEndpointEntry.epIDRegex), epID) + + // omit endpoint id and ifname fields as they are nondeterministic + endpointInfo.EndpointID = "" + endpointInfo.IfName = "" + + require.Equal(t, wantedEndpointEntry.epInfo, endpointInfo) + } + if epID == "none" { + t.Fail() + } + err = tt.plugin.nm.DeleteEndpoint("", epID, nil) + require.NoError(t, err) + } + + // confirm separate entities + // that is, if one is modified, the other should not be modified + epInfos := []*network.EndpointInfo{} + for _, val := range allEndpoints { + epInfos = append(epInfos, val) + } + if len(epInfos) > 1 { + // ensure the endpoint data and options are separate entities when in separate endpoint infos + epInfo1 := epInfos[0] + epInfo2 := epInfos[1] + epInfo1.Data["dummy"] = "dummy value" + epInfo1.Options["dummy"] = "another dummy value" + require.NotEqual(t, epInfo1.Data, epInfo2.Data) + require.NotEqual(t, epInfo1.Options, epInfo2.Options) + + // ensure the endpoint policy slices are separate entities when in separate endpoint infos + if len(epInfo1.EndpointPolicies) > 0 { + epInfo1.EndpointPolicies[0] = policy.Policy{ + Type: policy.ACLPolicy, + } + require.Len(t, epInfo1.EndpointPolicies, 1) + require.Len(t, epInfo2.EndpointPolicies, 1) + require.NotEqual(t, epInfo1.EndpointPolicies, epInfo2.EndpointPolicies) + } + // ensure the network policy slices are separate entities when in separate endpoint infos + if len(epInfo1.NetworkPolicies) > 0 { + epInfo1.NetworkPolicies[0] = policy.Policy{ + Type: policy.ACLPolicy, + } + require.NotEqual(t, epInfo1.NetworkPolicies, epInfo2.NetworkPolicies) + } + } + + // ensure deleted + require.Empty(t, allEndpoints) + }) + } +} diff --git a/cni/network/plugin/main.go b/cni/network/plugin/main.go index 89b36994ac..08ccc3cbd1 100644 --- a/cni/network/plugin/main.go +++ b/cni/network/plugin/main.go @@ -8,7 +8,6 @@ import ( "os" "time" - "github.com/Azure/azure-container-networking/aitelemetry" "github.com/Azure/azure-container-networking/cni" "github.com/Azure/azure-container-networking/cni/api" zaplog "github.com/Azure/azure-container-networking/cni/log" @@ -16,14 +15,12 @@ import ( "github.com/Azure/azure-container-networking/common" "github.com/Azure/azure-container-networking/nns" "github.com/Azure/azure-container-networking/platform" - "github.com/Azure/azure-container-networking/store" "github.com/Azure/azure-container-networking/telemetry" "github.com/pkg/errors" "go.uber.org/zap" ) const ( - hostNetAgentURL = "http://168.63.129.16/machine/plugins?comp=netagent&type=cnireport" ipamQueryURL = "http://168.63.129.16/machine/plugins?comp=nmagent&type=getinterfaceinfov1" pluginName = "CNI" telemetryNumRetries = 5 @@ -53,23 +50,16 @@ func printVersion() { } func rootExecute() error { - var ( - config common.PluginConfig - tb *telemetry.TelemetryBuffer - ) + var config common.PluginConfig config.Version = version reportManager := &telemetry.ReportManager{ - HostNetAgentURL: hostNetAgentURL, - ContentType: telemetry.ContentType, Report: &telemetry.CNIReport{ - Context: "AzureCNI", - SystemDetails: telemetry.SystemInfo{}, - InterfaceDetails: 
telemetry.InterfaceInfo{},
-			BridgeDetails:    telemetry.BridgeInfo{},
-			Version:          version,
-			Logger:           logger,
+			Context:       "AzureCNI",
+			SystemDetails: telemetry.SystemInfo{},
+			Version:       version,
+			Logger:        logger,
 		},
 	}
 
@@ -101,32 +91,20 @@ func rootExecute() error {
 		cniReport.VMUptime = upTime.Format("2006-01-02 15:04:05")
 	}
 
-	// CNI Acquires lock
+	// CNI attempts to acquire the lock
 	if err = netPlugin.Plugin.InitializeKeyValueStore(&config); err != nil {
+		// Error acquiring lock
 		network.PrintCNIError(fmt.Sprintf("Failed to initialize key-value store of network plugin: %v", err))
-		tb = telemetry.NewTelemetryBuffer(logger)
-		if tberr := tb.Connect(); tberr != nil {
-			logger.Error("Cannot connect to telemetry service", zap.Error(tberr))
-			return errors.Wrap(err, "lock acquire error")
-		}
+		// Connect to the telemetry service if it is running; otherwise telemetry is skipped
+		telemetry.AIClient.ConnectTelemetry(logger)
+		defer telemetry.AIClient.DisconnectTelemetry()
 
-		network.ReportPluginError(reportManager, tb, err)
-
-		if errors.Is(err, store.ErrTimeoutLockingStore) {
-			var cniMetric telemetry.AIMetric
-			cniMetric.Metric = aitelemetry.Metric{
-				Name:             telemetry.CNILockTimeoutStr,
-				Value:            1.0,
-				CustomDimensions: make(map[string]string),
-			}
-			sendErr := telemetry.SendCNIMetric(&cniMetric, tb)
-			if sendErr != nil {
-				logger.Error("Couldn't send cnilocktimeout metric", zap.Error(sendErr))
-			}
+		if telemetry.AIClient.IsConnected() {
+			telemetry.AIClient.SendError(err)
+		} else {
+			logger.Error("Not connected to telemetry service, skipping sending error to application insights")
 		}
-
-		tb.Close()
 		return errors.Wrap(err, "lock acquire error")
 	}
 
@@ -139,21 +117,19 @@ func rootExecute() error {
 			os.Exit(1)
 		}
 	}()
-
+	// At this point, the lock is acquired
 	// Start telemetry process if not already started. This should be done inside lock, otherwise multiple process
 	// end up creating/killing telemetry process results in undesired state.
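For orientation, here is a minimal sketch of the flow this hunk moves to; it is not part of the patch, and it assumes only the telemetry.AIClient methods that the diff itself introduces (StartAndConnectTelemetry, DisconnectTelemetry, SetSettings, SendError):

	// Sketch of the stateful CNI telemetry wiring after this change,
	// inferred from the calls added in the hunks below.
	telemetry.AIClient.StartAndConnectTelemetry(logger) // start the service if needed, then connect
	defer telemetry.AIClient.DisconnectTelemetry()
	telemetry.AIClient.SetSettings(cniReport) // replaces netPlugin.SetCNIReport(cniReport, tb)
	if err := netPlugin.Start(&config); err != nil {
		telemetry.AIClient.SendError(err) // replaces network.ReportPluginError(reportManager, tb, err)
	}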
- tb = telemetry.NewTelemetryBuffer(logger) - tb.ConnectToTelemetryService(telemetryNumRetries, telemetryWaitTimeInMilliseconds) - defer tb.Close() - - netPlugin.SetCNIReport(cniReport, tb) + telemetry.AIClient.StartAndConnectTelemetry(logger) + defer telemetry.AIClient.DisconnectTelemetry() + telemetry.AIClient.SetSettings(cniReport) t := time.Now() cniReport.Timestamp = t.Format("2006-01-02 15:04:05") if err = netPlugin.Start(&config); err != nil { network.PrintCNIError(fmt.Sprintf("Failed to start network plugin, err:%v.\n", err)) - network.ReportPluginError(reportManager, tb, err) + telemetry.AIClient.SendError(err) panic("network plugin start fatal error") } @@ -186,13 +162,8 @@ func rootExecute() error { if cniCmd == cni.CmdVersion { return errors.Wrap(err, "Execute netplugin failure") } - netPlugin.Stop() - if err != nil { - network.ReportPluginError(reportManager, tb, err) - } - return errors.Wrap(err, "Execute netplugin failure") } diff --git a/cni/network/stateless/main.go b/cni/network/stateless/main.go index 2b273da7eb..a42647eb14 100644 --- a/cni/network/stateless/main.go +++ b/cni/network/stateless/main.go @@ -24,11 +24,10 @@ import ( var logger = zapLog.CNILogger.With(zap.String("component", "cni-main")) const ( - hostNetAgentURL = "http://168.63.129.16/machine/plugins?comp=netagent&type=cnireport" - ipamQueryURL = "http://168.63.129.16/machine/plugins?comp=nmagent&type=getinterfaceinfov1" - pluginName = "CNI" - name = "azure-vnet" - stateless = true + ipamQueryURL = "http://168.63.129.16/machine/plugins?comp=nmagent&type=getinterfaceinfov1" + pluginName = "CNI" + name = "azure-vnet" + stateless = true ) // Version is populated by make during build. @@ -51,10 +50,7 @@ func printVersion() { } func rootExecute() error { - var ( - config common.PluginConfig - tb *telemetry.TelemetryBuffer - ) + var config common.PluginConfig log.SetName(name) log.SetLevel(log.LevelInfo) @@ -67,14 +63,10 @@ func rootExecute() error { config.Stateless = stateless reportManager := &telemetry.ReportManager{ - HostNetAgentURL: hostNetAgentURL, - ContentType: telemetry.ContentType, Report: &telemetry.CNIReport{ - Context: "AzureCNI", - SystemDetails: telemetry.SystemInfo{}, - InterfaceDetails: telemetry.InterfaceInfo{}, - BridgeDetails: telemetry.BridgeInfo{}, - Version: version, + Context: "AzureCNI", + SystemDetails: telemetry.SystemInfo{}, + Version: version, }, } @@ -112,19 +104,17 @@ func rootExecute() error { } }() - // Connect to the telemetry process. - tb = telemetry.NewTelemetryBuffer(logger) - tb.ConnectToTelemetry() - defer tb.Close() - - netPlugin.SetCNIReport(cniReport, tb) + // Connect to the telemetry process. Does not start the telemetry service if it is not running. 
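+	// Unlike the stateful binary, which starts the telemetry service on demand via
+	// StartAndConnectTelemetry, the stateless plugin only attaches to an already
+	// running service and proceeds without telemetry when none is available.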
+ telemetry.AIClient.ConnectTelemetry(logger) + defer telemetry.AIClient.DisconnectTelemetry() + telemetry.AIClient.SetSettings(cniReport) t := time.Now() cniReport.Timestamp = t.Format("2006-01-02 15:04:05") if err = netPlugin.Start(&config); err != nil { network.PrintCNIError(fmt.Sprintf("Failed to start network plugin, err:%v.\n", err)) - network.ReportPluginError(reportManager, tb, err) + telemetry.AIClient.SendError(err) panic("network plugin start fatal error") } } @@ -151,10 +141,6 @@ func rootExecute() error { } netPlugin.Stop() - if err != nil { - network.ReportPluginError(reportManager, tb, err) - } - return errors.Wrap(err, "Execute netplugin failure") } diff --git a/cni/telemetry/service/telemetrymain.go b/cni/telemetry/service/telemetrymain.go index bf21571d0b..8c278eafe2 100644 --- a/cni/telemetry/service/telemetrymain.go +++ b/cni/telemetry/service/telemetrymain.go @@ -167,8 +167,8 @@ func main() { GetEnvRetryWaitTimeInSecs: config.GetEnvRetryWaitTimeInSecs, } - if tb.CreateAITelemetryHandle(aiConfig, config.DisableAll, config.DisableTrace, config.DisableMetric) != nil { - logger.Error("AI Handle creation error", zap.Error(err)) + if err := tb.CreateAITelemetryHandle(aiConfig, config.DisableAll, config.DisableTrace, config.DisableMetric); err != nil { // nolint + logger.Error("AI Handle creation error:", zap.Error(err)) } logger.Info("Report to host interval", zap.Duration("seconds", config.ReportToHostIntervalInSeconds)) tb.PushData(context.Background()) diff --git a/cnm/api.go b/cnm/api.go deleted file mode 100644 index 5c6a7c76dc..0000000000 --- a/cnm/api.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2017 Microsoft. All rights reserved. -// MIT License - -package cnm - -const ( - // Libnetwork remote plugin paths - activatePath = "/Plugin.Activate" - - // Libnetwork labels - genericData = "com.docker.network.generic" -) - -type OptionMap map[string]interface{} - -// -// Libnetwork remote plugin API -// - -// Error response sent by plugin when a request was decoded but failed. -type errorResponse struct { - Err string -} - -// Request sent by libnetwork for activation. -type activateRequest struct{} - -// Response sent by plugin for activation. -type ActivateResponse struct { - Err string - Implements []string -} diff --git a/cnm/config.json b/cnm/config.json deleted file mode 100644 index 39f079ddd1..0000000000 --- a/cnm/config.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "description": "Azure VNET plugin", - "documentation": "https://github.com/Azure/azure-container-networking/", - "entrypoint": ["/usr/bin/azure-vnet-plugin"], - "interface": { - "types": ["docker.networkdriver/1.0", "docker.ipamdriver/1.0"], - "socket": "azure-vnet.sock" - }, - "network": { - "type": "host" - }, - "mounts": [ - { - "name": "logs", - "description": "Mount /var/log to expose plugin logs to host", - "source": "/var/log", - "destination": "/var/log", - "type": "bind", - "options": ["rbind", "rw"] - }, - { - "name": "modules", - "description": "Mount /lib/modules to load ebtables kernel module on demand", - "source": "/lib/modules", - "destination": "/lib/modules", - "type": "bind", - "options": ["rbind", "r"] - } - ], - "linux": { - "capabilities": ["CAP_SYS_ADMIN", "CAP_NET_ADMIN", "CAP_SYS_MODULE"] - } -} diff --git a/cnm/ipam/api.go b/cnm/ipam/api.go deleted file mode 100644 index a5aa8d673f..0000000000 --- a/cnm/ipam/api.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2017 Microsoft. All rights reserved. 
-// MIT License - -package ipam - -const ( - // Libnetwork IPAM plugin endpoint type - EndpointType = "IpamDriver" - - // Libnetwork IPAM plugin remote API paths - GetCapabilitiesPath = "/IpamDriver.GetCapabilities" - GetAddressSpacesPath = "/IpamDriver.GetDefaultAddressSpaces" - RequestPoolPath = "/IpamDriver.RequestPool" - ReleasePoolPath = "/IpamDriver.ReleasePool" - GetPoolInfoPath = "/IpamDriver.GetPoolInfo" - RequestAddressPath = "/IpamDriver.RequestAddress" - ReleaseAddressPath = "/IpamDriver.ReleaseAddress" - - // Libnetwork IPAM plugin options - OptAddressType = "RequestAddressType" - OptAddressTypeGateway = "com.docker.network.gateway" -) - -// Request sent by libnetwork when querying plugin capabilities. -type GetCapabilitiesRequest struct{} - -// Response sent by plugin when registering its capabilities with libnetwork. -type GetCapabilitiesResponse struct { - Err string - RequiresMACAddress bool - RequiresRequestReplay bool -} - -// Request sent by libnetwork when querying the default address space names. -type GetDefaultAddressSpacesRequest struct{} - -// Response sent by plugin when returning the default address space names. -type GetDefaultAddressSpacesResponse struct { - Err string - LocalDefaultAddressSpace string - GlobalDefaultAddressSpace string -} - -// Request sent by libnetwork when acquiring a reference to an address pool. -type RequestPoolRequest struct { - AddressSpace string - Pool string - SubPool string - Options map[string]string - V6 bool -} - -// Response sent by plugin when an address pool is successfully referenced. -type RequestPoolResponse struct { - Err string - PoolID string - Pool string - Data map[string]string -} - -// Request sent by libnetwork when releasing a previously registered address pool. -type ReleasePoolRequest struct { - PoolID string -} - -// Response sent by plugin when an address pool is successfully released. -type ReleasePoolResponse struct { - Err string -} - -// Request sent when querying address pool information. -type GetPoolInfoRequest struct { - PoolID string -} - -// Response sent by plugin when returning address pool information. -type GetPoolInfoResponse struct { - Err string - Capacity int - Available int - UnhealthyAddresses []string -} - -// Request sent by libnetwork when reserving an address from a pool. -type RequestAddressRequest struct { - PoolID string - Address string - Options map[string]string -} - -// Response sent by plugin when an address is successfully reserved. -type RequestAddressResponse struct { - Err string - Address string - Data map[string]string -} - -// Request sent by libnetwork when releasing an address back to the pool. -type ReleaseAddressRequest struct { - PoolID string - Address string - Options map[string]string -} - -// Response sent by plugin when an address is successfully released. -type ReleaseAddressResponse struct { - Err string -} diff --git a/cnm/ipam/ipam.go b/cnm/ipam/ipam.go deleted file mode 100644 index 4a6d2fec90..0000000000 --- a/cnm/ipam/ipam.go +++ /dev/null @@ -1,313 +0,0 @@ -// Copyright 2017 Microsoft. All rights reserved. -// MIT License - -package ipam - -import ( - "net/http" - - "github.com/Azure/azure-container-networking/cnm" - "github.com/Azure/azure-container-networking/common" - "github.com/Azure/azure-container-networking/ipam" - "github.com/Azure/azure-container-networking/log" -) - -const ( - // Plugin name. - name = "azure-vnet-ipam" - - // Plugin capabilities reported to libnetwork. 
- requiresMACAddress = false - requiresRequestReplay = false - returnCode = 0 - returnStr = "Success" -) - -// IpamPlugin represents a CNM (libnetwork) IPAM plugin. -type ipamPlugin struct { - *cnm.Plugin - am ipam.AddressManager -} - -type IpamPlugin interface { - common.PluginApi -} - -// NewPlugin creates a new IpamPlugin object. -func NewPlugin(config *common.PluginConfig) (IpamPlugin, error) { - // Setup base plugin. - plugin, err := cnm.NewPlugin(name, config.Version, EndpointType) - if err != nil { - return nil, err - } - - // Setup address manager. - am, err := ipam.NewAddressManager() - if err != nil { - return nil, err - } - - config.IpamApi = am - - return &ipamPlugin{ - Plugin: plugin, - am: am, - }, nil -} - -// Start starts the plugin. -func (plugin *ipamPlugin) Start(config *common.PluginConfig) error { - // Initialize base plugin. - err := plugin.Initialize(config) - if err != nil { - log.Printf("[ipam] Failed to initialize base plugin, err:%v.", err) - return err - } - - // Initialize address manager. rehyrdration required on reboot for cnm ipam plugin - err = plugin.am.Initialize(config, true, plugin.Options) - if err != nil { - log.Printf("[ipam] Failed to initialize address manager, err:%v.", err) - return err - } - - // Add protocol handlers. - listener := plugin.Listener - listener.AddEndpoint(plugin.EndpointType) - listener.AddHandler(GetCapabilitiesPath, plugin.getCapabilities) - listener.AddHandler(GetAddressSpacesPath, plugin.getDefaultAddressSpaces) - listener.AddHandler(RequestPoolPath, plugin.requestPool) - listener.AddHandler(ReleasePoolPath, plugin.releasePool) - listener.AddHandler(GetPoolInfoPath, plugin.getPoolInfo) - listener.AddHandler(RequestAddressPath, plugin.requestAddress) - listener.AddHandler(ReleaseAddressPath, plugin.releaseAddress) - - // Plugin is ready to be discovered. - err = plugin.EnableDiscovery() - if err != nil { - log.Printf("[ipam] Failed to enable discovery: %v.", err) - return err - } - - log.Printf("[ipam] Plugin started.") - - return nil -} - -// Stop stops the plugin. -func (plugin *ipamPlugin) Stop() { - plugin.DisableDiscovery() - plugin.am.Uninitialize() - plugin.Uninitialize() - log.Printf("[ipam] Plugin stopped.") -} - -// -// Libnetwork remote IPAM API implementation -// https://github.com/docker/libnetwork/blob/master/docs/ipam.md -// - -// Handles GetCapabilities requests. -func (plugin *ipamPlugin) getCapabilities(w http.ResponseWriter, r *http.Request) { - var req GetCapabilitiesRequest - - log.Request(plugin.Name, &req, nil) - - resp := GetCapabilitiesResponse{ - RequiresMACAddress: requiresMACAddress, - RequiresRequestReplay: requiresRequestReplay, - } - - err := common.Encode(w, &resp) - - log.Response(plugin.Name, &resp, returnCode, returnStr, err) -} - -// Handles GetDefaultAddressSpaces requests. -func (plugin *ipamPlugin) getDefaultAddressSpaces(w http.ResponseWriter, r *http.Request) { - var req GetDefaultAddressSpacesRequest - var resp GetDefaultAddressSpacesResponse - - log.Request(plugin.Name, &req, nil) - - localId, globalId := plugin.am.GetDefaultAddressSpaces() - - resp.LocalDefaultAddressSpace = localId - resp.GlobalDefaultAddressSpace = globalId - - err := common.Encode(w, &resp) - - log.Response(plugin.Name, &resp, returnCode, returnStr, err) -} - -// Handles RequestPool requests. -func (plugin *ipamPlugin) requestPool(w http.ResponseWriter, r *http.Request) { - var req RequestPoolRequest - - // Decode request. 
- err := common.Decode(w, r, &req) - log.Request(plugin.Name, &req, err) - if err != nil { - return - } - - // Process request. - poolId, subnet, err := plugin.am.RequestPool(req.AddressSpace, req.Pool, req.SubPool, req.Options, req.V6) - if err != nil { - plugin.SendErrorResponse(w, err) - return - } - - // Encode response. - data := make(map[string]string) - poolId = ipam.NewAddressPoolId(req.AddressSpace, poolId, "").String() - resp := RequestPoolResponse{PoolID: poolId, Pool: subnet, Data: data} - - err = common.Encode(w, &resp) - - log.Response(plugin.Name, &resp, returnCode, returnStr, err) -} - -// Handles ReleasePool requests. -func (plugin *ipamPlugin) releasePool(w http.ResponseWriter, r *http.Request) { - var req ReleasePoolRequest - - // Decode request. - err := common.Decode(w, r, &req) - log.Request(plugin.Name, &req, err) - if err != nil { - return - } - - // Process request. - poolId, err := ipam.NewAddressPoolIdFromString(req.PoolID) - if err != nil { - plugin.SendErrorResponse(w, err) - return - } - - err = plugin.am.ReleasePool(poolId.AsId, poolId.Subnet) - if err != nil { - plugin.SendErrorResponse(w, err) - return - } - - // Encode response. - resp := ReleasePoolResponse{} - - err = common.Encode(w, &resp) - - log.Response(plugin.Name, &resp, returnCode, returnStr, err) -} - -// Handles GetPoolInfo requests. -func (plugin *ipamPlugin) getPoolInfo(w http.ResponseWriter, r *http.Request) { - var req GetPoolInfoRequest - - // Decode request. - err := common.Decode(w, r, &req) - log.Request(plugin.Name, &req, err) - if err != nil { - return - } - - // Process request. - poolId, err := ipam.NewAddressPoolIdFromString(req.PoolID) - if err != nil { - plugin.SendErrorResponse(w, err) - return - } - - apInfo, err := plugin.am.GetPoolInfo(poolId.AsId, poolId.Subnet) - if err != nil { - plugin.SendErrorResponse(w, err) - return - } - - // Encode response. - resp := GetPoolInfoResponse{ - Capacity: apInfo.Capacity, - Available: apInfo.Available, - } - - for _, addr := range apInfo.UnhealthyAddrs { - resp.UnhealthyAddresses = append(resp.UnhealthyAddresses, addr.String()) - } - - err = common.Encode(w, &resp) - - log.Response(plugin.Name, &resp, returnCode, returnStr, err) -} - -// Handles RequestAddress requests. -func (plugin *ipamPlugin) requestAddress(w http.ResponseWriter, r *http.Request) { - var req RequestAddressRequest - - // Decode request. - err := common.Decode(w, r, &req) - log.Request(plugin.Name, &req, err) - if err != nil { - return - } - - // Process request. - poolId, err := ipam.NewAddressPoolIdFromString(req.PoolID) - if err != nil { - plugin.SendErrorResponse(w, err) - return - } - - // Convert libnetwork IPAM options to core IPAM options. - options := make(map[string]string) - if req.Options[OptAddressType] == OptAddressTypeGateway { - options[ipam.OptAddressType] = ipam.OptAddressTypeGateway - } - - options[ipam.OptAddressID] = req.Options[ipam.OptAddressID] - - addr, err := plugin.am.RequestAddress(poolId.AsId, poolId.Subnet, req.Address, options) - if err != nil { - plugin.SendErrorResponse(w, err) - return - } - - // Encode response. - data := make(map[string]string) - resp := RequestAddressResponse{Address: addr, Data: data} - - err = common.Encode(w, &resp) - - log.Response(plugin.Name, &resp, returnCode, returnStr, err) -} - -// Handles ReleaseAddress requests. -func (plugin *ipamPlugin) releaseAddress(w http.ResponseWriter, r *http.Request) { - var req ReleaseAddressRequest - - // Decode request. 
-	err := common.Decode(w, r, &req)
-	log.Request(plugin.Name, &req, err)
-	if err != nil {
-		return
-	}
-
-	// Process request.
-	poolId, err := ipam.NewAddressPoolIdFromString(req.PoolID)
-	if err != nil {
-		plugin.SendErrorResponse(w, err)
-		return
-	}
-
-	err = plugin.am.ReleaseAddress(poolId.AsId, poolId.Subnet, req.Address, req.Options)
-	if err != nil {
-		plugin.SendErrorResponse(w, err)
-		return
-	}
-
-	// Encode response.
-	resp := ReleaseAddressResponse{}
-
-	err = common.Encode(w, &resp)
-
-	log.Response(plugin.Name, &resp, returnCode, returnStr, err)
-}
diff --git a/cnm/ipam/ipam_test.go b/cnm/ipam/ipam_test.go
deleted file mode 100644
index 534fcbc638..0000000000
--- a/cnm/ipam/ipam_test.go
+++ /dev/null
@@ -1,433 +0,0 @@
-// Copyright 2017 Microsoft. All rights reserved.
-// MIT License
-
-package ipam
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"net"
-	"net/http"
-	"net/http/httptest"
-	"net/url"
-	"os"
-	"strconv"
-	"testing"
-
-	"github.com/Azure/azure-container-networking/cnm"
-	"github.com/Azure/azure-container-networking/common"
-	"github.com/Azure/azure-container-networking/ipam"
-)
-
-var (
-	plugin IpamPlugin
-	mux    *http.ServeMux
-)
-
-var (
-	ipamQueryUrl      = "localhost:42424"
-	ipamQueryResponse = "" +
-		"<Interfaces>" +
-		"	<Interface MacAddress=\"*\" IsPrimary=\"true\">" +
-		"		<IPSubnet Prefix=\"10.0.0.0/16\">" +
-		"			<IPAddress Address=\"10.0.0.4\" IsPrimary=\"true\"/>" +
-		"			<IPAddress Address=\"10.0.0.5\" IsPrimary=\"false\"/>" +
-		"		</IPSubnet>" +
-		"		<IPSubnet Prefix=\"10.1.0.0/16\">" +
-		"			<IPAddress Address=\"10.1.0.4\" IsPrimary=\"false\"/>" +
-		"		</IPSubnet>" +
-		"	</Interface>" +
-		"</Interfaces>"
-	localAsId string
-	poolId1   string
-	address1  string
-)
-
-// Wraps the test run with plugin setup and teardown.
-func TestMain(m *testing.M) {
-	var config common.PluginConfig
-
-	// Create a fake local agent to handle requests from IPAM plugin.
-	u, _ := url.Parse("tcp://" + ipamQueryUrl)
-	testAgent, err := common.NewListener(u)
-	if err != nil {
-		fmt.Printf("Failed to create agent, err:%v.\n", err)
-		return
-	}
-	testAgent.AddHandler("/", handleIpamQuery)
-
-	err = testAgent.Start(make(chan error, 1))
-	if err != nil {
-		fmt.Printf("Failed to start agent, err:%v.\n", err)
-		return
-	}
-
-	// Create the plugin.
-	plugin, err = NewPlugin(&config)
-	if err != nil {
-		fmt.Printf("Failed to create IPAM plugin, err:%v.\n", err)
-		return
-	}
-
-	// Configure test mode.
-	plugin.SetOption(common.OptEnvironment, common.OptEnvironmentAzure)
-	plugin.SetOption(common.OptAPIServerURL, "null")
-	plugin.SetOption(common.OptIpamQueryUrl, "http://"+ipamQueryUrl)
-
-	// Start the plugin.
-	err = plugin.Start(&config)
-	if err != nil {
-		fmt.Printf("Failed to start IPAM plugin, err:%v.\n", err)
-		return
-	}
-
-	// Get the internal http mux as test hook.
-	mux = plugin.(*ipamPlugin).Listener.GetMux()
-
-	// Run tests.
-	exitCode := m.Run()
-
-	// Cleanup.
-	plugin.Stop()
-	testAgent.Stop()
-
-	os.Exit(exitCode)
-}
-
-// Handles queries from IPAM source.
-func handleIpamQuery(w http.ResponseWriter, r *http.Request) {
-	w.Write([]byte(ipamQueryResponse))
-}
-
-// Decodes plugin's responses to test requests.
-func decodeResponse(w *httptest.ResponseRecorder, response interface{}) error {
-	if w.Code != http.StatusOK {
-		return fmt.Errorf("Request failed with HTTP error %d", w.Code)
-	}
-
-	if w.Body == nil {
-		return fmt.Errorf("Response body is empty")
-	}
-
-	return json.NewDecoder(w.Body).Decode(&response)
-}
-
-//
-// Libnetwork remote IPAM API compliance tests
-// https://github.com/docker/libnetwork/blob/master/docs/ipam.md
-//
-
-// Tests Plugin.Activate functionality.
-func TestActivate(t *testing.T) { - var resp cnm.ActivateResponse - - req, err := http.NewRequest(http.MethodGet, "/Plugin.Activate", nil) - if err != nil { - t.Fatal(err) - } - - w := httptest.NewRecorder() - mux.ServeHTTP(w, req) - - err = decodeResponse(w, &resp) - - if err != nil || resp.Err != "" || resp.Implements[0] != "IpamDriver" { - t.Errorf("Activate response is invalid %+v", resp) - } -} - -// Tests IpamDriver.GetCapabilities functionality. -func TestGetCapabilities(t *testing.T) { - var resp GetCapabilitiesResponse - - req, err := http.NewRequest(http.MethodGet, GetCapabilitiesPath, nil) - if err != nil { - t.Fatal(err) - } - - w := httptest.NewRecorder() - mux.ServeHTTP(w, req) - - err = decodeResponse(w, &resp) - - if err != nil || resp.Err != "" { - t.Errorf("GetCapabilities response is invalid %+v", resp) - } -} - -// Tests IpamDriver.GetDefaultAddressSpaces functionality. -func TestGetDefaultAddressSpaces(t *testing.T) { - var resp GetDefaultAddressSpacesResponse - - req, err := http.NewRequest(http.MethodGet, GetAddressSpacesPath, nil) - if err != nil { - t.Fatal(err) - } - - w := httptest.NewRecorder() - mux.ServeHTTP(w, req) - - err = decodeResponse(w, &resp) - - if err != nil || resp.Err != "" || resp.LocalDefaultAddressSpace == "" { - t.Errorf("GetDefaultAddressSpaces response is invalid %+v", resp) - } - - localAsId = resp.LocalDefaultAddressSpace -} - -// Tests IpamDriver.RequestPool functionality. -func TestRequestPool(t *testing.T) { - var body bytes.Buffer - var resp RequestPoolResponse - - payload := &RequestPoolRequest{ - AddressSpace: localAsId, - } - - json.NewEncoder(&body).Encode(payload) - - req, err := http.NewRequest(http.MethodGet, RequestPoolPath, &body) - if err != nil { - t.Fatal(err) - } - - w := httptest.NewRecorder() - mux.ServeHTTP(w, req) - - err = decodeResponse(w, &resp) - - if err != nil || resp.Err != "" { - t.Errorf("RequestPool response is invalid %+v", resp) - } - - poolId1 = resp.PoolID -} - -// Tests IpamDriver.RequestAddress functionality. -func TestRequestAddress(t *testing.T) { - var body bytes.Buffer - var resp RequestAddressResponse - - payload := &RequestAddressRequest{ - PoolID: poolId1, - Address: "", - Options: nil, - } - - json.NewEncoder(&body).Encode(payload) - - req, err := http.NewRequest(http.MethodGet, RequestAddressPath, &body) - if err != nil { - t.Fatal(err) - } - - w := httptest.NewRecorder() - mux.ServeHTTP(w, req) - - err = decodeResponse(w, &resp) - - if err != nil || resp.Err != "" { - t.Errorf("RequestAddress response is invalid %+v", resp) - } - - address, _, _ := net.ParseCIDR(resp.Address) - address1 = address.String() -} - -// Tests IpamDriver.GetPoolInfo functionality. -func TestGetPoolInfo(t *testing.T) { - var body bytes.Buffer - var resp GetPoolInfoResponse - - payload := &GetPoolInfoRequest{ - PoolID: poolId1, - } - - json.NewEncoder(&body).Encode(payload) - - req, err := http.NewRequest(http.MethodGet, GetPoolInfoPath, &body) - if err != nil { - t.Fatal(err) - } - - w := httptest.NewRecorder() - mux.ServeHTTP(w, req) - - err = decodeResponse(w, &resp) - - if err != nil || resp.Err != "" { - t.Errorf("GetPoolInfo response is invalid %+v", resp) - } -} - -// Tests IpamDriver.ReleaseAddress functionality. 
-func TestReleaseAddress(t *testing.T) { - var body bytes.Buffer - var resp ReleaseAddressResponse - - payload := &ReleaseAddressRequest{ - PoolID: poolId1, - Address: address1, - } - - json.NewEncoder(&body).Encode(payload) - - req, err := http.NewRequest(http.MethodGet, ReleaseAddressPath, &body) - if err != nil { - t.Fatal(err) - } - - w := httptest.NewRecorder() - mux.ServeHTTP(w, req) - - err = decodeResponse(w, &resp) - - if err != nil || resp.Err != "" { - t.Errorf("ReleaseAddress response is invalid %+v", resp) - } -} - -// Tests IpamDriver.ReleasePool functionality. -func TestReleasePool(t *testing.T) { - var body bytes.Buffer - var resp ReleasePoolResponse - - payload := &ReleasePoolRequest{ - PoolID: poolId1, - } - - json.NewEncoder(&body).Encode(payload) - - req, err := http.NewRequest(http.MethodGet, ReleasePoolPath, &body) - if err != nil { - t.Fatal(err) - } - - w := httptest.NewRecorder() - mux.ServeHTTP(w, req) - - err = decodeResponse(w, &resp) - - if err != nil || resp.Err != "" { - t.Errorf("ReleasePool response is invalid %+v", resp) - } -} - -// Utility function to request address from IPAM. -func reqAddrInternal(payload *RequestAddressRequest) (string, error) { - var body bytes.Buffer - var resp RequestAddressResponse - json.NewEncoder(&body).Encode(payload) - - req, err := http.NewRequest(http.MethodGet, RequestAddressPath, &body) - if err != nil { - return "", err - } - - w := httptest.NewRecorder() - mux.ServeHTTP(w, req) - - err = decodeResponse(w, &resp) - - if err != nil { - return "", err - } - return resp.Address, nil -} - -// Utility function to release address from IPAM. -func releaseAddrInternal(payload *ReleaseAddressRequest) error { - var body bytes.Buffer - var resp ReleaseAddressResponse - - json.NewEncoder(&body).Encode(payload) - - req, err := http.NewRequest(http.MethodGet, ReleaseAddressPath, &body) - if err != nil { - return err - } - - w := httptest.NewRecorder() - mux.ServeHTTP(w, req) - - err = decodeResponse(w, &resp) - - if err != nil { - return err - } - return nil -} - -// Tests IpamDriver.RequestAddress with id. -func TestRequestAddressWithID(t *testing.T) { - var ipList [2]string - - for i := 0; i < 2; i++ { - payload := &RequestAddressRequest{ - PoolID: poolId1, - Address: "", - Options: make(map[string]string), - } - - payload.Options[ipam.OptAddressID] = "id" + strconv.Itoa(i) - - addr1, err := reqAddrInternal(payload) - if err != nil { - t.Errorf("RequestAddress response is invalid %+v", err) - } - - addr2, err := reqAddrInternal(payload) - if err != nil { - t.Errorf("RequestAddress response is invalid %+v", err) - } - - if addr1 != addr2 { - t.Errorf("RequestAddress with id %+v doesn't match with retrieved addr %+v ", addr1, addr2) - } - - address, _, _ := net.ParseCIDR(addr1) - ipList[i] = address.String() - } - - for i := 0; i < 2; i++ { - payload := &ReleaseAddressRequest{ - PoolID: poolId1, - Address: ipList[i], - } - err := releaseAddrInternal(payload) - if err != nil { - t.Errorf("ReleaseAddress response is invalid %+v", err) - } - } -} - -// Tests IpamDriver.ReleaseAddress with id. 
-func TestReleaseAddressWithID(t *testing.T) { - reqPayload := &RequestAddressRequest{ - PoolID: poolId1, - Address: "", - Options: make(map[string]string), - } - reqPayload.Options[ipam.OptAddressID] = "id1" - - _, err := reqAddrInternal(reqPayload) - if err != nil { - t.Errorf("RequestAddress response is invalid %+v", err) - } - - releasePayload := &ReleaseAddressRequest{ - PoolID: poolId1, - Address: "", - Options: make(map[string]string), - } - releasePayload.Options[ipam.OptAddressID] = "id1" - - err = releaseAddrInternal(releasePayload) - - if err != nil { - t.Errorf("ReleaseAddress response is invalid %+v", err) - } -} diff --git a/cnm/network/api.go b/cnm/network/api.go deleted file mode 100644 index d11f667124..0000000000 --- a/cnm/network/api.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright 2017 Microsoft. All rights reserved. -// MIT License - -package network - -const ( - // Libnetwork network plugin endpoint type - endpointType = "NetworkDriver" - - // Libnetwork network plugin remote API paths - getCapabilitiesPath = "/NetworkDriver.GetCapabilities" - createNetworkPath = "/NetworkDriver.CreateNetwork" - deleteNetworkPath = "/NetworkDriver.DeleteNetwork" - createEndpointPath = "/NetworkDriver.CreateEndpoint" - deleteEndpointPath = "/NetworkDriver.DeleteEndpoint" - joinPath = "/NetworkDriver.Join" - leavePath = "/NetworkDriver.Leave" - endpointOperInfoPath = "/NetworkDriver.EndpointOperInfo" - - // Libnetwork network plugin options - modeOption = "com.microsoft.azure.network.mode" -) - -// Request sent by libnetwork when querying plugin capabilities. -type getCapabilitiesRequest struct{} - -// Response sent by plugin when registering its capabilities with libnetwork. -type getCapabilitiesResponse struct { - Err string - Scope string -} - -// Request sent by libnetwork when creating a new network. -type createNetworkRequest struct { - NetworkID string - Options map[string]interface{} - IPv4Data []ipamData - IPv6Data []ipamData -} - -// IPAMData represents the per-network IP operational information. -type ipamData struct { - AddressSpace string - Pool string - Gateway string - AuxAddresses map[string]string -} - -// Response sent by plugin when a network is created. -type createNetworkResponse struct { - Err string -} - -// Request sent by libnetwork when deleting an existing network. -type deleteNetworkRequest struct { - NetworkID string -} - -// Response sent by plugin when a network is deleted. -type deleteNetworkResponse struct { - Err string -} - -// Request sent by libnetwork when creating a new endpoint. -type createEndpointRequest struct { - NetworkID string - EndpointID string - Options map[string]interface{} - Interface endpointInterface -} - -// Represents a libnetwork endpoint interface. -type endpointInterface struct { - Address string - AddressIPv6 string - MacAddress string -} - -// Response sent by plugin when an endpoint is created. -type createEndpointResponse struct { - Err string - Interface endpointInterface -} - -// Request sent by libnetwork when deleting an existing endpoint. -type deleteEndpointRequest struct { - NetworkID string - EndpointID string -} - -// Response sent by plugin when an endpoint is deleted. -type deleteEndpointResponse struct { - Err string -} - -// Request sent by libnetwork when joining an endpoint to a sandbox. -type joinRequest struct { - NetworkID string - EndpointID string - SandboxKey string - Options map[string]interface{} -} - -// Response sent by plugin when an endpoint is joined to a sandbox. 
-type joinResponse struct { - Err string - InterfaceName interfaceName - Gateway string - GatewayIPv6 string - StaticRoutes []staticRoute -} - -// Represents naming information for a joined interface. -type interfaceName struct { - SrcName string - DstName string - DstPrefix string -} - -// Represents a static route to be added in a sandbox for a joined interface. -type staticRoute struct { - Destination string - RouteType int - NextHop string -} - -// Request sent by libnetwork when removing an endpoint from its sandbox. -type leaveRequest struct { - NetworkID string - EndpointID string -} - -// Response sent by plugin when an endpoint is removed from its sandbox. -type leaveResponse struct { - Err string -} - -// Request sent by libnetwork when querying operational info of an endpoint. -type endpointOperInfoRequest struct { - NetworkID string - EndpointID string -} - -// Response sent by plugin when returning operational info of an endpoint. -type endpointOperInfoResponse struct { - Err string - Value map[string]interface{} -} diff --git a/cnm/network/network.go b/cnm/network/network.go deleted file mode 100644 index 94fff34bfe..0000000000 --- a/cnm/network/network.go +++ /dev/null @@ -1,372 +0,0 @@ -// Copyright 2017 Microsoft. All rights reserved. -// MIT License - -package network - -import ( - "net" - "net/http" - "time" - - "github.com/Azure/azure-container-networking/cnm" - cnsclient "github.com/Azure/azure-container-networking/cns/client" - "github.com/Azure/azure-container-networking/common" - "github.com/Azure/azure-container-networking/iptables" - "github.com/Azure/azure-container-networking/log" - "github.com/Azure/azure-container-networking/netio" - "github.com/Azure/azure-container-networking/netlink" - "github.com/Azure/azure-container-networking/network" - "github.com/Azure/azure-container-networking/platform" -) - -const ( - // Plugin name. - name = "azure-vnet" - - // Plugin capabilities. - scope = "local" - - // Prefix for container network interface names. - containerInterfacePrefix = "eth" - returnCode = 0 - returnStr = "Success" - defaultCNSTimeout = 15 * time.Second -) - -// NetPlugin represents a CNM (libnetwork) network plugin. -type netPlugin struct { - *cnm.Plugin - scope string - nm network.NetworkManager -} - -type NetPlugin interface { - common.PluginApi -} - -// NewPlugin creates a new NetPlugin object. -func NewPlugin(config *common.PluginConfig) (NetPlugin, error) { - // Setup base plugin. - plugin, err := cnm.NewPlugin(name, config.Version, endpointType) - if err != nil { - return nil, err - } - - nl := netlink.NewNetlink() - // Setup network manager. - nm, err := network.NewNetworkManager(nl, platform.NewExecClient(nil), &netio.NetIO{}, network.NewNamespaceClient(), iptables.NewClient()) - if err != nil { - return nil, err - } - - config.NetApi = nm - - return &netPlugin{ - Plugin: plugin, - scope: scope, - nm: nm, - }, nil -} - -// Start starts the plugin. -func (plugin *netPlugin) Start(config *common.PluginConfig) error { - // Initialize base plugin. - err := plugin.Initialize(config) - if err != nil { - log.Printf("[net] Failed to initialize base plugin, err:%v.", err) - return err - } - - // Initialize network manager. rehyrdration required on reboot for cnm plugin - err = plugin.nm.Initialize(config, true) - if err != nil { - log.Printf("[net] Failed to initialize network manager, err:%v.", err) - return err - } - - // Add protocol handlers. 
- listener := plugin.Listener - listener.AddEndpoint(plugin.EndpointType) - listener.AddHandler(getCapabilitiesPath, plugin.getCapabilities) - listener.AddHandler(createNetworkPath, plugin.createNetwork) - listener.AddHandler(deleteNetworkPath, plugin.deleteNetwork) - listener.AddHandler(createEndpointPath, plugin.createEndpoint) - listener.AddHandler(deleteEndpointPath, plugin.deleteEndpoint) - listener.AddHandler(joinPath, plugin.join) - listener.AddHandler(leavePath, plugin.leave) - listener.AddHandler(endpointOperInfoPath, plugin.endpointOperInfo) - - // Plugin is ready to be discovered. - err = plugin.EnableDiscovery() - if err != nil { - log.Printf("[net] Failed to enable discovery: %v.", err) - return err - } - - log.Printf("[net] Plugin started.") - - return nil -} - -// Stop stops the plugin. -func (plugin *netPlugin) Stop() { - plugin.DisableDiscovery() - plugin.nm.Uninitialize() - plugin.Uninitialize() - log.Printf("[net] Plugin stopped.") -} - -// -// Libnetwork remote network API implementation -// https://github.com/docker/libnetwork/blob/master/docs/remote.md -// - -// Handles GetCapabilities requests. -func (plugin *netPlugin) getCapabilities(w http.ResponseWriter, r *http.Request) { - var req getCapabilitiesRequest - - log.Request(plugin.Name, &req, nil) - - resp := getCapabilitiesResponse{Scope: plugin.scope} - err := common.Encode(w, &resp) - - log.Response(plugin.Name, &resp, returnCode, returnStr, err) -} - -// Handles CreateNetwork requests. -func (plugin *netPlugin) createNetwork(w http.ResponseWriter, r *http.Request) { - var req createNetworkRequest - - // Decode request. - err := common.Decode(w, r, &req) - log.Request(plugin.Name, &req, err) - if err != nil { - return - } - - // Process request. - nwInfo := network.EndpointInfo{ - NetworkID: req.NetworkID, - Options: req.Options, - } - - // Parse network options. - options := plugin.ParseOptions(req.Options) - if options != nil { - nwInfo.Mode, _ = options[modeOption].(string) - } - - // Populate subnets. - for _, data := range [][]ipamData{req.IPv4Data, req.IPv6Data} { - for _, ipamData := range data { - _, prefix, err := net.ParseCIDR(ipamData.Pool) - if err != nil { - continue - } - - subnet := network.SubnetInfo{ - Family: platform.GetAddressFamily(&prefix.IP), - Prefix: *prefix, - Gateway: platform.ConvertStringToIPAddress(ipamData.Gateway), - } - - nwInfo.Subnets = append(nwInfo.Subnets, subnet) - } - } - - err = plugin.nm.CreateNetwork(&nwInfo) - if err != nil { - plugin.SendErrorResponse(w, err) - return - } - - // Encode response. - resp := createNetworkResponse{} - err = common.Encode(w, &resp) - - log.Response(plugin.Name, &resp, returnCode, returnStr, err) -} - -// Handles DeleteNetwork requests. -func (plugin *netPlugin) deleteNetwork(w http.ResponseWriter, r *http.Request) { - var req deleteNetworkRequest - - // Decode request. - err := common.Decode(w, r, &req) - log.Request(plugin.Name, &req, err) - if err != nil { - return - } - - // Process request. - err = plugin.nm.DeleteNetwork(req.NetworkID) - if err != nil { - plugin.SendErrorResponse(w, err) - return - } - - // Encode response. - resp := deleteNetworkResponse{} - err = common.Encode(w, &resp) - - log.Response(plugin.Name, &resp, returnCode, returnStr, err) -} - -// Handles CreateEndpoint requests. -func (plugin *netPlugin) createEndpoint(w http.ResponseWriter, r *http.Request) { - var req createEndpointRequest - - // Decode request. 
- err := common.Decode(w, r, &req) - log.Request(plugin.Name, &req, err) - if err != nil { - return - } - - // Process request. - var ipv4Address *net.IPNet - if req.Interface.Address != "" { - var ip net.IP - ip, ipv4Address, err = net.ParseCIDR(req.Interface.Address) - if err != nil { - plugin.SendErrorResponse(w, err) - return - } - ipv4Address.IP = ip - } - - epInfo := network.EndpointInfo{ - EndpointID: req.EndpointID, - IPAddresses: []net.IPNet{*ipv4Address}, - SkipHotAttachEp: true, // Skip hot attach endpoint as it's done in Join - } - - epInfo.Data = make(map[string]interface{}) - - cnscli, err := cnsclient.New("", defaultCNSTimeout) - if err != nil { - log.Errorf("failed to init CNS client", err) - } - err = plugin.nm.CreateEndpoint(cnscli, req.NetworkID, &epInfo) - // TODO: Because create endpoint no longer assigns to the map or saves to a file, you need to handle it in cnm right here! - if err != nil { - plugin.SendErrorResponse(w, err) - return - } - - // Encode response. - resp := createEndpointResponse{} - - err = common.Encode(w, &resp) - - log.Response(plugin.Name, &resp, returnCode, returnStr, err) -} - -// Handles DeleteEndpoint requests. -func (plugin *netPlugin) deleteEndpoint(w http.ResponseWriter, r *http.Request) { - var req deleteEndpointRequest - - // Decode request. - err := common.Decode(w, r, &req) - log.Request(plugin.Name, &req, err) - if err != nil { - return - } - - // Process request. - err = plugin.nm.DeleteEndpoint(req.NetworkID, req.EndpointID, nil) - if err != nil { - plugin.SendErrorResponse(w, err) - return - } - - // Encode response. - resp := deleteEndpointResponse{} - err = common.Encode(w, &resp) - - log.Response(plugin.Name, &resp, returnCode, returnStr, err) -} - -// Handles Join requests. -func (plugin *netPlugin) join(w http.ResponseWriter, r *http.Request) { - var req joinRequest - - // Decode request. - err := common.Decode(w, r, &req) - log.Request(plugin.Name, &req, err) - if err != nil { - return - } - - // Process request. - ep, err := plugin.nm.AttachEndpoint(req.NetworkID, req.EndpointID, req.SandboxKey) - if err != nil { - plugin.SendErrorResponse(w, err) - return - } - - // Encode response. - ifname := interfaceName{ - SrcName: ep.IfName, - DstPrefix: containerInterfacePrefix, - } - - resp := joinResponse{ - InterfaceName: ifname, - Gateway: ep.Gateways[0].String(), - } - - err = common.Encode(w, &resp) - - log.Response(plugin.Name, &resp, returnCode, returnStr, err) -} - -// Handles Leave requests. -func (plugin *netPlugin) leave(w http.ResponseWriter, r *http.Request) { - var req leaveRequest - - // Decode request. - err := common.Decode(w, r, &req) - log.Request(plugin.Name, &req, err) - if err != nil { - return - } - - // Process request. - err = plugin.nm.DetachEndpoint(req.NetworkID, req.EndpointID) - if err != nil { - plugin.SendErrorResponse(w, err) - return - } - - // Encode response. - resp := leaveResponse{} - err = common.Encode(w, &resp) - - log.Response(plugin.Name, &resp, returnCode, returnStr, err) -} - -// Handles EndpointOperInfo requests. -func (plugin *netPlugin) endpointOperInfo(w http.ResponseWriter, r *http.Request) { - var req endpointOperInfoRequest - - // Decode request. - err := common.Decode(w, r, &req) - log.Request(plugin.Name, &req, err) - if err != nil { - return - } - - // Process request. - epInfo, err := plugin.nm.GetEndpointInfo(req.NetworkID, req.EndpointID) - if err != nil { - plugin.SendErrorResponse(w, err) - return - } - - // Encode response. 
- resp := endpointOperInfoResponse{Value: epInfo.Data} - err = common.Encode(w, &resp) - - log.Response(plugin.Name, &resp, returnCode, returnStr, err) -} diff --git a/cnm/network/network_linux_test.go b/cnm/network/network_linux_test.go deleted file mode 100644 index 2cc4d27cf4..0000000000 --- a/cnm/network/network_linux_test.go +++ /dev/null @@ -1,329 +0,0 @@ -// Copyright 2017 Microsoft. All rights reserved. -// MIT License - -package network - -import ( - "bytes" - "encoding/json" - "fmt" - "net" - "net/http" - "net/http/httptest" - "os" - "os/exec" - "testing" - - "github.com/Azure/azure-container-networking/cnm" - "github.com/Azure/azure-container-networking/common" - "github.com/Azure/azure-container-networking/log" - "github.com/Azure/azure-container-networking/netlink" - driverApi "github.com/docker/libnetwork/driverapi" - remoteApi "github.com/docker/libnetwork/drivers/remote/api" -) - -var ( - plugin NetPlugin - mux *http.ServeMux -) - -var ( - anyInterface = "dummy" - anySubnet = "192.168.1.0/24" - ipnet = net.IPNet{IP: net.ParseIP("192.168.1.0"), Mask: net.IPv4Mask(255, 255, 255, 0)} - networkID = "N1" - netns = "22212" -) - -// endpoint ID must contain 7 characters -var endpointID = "E1-xxxx" - -// Wraps the test run with plugin setup and teardown. -func TestMain(m *testing.M) { - var config common.PluginConfig - var err error - - // Create the plugin. - plugin, err = NewPlugin(&config) - if err != nil { - fmt.Printf("Failed to create network plugin %v\n", err) - os.Exit(1) - } - - // Configure test mode. - plugin.(*netPlugin).Name = "test" - - // Start the plugin. - err = plugin.Start(&config) - if err != nil { - fmt.Printf("Failed to start network plugin %v\n", err) - os.Exit(2) - } - nl := netlink.NewNetlink() - - // Create a dummy test network interface. - err = nl.AddLink(&netlink.DummyLink{ - LinkInfo: netlink.LinkInfo{ - Type: netlink.LINK_TYPE_DUMMY, - Name: anyInterface, - }, - }) - - if err != nil { - fmt.Printf("Failed to create test network interface, err:%v.\n", err) - os.Exit(3) - } - - err = plugin.(*netPlugin).nm.AddExternalInterface(anyInterface, anySubnet, "") - if err != nil { - fmt.Printf("Failed to add test network interface, err:%v.\n", err) - os.Exit(4) - } - - err = nl.AddIPAddress(anyInterface, net.ParseIP("192.168.1.4"), &ipnet) - if err != nil { - fmt.Printf("Failed to add test IP address, err:%v.\n", err) - os.Exit(5) - } - - // Get the internal http mux as test hook. - mux = plugin.(*netPlugin).Listener.GetMux() - - // Run tests. - exitCode := m.Run() - - // Cleanup. - err = nl.DeleteLink(anyInterface) - if err != nil { - fmt.Printf("Failed to delete link, err:%v.\n", err) - } - plugin.Stop() - - os.Exit(exitCode) -} - -// Decodes plugin's responses to test requests. -func decodeResponse(w *httptest.ResponseRecorder, response interface{}) error { - if w.Code != http.StatusOK { - return fmt.Errorf("Request failed with HTTP error %d", w.Code) - } - - if w.Body == nil { - return fmt.Errorf("Response body is empty") - } - - return json.NewDecoder(w.Body).Decode(&response) -} - -// -// Libnetwork remote API compliance tests -// https://github.com/docker/libnetwork/blob/master/docs/remote.md -// - -// Tests Plugin.Activate functionality. 
-func TestActivate(t *testing.T) { - var resp cnm.ActivateResponse - - req, err := http.NewRequest(http.MethodGet, "/Plugin.Activate", nil) - if err != nil { - t.Fatal(err) - } - - w := httptest.NewRecorder() - mux.ServeHTTP(w, req) - - err = decodeResponse(w, &resp) - - if err != nil || resp.Err != "" || resp.Implements[0] != "NetworkDriver" { - t.Errorf("Activate response is invalid %+v", resp) - } -} - -// Tests NetworkDriver.GetCapabilities functionality. -func TestGetCapabilities(t *testing.T) { - var resp remoteApi.GetCapabilityResponse - - req, err := http.NewRequest(http.MethodGet, getCapabilitiesPath, nil) - if err != nil { - t.Fatal(err) - } - - w := httptest.NewRecorder() - mux.ServeHTTP(w, req) - - err = decodeResponse(w, &resp) - - if err != nil || resp.Err != "" || resp.Scope != "local" { - t.Errorf("GetCapabilities response is invalid %+v", resp) - } -} - -func TestCNM(t *testing.T) { - cmd := exec.Command("ip", "netns", "add", netns) - log.Printf("%v", cmd) - output, err := cmd.Output() - if err != nil { - t.Fatalf("%v:%v", output, err.Error()) - return - } - - defer func() { - cmd = exec.Command("ip", "netns", "delete", netns) - _, err = cmd.Output() - - if err != nil { - t.Fatalf("%v:%v", output, err) - return - } - }() - - log.Printf("###CreateNetwork#####################################################################################") - createNetworkT(t) - log.Printf("###CreateEndpoint####################################################################################") - createEndpointT(t) - log.Printf("###EndpointOperInfo#####################################################################################") - endpointOperInfoT(t) - log.Printf("###DeleteEndpoint#####################################################################################") - deleteEndpointT(t) - log.Printf("###DeleteNetwork#####################################################################################") - // deleteNetworkT(t) -} - -// Tests NetworkDriver.CreateNetwork functionality. -func createNetworkT(t *testing.T) { - var body bytes.Buffer - var resp remoteApi.CreateNetworkResponse - - _, pool, _ := net.ParseCIDR(anySubnet) - - info := &remoteApi.CreateNetworkRequest{ - NetworkID: networkID, - IPv4Data: []driverApi.IPAMData{ - { - Pool: pool, - }, - }, - } - - json.NewEncoder(&body).Encode(info) - - req, err := http.NewRequest(http.MethodGet, createNetworkPath, &body) - if err != nil { - t.Fatal(err) - } - - w := httptest.NewRecorder() - mux.ServeHTTP(w, req) - - err = decodeResponse(w, &resp) - - if err != nil || resp.Response.Err != "" { - t.Errorf("CreateNetwork response is invalid %+v, received err %v", resp, err) - } -} - -// Tests NetworkDriver.CreateEndpoint functionality. -func createEndpointT(t *testing.T) { - var body bytes.Buffer - var resp remoteApi.CreateEndpointResponse - - info := &remoteApi.CreateEndpointRequest{ - NetworkID: networkID, - EndpointID: endpointID, - Interface: &remoteApi.EndpointInterface{Address: anySubnet}, - } - - json.NewEncoder(&body).Encode(info) - - req, err := http.NewRequest(http.MethodGet, createEndpointPath, &body) - if err != nil { - t.Fatal(err) - } - - w := httptest.NewRecorder() - mux.ServeHTTP(w, req) - - err = decodeResponse(w, &resp) - - if err != nil || resp.Response.Err != "" { - t.Errorf("CreateEndpoint response is invalid %+v, received err %v", resp, err) - } -} - -// Tests NetworkDriver.EndpointOperInfo functionality. 
-func endpointOperInfoT(t *testing.T) { - var body bytes.Buffer - var resp remoteApi.EndpointInfoResponse - - info := &remoteApi.EndpointInfoRequest{ - NetworkID: networkID, - EndpointID: endpointID, - } - - json.NewEncoder(&body).Encode(info) - - req, err := http.NewRequest(http.MethodGet, endpointOperInfoPath, &body) - if err != nil { - t.Fatal(err) - } - - w := httptest.NewRecorder() - mux.ServeHTTP(w, req) - - err = decodeResponse(w, &resp) - if err != nil || resp.Err != "" { - t.Errorf("EndpointOperInfo response is invalid %+v, received err %v", resp, err) - } -} - -func deleteEndpointT(t *testing.T) { - var body bytes.Buffer - var resp remoteApi.DeleteEndpointResponse - - info := &remoteApi.DeleteEndpointRequest{ - NetworkID: networkID, - EndpointID: endpointID, - } - - json.NewEncoder(&body).Encode(info) - - req, err := http.NewRequest(http.MethodGet, deleteEndpointPath, &body) - if err != nil { - t.Fatal(err) - } - - w := httptest.NewRecorder() - mux.ServeHTTP(w, req) - - err = decodeResponse(w, &resp) - - if err != nil || resp.Response.Err != "" { - t.Errorf("DeleteEndpoint response is invalid %+v, received err %v", resp, err) - } -} - -// Tests NetworkDriver.DeleteNetwork functionality. -func deleteNetworkT(t *testing.T) { - var body bytes.Buffer - var resp remoteApi.DeleteNetworkResponse - - info := &remoteApi.DeleteNetworkRequest{ - NetworkID: networkID, - } - - json.NewEncoder(&body).Encode(info) - - req, err := http.NewRequest(http.MethodGet, deleteNetworkPath, &body) - if err != nil { - t.Fatal(err) - } - - w := httptest.NewRecorder() - mux.ServeHTTP(w, req) - - err = decodeResponse(w, &resp) - - if err != nil || resp.Err != "" { - t.Errorf("DeleteNetwork response is invalid %+v, received err %v", resp, err) - } -} diff --git a/cnm/plugin.go b/cnm/plugin.go deleted file mode 100644 index 417552920c..0000000000 --- a/cnm/plugin.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright 2017 Microsoft. All rights reserved. -// MIT License - -package cnm - -import ( - "net/http" - "net/url" - "os" - - "github.com/Azure/azure-container-networking/common" - "github.com/Azure/azure-container-networking/log" -) - -// Plugin is the parent class for CNM plugins. -type Plugin struct { - *common.Plugin - EndpointType string - Listener *common.Listener -} - -// NewPlugin creates a new Plugin object. -func NewPlugin(name, version, endpointType string) (*Plugin, error) { - // Setup base plugin. - plugin, err := common.NewPlugin(name, version) - if err != nil { - return nil, err - } - - return &Plugin{ - Plugin: plugin, - EndpointType: endpointType, - }, nil -} - -// Initialize initializes the plugin and starts the listener. -func (plugin *Plugin) Initialize(config *common.PluginConfig) error { - // Initialize the base plugin. - plugin.Plugin.Initialize(config) - - // Initialize the shared listener. - if config.Listener == nil { - // Fetch and parse the API server URL. - u, err := url.Parse(plugin.getAPIServerURL()) - if err != nil { - return err - } - - // Create the listener. - listener, err := common.NewListener(u) - if err != nil { - return err - } - - // Add generic protocol handlers. - listener.AddHandler(activatePath, plugin.activate) - - // Start the listener. - err = listener.Start(config.ErrChan) - if err != nil { - return err - } - - config.Listener = listener - } - - plugin.Listener = config.Listener - - return nil -} - -// Uninitialize cleans up the plugin. 
-func (plugin *Plugin) Uninitialize() { - plugin.Listener.Stop() - plugin.Plugin.Uninitialize() -} - -// EnableDiscovery enables Docker to discover the plugin by creating the plugin spec file. -func (plugin *Plugin) EnableDiscovery() error { - // Plugins using unix domain sockets do not need a spec file. - if plugin.Listener.URL.Scheme == "unix" { - return nil - } - - // Create the spec directory. - path := plugin.getSpecPath() - os.MkdirAll(path, 0o755) - - // Write the listener URL to the spec file. - fileName := path + plugin.Name + ".spec" - url := plugin.Listener.URL.String() - err := os.WriteFile(fileName, []byte(url), 0o644) - return err -} - -// DisableDiscovery disables discovery by deleting the plugin spec file. -func (plugin *Plugin) DisableDiscovery() { - // Plugins using unix domain sockets do not need a spec file. - if plugin.Listener.URL.Scheme == "unix" { - return - } - - fileName := plugin.getSpecPath() + plugin.Name + ".spec" - os.Remove(fileName) -} - -// ParseOptions returns generic options from a libnetwork request. -func (plugin *Plugin) ParseOptions(options OptionMap) OptionMap { - opt, _ := options[genericData].(map[string]interface{}) - return opt -} - -// -// Libnetwork remote plugin API -// - -// Activate handles Activate requests. -func (plugin *Plugin) activate(w http.ResponseWriter, r *http.Request) { - var req activateRequest - - log.Request(plugin.Name, &req, nil) - - resp := ActivateResponse{Implements: plugin.Listener.GetEndpoints()} - err := common.Encode(w, &resp) - - log.Response(plugin.Name, &resp, 0, "Success", err) -} - -// SendErrorResponse sends and logs an error response. -func (plugin *Plugin) SendErrorResponse(w http.ResponseWriter, errMsg error) { - resp := errorResponse{errMsg.Error()} - err := common.Encode(w, &resp) - - log.Response(plugin.Name, &resp, 0, "Success", err) -} diff --git a/cnm/plugin/main.go b/cnm/plugin/main.go deleted file mode 100644 index 227e588cf6..0000000000 --- a/cnm/plugin/main.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright 2017 Microsoft. All rights reserved. -// MIT License - -package main - -import ( - "fmt" - "os" - "os/signal" - "syscall" - - "github.com/Azure/azure-container-networking/cnm/ipam" - "github.com/Azure/azure-container-networking/cnm/network" - "github.com/Azure/azure-container-networking/common" - "github.com/Azure/azure-container-networking/log" - "github.com/Azure/azure-container-networking/platform" - "github.com/Azure/azure-container-networking/processlock" - "github.com/Azure/azure-container-networking/store" -) - -const ( - // Plugin name as used in socket, log and store names. - name = "azure-vnet" -) - -// Version is populated by make during build. -var version string - -// Command line arguments for CNM plugin. 
-var args = common.ArgumentList{ - { - Name: common.OptEnvironment, - Shorthand: common.OptEnvironmentAlias, - Description: "Set the operating environment", - Type: "string", - DefaultValue: common.OptEnvironmentAzure, - ValueMap: map[string]interface{}{ - common.OptEnvironmentAzure: 0, - common.OptEnvironmentMAS: 0, - common.OptEnvironmentFileIpam: 0, - }, - }, - { - Name: common.OptAPIServerURL, - Shorthand: common.OptAPIServerURLAlias, - Description: "Set the API server URL", - Type: "string", - DefaultValue: "", - }, - { - Name: common.OptLogLevel, - Shorthand: common.OptLogLevelAlias, - Description: "Set the logging level", - Type: "int", - DefaultValue: common.OptLogLevelInfo, - ValueMap: map[string]interface{}{ - common.OptLogLevelInfo: log.LevelInfo, - common.OptLogLevelDebug: log.LevelDebug, - }, - }, - { - Name: common.OptLogTarget, - Shorthand: common.OptLogTargetAlias, - Description: "Set the logging target", - Type: "int", - DefaultValue: common.OptLogTargetFile, - ValueMap: map[string]interface{}{ - common.OptLogTargetSyslog: log.TargetSyslog, - common.OptLogTargetStderr: log.TargetStderr, - common.OptLogTargetFile: log.TargetLogfile, - }, - }, - { - Name: common.OptLogLocation, - Shorthand: common.OptLogLocationAlias, - Description: "Set the logging directory", - Type: "string", - DefaultValue: "", - }, - { - Name: common.OptIpamQueryUrl, - Shorthand: common.OptIpamQueryUrlAlias, - Description: "Set the IPAM query URL", - Type: "string", - DefaultValue: "", - }, - { - Name: common.OptIpamQueryInterval, - Shorthand: common.OptIpamQueryIntervalAlias, - Description: "Set the IPAM plugin query interval", - Type: "int", - DefaultValue: "", - }, - { - Name: common.OptVersion, - Shorthand: common.OptVersionAlias, - Description: "Print version information", - Type: "bool", - DefaultValue: false, - }, - { - Name: common.OptStoreFileLocation, - Shorthand: common.OptStoreFileLocationAlias, - Description: "Set store file absolute path", - Type: "string", - DefaultValue: platform.CNMRuntimePath, - }, -} - -// Prints description and version information. -func printVersion() { - fmt.Printf("Azure CNM (libnetwork) plugin\n") - fmt.Printf("Version %v\n", version) -} - -// Main is the entry point for CNM plugin. -func main() { - // Initialize and parse command line arguments. - common.ParseArgs(&args, printVersion) - - environment := common.GetArg(common.OptEnvironment).(string) - url := common.GetArg(common.OptAPIServerURL).(string) - logLevel := common.GetArg(common.OptLogLevel).(int) - logTarget := common.GetArg(common.OptLogTarget).(int) - ipamQueryUrl, _ := common.GetArg(common.OptIpamQueryUrl).(string) - ipamQueryInterval, _ := common.GetArg(common.OptIpamQueryInterval).(int) - vers := common.GetArg(common.OptVersion).(bool) - storeFileLocation := common.GetArg(common.OptStoreFileLocation).(string) - - if vers { - printVersion() - os.Exit(0) - } - - // Initialize plugin common configuration. - var config common.PluginConfig - config.Version = version - - // Create a channel to receive unhandled errors from the plugins. - config.ErrChan = make(chan error, 1) - - // Create network plugin. - netPlugin, err := network.NewPlugin(&config) - if err != nil { - fmt.Printf("Failed to create network plugin, err:%v.\n", err) - return - } - - // Create IPAM plugin. 
- ipamPlugin, err := ipam.NewPlugin(&config) - if err != nil { - fmt.Printf("Failed to create IPAM plugin, err:%v.\n", err) - return - } - - err = platform.CreateDirectory(storeFileLocation) - if err != nil { - log.Errorf("Failed to create File Store directory %s, due to Error:%v", storeFileLocation, err.Error()) - return - } - - lockclient, err := processlock.NewFileLock(platform.CNILockPath + name + store.LockExtension) - if err != nil { - log.Printf("Error initializing file lock:%v", err) - return - } - - // Create the key value store. - storeFileName := storeFileLocation + name + ".json" - config.Store, err = store.NewJsonFileStore(storeFileName, lockclient, nil) - if err != nil { - log.Errorf("Failed to create store file: %s, due to error %v\n", storeFileName, err) - return - } - - // Create logging provider. - logDirectory := "" // Sets the current location as log directory - log.SetName(name) - log.SetLevel(logLevel) - err = log.SetTargetLogDirectory(logTarget, logDirectory) - if err != nil { - fmt.Printf("Failed to configure logging: %v\n", err) - return - } - - // Log platform information. - log.Printf("Running on %v", platform.GetOSInfo()) - common.LogNetworkInterfaces() - - // Set plugin options. - netPlugin.SetOption(common.OptAPIServerURL, url) - - ipamPlugin.SetOption(common.OptEnvironment, environment) - ipamPlugin.SetOption(common.OptAPIServerURL, url) - ipamPlugin.SetOption(common.OptIpamQueryUrl, ipamQueryUrl) - ipamPlugin.SetOption(common.OptIpamQueryInterval, ipamQueryInterval) - - // Start plugins. - if netPlugin != nil { - err = netPlugin.Start(&config) - if err != nil { - fmt.Printf("Failed to start network plugin, err:%v.\n", err) - return - } - } - - if ipamPlugin != nil { - err = ipamPlugin.Start(&config) - if err != nil { - fmt.Printf("Failed to start IPAM plugin, err:%v.\n", err) - return - } - } - - // Relay these incoming signals to OS signal channel. - osSignalChannel := make(chan os.Signal, 1) - signal.Notify(osSignalChannel, os.Interrupt, syscall.SIGTERM) - - // Wait until receiving a signal. - select { - case sig := <-osSignalChannel: - log.Printf("Received OS signal <" + sig.String() + ">, shutting down.") - case err := <-config.ErrChan: - log.Printf("Received unhandled plugin error %v, shutting down.", err) - } - - // Cleanup. - if netPlugin != nil { - netPlugin.Stop() - } - - if ipamPlugin != nil { - ipamPlugin.Stop() - } - - log.Close() -} diff --git a/cnm/plugin_linux.go b/cnm/plugin_linux.go deleted file mode 100644 index e552c98b40..0000000000 --- a/cnm/plugin_linux.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2017 Microsoft. All rights reserved. -// MIT License - -// +build linux - -package cnm - -import ( - "os" - - "github.com/Azure/azure-container-networking/common" -) - -const ( - // Default API server URL. - defaultAPIServerURL = "unix:///run/docker/plugins/" - - // Docker plugin paths. - pluginSpecPath = "/etc/docker/plugins/" - pluginSocketPath = "/run/docker/plugins/" -) - -// GetAPIServerURL returns the API server URL. -func (plugin *Plugin) getAPIServerURL() string { - urls, _ := plugin.GetOption(common.OptAPIServerURL).(string) - if urls == "" { - urls = defaultAPIServerURL + plugin.Name + ".sock" - } - - os.MkdirAll(pluginSocketPath, 0o755) - - return urls -} - -// GetSpecPath returns the Docker plugin spec path. 
-func (plugin *Plugin) getSpecPath() string { - return pluginSpecPath -} diff --git a/cnm/plugin_windows.go b/cnm/plugin_windows.go deleted file mode 100644 index b6ade5153d..0000000000 --- a/cnm/plugin_windows.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2017 Microsoft. All rights reserved. -// MIT License - -// +build windows - -package cnm - -import ( - "os" - - "github.com/Azure/azure-container-networking/common" -) - -const ( - // Default API server URL. - defaultAPIServerURL = "tcp://localhost:48080" - - // Docker plugin paths. - pluginSpecPath = "\\docker\\plugins\\" -) - -// GetAPIServerURL returns the API server URL. -func (plugin *Plugin) getAPIServerURL() string { - urls, _ := plugin.GetOption(common.OptAPIServerURL).(string) - if urls == "" { - urls = defaultAPIServerURL - } - - return urls -} - -// GetSpecPath returns the Docker plugin spec path. -func (plugin *Plugin) getSpecPath() string { - return os.Getenv("programdata") + pluginSpecPath -} diff --git a/cns/Dockerfile b/cns/Dockerfile index 90330bec25..41444173d8 100644 --- a/cns/Dockerfile +++ b/cns/Dockerfile @@ -1,15 +1,17 @@ +# !! AUTOGENERATED - DO NOT EDIT !! +# SOURCE: cns/Dockerfile.tmpl ARG ARCH ARG OS_VERSION ARG OS -# skopeo inspect docker://mcr.microsoft.com/oss/go/microsoft/golang:1.22-cbl-mariner2.0 --format "{{.Name}}@{{.Digest}}" -FROM --platform=linux/${ARCH} mcr.microsoft.com/oss/go/microsoft/golang@sha256:c062e5e23f2d172a8fd590adcd171499af7005cae344a36284255f26e5ce4f8a AS go +# mcr.microsoft.com/oss/go/microsoft/golang:1.23-azurelinux3.0 +FROM --platform=linux/${ARCH} mcr.microsoft.com/oss/go/microsoft/golang@sha256:7d33a8015c48c06e97ecd5139181594b550d4d4d6a9d7fb19083192541213753 AS go -# skopeo inspect docker://mcr.microsoft.com/cbl-mariner/base/core:2.0 --format "{{.Name}}@{{.Digest}}" -FROM mcr.microsoft.com/cbl-mariner/base/core@sha256:a490e0b0869dc570ae29782c2bc17643aaaad1be102aca83ce0b96e0d0d2d328 AS mariner-core +# mcr.microsoft.com/azurelinux/base/core:3.0 +FROM mcr.microsoft.com/azurelinux/base/core@sha256:c09a4e011a092a45b5c46ac5633253eb1e1106df028912b89cbe225d9061ef0b AS mariner-core -# skopeo inspect docker://mcr.microsoft.com/cbl-mariner/distroless/minimal:2.0 --format "{{.Name}}@{{.Digest}}" -FROM mcr.microsoft.com/cbl-mariner/distroless/minimal@sha256:d28cbaa097167b4f5fdea02aac5404e3c9ec6c37499df1e115765e38b0a21660 AS mariner-distroless +# mcr.microsoft.com/azurelinux/distroless/minimal:3.0 +FROM mcr.microsoft.com/azurelinux/distroless/minimal@sha256:c37100f358ee19e62c60673c54fb43b83d43b2c305846e44b23b2e032e9caf30 AS mariner-distroless FROM --platform=linux/${ARCH} go AS builder ARG OS @@ -30,18 +32,10 @@ COPY --from=builder /go/bin/azure-cns /usr/local/bin/azure-cns ENTRYPOINT [ "/usr/local/bin/azure-cns" ] EXPOSE 10090 -# skopeo inspect --override-os windows docker://mcr.microsoft.com/windows/nanoserver:ltsc2019 --format "{{.Name}}@{{.Digest}}" -FROM mcr.microsoft.com/windows/nanoserver@sha256:7f6649348a11655e3576463fd6d55c29248f97405f8e643cab2409009339f520 AS ltsc2019 +# mcr.microsoft.com/oss/kubernetes/windows-host-process-containers-base-image:v1.0.0 +FROM --platform=windows/${ARCH} mcr.microsoft.com/oss/kubernetes/windows-host-process-containers-base-image@sha256:b4c9637e032f667c52d1eccfa31ad8c63f1b035e8639f3f48a510536bf34032b as hpc -# skopeo inspect --override-os windows docker://mcr.microsoft.com/windows/nanoserver:ltsc2022 --format "{{.Name}}@{{.Digest}}" -FROM mcr.microsoft.com/windows/nanoserver@sha256:244113e50a678a25a63930780f9ccafd22e1a37aa9e3d93295e4cebf0f170a11 AS 
ltsc2022 - -# skopeo inspect --override-os windows docker://mcr.microsoft.com/windows/nanoserver:ltsc2025 --format "{{.Name}}@{{.Digest}}" ## 2025 isn't tagged yet -FROM mcr.microsoft.com/windows/nanoserver/insider@sha256:67e0ab7f3a79cd73be4a18bae24659c03b294aed0dbeaa624feb3810931f0bd2 AS ltsc2025 - -FROM ${OS_VERSION} AS windows -COPY --from=builder /azure-container-networking/cns/kubeconfigtemplate.yaml kubeconfigtemplate.yaml -COPY --from=builder /azure-container-networking/npm/examples/windows/setkubeconfigpath.ps1 setkubeconfigpath.ps1 +FROM hpc as windows COPY --from=builder /go/bin/azure-cns /azure-cns.exe ENTRYPOINT ["azure-cns.exe"] EXPOSE 10090 diff --git a/cns/Dockerfile.tmpl b/cns/Dockerfile.tmpl new file mode 100644 index 0000000000..34bde0dd8c --- /dev/null +++ b/cns/Dockerfile.tmpl @@ -0,0 +1,41 @@ +# {{.RENDER_MSG}} +# SOURCE: {{.SRC}} +ARG ARCH +ARG OS_VERSION +ARG OS + +# {{.GO_IMG}} +FROM --platform=linux/${ARCH} {{.GO_PIN}} AS go + +# {{.MARINER_CORE_IMG}} +FROM {{.MARINER_CORE_PIN}} AS mariner-core + +# {{.MARINER_DISTROLESS_IMG}} +FROM {{.MARINER_DISTROLESS_PIN}} AS mariner-distroless + +FROM --platform=linux/${ARCH} go AS builder +ARG OS +ARG CNS_AI_ID +ARG CNS_AI_PATH +ARG VERSION +WORKDIR /azure-container-networking +COPY . . +RUN GOOS=$OS CGO_ENABLED=0 go build -a -o /go/bin/azure-cns -ldflags "-X main.version="$VERSION" -X "$CNS_AI_PATH"="$CNS_AI_ID"" -gcflags="-dwarflocationlists=true" cns/service/*.go + +FROM mariner-core AS iptables +RUN tdnf install -y iptables + +FROM mariner-distroless AS linux +COPY --from=iptables /usr/sbin/*tables* /usr/sbin/ +COPY --from=iptables /usr/lib /usr/lib +COPY --from=builder /go/bin/azure-cns /usr/local/bin/azure-cns +ENTRYPOINT [ "/usr/local/bin/azure-cns" ] +EXPOSE 10090 + +# {{.WIN_HPC_IMG}} +FROM --platform=windows/${ARCH} {{.WIN_HPC_PIN}} as hpc + +FROM hpc as windows +COPY --from=builder /go/bin/azure-cns /azure-cns.exe +ENTRYPOINT ["azure-cns.exe"] +EXPOSE 10090 diff --git a/cns/NetworkContainerContract.go b/cns/NetworkContainerContract.go index ae0f3bc5bf..406c45b554 100644 --- a/cns/NetworkContainerContract.go +++ b/cns/NetworkContainerContract.go @@ -9,6 +9,7 @@ import ( "github.com/Azure/azure-container-networking/cns/types" "github.com/Azure/azure-container-networking/crd/nodenetworkconfig/api/v1alpha" + "github.com/Azure/azure-container-networking/network/policy" "github.com/google/uuid" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" @@ -18,6 +19,7 @@ import ( const ( SetOrchestratorType = "/network/setorchestratortype" GetHomeAz = "/homeaz" + GetNCList = "/nclist" GetVMUniqueID = "/metadata/vmuniqueid" CreateOrUpdateNetworkContainer = "/network/createorupdatenetworkcontainer" DeleteNetworkContainer = "/network/deletenetworkcontainer" @@ -99,10 +101,13 @@ const ( Managed = "Managed" CRD = "CRD" MultiTenantCRD = "MultiTenantCRD" + AzureHost = "AzureHost" ) -var ErrInvalidNCID = errors.New("invalid NetworkContainerID") -var ErrInvalidIP = errors.New("invalid IP") +var ( + ErrInvalidNCID = errors.New("invalid NetworkContainerID") + ErrInvalidIP = errors.New("invalid IP") +) // CreateNetworkContainerRequest specifies request to create a network container or network isolation boundary. 
type CreateNetworkContainerRequest struct { @@ -196,14 +201,14 @@ func (f PodInfoByIPProviderFunc) PodInfoByIP() (map[string]PodInfo, error) { return f() } -var GlobalPodInfoScheme podInfoScheme +var GlobalPodInfoScheme = InterfaceIDPodInfoScheme // podInfoScheme indicates which schema should be used when generating // the map key in the Key() function on a podInfo object. type podInfoScheme int const ( - KubernetesPodInfoScheme podInfoScheme = iota + _ podInfoScheme = iota InterfaceIDPodInfoScheme InfraIDPodInfoScheme ) @@ -280,12 +285,8 @@ func (p *podInfo) Key() string { switch p.Version { case InfraIDPodInfoScheme: return p.PodInfraContainerID - case InterfaceIDPodInfoScheme: - return p.PodInterfaceID - case KubernetesPodInfoScheme: - return p.PodName + ":" + p.PodNamespace default: - return p.PodName + ":" + p.PodNamespace + return p.PodInterfaceID } } @@ -309,6 +310,21 @@ func (p *podInfo) SecondaryInterfacesExist() bool { return p.SecondaryInterfaceSet } +func (p *podInfo) UnmarshalJSON(b []byte) error { + type alias podInfo + // Unmarshal into a temporary struct to avoid infinite recursion + a := &struct { + *alias + }{ + alias: (*alias)(p), + } + if err := json.Unmarshal(b, a); err != nil { + return errors.Wrap(err, "failed to unmarshal podInfo") + } + p.Version = GlobalPodInfoScheme + return nil +} + // NewPodInfo returns an implementation of PodInfo that returns the passed // configuration for their namesake functions. func NewPodInfo(infraContainerID, interfaceID, name, namespace string) PodInfo { @@ -323,14 +339,12 @@ func NewPodInfo(infraContainerID, interfaceID, name, namespace string) PodInfo { } } -// UnmarshalPodInfo wraps json.Unmarshal to return an implementation of -// PodInfo. func UnmarshalPodInfo(b []byte) (PodInfo, error) { p := &podInfo{} - if err := json.Unmarshal(b, p); err != nil { + err := json.Unmarshal(b, p) + if err != nil { return nil, err } - p.Version = GlobalPodInfoScheme return p, nil } @@ -500,6 +514,8 @@ type PodIpInfo struct { Routes []Route // PnpId is set for backend interfaces, Pnp Id identifies VF. 
Plug and play id(pnp) is also called as PCI ID PnPID string + // Default-deny ACLs to configure on HNS endpoints for SwiftV2 Windows nodes + EndpointPolicies []policy.Policy } type HostIPInfo struct { diff --git a/cns/NetworkContainerContract_test.go b/cns/NetworkContainerContract_test.go index ab0197e787..fc17a58a4d 100644 --- a/cns/NetworkContainerContract_test.go +++ b/cns/NetworkContainerContract_test.go @@ -23,6 +23,7 @@ func TestUnmarshalPodInfo(t *testing.T) { PodName: "pod", PodNamespace: "namespace", }, + Version: GlobalPodInfoScheme, }, }, { @@ -33,6 +34,7 @@ PodName: "pod", PodNamespace: "namespace", }, + Version: GlobalPodInfoScheme, }, }, { @@ -44,7 +46,8 @@ } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := UnmarshalPodInfo(tt.b) + got := &podInfo{} + err := json.Unmarshal(tt.b, got) if tt.wantErr { assert.Error(t, err) return @@ -58,7 +61,6 @@ func TestNewPodInfoFromIPConfigsRequest(t *testing.T) { GlobalPodInfoScheme = InterfaceIDPodInfoScheme - defer func() { GlobalPodInfoScheme = KubernetesPodInfoScheme }() tests := []struct { name string req IPConfigsRequest diff --git a/cns/api.go b/cns/api.go index 872b67c735..6974a4207f 100644 --- a/cns/api.go +++ b/cns/api.go @@ -22,16 +22,13 @@ const ( DeleteNetworkPath = "/network/delete" CreateHnsNetworkPath = "/network/hns/create" DeleteHnsNetworkPath = "/network/hns/delete" - ReserveIPAddressPath = "/network/ip/reserve" - ReleaseIPAddressPath = "/network/ip/release" GetHostLocalIPPath = "/network/ip/hostlocal" - GetIPAddressUtilizationPath = "/network/ip/utilization" - GetUnhealthyIPAddressesPath = "/network/ipaddresses/unhealthy" GetHealthReportPath = "/network/health" NumberOfCPUCoresPath = "/hostcpucores" CreateHostNCApipaEndpointPath = "/network/createhostncapipaendpoint" DeleteHostNCApipaEndpointPath = "/network/deletehostncapipaendpoint" NmAgentSupportedApisPath = "/network/nmagentsupportedapis" + NMAgentGetNCListAPIPath = "/nclist" V1Prefix = "/v0.1" V2Prefix = "/v0.2" EndpointPath = "/network/endpoints/" @@ -170,7 +167,8 @@ func (i *IPConfigurationStatus) UnmarshalJSON(b []byte) error { } } if s, ok := m["PodInfo"]; ok { - pi, err := UnmarshalPodInfo(s) + pi := NewPodInfo("", "", "", "") + err := json.Unmarshal(s, pi) if err != nil { return errors.Wrap(err, "failed to unmarshal key PodInfo to PodInfo") } @@ -357,9 +355,15 @@ type NmAgentSupportedApisResponse struct { SupportedApis []string } +type NCListResponse struct { + Response Response `json:"response"` + NCList []string `json:"ncList"` +} + type HomeAzResponse struct { - IsSupported bool `json:"isSupported"` - HomeAz uint `json:"homeAz"` + IsSupported bool `json:"isSupported"` + HomeAz uint `json:"homeAz"` + NmaAppliedTheIPV6Fix bool `json:"NmaAppliedTheIPV6Fix"` } type GetHomeAzResponse struct { diff --git a/cns/azure-cns-windows.yaml b/cns/azure-cns-windows.yaml index 1b0eba16cd..24d24c5932 100644 --- a/cns/azure-cns-windows.yaml +++ b/cns/azure-cns-windows.yaml @@ -35,17 +35,14 @@ spec: imagePullPolicy: IfNotPresent securityContext: privileged: true - command: ["powershell.exe"] + command: ["azure-cns.exe"] args: - [ '.\setkubeconfigpath.ps1', ";", 'powershell.exe', '.\azure-cns.exe', + [ '-c', "tcp://$(CNSIpAddress):$(CNSPort)", '-t', "$(CNSLogTarget)", '-o', "$(CNSLogDir)", '-storefilepath', "$(CNSStoreFilePath)", '-config-path', "%CONTAINER_SANDBOX_MOUNT_POINT%\\$(CNS_CONFIGURATION_PATH)", -
'--kubeconfig', '.\kubeconfig', ] volumeMounts: - name: log diff --git a/cns/azure-cns.yaml b/cns/azure-cns.yaml index 260bb775c1..afb5b128a6 100644 --- a/cns/azure-cns.yaml +++ b/cns/azure-cns.yaml @@ -82,7 +82,7 @@ spec: operator: NotIn values: - virtual-kubelet - - key: beta.kubernetes.io/os + - key: kubernetes.io/os operator: In values: - linux @@ -189,4 +189,4 @@ data: "ManageEndpointState": false, "ProgramSNATIPTables" : false } -# Toggle ManageEndpointState and ProgramSNATIPTables to true for delegated IPAM use case. \ No newline at end of file +# Toggle ManageEndpointState and ProgramSNATIPTables to true for delegated IPAM use case. diff --git a/cns/client/client.go b/cns/client/client.go index 804580b8d8..1021d6f412 100644 --- a/cns/client/client.go +++ b/cns/client/client.go @@ -1028,30 +1028,33 @@ func (c *Client) GetEndpoint(ctx context.Context, endpointID string) (*restserve // build the request u := c.routes[cns.EndpointAPI] uString := u.String() + endpointID + var response restserver.GetEndpointResponse req, err := http.NewRequestWithContext(ctx, http.MethodGet, uString, http.NoBody) if err != nil { - return nil, errors.Wrap(err, "failed to build request") + response.Response.ReturnCode = types.UnexpectedError + return &response, errors.Wrap(err, "failed to build request") } + req.Header.Set(headerContentType, contentTypeJSON) res, err := c.client.Do(req) if err != nil { - return nil, errors.Wrap(err, "http request failed") + response.Response.ReturnCode = types.ConnectionError + return &response, &ConnectionFailureErr{cause: err} } defer res.Body.Close() if res.StatusCode != http.StatusOK { - return nil, errors.Errorf("http response %d", res.StatusCode) + response.Response.ReturnCode = types.UnexpectedError + return &response, errors.Errorf("http response %d", res.StatusCode) } - - var response restserver.GetEndpointResponse err = json.NewDecoder(res.Body).Decode(&response) if err != nil { - return nil, errors.Wrap(err, "failed to decode GetEndpointResponse") + response.Response.ReturnCode = types.UnexpectedError + return &response, errors.Wrap(err, "failed to decode GetEndpointResponse") } - if response.Response.ReturnCode != 0 { - return nil, errors.New(response.Response.Message) + return &response, errors.New(response.Response.Message) } return &response, nil @@ -1076,7 +1079,7 @@ func (c *Client) UpdateEndpoint(ctx context.Context, endpointID string, ipInfo m req.Header.Set(headerContentType, contentTypeJSON) res, err := c.client.Do(req) if err != nil { - return nil, errors.Wrap(err, "http request failed with error from server") + return nil, &ConnectionFailureErr{cause: err} } defer res.Body.Close() diff --git a/cns/client/client_test.go b/cns/client/client_test.go index 6535882642..205a1df87e 100644 --- a/cns/client/client_test.go +++ b/cns/client/client_test.go @@ -152,7 +152,7 @@ func TestMain(m *testing.M) { config := common.ServiceConfig{} httpRestService, err := restserver.NewHTTPRestService(&config, &fakes.WireserverClientFake{}, - &fakes.WireserverProxyFake{}, &fakes.NMAgentClientFake{}, nil, nil, nil, + &fakes.WireserverProxyFake{}, &restserver.IPtablesProvider{}, &fakes.NMAgentClientFake{}, nil, nil, nil, fakes.NewMockIMDSClient()) svc = httpRestService httpRestService.Name = "cns-test-server" diff --git a/cns/cnireconciler/version.go b/cns/cnireconciler/version.go deleted file mode 100644 index c6c6ccd27a..0000000000 --- a/cns/cnireconciler/version.go +++ /dev/null @@ -1,42 +0,0 @@ -package cnireconciler - -import ( - "fmt" - - 
"github.com/Azure/azure-container-networking/cni/client" - semver "github.com/hashicorp/go-version" - "github.com/pkg/errors" - "k8s.io/utils/exec" -) - -// >= 1.4.7 is required due to a bug in CNI when the statefile is empty -// even though the command existed since 1.4.2. -const lastCNIWithoutDumpStateVer = "1.4.6" - -// IsDumpStateVer checks if the CNI executable is a version that -// has the dump state command required to initialize CNS from CNI -// state and returns the result of that test or an error. Will always -// return false when there is an error unless the error was caused -// by the CNI not being a semver, in which case we'll assume we can -// use the command. -func IsDumpStateVer() (bool, error) { - return isDumpStateVer(exec.New()) -} - -func isDumpStateVer(exec exec.Interface) (bool, error) { - needVer, err := semver.NewVersion(lastCNIWithoutDumpStateVer) - if err != nil { - return false, err - } - cnicli := client.New(exec) - ver, err := cnicli.GetVersion() - if err != nil { - // If the error was that the CNI isn't a valid semver, assume we have the - // the dump state command - if errors.Is(err, client.ErrSemVerParse) { - return true, nil - } - return false, fmt.Errorf("failed to invoke CNI client.GetVersion(): %w", err) - } - return ver.GreaterThan(needVer), nil -} diff --git a/cns/cnireconciler/version_test.go b/cns/cnireconciler/version_test.go deleted file mode 100644 index aa04319650..0000000000 --- a/cns/cnireconciler/version_test.go +++ /dev/null @@ -1,76 +0,0 @@ -package cnireconciler - -import ( - "testing" - - testutils "github.com/Azure/azure-container-networking/test/utils" - "github.com/stretchr/testify/assert" - "k8s.io/utils/exec" -) - -func newCNIVersionFakeExec(ver string) exec.Interface { - calls := []testutils.TestCmd{ - {Cmd: []string{"/opt/cni/bin/azure-vnet", "-v"}, Stdout: ver}, - } - - fake := testutils.GetFakeExecWithScripts(calls) - return fake -} - -func TestIsDumpStateVer(t *testing.T) { - tests := []struct { - name string - exec exec.Interface - want bool - wantErr bool - }{ - { - name: "bad ver", - exec: newCNIVersionFakeExec(`Azure CNI Version v1.4.1`), - want: false, - wantErr: false, - }, - { - name: "bad dirty ver", - exec: newCNIVersionFakeExec(`Azure CNI Version v1.4.0-2-g984c5a5e-dirty`), - want: false, - wantErr: false, - }, - { - name: "good ver", - exec: newCNIVersionFakeExec(`Azure CNI Version v1.4.7`), - want: true, - wantErr: false, - }, - { - name: "good dirty ver", - exec: newCNIVersionFakeExec(`Azure CNI Version v1.4.7-7-g7b97e1eb`), - want: true, - wantErr: false, - }, - { - name: "non-semver", - exec: newCNIVersionFakeExec(`Azure CNI Version v1.4.35_Win2019OverlayFix`), - want: true, - wantErr: false, - }, - { - name: "non-semver hotfix ver", - exec: newCNIVersionFakeExec(`Azure CNI Version v1.4.44.4`), - want: true, - wantErr: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := isDumpStateVer(tt.exec) - if tt.wantErr { - assert.Error(t, err) - return - } else { - assert.NoError(t, err) - } - assert.Equal(t, tt.want, got) - }) - } -} diff --git a/cns/configuration/cns_config.json b/cns/configuration/cns_config.json index 6be0daafd3..81ef6c9b05 100644 --- a/cns/configuration/cns_config.json +++ b/cns/configuration/cns_config.json @@ -5,6 +5,7 @@ "HeartBeatIntervalInMins": 30, "RefreshIntervalInSecs": 15, "SnapshotIntervalInMins": 60, + "ConfigSnapshotIntervalInMins": 60, "TelemetryBatchIntervalInSecs": 15, "TelemetryBatchSizeBytes": 16384 }, @@ -33,5 +34,6 @@ 
"MellanoxMonitorIntervalSecs": 30, "AZRSettings": { "PopulateHomeAzCacheRetryIntervalSecs": 60 - } + }, + "MinTLSVersion": "TLS 1.2" } diff --git a/cns/configuration/configuration.go b/cns/configuration/configuration.go index e295e1e752..9ec5f8664f 100644 --- a/cns/configuration/configuration.go +++ b/cns/configuration/configuration.go @@ -5,10 +5,12 @@ import ( "encoding/json" "os" "path/filepath" + "runtime" "strings" "github.com/Azure/azure-container-networking/cns" "github.com/Azure/azure-container-networking/cns/logger" + loggerv2 "github.com/Azure/azure-container-networking/cns/logger/v2" "github.com/Azure/azure-container-networking/common" "github.com/pkg/errors" ) @@ -25,16 +27,19 @@ type CNSConfig struct { CNIConflistFilepath string CNIConflistScenario string ChannelMode string + EnableAPIServerHealthPing bool EnableAsyncPodDelete bool EnableCNIConflistGeneration bool EnableIPAMv2 bool EnableK8sDevicePlugin bool + EnableLoggerV2 bool EnablePprof bool EnableStateMigration bool EnableSubnetScarcity bool EnableSwiftV2 bool InitializeFromCNI bool KeyVaultSettings KeyVaultSettings + Logger loggerv2.Config MSISettings MSISettings ManageEndpointState bool ManagedSettings ManagedSettings @@ -53,6 +58,7 @@ type CNSConfig struct { WatchPods bool `json:"-"` WireserverIP string GRPCSettings GRPCSettings + MinTLSVersion string } type TelemetrySettings struct { @@ -78,6 +84,8 @@ type TelemetrySettings struct { DebugMode bool // Interval for sending snapshot events. SnapshotIntervalInMins int + // Interval for sending config snapshot events. + ConfigSnapshotIntervalInMins int // AppInsightsInstrumentationKey allows the user to override the default appinsights ikey AppInsightsInstrumentationKey string } @@ -228,6 +236,15 @@ func SetCNSConfigDefaults(config *CNSConfig) { if config.GRPCSettings.Port == 0 { config.GRPCSettings.Port = 8080 } + + if config.MinTLSVersion == "" { + config.MinTLSVersion = "TLS 1.2" + } config.GRPCSettings.Enable = false config.WatchPods = config.EnableIPAMv2 || config.EnableSwiftV2 } + +// isStalessCNIMode verify if the CNI is running stateless mode +func (cnsconfig *CNSConfig) IsStalessCNIWindows() bool { + return !cnsconfig.InitializeFromCNI && cnsconfig.ManageEndpointState && runtime.GOOS == "windows" +} diff --git a/cns/configuration/configuration_test.go b/cns/configuration/configuration_test.go index 589fd97a7e..186c92c376 100644 --- a/cns/configuration/configuration_test.go +++ b/cns/configuration/configuration_test.go @@ -80,15 +80,17 @@ func TestReadConfigFromFile(t *testing.T) { HeartBeatIntervalInMins: 30, RefreshIntervalInSecs: 15, SnapshotIntervalInMins: 60, + ConfigSnapshotIntervalInMins: 60, TelemetryBatchIntervalInSecs: 15, TelemetryBatchSizeBytes: 16384, }, AZRSettings: AZRSettings{ PopulateHomeAzCacheRetryIntervalSecs: 60, }, - UseHTTPS: true, - UseMTLS: true, - WireserverIP: "168.63.129.16", + UseHTTPS: true, + UseMTLS: true, + WireserverIP: "168.63.129.16", + MinTLSVersion: "TLS 1.3", }, wantErr: false, }, @@ -220,6 +222,7 @@ func TestSetCNSConfigDefaults(t *testing.T) { IPAddress: "localhost", Port: 8080, }, + MinTLSVersion: "TLS 1.2", }, }, { @@ -250,6 +253,7 @@ func TestSetCNSConfigDefaults(t *testing.T) { IPAddress: "192.168.1.1", Port: 9090, }, + MinTLSVersion: "TLS 1.3", }, want: CNSConfig{ ChannelMode: "Other", @@ -279,6 +283,7 @@ func TestSetCNSConfigDefaults(t *testing.T) { IPAddress: "192.168.1.1", Port: 9090, }, + MinTLSVersion: "TLS 1.3", }, }, } diff --git a/cns/configuration/testdata/good.json b/cns/configuration/testdata/good.json index 
185484ede1..9fda404c78 100644 --- a/cns/configuration/testdata/good.json +++ b/cns/configuration/testdata/good.json @@ -26,6 +26,7 @@ "HeartBeatIntervalInMins": 30, "RefreshIntervalInSecs": 15, "SnapshotIntervalInMins": 60, + "ConfigSnapshotIntervalInMins": 60, "TelemetryBatchIntervalInSecs": 15, "TelemetryBatchSizeBytes": 16384 }, @@ -34,5 +35,6 @@ "WireserverIP": "168.63.129.16", "AZRSettings": { "PopulateHomeAzCacheRetryIntervalSecs": 60 - } + }, + "MinTLSVersion": "TLS 1.3" } diff --git a/cns/deviceplugin/plugin.go b/cns/deviceplugin/plugin.go new file mode 100644 index 0000000000..68dd98d3a3 --- /dev/null +++ b/cns/deviceplugin/plugin.go @@ -0,0 +1,184 @@ +package deviceplugin + +import ( + "context" + "fmt" + "net" + "os" + "path" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/Azure/azure-container-networking/crd/multitenancy/api/v1alpha1" + "github.com/pkg/errors" + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1" +) + +type Plugin struct { + Logger *zap.Logger + ResourceName string + SocketWatcher *SocketWatcher + Socket string + deviceCountMutex sync.Mutex + deviceCount int + deviceType v1alpha1.DeviceType + kubeletSocket string + deviceCheckInterval time.Duration + devicePluginDirectory string +} + +func NewPlugin(l *zap.Logger, resourceName string, socketWatcher *SocketWatcher, pluginDir string, + initialDeviceCount int, deviceType v1alpha1.DeviceType, kubeletSocket string, deviceCheckInterval time.Duration, +) *Plugin { + return &Plugin{ + Logger: l.With(zap.String("resourceName", resourceName)), + ResourceName: resourceName, + SocketWatcher: socketWatcher, + Socket: getSocketName(pluginDir, deviceType), + deviceCount: initialDeviceCount, + deviceType: deviceType, + kubeletSocket: kubeletSocket, + deviceCheckInterval: deviceCheckInterval, + devicePluginDirectory: pluginDir, + } +} + +// Run runs the plugin until the context is cancelled, restarting the server as needed +func (p *Plugin) Run(ctx context.Context) { + defer p.mustCleanUp() + for { + select { + case <-ctx.Done(): + return + default: + p.Logger.Info("starting device plugin for resource", zap.String("resource", p.ResourceName)) + if err := p.run(ctx); err != nil { + p.Logger.Error("device plugin for resource exited", zap.String("resource", p.ResourceName), zap.Error(err)) + } + } + } +} + +// Here we start the gRPC server and wait for it to be ready +// Once the server is ready, device plugin registers with the Kubelet +// so that it can start serving the kubelet requests +func (p *Plugin) run(ctx context.Context) error { + childCtx, cancel := context.WithCancel(ctx) + defer cancel() + + s := NewServer(p.Logger, p.Socket, p, p.deviceCheckInterval) + // Run starts the grpc server and blocks until an error or context is cancelled + runErrChan := make(chan error, 2) //nolint:gomnd // disabled in favor of readability + go func(errChan chan error) { + if err := s.Run(childCtx); err != nil { + errChan <- err + } + }(runErrChan) + + // Wait till the server is ready before registering with kubelet + // This call is not blocking and returns as soon as the server is ready + readyErrChan := make(chan error, 2) //nolint:gomnd // disabled in favor of readability + go func(errChan chan error) { + errChan <- s.Ready(childCtx) + }(readyErrChan) + + select { + case err := <-runErrChan: + return errors.Wrap(err, "error starting grpc server") + case err := <-readyErrChan: + if err != nil { + return errors.Wrap(err, "error 
waiting on grpc server to be ready") + } + case <-ctx.Done(): + return nil + } + + p.Logger.Info("registering with kubelet") + // register with kubelet + if err := p.registerWithKubelet(childCtx); err != nil { + return errors.Wrap(err, "failed to register with kubelet") + } + + // run until the socket goes away or the context is cancelled + <-p.SocketWatcher.WatchSocket(childCtx, p.Socket) + return nil +} + +func (p *Plugin) registerWithKubelet(ctx context.Context) error { + conn, err := grpc.Dial(p.kubeletSocket, grpc.WithTransportCredentials(insecure.NewCredentials()), //nolint:staticcheck // TODO: Move to grpc.NewClient method + grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) { + d := &net.Dialer{} + conn, err := d.DialContext(ctx, "unix", addr) + if err != nil { + return nil, errors.Wrap(err, "failed to dial context") + } + return conn, nil + })) + if err != nil { + return errors.Wrap(err, "error connecting to kubelet") + } + defer conn.Close() + + client := v1beta1.NewRegistrationClient(conn) + request := &v1beta1.RegisterRequest{ + Version: v1beta1.Version, + Endpoint: filepath.Base(p.Socket), + ResourceName: p.ResourceName, + } + if _, err = client.Register(ctx, request); err != nil { + return errors.Wrap(err, "error sending request to register with kubelet") + } + return nil +} + +func (p *Plugin) mustCleanUp() { + p.Logger.Info("cleaning up device plugin") + if err := os.Remove(p.Socket); err != nil && !os.IsNotExist(err) { + p.Logger.Panic("failed to remove socket", zap.Error(err)) + } +} + +func (p *Plugin) CleanOldState() error { + entries, err := os.ReadDir(p.devicePluginDirectory) + if err != nil { + return errors.Wrap(err, "error listing existing device plugin sockets") + } + for _, entry := range entries { + if strings.HasPrefix(entry.Name(), path.Base(getSocketPrefix(p.devicePluginDirectory, p.deviceType))) { + // try to delete it + f := path.Join(p.devicePluginDirectory, entry.Name()) + if err := os.Remove(f); err != nil { + return errors.Wrapf(err, "error removing old socket %q", f) + } + } + } + return nil +} + +func (p *Plugin) UpdateDeviceCount(count int) { + p.deviceCountMutex.Lock() + p.deviceCount = count + p.deviceCountMutex.Unlock() +} + +func (p *Plugin) getDeviceCount() int { + p.deviceCountMutex.Lock() + defer p.deviceCountMutex.Unlock() + return p.deviceCount +} + +// getSocketPrefix returns a fully qualified path prefix for a given device type. 
For example, if the device plugin directory is +// /home/foo and the device type is acn.azure.com/vnet-nic, this function returns /home/foo/acn.azure.com_vnet-nic +func getSocketPrefix(devicePluginDirectory string, deviceType v1alpha1.DeviceType) string { + sanitizedDeviceName := strings.ReplaceAll(string(deviceType), "/", "_") + return path.Join(devicePluginDirectory, sanitizedDeviceName) +} + +func getSocketName(devicePluginDirectory string, deviceType v1alpha1.DeviceType) string { + return fmt.Sprintf("%s-%d.sock", getSocketPrefix(devicePluginDirectory, deviceType), time.Now().Unix()) +} diff --git a/cns/deviceplugin/pluginmanager.go b/cns/deviceplugin/pluginmanager.go new file mode 100644 index 0000000000..38adafd78a --- /dev/null +++ b/cns/deviceplugin/pluginmanager.go @@ -0,0 +1,113 @@ +package deviceplugin + +import ( + "context" + "sync" + "time" + + "github.com/Azure/azure-container-networking/crd/multitenancy/api/v1alpha1" + "github.com/pkg/errors" + "go.uber.org/zap" + "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1" +) + +const ( + defaultDevicePluginDirectory = "/var/lib/kubelet/device-plugins" + defaultDeviceCheckInterval = 5 * time.Second +) + +type pluginManagerOptions struct { + devicePluginDirectory string + kubeletSocket string + deviceCheckInterval time.Duration +} + +type pluginManagerOption func(*pluginManagerOptions) + +func PluginManagerSocketPrefix(prefix string) func(*pluginManagerOptions) { + return func(opts *pluginManagerOptions) { + opts.devicePluginDirectory = prefix + } +} + +func PluginManagerKubeletSocket(socket string) func(*pluginManagerOptions) { + return func(opts *pluginManagerOptions) { + opts.kubeletSocket = socket + } +} + +func PluginDeviceCheckInterval(i time.Duration) func(*pluginManagerOptions) { + return func(opts *pluginManagerOptions) { + opts.deviceCheckInterval = i + } +} + +// PluginManager runs device plugins for vnet nics and ib nics +type PluginManager struct { + Logger *zap.Logger + plugins []*Plugin + socketWatcher *SocketWatcher + options pluginManagerOptions + mu sync.Mutex +} + +func NewPluginManager(l *zap.Logger, opts ...pluginManagerOption) *PluginManager { + logger := l.With(zap.String("component", "devicePlugin")) + socketWatcher := NewSocketWatcher(logger) + options := pluginManagerOptions{ + devicePluginDirectory: defaultDevicePluginDirectory, + kubeletSocket: v1beta1.KubeletSocket, + deviceCheckInterval: defaultDeviceCheckInterval, + } + for _, o := range opts { + o(&options) + } + return &PluginManager{ + Logger: logger, + socketWatcher: socketWatcher, + options: options, + } +} + +func (pm *PluginManager) AddPlugin(deviceType v1alpha1.DeviceType, deviceCount int) *PluginManager { + pm.mu.Lock() + defer pm.mu.Unlock() + p := NewPlugin(pm.Logger, string(deviceType), pm.socketWatcher, + pm.options.devicePluginDirectory, deviceCount, deviceType, pm.options.kubeletSocket, pm.options.deviceCheckInterval) + pm.plugins = append(pm.plugins, p) + return pm +} + +// Run runs the plugin manager until the context is cancelled or error encountered +func (pm *PluginManager) Run(ctx context.Context) error { + // clean up any leftover state from previous failed plugins + // this can happen if the process crashes before it is able to clean up after itself + for _, plugin := range pm.plugins { + if err := plugin.CleanOldState(); err != nil { + return errors.Wrap(err, "error cleaning state from previous plugin process") + } + } + + var wg sync.WaitGroup + for _, plugin := range pm.plugins { + wg.Add(1) //nolint:gomnd // in favor of 
readability + go func(p *Plugin) { + defer wg.Done() + p.Run(ctx) + }(plugin) + } + + wg.Wait() + return nil +} + +func (pm *PluginManager) TrackDevices(deviceType v1alpha1.DeviceType, count int) { + pm.mu.Lock() + defer pm.mu.Unlock() + for _, plugin := range pm.plugins { + if plugin.deviceType == deviceType { + plugin.UpdateDeviceCount(count) + break + } + } +} diff --git a/cns/deviceplugin/pluginmanager_test.go b/cns/deviceplugin/pluginmanager_test.go new file mode 100644 index 0000000000..c5e4154887 --- /dev/null +++ b/cns/deviceplugin/pluginmanager_test.go @@ -0,0 +1,311 @@ +package deviceplugin_test + +import ( + "context" + "net" + "os" + "path" + "testing" + "time" + + "github.com/Azure/azure-container-networking/cns/deviceplugin" + "github.com/Azure/azure-container-networking/crd/multitenancy/api/v1alpha1" + "github.com/avast/retry-go/v3" + "github.com/pkg/errors" + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1" +) + +func TestPluginManagerStartStop(t *testing.T) { + logger, err := zap.NewDevelopment() + if err != nil { + t.Fatalf("error getting logger: %v", err) + } + + // start up the fake kubelet + fakeKubeletSocketDir := os.TempDir() + kubeletSocket := path.Join(fakeKubeletSocketDir, "kubelet.sock") + kubeletErrChan := make(chan error) + vnetPluginRegisterChan := make(chan string) + ibPluginRegisterChan := make(chan string) + ctx, cancel := context.WithCancel(context.Background()) + go func() { + kubeletErr := runFakeKubelet(ctx, kubeletSocket, vnetPluginRegisterChan, ibPluginRegisterChan, fakeKubeletSocketDir) + kubeletErrChan <- kubeletErr + }() + + // run the plugin manager + expectedVnetNICs := 2 + expectedIBNICs := 3 + manager := deviceplugin.NewPluginManager(logger, + deviceplugin.PluginManagerSocketPrefix(fakeKubeletSocketDir), + deviceplugin.PluginManagerKubeletSocket(kubeletSocket), + deviceplugin.PluginDeviceCheckInterval(time.Second)) + + manager.AddPlugin(v1alpha1.DeviceTypeVnetNIC, expectedVnetNICs) + manager.AddPlugin(v1alpha1.DeviceTypeInfiniBandNIC, expectedIBNICs) + + errChan := make(chan error) + go func() { + errChan <- manager.Run(ctx) + }() + + // wait till the two plugins register themselves with fake kubelet + vnetPluginEndpoint := <-vnetPluginRegisterChan + ibPluginEndpoint := <-ibPluginRegisterChan + + // assert the plugin reports the expected vnet nic count + gotVnetNICCount := getDeviceCount(t, vnetPluginEndpoint) + if gotVnetNICCount != expectedVnetNICs { + t.Fatalf("expected %d vnet nics but got %d", expectedVnetNICs, gotVnetNICCount) + } + gotIBNICCount := getDeviceCount(t, ibPluginEndpoint) + if gotIBNICCount != expectedIBNICs { + t.Fatalf("expected %d ib nics but got %d", expectedIBNICs, gotIBNICCount) + } + + // update the device counts and assert they match expected after some time + expectedVnetNICs = 5 + expectedIBNICs = 6 + manager.TrackDevices(v1alpha1.DeviceTypeVnetNIC, expectedVnetNICs) + + manager.TrackDevices(v1alpha1.DeviceTypeInfiniBandNIC, expectedIBNICs) + + checkDeviceCounts := func() error { + gotVnetNICCount := getDeviceCount(t, vnetPluginEndpoint) + if gotVnetNICCount != expectedVnetNICs { + return errors.Errorf("expected %d vnet nics but got %d", expectedVnetNICs, gotVnetNICCount) + } + gotIBNICCount := getDeviceCount(t, ibPluginEndpoint) + if gotIBNICCount != expectedIBNICs { + return errors.Errorf("expected %d ib nics but got %d", expectedIBNICs, gotIBNICCount) + } + return nil + } + + deviceCountErr := retry.Do( + 
checkDeviceCounts, + retry.Attempts(6), + retry.Delay(500*time.Millisecond), + ) + + if deviceCountErr != nil { + t.Fatalf("failed to verify device counts: %v", deviceCountErr) + } + + // call allocate method and check the response + req := &v1beta1.AllocateRequest{ + ContainerRequests: []*v1beta1.ContainerAllocateRequest{ + { + DevicesIDs: []string{"device-0", "device-1"}, + }, + }, + } + allocateResp := getAllocateResponse(t, vnetPluginEndpoint, req) + + if len(allocateResp.ContainerResponses[0].Envs) != len(req.ContainerRequests[0].DevicesIDs) { + t.Fatalf("expected allocations %v but received allocations %v", len(req.ContainerRequests[0].DevicesIDs), len(allocateResp.ContainerResponses[0].Envs)) + } + + // call getDevicePluginOptions method + _, err = getDevicePluginOptionsResponse(vnetPluginEndpoint) + if err != nil { + t.Fatalf("error calling getDevicePluginOptions: %v", err) + } + + // call getPreferredAllocation method + _, err = getPreferredAllocationResponse(vnetPluginEndpoint) + if err != nil { + t.Fatalf("error calling getPreferredAllocation: %v", err) + } + + // call preStartContainer method + _, err = getPreStartContainerResponse(vnetPluginEndpoint) + if err != nil { + t.Fatalf("error calling PreStartContainer: %v", err) + } + + // shut down the plugin manager and fake kubelet + cancel() + + // ensure the plugin manager didn't report an error + if err := <-errChan; err != nil { + t.Fatalf("unexpected error: %v", err) + } + + // ensure the fake kubelet didn't report an error + if err := <-kubeletErrChan; err != nil { + t.Fatalf("unexpected error from fake kubelet: %v", err) + } +} + +type fakeKubelet struct { + vnetPluginRegisterChan chan string + ibPluginRegisterChan chan string + pluginPrefix string +} + +func (f *fakeKubelet) Register(_ context.Context, req *v1beta1.RegisterRequest) (*v1beta1.Empty, error) { + switch req.ResourceName { + case string(v1alpha1.DeviceTypeVnetNIC): + f.vnetPluginRegisterChan <- path.Join(f.pluginPrefix, req.Endpoint) + case string(v1alpha1.DeviceTypeInfiniBandNIC): + f.ibPluginRegisterChan <- path.Join(f.pluginPrefix, req.Endpoint) + } + return &v1beta1.Empty{}, nil +} + +func runFakeKubelet(ctx context.Context, address string, vnetPluginRegisterChan, ibPluginRegisterChan chan string, pluginPrefix string) error { + if err := os.Remove(address); err != nil && !os.IsNotExist(err) { + return errors.Wrap(err, "error cleaning up previous kubelet socket") + } + + k := &fakeKubelet{ + vnetPluginRegisterChan: vnetPluginRegisterChan, + ibPluginRegisterChan: ibPluginRegisterChan, + pluginPrefix: pluginPrefix, + } + grpcServer := grpc.NewServer() + v1beta1.RegisterRegistrationServer(grpcServer, k) + + l, err := net.Listen("unix", address) + if err != nil { + return errors.Wrap(err, "error from fake kubelet listening on socket") + } + errChan := make(chan error, 2) + go func() { + errChan <- grpcServer.Serve(l) + }() + defer grpcServer.Stop() + + select { + case err := <-errChan: + return errors.Wrap(err, "error running fake kubelet grpc server") + case <-ctx.Done(): + } + return nil +} + +func getDeviceCount(t *testing.T, pluginAddress string) int { + conn, err := grpc.Dial(pluginAddress, grpc.WithTransportCredentials(insecure.NewCredentials()), //nolint:staticcheck // TODO: Move to grpc.NewClient method + grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) { + d := &net.Dialer{} + conn, err := d.DialContext(ctx, "unix", addr) + if err != nil { + return nil, errors.Wrap(err, "failed to dial context") + } + return conn, nil + })) + if err 
!= nil { + t.Fatalf("error connecting to fake kubelet: %v", err) + } + defer conn.Close() + + client := v1beta1.NewDevicePluginClient(conn) + lwClient, err := client.ListAndWatch(context.Background(), &v1beta1.Empty{}) + if err != nil { + t.Fatalf("error from listAndWatch: %v", err) + } + + resp, err := lwClient.Recv() + if err != nil { + t.Fatalf("error from listAndWatch Recv: %v", err) + } + + return len(resp.Devices) +} + +func getAllocateResponse(t *testing.T, pluginAddress string, req *v1beta1.AllocateRequest) *v1beta1.AllocateResponse { + conn, err := grpc.Dial(pluginAddress, grpc.WithTransportCredentials(insecure.NewCredentials()), //nolint:staticcheck // TODO: Move to grpc.NewClient method + + grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) { + d := &net.Dialer{} + conn, err := d.DialContext(ctx, "unix", addr) + if err != nil { + return nil, errors.Wrap(err, "failed to dial context") + } + return conn, nil + })) + if err != nil { + t.Fatalf("error connecting to fake kubelet: %v", err) + } + defer conn.Close() + + client := v1beta1.NewDevicePluginClient(conn) + resp, err := client.Allocate(context.Background(), req) + if err != nil { + t.Fatalf("error from Allocate: %v", err) + } + return resp +} + +func getDevicePluginOptionsResponse(pluginAddress string) (*v1beta1.DevicePluginOptions, error) { + conn, err := grpc.Dial(pluginAddress, grpc.WithTransportCredentials(insecure.NewCredentials()), //nolint:staticcheck // TODO: Move to grpc.NewClient method + grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) { + d := &net.Dialer{} + conn, err := d.DialContext(ctx, "unix", addr) + if err != nil { + return nil, errors.Wrap(err, "failed to dial context") + } + return conn, nil + })) + if err != nil { + return nil, errors.Wrap(err, "error connecting to fake kubelet") + } + defer conn.Close() + + client := v1beta1.NewDevicePluginClient(conn) + resp, err := client.GetDevicePluginOptions(context.Background(), &v1beta1.Empty{}) + if err != nil { + return nil, errors.Wrapf(err, "error calling GetDevicePluginOptions") + } + return resp, nil +} + +func getPreferredAllocationResponse(pluginAddress string) (*v1beta1.PreferredAllocationResponse, error) { + conn, err := grpc.Dial(pluginAddress, grpc.WithTransportCredentials(insecure.NewCredentials()), //nolint:staticcheck // TODO: Move to grpc.NewClient method + grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) { + d := &net.Dialer{} + conn, err := d.DialContext(ctx, "unix", addr) + if err != nil { + return nil, errors.Wrap(err, "failed to dial context") + } + return conn, nil + })) + if err != nil { + return nil, errors.Wrap(err, "error connecting to fake kubelet") + } + defer conn.Close() + + client := v1beta1.NewDevicePluginClient(conn) + resp, err := client.GetPreferredAllocation(context.Background(), &v1beta1.PreferredAllocationRequest{}) + if err != nil { + return nil, errors.Wrapf(err, "error calling GetPreferredAllocation") + } + return resp, nil +} + +func getPreStartContainerResponse(pluginAddress string) (*v1beta1.PreStartContainerResponse, error) { + conn, err := grpc.Dial(pluginAddress, grpc.WithTransportCredentials(insecure.NewCredentials()), //nolint:staticcheck // TODO: Move to grpc.NewClient method + grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) { + d := &net.Dialer{} + conn, err := d.DialContext(ctx, "unix", addr) + if err != nil { + return nil, errors.Wrap(err, "failed to dial context") + } + return conn, nil + 
})) + if err != nil { + return nil, errors.Wrap(err, "error connecting to fake kubelet") + } + defer conn.Close() + + client := v1beta1.NewDevicePluginClient(conn) + resp, err := client.PreStartContainer(context.Background(), &v1beta1.PreStartContainerRequest{}) + if err != nil { + return nil, errors.Wrapf(err, "error calling PreStartContainer") + } + return resp, nil +} diff --git a/cns/deviceplugin/server.go b/cns/deviceplugin/server.go new file mode 100644 index 0000000000..c5243368b9 --- /dev/null +++ b/cns/deviceplugin/server.go @@ -0,0 +1,166 @@ +package deviceplugin + +import ( + "context" + "fmt" + "net" + "time" + + "github.com/pkg/errors" + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1" +) + +const devicePrefix = "NIC-" + +type deviceCounter interface { + getDeviceCount() int +} + +type Server struct { + address string + logger *zap.Logger + deviceCounter deviceCounter + shutdownCh <-chan struct{} + deviceCheckInterval time.Duration +} + +func NewServer(logger *zap.Logger, address string, deviceCounter deviceCounter, deviceCheckInterval time.Duration) *Server { + return &Server{ + address: address, + logger: logger, + deviceCounter: deviceCounter, + deviceCheckInterval: deviceCheckInterval, + } +} + +// Run starts the grpc server and blocks until an error or context is cancelled. Wait on Ready to know when the server is ready. +func (s *Server) Run(ctx context.Context) error { + grpcServer := grpc.NewServer() + v1beta1.RegisterDevicePluginServer(grpcServer, s) + + childCtx, cancel := context.WithCancel(ctx) + defer cancel() + s.shutdownCh = childCtx.Done() + + l, err := net.Listen("unix", s.address) + if err != nil { + return errors.Wrap(err, "error listening on socket") + } + defer l.Close() + + go func() { + <-ctx.Done() + grpcServer.GracefulStop() + }() + + if err := grpcServer.Serve(l); err != nil && !errors.Is(err, grpc.ErrServerStopped) { + return errors.Wrap(err, "error running grpc server") + } + return nil +} + +// Ready blocks until the server is ready +func (s *Server) Ready(ctx context.Context) error { + c, err := grpc.DialContext(ctx, s.address, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock(), //nolint:staticcheck // TODO: Move to grpc.NewClient method + grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) { + conn, err := (&net.Dialer{}).DialContext(ctx, "unix", addr) + if err != nil { + return nil, errors.Wrap(err, "failed to dial context") + } + return conn, nil + }), + ) + if err != nil { + return errors.Wrap(err, "error dialing local grpc server") + } + if err := c.Close(); err != nil { + return errors.Wrap(err, "error closing connection to local grpc server") + } + return nil +} + +// This is a dummy implementation for allocate to conform to the interface requirements. 
+// Allocate is called during container creation so that the Device +// Plugin can run device specific operations and instruct Kubelet +// of the steps to make the Device available in the container +// We are not using this functionality currently +func (s *Server) Allocate(_ context.Context, req *v1beta1.AllocateRequest) (*v1beta1.AllocateResponse, error) { + s.logger.Info("allocate request", zap.Any("req", *req)) + resps := make([]*v1beta1.ContainerAllocateResponse, len(req.ContainerRequests)) + for i, containerReq := range req.ContainerRequests { + resp := &v1beta1.ContainerAllocateResponse{ + Envs: make(map[string]string), + } + for j := range containerReq.DevicesIDs { + resp.Envs[fmt.Sprintf("%s%d", devicePrefix, j)] = containerReq.DevicesIDs[j] + } + resps[i] = resp + } + r := &v1beta1.AllocateResponse{ + ContainerResponses: resps, + } + return r, nil +} + +func (s *Server) ListAndWatch(_ *v1beta1.Empty, stream v1beta1.DevicePlugin_ListAndWatchServer) error { + // send the initial count right away + advertisedCount := s.deviceCounter.getDeviceCount() + devices := make([]*v1beta1.Device, advertisedCount) + for i := range devices { + devices[i] = &v1beta1.Device{ + ID: fmt.Sprintf("%s%d", devicePrefix, i), + Health: v1beta1.Healthy, + } + } + if err := stream.Send(&v1beta1.ListAndWatchResponse{ + Devices: devices, + }); err != nil { + return errors.Wrap(err, "error sending listAndWatch response") + } + + // every interval, check if the current count has changed from what we've previously sent, and if so, send the new count + ticker := time.NewTicker(s.deviceCheckInterval) + defer ticker.Stop() + + for { + select { + case <-s.shutdownCh: + return nil + case <-stream.Context().Done(): + return errors.Wrap(stream.Context().Err(), "client context done") + case <-ticker.C: + currentCount := s.deviceCounter.getDeviceCount() + if currentCount == advertisedCount { + continue + } + advertisedCount = currentCount + devices := make([]*v1beta1.Device, advertisedCount) + for i := range devices { + devices[i] = &v1beta1.Device{ + ID: fmt.Sprintf("%s%d", devicePrefix, i), + Health: v1beta1.Healthy, + } + } + if err := stream.Send(&v1beta1.ListAndWatchResponse{ + Devices: devices, + }); err != nil { + return errors.Wrap(err, "error sending listAndWatch response") + } + } + } +} + +func (s *Server) GetDevicePluginOptions(context.Context, *v1beta1.Empty) (*v1beta1.DevicePluginOptions, error) { + return &v1beta1.DevicePluginOptions{}, nil +} + +func (s *Server) GetPreferredAllocation(context.Context, *v1beta1.PreferredAllocationRequest) (*v1beta1.PreferredAllocationResponse, error) { + return &v1beta1.PreferredAllocationResponse{}, nil +} + +func (s *Server) PreStartContainer(context.Context, *v1beta1.PreStartContainerRequest) (*v1beta1.PreStartContainerResponse, error) { + return &v1beta1.PreStartContainerResponse{}, nil +} diff --git a/cns/deviceplugin/socketwatcher.go b/cns/deviceplugin/socketwatcher.go new file mode 100644 index 0000000000..05b7df602b --- /dev/null +++ b/cns/deviceplugin/socketwatcher.go @@ -0,0 +1,75 @@ +package deviceplugin + +import ( + "context" + "os" + "sync" + "time" + + "go.uber.org/zap" +) + +const defaultStatInterval time.Duration = 5 * time.Second + +type SocketWatcherOption func(*socketWatcherOptions) + +type socketWatcherOptions struct { + statInterval time.Duration +} + +func SocketWatcherStatInterval(d time.Duration) SocketWatcherOption { + return func(o *socketWatcherOptions) { + o.statInterval = d + } +} + +type SocketWatcher struct { + socketChans map[string]<-chan 
diff --git a/cns/deviceplugin/socketwatcher.go b/cns/deviceplugin/socketwatcher.go
new file mode 100644
index 0000000000..05b7df602b
--- /dev/null
+++ b/cns/deviceplugin/socketwatcher.go
@@ -0,0 +1,75 @@
+package deviceplugin
+
+import (
+	"context"
+	"os"
+	"sync"
+	"time"
+
+	"go.uber.org/zap"
+)
+
+const defaultStatInterval time.Duration = 5 * time.Second
+
+type SocketWatcherOption func(*socketWatcherOptions)
+
+type socketWatcherOptions struct {
+	statInterval time.Duration
+}
+
+func SocketWatcherStatInterval(d time.Duration) SocketWatcherOption {
+	return func(o *socketWatcherOptions) {
+		o.statInterval = d
+	}
+}
+
+type SocketWatcher struct {
+	socketChans map[string]<-chan struct{}
+	mutex       sync.Mutex
+	logger      *zap.Logger
+	options     socketWatcherOptions
+}
+
+func NewSocketWatcher(logger *zap.Logger, opts ...SocketWatcherOption) *SocketWatcher {
+	defaultOptions := socketWatcherOptions{
+		statInterval: defaultStatInterval,
+	}
+	for _, o := range opts {
+		o(&defaultOptions)
+	}
+	return &SocketWatcher{
+		socketChans: make(map[string]<-chan struct{}),
+		logger:      logger,
+		options:     defaultOptions,
+	}
+}
+
+// WatchSocket returns a channel that will be closed when the socket is removed or the context is cancelled
+func (s *SocketWatcher) WatchSocket(ctx context.Context, socket string) <-chan struct{} {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	// if a socket is already being watched, return its channel
+	if ch, ok := s.socketChans[socket]; ok {
+		return ch
+	}
+	// otherwise, start watching it and return a new channel
+	socketChan := make(chan struct{})
+	s.socketChans[socket] = socketChan
+	go func() {
+		defer close(socketChan)
+		ticker := time.NewTicker(s.options.statInterval)
+		defer ticker.Stop()
+		for {
+			select {
+			case <-ctx.Done():
+				return
+			case <-ticker.C:
+				if _, err := os.Lstat(socket); err != nil {
+					s.logger.Info("failed to stat socket", zap.Error(err))
+					return
+				}
+			}
+		}
+	}()
+	return socketChan
+}
diff --git a/cns/deviceplugin/socketwatcher_test.go b/cns/deviceplugin/socketwatcher_test.go
new file mode 100644
index 0000000000..e987358481
--- /dev/null
+++ b/cns/deviceplugin/socketwatcher_test.go
@@ -0,0 +1,136 @@
+package deviceplugin_test
+
+import (
+	"context"
+	"os"
+	"path/filepath"
+	"testing"
+	"time"
+
+	"github.com/Azure/azure-container-networking/cns/deviceplugin"
+	"go.uber.org/zap"
+)
+
+func TestWatchContextCancelled(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	logger, _ := zap.NewDevelopment()
+	s := deviceplugin.NewSocketWatcher(logger)
+	done := make(chan struct{})
+	go func(done chan struct{}) {
+		<-s.WatchSocket(ctx, "testdata/socket.sock")
+		close(done)
+	}(done)
+
+	// done chan should still be open
+	select {
+	case <-done:
+		t.Fatal("socket watcher isn't watching but the context is still not cancelled")
+	default:
+	}
+
+	cancel()
+
+	// done chan should be closed since the context was cancelled
+	select {
+	case <-done:
+	case <-time.After(5 * time.Second):
+		t.Fatal("socket watcher is still watching 5 seconds after context is cancelled")
+	}
+}
+
+func TestWatchSocketDeleted(t *testing.T) {
+	// Create a temporary directory
+	tempDir, err := os.MkdirTemp("", "socket-watcher-test-")
+	if err != nil {
+		t.Fatalf("error creating temporary directory: %v", err)
+	}
+	defer os.RemoveAll(tempDir) // Ensure the directory is cleaned up
+
+	socket := filepath.Join(tempDir, "to-be-deleted.sock")
+	if _, err := os.Create(socket); err != nil {
+		t.Fatalf("error creating test file %s: %v", socket, err)
+	}
+
+	logger, _ := zap.NewDevelopment()
+	s := deviceplugin.NewSocketWatcher(logger, deviceplugin.SocketWatcherStatInterval(time.Second))
+	done := make(chan struct{})
+	go func(done chan struct{}) {
+		<-s.WatchSocket(context.Background(), socket)
+		close(done)
+	}(done)
+
+	// done chan should still be open
+	select {
+	case <-done:
+		t.Fatal("socket watcher isn't watching but the file still exists")
+	default:
+	}
+
+	if err := os.Remove(socket); err != nil && !os.IsNotExist(err) {
+		t.Fatalf("failed to remove socket")
+	}
+
+	// done chan should be closed since the socket file was deleted
+	select {
+	case <-done:
+	case <-time.After(5 * time.Second):
+		t.Fatal("socket watcher is still watching 5 seconds after file is deleted")
+	}
+}
+
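A hedged sketch of the intended consumption on the plugin side: block until the kubelet's device-plugin socket disappears (a kubelet restart) or the context ends, then re-register. v1beta1.KubeletSocket is the kubelet socket path constant from k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1; the re-registration helper is hypothetical.

	watcher := deviceplugin.NewSocketWatcher(logger)
	// The returned channel is closed when the socket is removed or ctx is cancelled.
	<-watcher.WatchSocket(ctx, v1beta1.KubeletSocket)
	reregisterPlugin(ctx) // hypothetical: dial the new kubelet socket and Register again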
+func TestWatchSocketTwice(t *testing.T) {
+	// Create a temporary directory
+	tempDir, err := os.MkdirTemp("", "socket-watcher-test-")
+	if err != nil {
+		t.Fatalf("error creating temporary directory: %v", err)
+	}
+	defer os.RemoveAll(tempDir) // Ensure the directory is cleaned up
+
+	socket := filepath.Join(tempDir, "to-be-deleted.sock")
+	if _, err := os.Create(socket); err != nil {
+		t.Fatalf("error creating test file %s: %v", socket, err)
+	}
+
+	logger, _ := zap.NewDevelopment()
+	s := deviceplugin.NewSocketWatcher(logger, deviceplugin.SocketWatcherStatInterval(time.Second))
+	done1 := make(chan struct{})
+	done2 := make(chan struct{})
+	go func(done chan struct{}) {
+		<-s.WatchSocket(context.Background(), socket)
+		close(done)
+	}(done1)
+	go func(done chan struct{}) {
+		<-s.WatchSocket(context.Background(), socket)
+		close(done)
+	}(done2)
+
+	// done chans should still be open
+	select {
+	case <-done1:
+		t.Fatal("socket watcher isn't watching but the file still exists")
+	default:
+	}
+
+	select {
+	case <-done2:
+		t.Fatal("socket watcher isn't watching but the file still exists")
+	default:
+	}
+
+	if err := os.Remove(socket); err != nil && !os.IsNotExist(err) {
+		t.Fatalf("failed to remove socket")
+	}
+
+	// done chans should be closed since the socket file was deleted
+	select {
+	case <-done1:
+	case <-time.After(5 * time.Second):
+		t.Fatal("socket watcher is still watching 5 seconds after file is deleted")
+	}
+
+	select {
+	case <-done2:
+	case <-time.After(5 * time.Second):
+		t.Fatal("socket watcher is still watching 5 seconds after file is deleted")
+	}
+}
diff --git a/cns/deviceplugin/testdata/socket.sock b/cns/deviceplugin/testdata/socket.sock
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/cns/endpointmanager/endpointmanager.go b/cns/endpointmanager/endpointmanager.go
new file mode 100644
index 0000000000..e71f0a6cbc
--- /dev/null
+++ b/cns/endpointmanager/endpointmanager.go
@@ -0,0 +1,21 @@
+package endpointmanager
+
+import (
+	"context"
+
+	"github.com/Azure/azure-container-networking/cns"
+	"github.com/Azure/azure-container-networking/cns/restserver"
+)
+
+type EndpointManager struct {
+	cli releaseIPsClient // nolint
+}
+
+type releaseIPsClient interface {
+	ReleaseIPs(ctx context.Context, ipconfig cns.IPConfigsRequest) error
+	GetEndpoint(ctx context.Context, endpointID string) (*restserver.GetEndpointResponse, error)
+}
+
+func WithPlatformReleaseIPsManager(cli releaseIPsClient) *EndpointManager {
+	return &EndpointManager{cli: cli}
+}
diff --git a/cns/endpointmanager/endpointmanager_linux.go b/cns/endpointmanager/endpointmanager_linux.go
new file mode 100644
index 0000000000..4441d84110
--- /dev/null
+++ b/cns/endpointmanager/endpointmanager_linux.go
@@ -0,0 +1,13 @@
+package endpointmanager
+
+import (
+	"context"
+
+	"github.com/Azure/azure-container-networking/cns"
+	"github.com/pkg/errors"
+)
+
+// ReleaseIPs implements the fsnotify ReleaseIPsClient interface for async release of the IP addresses
+func (em *EndpointManager) ReleaseIPs(ctx context.Context, ipconfigreq cns.IPConfigsRequest) error {
+	return errors.Wrap(em.cli.ReleaseIPs(ctx, ipconfigreq), "failed to release IP from CNS")
+}
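For orientation, a hedged sketch of how the endpoint manager might be wired into the fsnotify async-delete watcher (fsnotify.New's signature appears later in this diff; the directory path and cnsClient variable are hypothetical):

	em := endpointmanager.WithPlatformReleaseIPsManager(cnsClient) // satisfies fsnotify's ReleaseIPsClient
	w, err := fsnotify.New(em, "/var/run/azure-cns/deleteIDs", logger) // path is hypothetical
	if err != nil {
		return errors.Wrap(err, "failed to create fsnotify watcher")
	}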
"github.com/Azure/azure-container-networking/cns" + "github.com/Azure/azure-container-networking/cns/hnsclient" + "github.com/Azure/azure-container-networking/cns/logger" + "github.com/pkg/errors" +) + +// ReleaseIPs implements an Interface in fsnotify for async delete of the HNS endpoint and IP addresses +func (em *EndpointManager) ReleaseIPs(ctx context.Context, ipconfigreq cns.IPConfigsRequest) error { + logger.Printf("deleting HNS Endpoint asynchronously") + // remove HNS endpoint + if err := em.deleteEndpoint(ctx, ipconfigreq.InfraContainerID); err != nil { + logger.Errorf("failed to remove HNS endpoint %s", err.Error()) + } + return errors.Wrap(em.cli.ReleaseIPs(ctx, ipconfigreq), "failed to release IP from CNS") +} + +// deleteEndpoint API to get the state and then remove assiciated HNS +func (em *EndpointManager) deleteEndpoint(ctx context.Context, containerid string) error { + endpointResponse, err := em.cli.GetEndpoint(ctx, containerid) + if err != nil { + return errors.Wrap(err, "failed to read the endpoint from CNS state") + } + for _, ipInfo := range endpointResponse.EndpointInfo.IfnameToIPMap { + hnsEndpointID := ipInfo.HnsEndpointID + // we need to get the HNSENdpoint via the IP address if the HNSEndpointID is not present in the statefile + if ipInfo.HnsEndpointID == "" { + if hnsEndpointID, err = hnsclient.GetHNSEndpointbyIP(ipInfo.IPv4, ipInfo.IPv6); err != nil { + return errors.Wrap(err, "failed to find HNS endpoint with id") + } + } + logger.Printf("deleting HNS Endpoint with id %v", hnsEndpointID) + if err := hnsclient.DeleteHNSEndpointbyID(hnsEndpointID); err != nil { + return errors.Wrap(err, "failed to delete HNS endpoint with id "+ipInfo.HnsEndpointID) + } + } + return nil +} diff --git a/cns/fakes/iptablesfake.go b/cns/fakes/iptablesfake.go new file mode 100644 index 0000000000..f80fd075c4 --- /dev/null +++ b/cns/fakes/iptablesfake.go @@ -0,0 +1,103 @@ +package fakes + +import ( + "errors" + "strings" + + "github.com/Azure/azure-container-networking/iptables" +) + +var ( + errChainExists = errors.New("chain already exists") + errChainNotFound = errors.New("chain not found") + errRuleExists = errors.New("rule already exists") +) + +type IPTablesMock struct { + state map[string]map[string][]string +} + +func NewIPTablesMock() *IPTablesMock { + return &IPTablesMock{ + state: make(map[string]map[string][]string), + } +} + +func (c *IPTablesMock) ensureTableExists(table string) { + _, exists := c.state[table] + if !exists { + c.state[table] = make(map[string][]string) + } +} + +func (c *IPTablesMock) ChainExists(table, chain string) (bool, error) { + c.ensureTableExists(table) + + builtins := []string{iptables.Input, iptables.Output, iptables.Prerouting, iptables.Postrouting, iptables.Forward} + + _, exists := c.state[table][chain] + + // these chains always exist + for _, val := range builtins { + if chain == val && !exists { + c.state[table][chain] = []string{} + return true, nil + } + } + + return exists, nil +} + +func (c *IPTablesMock) NewChain(table, chain string) error { + c.ensureTableExists(table) + + exists, _ := c.ChainExists(table, chain) + + if exists { + return errChainExists + } + + c.state[table][chain] = []string{} + return nil +} + +func (c *IPTablesMock) Exists(table, chain string, rulespec ...string) (bool, error) { + c.ensureTableExists(table) + + chainExists, _ := c.ChainExists(table, chain) + if !chainExists { + return false, nil + } + + targetRule := strings.Join(rulespec, " ") + chainRules := c.state[table][chain] + + for _, chainRule := range 
chainRules { + if targetRule == chainRule { + return true, nil + } + } + return false, nil +} + +func (c *IPTablesMock) Append(table, chain string, rulespec ...string) error { + c.ensureTableExists(table) + + chainExists, _ := c.ChainExists(table, chain) + if !chainExists { + return errChainNotFound + } + + ruleExists, _ := c.Exists(table, chain, rulespec...) + if ruleExists { + return errRuleExists + } + + targetRule := strings.Join(rulespec, " ") + c.state[table][chain] = append(c.state[table][chain], targetRule) + return nil +} + +func (c *IPTablesMock) Insert(table, chain string, _ int, rulespec ...string) error { + return c.Append(table, chain, rulespec...) +} diff --git a/cns/fakes/nmagentclientfake.go b/cns/fakes/nmagentclientfake.go index 67988d98e6..a5359ca006 100644 --- a/cns/fakes/nmagentclientfake.go +++ b/cns/fakes/nmagentclientfake.go @@ -14,9 +14,10 @@ import ( // NMAgentClientFake can be used to query to VM Host info. type NMAgentClientFake struct { - SupportedAPIsF func(context.Context) ([]string, error) - GetNCVersionListF func(context.Context) (nmagent.NCVersionList, error) - GetHomeAzF func(context.Context) (nmagent.AzResponse, error) + SupportedAPIsF func(context.Context) ([]string, error) + GetNCVersionListF func(context.Context) (nmagent.NCVersionList, error) + GetHomeAzF func(context.Context) (nmagent.AzResponse, error) + GetInterfaceIPInfoF func(ctx context.Context) (nmagent.Interfaces, error) } func (n *NMAgentClientFake) SupportedAPIs(ctx context.Context) ([]string, error) { @@ -30,3 +31,7 @@ func (n *NMAgentClientFake) GetNCVersionList(ctx context.Context) (nmagent.NCVer func (n *NMAgentClientFake) GetHomeAz(ctx context.Context) (nmagent.AzResponse, error) { return n.GetHomeAzF(ctx) } + +func (n *NMAgentClientFake) GetInterfaceIPInfo(ctx context.Context) (nmagent.Interfaces, error) { + return n.GetInterfaceIPInfoF(ctx) +} diff --git a/cns/fsnotify/fsnotify.go b/cns/fsnotify/fsnotify.go index cc09751d88..16bff5a49d 100644 --- a/cns/fsnotify/fsnotify.go +++ b/cns/fsnotify/fsnotify.go @@ -15,12 +15,12 @@ import ( "golang.org/x/sync/errgroup" ) -type releaseIPsClient interface { +type ReleaseIPsClient interface { ReleaseIPs(ctx context.Context, ipconfig cns.IPConfigsRequest) error } type watcher struct { - cli releaseIPsClient + cli ReleaseIPsClient path string log *zap.Logger @@ -29,7 +29,7 @@ type watcher struct { } // Create the AsyncDelete watcher. 
-func New(cli releaseIPsClient, path string, logger *zap.Logger) (*watcher, error) { //nolint +func New(cli ReleaseIPsClient, path string, logger *zap.Logger) (*watcher, error) { //nolint // Add directory where intended deletes are kept if err := os.Mkdir(path, 0o755); err != nil && !errors.Is(err, fs.ErrExist) { //nolint logger.Error("error making directory", zap.String("path", path), zap.Error(err)) diff --git a/cns/healthserver/healthz.go b/cns/healthserver/healthz.go new file mode 100644 index 0000000000..66023ea0ec --- /dev/null +++ b/cns/healthserver/healthz.go @@ -0,0 +1,59 @@ +package healthserver + +import ( + "net/http" + + "github.com/Azure/azure-container-networking/crd/nodenetworkconfig/api/v1alpha" + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/healthz" +) + +var scheme = runtime.NewScheme() + +func init() { + utilruntime.Must(v1alpha.AddToScheme(scheme)) +} + +type Config struct { + PingAPIServer bool +} + +// NewHealthzHandlerWithChecks will return a [http.Handler] for CNS's /healthz endpoint. +// Depending on what we expect CNS to be able to read (based on the [configuration.CNSConfig]) +// then the checks registered to the handler will test for those expectations. For example, in +// ChannelMode: CRD, the health check will ensure that CNS is able to list NNCs successfully. +func NewHealthzHandlerWithChecks(cfg *Config) (http.Handler, error) { + checks := make(map[string]healthz.Checker) + if cfg.PingAPIServer { + cfg, err := ctrl.GetConfig() + if err != nil { + return nil, errors.Wrap(err, "failed to get kubeconfig") + } + cli, err := client.New(cfg, client.Options{ + Scheme: scheme, + }) + if err != nil { + return nil, errors.Wrap(err, "failed to build client") + } + checks["nnc"] = func(req *http.Request) error { + ctx := req.Context() + // we just care that we're allowed to List NNCs so set limit to 1 to minimize + // additional load on apiserver + if err := cli.List(ctx, &v1alpha.NodeNetworkConfigList{}, &client.ListOptions{ + Namespace: metav1.NamespaceSystem, + Limit: int64(1), + }); err != nil { + return errors.Wrap(err, "failed to list NodeNetworkConfig") + } + return nil + } + } + return &healthz.Handler{ + Checks: checks, + }, nil +} diff --git a/cns/healthserver/healthz_test.go b/cns/healthserver/healthz_test.go new file mode 100644 index 0000000000..bf300e8a33 --- /dev/null +++ b/cns/healthserver/healthz_test.go @@ -0,0 +1,290 @@ +package healthserver + +import ( + "fmt" + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/stretchr/testify/require" +) + +const nncCRD = `{ + "kind": "APIResourceList", + "apiVersion": "v1", + "groupVersion": "acn.azure.com/v1alpha", + "resources": [ + { + "name": "nodenetworkconfigs", + "singularName": "nodenetworkconfig", + "namespaced": true, + "kind": "NodeNetworkConfig", + "verbs": [ + "delete", + "deletecollection", + "get", + "list", + "patch", + "create", + "update", + "watch" + ], + "shortNames": [ + "nnc" + ], + "storageVersionHash": "aGVsbG93cmxk" + }, + { + "name": "nodenetworkconfigs/status", + "singularName": "", + "namespaced": true, + "kind": "NodeNetworkConfig", + "verbs": [ + "get", + "patch", + "update" + ] + } + ] +}` + +const nncResult = `{ + "apiVersion": "acn.azure.com/v1alpha", + "items": [ + { + "apiVersion": "acn.azure.com/v1alpha", + "kind": 
"NodeNetworkConfig", + "metadata": { + "creationTimestamp": "2024-12-04T20:42:17Z", + "finalizers": [ + "finalizers.acn.azure.com/dnc-operations" + ], + "generation": 1, + "labels": { + "kubernetes.azure.com/podnetwork-delegationguid": "", + "kubernetes.azure.com/podnetwork-subnet": "", + "kubernetes.azure.com/podnetwork-type": "overlay", + "managed": "true", + "owner": "aks-nodepool1-1234567-vmss000000" + }, + "managedFields": [ + { + "apiVersion": "acn.azure.com/v1alpha", + "fieldsType": "FieldsV1", + "fieldsV1": { + "f:metadata": { + "f:finalizers": { + ".": {}, + "v:\"finalizers.acn.azure.com/dnc-operations\"": {} + }, + "f:labels": { + ".": {}, + "f:kubernetes.azure.com/podnetwork-delegationguid": {}, + "f:kubernetes.azure.com/podnetwork-subnet": {}, + "f:kubernetes.azure.com/podnetwork-type": {}, + "f:managed": {}, + "f:owner": {} + }, + "f:ownerReferences": { + ".": {}, + "k:{\"uid\":\"f5117020-bbc5-11ef-8433-1b9e59caeb1d\"}": {} + } + }, + "f:spec": { + ".": {}, + "f:requestedIPCount": {} + } + }, + "manager": "dnc-rc", + "operation": "Update", + "time": "2024-12-04T20:42:17Z" + }, + { + "apiVersion": "acn.azure.com/v1alpha", + "fieldsType": "FieldsV1", + "fieldsV1": { + "f:status": { + ".": {}, + "f:assignedIPCount": {}, + "f:networkContainers": {} + } + }, + "manager": "dnc-rc", + "operation": "Update", + "subresource": "status", + "time": "2024-12-04T20:42:18Z" + } + ], + "name": "aks-nodepool1-1234567-vmss000000", + "namespace": "kube-system", + "ownerReferences": [ + { + "apiVersion": "v1", + "blockOwnerDeletion": true, + "controller": true, + "kind": "Node", + "name": "aks-nodepool1-1234567-vmss000000", + "uid": "02df1fcc-bbc6-11ef-a76a-4b1af8d399a2" + } + ], + "resourceVersion": "123456789", + "uid": "0dc75e5e-bbc6-11ef-878f-ab45432262d6" + }, + "spec": { + "requestedIPCount": 0 + }, + "status": { + "assignedIPCount": 256, + "networkContainers": [ + { + "assignmentMode": "static", + "id": "13f630c0-bbc6-11ef-b3b7-bb8e46de5973", + "nodeIP": "10.224.0.4", + "primaryIP": "10.244.2.0/24", + "subnetAddressSpace": "10.244.0.0/16", + "subnetName": "routingdomain_1f7eb6ba-bbc6-11ef-8c54-7b2c1e3cbbe4_overlaysubnet", + "type": "overlay", + "version": 0 + } + ] + } + } + ], + "kind": "NodeNetworkConfigList", + "metadata": { + "continue": "", + "resourceVersion": "9876543210" + } +}` + +func TestNewHealthzHandlerWithChecks(t *testing.T) { + tests := []struct { + name string + config *Config + apiStatusCode int + expectedHealthy bool + }{ + { + name: "list NNC gives 200 should indicate healthy", + config: &Config{ + PingAPIServer: true, + }, + apiStatusCode: http.StatusOK, + expectedHealthy: true, + }, + { + name: "unauthorized (401) from apiserver should be unhealthy", + config: &Config{ + PingAPIServer: true, + }, + apiStatusCode: http.StatusUnauthorized, + expectedHealthy: false, + }, + { + name: "channel nodesubnet should not call apiserver so it doesn't matter if the status code is a 401", + config: &Config{ + PingAPIServer: false, + }, + apiStatusCode: http.StatusUnauthorized, + expectedHealthy: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + configureLocalAPIServer(t, tt.apiStatusCode) + + responseRecorder := httptest.NewRecorder() + healthHandler, err := NewHealthzHandlerWithChecks(tt.config) + healthHandler = http.StripPrefix("/healthz", healthHandler) + require.NoError(t, err) + + healthHandler.ServeHTTP(responseRecorder, httptest.NewRequest("GET", "/healthz", http.NoBody)) + + require.Equal(t, tt.expectedHealthy, responseRecorder.Code == 
http.StatusOK) + }) + } +} + +func configureLocalAPIServer(t *testing.T, expectedNNCStatusCode int) { + // setup apiserver + server := setupMockAPIServer(expectedNNCStatusCode) + + // write kubeConfig for test server + kubeConfigFile, err := writeTmpKubeConfig(server.URL) + require.NoError(t, err) + + // set env var to kubeconfig + os.Setenv("KUBECONFIG", kubeConfigFile) + + t.Cleanup(func() { + server.Close() + os.Remove(kubeConfigFile) + os.Unsetenv("KUBECONFIG") + }) +} + +func writeTmpKubeConfig(host string) (string, error) { + tempKubeConfig := ` +apiVersion: v1 +clusters: +- cluster: + server: ` + host + ` + name: test-cluster +contexts: +- context: + cluster: test-cluster + user: test-user + name: test-context +current-context: test-context +kind: Config +preferences: {} +users: +- name: test-user + user: + token: test-token +` + kubeConfigFile, err := os.CreateTemp("", "kubeconfig") + if err != nil { + return "", fmt.Errorf("failed to create temp kubeconfig file: %w", err) + } + + _, err = kubeConfigFile.WriteString(tempKubeConfig) + if err != nil { + return "", fmt.Errorf("failed to write kubeconfig to temp file: %w", err) + } + kubeConfigFile.Close() + return kubeConfigFile.Name(), nil +} + +func setupMockAPIServer(code int) *httptest.Server { + // Start a mock HTTP server + mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Handle requests based on the path + switch r.URL.Path { + case "/apis/acn.azure.com/v1alpha": + _, err := w.Write([]byte(nncCRD)) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + case "/apis/acn.azure.com/v1alpha/namespaces/kube-system/nodenetworkconfigs": + if code == http.StatusOK { + w.Header().Set("Cache-Control", "no-cache, private") + w.Header().Set("Content-Type", "application/json") + _, err := w.Write([]byte(nncResult)) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + } else { + w.WriteHeader(code) + } + default: + w.WriteHeader(http.StatusNotFound) + } + })) + + return mockServer +} diff --git a/cns/hnsclient/hnsclient_windows.go b/cns/hnsclient/hnsclient_windows.go index cc24b7917a..b97781a17a 100644 --- a/cns/hnsclient/hnsclient_windows.go +++ b/cns/hnsclient/hnsclient_windows.go @@ -14,8 +14,10 @@ import ( "github.com/Azure/azure-container-networking/network/policy" "github.com/Microsoft/hcsshim" "github.com/Microsoft/hcsshim/hcn" + "github.com/pkg/errors" ) +// TODO redesign hnsclient on windows const ( // Name of the external hns network ExtHnsNetworkName = "ext" @@ -28,7 +30,6 @@ const ( // HNS network types hnsL2Bridge = "l2bridge" - hnsL2Tunnel = "l2tunnel" // hcnSchemaVersionMajor indicates major version number for hcn schema hcnSchemaVersionMajor = 2 @@ -52,6 +53,12 @@ const ( // Name of the loopback adapter needed to create Host NC apipa network hostNCLoopbackAdapterName = "LoopbackAdapterHostNCConnectivity" + // Name of the loopback adapter created by HNS for Host NC apipa network + vEthernethostNCLoopbackAdapterName = "vEthernet (" + hostNCLoopbackAdapterName + ")" + + // HNS rehydration issue requires this GW to be different than the loopback adapter ip, so we set it to .2 + defaultHnsGwIPAddress = "169.254.128.2" + hnsLoopbackAdapterIPAddress = "169.254.128.1" // protocolTCP indicates the TCP protocol identifier in HCN protocolTCP = "6" @@ -75,6 +82,9 @@ const ( // signals a APIPA endpoint type apipaEndpointType = "APIPA" + + // default network name used by HNS + defaultNetworkName = "azure" ) // 
Named Lock for network and endpoint creation/deletion
@@ -137,7 +147,7 @@ func CreateDefaultExtNetwork(networkType string) error {
 		return nil
 	}
 
-	if networkType != hnsL2Bridge && networkType != hnsL2Tunnel {
+	if networkType != hnsL2Bridge {
 		return fmt.Errorf("Invalid hns network type %s", networkType)
 	}
 
@@ -294,10 +304,19 @@ func createHostNCApipaNetwork(
 	}
 
 	// Create loopback adapter needed for this HNS network
-	if interfaceExists, _ := networkcontainers.InterfaceExists(hostNCLoopbackAdapterName); !interfaceExists {
+	// We first check for the existence of either the "LoopbackAdapterHostNCConnectivity" or the vEthernet (LoopbackAdapterHostNCConnectivity) interface.
+	// If neither exists, we create the loopback adapter with the specified IP configuration.
+	loopbackInterfaceExists, _ := networkcontainers.InterfaceExists(hostNCLoopbackAdapterName)
+	vethernetLoopbackInterfaceExists, _ := networkcontainers.InterfaceExists(vEthernethostNCLoopbackAdapterName)
+	if loopbackInterfaceExists {
+		logger.Printf("%s already created, skipping loopback interface creation", hostNCLoopbackAdapterName)
+	}
+	if vethernetLoopbackInterfaceExists {
+		logger.Printf("%s already created, skipping loopback interface creation", vEthernethostNCLoopbackAdapterName)
+	} else if !loopbackInterfaceExists && !vethernetLoopbackInterfaceExists {
 		ipconfig := cns.IPConfiguration{
 			IPSubnet: cns.IPSubnet{
-				IPAddress:    localIPConfiguration.GatewayIPAddress,
+				IPAddress:    hnsLoopbackAdapterIPAddress,
 				PrefixLength: localIPConfiguration.IPSubnet.PrefixLength,
 			},
 			GatewayIPAddress: localIPConfiguration.GatewayIPAddress,
@@ -500,13 +519,12 @@ func configureHostNCApipaEndpoint(
 	}
 
 	networkContainerApipaIP := localIPConfiguration.IPSubnet.IPAddress
-	hostApipaIP := localIPConfiguration.GatewayIPAddress
 	protocolList := []string{protocolICMPv4, protocolTCP, protocolUDP}
 
 	endpointPolicies, err := configureAclSettingHostNCApipaEndpoint(
 		protocolList,
 		networkContainerApipaIP,
-		hostApipaIP,
+		hnsLoopbackAdapterIPAddress,
 		allowNCToHostCommunication,
 		allowHostToNCCommunication,
 		ncPolicies)
@@ -519,8 +537,9 @@ func configureHostNCApipaEndpoint(
 		endpoint.Policies = append(endpoint.Policies, endpointPolicy)
 	}
 
+	// keep the Apipa Endpoint gw as 169.254.128.1 to make sure NC-to-host connectivity works for both Linux and Windows containers
 	hcnRoute := hcn.Route{
-		NextHop:           hostApipaIP,
+		NextHop:           hnsLoopbackAdapterIPAddress,
 		DestinationPrefix: "0.0.0.0/0",
 	}
 
@@ -569,6 +588,7 @@ func CreateHostNCApipaEndpoint(
 		return endpoint.Id, nil
 	}
 
+	updateGwForLocalIPConfiguration(&localIPConfiguration)
 	if network, err = createHostNCApipaNetwork(localIPConfiguration); err != nil {
 		logger.Errorf("[Azure CNS] Failed to create HostNCApipaNetwork. Error: %v", err)
 		return "", err
@@ -600,6 +620,17 @@ func CreateHostNCApipaEndpoint(
 	return endpoint.Id, nil
 }
 
+// updateGwForLocalIPConfiguration adjusts the gateway IP address for the apipa network and endpoint.
+// Currently, CNS uses the same IP address "169.254.128.1" for both the apipa gateway and the loopback adapter. This causes a conflict when HNS is restarted and cannot rehydrate the apipa endpoints.
+// This func overwrites the address with 169.254.128.2 when the gateway address is 169.254.128.1.
+func updateGwForLocalIPConfiguration(localIPConfiguration *cns.IPConfiguration) {
+	// When the gw address is 169.254.128.1, use .2 instead. If the gw address is not .1, it was
+	// configured by DNC and we should keep it
+	if localIPConfiguration.GatewayIPAddress == "169.254.128.1" {
+		localIPConfiguration.GatewayIPAddress = defaultHnsGwIPAddress
+	}
+}
+
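To make the rewrite concrete, a hedged before/after sketch (the non-.1 gateway value is hypothetical); the TestAdhocAdjustIPConfig test added later in this diff exercises the same two cases:

	cfg := cns.IPConfiguration{GatewayIPAddress: "169.254.128.1"}
	updateGwForLocalIPConfiguration(&cfg)
	// cfg.GatewayIPAddress == "169.254.128.2" (defaultHnsGwIPAddress)

	cfg = cns.IPConfiguration{GatewayIPAddress: "169.254.128.7"} // hypothetical DNC-configured gateway
	updateGwForLocalIPConfiguration(&cfg)
	// unchanged: non-.1 gateways are treated as DNC-configured and preserved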
 func getHostNCApipaEndpointName(
 	networkContainerID string) string {
 	return hostNCApipaEndpointNamePrefix + "-" + networkContainerID
@@ -685,3 +716,68 @@ func DeleteHostNCApipaEndpoint(
 
 	return nil
 }
+
+// DeleteHNSEndpointbyID deletes the HNS endpoint
+func DeleteHNSEndpointbyID(hnsEndpointID string) error {
+	var (
+		hcnEndpoint *hcn.HostComputeEndpoint
+		err         error
+	)
+
+	logger.Printf("Deleting hcn endpoint with id %v", hnsEndpointID)
+	hcnEndpoint, err = hcn.GetEndpointByID(hnsEndpointID)
+	if err != nil {
+		// If error is anything other than EndpointNotFoundError, return error.
+		// else log the error but don't return error because endpoint is already deleted.
+		var notFoundErr hcn.EndpointNotFoundError
+		if !errors.As(err, &notFoundErr) {
+			return fmt.Errorf("Failed to get hcn endpoint with id: %s due to err: %w", hnsEndpointID, err)
+		}
+
+		logger.Errorf("Delete called on the Endpoint which doesn't exist. Error:%v", err)
+		return nil
+	}
+
+	// Remove this endpoint from the namespace
+	if err = hcn.RemoveNamespaceEndpoint(hcnEndpoint.HostComputeNamespace, hcnEndpoint.Id); err != nil {
+		logger.Errorf("Failed to remove hcn endpoint %s from namespace %s due to err: %v", hcnEndpoint.Id, hcnEndpoint.HostComputeNamespace, err)
+	}
+
+	if err = hcnEndpoint.Delete(); err != nil {
+		return fmt.Errorf("Failed to delete endpoint: %s. Error: %w", hnsEndpointID, err)
+	}
+
+	logger.Printf("[Azure CNS] Successfully deleted endpoint: %+v", hnsEndpointID)
+
+	return nil
+}
+
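For context, a hedged usage sketch of the helper defined below (the IP literal is hypothetical; the caller in endpointmanager_windows.go passes the []net.IPNet slices recorded in the CNS state):

	ip := net.ParseIP("10.240.0.10") // hypothetical Pod IP
	endpointID, err := hnsclient.GetHNSEndpointbyIP([]net.IPNet{{IP: ip}}, nil)
	if err != nil {
		// no endpoint in the default "azure" network has this IP
	}

+// GetHNSEndpointbyIP returns the HNS Endpoint ID for the endpoint that matches a specific IP address.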
+func GetHNSEndpointbyIP(ipv4, ipv6 []net.IPNet) (string, error) {
+	logger.Printf("Fetching missing HNS endpoint id for endpoints in network with id %s", defaultNetworkName)
+	hnsResponse, err := hcn.GetNetworkByName(defaultNetworkName)
+	if err != nil || hnsResponse == nil {
+		return "", errors.Wrapf(err, "HNS Network or endpoints not found")
+	}
+	hcnEndpoints, err := hcn.ListEndpointsOfNetwork(hnsResponse.Id)
+	if err != nil {
+		return "", errors.Wrapf(err, "failed to fetch HNS endpoints for the given network")
+	}
+	for i := range hcnEndpoints {
+		for _, ipConfiguration := range hcnEndpoints[i].IpConfigurations {
+			for _, ip := range ipv4 {
+				if ipConfiguration.IpAddress == ip.IP.String() {
+					logger.Printf("Successfully found hcn endpoint id for endpoint %s with ip %s", hcnEndpoints[i].Id, ip.IP.String())
+					return hcnEndpoints[i].Id, nil
+				}
+			}
+			for _, ip := range ipv6 {
+				if ipConfiguration.IpAddress == ip.IP.String() {
+					logger.Printf("Successfully found hcn endpoint id for endpoint %s with ip %s", hcnEndpoints[i].Id, ip.IP.String())
+					return hcnEndpoints[i].Id, nil
+				}
+			}
+		}
+	}
+	return "", errors.New("no HNSEndpointID matches the IPAddress")
+}
diff --git a/cns/hnsclient/hnsclient_windows_test.go b/cns/hnsclient/hnsclient_windows_test.go
new file mode 100644
index 0000000000..8b01d2f839
--- /dev/null
+++ b/cns/hnsclient/hnsclient_windows_test.go
@@ -0,0 +1,35 @@
+package hnsclient
+
+import (
+	"testing"
+
+	"github.com/Azure/azure-container-networking/cns"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestAdhocAdjustIPConfig(t *testing.T) {
+	tests := []struct {
+		name     string
+		ipConfig cns.IPConfiguration
+		expected cns.IPConfiguration
+	}{
+		{
+			name:     "expect no change when gw address is not 169.254.128.1",
+			ipConfig: cns.IPConfiguration{GatewayIPAddress: "169.254.128.3"},
+			expected: cns.IPConfiguration{GatewayIPAddress: "169.254.128.3"},
+		},
+		{
+			name:     "expect default gw address is set when gw address is 169.254.128.1",
+			ipConfig: cns.IPConfiguration{GatewayIPAddress: "169.254.128.1"},
+			expected: cns.IPConfiguration{GatewayIPAddress: "169.254.128.2"},
+		},
+	}
+
+	for _, tt := range tests {
+		tt := tt
+		t.Run(tt.name, func(t *testing.T) {
+			updateGwForLocalIPConfiguration(&tt.ipConfig)
+			assert.Equal(t, tt.expected.GatewayIPAddress, tt.ipConfig.GatewayIPAddress)
+		})
+	}
+}
diff --git a/cns/ipamclient/ipamclient.go b/cns/ipamclient/ipamclient.go
deleted file mode 100644
index 369a3c4193..0000000000
--- a/cns/ipamclient/ipamclient.go
+++ /dev/null
@@ -1,255 +0,0 @@
-// Copyright 2017 Microsoft. All rights reserved.
-// MIT License
-
-package ipamclient
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-
-	cnmIpam "github.com/Azure/azure-container-networking/cnm/ipam"
-	ipam "github.com/Azure/azure-container-networking/ipam"
-	"github.com/Azure/azure-container-networking/log"
-)
-
-// IpamClient specifies a client to connect to Ipam Plugin.
-type IpamClient struct {
-	connectionURL string
-}
-
-// NewIpamClient create a new ipam client.
-func NewIpamClient(url string) (*IpamClient, error) {
-	if url == "" {
-		url = defaultIpamPluginURL
-	}
-	return &IpamClient{
-		connectionURL: url,
-	}, nil
-}
-
-// GetAddressSpace request to get address space ID. 
-func (ic *IpamClient) GetAddressSpace() (string, error) { - log.Printf("[Azure CNS] GetAddressSpace Request") - - client, err := getClient(ic.connectionURL) - if err != nil { - return "", err - } - - url := ic.connectionURL + cnmIpam.GetAddressSpacesPath - - res, err := client.Post(url, "application/json", nil) - if err != nil { - log.Printf("[Azure CNS] HTTP Post returned error %v", err.Error()) - return "", err - } - - defer res.Body.Close() - - if res.StatusCode == 200 { - var resp cnmIpam.GetDefaultAddressSpacesResponse - err := json.NewDecoder(res.Body).Decode(&resp) - if err != nil { - log.Printf("[Azure CNS] Error received while parsing GetAddressSpace response resp:%v err:%v", res.Body, err.Error()) - return "", err - } - - if resp.Err != "" { - log.Printf("[Azure CNS] GetAddressSpace received error response :%v", resp.Err) - return "", fmt.Errorf(resp.Err) - } - - return resp.LocalDefaultAddressSpace, nil - } - log.Printf("[Azure CNS] GetAddressSpace invalid http status code: %v err:%v", res.StatusCode, err.Error()) - return "", err -} - -// GetPoolID Request to get poolID. -func (ic *IpamClient) GetPoolID(asID, subnet string) (string, error) { - var body bytes.Buffer - log.Printf("[Azure CNS] GetPoolID Request") - - client, err := getClient(ic.connectionURL) - if err != nil { - return "", err - } - - url := ic.connectionURL + cnmIpam.RequestPoolPath - - payload := &cnmIpam.RequestPoolRequest{ - AddressSpace: asID, - Pool: subnet, - } - - json.NewEncoder(&body).Encode(payload) - - res, err := client.Post(url, "application/json", &body) - if err != nil { - log.Printf("[Azure CNS] HTTP Post returned error %v", err.Error()) - return "", err - } - - defer res.Body.Close() - - if res.StatusCode == 200 { - var resp cnmIpam.RequestPoolResponse - err := json.NewDecoder(res.Body).Decode(&resp) - if err != nil { - log.Printf("[Azure CNS] Error received while parsing GetPoolID response resp:%v err:%v", res.Body, err.Error()) - return "", err - } - - if resp.Err != "" { - log.Printf("[Azure CNS] GetPoolID received error response :%v", resp.Err) - return "", fmt.Errorf(resp.Err) - } - - return resp.PoolID, nil - } - log.Printf("[Azure CNS] GetPoolID invalid http status code: %v err:%v", res.StatusCode, err.Error()) - return "", err -} - -// ReserveIPAddress request an Ip address for the reservation id. 
-func (ic *IpamClient) ReserveIPAddress(poolID string, reservationID string) (string, error) { - var body bytes.Buffer - log.Printf("[Azure CNS] ReserveIpAddress") - - client, err := getClient(ic.connectionURL) - if err != nil { - return "", err - } - - url := ic.connectionURL + cnmIpam.RequestAddressPath - - payload := &cnmIpam.RequestAddressRequest{ - PoolID: poolID, - Address: "", - Options: make(map[string]string), - } - payload.Options[ipam.OptAddressID] = reservationID - json.NewEncoder(&body).Encode(payload) - - res, err := client.Post(url, "application/json", &body) - if err != nil { - log.Printf("[Azure CNS] HTTP Post returned error %v", err.Error()) - return "", err - } - - defer res.Body.Close() - - if res.StatusCode == 200 { - var reserveResp cnmIpam.RequestAddressResponse - - err = json.NewDecoder(res.Body).Decode(&reserveResp) - if err != nil { - log.Printf("[Azure CNS] Error received while parsing reserve response resp:%v err:%v", res.Body, err.Error()) - return "", err - } - - if reserveResp.Err != "" { - log.Printf("[Azure CNS] ReserveIP received error response :%v", reserveResp.Err) - return "", fmt.Errorf(reserveResp.Err) - } - - return reserveResp.Address, nil - } - - log.Printf("[Azure CNS] ReserveIp invalid http status code: %v err:%v", res.StatusCode, err.Error()) - return "", err -} - -// ReleaseIPAddress release an IP address for the reservation id. -func (ic *IpamClient) ReleaseIPAddress(poolID string, reservationID string) error { - var body bytes.Buffer - log.Printf("[Azure CNS] ReleaseIPAddress") - - client, err := getClient(ic.connectionURL) - if err != nil { - return err - } - - url := ic.connectionURL + cnmIpam.ReleaseAddressPath - - payload := &cnmIpam.ReleaseAddressRequest{ - PoolID: poolID, - Address: "", - Options: make(map[string]string), - } - - payload.Options[ipam.OptAddressID] = reservationID - - json.NewEncoder(&body).Encode(payload) - - res, err := client.Post(url, "application/json", &body) - if err != nil { - log.Printf("[Azure CNS] HTTP Post returned error %v", err.Error()) - return err - } - - defer res.Body.Close() - - if res.StatusCode == 200 { - var releaseResp cnmIpam.ReleaseAddressResponse - err := json.NewDecoder(res.Body).Decode(&releaseResp) - if err != nil { - log.Printf("[Azure CNS] Error received while parsing release response :%v err:%v", res.Body, err.Error()) - return err - } - - if releaseResp.Err != "" { - log.Printf("[Azure CNS] ReleaseIP received error response :%v", releaseResp.Err) - return fmt.Errorf(releaseResp.Err) - } - - return nil - } - log.Printf("[Azure CNS] ReleaseIP invalid http status code: %v", res.StatusCode) - return err -} - -// GetIPAddressUtilization - returns number of available, reserved and unhealthy addresses list. 
-func (ic *IpamClient) GetIPAddressUtilization(poolID string) (int, int, []string, error) { - var body bytes.Buffer - log.Printf("[Azure CNS] GetIPAddressUtilization") - - client, err := getClient(ic.connectionURL) - if err != nil { - return 0, 0, nil, err - } - url := ic.connectionURL + cnmIpam.GetPoolInfoPath - - payload := &cnmIpam.GetPoolInfoRequest{ - PoolID: poolID, - } - - json.NewEncoder(&body).Encode(payload) - - res, err := client.Post(url, "application/json", &body) - if err != nil { - log.Printf("[Azure CNS] HTTP Post returned error %v", err.Error()) - return 0, 0, nil, err - } - - defer res.Body.Close() - - if res.StatusCode == 200 { - var poolInfoResp cnmIpam.GetPoolInfoResponse - err := json.NewDecoder(res.Body).Decode(&poolInfoResp) - if err != nil { - log.Printf("[Azure CNS] Error received while parsing GetIPUtilization response :%v err:%v", res.Body, err.Error()) - return 0, 0, nil, err - } - - if poolInfoResp.Err != "" { - log.Printf("[Azure CNS] GetIPUtilization received error response :%v", poolInfoResp.Err) - return 0, 0, nil, fmt.Errorf(poolInfoResp.Err) - } - - return poolInfoResp.Capacity, poolInfoResp.Available, poolInfoResp.UnhealthyAddresses, nil - } - log.Printf("[Azure CNS] GetIPUtilization invalid http status code: %v err:%v", res.StatusCode, err.Error()) - return 0, 0, nil, err -} diff --git a/cns/ipamclient/ipamclient_linux.go b/cns/ipamclient/ipamclient_linux.go deleted file mode 100644 index cb3afbd300..0000000000 --- a/cns/ipamclient/ipamclient_linux.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2017 Microsoft. All rights reserved. -// MIT License - -// +build linux - -package ipamclient - -import ( - "context" - "net" - "net/http" - - "github.com/Azure/azure-container-networking/cns/logger" -) - -const ( - defaultIpamPluginURL = "http://unix" - pluginSockPath = "/run/docker/plugins/azure-vnet.sock" -) - -// getClient - returns unix http client -func getClient(url string) (*http.Client, error) { - var httpc *http.Client - if url == defaultIpamPluginURL { - dialContext, err := net.Dial("unix", pluginSockPath) - if err != nil { - logger.Errorf("[Azure CNS] Error.Dial context error %v", err.Error()) - return nil, err - } - - httpc = &http.Client{ - Transport: &http.Transport{ - DialContext: func(_ context.Context, _, _ string) (net.Conn, error) { - return dialContext, nil - }, - }, - } - } else { - httpc = &http.Client{} - } - - return httpc, nil -} diff --git a/cns/ipamclient/ipamclient_test.go b/cns/ipamclient/ipamclient_test.go deleted file mode 100644 index 3aea708110..0000000000 --- a/cns/ipamclient/ipamclient_test.go +++ /dev/null @@ -1,213 +0,0 @@ -package ipamclient - -import ( - "fmt" - "log" - "net/http" - "net/url" - "os" - "testing" - - "github.com/Azure/azure-container-networking/cnm/ipam" - "github.com/Azure/azure-container-networking/common" -) - -var ( - ipamQueryUrl = "localhost:42424" - ic *IpamClient -) - -// Wraps the test run with service setup and teardown. -func TestMain(m *testing.M) { - // Create a fake IPAM plugin to handle requests from CNS plugin. 
- u, _ := url.Parse("tcp://" + ipamQueryUrl) - ipamAgent, err := common.NewListener(u) - if err != nil { - fmt.Printf("Failed to create agent, err:%v.\n", err) - return - } - ipamAgent.AddHandler(ipam.GetAddressSpacesPath, handleIpamAsIDQuery) - ipamAgent.AddHandler(ipam.RequestPoolPath, handlePoolIDQuery) - ipamAgent.AddHandler(ipam.RequestAddressPath, handleReserveIPQuery) - ipamAgent.AddHandler(ipam.ReleasePoolPath, handleReleaseIPQuery) - ipamAgent.AddHandler(ipam.GetPoolInfoPath, handleIPUtilizationQuery) - - err = ipamAgent.Start(make(chan error, 1)) - if err != nil { - fmt.Printf("Failed to start agent, err:%v.\n", err) - return - } - ic, err = NewIpamClient("http://" + ipamQueryUrl) - if err != nil { - fmt.Printf("Ipam client creation failed %+v", err) - } - - // Run tests. - exitCode := m.Run() - - ipamAgent.Stop() - - os.Exit(exitCode) -} - -// Handles queries from GetAddressSpace. -func handleIpamAsIDQuery(w http.ResponseWriter, r *http.Request) { - addressSpaceResp := "{\"LocalDefaultAddressSpace\": \"local\", \"GlobalDefaultAddressSpace\": \"global\"}" - w.Write([]byte(addressSpaceResp)) -} - -// Handles queries from GetPoolID -func handlePoolIDQuery(w http.ResponseWriter, r *http.Request) { - requestPoolResp := "{\"PoolID\":\"10.0.0.0/16\", \"Pool\": \"\"}" - w.Write([]byte(requestPoolResp)) -} - -// Handles queries from ReserveIPAddress. -func handleReserveIPQuery(w http.ResponseWriter, r *http.Request) { - reserveIPResp := "{\"Address\":\"10.0.0.2/16\"}" - w.Write([]byte(reserveIPResp)) -} - -// Handles queries from ReleaseIPAddress. -func handleReleaseIPQuery(w http.ResponseWriter, r *http.Request) { - w.Write([]byte("{}")) -} - -// Handles queries from GetIPAddressUtiltization. -func handleIPUtilizationQuery(w http.ResponseWriter, r *http.Request) { - ipUtilizationResp := "{\"Capacity\":10, \"Available\":7, \"UnhealthyAddresses\":[\"10.0.0.5\",\"10.0.0.6\",\"10.0.0.7\"]}" - w.Write([]byte(ipUtilizationResp)) -} - -// Tests IpamClient GetAddressSpace function to get AddressSpaceID. -func TestAddressSpaces(t *testing.T) { - asID, err := ic.GetAddressSpace() - if err != nil { - t.Errorf("GetAddressSpace failed with %v\n", err) - return - } - - if asID != "local" { - t.Errorf("GetAddressSpace failed with invalid as id %s", asID) - } -} - -// Tests IpamClient GetPoolID function to get PoolID. -func TestGetPoolID(t *testing.T) { - subnet := "10.0.0.0/16" - - asID, err := ic.GetAddressSpace() - if err != nil { - t.Errorf("GetAddressSpace failed with %v\n", err) - return - } - - poolID, err := ic.GetPoolID(asID, subnet) - if err != nil { - t.Errorf("GetPoolID failed with %v\n", err) - return - } - - if poolID != "10.0.0.0/16" { - t.Errorf("GetPoolId failed with invalid pool id %s", poolID) - } -} - -// Tests IpamClient ReserveIPAddress function to request IP for ID. 
-func TestReserveIP(t *testing.T) { - subnet := "10.0.0.0/16" - - asID, err := ic.GetAddressSpace() - if err != nil { - t.Errorf("GetAddressSpace failed with %v\n", err) - return - } - - poolID, err := ic.GetPoolID(asID, subnet) - if err != nil { - t.Errorf("GetPoolID failed with %v\n", err) - return - } - - addr1, err := ic.ReserveIPAddress(poolID, "id1") - if err != nil { - t.Errorf("GetReserveIP failed with %v\n", err) - return - } - if addr1 != "10.0.0.2/16" { - t.Errorf("GetReserveIP returned ivnvalid IP %s\n", addr1) - return - } - addr2, err := ic.ReserveIPAddress(poolID, "id1") - if err != nil { - t.Errorf("GetReserveIP failed with %v\n", err) - return - } - if addr1 != addr2 { - t.Errorf("GetReserveIP with id returned ivnvalid IP1 %s IP2 %s\n", addr1, addr2) - return - } -} - -// Tests IpamClient ReleaseIPAddress function to release IP associated with ID. -func TestReleaseIP(t *testing.T) { - subnet := "10.0.0.0/16" - - asID, err := ic.GetAddressSpace() - if err != nil { - t.Errorf("GetAddressSpace failed with %v\n", err) - return - } - - poolID, err := ic.GetPoolID(asID, subnet) - if err != nil { - t.Errorf("GetPoolID failed with %v\n", err) - return - } - - addr1, err := ic.ReserveIPAddress(poolID, "id1") - if err != nil { - t.Errorf("GetReserveIP failed with %v\n", err) - return - } - if addr1 != "10.0.0.2/16" { - t.Errorf("GetReserveIP returned ivnvalid IP %s\n", addr1) - return - } - - err = ic.ReleaseIPAddress(poolID, "id1") - if err != nil { - t.Errorf("Release reservation failed with %v\n", err) - return - } -} - -// Tests IpamClient GetIPAddressUtilization function to retrieve IP Utilization info. -func TestIPAddressUtilization(t *testing.T) { - subnet := "10.0.0.0/16" - - asID, err := ic.GetAddressSpace() - if err != nil { - t.Errorf("GetAddressSpace failed with %v\n", err) - return - } - - poolID, err := ic.GetPoolID(asID, subnet) - if err != nil { - t.Errorf("GetPoolID failed with %v\n", err) - return - } - - capacity, available, unhealthyAddrs, err := ic.GetIPAddressUtilization(poolID) - if err != nil { - t.Errorf("GetIPUtilization failed with %v\n", err) - return - } - - if capacity != 10 && available != 7 && len(unhealthyAddrs) == 3 { - t.Errorf("GetIPUtilization returned invalid either capacity %v / available %v count/ unhealthyaddrs %v \n", capacity, available, unhealthyAddrs) - return - } - - log.Printf("Capacity %v Available %v Unhealthy %v", capacity, available, unhealthyAddrs) -} diff --git a/cns/ipamclient/ipamclient_windows.go b/cns/ipamclient/ipamclient_windows.go deleted file mode 100644 index fac502a73a..0000000000 --- a/cns/ipamclient/ipamclient_windows.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2017 Microsoft. All rights reserved. 
-// MIT License - -// +build windows - -package ipamclient - -import ( - "net/http" -) - -const ( - defaultIpamPluginURL = "http://localhost:48080" -) - -func getClient(url string) (http.Client, error) { - httpc := http.Client{} - return httpc, nil -} diff --git a/cns/ipampool/metrics.go b/cns/ipampool/metrics/metrics.go similarity index 65% rename from cns/ipampool/metrics.go rename to cns/ipampool/metrics/metrics.go index 75bcdf806d..c9b30a08f9 100644 --- a/cns/ipampool/metrics.go +++ b/cns/ipampool/metrics/metrics.go @@ -1,4 +1,4 @@ -package ipampool +package metrics import ( "github.com/prometheus/client_golang/prometheus" @@ -6,12 +6,12 @@ import ( ) const ( - subnetLabel = "subnet" - subnetCIDRLabel = "subnet_cidr" - podnetARMIDLabel = "podnet_arm_id" + SubnetLabel = "subnet" + SubnetCIDRLabel = "subnet_cidr" + PodnetARMIDLabel = "podnet_arm_id" customerMetricLabel = "customer_metric" customerMetricLabelValue = "customer metric" - subnetExhaustionStateLabel = "subnet_exhaustion_state" + SubnetExhaustionStateLabel = "subnet_exhaustion_state" SubnetIPExhausted = 1 SubnetIPNotExhausted = 0 ) @@ -23,7 +23,7 @@ var ( Help: "IPs currently in use by Pods on this CNS Node.", ConstLabels: prometheus.Labels{customerMetricLabel: customerMetricLabelValue}, }, - []string{subnetLabel, subnetCIDRLabel, podnetARMIDLabel}, + []string{SubnetLabel, SubnetCIDRLabel, PodnetARMIDLabel}, ) IpamAvailableIPCount = prometheus.NewGaugeVec( prometheus.GaugeOpts{ @@ -31,7 +31,7 @@ var ( Help: "IPs available on this CNS Node for use by a Pod.", ConstLabels: prometheus.Labels{customerMetricLabel: customerMetricLabelValue}, }, - []string{subnetLabel, subnetCIDRLabel, podnetARMIDLabel}, + []string{SubnetLabel, SubnetCIDRLabel, PodnetARMIDLabel}, ) IpamBatchSize = prometheus.NewGaugeVec( prometheus.GaugeOpts{ @@ -39,7 +39,7 @@ var ( Help: "IPAM IP pool scaling batch size.", ConstLabels: prometheus.Labels{customerMetricLabel: customerMetricLabelValue}, }, - []string{subnetLabel, subnetCIDRLabel, podnetARMIDLabel}, + []string{SubnetLabel, SubnetCIDRLabel, PodnetARMIDLabel}, ) IpamCurrentAvailableIPcount = prometheus.NewGaugeVec( prometheus.GaugeOpts{ @@ -47,7 +47,7 @@ var ( Help: "Current available IP count.", ConstLabels: prometheus.Labels{customerMetricLabel: customerMetricLabelValue}, }, - []string{subnetLabel, subnetCIDRLabel, podnetARMIDLabel}, + []string{SubnetLabel, SubnetCIDRLabel, PodnetARMIDLabel}, ) IpamExpectedAvailableIPCount = prometheus.NewGaugeVec( prometheus.GaugeOpts{ @@ -55,7 +55,7 @@ var ( Help: "Expected future available IP count assuming the Requested IP count is honored.", ConstLabels: prometheus.Labels{customerMetricLabel: customerMetricLabelValue}, }, - []string{subnetLabel, subnetCIDRLabel, podnetARMIDLabel}, + []string{SubnetLabel, SubnetCIDRLabel, PodnetARMIDLabel}, ) IpamMaxIPCount = prometheus.NewGaugeVec( prometheus.GaugeOpts{ @@ -63,7 +63,7 @@ var ( Help: "Maximum Secondary IPs allowed on this Node.", ConstLabels: prometheus.Labels{customerMetricLabel: customerMetricLabelValue}, }, - []string{subnetLabel, subnetCIDRLabel, podnetARMIDLabel}, + []string{SubnetLabel, SubnetCIDRLabel, PodnetARMIDLabel}, ) IpamPendingProgramIPCount = prometheus.NewGaugeVec( prometheus.GaugeOpts{ @@ -71,7 +71,7 @@ var ( Help: "IPs reserved but not yet available (Pending Programming).", ConstLabels: prometheus.Labels{customerMetricLabel: customerMetricLabelValue}, }, - []string{subnetLabel, subnetCIDRLabel, podnetARMIDLabel}, + []string{SubnetLabel, SubnetCIDRLabel, PodnetARMIDLabel}, ) 
IpamPendingReleaseIPCount = prometheus.NewGaugeVec( prometheus.GaugeOpts{ @@ -79,7 +79,7 @@ var ( Help: "IPs reserved but not available anymore (Pending Release).", ConstLabels: prometheus.Labels{customerMetricLabel: customerMetricLabelValue}, }, - []string{subnetLabel, subnetCIDRLabel, podnetARMIDLabel}, + []string{SubnetLabel, SubnetCIDRLabel, PodnetARMIDLabel}, ) IpamPrimaryIPCount = prometheus.NewGaugeVec( prometheus.GaugeOpts{ @@ -87,7 +87,7 @@ var ( Help: "NC Primary IP count (reserved from Pod Subnet for DNS and IMDS SNAT).", ConstLabels: prometheus.Labels{customerMetricLabel: customerMetricLabelValue}, }, - []string{subnetLabel, subnetCIDRLabel, podnetARMIDLabel}, + []string{SubnetLabel, SubnetCIDRLabel, PodnetARMIDLabel}, ) IpamRequestedIPConfigCount = prometheus.NewGaugeVec( prometheus.GaugeOpts{ @@ -95,7 +95,7 @@ var ( Help: "Secondary Pod Subnet IPs requested by this CNS Node (for Pods).", ConstLabels: prometheus.Labels{customerMetricLabel: customerMetricLabelValue}, }, - []string{subnetLabel, subnetCIDRLabel, podnetARMIDLabel}, + []string{SubnetLabel, SubnetCIDRLabel, PodnetARMIDLabel}, ) IpamSecondaryIPCount = prometheus.NewGaugeVec( prometheus.GaugeOpts{ @@ -103,7 +103,7 @@ var ( Help: "Node NC Secondary IP count (reserved usable by Pods).", ConstLabels: prometheus.Labels{customerMetricLabel: customerMetricLabelValue}, }, - []string{subnetLabel, subnetCIDRLabel, podnetARMIDLabel}, + []string{SubnetLabel, SubnetCIDRLabel, PodnetARMIDLabel}, ) IpamTotalIPCount = prometheus.NewGaugeVec( prometheus.GaugeOpts{ @@ -111,7 +111,7 @@ var ( Help: "Count of total IP pool size allocated to CNS by DNC.", ConstLabels: prometheus.Labels{customerMetricLabel: customerMetricLabelValue}, }, - []string{subnetLabel, subnetCIDRLabel, podnetARMIDLabel}, + []string{SubnetLabel, SubnetCIDRLabel, PodnetARMIDLabel}, ) IpamSubnetExhaustionState = prometheus.NewGaugeVec( prometheus.GaugeOpts{ @@ -119,14 +119,14 @@ var ( Help: "IPAM view of subnet exhaustion state", ConstLabels: prometheus.Labels{customerMetricLabel: customerMetricLabelValue}, }, - []string{subnetLabel, subnetCIDRLabel, podnetARMIDLabel}, + []string{SubnetLabel, SubnetCIDRLabel, PodnetARMIDLabel}, ) IpamSubnetExhaustionCount = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "cx_ipam_subnet_exhaustion_state_count_total", Help: "Count of the number of times the ipam pool monitor sees subnet exhaustion", }, - []string{subnetLabel, subnetCIDRLabel, podnetARMIDLabel, subnetExhaustionStateLabel}, + []string{SubnetLabel, SubnetCIDRLabel, PodnetARMIDLabel, SubnetExhaustionStateLabel}, ) ) @@ -148,24 +148,3 @@ func init() { IpamSubnetExhaustionCount, ) } - -func observeIPPoolState(state ipPoolState, meta metaState) { - labels := []string{meta.subnet, meta.subnetCIDR, meta.subnetARMID} - IpamAllocatedIPCount.WithLabelValues(labels...).Set(float64(state.allocatedToPods)) - IpamAvailableIPCount.WithLabelValues(labels...).Set(float64(state.available)) - IpamBatchSize.WithLabelValues(labels...).Set(float64(meta.batch)) - IpamCurrentAvailableIPcount.WithLabelValues(labels...).Set(float64(state.currentAvailableIPs)) - IpamExpectedAvailableIPCount.WithLabelValues(labels...).Set(float64(state.expectedAvailableIPs)) - IpamMaxIPCount.WithLabelValues(labels...).Set(float64(meta.max)) - IpamPendingProgramIPCount.WithLabelValues(labels...).Set(float64(state.pendingProgramming)) - IpamPendingReleaseIPCount.WithLabelValues(labels...).Set(float64(state.pendingRelease)) - 
IpamPrimaryIPCount.WithLabelValues(labels...).Set(float64(len(meta.primaryIPAddresses)))
-	IpamRequestedIPConfigCount.WithLabelValues(labels...).Set(float64(state.requestedIPs))
-	IpamSecondaryIPCount.WithLabelValues(labels...).Set(float64(state.secondaryIPs))
-	IpamTotalIPCount.WithLabelValues(labels...).Set(float64(state.secondaryIPs + int64(len(meta.primaryIPAddresses))))
-	if meta.exhausted {
-		IpamSubnetExhaustionState.WithLabelValues(labels...).Set(float64(SubnetIPExhausted))
-	} else {
-		IpamSubnetExhaustionState.WithLabelValues(labels...).Set(float64(SubnetIPNotExhausted))
-	}
-}
diff --git a/cns/ipampool/metrics/observer.go b/cns/ipampool/metrics/observer.go
new file mode 100644
index 0000000000..2fef8e8d91
--- /dev/null
+++ b/cns/ipampool/metrics/observer.go
@@ -0,0 +1,199 @@
+package metrics
+
+import (
+	"context"
+	"fmt"
+	"net/netip"
+
+	"github.com/Azure/azure-container-networking/cns"
+	"github.com/Azure/azure-container-networking/cns/types"
+	"github.com/Azure/azure-container-networking/crd/clustersubnetstate/api/v1alpha1"
+	"github.com/Azure/azure-container-networking/crd/nodenetworkconfig/api/v1alpha"
+	"github.com/pkg/errors"
+	"golang.org/x/sync/errgroup"
+)
+
+// Subnet ARM ID /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/$(SUBNET)
+const subnetARMIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/%s/subnets/%s"
+
+// ipPoolState is the current actual state of the CNS IP pool.
+type ipPoolState struct {
+	// allocatedToPods are the IPs CNS gives to Pods.
+	allocatedToPods int64
+	// available are the IPs in state "Available".
+	available int64
+	// currentAvailableIPs are the current available IPs: allocated - assigned - pendingRelease.
+	currentAvailableIPs int64
+	// expectedAvailableIPs are the "future" available IPs, if the requested IP count is honored: requested - assigned.
+	expectedAvailableIPs int64
+	// pendingProgramming are the IPs in state "PendingProgramming".
+	pendingProgramming int64
+	// pendingRelease are the IPs in state "PendingRelease".
+	pendingRelease int64
+	// requestedIPs are the IPs CNS has requested be allocated by DNC.
+	requestedIPs int64
+	// secondaryIPs are all the IPs given to CNS by DNC, not including the primary IP of the NC.
+	secondaryIPs int64
+}
+
+// metaState is the Monitor's configuration state for the IP pool.
+type metaState struct {
+	batch              int64
+	exhausted          bool
+	max                int64
+	primaryIPAddresses map[string]struct{}
+	subnet             string
+	subnetARMID        string
+	subnetCIDR         string
+}
+
+type observer struct {
+	ipSrc  func() map[string]cns.IPConfigurationStatus
+	nncSrc func(context.Context) (*v1alpha.NodeNetworkConfig, error)
+	cssSrc func(context.Context) ([]v1alpha1.ClusterSubnetState, error)
+}
+
+// NewLegacyMetricsObserver creates a closed functional scope which can be invoked to
+// observe the legacy IPAM pool metrics.
+//
+//nolint:lll // ignore line length
+func NewLegacyMetricsObserver(ipSrc func() map[string]cns.IPConfigurationStatus, nncSrc func(context.Context) (*v1alpha.NodeNetworkConfig, error), cssSrc func(context.Context) ([]v1alpha1.ClusterSubnetState, error)) func(context.Context) error {
+	return (&observer{
+		ipSrc:  ipSrc,
+		nncSrc: nncSrc,
+		cssSrc: cssSrc,
+	}).observeMetrics
+}
+
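For reference, a hedged sketch of the ARM ID that generateARMID (below) produces from the template above (all IDs hypothetical):

	// nc := v1alpha.NetworkContainer{
	// 	SubscriptionID:  "00000000-0000-0000-0000-000000000000",
	// 	ResourceGroupID: "rg-example",
	// 	VNETID:          "vnet-example",
	// 	SubnetID:        "subnet-example",
	// }
	// generateARMID(&nc) ==
	// "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg-example/providers/Microsoft.Network/virtualNetworks/vnet-example/subnets/subnet-example"

+// generateARMID uses the Subnet ARM ID format to populate the ARM ID with the metadata.
+// If any of the metadata attributes is empty, the ARM ID will be an empty string.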
+func generateARMID(nc *v1alpha.NetworkContainer) string { + subscription := nc.SubscriptionID + resourceGroup := nc.ResourceGroupID + vnetID := nc.VNETID + subnetID := nc.SubnetID + + if subscription == "" || resourceGroup == "" || vnetID == "" || subnetID == "" { + return "" + } + return fmt.Sprintf(subnetARMIDTemplate, subscription, resourceGroup, vnetID, subnetID) +} + +// observeMetrics observes the IP pool and updates the metrics. Blocking. +// +//nolint:lll // ignore line length +func (o *observer) observeMetrics(ctx context.Context) error { + // The error group is used to allow individual metrics sources to fail without + // failing out the entire attempt to observe the Pool. This may happen if there is a + // transient issue with the source of the data, or if the source is not available + // (like if the CRD is not installed). + var g errgroup.Group + + // Get the current state of world. + var meta metaState + g.Go(func() error { + // Try to fetch the ClusterSubnetState, if available. + if o.cssSrc != nil { + csslist, err := o.cssSrc(ctx) + if err != nil { + return err + } + for i := range csslist { + if csslist[i].Status.Exhausted { + meta.exhausted = true + break + } + } + } + return nil + }) + + var state ipPoolState + g.Go(func() error { + // Try to fetch the NodeNetworkConfig, if available. + if o.nncSrc != nil { + nnc, err := o.nncSrc(ctx) + if err != nil { + return err + } + if len(nnc.Status.NetworkContainers) > 0 { + // Set SubnetName, SubnetAddressSpace and Pod Network ARM ID values to the global subnet, subnetCIDR and subnetARM variables. + meta.subnet = nnc.Status.NetworkContainers[0].SubnetName + meta.subnetCIDR = nnc.Status.NetworkContainers[0].SubnetAddressSpace + meta.subnetARMID = generateARMID(&nnc.Status.NetworkContainers[0]) + } + meta.primaryIPAddresses = make(map[string]struct{}) + // Add Primary IP to Map, if not present. + // This is only for Swift i.e. if NC Type is vnet. + for i := 0; i < len(nnc.Status.NetworkContainers); i++ { + nc := nnc.Status.NetworkContainers[i] + if nc.Type == "" || nc.Type == v1alpha.VNET { + meta.primaryIPAddresses[nc.PrimaryIP] = struct{}{} + } + + if nc.Type == v1alpha.VNETBlock { + primaryPrefix, err := netip.ParsePrefix(nc.PrimaryIP) + if err != nil { + return errors.Wrapf(err, "unable to parse ip prefix: %s", nc.PrimaryIP) + } + meta.primaryIPAddresses[primaryPrefix.Addr().String()] = struct{}{} + } + } + state.requestedIPs = nnc.Spec.RequestedIPCount + meta.batch = nnc.Status.Scaler.BatchSize + meta.max = nnc.Status.Scaler.MaxIPCount + } + return nil + }) + + g.Go(func() error { + // Try to fetch the IPConfigurations, if available. + if o.ipSrc != nil { + ips := o.ipSrc() + state.secondaryIPs = int64(len(ips)) + for i := range ips { + ip := ips[i] + switch ip.GetState() { + case types.Assigned: + state.allocatedToPods++ + case types.Available: + state.available++ + case types.PendingProgramming: + state.pendingProgramming++ + case types.PendingRelease: + state.pendingRelease++ + } + } + } + return nil + }) + + err := g.Wait() + + state.currentAvailableIPs = state.secondaryIPs - state.allocatedToPods - state.pendingRelease + state.expectedAvailableIPs = state.requestedIPs - state.allocatedToPods + + // Update the metrics. 
+ labels := []string{meta.subnet, meta.subnetCIDR, meta.subnetARMID} + IpamAllocatedIPCount.WithLabelValues(labels...).Set(float64(state.allocatedToPods)) + IpamAvailableIPCount.WithLabelValues(labels...).Set(float64(state.available)) + IpamBatchSize.WithLabelValues(labels...).Set(float64(meta.batch)) + IpamCurrentAvailableIPcount.WithLabelValues(labels...).Set(float64(state.currentAvailableIPs)) + IpamExpectedAvailableIPCount.WithLabelValues(labels...).Set(float64(state.expectedAvailableIPs)) + IpamMaxIPCount.WithLabelValues(labels...).Set(float64(meta.max)) + IpamPendingProgramIPCount.WithLabelValues(labels...).Set(float64(state.pendingProgramming)) + IpamPendingReleaseIPCount.WithLabelValues(labels...).Set(float64(state.pendingRelease)) + IpamPrimaryIPCount.WithLabelValues(labels...).Set(float64(len(meta.primaryIPAddresses))) + IpamRequestedIPConfigCount.WithLabelValues(labels...).Set(float64(state.requestedIPs)) + IpamSecondaryIPCount.WithLabelValues(labels...).Set(float64(state.secondaryIPs)) + IpamTotalIPCount.WithLabelValues(labels...).Set(float64(state.secondaryIPs + int64(len(meta.primaryIPAddresses)))) + if meta.exhausted { + IpamSubnetExhaustionState.WithLabelValues(labels...).Set(float64(SubnetIPExhausted)) + } else { + IpamSubnetExhaustionState.WithLabelValues(labels...).Set(float64(SubnetIPNotExhausted)) + } + if err != nil { + return errors.Wrap(err, "failed to collect all metrics") + } + return nil +} diff --git a/cns/ipampool/monitor.go b/cns/ipampool/monitor.go index babf863964..3227840c06 100644 --- a/cns/ipampool/monitor.go +++ b/cns/ipampool/monitor.go @@ -9,6 +9,7 @@ import ( "time" "github.com/Azure/azure-container-networking/cns" + "github.com/Azure/azure-container-networking/cns/ipampool/metrics" "github.com/Azure/azure-container-networking/cns/logger" "github.com/Azure/azure-container-networking/cns/metric" "github.com/Azure/azure-container-networking/cns/types" @@ -105,9 +106,9 @@ func (pm *Monitor) Start(ctx context.Context) error { case css := <-pm.cssSource: // received an updated ClusterSubnetState pm.metastate.exhausted = css.Status.Exhausted logger.Printf("subnet exhausted status = %t", pm.metastate.exhausted) - IpamSubnetExhaustionCount.With(prometheus.Labels{ - subnetLabel: pm.metastate.subnet, subnetCIDRLabel: pm.metastate.subnetCIDR, - podnetARMIDLabel: pm.metastate.subnetARMID, subnetExhaustionStateLabel: strconv.FormatBool(pm.metastate.exhausted), + metrics.IpamSubnetExhaustionCount.With(prometheus.Labels{ + metrics.SubnetLabel: pm.metastate.subnet, metrics.SubnetCIDRLabel: pm.metastate.subnetCIDR, + metrics.PodnetARMIDLabel: pm.metastate.subnetARMID, metrics.SubnetExhaustionStateLabel: strconv.FormatBool(pm.metastate.exhausted), }).Inc() select { default: @@ -482,6 +483,27 @@ func (pm *Monitor) clampScaler(scaler *v1alpha.Scaler) { } } +func observeIPPoolState(state ipPoolState, meta metaState) { + labels := []string{meta.subnet, meta.subnetCIDR, meta.subnetARMID} + metrics.IpamAllocatedIPCount.WithLabelValues(labels...).Set(float64(state.allocatedToPods)) + metrics.IpamAvailableIPCount.WithLabelValues(labels...).Set(float64(state.available)) + metrics.IpamBatchSize.WithLabelValues(labels...).Set(float64(meta.batch)) + metrics.IpamCurrentAvailableIPcount.WithLabelValues(labels...).Set(float64(state.currentAvailableIPs)) + metrics.IpamExpectedAvailableIPCount.WithLabelValues(labels...).Set(float64(state.expectedAvailableIPs)) + metrics.IpamMaxIPCount.WithLabelValues(labels...).Set(float64(meta.max)) + 
metrics.IpamPendingProgramIPCount.WithLabelValues(labels...).Set(float64(state.pendingProgramming)) + metrics.IpamPendingReleaseIPCount.WithLabelValues(labels...).Set(float64(state.pendingRelease)) + metrics.IpamPrimaryIPCount.WithLabelValues(labels...).Set(float64(len(meta.primaryIPAddresses))) + metrics.IpamRequestedIPConfigCount.WithLabelValues(labels...).Set(float64(state.requestedIPs)) + metrics.IpamSecondaryIPCount.WithLabelValues(labels...).Set(float64(state.secondaryIPs)) + metrics.IpamTotalIPCount.WithLabelValues(labels...).Set(float64(state.secondaryIPs + int64(len(meta.primaryIPAddresses)))) + if meta.exhausted { + metrics.IpamSubnetExhaustionState.WithLabelValues(labels...).Set(float64(metrics.SubnetIPExhausted)) + } else { + metrics.IpamSubnetExhaustionState.WithLabelValues(labels...).Set(float64(metrics.SubnetIPNotExhausted)) + } +} + // CalculateMinFreeIPs calculates the minimum free IP quantity based on the Scaler // in the passed NodeNetworkConfig. // Half of odd batches are rounded up! diff --git a/cns/ipampool/v2/adapter.go b/cns/ipampool/v2/adapter.go index 3cc02d0e00..a04bec11c1 100644 --- a/cns/ipampool/v2/adapter.go +++ b/cns/ipampool/v2/adapter.go @@ -31,6 +31,14 @@ func (m *adapter) GetStateSnapshot() cns.IpamPoolMonitorStateSnapshot { func PodIPDemandListener(ch chan<- int) func([]v1.Pod) { return func(pods []v1.Pod) { - ch <- len(pods) + // Filter out Pods in terminal phases (Succeeded/Failed) since they no longer + // have network sandboxes and don't contribute to IP demand + activePods := 0 + for i := range pods { + if pods[i].Status.Phase != v1.PodSucceeded && pods[i].Status.Phase != v1.PodFailed { + activePods++ + } + } + ch <- activePods } } diff --git a/cns/ipampool/v2/adapter_test.go b/cns/ipampool/v2/adapter_test.go new file mode 100644 index 0000000000..70c3dfe15f --- /dev/null +++ b/cns/ipampool/v2/adapter_test.go @@ -0,0 +1,100 @@ +package v2 + +import ( + "testing" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestPodIPDemandListener(t *testing.T) { + tests := []struct { + name string + pods []v1.Pod + expected int + }{ + { + name: "empty pod list", + pods: []v1.Pod{}, + expected: 0, + }, + { + name: "single running pod", + pods: []v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{Name: "pod1"}, + Status: v1.PodStatus{Phase: v1.PodRunning}, + }, + }, + expected: 1, + }, + { + name: "multiple running pods", + pods: []v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{Name: "pod1"}, + Status: v1.PodStatus{Phase: v1.PodRunning}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "pod2"}, + Status: v1.PodStatus{Phase: v1.PodPending}, + }, + }, + expected: 2, + }, + { + name: "mix of running and terminal pods - should exclude terminal", + pods: []v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{Name: "pod1"}, + Status: v1.PodStatus{Phase: v1.PodRunning}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "pod2"}, + Status: v1.PodStatus{Phase: v1.PodSucceeded}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "pod3"}, + Status: v1.PodStatus{Phase: v1.PodFailed}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "pod4"}, + Status: v1.PodStatus{Phase: v1.PodPending}, + }, + }, + expected: 2, // Only pod1 (Running) and pod4 (Pending) should be counted + }, + { + name: "only terminal pods - should count zero", + pods: []v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{Name: "pod1"}, + Status: v1.PodStatus{Phase: v1.PodSucceeded}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "pod2"}, + Status: v1.PodStatus{Phase: v1.PodFailed}, + }, + }, + expected: 0, + 
}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ch := make(chan int, 1) + listener := PodIPDemandListener(ch) + + listener(tt.pods) + + select { + case result := <-ch: + if result != tt.expected { + t.Errorf("expected %d, got %d", tt.expected, result) + } + default: + t.Error("expected value in channel") + } + }) + } +} diff --git a/cns/ipampool/v2/monitor.go b/cns/ipampool/v2/monitor.go index 9f2081d3e7..d2f8a3c8df 100644 --- a/cns/ipampool/v2/monitor.go +++ b/cns/ipampool/v2/monitor.go @@ -4,6 +4,7 @@ import ( "context" "math" "sync" + "time" "github.com/Azure/azure-container-networking/cns" "github.com/Azure/azure-container-networking/crd/clustersubnetstate/api/v1alpha1" @@ -36,36 +37,39 @@ type scaler struct { } type Monitor struct { - z *zap.Logger - scaler scaler - nnccli nodeNetworkConfigSpecUpdater - store ipStateStore - demand int64 - request int64 - demandSource <-chan int - cssSource <-chan v1alpha1.ClusterSubnetState - nncSource <-chan v1alpha.NodeNetworkConfig - started chan interface{} - once sync.Once + z *zap.Logger + scaler scaler + nnccli nodeNetworkConfigSpecUpdater + store ipStateStore + demand int64 + request int64 + demandSource <-chan int + cssSource <-chan v1alpha1.ClusterSubnetState + nncSource <-chan v1alpha.NodeNetworkConfig + started chan interface{} + once sync.Once + legacyMetricsObserver func(context.Context) error } func NewMonitor(z *zap.Logger, store ipStateStore, nnccli nodeNetworkConfigSpecUpdater, demandSource <-chan int, nncSource <-chan v1alpha.NodeNetworkConfig, cssSource <-chan v1alpha1.ClusterSubnetState) *Monitor { //nolint:lll // it's fine return &Monitor{ - z: z.With(zap.String("component", "ipam-pool-monitor")), - store: store, - nnccli: nnccli, - demandSource: demandSource, - cssSource: cssSource, - nncSource: nncSource, - started: make(chan interface{}), + z: z.With(zap.String("component", "ipam-pool-monitor")), + store: store, + nnccli: nnccli, + demandSource: demandSource, + cssSource: cssSource, + nncSource: nncSource, + started: make(chan interface{}), + legacyMetricsObserver: func(context.Context) error { return nil }, } } // Start begins the Monitor's pool reconcile loop. // On first run, it will block until a NodeNetworkConfig is received (through a call to Update()). -// Subsequently, it will run run once per RefreshDelay and attempt to re-reconcile the pool. +// Subsequently, it will run when Events happen, or at least once per ReconcileDelay, and attempt to re-reconcile the pool. func (pm *Monitor) Start(ctx context.Context) error { pm.z.Debug("starting") + maxReconcileDelay := time.NewTicker(60 * time.Second) //nolint:gomnd // 60 seconds for { // proceed when things happen: select { @@ -87,6 +91,7 @@ func (pm *Monitor) Start(ctx context.Context) error { pm.z.Debug("started", zap.Int64("initial request", pm.request)) }) pm.z.Info("scaler update", zap.Int64("batch", pm.scaler.batch), zap.Float64("buffer", pm.scaler.buffer), zap.Int64("max", pm.scaler.max), zap.Int64("request", pm.request)) + case <-maxReconcileDelay.C: // try to reconcile the pool every maxReconcileDelay to prevent drift or lockups.
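The new `maxReconcileDelay` ticker case turns the purely event-driven loop into one with a floor on reconcile frequency. In isolation, and with illustrative names rather than the Monitor's actual fields, the loop shape is:

```go
package pool

import (
	"context"
	"log"
	"time"
)

// run reconciles when an event arrives, and at least once per maxDelay as a
// safety net, so a missed or dropped event can no longer wedge the pool.
func run(ctx context.Context, events <-chan struct{}, maxDelay time.Duration, reconcile func(context.Context) error) error {
	ticker := time.NewTicker(maxDelay)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-events: // something changed; reconcile now
		case <-ticker.C: // nothing happened for maxDelay; reconcile anyway
		}
		if err := reconcile(ctx); err != nil {
			log.Printf("reconcile failed, retrying on next event or tick: %v", err)
		}
	}
}
```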
} select { case <-pm.started: // this blocks until we have initialized @@ -98,6 +103,9 @@ func (pm *Monitor) Start(ctx context.Context) error { if err := pm.reconcile(ctx); err != nil { pm.z.Error("reconcile failed", zap.Error(err)) } + if err := pm.legacyMetricsObserver(ctx); err != nil { + pm.z.Error("legacy metrics observer failed", zap.Error(err)) + } } } @@ -116,6 +124,7 @@ func (pm *Monitor) reconcile(ctx context.Context) error { pm.z.Info("calculated new request", zap.Int64("demand", pm.demand), zap.Int64("batch", s.batch), zap.Int64("max", s.max), zap.Float64("buffer", s.buffer), zap.Int64("target", target)) delta := target - pm.request if delta == 0 { + pm.z.Info("NNC already at target IPs, no scaling required") return nil } pm.z.Info("scaling pool", zap.Int64("delta", delta)) @@ -146,6 +155,10 @@ func (pm *Monitor) buildNNCSpec(request int64) v1alpha.NodeNetworkConfigSpec { return spec } +func (pm *Monitor) WithLegacyMetricsObserver(observer func(context.Context) error) { + pm.legacyMetricsObserver = observer +} + // calculateTargetIPCountOrMax calculates the target IP count request // using the scaling function and clamps the result at the max IPs. func calculateTargetIPCountOrMax(demand, batch, max int64, buffer float64) int64 { diff --git a/cns/kubecontroller/nodenetworkconfig/metrics.go b/cns/kubecontroller/nodenetworkconfig/metrics.go index a1ca124b6e..d85398bca6 100644 --- a/cns/kubecontroller/nodenetworkconfig/metrics.go +++ b/cns/kubecontroller/nodenetworkconfig/metrics.go @@ -24,6 +24,18 @@ var ( Help: "Unused IP count.", }, ) + hasNNC = prometheus.NewGauge( + prometheus.GaugeOpts{ + Name: "nnc_has_nodenetworkconfig", + Help: "Has received a NodeNetworkConfig", + }, + ) + ncs = prometheus.NewGauge( + prometheus.GaugeOpts{ + Name: "nnc_ncs", + Help: "Network Container count in the NodeNetworkConfig", + }, + ) ) func init() { @@ -31,5 +43,7 @@ func init() { allocatedIPs, requestedIPs, unusedIPs, + hasNNC, + ncs, ) } diff --git a/cns/kubecontroller/nodenetworkconfig/reconciler.go b/cns/kubecontroller/nodenetworkconfig/reconciler.go index 10b252b48a..4d5eb1bcd1 100644 --- a/cns/kubecontroller/nodenetworkconfig/reconciler.go +++ b/cns/kubecontroller/nodenetworkconfig/reconciler.go @@ -64,13 +64,14 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco nnc, err := r.nnccli.Get(ctx, req.NamespacedName) if err != nil { if apierrors.IsNotFound(err) { + hasNNC.Set(0) logger.Printf("[cns-rc] CRD not found, ignoring %v", err) return reconcile.Result{}, errors.Wrapf(client.IgnoreNotFound(err), "NodeNetworkConfig %v not found", req.NamespacedName) } logger.Errorf("[cns-rc] Error retrieving CRD from cache : %v", err) return reconcile.Result{}, errors.Wrapf(err, "failed to get NodeNetworkConfig %v", req.NamespacedName) } - + hasNNC.Set(1) logger.Printf("[cns-rc] CRD Spec: %+v", nnc.Spec) ipAssignments := 0 @@ -78,7 +79,9 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco // during node upgrades, an nnc may be updated with new ncs. at any given time, only the ncs // that exist in the nnc are valid. any others that may have been previously created and no // longer exist in the nnc should be considered stale. 
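`calculateTargetIPCountOrMax` is only named in this hunk and its body sits outside the diff context; going by its doc comment (scale by whole batches, clamp at the max), a plausible implementation looks like the sketch below. The exact rounding is an assumption:

```go
package pool

import "math"

// calculateTargetIPCountOrMax sketches the scaling function referenced above:
// request enough whole batches to cover demand plus a fractional batch of
// buffer, then clamp at the subnet max. E.g. demand=17, batch=16, buffer=0.5
// gives ceil(0.5 + 17/16) = 2 batches, i.e. a request of 32 IPs.
func calculateTargetIPCountOrMax(demand, batch, max int64, buffer float64) int64 {
	target := batch * int64(math.Ceil(buffer+float64(demand)/float64(batch)))
	if target > max {
		target = max // clamp the request at the max IPs
	}
	return target
}
```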
- validNCIDs := make([]string, len(nnc.Status.NetworkContainers)) + ncCount := len(nnc.Status.NetworkContainers) + ncs.Set(float64(ncCount)) + validNCIDs := make([]string, ncCount) for i := range nnc.Status.NetworkContainers { validNCIDs[i] = nnc.Status.NetworkContainers[i].ID } @@ -157,7 +160,9 @@ func (r *Reconciler) Started(ctx context.Context) (bool, error) { } // SetupWithManager Sets up the reconciler with a new manager, filtering using NodeNetworkConfigFilter on nodeName. -func (r *Reconciler) SetupWithManager(mgr ctrl.Manager, node *v1.Node) error { +// filterGenerationChange will check the old and new object's generation and only reconcile updates where the +// generation is the same. This is typically used in IPAMv1 but should be set to false in IPAMv2. +func (r *Reconciler) SetupWithManager(mgr ctrl.Manager, node *v1.Node, filterGenerationChange bool) error { r.nnccli = nodenetworkconfig.NewClient(mgr.GetClient()) err := ctrl.NewControllerManagedBy(mgr). For(&v1alpha.NodeNetworkConfig{}). @@ -166,20 +171,20 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager, node *v1.Node) error { DeleteFunc: func(event.DeleteEvent) bool { return false }, - }). - WithEventFilter(predicate.NewPredicateFuncs(func(object client.Object) bool { - // match on node controller ref for all other events. - return metav1.IsControlledBy(object, node) - })). - WithEventFilter(predicate.Funcs{ - // check that the generation is the same - status changes don't update generation. UpdateFunc: func(ue event.UpdateEvent) bool { if ue.ObjectOld == nil || ue.ObjectNew == nil { return false } - return ue.ObjectOld.GetGeneration() == ue.ObjectNew.GetGeneration() + if filterGenerationChange { + return ue.ObjectOld.GetGeneration() == ue.ObjectNew.GetGeneration() + } + return true }, }). + WithEventFilter(predicate.NewPredicateFuncs(func(object client.Object) bool { + // match on node controller ref for all other events. + return metav1.IsControlledBy(object, node) + })). WithEventFilter(predicate.NewPredicateFuncs(func(object client.Object) bool { // only process events on objects that are not being deleted. return object.GetDeletionTimestamp().IsZero() diff --git a/cns/kubecontroller/pod/reconciler.go b/cns/kubecontroller/pod/reconciler.go index 9767ddad22..db817159d8 100644 --- a/cns/kubecontroller/pod/reconciler.go +++ b/cns/kubecontroller/pod/reconciler.go @@ -50,7 +50,7 @@ type limiter interface { Allow() bool } -// NotifierFunc returns a reconcile.Func that lists Pods to get the latest +// NewNotifierFunc returns a reconcile.Func that lists Pods to get the latest // state and notifies listeners of the resulting Pods. // listOpts are passed to the client.List call to filter the Pod list. // limiter is an optional rate limiter which may be used to limit the @@ -61,11 +61,11 @@ type limiter interface { // any events. // listeners are called with the new Pod list. 
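A hypothetical wiring of the notifier defined next, from inside this package: `rate.Limiter` from golang.org/x/time/rate satisfies the `limiter` interface because it has an `Allow() bool` method, and the field selector matches the `spec.hostNetwork` index registered in SetupWithManager:

```go
package pod

import (
	"time"

	v2 "github.com/Azure/azure-container-networking/cns/ipampool/v2"
	"golang.org/x/time/rate"
	"k8s.io/apimachinery/pkg/fields"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// wireNotifier is a sketch, not repo code: relist at most once per 30s,
// list only non-hostNetwork pods, and feed the v2 pool monitor's demand
// channel via PodIPDemandListener.
func wireNotifier(w *watcher, demand chan<- int) reconcile.Func {
	limiter := rate.NewLimiter(rate.Every(30*time.Second), 1)
	opts := &client.ListOptions{
		FieldSelector: fields.SelectorFromSet(fields.Set{"spec.hostNetwork": "false"}),
	}
	return w.NewNotifierFunc(opts, limiter, v2.PodIPDemandListener(demand))
}
```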
func (p *watcher) NewNotifierFunc(listOpts *client.ListOptions, limiter limiter, listeners ...func([]v1.Pod)) reconcile.Func { - p.z.Debug("adding notified for listeners", zap.Int("listeners", len(listeners))) + p.z.Info("adding notifier for listeners", zap.Int("listeners", len(listeners))) return func(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { if !limiter.Allow() { // rate limit exceeded, requeue - p.z.Debug("rate limit exceeded") + p.z.Info("rate limit exceeded") return ctrl.Result{Requeue: true}, nil } podList := &v1.PodList{} @@ -88,12 +88,23 @@ var hostNetworkIndexer = client.IndexerFunc(func(o client.Object) []string { return []string{strconv.FormatBool(pod.Spec.HostNetwork)} }) +var statusPhaseIndexer = client.IndexerFunc(func(o client.Object) []string { + pod, ok := o.(*v1.Pod) + if !ok { + return nil + } + return []string{string(pod.Status.Phase)} +}) + // SetupWithManager Sets up the reconciler with a new manager, filtering using NodeNetworkConfigFilter on nodeName. func (p *watcher) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error { p.cli = mgr.GetClient() if err := mgr.GetFieldIndexer().IndexField(ctx, &v1.Pod{}, "spec.hostNetwork", hostNetworkIndexer); err != nil { return errors.Wrap(err, "failed to set up hostNetwork indexer") } + if err := mgr.GetFieldIndexer().IndexField(ctx, &v1.Pod{}, "status.phase", statusPhaseIndexer); err != nil { + return errors.Wrap(err, "failed to set up status.phase indexer") + } if err := ctrl.NewControllerManagedBy(mgr). For(&v1.Pod{}). WithEventFilter(predicate.Funcs{ // we only want create/delete events diff --git a/cns/logger/cnslogger.go b/cns/logger/cnslogger.go index 51c93c0e0e..5289259903 100644 --- a/cns/logger/cnslogger.go +++ b/cns/logger/cnslogger.go @@ -2,10 +2,11 @@ package logger import ( "fmt" + "maps" "os" "sync" - "github.com/Azure/azure-container-networking/aitelemetry" + ai "github.com/Azure/azure-container-networking/aitelemetry" "github.com/Azure/azure-container-networking/cns/types" "github.com/Azure/azure-container-networking/log" "github.com/pkg/errors" @@ -13,21 +14,24 @@ import ( "go.uber.org/zap/zapcore" ) -type CNSLogger struct { - logger *log.Logger - th aitelemetry.TelemetryHandle - DisableTraceLogging bool - DisableMetricLogging bool - DisableEventLogging bool +// wait time for closing AI telemetry session. +const waitTimeInSecs = 10 +type logger struct { + logger *log.Logger zapLogger *zap.Logger + th ai.TelemetryHandle + + disableTraceLogging bool + disableMetricLogging bool + disableEventLogging bool - m sync.RWMutex - Orchestrator string - NodeID string + m sync.RWMutex + metadata map[string]string } -func NewCNSLogger(fileName string, logLevel, logTarget int, logDir string) (*CNSLogger, error) { +// Deprecated: The v1 logger is deprecated. Migrate to zap using the cns/logger/v2 package. 
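With the `status.phase` index registered above, callers can ask the informer cache for pods by phase instead of filtering in Go; a short sketch (the helper name is invented):

```go
package pod

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// listRunning uses the status.phase field index to pull only Running pods
// straight from the cache.
func listRunning(ctx context.Context, cli client.Client) ([]v1.Pod, error) {
	podList := &v1.PodList{}
	if err := cli.List(ctx, podList, client.MatchingFields{"status.phase": string(v1.PodRunning)}); err != nil {
		return nil, err
	}
	return podList.Items, nil
}
```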
+func New(fileName string, logLevel, logTarget int, logDir string) (loggershim, error) { l, err := log.NewLoggerE(fileName, logLevel, logTarget, logDir) if err != nil { return nil, errors.Wrap(err, "could not get new logger") @@ -43,193 +47,174 @@ func NewCNSLogger(fileName string, logLevel, logTarget int, logDir string) (*CNS } zapLogger := zap.New(platformCore, zap.AddCaller()).With(zap.Int("pid", os.Getpid())) - return &CNSLogger{ + return &logger{ logger: l, zapLogger: zapLogger, + metadata: map[string]string{}, }, nil } -func (c *CNSLogger) InitAI(aiConfig aitelemetry.AIConfig, disableTraceLogging, disableMetricLogging, disableEventLogging bool) { +func (c *logger) InitAI(aiConfig ai.AIConfig, disableTraceLogging, disableMetricLogging, disableEventLogging bool) { c.InitAIWithIKey(aiConfig, aiMetadata, disableTraceLogging, disableMetricLogging, disableEventLogging) } -func (c *CNSLogger) InitAIWithIKey(aiConfig aitelemetry.AIConfig, instrumentationKey string, disableTraceLogging, disableMetricLogging, disableEventLogging bool) { - th, err := aitelemetry.NewAITelemetry("", instrumentationKey, aiConfig) +func (c *logger) InitAIWithIKey(aiConfig ai.AIConfig, instrumentationKey string, disableTraceLogging, disableMetricLogging, disableEventLogging bool) { + th, err := ai.NewAITelemetry("", instrumentationKey, aiConfig) if err != nil { c.logger.Errorf("Error initializing AI Telemetry:%v", err) return } - c.th = th c.logger.Printf("AI Telemetry Handle created") - c.DisableMetricLogging = disableMetricLogging - c.DisableTraceLogging = disableTraceLogging - c.DisableEventLogging = disableEventLogging + c.disableMetricLogging = disableMetricLogging + c.disableTraceLogging = disableTraceLogging + c.disableEventLogging = disableEventLogging } -// wait time for closing AI telemetry session. -const waitTimeInSecs = 10 - -func (c *CNSLogger) Close() { +func (c *logger) Close() { c.logger.Close() if c.th != nil { c.th.Close(waitTimeInSecs) } } -func (c *CNSLogger) SetContextDetails(orchestrator, nodeID string) { +func (c *logger) SetContextDetails(orchestrator, nodeID string) { c.logger.Logf("SetContext details called with: %v orchestrator nodeID %v", orchestrator, nodeID) c.m.Lock() - c.Orchestrator = orchestrator - c.NodeID = nodeID + c.metadata[orchestratorTypeKey] = orchestrator + c.metadata[nodeIDKey] = nodeID + c.m.Unlock() +} + +func (c *logger) SetAPIServer(apiserver string) { + c.m.Lock() + c.metadata[apiServerKey] = apiserver c.m.Unlock() } -func (c *CNSLogger) Printf(format string, args ...any) { +func (c *logger) Printf(format string, args ...any) { c.logger.Logf(format, args...) c.zapLogger.Info(fmt.Sprintf(format, args...)) - - if c.th == nil || c.DisableTraceLogging { + if c.th == nil || c.disableTraceLogging { return } - msg := fmt.Sprintf(format, args...) - c.sendTraceInternal(msg) + c.sendTraceInternal(msg, ai.InfoLevel) } -func (c *CNSLogger) Debugf(format string, args ...any) { +func (c *logger) Debugf(format string, args ...any) { c.logger.Debugf(format, args...) c.zapLogger.Debug(fmt.Sprintf(format, args...)) - - if c.th == nil || c.DisableTraceLogging { + if c.th == nil || c.disableTraceLogging { return } - msg := fmt.Sprintf(format, args...) - c.sendTraceInternal(msg) + c.sendTraceInternal(msg, ai.DebugLevel) } -func (c *CNSLogger) Warnf(format string, args ...any) { +func (c *logger) Warnf(format string, args ...any) { c.logger.Warnf(format, args...) 
c.zapLogger.Warn(fmt.Sprintf(format, args...)) - - if c.th == nil || c.DisableTraceLogging { + if c.th == nil || c.disableTraceLogging { return } - msg := fmt.Sprintf(format, args...) - c.sendTraceInternal(msg) + c.sendTraceInternal(msg, ai.WarnLevel) } -func (c *CNSLogger) Errorf(format string, args ...any) { +func (c *logger) Errorf(format string, args ...any) { c.logger.Errorf(format, args...) c.zapLogger.Error(fmt.Sprintf(format, args...)) - - if c.th == nil || c.DisableTraceLogging { + if c.th == nil || c.disableTraceLogging { return } - msg := fmt.Sprintf(format, args...) - c.sendTraceInternal(msg) + c.sendTraceInternal(msg, ai.ErrorLevel) } -func (c *CNSLogger) Request(tag string, request any, err error) { +func (c *logger) Request(tag string, request any, err error) { c.logger.Request(tag, request, err) - - if c.th == nil || c.DisableTraceLogging { + if c.th == nil || c.disableTraceLogging { return } - var msg string + lvl := ai.InfoLevel if err == nil { msg = fmt.Sprintf("[%s] Received %T %+v.", tag, request, request) } else { msg = fmt.Sprintf("[%s] Failed to decode %T %+v %s.", tag, request, request, err.Error()) + lvl = ai.ErrorLevel } - - c.sendTraceInternal(msg) + c.sendTraceInternal(msg, lvl) } -func (c *CNSLogger) Response(tag string, response any, returnCode types.ResponseCode, err error) { +func (c *logger) Response(tag string, response any, returnCode types.ResponseCode, err error) { c.logger.Response(tag, response, int(returnCode), returnCode.String(), err) - - if c.th == nil || c.DisableTraceLogging { + if c.th == nil || c.disableTraceLogging { return } - var msg string + lvl := ai.InfoLevel switch { case err == nil && returnCode == 0: msg = fmt.Sprintf("[%s] Sent %T %+v.", tag, response, response) case err != nil: msg = fmt.Sprintf("[%s] Code:%s, %+v %s.", tag, returnCode.String(), response, err.Error()) + lvl = ai.ErrorLevel default: msg = fmt.Sprintf("[%s] Code:%s, %+v.", tag, returnCode.String(), response) } - - c.sendTraceInternal(msg) + c.sendTraceInternal(msg, lvl) } -func (c *CNSLogger) ResponseEx(tag string, request, response any, returnCode types.ResponseCode, err error) { +func (c *logger) ResponseEx(tag string, request, response any, returnCode types.ResponseCode, err error) { c.logger.ResponseEx(tag, request, response, int(returnCode), returnCode.String(), err) - - if c.th == nil || c.DisableTraceLogging { + if c.th == nil || c.disableTraceLogging { return } - var msg string + lvl := ai.InfoLevel switch { case err == nil && returnCode == 0: msg = fmt.Sprintf("[%s] Sent %T %+v %T %+v.", tag, request, request, response, response) case err != nil: msg = fmt.Sprintf("[%s] Code:%s, %+v, %+v, %s.", tag, returnCode.String(), request, response, err.Error()) + lvl = ai.ErrorLevel default: msg = fmt.Sprintf("[%s] Code:%s, %+v, %+v.", tag, returnCode.String(), request, response) } - - c.sendTraceInternal(msg) + c.sendTraceInternal(msg, lvl) } -func (c *CNSLogger) getOrchestratorAndNodeID() (orch, nodeID string) { +func (c *logger) sendTraceInternal(msg string, lvl ai.Level) { + report := ai.Report{ + Message: msg, + Level: lvl, + CustomDimensions: map[string]string{"Level": lvl.String()}, + } c.m.RLock() - orch, nodeID = c.Orchestrator, c.NodeID + report.Context = c.metadata[nodeIDKey] // read under the lock; SetContextDetails writes this map concurrently + maps.Copy(report.CustomDimensions, c.metadata) c.m.RUnlock() - return -} - -func (c *CNSLogger) sendTraceInternal(msg string) { - orch, nodeID := c.getOrchestratorAndNodeID() - - report := aitelemetry.Report{ - Message: msg, - Context: nodeID, - CustomDimensions: map[string]string{ -
OrchestratorTypeStr: orch, - NodeIDStr: nodeID, - }, - } - c.th.TrackLog(report) } -func (c *CNSLogger) LogEvent(event aitelemetry.Event) { - if c.th == nil || c.DisableEventLogging { +func (c *logger) LogEvent(event ai.Event) { + if c.th == nil || c.disableEventLogging { return } - - orch, nodeID := c.getOrchestratorAndNodeID() - event.Properties[OrchestratorTypeStr] = orch - event.Properties[NodeIDStr] = nodeID + c.m.RLock() + maps.Copy(event.Properties, c.metadata) + c.m.RUnlock() c.th.TrackEvent(event) } -func (c *CNSLogger) SendMetric(metric aitelemetry.Metric) { - if c.th == nil || c.DisableMetricLogging { +func (c *logger) SendMetric(metric ai.Metric) { + if c.th == nil || c.disableMetricLogging { return } - - orch, nodeID := c.getOrchestratorAndNodeID() - metric.CustomDimensions[OrchestratorTypeStr] = orch - metric.CustomDimensions[NodeIDStr] = nodeID + c.m.RLock() + maps.Copy(metric.CustomDimensions, c.metadata) + c.m.RUnlock() c.th.TrackMetric(metric) } diff --git a/cns/logger/cnslogger_windows.go b/cns/logger/cnslogger_windows.go index b909991178..f1b3e6263e 100644 --- a/cns/logger/cnslogger_windows.go +++ b/cns/logger/cnslogger_windows.go @@ -19,7 +19,7 @@ func getPlatformCores(loggingLevel zapcore.Level, encoder zapcore.Encoder) (zapc } func getETWCore(loggingLevel zapcore.Level, encoder zapcore.Encoder) (zapcore.Core, error) { - etwcore, err := zapetw.NewETWCore(etwCNSEventName, encoder, loggingLevel) + etwcore, _, err := zapetw.New("ACN-Monitoring", etwCNSEventName, encoder, loggingLevel) if err != nil { return nil, errors.Wrap(err, "failed to create ETW core") } diff --git a/cns/logger/constants.go b/cns/logger/constants.go index 36a8724427..9258ecda25 100644 --- a/cns/logger/constants.go +++ b/cns/logger/constants.go @@ -3,17 +3,22 @@ package logger const ( // Metrics - HeartBeatMetricStr = "HeartBeat" + HeartBeatMetricStr = "HeartBeat" + ConfigSnapshotMetricsStr = "ConfigSnapshot" // Dimensions - OrchestratorTypeStr = "OrchestratorType" - NodeIDStr = "NodeID" - HomeAZStr = "HomeAZ" - IsAZRSupportedStr = "IsAZRSupported" - HomeAZErrorCodeStr = "HomeAZErrorCode" - HomeAZErrorMsgStr = "HomeAZErrorMsg" + orchestratorTypeKey = "OrchestratorType" + nodeIDKey = "NodeID" + HomeAZStr = "HomeAZ" + IsAZRSupportedStr = "IsAZRSupported" + IsAZRDualStackFixPresentStr = "IsAZRDualStackFixPresent" + HomeAZErrorCodeStr = "HomeAZErrorCode" + HomeAZErrorMsgStr = "HomeAZErrorMsg" + CNSConfigPropertyStr = "CNSConfiguration" + CNSConfigMD5CheckSumPropertyStr = "CNSConfigurationMD5Checksum" + apiServerKey = "APIServer" - // CNS Snspshot properties + // CNS NC Snapshot properties CnsNCSnapshotEventStr = "CNSNCSnapshot" IpConfigurationStr = "IPConfiguration" LocalIPConfigurationStr = "LocalIPConfiguration" diff --git a/cns/logger/log.go b/cns/logger/log.go index 1ded6d42f1..2a0d903a03 100644 --- a/cns/logger/log.go +++ b/cns/logger/log.go @@ -6,65 +6,95 @@ import ( "github.com/Azure/azure-container-networking/cns/types" ) +type loggershim interface { + Close() + InitAI(aitelemetry.AIConfig, bool, bool, bool) + InitAIWithIKey(aitelemetry.AIConfig, string, bool, bool, bool) + SetContextDetails(string, string) + SetAPIServer(string) + Printf(string, ...any) + Debugf(string, ...any) + Warnf(string, ...any) + LogEvent(aitelemetry.Event) + Errorf(string, ...any) + Request(string, any, error) + Response(string, any, types.ResponseCode, error) + ResponseEx(string, any, any, types.ResponseCode, error) + SendMetric(aitelemetry.Metric) +} + var ( - Log *CNSLogger - aiMetadata string // this var is set at
build time. + Log loggershim + AppInsightsIKey = aiMetadata + aiMetadata string // this var is set at build time. ) -// todo: the functions below should be removed. CNSLogger should be injected where needed and not used from package level scope. - +// Deprecated: The global logger is deprecated. Migrate to zap using the cns/logger/v2 package and pass the logger instead. func Close() { Log.Close() } +// Deprecated: The global logger is deprecated. Migrate to zap using the cns/logger/v2 package and pass the logger instead. func InitLogger(fileName string, logLevel, logTarget int, logDir string) { - Log, _ = NewCNSLogger(fileName, logLevel, logTarget, logDir) + Log, _ = New(fileName, logLevel, logTarget, logDir) } +// Deprecated: The global logger is deprecated. Migrate to zap using the cns/logger/v2 package and pass the logger instead. func InitAI(aiConfig aitelemetry.AIConfig, disableTraceLogging, disableMetricLogging, disableEventLogging bool) { Log.InitAI(aiConfig, disableTraceLogging, disableMetricLogging, disableEventLogging) } +// Deprecated: The global logger is deprecated. Migrate to zap using the cns/logger/v2 package and pass the logger instead. func InitAIWithIKey(aiConfig aitelemetry.AIConfig, instrumentationKey string, disableTraceLogging, disableMetricLogging, disableEventLogging bool) { Log.InitAIWithIKey(aiConfig, instrumentationKey, disableTraceLogging, disableMetricLogging, disableEventLogging) } +// Deprecated: The global logger is deprecated. Migrate to zap using the cns/logger/v2 package and pass the logger instead. func SetContextDetails(orchestrator, nodeID string) { Log.SetContextDetails(orchestrator, nodeID) } +// Deprecated: The global logger is deprecated. Migrate to zap using the cns/logger/v2 package and pass the logger instead. func Printf(format string, args ...any) { Log.Printf(format, args...) } +// Deprecated: The global logger is deprecated. Migrate to zap using the cns/logger/v2 package and pass the logger instead. func Debugf(format string, args ...any) { Log.Debugf(format, args...) } +// Deprecated: The global logger is deprecated. Migrate to zap using the cns/logger/v2 package and pass the logger instead. func Warnf(format string, args ...any) { Log.Warnf(format, args...) } +// Deprecated: The global logger is deprecated. Migrate to zap using the cns/logger/v2 package and pass the logger instead. func LogEvent(event aitelemetry.Event) { Log.LogEvent(event) } +// Deprecated: The global logger is deprecated. Migrate to zap using the cns/logger/v2 package and pass the logger instead. func Errorf(format string, args ...any) { Log.Errorf(format, args...) } +// Deprecated: The global logger is deprecated. Migrate to zap using the cns/logger/v2 package and pass the logger instead. func Request(tag string, request any, err error) { Log.Request(tag, request, err) } +// Deprecated: The global logger is deprecated. Migrate to zap using the cns/logger/v2 package and pass the logger instead. func Response(tag string, response any, returnCode types.ResponseCode, err error) { Log.Response(tag, response, returnCode, err) } +// Deprecated: The global logger is deprecated. Migrate to zap using the cns/logger/v2 package and pass the logger instead. func ResponseEx(tag string, request, response any, returnCode types.ResponseCode, err error) { Log.ResponseEx(tag, request, response, returnCode, err) } +// Deprecated: The global logger is deprecated. Migrate to zap using the cns/logger/v2 package and pass the logger instead. 
func SendMetric(metric aitelemetry.Metric) { Log.SendMetric(metric) } diff --git a/cns/logger/v2/config.go b/cns/logger/v2/config.go new file mode 100644 index 0000000000..5f4a095fb3 --- /dev/null +++ b/cns/logger/v2/config.go @@ -0,0 +1,74 @@ +package logger + +import ( + "encoding/json" + + loggerv1 "github.com/Azure/azure-container-networking/cns/logger" + "github.com/Azure/azure-container-networking/internal/time" + "github.com/pkg/errors" + "go.uber.org/zap/zapcore" +) + +//nolint:unused // will be used +const ( + defaultMaxBackups = 10 + defaultMaxSize = 10 // MB + defaultMaxBatchInterval = 30 * time.Second + defaultMaxBatchSize = 32000 + defaultGracePeriod = 30 * time.Second +) + +//nolint:unused // will be used +var defaultIKey = loggerv1.AppInsightsIKey + +// UnmarshalJSON implements json.Unmarshaler for the Config. +// It only differs from the default by parsing the +// Level string into a zapcore.Level and setting the level field. +func (c *Config) UnmarshalJSON(data []byte) error { + type Alias Config + aux := &struct { + *Alias + }{ + Alias: (*Alias)(c), + } + if err := json.Unmarshal(data, &aux); err != nil { //nolint:musttag // doesn't understand the embedding strategy + return errors.Wrap(err, "failed to unmarshal Config") + } + lvl, err := zapcore.ParseLevel(c.Level) + if err != nil { + return errors.Wrap(err, "failed to parse Config Level") + } + c.level = lvl + return nil +} + +// Normalize checks the Config for missing/default values and sets them +// if appropriate. +func (c *Config) Normalize() { + if c.File != nil { + if c.File.Filepath == "" { + c.File.Filepath = defaultFilePath + } + if c.File.MaxBackups == 0 { + c.File.MaxBackups = defaultMaxBackups + } + if c.File.MaxSize == 0 { + c.File.MaxSize = defaultMaxSize + } + } + if c.AppInsights != nil { + if c.AppInsights.IKey == "" { + c.AppInsights.IKey = defaultIKey + } + if c.AppInsights.GracePeriod.Duration == 0 { + c.AppInsights.GracePeriod.Duration = defaultGracePeriod + } + if c.AppInsights.MaxBatchInterval.Duration == 0 { + c.AppInsights.MaxBatchInterval.Duration = defaultMaxBatchInterval + } + if c.AppInsights.MaxBatchSize == 0 { + c.AppInsights.MaxBatchSize = defaultMaxBatchSize + } + } + c.normalize() +} diff --git a/cns/logger/v2/config_linux.go b/cns/logger/v2/config_linux.go new file mode 100644 index 0000000000..0b109ebaa2 --- /dev/null +++ b/cns/logger/v2/config_linux.go @@ -0,0 +1,18 @@ +package logger + +import ( + cores "github.com/Azure/azure-container-networking/cns/logger/v2/cores" + "go.uber.org/zap/zapcore" +) + +const defaultFilePath = "/var/log/azure-cns.log" + +type Config struct { + // Level is the general logging Level. If cores have more specific config it will override this. 
+ Level string `json:"level"` + level zapcore.Level `json:"-"` + AppInsights *cores.AppInsightsConfig `json:"appInsights,omitempty"` + File *cores.FileConfig `json:"file,omitempty"` +} + +func (c *Config) normalize() {} diff --git a/cns/logger/v2/config_test.go b/cns/logger/v2/config_test.go new file mode 100644 index 0000000000..1295cd663b --- /dev/null +++ b/cns/logger/v2/config_test.go @@ -0,0 +1,55 @@ +package logger + +import ( + "encoding/json" + "testing" + + cores "github.com/Azure/azure-container-networking/cns/logger/v2/cores" + "github.com/stretchr/testify/require" +) + +func TestUnmarshalJSON(t *testing.T) { + tests := []struct { + name string + have []byte + want *Config + wantErr bool + }{ + { + name: "valid", + have: []byte(`{"level":"info"}`), + want: &Config{ + Level: "info", + level: 0, + }, + }, + { + name: "invalid level", + have: []byte(`{"level":"invalid"}`), + wantErr: true, + }, + { + name: "valid with file", + have: []byte(`{"level":"info","file":{"filepath":"/k/azurecns/azure-cns.log"}}`), + want: &Config{ + Level: "info", + level: 0, + File: &cores.FileConfig{ + Filepath: "/k/azurecns/azure-cns.log", + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &Config{} + err := json.Unmarshal(tt.have, c) + if tt.wantErr { + require.Error(t, err) + return + } + require.NoError(t, err) + require.Equal(t, tt.want, c) + }) + } +} diff --git a/cns/logger/v2/config_windows.go b/cns/logger/v2/config_windows.go new file mode 100644 index 0000000000..f614e8797c --- /dev/null +++ b/cns/logger/v2/config_windows.go @@ -0,0 +1,28 @@ +package logger + +import ( + cores "github.com/Azure/azure-container-networking/cns/logger/v2/cores" + "go.uber.org/zap/zapcore" +) + +const defaultFilePath = "/k/azurecns/azure-cns.log" + +type Config struct { + // Level is the general logging Level. If cores have more specific config it will override this. + Level string `json:"level"` + level zapcore.Level `json:"-"` + AppInsights *cores.AppInsightsConfig `json:"appInsights,omitempty"` + File *cores.FileConfig `json:"file,omitempty"` + ETW *cores.ETWConfig `json:"etw,omitempty"` +} + +func (c *Config) normalize() { + if c.ETW != nil { + if c.ETW.EventName == "" { + c.ETW.EventName = "AzureCNS" + } + if c.ETW.ProviderName == "" { + c.ETW.ProviderName = "ACN-Monitoring" + } + } +} diff --git a/cns/logger/v2/cores/ai.go b/cns/logger/v2/cores/ai.go new file mode 100644 index 0000000000..d7cc82b801 --- /dev/null +++ b/cns/logger/v2/cores/ai.go @@ -0,0 +1,67 @@ +package logger + +import ( + "encoding/json" + + "github.com/Azure/azure-container-networking/internal/time" + "github.com/Azure/azure-container-networking/zapai" + "github.com/microsoft/ApplicationInsights-Go/appinsights" + "github.com/pkg/errors" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +type AppInsightsConfig struct { + level zapcore.Level `json:"-"` // Zero value is default Info level. + Level string `json:"level"` + IKey string `json:"ikey"` + GracePeriod time.Duration `json:"grace_period"` + MaxBatchInterval time.Duration `json:"max_batch_interval"` + MaxBatchSize int `json:"max_batch_size"` + Fields []zapcore.Field `json:"fields"` +} + +// UnmarshalJSON implements json.Unmarshaler for the Config. +// It only differs from the default by parsing the +// Level string into a zapcore.Level and setting the level field. 
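Every config in this package repeats the same `type Alias` device. Isolated, the trick is that the alias type carries the same fields but none of the methods, so the inner `json.Unmarshal` cannot recurse back into `UnmarshalJSON`, and the level string is parsed exactly once:

```go
package logger

import (
	"encoding/json"

	"github.com/pkg/errors"
	"go.uber.org/zap/zapcore"
)

type leveled struct {
	Level string        `json:"level"`
	level zapcore.Level `json:"-"`
}

func (l *leveled) UnmarshalJSON(data []byte) error {
	type Alias leveled // Alias has the same fields but no methods...
	aux := &struct{ *Alias }{Alias: (*Alias)(l)}
	if err := json.Unmarshal(data, aux); err != nil { // ...so this cannot recurse
		return errors.Wrap(err, "failed to unmarshal leveled")
	}
	lvl, err := zapcore.ParseLevel(l.Level)
	if err != nil {
		return errors.Wrap(err, "failed to parse Level")
	}
	l.level = lvl
	return nil
}
```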
+func (c *AppInsightsConfig) UnmarshalJSON(data []byte) error { + type Alias AppInsightsConfig + aux := &struct { + *Alias + }{ + Alias: (*Alias)(c), + } + if err := json.Unmarshal(data, &aux); err != nil { + return errors.Wrap(err, "failed to unmarshal AppInsightsConfig") + } + lvl, err := zapcore.ParseLevel(c.Level) + if err != nil { + return errors.Wrap(err, "failed to parse AppInsightsConfig Level") + } + c.level = lvl + return nil +} + +// ApplicationInsightsCore builds a zapcore.Core that sends logs to Application Insights. +// The first return is the core, the second is a function to close the sink. +func ApplicationInsightsCore(cfg *AppInsightsConfig) (zapcore.Core, func(), error) { + // build the AI config + aicfg := *appinsights.NewTelemetryConfiguration(cfg.IKey) + aicfg.MaxBatchSize = cfg.MaxBatchSize + aicfg.MaxBatchInterval = cfg.MaxBatchInterval.Duration + sinkcfg := zapai.SinkConfig{ + GracePeriod: cfg.GracePeriod.Duration, + TelemetryConfiguration: aicfg, + } + // open the AI zap sink + sink, aiclose, err := zap.Open(sinkcfg.URI()) + if err != nil { + return nil, aiclose, errors.Wrap(err, "failed to open AI sink") + } + // build the AI core + core := zapai.NewCore(cfg.level, sink) + core = core.WithFieldMappers(zapai.DefaultMappers) + // add normalized fields for the built-in AI Tags + + return core.With(cfg.Fields), aiclose, nil +} diff --git a/cns/logger/v2/cores/ai_test.go b/cns/logger/v2/cores/ai_test.go new file mode 100644 index 0000000000..963d8d37c4 --- /dev/null +++ b/cns/logger/v2/cores/ai_test.go @@ -0,0 +1,48 @@ +package logger + +import ( + "encoding/json" + "testing" + + "github.com/Azure/azure-container-networking/internal/time" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" +) + +func TestAIConfigUnmarshalJSON(t *testing.T) { + tests := []struct { + name string + have []byte + want *AppInsightsConfig + wantErr bool + }{ + { + name: "valid", + have: []byte(`{"grace_period":"30s","level":"panic","max_batch_interval":"30s","max_batch_size":32000}`), + want: &AppInsightsConfig{ + GracePeriod: time.Duration{Duration: 30 * time.Second}, + Level: "panic", + level: zapcore.PanicLevel, + MaxBatchInterval: time.Duration{Duration: 30 * time.Second}, + MaxBatchSize: 32000, + }, + }, + { + name: "invalid level", + have: []byte(`{"level":"invalid"}`), + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &AppInsightsConfig{} + err := json.Unmarshal(tt.have, c) + if tt.wantErr { + require.Error(t, err) + return + } + require.NoError(t, err) + require.Equal(t, tt.want, c) + }) + } +} diff --git a/cns/logger/v2/cores/etw_windows.go b/cns/logger/v2/cores/etw_windows.go new file mode 100644 index 0000000000..da0d49b672 --- /dev/null +++ b/cns/logger/v2/cores/etw_windows.go @@ -0,0 +1,48 @@ +package logger + +import ( + "encoding/json" + + "github.com/Azure/azure-container-networking/zapetw" + "github.com/pkg/errors" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +type ETWConfig struct { + EventName string `json:"eventname"` + Level string `json:"level"` + level zapcore.Level `json:"-"` + ProviderName string `json:"providername"` + Fields []zapcore.Field `json:"fields"` +} + +// UnmarshalJSON implements json.Unmarshaler for the Config. +// It only differs from the default by parsing the +// Level string into a zapcore.Level and setting the level field. 
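A hedged usage sketch for `ApplicationInsightsCore`: unmarshal the same JSON shape the test above exercises, build the core, and run the returned closer so the sink flushes within its grace period (the instrumentation key is a placeholder):

```go
package main

import (
	"encoding/json"
	"log"

	cores "github.com/Azure/azure-container-networking/cns/logger/v2/cores"
	"go.uber.org/zap"
)

func main() {
	raw := []byte(`{"ikey":"00000000-0000-0000-0000-000000000000","level":"info","grace_period":"30s","max_batch_interval":"30s","max_batch_size":32000}`)
	cfg := &cores.AppInsightsConfig{}
	if err := json.Unmarshal(raw, cfg); err != nil {
		log.Fatal(err)
	}
	core, closeAI, err := cores.ApplicationInsightsCore(cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer closeAI() // flush buffered telemetry before exit
	zap.New(core).Info("hello from the AI core")
}
```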
+func (cfg *ETWConfig) UnmarshalJSON(data []byte) error { + type Alias ETWConfig + aux := &struct { + *Alias + }{ + Alias: (*Alias)(cfg), + } + if err := json.Unmarshal(data, &aux); err != nil { + return errors.Wrap(err, "failed to unmarshal ETWConfig") + } + lvl, err := zapcore.ParseLevel(cfg.Level) + if err != nil { + return errors.Wrap(err, "failed to parse ETWConfig Level") + } + cfg.level = lvl + return nil +} + +// ETWCore builds a zapcore.Core that sends logs to ETW. +// The first return is the core, the second is a function to close the sink. +func ETWCore(cfg *ETWConfig) (zapcore.Core, func(), error) { + encoderConfig := zap.NewProductionEncoderConfig() + encoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder + jsonEncoder := zapcore.NewJSONEncoder(encoderConfig) + return zapetw.New(cfg.ProviderName, cfg.EventName, jsonEncoder, cfg.level) //nolint:wrapcheck // ignore +} diff --git a/cns/logger/v2/cores/file.go b/cns/logger/v2/cores/file.go new file mode 100644 index 0000000000..7aa01bb8e7 --- /dev/null +++ b/cns/logger/v2/cores/file.go @@ -0,0 +1,54 @@ +package logger + +import ( + "encoding/json" + + "github.com/pkg/errors" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "gopkg.in/natefinch/lumberjack.v2" +) + +type FileConfig struct { + Filepath string `json:"filepath"` + Level string `json:"level"` + level zapcore.Level `json:"-"` + MaxBackups int `json:"maxBackups"` + MaxSize int `json:"maxSize"` + Fields []zapcore.Field `json:"fields"` +} + +// UnmarshalJSON implements json.Unmarshaler for the Config. +// It only differs from the default by parsing the +// Level string into a zapcore.Level and setting the level field. +func (cfg *FileConfig) UnmarshalJSON(data []byte) error { + type Alias FileConfig + aux := &struct { + *Alias + }{ + Alias: (*Alias)(cfg), + } + if err := json.Unmarshal(data, &aux); err != nil { + return errors.Wrap(err, "failed to unmarshal FileConfig") + } + lvl, err := zapcore.ParseLevel(cfg.Level) + if err != nil { + return errors.Wrap(err, "failed to parse FileConfig Level") + } + cfg.level = lvl + return nil +} + +// FileCore builds a zapcore.Core that writes to a file. +// The first return is the core, the second is a function to close the file. 
+func FileCore(cfg *FileConfig) (zapcore.Core, func(), error) { + filesink := &lumberjack.Logger{ + Filename: cfg.Filepath, + MaxSize: cfg.MaxSize, // MB + MaxBackups: cfg.MaxBackups, + } + encoderConfig := zap.NewProductionEncoderConfig() + encoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder + jsonEncoder := zapcore.NewJSONEncoder(encoderConfig) + return zapcore.NewCore(jsonEncoder, zapcore.AddSync(filesink), cfg.level), func() { _ = filesink.Close() }, nil +} diff --git a/cns/logger/v2/cores/file_test.go b/cns/logger/v2/cores/file_test.go new file mode 100644 index 0000000000..4fde7c3e04 --- /dev/null +++ b/cns/logger/v2/cores/file_test.go @@ -0,0 +1,47 @@ +package logger + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" +) + +func TestFileConfig_UnmarshalJSON(t *testing.T) { + tests := []struct { + name string + have []byte + want *FileConfig + wantErr bool + }{ + { + name: "valid", + have: []byte(`{"filepath":"test.log","level":"debug","maxBackups":5,"maxSize":10}`), + want: &FileConfig{ + Filepath: "test.log", + Level: "debug", + level: zapcore.DebugLevel, + MaxBackups: 5, + MaxSize: 10, + }, + }, + { + name: "invalid level", + have: []byte(`{"filepath":"test.log","level":"invalid","maxBackups":5,"maxSize":10}`), + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &FileConfig{} + err := json.Unmarshal(tt.have, c) + if tt.wantErr { + require.Error(t, err) + return + } + require.NoError(t, err) + require.Equal(t, tt.want, c) + }) + } +} diff --git a/cns/logger/v2/cores/stdout.go b/cns/logger/v2/cores/stdout.go new file mode 100644 index 0000000000..9882483690 --- /dev/null +++ b/cns/logger/v2/cores/stdout.go @@ -0,0 +1,45 @@ +package logger + +import ( + "encoding/json" + "os" + + logfmt "github.com/jsternberg/zap-logfmt" + "github.com/pkg/errors" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +type StdoutConfig struct { + Level string `json:"level"` + level zapcore.Level `json:"-"` + Fields []zapcore.Field `json:"fields"` +} + +// UnmarshalJSON implements json.Unmarshaler for the Config. +// It only differs from the default by parsing the +// Level string into a zapcore.Level and setting the level field. +func (cfg *StdoutConfig) UnmarshalJSON(data []byte) error { + type Alias StdoutConfig + aux := &struct { + *Alias + }{ + Alias: (*Alias)(cfg), + } + if err := json.Unmarshal(data, &aux); err != nil { + return errors.Wrap(err, "failed to unmarshal StdoutConfig") + } + lvl, err := zapcore.ParseLevel(cfg.Level) + if err != nil { + return errors.Wrap(err, "failed to parse StdoutConfig Level") + } + cfg.level = lvl + return nil +} + +// StdoutCore builds a zapcore.Core that writes to stdout. +func StdoutCore(l zapcore.Level) zapcore.Core { + encoderConfig := zap.NewProductionEncoderConfig() + encoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder + return zapcore.NewCore(logfmt.NewEncoder(encoderConfig), os.Stdout, l) +} diff --git a/cns/logger/v2/fields.go b/cns/logger/v2/fields.go new file mode 100644 index 0000000000..926ac0720f --- /dev/null +++ b/cns/logger/v2/fields.go @@ -0,0 +1,23 @@ +package logger + +import ( + "github.com/Azure/azure-container-networking/common" + "go.uber.org/zap" +) + +// MetadataToFields transforms Az IMDS Metadata into zap.Field for +// attaching to a root zap core or logger instance. +// This uses the nice-names from the zapai.DefaultMappers instead of +// raw AppInsights key names.
+func MetadataToFields(meta common.Metadata) []zap.Field { + return []zap.Field{ + zap.String("account", meta.SubscriptionID), + zap.String("anonymous_user_id", meta.VMName), + zap.String("location", meta.Location), + zap.String("resource_group", meta.ResourceGroupName), + zap.String("vm_size", meta.VMSize), + zap.String("os_version", meta.OSVersion), + zap.String("vm_id", meta.VMID), + zap.String("session_id", meta.VMID), + } +} diff --git a/cns/logger/v2/logger.go b/cns/logger/v2/logger.go new file mode 100644 index 0000000000..f5c5c3a85b --- /dev/null +++ b/cns/logger/v2/logger.go @@ -0,0 +1,47 @@ +// Package logger provides an opinionated logger for CNS which knows how to +// log to Application Insights, file, stdout and ETW (based on platform). +package logger + +import ( + cores "github.com/Azure/azure-container-networking/cns/logger/v2/cores" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +type compoundCloser []func() + +func (c compoundCloser) Close() { + for _, closer := range c { + closer() + } +} + +// New creates a v2 CNS logger built with Zap. +func New(cfg *Config) (*zap.Logger, func(), error) { + cfg.Normalize() + core := cores.StdoutCore(cfg.level) + closer := compoundCloser{} + if cfg.File != nil { + fileCore, fileCloser, err := cores.FileCore(cfg.File) + closer = append(closer, fileCloser) + if err != nil { + return nil, closer.Close, err //nolint:wrapcheck // it's an internal pkg + } + core = zapcore.NewTee(core, fileCore) + } + if cfg.AppInsights != nil { + aiCore, aiCloser, err := cores.ApplicationInsightsCore(cfg.AppInsights) + closer = append(closer, aiCloser) + if err != nil { + return nil, closer.Close, err //nolint:wrapcheck // it's an internal pkg + } + core = zapcore.NewTee(core, aiCore) + } + platformCore, platformCloser, err := platformCore(cfg) + closer = append(closer, platformCloser) + if err != nil { + return nil, closer.Close, err + } + core = zapcore.NewTee(core, platformCore) + return zap.New(core), closer.Close, nil +} diff --git a/cns/logger/v2/logger_linux.go b/cns/logger/v2/logger_linux.go new file mode 100644 index 0000000000..e0311284fc --- /dev/null +++ b/cns/logger/v2/logger_linux.go @@ -0,0 +1,10 @@ +package logger + +import ( + "go.uber.org/zap/zapcore" +) + +// On Linux, platformCore returns a no-op core. +func platformCore(*Config) (zapcore.Core, func(), error) { + return zapcore.NewNopCore(), func() {}, nil +} diff --git a/cns/logger/v2/logger_windows.go b/cns/logger/v2/logger_windows.go new file mode 100644 index 0000000000..186fae1f14 --- /dev/null +++ b/cns/logger/v2/logger_windows.go @@ -0,0 +1,14 @@ +package logger + +import ( + cores "github.com/Azure/azure-container-networking/cns/logger/v2/cores" + "go.uber.org/zap/zapcore" +) + +// On Windows, platformCore returns a zapcore.Core that sends logs to ETW. +func platformCore(cfg *Config) (zapcore.Core, func(), error) { + if cfg.ETW == nil { + return zapcore.NewNopCore(), func() {}, nil + } + return cores.ETWCore(cfg.ETW) //nolint:wrapcheck // ignore +} diff --git a/cns/logger/v2/shim.go b/cns/logger/v2/shim.go new file mode 100644 index 0000000000..b427cef8f4 --- /dev/null +++ b/cns/logger/v2/shim.go @@ -0,0 +1,64 @@ +package logger + +import ( + "github.com/Azure/azure-container-networking/aitelemetry" + "github.com/Azure/azure-container-networking/cns/types" + "go.uber.org/zap" +) + +// shim wraps the Zap logger to provide a compatible interface to the +// legacy CNS logger. This is temporary and exists to make migration +// feasible and optional. 
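An end-to-end sketch of standing up the v2 logger from JSON config. `New` normalizes defaults and tees stdout, file, AI, and the platform core together; legacy call sites can then be handed `AsV1(z, closeLogs)` (below) wherever the v1 interface is still expected:

```go
package main

import (
	"encoding/json"
	"log"

	loggerv2 "github.com/Azure/azure-container-networking/cns/logger/v2"
)

func main() {
	cfg := &loggerv2.Config{}
	raw := []byte(`{"level":"info","file":{"filepath":"/var/log/azure-cns.log"}}`)
	if err := json.Unmarshal(raw, cfg); err != nil {
		log.Fatal(err)
	}
	z, closeLogs, err := loggerv2.New(cfg) // stdout + file (+ ETW on Windows)
	if err != nil {
		log.Fatal(err)
	}
	defer closeLogs()
	z.Info("cns logger v2 up")
}
```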
+type shim struct { + z *zap.Logger + closer func() +} + +func (s *shim) Close() { + _ = s.z.Sync() + s.closer() +} + +func (s *shim) Printf(format string, a ...any) { + s.z.Sugar().Infof(format, a...) +} + +func (s *shim) Debugf(format string, a ...any) { + s.z.Sugar().Debugf(format, a...) +} + +func (s *shim) Warnf(format string, a ...any) { + s.z.Sugar().Warnf(format, a...) +} + +func (s *shim) Errorf(format string, a ...any) { + s.z.Sugar().Errorf(format, a...) +} + +func (s *shim) Request(msg string, data any, err error) { + s.z.Sugar().Infow("Request", "message", msg, "data", data, "error", err) +} + +func (s *shim) Response(msg string, data any, code types.ResponseCode, err error) { + s.z.Sugar().Infow("Response", "message", msg, "data", data, "code", code, "error", err) +} + +func (s *shim) ResponseEx(msg string, request, response any, code types.ResponseCode, err error) { + s.z.Sugar().Infow("ResponseEx", "message", msg, "request", request, "response", response, "code", code, "error", err) +} + +func (*shim) InitAI(aitelemetry.AIConfig, bool, bool, bool) {} + +func (*shim) InitAIWithIKey(aitelemetry.AIConfig, string, bool, bool, bool) {} + +func (s *shim) SetContextDetails(string, string) {} + +func (s *shim) SetAPIServer(string) {} + +func (s *shim) SendMetric(aitelemetry.Metric) {} + +func (s *shim) LogEvent(aitelemetry.Event) {} + +func AsV1(z *zap.Logger, closer func()) *shim { //nolint:revive // I want it to be annoying to use. + return &shim{z: z, closer: closer} +} diff --git a/cns/metric/configsnapshot.go b/cns/metric/configsnapshot.go new file mode 100644 index 0000000000..6d70377f0e --- /dev/null +++ b/cns/metric/configsnapshot.go @@ -0,0 +1,58 @@ +package metric + +import ( + "context" + "crypto/md5" //nolint:gosec // used for checksum + "encoding/json" + "time" + + "github.com/Azure/azure-container-networking/aitelemetry" + "github.com/Azure/azure-container-networking/cns/configuration" + "github.com/Azure/azure-container-networking/cns/logger" + "github.com/pkg/errors" +) + +// SendCNSConfigSnapshot emits CNS config periodically +func SendCNSConfigSnapshot(ctx context.Context, config *configuration.CNSConfig) { + ticker := time.NewTicker(time.Minute * time.Duration(config.TelemetrySettings.ConfigSnapshotIntervalInMins)) + defer ticker.Stop() + + event, err := createCNSConfigSnapshotEvent(config) + if err != nil { + logger.Errorf("[Azure CNS] SendCNSConfigSnapshot: %v", err) + return + } + + // Log the first event immediately + logger.LogEvent(event) + + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + logger.LogEvent(event) + } + } +} + +func createCNSConfigSnapshotEvent(config *configuration.CNSConfig) (aitelemetry.Event, error) { + bb, err := json.Marshal(config) //nolint:musttag // no tag needed for config + if err != nil { + return aitelemetry.Event{}, errors.Wrap(err, "failed to marshal config") + } + + cs := md5.Sum(bb) //nolint:gosec // used for checksum + csStr := string(cs[:]) + + event := aitelemetry.Event{ + EventName: logger.ConfigSnapshotMetricsStr, + ResourceID: csStr, // not guaranteed unique, instead use VM ID and Subscription to correlate + Properties: map[string]string{ + logger.CNSConfigPropertyStr: string(bb), + logger.CNSConfigMD5CheckSumPropertyStr: csStr, + }, + } + + return event, nil +} diff --git a/cns/metric/configsnapshot_test.go b/cns/metric/configsnapshot_test.go new file mode 100644 index 0000000000..d0e12c2539 --- /dev/null +++ b/cns/metric/configsnapshot_test.go @@ -0,0 +1,30 @@ +package metric + +import ( + 
"encoding/json" + "testing" + + "github.com/Azure/azure-container-networking/cns/configuration" + "github.com/Azure/azure-container-networking/cns/logger" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestCreateCNSConfigSnapshotEvent(t *testing.T) { + logger.InitLogger("testlogs", 0, 0, "./") + + config, err := configuration.ReadConfig("../configuration/testdata/good.json") + require.NoError(t, err) + + event, err := createCNSConfigSnapshotEvent(config) + require.NoError(t, err) + + assert.Equal(t, logger.ConfigSnapshotMetricsStr, event.EventName) + assert.NotEmpty(t, event.ResourceID) + assert.Contains(t, event.Properties[logger.CNSConfigPropertyStr], "\"TLSPort\":\"10091\"") + + eventConfig := &configuration.CNSConfig{} + err = json.Unmarshal([]byte(event.Properties[logger.CNSConfigPropertyStr]), eventConfig) //nolint:musttag // no tag needed for config + require.NoError(t, err) + assert.EqualValues(t, config, eventConfig) +} diff --git a/cns/metric/heartbeat.go b/cns/metric/heartbeat.go index de65e4e5ea..f37223acdc 100644 --- a/cns/metric/heartbeat.go +++ b/cns/metric/heartbeat.go @@ -30,7 +30,6 @@ func SendHeartBeat(ctx context.Context, heartbeatInterval time.Duration, homeAzM Value: 1.0, CustomDimensions: make(map[string]string), } - // add azr metrics when channel mode is direct if channelMode == cns.Direct { getHomeAzResp := homeAzMonitor.GetHomeAz(ctx) @@ -38,10 +37,10 @@ func SendHeartBeat(ctx context.Context, heartbeatInterval time.Duration, homeAzM case types.Success: metric.CustomDimensions[logger.IsAZRSupportedStr] = strconv.FormatBool(getHomeAzResp.HomeAzResponse.IsSupported) metric.CustomDimensions[logger.HomeAZStr] = strconv.FormatUint(uint64(getHomeAzResp.HomeAzResponse.HomeAz), 10) + metric.CustomDimensions[logger.IsAZRDualStackFixPresentStr] = strconv.FormatBool(getHomeAzResp.HomeAzResponse.NmaAppliedTheIPV6Fix) default: metric.CustomDimensions[logger.HomeAZErrorCodeStr] = getHomeAzResp.Response.ReturnCode.String() metric.CustomDimensions[logger.HomeAZErrorMsgStr] = getHomeAzResp.Response.Message - } } logger.SendMetric(metric) diff --git a/cns/middlewares/k8sSwiftV2.go b/cns/middlewares/k8sSwiftV2.go index 7b25bac8bb..44df3d3144 100644 --- a/cns/middlewares/k8sSwiftV2.go +++ b/cns/middlewares/k8sSwiftV2.go @@ -2,7 +2,6 @@ package middlewares import ( "context" - "fmt" "github.com/Azure/azure-container-networking/cns" "github.com/Azure/azure-container-networking/cns/configuration" @@ -13,11 +12,13 @@ import ( "github.com/pkg/errors" v1 "k8s.io/api/core/v1" k8stypes "k8s.io/apimachinery/pkg/types" + "k8s.io/kubernetes/pkg/kubelet" "sigs.k8s.io/controller-runtime/pkg/client" ) var ( - errMTPNCNotReady = errors.New("mtpnc is not ready") + errMTPNCNotReady = errors.New(kubelet.NetworkNotReadyErrorMsg + " - mtpnc is not ready") + errGetMTPNC = errors.New(kubelet.NetworkNotReadyErrorMsg + " - failed to get MTPNC") errInvalidSWIFTv2NICType = errors.New("invalid NIC type for SWIFT v2 scenario") errInvalidMTPNCPrefixLength = errors.New("invalid prefix length for MTPNC primaryIP, must be 32") ) @@ -36,124 +37,33 @@ type K8sSWIFTv2Middleware struct { // Verify interface compliance at compile time var _ cns.IPConfigsHandlerMiddleware = (*K8sSWIFTv2Middleware)(nil) -// IPConfigsRequestHandlerWrapper is the middleware function for handling SWIFT v2 IP configs requests for AKS-SWIFT. This function wrapped the default SWIFT request -// and release IP configs handlers. 
-func (k *K8sSWIFTv2Middleware) IPConfigsRequestHandlerWrapper(defaultHandler, failureHandler cns.IPConfigsHandlerFunc) cns.IPConfigsHandlerFunc { - return func(ctx context.Context, req cns.IPConfigsRequest) (*cns.IPConfigsResponse, error) { - podInfo, respCode, message := k.validateIPConfigsRequest(ctx, &req) - - if respCode != types.Success { - return &cns.IPConfigsResponse{ - Response: cns.Response{ - ReturnCode: respCode, - Message: message, - }, - }, errors.New("failed to validate IP configs request") - } - ipConfigsResp, err := defaultHandler(ctx, req) - // If the pod is not v2, return the response from the handler - if !req.SecondaryInterfacesExist { - return ipConfigsResp, err - } - // If the pod is v2, get the infra IP configs from the handler first and then add the SWIFTv2 IP config - defer func() { - // Release the default IP config if there is an error - if err != nil { - _, err = failureHandler(ctx, req) - if err != nil { - logger.Errorf("failed to release default IP config : %v", err) - } - } - }() - if err != nil { - return ipConfigsResp, err - } - SWIFTv2PodIPInfos, err := k.getIPConfig(ctx, podInfo) - if err != nil { - return &cns.IPConfigsResponse{ - Response: cns.Response{ - ReturnCode: types.FailedToAllocateIPConfig, - Message: fmt.Sprintf("AllocateIPConfig failed: %v, IP config request is %v", err, req), - }, - PodIPInfo: []cns.PodIpInfo{}, - }, errors.Wrapf(err, "failed to get SWIFTv2 IP config : %v", req) - } - ipConfigsResp.PodIPInfo = append(ipConfigsResp.PodIPInfo, SWIFTv2PodIPInfos...) - // Set routes for the pod - for i := range ipConfigsResp.PodIPInfo { - ipInfo := &ipConfigsResp.PodIPInfo[i] - // Backend nics doesn't need routes to be set - if ipInfo.NICType != cns.BackendNIC { - err = k.setRoutes(ipInfo) - if err != nil { - return &cns.IPConfigsResponse{ - Response: cns.Response{ - ReturnCode: types.FailedToAllocateIPConfig, - Message: fmt.Sprintf("AllocateIPConfig failed: %v, IP config request is %v", err, req), - }, - PodIPInfo: []cns.PodIpInfo{}, - }, errors.Wrapf(err, "failed to set routes for pod %s", podInfo.Name()) - } - } - } - return ipConfigsResp, nil - } -} - -// validateIPConfigsRequest validates if pod is multitenant by checking the pod labels, used in SWIFT V2 AKS scenario. 
-// nolint -func (k *K8sSWIFTv2Middleware) validateIPConfigsRequest(ctx context.Context, req *cns.IPConfigsRequest) (podInfo cns.PodInfo, respCode types.ResponseCode, message string) { - // Retrieve the pod from the cluster - podInfo, err := cns.UnmarshalPodInfo(req.OrchestratorContext) - if err != nil { - errBuf := errors.Wrapf(err, "failed to unmarshalling pod info from ipconfigs request %+v", req) - return nil, types.UnexpectedError, errBuf.Error() - } - logger.Printf("[SWIFTv2Middleware] validate ipconfigs request for pod %s", podInfo.Name()) - podNamespacedName := k8stypes.NamespacedName{Namespace: podInfo.Namespace(), Name: podInfo.Name()} - pod := v1.Pod{} - if err := k.Cli.Get(ctx, podNamespacedName, &pod); err != nil { - errBuf := errors.Wrapf(err, "failed to get pod %+v", podNamespacedName) - return nil, types.UnexpectedError, errBuf.Error() +func (k *K8sSWIFTv2Middleware) GetPodInfoForIPConfigsRequest(ctx context.Context, req *cns.IPConfigsRequest) (podInfo cns.PodInfo, respCode types.ResponseCode, message string) { + // gets pod info for the specified request + podInfo, pod, respCode, message := k.GetPodInfo(ctx, req) + if respCode != types.Success { + return nil, respCode, message } - // check the pod labels for Swift V2, set the request's SecondaryInterfaceSet flag to true and check if its MTPNC CRD is ready - _, swiftV2PodNetworkLabel := pod.Labels[configuration.LabelPodSwiftV2] - _, swiftV2PodNetworkInstanceLabel := pod.Labels[configuration.LabelPodNetworkInstanceSwiftV2] - if swiftV2PodNetworkLabel || swiftV2PodNetworkInstanceLabel { + // validates if pod is swiftv2 + isSwiftv2 := ValidateSwiftv2Pod(pod) - // Check if the MTPNC CRD exists for the pod, if not, return error - mtpnc := v1alpha1.MultitenantPodNetworkConfig{} - mtpncNamespacedName := k8stypes.NamespacedName{Namespace: podInfo.Namespace(), Name: podInfo.Name()} - if err := k.Cli.Get(ctx, mtpncNamespacedName, &mtpnc); err != nil { - return nil, types.UnexpectedError, fmt.Errorf("failed to get pod's mtpnc from cache : %w", err).Error() - } - // Check if the MTPNC CRD is ready. 
If one of the fields is empty, return error - if !mtpnc.IsReady() { - return nil, types.UnexpectedError, errMTPNCNotReady.Error() - } - // If primary Ip is set in status field, it indicates the presence of secondary interfaces - if mtpnc.Status.PrimaryIP != "" { - req.SecondaryInterfacesExist = true + var mtpnc v1alpha1.MultitenantPodNetworkConfig + // if swiftv2 is enabled, get mtpnc + if isSwiftv2 { + mtpnc, respCode, message = k.getMTPNC(ctx, podInfo) + if respCode != types.Success { + return nil, respCode, message } - interfaceInfos := mtpnc.Status.InterfaceInfos - for _, interfaceInfo := range interfaceInfos { - if interfaceInfo.DeviceType == v1alpha1.DeviceTypeInfiniBandNIC { - if interfaceInfo.MacAddress == "" || interfaceInfo.NCID == "" { - return nil, types.UnexpectedError, errMTPNCNotReady.Error() - } - req.BackendInterfaceExist = true - req.BackendInterfaceMacAddresses = append(req.BackendInterfaceMacAddresses, interfaceInfo.MacAddress) - } - if interfaceInfo.DeviceType == v1alpha1.DeviceTypeVnetNIC { - req.SecondaryInterfacesExist = true - } + // update ipConfigRequest + respCode, message = k.UpdateIPConfigRequest(mtpnc, req) + if respCode != types.Success { + return nil, respCode, message } } logger.Printf("[SWIFTv2Middleware] pod %s has secondary interface : %v", podInfo.Name(), req.SecondaryInterfacesExist) logger.Printf("[SWIFTv2Middleware] pod %s has backend interface : %v", podInfo.Name(), req.BackendInterfaceExist) - // retrieve podinfo from orchestrator context + return podInfo, types.Success, "" } @@ -163,7 +73,7 @@ func (k *K8sSWIFTv2Middleware) getIPConfig(ctx context.Context, podInfo cns.PodI mtpnc := v1alpha1.MultitenantPodNetworkConfig{} mtpncNamespacedName := k8stypes.NamespacedName{Namespace: podInfo.Namespace(), Name: podInfo.Name()} if err := k.Cli.Get(ctx, mtpncNamespacedName, &mtpnc); err != nil { - return nil, errors.Wrapf(err, "failed to get pod's mtpnc from cache") + return nil, errors.Wrap(err, errGetMTPNC.Error()) } // Check if the MTPNC CRD is ready. 
If one of the fields is empty, return error
@@ -203,10 +113,8 @@ func (k *K8sSWIFTv2Middleware) getIPConfig(ctx context.Context, podInfo cns.PodI
 			err     error
 		)
 		switch {
-		case interfaceInfo.DeviceType == v1alpha1.DeviceTypeVnetNIC && !interfaceInfo.AccelnetEnabled:
+		case interfaceInfo.DeviceType == v1alpha1.DeviceTypeVnetNIC:
 			nicType = cns.DelegatedVMNIC
-		case interfaceInfo.DeviceType == v1alpha1.DeviceTypeVnetNIC && interfaceInfo.AccelnetEnabled:
-			nicType = cns.NodeNetworkInterfaceAccelnetFrontendNIC
 		case interfaceInfo.DeviceType == v1alpha1.DeviceTypeInfiniBandNIC:
 			nicType = cns.NodeNetworkInterfaceBackendNIC
 		default:
@@ -239,6 +147,8 @@ func (k *K8sSWIFTv2Middleware) getIPConfig(ctx context.Context, podInfo cns.PodI
 				return nil, errors.Wrap(err, "failed to parse mtpnc subnetAddressSpace prefix")
 			}
+			// for the Windows scenario, add the default route with the gateway IP from CNS before appending, so the copy stored in podIPInfos carries it
+			k.addDefaultRoute(&podIPInfo, interfaceInfo.GatewayIP)
 			podIPInfos = append(podIPInfos, podIPInfo)
 		}
 	}
@@ -249,3 +159,119 @@ func (k *K8sSWIFTv2Middleware) getIPConfig(ctx context.Context, podInfo cns.PodI
 func (k *K8sSWIFTv2Middleware) Type() cns.SWIFTV2Mode {
 	return cns.K8sSWIFTV2
 }
+
+// GetPodInfo retrieves the pod info and the Kubernetes Pod object for the given IP configs request.
+func (k *K8sSWIFTv2Middleware) GetPodInfo(ctx context.Context, req *cns.IPConfigsRequest) (podInfo cns.PodInfo, k8sPod v1.Pod, respCode types.ResponseCode, message string) {
+	// Retrieve the pod from the cluster
+	podInfo, err := cns.UnmarshalPodInfo(req.OrchestratorContext)
+	if err != nil {
+		errBuf := errors.Wrapf(err, "failed to unmarshal pod info from ipconfigs request %+v", req)
+		return nil, v1.Pod{}, types.UnexpectedError, errBuf.Error()
+	}
+	logger.Printf("[SWIFTv2Middleware] validate ipconfigs request for pod %s", podInfo.Name())
+	podNamespacedName := k8stypes.NamespacedName{Namespace: podInfo.Namespace(), Name: podInfo.Name()}
+	pod := v1.Pod{}
+	if err := k.Cli.Get(ctx, podNamespacedName, &pod); err != nil {
+		errBuf := errors.Wrapf(err, "failed to get pod %+v", podNamespacedName)
+		return nil, v1.Pod{}, types.UnexpectedError, errBuf.Error()
+	}
+	return podInfo, pod, types.Success, ""
+}
+
+// ValidateSwiftv2Pod reports whether the pod is multitenant, determined from the pod labels; used in the SWIFT V2 AKS scenario.
+func ValidateSwiftv2Pod(pod v1.Pod) bool {
+	// check the pod labels for Swift V2
+	_, swiftV2PodNetworkLabel := pod.Labels[configuration.LabelPodSwiftV2]
+	_, swiftV2PodNetworkInstanceLabel := pod.Labels[configuration.LabelPodNetworkInstanceSwiftV2]
+	return swiftV2PodNetworkLabel || swiftV2PodNetworkInstanceLabel
+}
+
+func (k *K8sSWIFTv2Middleware) getMTPNC(ctx context.Context, podInfo cns.PodInfo) (mtpncResource v1alpha1.MultitenantPodNetworkConfig, respCode types.ResponseCode, message string) {
+	// Check if the MTPNC CRD exists for the pod; if not, return an error
+	mtpnc := v1alpha1.MultitenantPodNetworkConfig{}
+	mtpncNamespacedName := k8stypes.NamespacedName{Namespace: podInfo.Namespace(), Name: podInfo.Name()}
+	if err := k.Cli.Get(ctx, mtpncNamespacedName, &mtpnc); err != nil {
+		return v1alpha1.MultitenantPodNetworkConfig{}, types.UnexpectedError, errors.Wrap(err, errGetMTPNC.Error()).Error()
+	}
+	// Check if the MTPNC CRD is ready. 
If one of the fields is empty, return error + if !mtpnc.IsReady() { + return v1alpha1.MultitenantPodNetworkConfig{}, types.UnexpectedError, errMTPNCNotReady.Error() + } + return mtpnc, types.Success, "" +} + +// Updates Ip Config Request +func (k *K8sSWIFTv2Middleware) UpdateIPConfigRequest(mtpnc v1alpha1.MultitenantPodNetworkConfig, req *cns.IPConfigsRequest) ( + respCode types.ResponseCode, + message string, +) { + // If primary Ip is set in status field, it indicates the presence of secondary interfaces + if mtpnc.Status.PrimaryIP != "" { + req.SecondaryInterfacesExist = true + } + + interfaceInfos := mtpnc.Status.InterfaceInfos + for _, interfaceInfo := range interfaceInfos { + if interfaceInfo.DeviceType == v1alpha1.DeviceTypeInfiniBandNIC { + if interfaceInfo.MacAddress == "" || interfaceInfo.NCID == "" { + return types.UnexpectedError, errMTPNCNotReady.Error() + } + req.BackendInterfaceExist = true + req.BackendInterfaceMacAddresses = append(req.BackendInterfaceMacAddresses, interfaceInfo.MacAddress) + + } + if interfaceInfo.DeviceType == v1alpha1.DeviceTypeVnetNIC { + req.SecondaryInterfacesExist = true + } + } + + return types.Success, "" +} + +func (k *K8sSWIFTv2Middleware) AddRoutes(cidrs []string, gatewayIP string) []cns.Route { + routes := make([]cns.Route, len(cidrs)) + for i, cidr := range cidrs { + routes[i] = cns.Route{ + IPAddress: cidr, + GatewayIPAddress: gatewayIP, + } + } + return routes +} + +// Both Linux and Windows CNS gets infravnet and service CIDRs from configuration env +// GetInfravnetAndServiceCidrs() returns v4CIDRs(infravnet and service cidrs) as first []string and v6CIDRs(infravnet and service) as second []string +func (k *K8sSWIFTv2Middleware) GetInfravnetAndServiceCidrs() ([]string, []string, error) { //nolint + v4Cidrs := []string{} + v6Cidrs := []string{} + + // Get and parse infraVNETCIDRs from env + infraVNETCIDRs, err := configuration.InfraVNETCIDRs() + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to get infraVNETCIDRs from env") + } + infraVNETCIDRsv4, infraVNETCIDRsv6, err := utils.ParseCIDRs(infraVNETCIDRs) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to parse infraVNETCIDRs") + } + + // Add infravnet CIDRs to v4 and v6 IPs + v4Cidrs = append(v4Cidrs, infraVNETCIDRsv4...) + v6Cidrs = append(v6Cidrs, infraVNETCIDRsv6...) + + // Get and parse serviceCIDRs from env + serviceCIDRs, err := configuration.ServiceCIDRs() + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to get serviceCIDRs from env") + } + serviceCIDRsV4, serviceCIDRsV6, err := utils.ParseCIDRs(serviceCIDRs) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to parse serviceCIDRs") + } + + // Add service CIDRs to v4 and v6 IPs + v4Cidrs = append(v4Cidrs, serviceCIDRsV4...) + v6Cidrs = append(v6Cidrs, serviceCIDRsV6...) 
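+	// Illustrative example (hypothetical values): with InfraVNETCIDRs set to
+	// "10.240.0.0/16,fd00:240::/64" and ServiceCIDRs set to "10.0.0.0/16", this
+	// returns v4Cidrs = ["10.240.0.0/16", "10.0.0.0/16"] and v6Cidrs = ["fd00:240::/64"].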
+ + return v4Cidrs, v6Cidrs, nil +} diff --git a/cns/middlewares/k8sSwiftV2_linux.go b/cns/middlewares/k8sSwiftV2_linux.go index 680242c4aa..bb075cc3e0 100644 --- a/cns/middlewares/k8sSwiftV2_linux.go +++ b/cns/middlewares/k8sSwiftV2_linux.go @@ -1,6 +1,7 @@ package middlewares import ( + "context" "fmt" "net/netip" @@ -8,6 +9,7 @@ import ( "github.com/Azure/azure-container-networking/cns/configuration" "github.com/Azure/azure-container-networking/cns/logger" "github.com/Azure/azure-container-networking/cns/middlewares/utils" + "github.com/Azure/azure-container-networking/cns/types" "github.com/Azure/azure-container-networking/crd/multitenancy/api/v1alpha1" "github.com/pkg/errors" ) @@ -30,53 +32,15 @@ func (k *K8sSWIFTv2Middleware) setRoutes(podIPInfo *cns.PodIpInfo) error { routes = append(routes, virtualGWRoute, route) case cns.InfraNIC: - // Get and parse infraVNETCIDRs from env - infraVNETCIDRs, err := configuration.InfraVNETCIDRs() + // Linux CNS middleware sets the infra routes(pod, infravnet and service cidrs) to infraNIC interface for the podIPInfo used in SWIFT V2 Linux scenario + infraRoutes, err := k.getInfraRoutes(podIPInfo) if err != nil { - return errors.Wrapf(err, "failed to get infraVNETCIDRs from env") - } - infraVNETCIDRsv4, infraVNETCIDRsv6, err := utils.ParseCIDRs(infraVNETCIDRs) - if err != nil { - return errors.Wrapf(err, "failed to parse infraVNETCIDRs") - } - - // Get and parse podCIDRs from env - podCIDRs, err := configuration.PodCIDRs() - if err != nil { - return errors.Wrapf(err, "failed to get podCIDRs from env") - } - podCIDRsV4, podCIDRv6, err := utils.ParseCIDRs(podCIDRs) - if err != nil { - return errors.Wrapf(err, "failed to parse podCIDRs") - } - - // Get and parse serviceCIDRs from env - serviceCIDRs, err := configuration.ServiceCIDRs() - if err != nil { - return errors.Wrapf(err, "failed to get serviceCIDRs from env") - } - serviceCIDRsV4, serviceCIDRsV6, err := utils.ParseCIDRs(serviceCIDRs) - if err != nil { - return errors.Wrapf(err, "failed to parse serviceCIDRs") - } - - ip, err := netip.ParseAddr(podIPInfo.PodIPConfig.IPAddress) - if err != nil { - return errors.Wrapf(err, "failed to parse podIPConfig IP address %s", podIPInfo.PodIPConfig.IPAddress) - } - - if ip.Is4() { - routes = append(routes, addRoutes(podCIDRsV4, overlayGatewayv4)...) - routes = append(routes, addRoutes(serviceCIDRsV4, overlayGatewayv4)...) - routes = append(routes, addRoutes(infraVNETCIDRsv4, overlayGatewayv4)...) - } else { - routes = append(routes, addRoutes(podCIDRv6, overlayGatewayV6)...) - routes = append(routes, addRoutes(serviceCIDRsV6, overlayGatewayV6)...) - routes = append(routes, addRoutes(infraVNETCIDRsv6, overlayGatewayV6)...) + return errors.Wrap(err, "failed to get infra routes for infraNIC interface") } + routes = infraRoutes podIPInfo.SkipDefaultRoutes = true - case cns.NodeNetworkInterfaceBackendNIC, cns.NodeNetworkInterfaceAccelnetFrontendNIC: //nolint:exhaustive // ignore exhaustive types check + case cns.NodeNetworkInterfaceBackendNIC: //nolint:exhaustive // ignore exhaustive types check // No-op NIC types. 
default: return errInvalidSWIFTv2NICType @@ -86,18 +50,130 @@ func (k *K8sSWIFTv2Middleware) setRoutes(podIPInfo *cns.PodIpInfo) error { return nil } -func addRoutes(cidrs []string, gatewayIP string) []cns.Route { - routes := make([]cns.Route, len(cidrs)) - for i, cidr := range cidrs { - routes[i] = cns.Route{ - IPAddress: cidr, - GatewayIPAddress: gatewayIP, - } +// Linux CNS gets pod CIDRs from configuration env +// Containerd reassigns the IP to the adapter and kernel configures the pod cidr route by default on Windows VM +// Hence the windows swiftv2 scenario does not require pod cidr +// GetPodCidrs() will return v4PodCidrs as first []string and v6PodCidrs as second []string +func (k *K8sSWIFTv2Middleware) GetPodCidrs() ([]string, []string, error) { //nolint + v4PodCidrs := []string{} + v6PodCidrs := []string{} + + // Get and parse podCIDRs from env + podCIDRs, err := configuration.PodCIDRs() + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to get podCIDRs from env") + } + podCIDRsV4, podCIDRv6, err := utils.ParseCIDRs(podCIDRs) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to parse podCIDRs") } - return routes + + v4PodCidrs = append(v4PodCidrs, podCIDRsV4...) + v6PodCidrs = append(v6PodCidrs, podCIDRv6...) + + return v4PodCidrs, v6PodCidrs, nil +} + +// getInfraRoutes() returns the infra routes including infravnet/pod/service cidrs for the podIPInfo used in SWIFT V2 Linux scenario +// Linux uses 169.254.1.1 as the default ipv4 gateway and fe80::1234:5678:9abc as the default ipv6 gateway +func (k *K8sSWIFTv2Middleware) getInfraRoutes(podIPInfo *cns.PodIpInfo) ([]cns.Route, error) { + var routes []cns.Route + + ip, err := netip.ParseAddr(podIPInfo.PodIPConfig.IPAddress) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse podIPConfig IP address %s", podIPInfo.PodIPConfig.IPAddress) + } + + v4IPs, v6IPs, err := k.GetInfravnetAndServiceCidrs() + if err != nil { + return nil, errors.Wrap(err, "failed to get infravnet and service CIDRs") + } + + v4PodIPs, v6PodIPs, err := k.GetPodCidrs() + if err != nil { + return nil, errors.Wrap(err, "failed to get pod CIDRs") + } + + v4IPs = append(v4IPs, v4PodIPs...) + v6IPs = append(v6IPs, v6PodIPs...) + + if ip.Is4() { + routes = append(routes, k.AddRoutes(v4IPs, overlayGatewayv4)...) + } else { + routes = append(routes, k.AddRoutes(v6IPs, overlayGatewayV6)...) + } + + return routes, nil } // assignSubnetPrefixLengthFields is a no-op for linux swiftv2 as the default prefix-length is sufficient func (k *K8sSWIFTv2Middleware) assignSubnetPrefixLengthFields(_ *cns.PodIpInfo, _ v1alpha1.InterfaceInfo, _ string) error { return nil } + +// add default route is done on setRoutes() for Linux swiftv2 +func (k *K8sSWIFTv2Middleware) addDefaultRoute(*cns.PodIpInfo, string) {} + +// IPConfigsRequestHandlerWrapper is the middleware function for handling SWIFT v2 IP configs requests for AKS-SWIFT. This function wrapped the default SWIFT request +// and release IP configs handlers. 
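+// The wrapped flow, sketched: resolve and validate the pod from the request; call the
+// default handler to obtain the infra IP configs; if the pod is SWIFT v2, append the
+// MTPNC-derived IP configs and set routes per NIC type; if anything fails after the
+// default allocation, the failure handler releases the infra IP config.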
+func (k *K8sSWIFTv2Middleware) IPConfigsRequestHandlerWrapper(defaultHandler, failureHandler cns.IPConfigsHandlerFunc) cns.IPConfigsHandlerFunc { + return func(ctx context.Context, req cns.IPConfigsRequest) (*cns.IPConfigsResponse, error) { + podInfo, respCode, message := k.GetPodInfoForIPConfigsRequest(ctx, &req) + + if respCode != types.Success { + return &cns.IPConfigsResponse{ + Response: cns.Response{ + ReturnCode: respCode, + Message: message, + }, + }, errors.New("failed to validate IP configs request") + } + ipConfigsResp, err := defaultHandler(ctx, req) + // If the pod is not v2, return the response from the handler + if !req.SecondaryInterfacesExist { + return ipConfigsResp, err + } + // If the pod is v2, get the infra IP configs from the handler first and then add the SWIFTv2 IP config + defer func() { + // Release the default IP config if there is an error + if err != nil { + _, err = failureHandler(ctx, req) + if err != nil { + logger.Errorf("failed to release default IP config : %v", err) + } + } + }() + if err != nil { + return ipConfigsResp, err + } + SWIFTv2PodIPInfos, err := k.getIPConfig(ctx, podInfo) + if err != nil { + return &cns.IPConfigsResponse{ + Response: cns.Response{ + ReturnCode: types.FailedToAllocateIPConfig, + Message: fmt.Sprintf("AllocateIPConfig failed: %v, IP config request is %v", err, req), + }, + PodIPInfo: []cns.PodIpInfo{}, + }, errors.Wrapf(err, "failed to get SWIFTv2 IP config : %v", req) + } + ipConfigsResp.PodIPInfo = append(ipConfigsResp.PodIPInfo, SWIFTv2PodIPInfos...) + // Set routes for the pod + for i := range ipConfigsResp.PodIPInfo { + ipInfo := &ipConfigsResp.PodIPInfo[i] + // Backend nics doesn't need routes to be set + if ipInfo.NICType != cns.BackendNIC { + err = k.setRoutes(ipInfo) + if err != nil { + return &cns.IPConfigsResponse{ + Response: cns.Response{ + ReturnCode: types.FailedToAllocateIPConfig, + Message: fmt.Sprintf("AllocateIPConfig failed: %v, IP config request is %v", err, req), + }, + PodIPInfo: []cns.PodIpInfo{}, + }, errors.Wrapf(err, "failed to set routes for pod %s", podInfo.Name()) + } + } + } + return ipConfigsResp, nil + } +} diff --git a/cns/middlewares/k8sSwiftV2_linux_test.go b/cns/middlewares/k8sSwiftV2_linux_test.go index cf55f4cce9..6a0674c066 100644 --- a/cns/middlewares/k8sSwiftV2_linux_test.go +++ b/cns/middlewares/k8sSwiftV2_linux_test.go @@ -2,7 +2,9 @@ package middlewares import ( "context" + "errors" "fmt" + "strings" "testing" "github.com/Azure/azure-container-networking/cns" @@ -11,7 +13,9 @@ import ( "github.com/Azure/azure-container-networking/cns/middlewares/mock" "github.com/Azure/azure-container-networking/cns/types" "github.com/Azure/azure-container-networking/crd/multitenancy/api/v1alpha1" + "github.com/google/go-cmp/cmp" "gotest.tools/v3/assert" + "k8s.io/kubernetes/pkg/kubelet" ) var ( @@ -144,7 +148,7 @@ func TestValidateMultitenantIPConfigsRequestSuccess(t *testing.T) { happyReq.OrchestratorContext = b happyReq.SecondaryInterfacesExist = false - _, respCode, err := middleware.validateIPConfigsRequest(context.TODO(), happyReq) + _, respCode, err := middleware.GetPodInfoForIPConfigsRequest(context.TODO(), happyReq) assert.Equal(t, err, "") assert.Equal(t, respCode, types.Success) assert.Equal(t, happyReq.SecondaryInterfacesExist, true) @@ -158,7 +162,7 @@ func TestValidateMultitenantIPConfigsRequestSuccess(t *testing.T) { happyReq2.OrchestratorContext = b happyReq2.SecondaryInterfacesExist = false - _, respCode, err = middleware.validateIPConfigsRequest(context.TODO(), happyReq2) + _, 
respCode, err = middleware.GetPodInfoForIPConfigsRequest(context.TODO(), happyReq2) assert.Equal(t, err, "") assert.Equal(t, respCode, types.Success) assert.Equal(t, happyReq.SecondaryInterfacesExist, true) @@ -172,7 +176,7 @@ func TestValidateMultitenantIPConfigsRequestSuccess(t *testing.T) { happyReq3.OrchestratorContext = b happyReq3.SecondaryInterfacesExist = false - _, respCode, err = middleware.validateIPConfigsRequest(context.TODO(), happyReq3) + _, respCode, err = middleware.GetPodInfoForIPConfigsRequest(context.TODO(), happyReq3) assert.Equal(t, err, "") assert.Equal(t, respCode, types.Success) assert.Equal(t, happyReq3.SecondaryInterfacesExist, false) @@ -188,7 +192,7 @@ func TestValidateMultitenantIPConfigsRequestFailure(t *testing.T) { InfraContainerID: testPod1Info.InfraContainerID(), } failReq.OrchestratorContext = []byte("invalid") - _, respCode, _ := middleware.validateIPConfigsRequest(context.TODO(), failReq) + _, respCode, _ := middleware.GetPodInfoForIPConfigsRequest(context.TODO(), failReq) assert.Equal(t, respCode, types.UnexpectedError) // Pod doesn't exist in cache test @@ -198,20 +202,22 @@ func TestValidateMultitenantIPConfigsRequestFailure(t *testing.T) { } b, _ := testPod2Info.OrchestratorContext() failReq.OrchestratorContext = b - _, respCode, _ = middleware.validateIPConfigsRequest(context.TODO(), failReq) + _, respCode, _ = middleware.GetPodInfoForIPConfigsRequest(context.TODO(), failReq) assert.Equal(t, respCode, types.UnexpectedError) // Failed to get MTPNC b, _ = testPod3Info.OrchestratorContext() failReq.OrchestratorContext = b - _, respCode, _ = middleware.validateIPConfigsRequest(context.TODO(), failReq) + _, respCode, msg := middleware.GetPodInfoForIPConfigsRequest(context.TODO(), failReq) assert.Equal(t, respCode, types.UnexpectedError) + assert.Assert(t, strings.Contains(msg, kubelet.NetworkNotReadyErrorMsg), "expected error message to contain '%s', got '%s'", kubelet.NetworkNotReadyErrorMsg, msg) // MTPNC not ready b, _ = testPod4Info.OrchestratorContext() failReq.OrchestratorContext = b - _, respCode, _ = middleware.validateIPConfigsRequest(context.TODO(), failReq) + _, respCode, msg = middleware.GetPodInfoForIPConfigsRequest(context.TODO(), failReq) assert.Equal(t, respCode, types.UnexpectedError) + assert.Assert(t, strings.Contains(msg, kubelet.NetworkNotReadyErrorMsg), "expected error message to contain '%s', got '%s'", kubelet.NetworkNotReadyErrorMsg, msg) } func TestGetSWIFTv2IPConfigSuccess(t *testing.T) { @@ -235,11 +241,13 @@ func TestGetSWIFTv2IPConfigFailure(t *testing.T) { // Pod's MTPNC doesn't exist in cache test _, err := middleware.getIPConfig(context.TODO(), testPod3Info) - assert.ErrorContains(t, err, mock.ErrMTPNCNotFound.Error()) + assert.Assert(t, strings.Contains(err.Error(), errGetMTPNC.Error()), "expected error to wrap errMTPNCNotFound, got: %v", err) + assert.ErrorContains(t, err, kubelet.NetworkNotReadyErrorMsg) // Pod's MTPNC is not ready test _, err = middleware.getIPConfig(context.TODO(), testPod4Info) - assert.Error(t, err, errMTPNCNotReady.Error()) + assert.Assert(t, errors.Is(err, errMTPNCNotReady), "expected error to wrap errMTPNCNotReady, got: %v", err) + assert.ErrorContains(t, err, kubelet.NetworkNotReadyErrorMsg) } func TestSetRoutesSuccess(t *testing.T) { @@ -342,10 +350,10 @@ func TestSetRoutesSuccess(t *testing.T) { } else { assert.Equal(t, ipInfo.SkipDefaultRoutes, false) } - } + for i := range podIPInfo { - assert.DeepEqual(t, podIPInfo[i].Routes, desiredPodIPInfo[i].Routes) + cmp.Equal(podIPInfo[i].Routes, 
desiredPodIPInfo[i].Routes) } } @@ -378,9 +386,10 @@ func TestSetRoutesFailure(t *testing.T) { } func TestAddRoutes(t *testing.T) { + middleware := K8sSWIFTv2Middleware{Cli: mock.NewClient()} cidrs := []string{"10.0.0.0/24", "20.0.0.0/24"} gatewayIP := "192.168.1.1" - routes := addRoutes(cidrs, gatewayIP) + routes := middleware.AddRoutes(cidrs, gatewayIP) expectedRoutes := []cns.Route{ { IPAddress: "10.0.0.0/24", @@ -444,8 +453,6 @@ func TestGetSWIFTv2IPConfigMultiInterfaceSuccess(t *testing.T) { switch ipInfo.NICType { case cns.DelegatedVMNIC: assert.Equal(t, ipInfo.NICType, cns.DelegatedVMNIC) - case cns.NodeNetworkInterfaceAccelnetFrontendNIC: - assert.Equal(t, ipInfo.NICType, cns.NodeNetworkInterfaceAccelnetFrontendNIC) case cns.NodeNetworkInterfaceBackendNIC: assert.Equal(t, ipInfo.NICType, cns.NodeNetworkInterfaceBackendNIC) case cns.InfraNIC: diff --git a/cns/middlewares/k8sSwiftV2_windows.go b/cns/middlewares/k8sSwiftV2_windows.go index a2b34cd467..1dda583bd1 100644 --- a/cns/middlewares/k8sSwiftV2_windows.go +++ b/cns/middlewares/k8sSwiftV2_windows.go @@ -1,18 +1,47 @@ package middlewares import ( + "context" + "encoding/json" + "fmt" + "net/netip" + "github.com/Azure/azure-container-networking/cns" "github.com/Azure/azure-container-networking/cns/logger" "github.com/Azure/azure-container-networking/cns/middlewares/utils" + "github.com/Azure/azure-container-networking/cns/types" "github.com/Azure/azure-container-networking/crd/multitenancy/api/v1alpha1" + "github.com/Azure/azure-container-networking/network/policy" "github.com/pkg/errors" ) +var defaultDenyEgressPolicy policy.Policy = mustGetEndpointPolicy(cns.DirectionTypeOut) + +var defaultDenyIngressPolicy policy.Policy = mustGetEndpointPolicy(cns.DirectionTypeIn) + +const ( + defaultGateway = "0.0.0.0" +) + // for AKS L1VH, do not set default route on infraNIC to avoid customer pod reaching all infra vnet services // default route is set for secondary interface NIC(i.e,delegatedNIC) func (k *K8sSWIFTv2Middleware) setRoutes(podIPInfo *cns.PodIpInfo) error { if podIPInfo.NICType == cns.InfraNIC { - logger.Printf("[SWIFTv2Middleware] skip setting default route on InfraNIC interface") + // as a workaround, HNS will not set this dummy default route(0.0.0.0/0, nexthop:0.0.0.0) on infraVnet interface eth0 + // the only usage for this dummy default is to bypass HNS setting default route on eth0 + // TODO: Remove this once HNS fix is ready + route := cns.Route{ + IPAddress: "0.0.0.0/0", + GatewayIPAddress: defaultGateway, + } + podIPInfo.Routes = append(podIPInfo.Routes, route) + + // Windows CNS middleware sets the infra routes(infravnet and service cidrs) to infraNIC interface for the podIPInfo used in SWIFT V2 Windows scenario + infraRoutes, err := k.getInfraRoutes(podIPInfo) + if err != nil { + return errors.Wrap(err, "failed to set routes for infraNIC interface") + } + podIPInfo.Routes = append(podIPInfo.Routes, infraRoutes...) 
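+		// SkipDefaultRoutes tells the CNI not to program its own default route on this
+		// interface; the dummy 0.0.0.0/0 entry above stands in for it until the HNS fix lands.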
podIPInfo.SkipDefaultRoutes = true } return nil @@ -42,3 +71,175 @@ func (k *K8sSWIFTv2Middleware) assignSubnetPrefixLengthFields(podIPInfo *cns.Pod } return nil } + +// add default route with gateway IP to podIPInfo +func (k *K8sSWIFTv2Middleware) addDefaultRoute(podIPInfo *cns.PodIpInfo, gwIP string) { + route := cns.Route{ + IPAddress: "0.0.0.0/0", + GatewayIPAddress: gwIP, + } + podIPInfo.Routes = append(podIPInfo.Routes, route) +} + +func mustGetEndpointPolicy(direction string) policy.Policy { + endpointPolicy, err := getEndpointPolicy(direction) + if err != nil { + panic(err) + } + return endpointPolicy +} + +// get policy of type endpoint policy given the params +func getEndpointPolicy(direction string) (policy.Policy, error) { + endpointPolicy, err := createEndpointPolicy(direction) + if err != nil { + return policy.Policy{}, fmt.Errorf("error creating endpoint policy: %w", err) + } + + additionalArgs := policy.Policy{ + Type: policy.EndpointPolicy, + Data: endpointPolicy, + } + + return additionalArgs, nil +} + +// create policy given the params +func createEndpointPolicy(direction string) ([]byte, error) { + endpointPolicy := struct { + Type string `json:"Type"` + Action string `json:"Action"` + Direction string `json:"Direction"` + Priority int `json:"Priority"` + }{ + Type: string(policy.ACLPolicy), + Action: cns.ActionTypeBlock, + Direction: direction, + Priority: 10_000, + } + + rawPolicy, err := json.Marshal(endpointPolicy) + if err != nil { + return nil, fmt.Errorf("error marshalling policy to json, err is: %w", err) + } + + return rawPolicy, nil +} + +// IPConfigsRequestHandlerWrapper is the middleware function for handling SWIFT v2 IP configs requests for AKS-SWIFT. This function wrapped the default SWIFT request +// and release IP configs handlers. 
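+// On Windows the wrapper additionally reads DefaultDenyACL from the pod's MTPNC and, when
+// set, attaches block-all ACL endpoint policies (ingress and egress) to the InfraNIC entry
+// before the SWIFTv2 IP configs are appended.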
+func (k *K8sSWIFTv2Middleware) IPConfigsRequestHandlerWrapper(defaultHandler, failureHandler cns.IPConfigsHandlerFunc) cns.IPConfigsHandlerFunc { + return func(ctx context.Context, req cns.IPConfigsRequest) (*cns.IPConfigsResponse, error) { + podInfo, respCode, message := k.GetPodInfoForIPConfigsRequest(ctx, &req) + + if respCode != types.Success { + return &cns.IPConfigsResponse{ + Response: cns.Response{ + ReturnCode: respCode, + Message: message, + }, + }, errors.New("failed to validate IP configs request") + } + ipConfigsResp, err := defaultHandler(ctx, req) + // If the pod is not v2, return the response from the handler + if !req.SecondaryInterfacesExist { + return ipConfigsResp, err + } + + // Get MTPNC + mtpnc, respCode, message := k.getMTPNC(ctx, podInfo) + if respCode != types.Success { + return &cns.IPConfigsResponse{ + Response: cns.Response{ + ReturnCode: respCode, + Message: message, + }, + }, errors.New("failed to validate IP configs request") + } + + // GetDefaultDenyBool takes in mtpnc and returns the value of defaultDenyACLBool from it + defaultDenyACLBool := GetDefaultDenyBool(mtpnc) + + // ipConfigsResp has infra IP configs -> if defaultDenyACLbool is enabled, add the default deny endpoint policies as a property in PodIpInfo + for i := range ipConfigsResp.PodIPInfo { + ipInfo := &ipConfigsResp.PodIPInfo[i] + // there will be no pod connectivity to and from those pods + if defaultDenyACLBool && ipInfo.NICType == cns.InfraNIC { + ipInfo.EndpointPolicies = append(ipInfo.EndpointPolicies, defaultDenyEgressPolicy, defaultDenyIngressPolicy) + break + } + } + + // If the pod is v2, get the infra IP configs from the handler first and then add the SWIFTv2 IP config + defer func() { + // Release the default IP config if there is an error + if err != nil { + _, err = failureHandler(ctx, req) + if err != nil { + logger.Errorf("failed to release default IP config : %v", err) + } + } + }() + if err != nil { + return ipConfigsResp, err + } + SWIFTv2PodIPInfos, err := k.getIPConfig(ctx, podInfo) + if err != nil { + return &cns.IPConfigsResponse{ + Response: cns.Response{ + ReturnCode: types.FailedToAllocateIPConfig, + Message: fmt.Sprintf("AllocateIPConfig failed: %v, IP config request is %v", err, req), + }, + PodIPInfo: []cns.PodIpInfo{}, + }, errors.Wrapf(err, "failed to get SWIFTv2 IP config : %v", req) + } + ipConfigsResp.PodIPInfo = append(ipConfigsResp.PodIPInfo, SWIFTv2PodIPInfos...) 
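+		// At this point PodIPInfo holds the infra entries followed by the MTPNC-derived
+		// entries; per-NIC-type routes are programmed below.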
+ // Set routes for the pod + for i := range ipConfigsResp.PodIPInfo { + ipInfo := &ipConfigsResp.PodIPInfo[i] + // Backend nics doesn't need routes to be set + if ipInfo.NICType != cns.BackendNIC { + err = k.setRoutes(ipInfo) + if err != nil { + return &cns.IPConfigsResponse{ + Response: cns.Response{ + ReturnCode: types.FailedToAllocateIPConfig, + Message: fmt.Sprintf("AllocateIPConfig failed: %v, IP config request is %v", err, req), + }, + PodIPInfo: []cns.PodIpInfo{}, + }, errors.Wrapf(err, "failed to set routes for pod %s", podInfo.Name()) + } + } + } + return ipConfigsResp, nil + } +} + +func GetDefaultDenyBool(mtpnc v1alpha1.MultitenantPodNetworkConfig) bool { + // returns the value of DefaultDenyACL from mtpnc + return mtpnc.Status.DefaultDenyACL +} + +// getInfraRoutes() returns the infra routes including infravnet/ and service cidrs for the podIPInfo used in SWIFT V2 Windows scenario +// Windows uses default route 0.0.0.0 as the gateway IP for containerd to configure; +// For example, containerd would set route like: ip route 10.0.0.0/16 via 0.0.0.0 dev eth0 +func (k *K8sSWIFTv2Middleware) getInfraRoutes(podIPInfo *cns.PodIpInfo) ([]cns.Route, error) { + var routes []cns.Route + + ip, err := netip.ParseAddr(podIPInfo.PodIPConfig.IPAddress) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse podIPConfig IP address %s", podIPInfo.PodIPConfig.IPAddress) + } + + // TODO: add ipv6 when supported + v4IPs, _, err := k.GetInfravnetAndServiceCidrs() + if err != nil { + return nil, errors.Wrap(err, "failed to get infravnet and service CIDRs") + } + + if ip.Is4() { + routes = append(routes, k.AddRoutes(v4IPs, defaultGateway)...) + } + + return routes, nil +} diff --git a/cns/middlewares/k8sSwiftV2_windows_test.go b/cns/middlewares/k8sSwiftV2_windows_test.go index 945d650ae9..bc5464cbb9 100644 --- a/cns/middlewares/k8sSwiftV2_windows_test.go +++ b/cns/middlewares/k8sSwiftV2_windows_test.go @@ -1,21 +1,31 @@ package middlewares import ( + "encoding/json" + "fmt" + "reflect" "testing" "github.com/Azure/azure-container-networking/cns" + "github.com/Azure/azure-container-networking/cns/configuration" "github.com/Azure/azure-container-networking/cns/middlewares/mock" "github.com/Azure/azure-container-networking/crd/multitenancy/api/v1alpha1" + "github.com/Azure/azure-container-networking/network/policy" + "github.com/google/go-cmp/cmp" + "github.com/stretchr/testify/require" "gotest.tools/v3/assert" ) func TestSetRoutesSuccess(t *testing.T) { middleware := K8sSWIFTv2Middleware{Cli: mock.NewClient()} + t.Setenv(configuration.EnvServiceCIDRs, "10.0.0.0/16") + t.Setenv(configuration.EnvInfraVNETCIDRs, "10.240.0.10/16") + t.Setenv(configuration.EnvPodCIDRs, "10.1.0.10/24") // make sure windows swiftv2 does not set pod cidr route podIPInfo := []cns.PodIpInfo{ { PodIPConfig: cns.IPSubnet{ - IPAddress: "10.0.1.10", + IPAddress: "10.0.1.100", PrefixLength: 32, }, NICType: cns.InfraNIC, @@ -29,6 +39,30 @@ func TestSetRoutesSuccess(t *testing.T) { MacAddress: "12:34:56:78:9a:bc", }, } + + desiredPodIPInfo := []cns.PodIpInfo{ + { + PodIPConfig: cns.IPSubnet{ + IPAddress: "10.0.1.100", + PrefixLength: 32, + }, + NICType: cns.InfraNIC, + Routes: []cns.Route{ + { + IPAddress: "10.0.0.0/16", + GatewayIPAddress: "0.0.0.0", + }, + { + IPAddress: "10.240.0.10/16", + GatewayIPAddress: "0.0.0.0", + }, + { + IPAddress: "0.0.0.0/0", + GatewayIPAddress: "0.0.0.0", + }, + }, + }, + } for i := range podIPInfo { ipInfo := &podIPInfo[i] err := middleware.setRoutes(ipInfo) @@ -39,6 +73,9 @@ func 
TestSetRoutesSuccess(t *testing.T) { assert.Equal(t, ipInfo.SkipDefaultRoutes, false) } } + + // check if the routes are set as expected + reflect.DeepEqual(podIPInfo[0].Routes, desiredPodIPInfo[0].Routes) } func TestAssignSubnetPrefixSuccess(t *testing.T) { @@ -66,3 +103,119 @@ func TestAssignSubnetPrefixSuccess(t *testing.T) { assert.Equal(t, ipInfo.HostPrimaryIPInfo.Gateway, intInfo.GatewayIP) assert.Equal(t, ipInfo.HostPrimaryIPInfo.Subnet, intInfo.SubnetAddressSpace) } + +func TestAddDefaultRoute(t *testing.T) { + middleware := K8sSWIFTv2Middleware{Cli: mock.NewClient()} + + podIPInfo := cns.PodIpInfo{ + PodIPConfig: cns.IPSubnet{ + IPAddress: "20.240.1.242", + PrefixLength: 32, + }, + NICType: cns.DelegatedVMNIC, + MacAddress: "12:34:56:78:9a:bc", + } + + gatewayIP := "20.240.1.1" + intInfo := v1alpha1.InterfaceInfo{ + GatewayIP: gatewayIP, + SubnetAddressSpace: "20.240.1.0/16", + } + + ipInfo := podIPInfo + middleware.addDefaultRoute(&ipInfo, intInfo.GatewayIP) + + expectedRoutes := []cns.Route{ + { + IPAddress: "0.0.0.0/0", + GatewayIPAddress: gatewayIP, + }, + } + + if !reflect.DeepEqual(ipInfo.Routes, expectedRoutes) { + t.Errorf("got '%+v', expected '%+v'", ipInfo.Routes, expectedRoutes) + } +} + +func TestAddDefaultDenyACL(t *testing.T) { + const policyType = "ACL" + const action = "Block" + const ingressDir = "In" + const egressDir = "Out" + const priority = 10000 + + valueIn := []byte(fmt.Sprintf(`{ + "Type": "%s", + "Action": "%s", + "Direction": "%s", + "Priority": %d + }`, + policyType, + action, + ingressDir, + priority, + )) + + valueOut := []byte(fmt.Sprintf(`{ + "Type": "%s", + "Action": "%s", + "Direction": "%s", + "Priority": %d + }`, + policyType, + action, + egressDir, + priority, + )) + + expectedDefaultDenyEndpoint := []policy.Policy{ + { + Type: policy.EndpointPolicy, + Data: valueOut, + }, + { + Type: policy.EndpointPolicy, + Data: valueIn, + }, + } + var allEndpoints []policy.Policy + var defaultDenyEgressPolicy, defaultDenyIngressPolicy policy.Policy + var err error + + defaultDenyEgressPolicy = mustGetEndpointPolicy("Out") + defaultDenyIngressPolicy = mustGetEndpointPolicy("In") + + allEndpoints = append(allEndpoints, defaultDenyEgressPolicy, defaultDenyIngressPolicy) + + // Normalize both slices so there is no extra spacing, new lines, etc + normalizedExpected := normalizeKVPairs(t, expectedDefaultDenyEndpoint) + normalizedActual := normalizeKVPairs(t, allEndpoints) + if !cmp.Equal(normalizedExpected, normalizedActual) { + t.Error("received policy differs from expectation: diff", cmp.Diff(normalizedExpected, normalizedActual)) + } + assert.Equal(t, err, nil) +} + +// normalizeKVPairs normalizes the JSON values in the KV pairs by unmarshaling them into a map, then marshaling them back to compact JSON to remove any extra space, new lines, etc +func normalizeKVPairs(t *testing.T, policies []policy.Policy) []policy.Policy { + normalized := make([]policy.Policy, len(policies)) + + for i, kv := range policies { + var unmarshaledValue map[string]interface{} + // Unmarshal the Value into a map + err := json.Unmarshal(kv.Data, &unmarshaledValue) + require.NoError(t, err, "Failed to unmarshal JSON value") + + // Marshal it back to compact JSON + normalizedValue, err := json.Marshal(unmarshaledValue) + require.NoError(t, err, "Failed to re-marshal JSON value") + + // Replace Value with the normalized compact JSON + normalized[i] = policy.Policy{ + Type: policy.EndpointPolicy, + Data: normalizedValue, + } + } + + return normalized +} diff --git 
a/cns/middlewares/standaloneSwiftV2.go b/cns/middlewares/standaloneSwiftV2.go index f8392690ca..7d83166aaf 100644 --- a/cns/middlewares/standaloneSwiftV2.go +++ b/cns/middlewares/standaloneSwiftV2.go @@ -4,7 +4,6 @@ import ( "context" "github.com/Azure/azure-container-networking/cns" - "github.com/Azure/azure-container-networking/cns/types" "github.com/pkg/errors" ) @@ -16,8 +15,7 @@ func (m *StandaloneSWIFTv2Middleware) IPConfigsRequestHandlerWrapper(ipRequestHa return func(ctx context.Context, req cns.IPConfigsRequest) (*cns.IPConfigsResponse, error) { ipConfigsResp, err := ipRequestHandler(ctx, req) if err != nil { - ipConfigsResp.Response.ReturnCode = types.UnexpectedError - return ipConfigsResp, errors.Wrapf(err, "Failed to requestIPConfigs for Standalone-SwiftV2 from IPConfigsRequest %v", req) + return ipConfigsResp, errors.Wrapf(err, "Failed to requestIPConfigs for Standalone SwiftV2 from IPConfigsRequest %+v", req) } return ipConfigsResp, nil diff --git a/cns/nodesubnet/initialization.go b/cns/nodesubnet/initialization.go new file mode 100644 index 0000000000..53b2666682 --- /dev/null +++ b/cns/nodesubnet/initialization.go @@ -0,0 +1,36 @@ +package nodesubnet + +import ( + "context" + + "github.com/Azure/azure-container-networking/cns" + "github.com/Azure/azure-container-networking/cns/logger" + cnstypes "github.com/Azure/azure-container-networking/cns/types" + "github.com/pkg/errors" + "golang.org/x/exp/maps" +) + +type ipamReconciler interface { + ReconcileIPAMStateForNodeSubnet(ncRequests []*cns.CreateNetworkContainerRequest, podInfoByIP map[string]cns.PodInfo) cnstypes.ResponseCode +} + +func ReconcileInitialCNSState(_ context.Context, ipamReconciler ipamReconciler, podInfoByIPProvider cns.PodInfoByIPProvider) (int, error) { + // Get previous PodInfo state from podInfoByIPProvider + podInfoByIP, err := podInfoByIPProvider.PodInfoByIP() + if err != nil { + return 0, errors.Wrap(err, "provider failed to provide PodInfoByIP") + } + + logger.Printf("Reconciling initial CNS state with %d IPs", len(podInfoByIP)) + + // Create a network container request that holds all the IPs from PodInfoByIP + secondaryIPs := maps.Keys(podInfoByIP) + ncRequest := CreateNodeSubnetNCRequest(secondaryIPs) + responseCode := ipamReconciler.ReconcileIPAMStateForNodeSubnet([]*cns.CreateNetworkContainerRequest{ncRequest}, podInfoByIP) + + if responseCode != cnstypes.Success { + return 0, errors.Errorf("failed to reconcile initial CNS state: %d", responseCode) + } + + return len(secondaryIPs), nil +} diff --git a/cns/nodesubnet/initialization_test.go b/cns/nodesubnet/initialization_test.go new file mode 100644 index 0000000000..bfc0a75398 --- /dev/null +++ b/cns/nodesubnet/initialization_test.go @@ -0,0 +1,114 @@ +package nodesubnet_test + +import ( + "context" + "net" + "testing" + + "github.com/Azure/azure-container-networking/cns" + "github.com/Azure/azure-container-networking/cns/logger" + "github.com/Azure/azure-container-networking/cns/nodesubnet" + "github.com/Azure/azure-container-networking/cns/restserver" + podprovider "github.com/Azure/azure-container-networking/cns/stateprovider/cns" + "github.com/Azure/azure-container-networking/cns/types" + "github.com/Azure/azure-container-networking/store" +) + +func getMockStore() store.KeyValueStore { + mockStore := store.NewMockStore("") + endpointState := map[string]*restserver.EndpointInfo{ + "12e65d89e58cb23c784e97840cf76866bfc9902089bdc8e87e9f64032e312b0b": { + PodName: "coredns-54b69f46b8-ldmwr", + PodNamespace: "kube-system", + IfnameToIPMap: 
map[string]*restserver.IPInfo{ + "eth0": { + IPv4: []net.IPNet{ + { + IP: net.IPv4(10, 10, 0, 52), + Mask: net.CIDRMask(24, 32), + }, + }, + }, + }, + }, + "1fc5176913a3a1a7facfb823dde3b4ded404041134fef4f4a0c8bba140fc0413": { + PodName: "load-test-7f7d49687d-wxc9p", + PodNamespace: "load-test", + IfnameToIPMap: map[string]*restserver.IPInfo{ + "eth0": { + IPv4: []net.IPNet{ + { + IP: net.IPv4(10, 10, 0, 63), + Mask: net.CIDRMask(24, 32), + }, + }, + }, + }, + }, + } + + err := mockStore.Write(restserver.EndpointStoreKey, endpointState) + if err != nil { + return nil + } + return mockStore +} + +type MockIpamStateReconciler struct{} + +func (m *MockIpamStateReconciler) ReconcileIPAMStateForNodeSubnet(ncRequests []*cns.CreateNetworkContainerRequest, podInfoByIP map[string]cns.PodInfo) types.ResponseCode { + if len(ncRequests) == 1 && len(ncRequests[0].SecondaryIPConfigs) == len(podInfoByIP) { + return types.Success + } + + return types.UnexpectedError +} + +func TestNewCNSPodInfoProvider(t *testing.T) { + tests := []struct { + name string + store store.KeyValueStore + wantErr bool + reconciler *MockIpamStateReconciler + exp int + }{ + { + name: "happy_path", + store: getMockStore(), + wantErr: false, + reconciler: &MockIpamStateReconciler{}, + exp: 2, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + ctx, cancel := testContext(t) + defer cancel() + + podInfoByIPProvider, err := podprovider.New(tt.store) + checkErr(t, err, false) + + got, err := nodesubnet.ReconcileInitialCNSState(ctx, tt.reconciler, podInfoByIPProvider) + checkErr(t, err, tt.wantErr) + if got != tt.exp { + t.Errorf("got %d IPs reconciled, expected %d", got, tt.exp) + } + }) + } +} + +// testContext creates a context from the provided testing.T that will be +// canceled if the test suite is terminated. +func testContext(t *testing.T) (context.Context, context.CancelFunc) { + if deadline, ok := t.Deadline(); ok { + return context.WithDeadline(context.Background(), deadline) + } + return context.WithCancel(context.Background()) +} + +func init() { + logger.InitLogger("testlogs", 0, 0, "./") +} diff --git a/cns/nodesubnet/ip_fetcher.go b/cns/nodesubnet/ip_fetcher.go new file mode 100644 index 0000000000..7457d529e8 --- /dev/null +++ b/cns/nodesubnet/ip_fetcher.go @@ -0,0 +1,122 @@ +package nodesubnet + +import ( + "context" + "log" + "net/netip" + "time" + + "github.com/Azure/azure-container-networking/nmagent" + "github.com/Azure/azure-container-networking/refresh" + "github.com/pkg/errors" +) + +const ( + // Default minimum time between secondary IP fetches + DefaultMinRefreshInterval = 4 * time.Second + // Default maximum time between secondary IP fetches + DefaultMaxRefreshInterval = 1024 * time.Second +) + +var ErrRefreshSkipped = errors.New("refresh skipped due to throttling") + +// InterfaceRetriever is an interface is implemented by the NMAgent Client, and also a mock client for testing. +type InterfaceRetriever interface { + GetInterfaceIPInfo(ctx context.Context) (nmagent.Interfaces, error) +} + +// IPConsumer is an interface implemented by whoever consumes the secondary IPs fetched in nodesubnet +type IPConsumer interface { + UpdateIPsForNodeSubnet([]netip.Addr) error +} + +// IPFetcher fetches secondary IPs from NMAgent at regular intervals. The +// interval will vary within the range of minRefreshInterval and +// maxRefreshInterval. When no diff is observed after a fetch, the interval +// doubles (subject to the maximum interval). 
When a diff is observed, the +// interval resets to the minimum. +type IPFetcher struct { + // Node subnet config + intfFetcherClient InterfaceRetriever + consumer IPConsumer + fetcher *refresh.Fetcher[nmagent.Interfaces] +} + +// NewIPFetcher creates a new IPFetcher. If minInterval is 0, it will default to 4 seconds. +// If maxInterval is 0, it will default to 1024 seconds (or minInterval, if it is higher). +func NewIPFetcher( + client InterfaceRetriever, + consumer IPConsumer, + minInterval time.Duration, + maxInterval time.Duration, + logger refresh.Logger, +) *IPFetcher { + if minInterval == 0 { + minInterval = DefaultMinRefreshInterval + } + + if maxInterval == 0 { + maxInterval = DefaultMaxRefreshInterval + } + + maxInterval = max(maxInterval, minInterval) + + newIPFetcher := &IPFetcher{ + intfFetcherClient: client, + consumer: consumer, + fetcher: nil, + } + fetcher := refresh.NewFetcher[nmagent.Interfaces](client.GetInterfaceIPInfo, minInterval, maxInterval, newIPFetcher.ProcessInterfaces, logger) + newIPFetcher.fetcher = fetcher + return newIPFetcher +} + +// Start the IPFetcher. +func (c *IPFetcher) Start(ctx context.Context) { + c.fetcher.Start(ctx) +} + +// Fetch IPs from NMAgent and pass to the consumer +func (c *IPFetcher) ProcessInterfaces(response nmagent.Interfaces) error { + if len(response.Entries) == 0 { + return errors.New("no interfaces found in response from NMAgent") + } + + _, secondaryIPs := flattenIPListFromResponse(&response) + err := c.consumer.UpdateIPsForNodeSubnet(secondaryIPs) + if err != nil { + return errors.Wrap(err, "updating secondary IPs") + } + + return nil +} + +// Get the list of secondary IPs from fetched Interfaces +func flattenIPListFromResponse(resp *nmagent.Interfaces) (primary netip.Addr, secondaryIPs []netip.Addr) { + var primaryIP netip.Addr + // For each interface... + for _, intf := range resp.Entries { + if !intf.IsPrimary { + continue + } + + // For each subnet on the interface... + for _, s := range intf.InterfaceSubnets { + addressCount := 0 + // For each address in the subnet... + for _, a := range s.IPAddress { + // Primary addresses are reserved for the host. 
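+				// skip it here: only the secondary addresses are handed to the IPConsumer,
+				// which seeds the nodesubnet IPAM state with them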
+				if a.IsPrimary {
+					primaryIP = netip.Addr(a.Address)
+					continue
+				}
+
+				secondaryIPs = append(secondaryIPs, netip.Addr(a.Address))
+				addressCount++
+			}
+			log.Printf("Got %d addresses from subnet %s", addressCount, s.Prefix)
+		}
+	}
+
+	return primaryIP, secondaryIPs
+}
diff --git a/cns/nodesubnet/ip_fetcher_test.go b/cns/nodesubnet/ip_fetcher_test.go
new file mode 100644
index 0000000000..b981fd552b
--- /dev/null
+++ b/cns/nodesubnet/ip_fetcher_test.go
@@ -0,0 +1,117 @@
+package nodesubnet_test
+
+import (
+	"context"
+	"net/netip"
+	"testing"
+
+	"github.com/Azure/azure-container-networking/cns/logger"
+	"github.com/Azure/azure-container-networking/cns/nodesubnet"
+	"github.com/Azure/azure-container-networking/nmagent"
+)
+
+// Mock consumer that records the IPs handed to it
+type TestConsumer struct {
+	consumeCount     int
+	secondaryIPCount int
+}
+
+// FetchConsumeCount returns the number of consume calls (tests run single-goroutine, so plain reads suffice)
+func (c *TestConsumer) FetchConsumeCount() int {
+	return c.consumeCount
+}
+
+// FetchSecondaryIPCount returns the IP count recorded by the most recent consume
+func (c *TestConsumer) FetchSecondaryIPCount() int {
+	return c.secondaryIPCount
+}
+
+// updateCounts records one consume call and the last secondary IP count
+func (c *TestConsumer) updateCounts(ipCount int) {
+	c.consumeCount++
+	c.secondaryIPCount = ipCount
+}
+
+// Mock IP update
+func (c *TestConsumer) UpdateIPsForNodeSubnet(ips []netip.Addr) error {
+	c.updateCounts(len(ips))
+	return nil
+}
+
+var _ nodesubnet.IPConsumer = &TestConsumer{}
+
+// Mock client that simply satisfies the interface
+type TestClient struct{}
+
+// Mock refresh
+func (c *TestClient) GetInterfaceIPInfo(_ context.Context) (nmagent.Interfaces, error) {
+	return nmagent.Interfaces{}, nil
+}
+
+func TestEmptyResponse(t *testing.T) {
+	consumerPtr := &TestConsumer{}
+	fetcher := nodesubnet.NewIPFetcher(&TestClient{}, consumerPtr, 0, 0, logger.Log)
+	err := fetcher.ProcessInterfaces(nmagent.Interfaces{})
+	checkErr(t, err, true)
+
+	// No consumes, since the responses are empty
+	if consumerPtr.FetchConsumeCount() > 0 {
+		t.Error("Consume called unexpectedly, shouldn't be called since responses are empty")
+	}
+}
+
+func TestFlatten(t *testing.T) {
+	interfaces := nmagent.Interfaces{
+		Entries: []nmagent.Interface{
+			{
+				MacAddress: nmagent.MACAddress{0x00, 0x0D, 0x3A, 0xF9, 0xDC, 0xA6},
+				IsPrimary:  true,
+				InterfaceSubnets: []nmagent.InterfaceSubnet{
+					{
+						Prefix: "10.240.0.0/16",
+						IPAddress: []nmagent.NodeIP{
+							{
+								Address:   nmagent.IPAddress(netip.AddrFrom4([4]byte{10, 240, 0, 5})),
+								IsPrimary: true,
+							},
+							{
+								Address:   nmagent.IPAddress(netip.AddrFrom4([4]byte{10, 240, 0, 6})),
+								IsPrimary: false,
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+	consumerPtr := &TestConsumer{}
+	fetcher := nodesubnet.NewIPFetcher(&TestClient{}, consumerPtr, 0, 0, logger.Log)
+	err := fetcher.ProcessInterfaces(interfaces)
+	checkErr(t, err, false)
+
+	// 1 consume expected
+	if consumerPtr.FetchConsumeCount() != 1 {
+		t.Error("Consume expected to be called, but not called")
+	}
+
+	// 1 secondary IP expected (the primary address is filtered out)
+	if consumerPtr.FetchSecondaryIPCount() != 1 {
+		t.Error("Wrong number of secondary IPs ", consumerPtr.FetchSecondaryIPCount())
+	}
+}
+
+// checkErr is an assertion of the presence or absence of an error
+func checkErr(t *testing.T, err error, shouldErr bool) {
+	t.Helper()
+	if err != nil && !shouldErr {
+		t.Fatal("unexpected error: err:", err)
+	}
+
+	if err == nil && shouldErr {
+		t.Fatal("expected error but received none")
+	}
+}
+
+func init() {
+	logger.InitLogger("testlogs", 0, 0, "./")
+}
diff 
--git a/cns/nodesubnet/nodesubnet_nc.go b/cns/nodesubnet/nodesubnet_nc.go new file mode 100644 index 0000000000..608bb6d986 --- /dev/null +++ b/cns/nodesubnet/nodesubnet_nc.go @@ -0,0 +1,40 @@ +package nodesubnet + +import ( + "strconv" + + "github.com/Azure/azure-container-networking/cns" + "github.com/Azure/azure-container-networking/crd/nodenetworkconfig/api/v1alpha" +) + +const ( + // ID for fake NC that we create to store NodeSubnet IPS + NodeSubnetNCID = "55022629-3854-499b-7133-5e6887959f4ea" // md5sum of "NodeSubnetNC_IPv4" + NodeSubnetNCVersion = 0 + NodeSubnetHostVersion = "0" + NodeSubnetNCStatus = v1alpha.NCUpdateSuccess + NodeSubnetHostPrimaryIP = "" +) + +// CreateNodeSubnetNCRequest generates a CreateNetworkContainerRequest that simply stores the static secondary IPs. +func CreateNodeSubnetNCRequest(secondaryIPs []string) *cns.CreateNetworkContainerRequest { + secondaryIPConfigs := map[string]cns.SecondaryIPConfig{} + + for _, secondaryIP := range secondaryIPs { + // iterate through all secondary IP addresses add them to the request as secondary IPConfigs. + secondaryIPConfigs[secondaryIP] = cns.SecondaryIPConfig{ + IPAddress: secondaryIP, + NCVersion: NodeSubnetNCVersion, + } + } + + return &cns.CreateNetworkContainerRequest{ + HostPrimaryIP: NodeSubnetHostPrimaryIP, + SecondaryIPConfigs: secondaryIPConfigs, + NetworkContainerid: NodeSubnetNCID, + NetworkContainerType: cns.Docker, // Using docker as the NC type for NodeSubnet to match Swift. (The NC is not real) + Version: strconv.FormatInt(NodeSubnetNCVersion, 10), //nolint:gomnd // it's decimal + IPConfiguration: cns.IPConfiguration{}, + NCStatus: NodeSubnetNCStatus, + } +} diff --git a/cns/nodesubnet/nodesubnet_nc_test.go b/cns/nodesubnet/nodesubnet_nc_test.go new file mode 100644 index 0000000000..20636c8cef --- /dev/null +++ b/cns/nodesubnet/nodesubnet_nc_test.go @@ -0,0 +1,54 @@ +package nodesubnet_test + +import ( + "testing" + + "github.com/Azure/azure-container-networking/cns" + "github.com/Azure/azure-container-networking/cns/logger" + "github.com/Azure/azure-container-networking/cns/nodesubnet" + "github.com/Azure/azure-container-networking/crd/nodenetworkconfig/api/v1alpha" + "github.com/google/go-cmp/cmp" +) + +func TestCreateNodeSubnetNCRequest_EmptySecondaryIPs(t *testing.T) { + secondaryIPs := []string{} + expectedRequest := &cns.CreateNetworkContainerRequest{ + HostPrimaryIP: nodesubnet.NodeSubnetHostPrimaryIP, + SecondaryIPConfigs: map[string]cns.SecondaryIPConfig{}, + NetworkContainerid: nodesubnet.NodeSubnetNCID, + NetworkContainerType: cns.Docker, + Version: "0", + IPConfiguration: cns.IPConfiguration{}, + NCStatus: v1alpha.NCUpdateSuccess, + } + + request := nodesubnet.CreateNodeSubnetNCRequest(secondaryIPs) + if !cmp.Equal(request, expectedRequest) { + t.Errorf("Unexepected diff in NodeSubnetNCRequest: %v", cmp.Diff(request, expectedRequest)) + } +} + +func TestCreateNodeSubnetNCRequest_NonEmptySecondaryIPs(t *testing.T) { + secondaryIPs := []string{"10.0.0.1", "10.0.0.2"} + expectedRequest := &cns.CreateNetworkContainerRequest{ + HostPrimaryIP: nodesubnet.NodeSubnetHostPrimaryIP, + SecondaryIPConfigs: map[string]cns.SecondaryIPConfig{ + "10.0.0.1": {IPAddress: "10.0.0.1", NCVersion: nodesubnet.NodeSubnetNCVersion}, + "10.0.0.2": {IPAddress: "10.0.0.2", NCVersion: nodesubnet.NodeSubnetNCVersion}, + }, + NetworkContainerid: nodesubnet.NodeSubnetNCID, + NetworkContainerType: cns.Docker, + Version: "0", + IPConfiguration: cns.IPConfiguration{}, + NCStatus: v1alpha.NCUpdateSuccess, + } + + request := 
nodesubnet.CreateNodeSubnetNCRequest(secondaryIPs) + if !cmp.Equal(request, expectedRequest) { + t.Errorf("Unexepected diff in NodeSubnetNCRequest: %v", cmp.Diff(request, expectedRequest)) + } +} + +func init() { + logger.InitLogger("testlogs", 0, 0, "./") +} diff --git a/cns/restserver/api.go b/cns/restserver/api.go index a3e38fdc69..4f46c0f65f 100644 --- a/cns/restserver/api.go +++ b/cns/restserver/api.go @@ -4,11 +4,11 @@ package restserver import ( + "bytes" "context" "encoding/json" "fmt" "io" - "net" "net/http" "net/url" "regexp" @@ -21,6 +21,7 @@ import ( "github.com/Azure/azure-container-networking/cns/types" "github.com/Azure/azure-container-networking/cns/wireserver" "github.com/Azure/azure-container-networking/common" + "github.com/Azure/azure-container-networking/nmagent" "github.com/pkg/errors" ) @@ -335,161 +336,6 @@ func (service *HTTPRestService) deleteHnsNetwork(w http.ResponseWriter, r *http. logger.Response(service.Name, resp, resp.ReturnCode, err) } -// Handles ip reservation requests. -func (service *HTTPRestService) reserveIPAddress(w http.ResponseWriter, r *http.Request) { - logger.Printf("[Azure CNS] reserveIPAddress") - - var req cns.ReserveIPAddressRequest - var returnCode types.ResponseCode - returnMessage := "" - addr := "" - address := "" - err := common.Decode(w, r, &req) - - logger.Request(service.Name, &req, err) - - if err != nil { - return - } - - if req.ReservationID == "" { - returnCode = types.ReservationNotFound - returnMessage = "[Azure CNS] Error. ReservationId is empty" - } - - switch r.Method { - case http.MethodPost: - ic := service.ipamClient - - var ifInfo *wireserver.InterfaceInfo - ifInfo, err = service.getPrimaryHostInterface(context.TODO()) - if err != nil { - returnMessage = fmt.Sprintf("[Azure CNS] Error. GetPrimaryIfaceInfo failed %v", err.Error()) - returnCode = types.UnexpectedError - break - } - - asID, err := ic.GetAddressSpace() - if err != nil { - returnMessage = fmt.Sprintf("[Azure CNS] Error. GetAddressSpace failed %v", err.Error()) - returnCode = types.UnexpectedError - break - } - - poolID, err := ic.GetPoolID(asID, ifInfo.Subnet) - if err != nil { - returnMessage = fmt.Sprintf("[Azure CNS] Error. GetPoolID failed %v", err.Error()) - returnCode = types.UnexpectedError - break - } - - addr, err = ic.ReserveIPAddress(poolID, req.ReservationID) - if err != nil { - returnMessage = fmt.Sprintf("[Azure CNS] ReserveIpAddress failed with %+v", err.Error()) - returnCode = types.AddressUnavailable - break - } - - addressIP, _, err := net.ParseCIDR(addr) - if err != nil { - returnMessage = fmt.Sprintf("[Azure CNS] ParseCIDR failed with %+v", err.Error()) - returnCode = types.UnexpectedError - break - } - address = addressIP.String() - - default: - returnMessage = "[Azure CNS] Error. ReserveIP did not receive a POST." - returnCode = types.InvalidParameter - - } - - resp := cns.Response{ - ReturnCode: returnCode, - Message: returnMessage, - } - - if resp.ReturnCode == 0 { - // If Response is success i.e. code 0, then publish metrics. - publishIPStateMetrics(service.buildIPState()) - } - - reserveResp := &cns.ReserveIPAddressResponse{Response: resp, IPAddress: address} - err = common.Encode(w, &reserveResp) - logger.Response(service.Name, reserveResp, resp.ReturnCode, err) -} - -// Handles release ip reservation requests. 
-func (service *HTTPRestService) releaseIPAddress(w http.ResponseWriter, r *http.Request) { - logger.Printf("[Azure CNS] releaseIPAddress") - - var req cns.ReleaseIPAddressRequest - var returnCode types.ResponseCode - returnMessage := "" - - err := common.Decode(w, r, &req) - logger.Request(service.Name, &req, err) - - if err != nil { - return - } - - if req.ReservationID == "" { - returnCode = types.ReservationNotFound - returnMessage = "[Azure CNS] Error. ReservationId is empty" - } - - switch r.Method { - case http.MethodPost: - ic := service.ipamClient - - var ifInfo *wireserver.InterfaceInfo - ifInfo, err = service.getPrimaryHostInterface(context.TODO()) - if err != nil { - returnMessage = fmt.Sprintf("[Azure CNS] Error. GetPrimaryIfaceInfo failed %v", err.Error()) - returnCode = types.UnexpectedError - break - } - - asID, err := ic.GetAddressSpace() - if err != nil { - returnMessage = fmt.Sprintf("[Azure CNS] Error. GetAddressSpace failed %v", err.Error()) - returnCode = types.UnexpectedError - break - } - - poolID, err := ic.GetPoolID(asID, ifInfo.Subnet) - if err != nil { - returnMessage = fmt.Sprintf("[Azure CNS] Error. GetPoolID failed %v", err.Error()) - returnCode = types.UnexpectedError - break - } - - err = ic.ReleaseIPAddress(poolID, req.ReservationID) - if err != nil { - returnMessage = fmt.Sprintf("[Azure CNS] ReleaseIpAddress failed with %+v", err.Error()) - returnCode = types.ReservationNotFound - } - - default: - returnMessage = "[Azure CNS] Error. ReleaseIP did not receive a POST." - returnCode = types.InvalidParameter - } - - resp := cns.Response{ - ReturnCode: returnCode, - Message: returnMessage, - } - - if resp.ReturnCode == 0 { - // If Response is success i.e. code 0, then publish metrics. - publishIPStateMetrics(service.buildIPState()) - } - - err = common.Encode(w, &resp) - logger.Response(service.Name, resp, resp.ReturnCode, err) -} - // Retrieves the host local ip address. Containers can talk to host using this IP address. func (service *HTTPRestService) getHostLocalIP(w http.ResponseWriter, r *http.Request) { logger.Printf("[Azure CNS] getHostLocalIP") @@ -542,71 +388,6 @@ func (service *HTTPRestService) getHostLocalIP(w http.ResponseWriter, r *http.Re logger.Response(service.Name, hostLocalIPResponse, resp.ReturnCode, err) } -// Handles ip address utilization requests. -func (service *HTTPRestService) getIPAddressUtilization(w http.ResponseWriter, r *http.Request) { - logger.Printf("[Azure CNS] getIPAddressUtilization") - logger.Request(service.Name, "getIPAddressUtilization", nil) - - var returnCode types.ResponseCode - returnMessage := "" - capacity := 0 - available := 0 - var unhealthyAddrs []string - - switch r.Method { - case http.MethodGet: - ic := service.ipamClient - - ifInfo, err := service.getPrimaryHostInterface(context.TODO()) - if err != nil { - returnMessage = fmt.Sprintf("[Azure CNS] Error. GetPrimaryIfaceInfo failed %v", err.Error()) - returnCode = types.UnexpectedError - break - } - - asID, err := ic.GetAddressSpace() - if err != nil { - returnMessage = fmt.Sprintf("[Azure CNS] Error. GetAddressSpace failed %v", err.Error()) - returnCode = types.UnexpectedError - break - } - - poolID, err := ic.GetPoolID(asID, ifInfo.Subnet) - if err != nil { - returnMessage = fmt.Sprintf("[Azure CNS] Error. GetPoolID failed %v", err.Error()) - returnCode = types.UnexpectedError - break - } - - capacity, available, unhealthyAddrs, err = ic.GetIPAddressUtilization(poolID) - if err != nil { - returnMessage = fmt.Sprintf("[Azure CNS] Error. 
GetIPUtilization failed %v", err.Error()) - returnCode = types.UnexpectedError - break - } - logger.Printf("[Azure CNS] Capacity %v Available %v UnhealthyAddrs %v", capacity, available, unhealthyAddrs) - - default: - returnMessage = "[Azure CNS] Error. GetIPUtilization did not receive a GET." - returnCode = types.InvalidParameter - } - - resp := cns.Response{ - ReturnCode: returnCode, - Message: returnMessage, - } - - utilResponse := &cns.IPAddressesUtilizationResponse{ - Response: resp, - Available: available, - Reserved: capacity - available, - Unhealthy: len(unhealthyAddrs), - } - - err := common.Encode(w, &utilResponse) - logger.Response(service.Name, utilResponse, resp.ReturnCode, err) -} - // Handles retrieval of ip addresses that are available to be reserved from ipam driver. func (service *HTTPRestService) getAvailableIPAddresses(w http.ResponseWriter, r *http.Request) { logger.Printf("[Azure CNS] getAvailableIPAddresses") @@ -631,69 +412,6 @@ func (service *HTTPRestService) getReservedIPAddresses(w http.ResponseWriter, r logger.Response(service.Name, ipResp, resp.ReturnCode, err) } -// Handles retrieval of ghost ip addresses from ipam driver. -func (service *HTTPRestService) getUnhealthyIPAddresses(w http.ResponseWriter, r *http.Request) { - logger.Printf("[Azure CNS] getUnhealthyIPAddresses") - logger.Request(service.Name, "getUnhealthyIPAddresses", nil) - - var returnCode types.ResponseCode - returnMessage := "" - capacity := 0 - available := 0 - var unhealthyAddrs []string - - switch r.Method { - case http.MethodGet: - ic := service.ipamClient - - ifInfo, err := service.getPrimaryHostInterface(context.TODO()) - if err != nil { - returnMessage = fmt.Sprintf("[Azure CNS] Error. GetPrimaryIfaceInfo failed %v", err.Error()) - returnCode = types.UnexpectedError - break - } - - asID, err := ic.GetAddressSpace() - if err != nil { - returnMessage = fmt.Sprintf("[Azure CNS] Error. GetAddressSpace failed %v", err.Error()) - returnCode = types.UnexpectedError - break - } - - poolID, err := ic.GetPoolID(asID, ifInfo.Subnet) - if err != nil { - returnMessage = fmt.Sprintf("[Azure CNS] Error. GetPoolID failed %v", err.Error()) - returnCode = types.UnexpectedError - break - } - - capacity, available, unhealthyAddrs, err = ic.GetIPAddressUtilization(poolID) - if err != nil { - returnMessage = fmt.Sprintf("[Azure CNS] Error. GetIPUtilization failed %v", err.Error()) - returnCode = types.UnexpectedError - break - } - logger.Printf("[Azure CNS] Capacity %v Available %v UnhealthyAddrs %v", capacity, available, unhealthyAddrs) - - default: - returnMessage = "[Azure CNS] Error. GetUnhealthyIP did not receive a POST." - returnCode = types.InvalidParameter - } - - resp := cns.Response{ - ReturnCode: returnCode, - Message: returnMessage, - } - - ipResp := &cns.GetIPAddressesResponse{ - Response: resp, - IPAddresses: unhealthyAddrs, - } - - err := common.Encode(w, &ipResp) - logger.Response(service.Name, ipResp, resp.ReturnCode, err) -} - // getAllIPAddresses retrieves all ip addresses from ipam driver. 
func (service *HTTPRestService) getAllIPAddresses(w http.ResponseWriter, r *http.Request) { logger.Printf("[Azure CNS] getAllIPAddresses") @@ -1313,7 +1031,28 @@ func (service *HTTPRestService) unpublishNetworkContainer(w http.ResponseWriter, ctx := r.Context() - if !service.isNetworkJoined(req.NetworkID) { + var unpublishBody nmagent.DeleteContainerRequest + var azrNC bool + err = json.Unmarshal(req.DeleteNetworkContainerRequestBody, &unpublishBody) + if err != nil { + // If the body contains only `""\n`, it is a non-AZR NC + // In this case, we should not return an error + // However, if the body is not `""\n`, it is invalid and we must return an error + // []byte{34, 34, 10} here represents []byte(`""`+"\n") + if !bytes.Equal(req.DeleteNetworkContainerRequestBody, []byte{34, 34, 10}) { + http.Error(w, fmt.Sprintf("could not unmarshal delete network container body: %v", err), http.StatusBadRequest) + return + } + } else { + // If unmarshalling was successful, it is an AZR NC + azrNC = true + } + + /* For AZR scenarios, if NMAgent is restarted, it loses state and does not know which VNETs to subscribe to. As it no longer has VNET state, delete NC calls would fail. We therefore issue a join VNET call for all AZR + NC unpublish calls, just as we do for publish calls. + */ + if azrNC || !service.isNetworkJoined(req.NetworkID) { joinResp, err := service.wsproxy.JoinNetwork(ctx, req.NetworkID) //nolint:govet // ok to shadow if err != nil { resp := cns.UnpublishNetworkContainerResponse{ @@ -1346,7 +1085,7 @@ func (service *HTTPRestService) unpublishNetworkContainer(w http.ResponseWriter, } service.setNetworkStateJoined(req.NetworkID) - logger.Printf("[Azure-CNS] joined vnet %s during nc %s unpublish. wireserver response: %v", req.NetworkID, req.NetworkContainerID, string(joinBytes)) + logger.Printf("[Azure-CNS] joined vnet %s during nc %s unpublish. AZREnabled: %t, wireserver response: %v", req.NetworkID, req.NetworkContainerID, unpublishBody.AZREnabled, string(joinBytes)) } publishResp, err := service.wsproxy.UnpublishNC(ctx, ncParams, req.DeleteNetworkContainerRequestBody) @@ -1561,3 +1300,40 @@ func (service *HTTPRestService) getVMUniqueID(w http.ResponseWriter, r *http.Req }) } } + +// nmAgentNCListHandler queries all NCs on a node from NMAgent +func (service *HTTPRestService) nmAgentNCListHandler(w http.ResponseWriter, r *http.Request) { + logger.Request(service.Name, "nmAgentNCListHandler", nil) + var ( + returnCode types.ResponseCode + networkContainerList []string + ) + + returnMessage := "Successfully fetched NC list from NMAgent" + ctx := r.Context() + switch r.Method { + case http.MethodGet: + ncVersionList, ncVersionerr := service.nma.GetNCVersionList(ctx) + if ncVersionerr != nil { + returnCode = types.NmAgentNCVersionListError + returnMessage = "[Azure-CNS] " + ncVersionerr.Error() + break + } + + for _, container := range ncVersionList.Containers { + networkContainerList = append(networkContainerList, container.NetworkContainerID) + } + + default: + returnMessage = "[Azure-CNS] NmAgentNCList API expects a GET method." 
+ } + + resp := cns.Response{ReturnCode: returnCode, Message: returnMessage} + NCListResponse := &cns.NCListResponse{ + Response: resp, + NCList: networkContainerList, + } + + serviceErr := common.Encode(w, &NCListResponse) + logger.Response(service.Name, NCListResponse, resp.ReturnCode, serviceErr) +} diff --git a/cns/restserver/api_test.go b/cns/restserver/api_test.go index 002eddfd84..b6c45795b6 100644 --- a/cns/restserver/api_test.go +++ b/cns/restserver/api_test.go @@ -1057,7 +1057,7 @@ func TestUnpublishNCViaCNS(t *testing.T) { "/machine/plugins/?comp=nmagent&type=NetworkManagement/interfaces/dummyIntf/networkContainers/dummyNCURL/authenticationToke/" + "8636c99d-7861-401f-b0d3-7e5b7dc8183c" + "/api-version/1/method/DELETE" - err = unpublishNCViaCNS("vnet1", "ethWebApp", deleteNetworkContainerURL) + err = unpublishNCViaCNS("vnet1", "ethWebApp", deleteNetworkContainerURL, []byte(`""`+"\n")) if err == nil { t.Fatal("Expected a bad request error due to delete network url being incorrect") } @@ -1068,7 +1068,7 @@ func TestUnpublishNCViaCNS(t *testing.T) { "/machine/plugins/?comp=nmagent&NetworkManagement/interfaces/dummyIntf/networkContainers/dummyNCURL/authenticationToken/" + "8636c99d-7861-401f-b0d3-7e5b7dc8183c8636c99d-7861-401f-b0d3-7e5b7dc8183c" + "/api-version/1/method/DELETE" - err = unpublishNCViaCNS("vnet1", "ethWebApp", deleteNetworkContainerURL) + err = unpublishNCViaCNS("vnet1", "ethWebApp", deleteNetworkContainerURL, []byte(`""`+"\n")) if err == nil { t.Fatal("Expected a bad request error due to create network url having more characters than permitted in auth token") } @@ -1076,12 +1076,64 @@ func TestUnpublishNCViaCNS(t *testing.T) { // now actually perform the deletion: deleteNetworkContainerURL = "http://" + nmagentEndpoint + "/machine/plugins/?comp=nmagent&type=NetworkManagement/interfaces/dummyIntf/networkContainers/dummyNCURL/authenticationToken/dummyT/api-version/1/method/DELETE" - err = unpublishNCViaCNS("vnet1", "ethWebApp", deleteNetworkContainerURL) + err = unpublishNCViaCNS("vnet1", "ethWebApp", deleteNetworkContainerURL, []byte(`""`+"\n")) if err != nil { t.Fatal(err) } } +func TestUnpublishViaCNSRequestBody(t *testing.T) { + createNetworkContainerURL := "http://" + nmagentEndpoint + + "/machine/plugins/?comp=nmagent&type=NetworkManagement/interfaces/dummyIntf/networkContainers/dummyNCURL/authenticationToken/dummyT/api-version/1" + deleteNetworkContainerURL := "http://" + nmagentEndpoint + + "/machine/plugins/?comp=nmagent&type=NetworkManagement/interfaces/dummyIntf/networkContainers/dummyNCURL/authenticationToken/dummyT/api-version/1/method/DELETE" + vnet := "vnet1" + wsProxy := fakes.WireserverProxyFake{} + cleanup := setWireserverProxy(svc, &wsProxy) + defer cleanup() + + tests := []struct { + name string + ncID string + body []byte + requireError bool + }{ + { + name: "Delete NC with invalid body", + ncID: "ncID1", + body: []byte(`invalid` + "\n"), + requireError: true, + }, + { + name: "Delete NC with valid non-AZR body", + ncID: "ncID2", + body: []byte(`""` + "\n"), + requireError: false, + }, + { + name: "Delete NC with valid AZR body", + ncID: "ncID3", + body: []byte(`{"azID":1,"azrEnabled":true}`), + requireError: false, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + errPublish := publishNCViaCNS(vnet, tt.ncID, createNetworkContainerURL) + require.NoError(t, errPublish) + errUnpublish := unpublishNCViaCNS(vnet, tt.ncID, deleteNetworkContainerURL, tt.body) + if tt.requireError { + require.Error(t, errUnpublish) 
+ require.Contains(t, errUnpublish.Error(), "error decoding json") + } else { + require.NoError(t, errUnpublish) + } + }) + } +} + func TestUnpublishNCViaCNS401(t *testing.T) { wsproxy := fakes.WireserverProxyFake{ UnpublishNCFunc: func(_ context.Context, _ cns.NetworkContainerParameters, i []byte) (*http.Response, error) { @@ -1161,7 +1213,7 @@ func TestUnpublishNCViaCNS401(t *testing.T) { } } -func unpublishNCViaCNS(networkID, networkContainerID, deleteNetworkContainerURL string) error { +func unpublishNCViaCNS(networkID, networkContainerID, deleteNetworkContainerURL string, bodyBytes []byte) error { joinNetworkURL := "http://" + nmagentEndpoint + "/dummyVnetURL" unpublishNCRequest := &cns.UnpublishNetworkContainerRequest{ @@ -1169,7 +1221,7 @@ func unpublishNCViaCNS(networkID, networkContainerID, deleteNetworkContainerURL NetworkContainerID: networkContainerID, JoinNetworkURL: joinNetworkURL, DeleteNetworkContainerURL: deleteNetworkContainerURL, - DeleteNetworkContainerRequestBody: []byte("{}"), + DeleteNetworkContainerRequestBody: bodyBytes, } var body bytes.Buffer @@ -1250,6 +1302,70 @@ func TestNmAgentSupportedApisHandler(t *testing.T) { fmt.Printf("nmAgentSupportedApisHandler Responded with %+v\n", nmAgentSupportedApisResponse) } +func TestNMAgentNCListHandler(t *testing.T) { + fmt.Println("Test: nmAgentNCListHandler") + + setEnv(t) + errSetOrch := setOrchestratorType(t, cns.Kubernetes) + if errSetOrch != nil { + t.Fatalf("TestNMAgentNCListHandler failed with error:%+v", errSetOrch) + } + + mnma := &fakes.NMAgentClientFake{} + cleanupNMA := setMockNMAgent(svc, mnma) + defer cleanupNMA() + + wsproxy := fakes.WireserverProxyFake{} + cleanupWSP := setWireserverProxy(svc, &wsproxy) + defer cleanupWSP() + + params := createOrUpdateNetworkContainerParams{ + ncID: "f47ac10b-58cc-0372-8567-0e02b2c3d475", // random guid + ncIP: "11.0.0.5", + ncType: cns.AzureContainerInstance, + ncVersion: "0", + vnetID: "vnet1", + podName: "testpod", + podNamespace: "testpodnamespace", + } + + err := createNC(params) + if err != nil { + t.Fatal("error creating NC: err:", err) + } + + mnma.GetNCVersionListF = func(_ context.Context) (nmagent.NCVersionList, error) { + return nmagent.NCVersionList{ + Containers: []nmagent.NCVersion{ + { + // Must set it as params.ncID without cns.SwiftPrefix to mock real nmagent nc format. 
+ NetworkContainerID: params.ncID, + Version: params.ncVersion, + }, + }, + }, nil + } + + // test CNS' new GET /ncList API + var req *http.Request + req, err = http.NewRequestWithContext(context.TODO(), http.MethodGet, cns.NMAgentGetNCListAPIPath, http.NoBody) + if err != nil { + t.Fatal(err) + } + + w := httptest.NewRecorder() + mux.ServeHTTP(w, req) + var nmAgentNCListResponse cns.NCListResponse + + err = decodeResponse(w, &nmAgentNCListResponse) + if err != nil || nmAgentNCListResponse.Response.ReturnCode != 0 { + t.Errorf("nmAgentNCListHandler failed with response %+v", nmAgentNCListResponse) + } + + fmt.Printf("nmAgentNCListHandler responded with %+v\n", nmAgentNCListResponse) + require.Equal(t, params.ncID, nmAgentNCListResponse.NCList[0]) +} + // Testing GetHomeAz API handler, return UnsupportedVerb if http method is not supported func TestGetHomeAz_UnsupportedHttpMethod(t *testing.T) { req, err := http.NewRequestWithContext(context.TODO(), http.MethodPost, cns.GetHomeAz, http.NoBody) @@ -1683,7 +1799,7 @@ func startService(serviceConfig common.ServiceConfig, _ configuration.CNSConfig) config.Store = fileStore nmagentClient := &fakes.NMAgentClientFake{} - service, err = NewHTTPRestService(&config, &fakes.WireserverClientFake{}, &fakes.WireserverProxyFake{}, + service, err = NewHTTPRestService(&config, &fakes.WireserverClientFake{}, &fakes.WireserverProxyFake{}, &IPtablesProvider{}, nmagentClient, nil, nil, nil, fakes.NewMockIMDSClient()) if err != nil { return err diff --git a/cns/restserver/helper_for_nodesubnet_test.go b/cns/restserver/helper_for_nodesubnet_test.go new file mode 100644 index 0000000000..5719757429 --- /dev/null +++ b/cns/restserver/helper_for_nodesubnet_test.go @@ -0,0 +1,89 @@ +package restserver + +import ( + "context" + "net/netip" + "testing" + + "github.com/Azure/azure-container-networking/cns" + "github.com/Azure/azure-container-networking/cns/common" + "github.com/Azure/azure-container-networking/cns/fakes" + "github.com/Azure/azure-container-networking/cns/nodesubnet" + acn "github.com/Azure/azure-container-networking/common" + "github.com/Azure/azure-container-networking/nmagent" + "github.com/Azure/azure-container-networking/store" +) + +// GetRestServiceObjectForNodeSubnetTest creates a new HTTPRestService object for use in nodesubnet unit tests. 
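Outside of the httptest harness used above, the new NC list endpoint can be queried like any other CNS API; a minimal client sketch follows, assuming CNS is listening on its default local port (the host:port literal and the panic-style error handling are illustrative):

package main

import (
	"encoding/json"
	"fmt"
	"net/http"

	"github.com/Azure/azure-container-networking/cns"
)

func main() {
	// cns.NMAgentGetNCListAPIPath is the route served by nmAgentNCListHandler; the port is assumed.
	resp, err := http.Get("http://localhost:10090" + cns.NMAgentGetNCListAPIPath)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var ncList cns.NCListResponse
	if err := json.NewDecoder(resp.Body).Decode(&ncList); err != nil {
		panic(err)
	}
	fmt.Println(ncList.Response.ReturnCode, ncList.NCList)
}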
+func GetRestServiceObjectForNodeSubnetTest(t *testing.T, generator CNIConflistGenerator) *HTTPRestService { + config := &common.ServiceConfig{ + Name: "test", + Version: "1.0", + ChannelMode: "AzureHost", + Store: store.NewMockStore("test"), + } + interfaces := nmagent.Interfaces{ + Entries: []nmagent.Interface{ + { + MacAddress: nmagent.MACAddress{0x00, 0x0D, 0x3A, 0xF9, 0xDC, 0xA6}, + IsPrimary: true, + InterfaceSubnets: []nmagent.InterfaceSubnet{ + { + Prefix: "10.0.0.0/24", + IPAddress: []nmagent.NodeIP{ + { + Address: nmagent.IPAddress(netip.AddrFrom4([4]byte{10, 0, 0, 4})), + IsPrimary: true, + }, + { + Address: nmagent.IPAddress(netip.AddrFrom4([4]byte{10, 0, 0, 52})), + IsPrimary: false, + }, + { + Address: nmagent.IPAddress(netip.AddrFrom4([4]byte{10, 0, 0, 63})), + IsPrimary: false, + }, + { + Address: nmagent.IPAddress(netip.AddrFrom4([4]byte{10, 0, 0, 45})), + IsPrimary: false, + }, + }, + }, + }, + }, + }, + } + + svc, err := cns.NewService(config.Name, config.Version, config.ChannelMode, config.Store) + if err != nil { + return nil + } + + svc.SetOption(acn.OptCnsURL, "") + svc.SetOption(acn.OptCnsPort, "") + err = svc.Initialize(config) + if err != nil { + return nil + } + + t.Cleanup(func() { svc.Uninitialize() }) + + return &HTTPRestService{ + Service: svc, + cniConflistGenerator: generator, + state: &httpRestServiceState{}, + PodIPConfigState: make(map[string]cns.IPConfigurationStatus), + PodIPIDByPodInterfaceKey: make(map[string][]string), + nma: &fakes.NMAgentClientFake{ + GetInterfaceIPInfoF: func(_ context.Context) (nmagent.Interfaces, error) { + return interfaces, nil + }, + }, + wscli: &fakes.WireserverClientFake{}, + } +} + +// GetNodesubnetIPFetcher gets the nodesubnetIPFetcher from the HTTPRestService. +func (service *HTTPRestService) GetNodesubnetIPFetcher() *nodesubnet.IPFetcher { + return service.nodesubnetIPFetcher +} diff --git a/cns/restserver/homeazmonitor.go b/cns/restserver/homeazmonitor.go index 3fb21e590f..f73c6e35af 100644 --- a/cns/restserver/homeazmonitor.go +++ b/cns/restserver/homeazmonitor.go @@ -154,7 +154,7 @@ func (h *HomeAzMonitor) Populate(ctx context.Context) { h.update(returnCode, returnMessage, cns.HomeAzResponse{IsSupported: true}) return } - h.update(types.Success, "Get Home Az succeeded", cns.HomeAzResponse{IsSupported: true, HomeAz: azResponse.HomeAz}) + h.update(types.Success, "Get Home Az succeeded", cns.HomeAzResponse{IsSupported: true, HomeAz: azResponse.HomeAz, NmaAppliedTheIPV6Fix: azResponse.ContainsFixes(nmagent.HomeAZFixIPv6)}) } // update constructs a GetHomeAzResponse entity and update its cache diff --git a/cns/restserver/homeazmonitor_test.go b/cns/restserver/homeazmonitor_test.go index b435f3cde1..241f2089db 100644 --- a/cns/restserver/homeazmonitor_test.go +++ b/cns/restserver/homeazmonitor_test.go @@ -24,23 +24,23 @@ func TestHomeAzMonitor(t *testing.T) { { "happy path", &fakes.NMAgentClientFake{ - SupportedAPIsF: func(ctx context.Context) ([]string, error) { + SupportedAPIsF: func(_ context.Context) ([]string, error) { return []string{"GetHomeAz"}, nil }, - GetHomeAzF: func(ctx context.Context) (nmagent.AzResponse, error) { - return nmagent.AzResponse{HomeAz: uint(1)}, nil + GetHomeAzF: func(_ context.Context) (nmagent.AzResponse, error) { + return nmagent.AzResponse{HomeAz: uint(1), AppliedFixes: []nmagent.HomeAZFix{nmagent.HomeAZFixIPv6}}, nil }, }, - cns.HomeAzResponse{IsSupported: true, HomeAz: uint(1)}, + cns.HomeAzResponse{IsSupported: true, HomeAz: uint(1), NmaAppliedTheIPV6Fix: true}, false, }, { "getHomeAz 
is not supported in nmagent", &fakes.NMAgentClientFake{ - SupportedAPIsF: func(ctx context.Context) ([]string, error) { + SupportedAPIsF: func(_ context.Context) ([]string, error) { return []string{"dummy"}, nil }, - GetHomeAzF: func(ctx context.Context) (nmagent.AzResponse, error) { + GetHomeAzF: func(_ context.Context) (nmagent.AzResponse, error) { return nmagent.AzResponse{}, nil }, }, @@ -50,23 +50,36 @@ func TestHomeAzMonitor(t *testing.T) { { "api supported but home az value is not valid", &fakes.NMAgentClientFake{ - SupportedAPIsF: func(ctx context.Context) ([]string, error) { + SupportedAPIsF: func(_ context.Context) ([]string, error) { return []string{GetHomeAzAPIName}, nil }, - GetHomeAzF: func(ctx context.Context) (nmagent.AzResponse, error) { + GetHomeAzF: func(_ context.Context) (nmagent.AzResponse, error) { return nmagent.AzResponse{HomeAz: 0}, nil }, }, cns.HomeAzResponse{IsSupported: true}, true, }, + { + "api supported but apiVersion value is not valid", + &fakes.NMAgentClientFake{ + SupportedAPIsF: func(_ context.Context) ([]string, error) { + return []string{GetHomeAzAPIName}, nil + }, + GetHomeAzF: func(_ context.Context) (nmagent.AzResponse, error) { + return nmagent.AzResponse{HomeAz: uint(1), AppliedFixes: []nmagent.HomeAZFix{nmagent.HomeAZFixInvalid}}, nil + }, + }, + cns.HomeAzResponse{IsSupported: true, HomeAz: uint(1), NmaAppliedTheIPV6Fix: false}, + false, + }, { "api supported but got unexpected errors", &fakes.NMAgentClientFake{ - SupportedAPIsF: func(ctx context.Context) ([]string, error) { + SupportedAPIsF: func(_ context.Context) ([]string, error) { return []string{GetHomeAzAPIName}, nil }, - GetHomeAzF: func(ctx context.Context) (nmagent.AzResponse, error) { + GetHomeAzF: func(_ context.Context) (nmagent.AzResponse, error) { return nmagent.AzResponse{}, errors.New("unexpected error") }, }, diff --git a/cns/restserver/internalapi.go b/cns/restserver/internalapi.go index 6f9df7cf33..8d477cbab8 100644 --- a/cns/restserver/internalapi.go +++ b/cns/restserver/internalapi.go @@ -18,6 +18,7 @@ import ( "github.com/Azure/azure-container-networking/cns" "github.com/Azure/azure-container-networking/cns/logger" + "github.com/Azure/azure-container-networking/cns/nodesubnet" "github.com/Azure/azure-container-networking/cns/types" "github.com/Azure/azure-container-networking/common" "github.com/Azure/azure-container-networking/crd/nodenetworkconfig/api/v1alpha" @@ -224,6 +225,7 @@ func (service *HTTPRestService) syncHostNCVersion(ctx context.Context, channelMo for _, nc := range ncVersionListResp.Containers { nmaNCs[strings.ToLower(nc.NetworkContainerID)] = nc.Version } + hasNC.Set(float64(len(nmaNCs))) for ncID := range outdatedNCs { nmaNCVersionStr, ok := nmaNCs[ncID] if !ok { @@ -275,22 +277,7 @@ func (service *HTTPRestService) syncHostNCVersion(ctx context.Context, channelMo return len(programmedNCs), nil } -func (service *HTTPRestService) ReconcileIPAMState(ncReqs []*cns.CreateNetworkContainerRequest, podInfoByIP map[string]cns.PodInfo, nnc *v1alpha.NodeNetworkConfig) types.ResponseCode { - logger.Printf("Reconciling CNS IPAM state with nc requests: [%+v], PodInfo [%+v], NNC: [%+v]", ncReqs, podInfoByIP, nnc) - // if no nc reqs, there is no CRD state yet - if len(ncReqs) == 0 { - logger.Printf("CNS starting with no NC state, podInfoMap count %d", len(podInfoByIP)) - return types.Success - } - - // first step in reconciliation is to create all the NCs in CNS, no IP assignment yet. 
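As the home-AZ tests above show, NmaAppliedTheIPV6Fix is derived by asking the AzResponse whether a particular fix appears in its applied set; here is a minimal sketch of that check, constructing the response by hand the way the NMAgent fake does:

package main

import (
	"fmt"

	"github.com/Azure/azure-container-networking/nmagent"
)

func main() {
	// An AzResponse as the fake NMAgent client returns it in the happy-path test.
	az := nmagent.AzResponse{HomeAz: 1, AppliedFixes: []nmagent.HomeAZFix{nmagent.HomeAZFixIPv6}}

	// ContainsFixes drives the NmaAppliedTheIPV6Fix field of the cached HomeAzResponse.
	fmt.Println(az.ContainsFixes(nmagent.HomeAZFixIPv6)) // true
}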
- for _, ncReq := range ncReqs { - returnCode := service.CreateOrUpdateNetworkContainerInternal(ncReq) - if returnCode != types.Success { - return returnCode - } - } - +func (service *HTTPRestService) ReconcileIPAssignment(podInfoByIP map[string]cns.PodInfo, ncReqs []*cns.CreateNetworkContainerRequest) types.ResponseCode { // index all the secondary IP configs for all the nc reqs, for easier lookup later on. allSecIPsIdx := make(map[string]*cns.CreateNetworkContainerRequest) for i := range ncReqs { @@ -321,6 +308,7 @@ func (service *HTTPRestService) ReconcileIPAMState(ncReqs []*cns.CreateNetworkCo // } // // such that we can iterate over pod interfaces, and assign all IPs for it at once. + podKeyToPodIPs, err := newPodKeyToPodIPsMap(podInfoByIP) if err != nil { logger.Errorf("could not transform pods indexed by IP address to pod IPs indexed by interface: %v", err) @@ -378,12 +366,69 @@ func (service *HTTPRestService) ReconcileIPAMState(ncReqs []*cns.CreateNetworkCo } } + return types.Success +} + +func (service *HTTPRestService) CreateNCs(ncReqs []*cns.CreateNetworkContainerRequest) types.ResponseCode { + for _, ncReq := range ncReqs { + returnCode := service.CreateOrUpdateNetworkContainerInternal(ncReq) + if returnCode != types.Success { + return returnCode + } + } + + return types.Success +} + +func (service *HTTPRestService) ReconcileIPAMStateForSwift(ncReqs []*cns.CreateNetworkContainerRequest, podInfoByIP map[string]cns.PodInfo, nnc *v1alpha.NodeNetworkConfig) types.ResponseCode { + logger.Printf("Reconciling CNS IPAM state with nc requests: [%+v], PodInfo [%+v], NNC: [%+v]", ncReqs, podInfoByIP, nnc) + // if no nc reqs, there is no CRD state yet + if len(ncReqs) == 0 { + logger.Printf("CNS starting with no NC state, podInfoMap count %d", len(podInfoByIP)) + return types.Success + } + + // first step in reconciliation is to create all the NCs in CNS, no IP assignment yet. + if returnCode := service.CreateNCs(ncReqs); returnCode != types.Success { + return returnCode + } + + logger.Debugf("ncReqs created successfully, now save IPs") + // now reconcile IPAM state. + if returnCode := service.ReconcileIPAssignment(podInfoByIP, ncReqs); returnCode != types.Success { + return returnCode + } + if err := service.MarkExistingIPsAsPendingRelease(nnc.Spec.IPsNotInUse); err != nil { logger.Errorf("[Azure CNS] Error. Failed to mark IPs as pending %v", nnc.Spec.IPsNotInUse) return types.UnexpectedError } - return 0 + return types.Success +} + +// todo: there is some redundancy between this function and ReconcileIPAMStateForSwift. The difference is that this one +// doesn't include the NNC parameter. We may want to unify the common parts. +func (service *HTTPRestService) ReconcileIPAMStateForNodeSubnet(ncReqs []*cns.CreateNetworkContainerRequest, podInfoByIP map[string]cns.PodInfo) types.ResponseCode { + logger.Printf("Reconciling CNS IPAM state with nc requests: [%+v], PodInfo [%+v]", ncReqs, podInfoByIP) + + if len(ncReqs) != 1 { + logger.Errorf("Nodesubnet should always have 1 NC to hold secondary IPs") + return types.NetworkContainerNotSpecified + } + + // first step in reconciliation is to create all the NCs in CNS, no IP assignment yet. + if returnCode := service.CreateNCs(ncReqs); returnCode != types.Success { + return returnCode + } + + logger.Debugf("ncReqs created successfully, now save IPs") + // now reconcile IPAM state. 
+ if returnCode := service.ReconcileIPAssignment(podInfoByIP, ncReqs); returnCode != types.Success { + return returnCode + } + + return types.Success } var ( @@ -526,11 +571,19 @@ func (service *HTTPRestService) CreateOrUpdateNetworkContainerInternal(req *cns. return types.UnsupportedOrchestratorType } - // Validate PrimaryCA must never be empty - err := validateIPSubnet(req.IPConfiguration.IPSubnet) - if err != nil { - logger.Errorf("[Azure CNS] Error. PrimaryCA is invalid, NC Req: %v", req) - return types.InvalidPrimaryIPConfig + if req.NetworkContainerid == nodesubnet.NodeSubnetNCID { + // For NodeSubnet scenarios, Validate PrimaryCA must be empty + if req.IPConfiguration.IPSubnet.IPAddress != "" { + logger.Errorf("[Azure CNS] Error. PrimaryCA is invalid, NC Req: %v", req) + return types.InvalidPrimaryIPConfig + } + } else { + // For Swift scenarios, Validate PrimaryCA must never be empty + err := validateIPSubnet(req.IPConfiguration.IPSubnet) + if err != nil { + logger.Errorf("[Azure CNS] Error. PrimaryCA is invalid, NC Req: %v", req) + return types.InvalidPrimaryIPConfig + } } // Validate SecondaryIPConfig @@ -563,8 +616,7 @@ func (service *HTTPRestService) CreateOrUpdateNetworkContainerInternal(req *cns. // If the NC was created successfully, log NC snapshot. if returnCode == 0 { logNCSnapshot(*req) - - publishIPStateMetrics(service.buildIPState()) + service.publishIPStateMetrics() } else { logger.Errorf(returnMessage) } diff --git a/cns/restserver/internalapi_linux.go b/cns/restserver/internalapi_linux.go index cc68d31d7a..ef30dabf03 100644 --- a/cns/restserver/internalapi_linux.go +++ b/cns/restserver/internalapi_linux.go @@ -11,18 +11,28 @@ import ( "github.com/Azure/azure-container-networking/iptables" "github.com/Azure/azure-container-networking/network/networkutils" goiptables "github.com/coreos/go-iptables/iptables" + "github.com/pkg/errors" ) const SWIFT = "SWIFT-POSTROUTING" +type IPtablesProvider struct{} + +func (c *IPtablesProvider) GetIPTables() (iptablesClient, error) { + client, err := goiptables.New() + return client, errors.Wrap(err, "failed to get iptables client") +} + // nolint func (service *HTTPRestService) programSNATRules(req *cns.CreateNetworkContainerRequest) (types.ResponseCode, string) { service.Lock() defer service.Unlock() // Parse primary ip and ipnet from nnc - ncPrimaryIP, ncIPNet, _ := net.ParseCIDR(req.IPConfiguration.IPSubnet.IPAddress + "/" + fmt.Sprintf("%d", req.IPConfiguration.IPSubnet.PrefixLength)) - ipt, err := goiptables.New() + // in podsubnet case, ncPrimaryIP is the pod subnet's primary ip + // in vnet scale case, ncPrimaryIP is the node's ip + ncPrimaryIP, _, _ := net.ParseCIDR(req.IPConfiguration.IPSubnet.IPAddress + "/" + fmt.Sprintf("%d", req.IPConfiguration.IPSubnet.PrefixLength)) + ipt, err := service.iptables.GetIPTables() if err != nil { return types.UnexpectedError, fmt.Sprintf("[Azure CNS] Error. Failed to create iptables interface : %v", err) } @@ -56,41 +66,51 @@ func (service *HTTPRestService) programSNATRules(req *cns.CreateNetworkContainer } } - snatUDPRuleexist, err := ipt.Exists(iptables.Nat, SWIFT, "-m", "addrtype", "!", "--dst-type", "local", "-s", ncIPNet.String(), "-d", networkutils.AzureDNS, "-p", iptables.UDP, "--dport", strconv.Itoa(iptables.DNSPort), "-j", iptables.Snat, "--to", ncPrimaryIP.String()) - if err != nil { - return types.UnexpectedError, fmt.Sprintf("[Azure CNS] Error. 
Failed to check for existence of SNAT UDP rule : %v", err) - } - if !snatUDPRuleexist { - logger.Printf("[Azure CNS] Inserting SNAT UDP rule ...") - err = ipt.Insert(iptables.Nat, SWIFT, 1, "-m", "addrtype", "!", "--dst-type", "local", "-s", ncIPNet.String(), "-d", networkutils.AzureDNS, "-p", iptables.UDP, "--dport", strconv.Itoa(iptables.DNSPort), "-j", iptables.Snat, "--to", ncPrimaryIP.String()) + // use any secondary ip + the nnc prefix length to get an iptables rule to allow dns and imds traffic from the pods + for _, v := range req.SecondaryIPConfigs { + // put the ip address in standard cidr form (where we zero out the parts that are not relevant) + _, podSubnet, _ := net.ParseCIDR(v.IPAddress + "/" + fmt.Sprintf("%d", req.IPConfiguration.IPSubnet.PrefixLength)) + + snatUDPRuleExists, err := ipt.Exists(iptables.Nat, SWIFT, "-m", "addrtype", "!", "--dst-type", "local", "-s", podSubnet.String(), "-d", networkutils.AzureDNS, "-p", iptables.UDP, "--dport", strconv.Itoa(iptables.DNSPort), "-j", iptables.Snat, "--to", ncPrimaryIP.String()) if err != nil { - return types.FailedToRunIPTableCmd, "[Azure CNS] failed to inset SNAT UDP rule : " + err.Error() + return types.UnexpectedError, fmt.Sprintf("[Azure CNS] Error. Failed to check for existence of pod SNAT UDP rule : %v", err) + } + if !snatUDPRuleExists { + logger.Printf("[Azure CNS] Inserting pod SNAT UDP rule ...") + err = ipt.Insert(iptables.Nat, SWIFT, 1, "-m", "addrtype", "!", "--dst-type", "local", "-s", podSubnet.String(), "-d", networkutils.AzureDNS, "-p", iptables.UDP, "--dport", strconv.Itoa(iptables.DNSPort), "-j", iptables.Snat, "--to", ncPrimaryIP.String()) + if err != nil { + return types.FailedToRunIPTableCmd, "[Azure CNS] failed to insert pod SNAT UDP rule : " + err.Error() + } } - } - snatTCPRuleexist, err := ipt.Exists(iptables.Nat, SWIFT, "-m", "addrtype", "!", "--dst-type", "local", "-s", ncIPNet.String(), "-d", networkutils.AzureDNS, "-p", iptables.TCP, "--dport", strconv.Itoa(iptables.DNSPort), "-j", iptables.Snat, "--to", ncPrimaryIP.String()) - if err != nil { - return types.UnexpectedError, fmt.Sprintf("[Azure CNS] Error. Failed to check for existence of SNAT TCP rule : %v", err) - } - if !snatTCPRuleexist { - logger.Printf("[Azure CNS] Inserting SNAT TCP rule ...") - err = ipt.Insert(iptables.Nat, SWIFT, 1, "-m", "addrtype", "!", "--dst-type", "local", "-s", ncIPNet.String(), "-d", networkutils.AzureDNS, "-p", iptables.TCP, "--dport", strconv.Itoa(iptables.DNSPort), "-j", iptables.Snat, "--to", ncPrimaryIP.String()) + snatPodTCPRuleExists, err := ipt.Exists(iptables.Nat, SWIFT, "-m", "addrtype", "!", "--dst-type", "local", "-s", podSubnet.String(), "-d", networkutils.AzureDNS, "-p", iptables.TCP, "--dport", strconv.Itoa(iptables.DNSPort), "-j", iptables.Snat, "--to", ncPrimaryIP.String()) if err != nil { - return types.FailedToRunIPTableCmd, "[Azure CNS] failed to insert SNAT TCP rule : " + err.Error() + return types.UnexpectedError, fmt.Sprintf("[Azure CNS] Error. 
Failed to check for existence of pod SNAT TCP rule : %v", err) + } + if !snatPodTCPRuleExists { + logger.Printf("[Azure CNS] Inserting pod SNAT TCP rule ...") + err = ipt.Insert(iptables.Nat, SWIFT, 1, "-m", "addrtype", "!", "--dst-type", "local", "-s", podSubnet.String(), "-d", networkutils.AzureDNS, "-p", iptables.TCP, "--dport", strconv.Itoa(iptables.DNSPort), "-j", iptables.Snat, "--to", ncPrimaryIP.String()) + if err != nil { + return types.FailedToRunIPTableCmd, "[Azure CNS] failed to insert pod SNAT TCP rule : " + err.Error() + } } - } - snatIMDSRuleexist, err := ipt.Exists(iptables.Nat, SWIFT, "-m", "addrtype", "!", "--dst-type", "local", "-s", ncIPNet.String(), "-d", networkutils.AzureIMDS, "-p", iptables.TCP, "--dport", strconv.Itoa(iptables.HTTPPort), "-j", iptables.Snat, "--to", req.HostPrimaryIP) - if err != nil { - return types.UnexpectedError, fmt.Sprintf("[Azure CNS] Error. Failed to check for existence of SNAT IMDS rule : %v", err) - } - if !snatIMDSRuleexist { - logger.Printf("[Azure CNS] Inserting SNAT IMDS rule ...") - err = ipt.Insert(iptables.Nat, SWIFT, 1, "-m", "addrtype", "!", "--dst-type", "local", "-s", ncIPNet.String(), "-d", networkutils.AzureIMDS, "-p", iptables.TCP, "--dport", strconv.Itoa(iptables.HTTPPort), "-j", iptables.Snat, "--to", req.HostPrimaryIP) + snatIMDSRuleexist, err := ipt.Exists(iptables.Nat, SWIFT, "-m", "addrtype", "!", "--dst-type", "local", "-s", podSubnet.String(), "-d", networkutils.AzureIMDS, "-p", iptables.TCP, "--dport", strconv.Itoa(iptables.HTTPPort), "-j", iptables.Snat, "--to", req.HostPrimaryIP) if err != nil { - return types.FailedToRunIPTableCmd, "[Azure CNS] failed to insert SNAT IMDS rule : " + err.Error() + return types.UnexpectedError, fmt.Sprintf("[Azure CNS] Error. Failed to check for existence of pod SNAT IMDS rule : %v", err) + } + if !snatIMDSRuleexist { + logger.Printf("[Azure CNS] Inserting pod SNAT IMDS rule ...") + err = ipt.Insert(iptables.Nat, SWIFT, 1, "-m", "addrtype", "!", "--dst-type", "local", "-s", podSubnet.String(), "-d", networkutils.AzureIMDS, "-p", iptables.TCP, "--dport", strconv.Itoa(iptables.HTTPPort), "-j", iptables.Snat, "--to", req.HostPrimaryIP) + if err != nil { + return types.FailedToRunIPTableCmd, "[Azure CNS] failed to insert pod SNAT IMDS rule : " + err.Error() + } } + + // we only need to run this code once as the iptable rule applies to all secondary ip configs in the same subnet + break } + return types.Success, "" } diff --git a/cns/restserver/internalapi_linux_test.go b/cns/restserver/internalapi_linux_test.go new file mode 100644 index 0000000000..731ca4d989 --- /dev/null +++ b/cns/restserver/internalapi_linux_test.go @@ -0,0 +1,148 @@ +// Copyright 2020 Microsoft. All rights reserved. 
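The pod-subnet derivation in the loop above relies on a handy property of net.ParseCIDR: parsing any host address together with the NNC prefix length yields an *net.IPNet whose String() form has the host bits zeroed, which is exactly the source subnet the SNAT rules match on. A quick illustration (the address literal is taken from the test case below):

package main

import (
	"fmt"
	"net"
)

func main() {
	// A secondary (pod) IP joined with the NNC prefix length, as programSNATRules does.
	_, podSubnet, err := net.ParseCIDR("240.1.2.7/24")
	if err != nil {
		panic(err)
	}
	// The host bits are zeroed, leaving the subnet in standard CIDR form.
	fmt.Println(podSubnet.String()) // 240.1.2.0/24
}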
+// MIT License + +package restserver + +import ( + "strconv" + "testing" + + "github.com/Azure/azure-container-networking/cns" + "github.com/Azure/azure-container-networking/cns/fakes" + "github.com/Azure/azure-container-networking/cns/types" + "github.com/Azure/azure-container-networking/iptables" + "github.com/Azure/azure-container-networking/network/networkutils" +) + +type FakeIPTablesProvider struct { + iptables *fakes.IPTablesMock +} + +func (c *FakeIPTablesProvider) GetIPTables() (iptablesClient, error) { + // persist iptables in testing + if c.iptables == nil { + c.iptables = fakes.NewIPTablesMock() + } + return c.iptables, nil +} + +func TestAddSNATRules(t *testing.T) { + type expectedScenario struct { + table string + chain string + rule []string + } + + tests := []struct { + name string + input *cns.CreateNetworkContainerRequest + expected []expectedScenario + }{ + { + // in pod subnet, the primary nic ip is in the same address space as the pod subnet + name: "podsubnet", + input: &cns.CreateNetworkContainerRequest{ + NetworkContainerid: ncID, + IPConfiguration: cns.IPConfiguration{ + IPSubnet: cns.IPSubnet{ + IPAddress: "240.1.2.1", + PrefixLength: 24, + }, + }, + SecondaryIPConfigs: map[string]cns.SecondaryIPConfig{ + "abc": { + IPAddress: "240.1.2.7", + }, + }, + HostPrimaryIP: "10.0.0.4", + }, + expected: []expectedScenario{ + { + table: iptables.Nat, + chain: SWIFT, + rule: []string{ + "-m", "addrtype", "!", "--dst-type", "local", "-s", "240.1.2.0/24", "-d", + networkutils.AzureDNS, "-p", iptables.UDP, "--dport", strconv.Itoa(iptables.DNSPort), "-j", iptables.Snat, "--to", "240.1.2.1", + }, + }, + { + table: iptables.Nat, + chain: SWIFT, + rule: []string{ + "-m", "addrtype", "!", "--dst-type", "local", "-s", "240.1.2.0/24", "-d", + networkutils.AzureDNS, "-p", iptables.TCP, "--dport", strconv.Itoa(iptables.DNSPort), "-j", iptables.Snat, "--to", "240.1.2.1", + }, + }, + { + table: iptables.Nat, + chain: SWIFT, + rule: []string{ + "-m", "addrtype", "!", "--dst-type", "local", "-s", "240.1.2.0/24", "-d", + networkutils.AzureIMDS, "-p", iptables.TCP, "--dport", strconv.Itoa(iptables.HTTPPort), "-j", iptables.Snat, "--to", "10.0.0.4", + }, + }, + }, + }, + { + // in vnet scale, the primary nic ip becomes the node ip (diff address space from pod subnet) + name: "vnet scale", + input: &cns.CreateNetworkContainerRequest{ + NetworkContainerid: ncID, + IPConfiguration: cns.IPConfiguration{ + IPSubnet: cns.IPSubnet{ + IPAddress: "10.0.0.4", + PrefixLength: 28, + }, + }, + SecondaryIPConfigs: map[string]cns.SecondaryIPConfig{ + "abc": { + IPAddress: "240.1.2.15", + }, + }, + HostPrimaryIP: "10.0.0.4", + }, + expected: []expectedScenario{ + { + table: iptables.Nat, + chain: SWIFT, + rule: []string{ + "-m", "addrtype", "!", "--dst-type", "local", "-s", "240.1.2.0/28", "-d", + networkutils.AzureDNS, "-p", iptables.UDP, "--dport", strconv.Itoa(iptables.DNSPort), "-j", iptables.Snat, "--to", "10.0.0.4", + }, + }, + { + table: iptables.Nat, + chain: SWIFT, + rule: []string{ + "-m", "addrtype", "!", "--dst-type", "local", "-s", "240.1.2.0/28", "-d", + networkutils.AzureDNS, "-p", iptables.TCP, "--dport", strconv.Itoa(iptables.DNSPort), "-j", iptables.Snat, "--to", "10.0.0.4", + }, + }, + { + table: iptables.Nat, + chain: SWIFT, + rule: []string{ + "-m", "addrtype", "!", "--dst-type", "local", "-s", "240.1.2.0/28", "-d", + networkutils.AzureIMDS, "-p", iptables.TCP, "--dport", strconv.Itoa(iptables.HTTPPort), "-j", iptables.Snat, "--to", "10.0.0.4", + }, + }, + }, + }, + } + + for _, tt := 
range tests { + service := getTestService(cns.KubernetesCRD) + service.iptables = &FakeIPTablesProvider{} + resp, msg := service.programSNATRules(tt.input) + if resp != types.Success { + t.Fatal("failed to program snat rules", msg, " case: ", tt.name) + } + finalState, _ := service.iptables.GetIPTables() + for _, ex := range tt.expected { + exists, err := finalState.Exists(ex.table, ex.chain, ex.rule...) + if err != nil || !exists { + t.Fatal("rule not found", ex.rule, " case: ", tt.name) + } + } + } +} diff --git a/cns/restserver/internalapi_test.go b/cns/restserver/internalapi_test.go index 50fc5042a3..8d8ea18ebd 100644 --- a/cns/restserver/internalapi_test.go +++ b/cns/restserver/internalapi_test.go @@ -93,7 +93,7 @@ func TestReconcileNCStatePrimaryIPChangeShouldFail(t *testing.T) { } // now try to reconcile the state where the NC primary IP has changed - resp := svc.ReconcileIPAMState(ncReqs, map[string]cns.PodInfo{}, &v1alpha.NodeNetworkConfig{}) + resp := svc.ReconcileIPAMStateForSwift(ncReqs, map[string]cns.PodInfo{}, &v1alpha.NodeNetworkConfig{}) assert.Equal(t, types.PrimaryCANotSame, resp) } @@ -140,7 +140,7 @@ func TestReconcileNCStateGatewayChange(t *testing.T) { } // now try to reconcile the state where the NC gateway has changed - resp := svc.ReconcileIPAMState(ncReqs, map[string]cns.PodInfo{}, &v1alpha.NodeNetworkConfig{}) + resp := svc.ReconcileIPAMStateForSwift(ncReqs, map[string]cns.PodInfo{}, &v1alpha.NodeNetworkConfig{}) assert.Equal(t, types.Success, resp) // assert the new state reflects the gateway update @@ -337,7 +337,7 @@ func TestReconcileNCWithEmptyState(t *testing.T) { expectedNcCount := len(svc.state.ContainerStatus) expectedAssignedPods := make(map[string]cns.PodInfo) - returnCode := svc.ReconcileIPAMState(nil, expectedAssignedPods, &v1alpha.NodeNetworkConfig{ + returnCode := svc.ReconcileIPAMStateForSwift(nil, expectedAssignedPods, &v1alpha.NodeNetworkConfig{ Status: v1alpha.NodeNetworkConfigStatus{ Scaler: v1alpha.Scaler{ BatchSize: batchSize, @@ -387,7 +387,7 @@ func TestReconcileNCWithEmptyStateAndPendingRelease(t *testing.T) { return pendingIPs }() req := generateNetworkContainerRequest(secondaryIPConfigs, "reconcileNc1", "-1") - returnCode := svc.ReconcileIPAMState([]*cns.CreateNetworkContainerRequest{req}, expectedAssignedPods, &v1alpha.NodeNetworkConfig{ + returnCode := svc.ReconcileIPAMStateForSwift([]*cns.CreateNetworkContainerRequest{req}, expectedAssignedPods, &v1alpha.NodeNetworkConfig{ Spec: v1alpha.NodeNetworkConfigSpec{ IPsNotInUse: pending, }, @@ -434,7 +434,7 @@ func TestReconcileNCWithExistingStateAndPendingRelease(t *testing.T) { req := generateNetworkContainerRequest(secondaryIPConfigs, "reconcileNc1", "-1") expectedNcCount := len(svc.state.ContainerStatus) - returnCode := svc.ReconcileIPAMState([]*cns.CreateNetworkContainerRequest{req}, expectedAssignedPods, &v1alpha.NodeNetworkConfig{ + returnCode := svc.ReconcileIPAMStateForSwift([]*cns.CreateNetworkContainerRequest{req}, expectedAssignedPods, &v1alpha.NodeNetworkConfig{ Spec: v1alpha.NodeNetworkConfigSpec{ IPsNotInUse: maps.Keys(pendingIPIDs), }, @@ -471,7 +471,7 @@ func TestReconcileNCWithExistingState(t *testing.T) { } expectedNcCount := len(svc.state.ContainerStatus) - returnCode := svc.ReconcileIPAMState([]*cns.CreateNetworkContainerRequest{req}, expectedAssignedPods, &v1alpha.NodeNetworkConfig{ + returnCode := svc.ReconcileIPAMStateForSwift([]*cns.CreateNetworkContainerRequest{req}, expectedAssignedPods, &v1alpha.NodeNetworkConfig{ Status: v1alpha.NodeNetworkConfigStatus{ Scaler: 
v1alpha.Scaler{ BatchSize: batchSize, @@ -522,7 +522,7 @@ func TestReconcileCNSIPAMWithDualStackPods(t *testing.T) { ncReqs := []*cns.CreateNetworkContainerRequest{ipv4NC, ipv6NC} - returnCode := svc.ReconcileIPAMState(ncReqs, podByIP, &v1alpha.NodeNetworkConfig{ + returnCode := svc.ReconcileIPAMStateForSwift(ncReqs, podByIP, &v1alpha.NodeNetworkConfig{ Status: v1alpha.NodeNetworkConfigStatus{ Scaler: v1alpha.Scaler{ BatchSize: batchSize, @@ -570,7 +570,7 @@ func TestReconcileCNSIPAMWithMultipleIPsPerFamilyPerPod(t *testing.T) { ncReqs := []*cns.CreateNetworkContainerRequest{ipv4NC, ipv6NC} - returnCode := svc.ReconcileIPAMState(ncReqs, podByIP, &v1alpha.NodeNetworkConfig{ + returnCode := svc.ReconcileIPAMStateForSwift(ncReqs, podByIP, &v1alpha.NodeNetworkConfig{ Status: v1alpha.NodeNetworkConfigStatus{ Scaler: v1alpha.Scaler{ BatchSize: batchSize, @@ -602,12 +602,12 @@ func TestPodIPsIndexedByInterface(t *testing.T) { "fe80::7": cns.NewPodInfo("some-guid-2", "def-eth0", "reconcilePod2", "PodNS2"), }, expectedOutput: map[string]podIPs{ - "reconcilePod1:PodNS1": { + "abc-eth0": { PodInfo: cns.NewPodInfo("some-guid-1", "abc-eth0", "reconcilePod1", "PodNS1"), v4IP: net.IPv4(10, 0, 0, 6), v6IP: []byte{0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x06}, }, - "reconcilePod2:PodNS2": { + "def-eth0": { PodInfo: cns.NewPodInfo("some-guid-2", "def-eth0", "reconcilePod2", "PodNS2"), v4IP: net.IPv4(10, 0, 0, 7), v6IP: []byte{0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x07}, @@ -677,7 +677,6 @@ func TestReconcileNCWithExistingStateFromInterfaceID(t *testing.T) { setEnv(t) setOrchestratorTypeInternal(cns.KubernetesCRD) cns.GlobalPodInfoScheme = cns.InterfaceIDPodInfoScheme - defer func() { cns.GlobalPodInfoScheme = cns.KubernetesPodInfoScheme }() secondaryIPConfigs := make(map[string]cns.SecondaryIPConfig) @@ -697,7 +696,7 @@ func TestReconcileNCWithExistingStateFromInterfaceID(t *testing.T) { } expectedNcCount := len(svc.state.ContainerStatus) - returnCode := svc.ReconcileIPAMState([]*cns.CreateNetworkContainerRequest{req}, expectedAssignedPods, &v1alpha.NodeNetworkConfig{ + returnCode := svc.ReconcileIPAMStateForSwift([]*cns.CreateNetworkContainerRequest{req}, expectedAssignedPods, &v1alpha.NodeNetworkConfig{ Status: v1alpha.NodeNetworkConfigStatus{ Scaler: v1alpha.Scaler{ BatchSize: batchSize, @@ -716,52 +715,6 @@ func TestReconcileNCWithExistingStateFromInterfaceID(t *testing.T) { validateNCStateAfterReconcile(t, req, expectedNcCount+1, expectedAssignedPods, nil) } -func TestReconcileCNSIPAMWithKubePodInfoProvider(t *testing.T) { - restartService() - setEnv(t) - setOrchestratorTypeInternal(cns.KubernetesCRD) - - secondaryIPConfigs := make(map[string]cns.SecondaryIPConfig) - - startingIndex := 6 - for i := 0; i < 4; i++ { - ipaddress := "10.0.0." + strconv.Itoa(startingIndex) - secIpConfig := newSecondaryIPConfig(ipaddress, -1) - ipId := uuid.New() - secondaryIPConfigs[ipId.String()] = secIpConfig - startingIndex++ - } - req := generateNetworkContainerRequest(secondaryIPConfigs, uuid.New().String(), "-1") - - // the following pod info constructors leave container id and interface id blank on purpose. 
- // this is to simulate the information we get from the kube info provider - expectedAssignedPods := make(map[string]cns.PodInfo) - expectedAssignedPods["10.0.0.6"] = cns.NewPodInfo("", "", "customerpod1", "PodNS1") - - // allocate non-vnet IP for pod in host network - expectedAssignedPods["192.168.0.1"] = cns.NewPodInfo("", "", "systempod", "kube-system") - - expectedNcCount := len(svc.state.ContainerStatus) - returnCode := svc.ReconcileIPAMState([]*cns.CreateNetworkContainerRequest{req}, expectedAssignedPods, &v1alpha.NodeNetworkConfig{ - Status: v1alpha.NodeNetworkConfigStatus{ - Scaler: v1alpha.Scaler{ - BatchSize: batchSize, - ReleaseThresholdPercent: releasePercent, - RequestThresholdPercent: requestPercent, - }, - }, - Spec: v1alpha.NodeNetworkConfigSpec{ - RequestedIPCount: initPoolSize, - }, - }) - if returnCode != types.Success { - t.Errorf("Unexpected failure on reconcile with no state %d", returnCode) - } - - delete(expectedAssignedPods, "192.168.0.1") - validateNCStateAfterReconcile(t, req, expectedNcCount, expectedAssignedPods, nil) -} - func setOrchestratorTypeInternal(orchestratorType string) { fmt.Println("setOrchestratorTypeInternal") svc.state.OrchestratorType = orchestratorType diff --git a/cns/restserver/internalapi_windows.go b/cns/restserver/internalapi_windows.go index b82de16cb6..1bc9dab9a8 100644 --- a/cns/restserver/internalapi_windows.go +++ b/cns/restserver/internalapi_windows.go @@ -16,6 +16,14 @@ const ( pwshTimeout = 120 * time.Second ) +var errUnsupportedAPI = errors.New("unsupported api") + +type IPtablesProvider struct{} + +func (*IPtablesProvider) GetIPTables() (iptablesClient, error) { + return nil, errUnsupportedAPI +} + // nolint func (service *HTTPRestService) programSNATRules(req *cns.CreateNetworkContainerRequest) (types.ResponseCode, string) { return types.Success, "" diff --git a/cns/restserver/ipam.go b/cns/restserver/ipam.go index 6ee1965882..883d9aaef3 100644 --- a/cns/restserver/ipam.go +++ b/cns/restserver/ipam.go @@ -132,17 +132,22 @@ func (service *HTTPRestService) requestIPConfigHandlerHelperStandalone(ctx conte return &cns.IPConfigsResponse{}, fmt.Errorf("error getting orchestrator context from PodInfo %w", err) } cnsRequest := cns.GetNetworkContainerRequest{OrchestratorContext: orchestratorContext} + + // IMPORTANT: although SwiftV2 reuses the concept of NCs, NMAgent doesn't program NCs for SwiftV2, but + // instead programs NICs. When getting SwiftV2 NCs, we want the NIC type and MAC address of the NCs. + // TODO: we need another way to verify and sync NMAgent's NIC programming status. pending new NMAgent API or NIC programming status to be passed in the SwiftV2 create NC request. resp := service.getAllNetworkContainerResponses(cnsRequest) //nolint:contextcheck // not passed in any methods, appease linter // return err if returned list has no NCs if len(resp) == 0 { return &cns.IPConfigsResponse{ Response: cns.Response{ ReturnCode: types.FailedToAllocateIPConfig, - Message: fmt.Sprintf("AllocateIPConfig failed due to not getting NC Response from statefile, IP config request is %v", ipconfigsRequest), + Message: fmt.Sprintf("AllocateIPConfig failed due to not getting NC Response from statefile, IP config request is %+v", ipconfigsRequest), }, }, ErrGetAllNCResponseEmpty } + // assign NICType and MAC Address for SwiftV2. 
we assume that there won't be any SwiftV1 NCs here podIPInfoList := make([]cns.PodIpInfo, 0, len(resp)) for i := range resp { podIPInfo := cns.PodIpInfo{ @@ -166,7 +171,7 @@ func (service *HTTPRestService) requestIPConfigHandlerHelperStandalone(ctx conte return &cns.IPConfigsResponse{ Response: cns.Response{ ReturnCode: types.FailedToAllocateIPConfig, - Message: fmt.Sprintf("AllocateIPConfig failed while updating pod with interfaces: %v, IP config request is %v", err, ipconfigsRequest), + Message: fmt.Sprintf("AllocateIPConfig failed while updating pod with interfaces: %v, IP config request is %+v", err, ipconfigsRequest), }, }, err } @@ -191,10 +196,11 @@ func (service *HTTPRestService) updatePodInfoWithInterfaces(ctx context.Context, // RequestIPConfigHandler requests an IPConfig from the CNS state func (service *HTTPRestService) RequestIPConfigHandler(w http.ResponseWriter, r *http.Request) { + opName := "requestIPConfigHandler" + defer service.publishIPStateMetrics() var ipconfigRequest cns.IPConfigRequest err := common.Decode(w, r, &ipconfigRequest) - operationName := "requestIPConfigHandler" - logger.Request(service.Name+operationName, ipconfigRequest, err) + logger.Request(opName, ipconfigRequest, err) if err != nil { return } @@ -210,7 +216,7 @@ func (service *HTTPRestService) RequestIPConfigHandler(w http.ResponseWriter, r } w.Header().Set(cnsReturnCode, reserveResp.Response.ReturnCode.String()) err = common.Encode(w, &reserveResp) - logger.ResponseEx(service.Name+operationName, ipconfigRequest, reserveResp, reserveResp.Response.ReturnCode, err) + logger.ResponseEx(opName, ipconfigRequest, reserveResp, reserveResp.Response.ReturnCode, err) return } @@ -235,7 +241,7 @@ func (service *HTTPRestService) RequestIPConfigHandler(w http.ResponseWriter, r } w.Header().Set(cnsReturnCode, reserveResp.Response.ReturnCode.String()) err = common.Encode(w, &reserveResp) - logger.ResponseEx(service.Name+operationName, ipconfigsRequest, reserveResp, reserveResp.Response.ReturnCode, err) + logger.ResponseEx(opName, ipconfigsRequest, reserveResp, reserveResp.Response.ReturnCode, err) return } @@ -251,7 +257,7 @@ func (service *HTTPRestService) RequestIPConfigHandler(w http.ResponseWriter, r } w.Header().Set(cnsReturnCode, reserveResp.Response.ReturnCode.String()) err = common.Encode(w, &reserveResp) - logger.ResponseEx(service.Name+operationName, ipconfigRequest, reserveResp, reserveResp.Response.ReturnCode, err) + logger.ResponseEx(opName, ipconfigRequest, reserveResp, reserveResp.Response.ReturnCode, err) return } // As this API is expected to return IPConfigResponse, generate it from the IPConfigsResponse returned above. 
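The deferred metrics call threaded through these handlers is a deliberate pattern: publishing in a defer guarantees the IP-state gauges are refreshed on every exit path, including early error returns. A generic sketch of the shape (the service type and the metric call are stand-ins, not the CNS implementations):

package main

import "fmt"

type service struct{ assigned int }

// publishIPStateMetrics stands in for the CNS gauge publication.
func (s *service) publishIPStateMetrics() { fmt.Println("published; assigned =", s.assigned) }

// handle mimics a handler with several exit paths; the defer fires on all of them.
func (s *service) handle(fail bool) error {
	defer s.publishIPStateMetrics()
	if fail {
		return fmt.Errorf("early exit still publishes")
	}
	s.assigned++
	return nil
}

func main() {
	s := &service{}
	_ = s.handle(true)  // published; assigned = 0
	_ = s.handle(false) // published; assigned = 1
}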
@@ -261,15 +267,16 @@ func (service *HTTPRestService) RequestIPConfigHandler(w http.ResponseWriter, r } w.Header().Set(cnsReturnCode, reserveResp.Response.ReturnCode.String()) err = common.Encode(w, &reserveResp) - logger.ResponseEx(service.Name+operationName, ipconfigsRequest, reserveResp, reserveResp.Response.ReturnCode, err) + logger.ResponseEx(opName, ipconfigsRequest, reserveResp, reserveResp.Response.ReturnCode, err) } // RequestIPConfigsHandler requests multiple IPConfigs from the CNS state func (service *HTTPRestService) RequestIPConfigsHandler(w http.ResponseWriter, r *http.Request) { + opName := "requestIPConfigsHandler" + defer service.publishIPStateMetrics() var ipconfigsRequest cns.IPConfigsRequest err := common.Decode(w, r, &ipconfigsRequest) - operationName := "requestIPConfigsHandler" - logger.Request(service.Name+operationName, ipconfigsRequest, err) + logger.Request(opName, ipconfigsRequest, err) if err != nil { return } @@ -295,13 +302,13 @@ func (service *HTTPRestService) RequestIPConfigsHandler(w http.ResponseWriter, r if err != nil { w.Header().Set(cnsReturnCode, ipConfigsResp.Response.ReturnCode.String()) err = common.Encode(w, &ipConfigsResp) - logger.ResponseEx(service.Name+operationName, ipconfigsRequest, ipConfigsResp, ipConfigsResp.Response.ReturnCode, err) + logger.ResponseEx(opName, ipconfigsRequest, ipConfigsResp, ipConfigsResp.Response.ReturnCode, err) return } w.Header().Set(cnsReturnCode, ipConfigsResp.Response.ReturnCode.String()) err = common.Encode(w, &ipConfigsResp) - logger.ResponseEx(service.Name+operationName, ipconfigsRequest, ipConfigsResp, ipConfigsResp.Response.ReturnCode, err) + logger.ResponseEx(opName, ipconfigsRequest, ipConfigsResp, ipConfigsResp.Response.ReturnCode, err) } func (service *HTTPRestService) updateEndpointState(ipconfigsRequest cns.IPConfigsRequest, podInfo cns.PodInfo, podIPInfo []cns.PodIpInfo) error { @@ -409,9 +416,11 @@ func (service *HTTPRestService) ReleaseIPConfigHandlerHelper(ctx context.Context // ReleaseIPConfigHandler frees the IP assigned to a pod from CNS func (service *HTTPRestService) ReleaseIPConfigHandler(w http.ResponseWriter, r *http.Request) { + opName := "releaseIPConfigHandler" + defer service.publishIPStateMetrics() var ipconfigRequest cns.IPConfigRequest err := common.Decode(w, r, &ipconfigRequest) - logger.Request(service.Name+"releaseIPConfigHandler", ipconfigRequest, err) + logger.Request(opName, ipconfigRequest, err) if err != nil { resp := cns.Response{ ReturnCode: types.UnexpectedError, @@ -420,7 +429,7 @@ func (service *HTTPRestService) ReleaseIPConfigHandler(w http.ResponseWriter, r logger.Errorf("releaseIPConfigHandler decode failed becase %v, release IP config info %s", resp.Message, ipconfigRequest) w.Header().Set(cnsReturnCode, resp.ReturnCode.String()) err = common.Encode(w, &resp) - logger.ResponseEx(service.Name, ipconfigRequest, resp, resp.ReturnCode, err) + logger.ResponseEx(opName, ipconfigRequest, resp, resp.ReturnCode, err) return } @@ -434,7 +443,7 @@ func (service *HTTPRestService) ReleaseIPConfigHandler(w http.ResponseWriter, r } w.Header().Set(cnsReturnCode, reserveResp.Response.ReturnCode.String()) err = common.Encode(w, &reserveResp) - logger.ResponseEx(service.Name, ipconfigRequest, reserveResp, reserveResp.Response.ReturnCode, err) + logger.ResponseEx(opName, ipconfigRequest, reserveResp, reserveResp.Response.ReturnCode, err) return } @@ -452,19 +461,21 @@ func (service *HTTPRestService) ReleaseIPConfigHandler(w http.ResponseWriter, r if err != nil { 
w.Header().Set(cnsReturnCode, resp.Response.ReturnCode.String()) err = common.Encode(w, &resp) - logger.ResponseEx(service.Name, ipconfigRequest, resp, resp.Response.ReturnCode, err) + logger.ResponseEx(opName, ipconfigRequest, resp, resp.Response.ReturnCode, err) } w.Header().Set(cnsReturnCode, resp.Response.ReturnCode.String()) err = common.Encode(w, &resp) - logger.ResponseEx(service.Name, ipconfigRequest, resp, resp.Response.ReturnCode, err) + logger.ResponseEx(opName, ipconfigRequest, resp, resp.Response.ReturnCode, err) } // ReleaseIPConfigsHandler frees multiple IPConfigs from the CNS state func (service *HTTPRestService) ReleaseIPConfigsHandler(w http.ResponseWriter, r *http.Request) { + opName := "releaseIPConfigsHandler" + defer service.publishIPStateMetrics() var ipconfigsRequest cns.IPConfigsRequest err := common.Decode(w, r, &ipconfigsRequest) - logger.Request(service.Name+"releaseIPConfigsHandler", ipconfigsRequest, err) + logger.Request(opName, ipconfigsRequest, err) if err != nil { resp := cns.Response{ ReturnCode: types.UnexpectedError, @@ -473,7 +484,7 @@ func (service *HTTPRestService) ReleaseIPConfigsHandler(w http.ResponseWriter, r logger.Errorf("releaseIPConfigsHandler decode failed because %v, release IP config info %+v", resp.Message, ipconfigsRequest) w.Header().Set(cnsReturnCode, resp.ReturnCode.String()) err = common.Encode(w, &resp) - logger.ResponseEx(service.Name, ipconfigsRequest, resp, resp.ReturnCode, err) + logger.ResponseEx(opName, ipconfigsRequest, resp, resp.ReturnCode, err) return } @@ -481,12 +492,12 @@ func (service *HTTPRestService) ReleaseIPConfigsHandler(w http.ResponseWriter, r if err != nil { w.Header().Set(cnsReturnCode, resp.Response.ReturnCode.String()) err = common.Encode(w, &resp) - logger.ResponseEx(service.Name, ipconfigsRequest, resp, resp.Response.ReturnCode, err) + logger.ResponseEx(opName, ipconfigsRequest, resp, resp.Response.ReturnCode, err) } w.Header().Set(cnsReturnCode, resp.Response.ReturnCode.String()) err = common.Encode(w, &resp) - logger.ResponseEx(service.Name, ipconfigsRequest, resp, resp.Response.ReturnCode, err) + logger.ResponseEx(opName, ipconfigsRequest, resp, resp.Response.ReturnCode, err) } func (service *HTTPRestService) removeEndpointState(podInfo cns.PodInfo) error { @@ -511,6 +522,7 @@ func (service *HTTPRestService) removeEndpointState(podInfo cns.PodInfo) error { // MarkIPAsPendingRelease will set the IPs which are in PendingProgramming or Available to PendingRelease state // It will try to update [totalIpsToRelease] number of ips. func (service *HTTPRestService) MarkIPAsPendingRelease(totalIpsToRelease int) (map[string]cns.IPConfigurationStatus, error) { + defer service.publishIPStateMetrics() pendingReleasedIps := make(map[string]cns.IPConfigurationStatus) service.Lock() defer service.Unlock() @@ -556,6 +568,7 @@ func (service *HTTPRestService) MarkIPAsPendingRelease(totalIpsToRelease int) (m // and return an error. // MarkNIPsPendingRelease is a no-op if [n] is not a positive integer.
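+ // Like MarkIPAsPendingRelease above, it republishes the IP state gauges on return through the deferred publishIPStateMetrics call added in this patch.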
func (service *HTTPRestService) MarkNIPsPendingRelease(n int) (map[string]cns.IPConfigurationStatus, error) { + defer service.publishIPStateMetrics() service.Lock() defer service.Unlock() // try to release from PendingProgramming @@ -667,16 +680,18 @@ func (service *HTTPRestService) GetPodIPConfigState() map[string]cns.IPConfigura } func (service *HTTPRestService) HandleDebugPodContext(w http.ResponseWriter, r *http.Request) { //nolint + opName := "handleDebugPodContext" service.RLock() defer service.RUnlock() resp := cns.GetPodContextResponse{ PodContext: service.PodIPIDByPodInterfaceKey, } err := common.Encode(w, &resp) - logger.Response(service.Name, resp, resp.Response.ReturnCode, err) + logger.Response(opName, resp, resp.Response.ReturnCode, err) } func (service *HTTPRestService) HandleDebugRestData(w http.ResponseWriter, r *http.Request) { //nolint + opName := "handleDebugRestData" service.RLock() defer service.RUnlock() resp := GetHTTPServiceDataResponse{ @@ -686,10 +701,11 @@ func (service *HTTPRestService) HandleDebugRestData(w http.ResponseWriter, r *ht }, } err := common.Encode(w, &resp) - logger.Response(service.Name, resp, resp.Response.ReturnCode, err) + logger.Response(opName, resp, resp.Response.ReturnCode, err) } func (service *HTTPRestService) HandleDebugIPAddresses(w http.ResponseWriter, r *http.Request) { + opName := "handleDebugIPAddresses" var req cns.GetIPAddressesRequest if err := common.Decode(w, r, &req); err != nil { resp := cns.GetIPAddressStatusResponse{ @@ -699,7 +715,7 @@ func (service *HTTPRestService) HandleDebugIPAddresses(w http.ResponseWriter, r }, } err = common.Encode(w, &resp) - logger.ResponseEx(service.Name, req, resp, resp.Response.ReturnCode, err) + logger.ResponseEx(opName, req, resp, resp.Response.ReturnCode, err) return } // Get all IPConfigs matching a state and return in the response @@ -707,7 +723,7 @@ func (service *HTTPRestService) HandleDebugIPAddresses(w http.ResponseWriter, r IPConfigurationStatus: filter.MatchAnyIPConfigState(service.PodIPConfigState, filter.PredicatesForStates(req.IPConfigStateFilter...)...), } err := common.Encode(w, &resp) - logger.ResponseEx(service.Name, req, resp, resp.Response.ReturnCode, err) + logger.ResponseEx(opName, req, resp, resp.Response.ReturnCode, err) } // GetAssignedIPConfigs returns a filtered list of IPs which are in @@ -1088,6 +1104,7 @@ func validateDesiredIPAddresses(desiredIPs []string) error { // EndpointHandlerAPI forwards the endpoint related APIs to GetEndpointHandler or UpdateEndpointHandler based on the http method func (service *HTTPRestService) EndpointHandlerAPI(w http.ResponseWriter, r *http.Request) { + opName := "endpointHandler" logger.Printf("[EndpointHandlerAPI] EndpointHandlerAPI received request with http Method %s", r.Method) service.Lock() defer service.Unlock() @@ -1098,7 +1115,7 @@ func (service *HTTPRestService) EndpointHandlerAPI(w http.ResponseWriter, r *htt Message: fmt.Sprintf("[EndpointHandlerAPI] EndpointHandlerAPI failed with error: %s", ErrOptManageEndpointState), } err := common.Encode(w, &response) - logger.Response(service.Name, response, response.ReturnCode, err) + logger.Response(opName, response, response.ReturnCode, err) return } switch r.Method { @@ -1113,6 +1130,7 @@ func (service *HTTPRestService) EndpointHandlerAPI(w http.ResponseWriter, r *htt // GetEndpointHandler handles the incoming GetEndpoint requests with http Get method func (service *HTTPRestService) GetEndpointHandler(w http.ResponseWriter, r *http.Request) { + opName := "getEndpointState" 
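+ // the op name mirrors the "[GetEndpointState]" prefix used by this handler's logs rather than the handler's exported name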
logger.Printf("[GetEndpointState] GetEndpoint for %s", r.URL.Path) endpointID := strings.TrimPrefix(r.URL.Path, cns.EndpointPath) endpointInfo, err := service.GetEndpointHelper(endpointID) @@ -1134,7 +1152,7 @@ func (service *HTTPRestService) GetEndpointHandler(w http.ResponseWriter, r *htt } w.Header().Set(cnsReturnCode, response.Response.ReturnCode.String()) err = common.Encode(w, &response) - logger.Response(service.Name, response, response.Response.ReturnCode, err) + logger.Response(opName, response, response.Response.ReturnCode, err) return } response := GetEndpointResponse{ @@ -1146,7 +1164,7 @@ func (service *HTTPRestService) GetEndpointHandler(w http.ResponseWriter, r *htt } w.Header().Set(cnsReturnCode, response.Response.ReturnCode.String()) err = common.Encode(w, &response) - logger.Response(service.Name, response, response.Response.ReturnCode, err) + logger.Response(opName, response, response.Response.ReturnCode, err) } // GetEndpointHelper returns the state of the given endpointId @@ -1168,7 +1186,7 @@ func (service *HTTPRestService) GetEndpointHelper(endpointID string) (*EndpointI } else { logger.Errorf("[GetEndpointState] Failed to retrieve state, err:%v", err) } - return nil, errors.Wrap(err, "[GetEndpointState] Failed to retrieve state") + return nil, ErrEndpointStateNotFound } if endpointInfo, ok := service.EndpointState[endpointID]; ok { logger.Warnf("[GetEndpointState] Found existing endpoint state for container %s", endpointID) @@ -1187,12 +1205,13 @@ func (service *HTTPRestService) GetEndpointHelper(endpointID string) (*EndpointI // UpdateEndpointHandler handles the incoming UpdateEndpoint requests with http Patch method func (service *HTTPRestService) UpdateEndpointHandler(w http.ResponseWriter, r *http.Request) { + opName := "UpdateEndpointHandler" logger.Printf("[updateEndpoint] updateEndpoint for %s", r.URL.Path) var req map[string]*IPInfo err := common.Decode(w, r, &req) endpointID := strings.TrimPrefix(r.URL.Path, cns.EndpointPath) - logger.Request(service.Name, &req, err) + logger.Request(opName, &req, err) // Check if the request is valid if err != nil { response := cns.Response{ @@ -1201,7 +1220,7 @@ func (service *HTTPRestService) UpdateEndpointHandler(w http.ResponseWriter, r * } w.Header().Set(cnsReturnCode, response.ReturnCode.String()) err = common.Encode(w, &response) - logger.Response(service.Name, response, response.ReturnCode, err) + logger.Response(opName, response, response.ReturnCode, err) return } if err = verifyUpdateEndpointStateRequest(req); err != nil { @@ -1211,7 +1230,7 @@ func (service *HTTPRestService) UpdateEndpointHandler(w http.ResponseWriter, r * } w.Header().Set(cnsReturnCode, response.ReturnCode.String()) err = common.Encode(w, &response) - logger.Response(service.Name, response, response.ReturnCode, err) + logger.Response(opName, response, response.ReturnCode, err) return } // Update the endpoint state @@ -1223,7 +1242,7 @@ func (service *HTTPRestService) UpdateEndpointHandler(w http.ResponseWriter, r * } w.Header().Set(cnsReturnCode, response.ReturnCode.String()) err = common.Encode(w, &response) - logger.Response(service.Name, response, response.ReturnCode, err) + logger.Response(opName, response, response.ReturnCode, err) return } response := cns.Response{ @@ -1232,7 +1251,7 @@ func (service *HTTPRestService) UpdateEndpointHandler(w http.ResponseWriter, r * } w.Header().Set(cnsReturnCode, response.ReturnCode.String()) err = common.Encode(w, &response) - logger.Response(service.Name, response, response.ReturnCode, err) + 
logger.Response(opName, response, response.ReturnCode, err) } // UpdateEndpointHelper updates the state of the given endpointId with HNSId, VethName or other InterfaceInfo fields @@ -1241,20 +1260,25 @@ func (service *HTTPRestService) UpdateEndpointHelper(endpointID string, req map[ return ErrStoreEmpty } logger.Printf("[updateEndpoint] Updating endpoint state for infra container %s", endpointID) - if endpointInfo, ok := service.EndpointState[endpointID]; ok { - // Updating the InterfaceInfo map of endpoint states with the interfaceInfo map that is given by Stateless Azure CNI - for ifName, interfaceInfo := range req { - // updating the ipInfoMap - updateIPInfoMap(endpointInfo.IfnameToIPMap, interfaceInfo, ifName, endpointID) - } - err := service.EndpointStateStore.Write(EndpointStoreKey, service.EndpointState) - if err != nil { - return fmt.Errorf("[updateEndpoint] failed to write endpoint state to store for pod %s : %w", endpointInfo.PodName, err) - } - logger.Printf("[updateEndpoint] successfully write the state to the file %s", endpointID) - return nil + endpointInfo, endpointExist := service.EndpointState[endpointID] + // create a new entry in case the endpoint does not exist in the statefile. + // this applies to the ACI scenario, where the endpoint is not added to the statefile when the goalstate is sent to CNI + if !endpointExist { + logger.Printf("[updateEndpoint] endpoint could not be found in the statefile %s, new entry is being added", endpointID) + endpointInfo = &EndpointInfo{PodName: "", PodNamespace: "", IfnameToIPMap: make(map[string]*IPInfo)} + service.EndpointState[endpointID] = endpointInfo + } + // updating the InterfaceInfo map of endpoint states with the interfaceInfo map that is given by Stateless Azure CNI + for ifName, interfaceInfo := range req { + // updating the ipInfoMap + updateIPInfoMap(endpointInfo.IfnameToIPMap, interfaceInfo, ifName, endpointID) + } + err := service.EndpointStateStore.Write(EndpointStoreKey, service.EndpointState) + if err != nil { + return fmt.Errorf("[updateEndpoint] failed to write endpoint state to store for pod %s : %w", endpointInfo.PodName, err) } - return errors.New("[updateEndpoint] endpoint could not be found in the statefile") + logger.Printf("[updateEndpoint] successfully wrote the state to the file %s", endpointID) + return nil } // updateIPInfoMap updates the IfnameToIPMap of endpoint states with the interfaceInfo map that is given by Stateless Azure CNI diff --git a/cns/restserver/ipam_test.go b/cns/restserver/ipam_test.go index 0d9b7f5024..52c5b6d0d6 100644 --- a/cns/restserver/ipam_test.go +++ b/cns/restserver/ipam_test.go @@ -2,6 +2,7 @@ package restserver import ( "context" + "encoding/json" "fmt" "net" "net/netip" @@ -15,9 +16,11 @@ import ( "github.com/Azure/azure-container-networking/cns/middlewares" "github.com/Azure/azure-container-networking/cns/middlewares/mock" "github.com/Azure/azure-container-networking/cns/types" + nma "github.com/Azure/azure-container-networking/nmagent" "github.com/Azure/azure-container-networking/store" "github.com/pkg/errors" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) var ( @@ -70,13 +73,13 @@ type ncState struct { ips []string } -func getTestService() *HTTPRestService { +func getTestService(orchestratorType string) *HTTPRestService { var config common.ServiceConfig httpsvc, _ := NewHTTPRestService(&config, &fakes.WireserverClientFake{}, &fakes.WireserverProxyFake{}, - &fakes.NMAgentClientFake{}, store.NewMockStore(""), nil, nil, + &IPtablesProvider{},
&fakes.NMAgentClientFake{}, store.NewMockStore(""), nil, nil, fakes.NewMockIMDSClient()) svc = httpsvc - setOrchestratorTypeInternal(cns.KubernetesCRD) + setOrchestratorTypeInternal(orchestratorType) return httpsvc } @@ -88,7 +91,7 @@ func newSecondaryIPConfig(ipAddress string, ncVersion int) cns.SecondaryIPConfig } } -func NewPodState(ipaddress, id, ncid string, state types.IPState, ncVersion int) cns.IPConfigurationStatus { +func newPodState(ipaddress, id, ncid string, state types.IPState, ncVersion int) cns.IPConfigurationStatus { //nolint:unparam // ignore unused parameter ipconfig := newSecondaryIPConfig(ipaddress, ncVersion) status := &cns.IPConfigurationStatus{ IPAddress: ipconfig.IPAddress, @@ -116,7 +119,7 @@ func requestIPAddressAndGetState(t *testing.T, req cns.IPConfigsRequest) ([]cns. } // retrieve podinfo from orchestrator context - podInfo, err := cns.UnmarshalPodInfo(req.OrchestratorContext) + podInfo, err := cns.NewPodInfoFromIPConfigsRequest(req) if err != nil { return []cns.IPConfigurationStatus{}, errors.Wrap(err, "failed to unmarshal pod info") } @@ -128,7 +131,8 @@ func requestIPAddressAndGetState(t *testing.T, req cns.IPConfigsRequest) ([]cns. return ipConfigStatus, nil } -func NewPodStateWithOrchestratorContext(ipaddress, id, ncid string, state types.IPState, prefixLength uint8, ncVersion int, podInfo cns.PodInfo) (cns.IPConfigurationStatus, error) { +// nolint:unparam // ignore unused inputs +func newPodStateWithOrchestratorContext(ipaddress, id, ncid string, state types.IPState, _ uint8, ncVersion int, podInfo cns.PodInfo) (cns.IPConfigurationStatus, error) { ipconfig := newSecondaryIPConfig(ipaddress, ncVersion) status := &cns.IPConfigurationStatus{ IPAddress: ipconfig.IPAddress, @@ -141,7 +145,8 @@ func NewPodStateWithOrchestratorContext(ipaddress, id, ncid string, state types. 
} // Test function to populate the IPConfigState -func UpdatePodIPConfigState(t *testing.T, svc *HTTPRestService, ipconfigs map[string]cns.IPConfigurationStatus, ncID string) error { +// nolint: unparam // ignore unused return +func updatePodIPConfigState(t *testing.T, svc *HTTPRestService, ipconfigs map[string]cns.IPConfigurationStatus, ncID string) error { // Create the NC secondaryIPConfigs := make(map[string]cns.SecondaryIPConfig) // Get each of the ipconfigs associated with that NC @@ -183,7 +188,7 @@ func TestEndpointStateReadAndWriteSingleNC(t *testing.T) { }, }, } - EndpointStateReadAndWrite(t, ncStates) + endpointStateReadAndWrite(t, ncStates) } // create an endpoint with one IP from each NC @@ -202,17 +207,17 @@ func TestEndpointStateReadAndWriteMultipleNCs(t *testing.T) { }, }, } - EndpointStateReadAndWrite(t, ncStates) + endpointStateReadAndWrite(t, ncStates) } // Tests the creation of an endpoint using the NCs and IPs as input and then tests the deletion of that endpoint -func EndpointStateReadAndWrite(t *testing.T, ncStates []ncState) { - svc := getTestService() +func endpointStateReadAndWrite(t *testing.T, ncStates []ncState) { + svc := getTestService(cns.KubernetesCRD) ipconfigs := make(map[string]cns.IPConfigurationStatus, 0) for i := range ncStates { - state := NewPodState(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Available, 0) + state := newPodState(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Available, 0) ipconfigs[state.ID] = state - err := UpdatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) + err := updatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) if err != nil { t.Fatalf("Expected to not fail update service with config: %+v", err) } @@ -286,7 +291,7 @@ func TestIPAMGetAvailableIPConfigSingleNC(t *testing.T) { }, }, } - IPAMGetAvailableIPConfig(t, ncStates) + ipamGetAvailableIPConfig(t, ncStates) } // assign one IP per NC to the pod @@ -305,18 +310,18 @@ func TestIPAMGetAvailableIPConfigMultipleNCs(t *testing.T) { }, }, } - IPAMGetAvailableIPConfig(t, nsStates) + ipamGetAvailableIPConfig(t, nsStates) } // Add one IP per NC to the pool and request those IPs -func IPAMGetAvailableIPConfig(t *testing.T, ncStates []ncState) { - svc := getTestService() +func ipamGetAvailableIPConfig(t *testing.T, ncStates []ncState) { + svc := getTestService(cns.KubernetesCRD) for i := range ncStates { ipconfigs := make(map[string]cns.IPConfigurationStatus, 0) - state := NewPodState(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Available, 0) + state := newPodState(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Available, 0) ipconfigs[state.ID] = state - err := UpdatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) + err := updatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) if err != nil { t.Fatalf("Expected to not fail adding IPs to state: %+v", err) } @@ -336,7 +341,7 @@ func IPAMGetAvailableIPConfig(t *testing.T, ncStates []ncState) { desiredState := make([]cns.IPConfigurationStatus, len(ncStates)) for i := range ncStates { - desiredState[i] = NewPodState(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Assigned, 0) + desiredState[i] = newPodState(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Assigned, 0) desiredState[i].PodInfo = testPod1Info } @@ -364,7 +369,7 @@ func TestIPAMGetNextAvailableIPConfigSingleNC(t *testing.T) { }, }, } - IPAMGetNextAvailableIPConfig(t, ncStates) + getNextAvailableIPConfig(t, ncStates) } func TestIPAMGetNextAvailableIPConfigMultipleNCs(t *testing.T) { @@ 
-384,22 +389,22 @@ func TestIPAMGetNextAvailableIPConfigMultipleNCs(t *testing.T) { }, }, } - IPAMGetNextAvailableIPConfig(t, ncStates) + getNextAvailableIPConfig(t, ncStates) } // First IP is already assigned to a pod, want second IP -func IPAMGetNextAvailableIPConfig(t *testing.T, ncStates []ncState) { - svc := getTestService() +func getNextAvailableIPConfig(t *testing.T, ncStates []ncState) { + svc := getTestService(cns.KubernetesCRD) // Add already assigned pod ip to state for i := range ncStates { ipconfigs := make(map[string]cns.IPConfigurationStatus, 0) svc.PodIPIDByPodInterfaceKey[testPod1Info.Key()] = append(svc.PodIPIDByPodInterfaceKey[testPod1Info.Key()], ncStates[i].ips[0]) - state1, _ := NewPodStateWithOrchestratorContext(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Assigned, prefixes[i], 0, testPod1Info) - state2 := NewPodState(ncStates[i].ips[1], ipIDs[i][1], ncStates[i].ncID, types.Available, 0) + state1, _ := newPodStateWithOrchestratorContext(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Assigned, prefixes[i], 0, testPod1Info) + state2 := newPodState(ncStates[i].ips[1], ipIDs[i][1], ncStates[i].ncID, types.Available, 0) ipconfigs[state1.ID] = state1 ipconfigs[state2.ID] = state2 - err := UpdatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) + err := updatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) if err != nil { t.Fatalf("Expected to not fail adding IPs to state: %+v", err) } @@ -419,7 +424,7 @@ func IPAMGetNextAvailableIPConfig(t *testing.T, ncStates []ncState) { // want second available Pod IP State as first has been assigned desiredState := make([]cns.IPConfigurationStatus, len(ncStates)) for i := range ncStates { - state, _ := NewPodStateWithOrchestratorContext(ncStates[i].ips[1], ipIDs[i][1], ncStates[i].ncID, types.Assigned, prefixes[i], 0, testPod2Info) + state, _ := newPodStateWithOrchestratorContext(ncStates[i].ips[1], ipIDs[i][1], ncStates[i].ncID, types.Assigned, prefixes[i], 0, testPod2Info) desiredState[i] = state } @@ -446,7 +451,7 @@ func TestIPAMGetAlreadyAssignedIPConfigForSamePodSingleNC(t *testing.T) { }, }, } - IPAMGetAlreadyAssignedIPConfigForSamePod(t, ncStates) + ipamGetAlreadyAssignedIPConfigForSamePod(t, ncStates) } func TestIPAMGetAlreadyAssignedIPConfigForSamePodMultipleNCs(t *testing.T) { @@ -464,18 +469,18 @@ func TestIPAMGetAlreadyAssignedIPConfigForSamePodMultipleNCs(t *testing.T) { }, }, } - IPAMGetAlreadyAssignedIPConfigForSamePod(t, ncStates) + ipamGetAlreadyAssignedIPConfigForSamePod(t, ncStates) } -func IPAMGetAlreadyAssignedIPConfigForSamePod(t *testing.T, ncStates []ncState) { - svc := getTestService() +func ipamGetAlreadyAssignedIPConfigForSamePod(t *testing.T, ncStates []ncState) { + svc := getTestService(cns.KubernetesCRD) // Add Assigned Pod IP to state for i := range ncStates { ipconfigs := make(map[string]cns.IPConfigurationStatus, 0) - state, _ := NewPodStateWithOrchestratorContext(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Assigned, prefixes[i], 0, testPod1Info) + state, _ := newPodStateWithOrchestratorContext(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Assigned, prefixes[i], 0, testPod1Info) ipconfigs[state.ID] = state - err := UpdatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) + err := updatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) if err != nil { t.Fatalf("Expected to not fail adding IPs to state: %+v", err) } @@ -494,7 +499,7 @@ func IPAMGetAlreadyAssignedIPConfigForSamePod(t *testing.T, ncStates []ncState) } desiredState := 
make([]cns.IPConfigurationStatus, len(ncStates)) for i := range ncStates { - state, _ := NewPodStateWithOrchestratorContext(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Assigned, prefixes[i], 0, testPod1Info) + state, _ := newPodStateWithOrchestratorContext(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Assigned, prefixes[i], 0, testPod1Info) desiredState[i] = state } @@ -522,7 +527,7 @@ func TestIPAMAttemptToRequestIPNotFoundInPoolSingleNC(t *testing.T) { }, }, } - IPAMAttemptToRequestIPNotFoundInPool(t, ncStates) + ipamAttemptToRequestIPNotFoundInPool(t, ncStates) } func TestIPAMAttemptToRequestIPNotFoundInPoolMultipleNCs(t *testing.T) { @@ -542,18 +547,18 @@ func TestIPAMAttemptToRequestIPNotFoundInPoolMultipleNCs(t *testing.T) { }, }, } - IPAMAttemptToRequestIPNotFoundInPool(t, ncStates) + ipamAttemptToRequestIPNotFoundInPool(t, ncStates) } -func IPAMAttemptToRequestIPNotFoundInPool(t *testing.T, ncStates []ncState) { - svc := getTestService() +func ipamAttemptToRequestIPNotFoundInPool(t *testing.T, ncStates []ncState) { + svc := getTestService(cns.KubernetesCRD) // Add Available Pod IP to state for i := range ncStates { ipconfigs := make(map[string]cns.IPConfigurationStatus, 0) - state := NewPodState(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Available, 0) + state := newPodState(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Available, 0) ipconfigs[state.ID] = state - err := UpdatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) + err := updatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) if err != nil { t.Fatalf("Expected to not fail adding IPs to state: %+v", err) } @@ -584,7 +589,7 @@ func TestIPAMGetDesiredIPConfigWithSpecfiedIPSingleNC(t *testing.T) { }, }, } - IPAMGetDesiredIPConfigWithSpecfiedIP(t, ncStates) + ipamGetDesiredIPConfigWithSpecfiedIP(t, ncStates) } func TestIPAMGetDesiredIPConfigWithSpecfiedIPMultipleNCs(t *testing.T) { @@ -602,18 +607,18 @@ func TestIPAMGetDesiredIPConfigWithSpecfiedIPMultipleNCs(t *testing.T) { }, }, } - IPAMGetDesiredIPConfigWithSpecfiedIP(t, ncStates) + ipamGetDesiredIPConfigWithSpecfiedIP(t, ncStates) } -func IPAMGetDesiredIPConfigWithSpecfiedIP(t *testing.T, ncStates []ncState) { - svc := getTestService() +func ipamGetDesiredIPConfigWithSpecfiedIP(t *testing.T, ncStates []ncState) { + svc := getTestService(cns.KubernetesCRD) // Add Available Pod IP to state for i := range ncStates { ipconfigs := make(map[string]cns.IPConfigurationStatus, 0) - state := NewPodState(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Available, 0) + state := newPodState(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Available, 0) ipconfigs[state.ID] = state - err := UpdatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) + err := updatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) if err != nil { t.Fatalf("Expected to not fail adding IPs to state: %+v", err) } @@ -636,7 +641,7 @@ func IPAMGetDesiredIPConfigWithSpecfiedIP(t *testing.T, ncStates []ncState) { desiredState := make([]cns.IPConfigurationStatus, len(ncStates)) for i := range ncStates { - desiredState[i] = NewPodState(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Assigned, 0) + desiredState[i] = newPodState(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Assigned, 0) desiredState[i].PodInfo = testPod1Info } @@ -663,7 +668,7 @@ func TestIPAMFailToGetDesiredIPConfigWithAlreadyAssignedSpecfiedIPSingleNC(t *te }, }, } - IPAMFailToGetDesiredIPConfigWithAlreadyAssignedSpecfiedIP(t, ncStates) + 
ipamFailToGetDesiredIPConfigWithAlreadyAssignedSpecfiedIP(t, ncStates) } func TestIPAMFailToGetDesiredIPConfigWithAlreadyAssignedSpecfiedIPMultipleNCs(t *testing.T) { @@ -681,18 +686,18 @@ func TestIPAMFailToGetDesiredIPConfigWithAlreadyAssignedSpecfiedIPMultipleNCs(t }, }, } - IPAMFailToGetDesiredIPConfigWithAlreadyAssignedSpecfiedIP(t, ncStates) + ipamFailToGetDesiredIPConfigWithAlreadyAssignedSpecfiedIP(t, ncStates) } -func IPAMFailToGetDesiredIPConfigWithAlreadyAssignedSpecfiedIP(t *testing.T, ncStates []ncState) { - svc := getTestService() +func ipamFailToGetDesiredIPConfigWithAlreadyAssignedSpecfiedIP(t *testing.T, ncStates []ncState) { + svc := getTestService(cns.KubernetesCRD) // set state as already assigned ipconfigs := make(map[string]cns.IPConfigurationStatus, 0) for i := range ncStates { - state, _ := NewPodStateWithOrchestratorContext(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Assigned, prefixes[i], 0, testPod1Info) + state, _ := newPodStateWithOrchestratorContext(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Assigned, prefixes[i], 0, testPod1Info) ipconfigs[state.ID] = state - err := UpdatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) + err := updatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) if err != nil { t.Fatalf("Expected to not fail adding IPs to state: %+v", err) } @@ -725,7 +730,7 @@ func TestIPAMFailToGetIPWhenAllIPsAreAssignedSingleNC(t *testing.T) { }, }, } - IPAMFailToGetIPWhenAllIPsAreAssigned(t, ncStates) + ipamFailToGetIPWhenAllIPsAreAssigned(t, ncStates) } func TestIPAMFailToGetIPWhenAllIPsAreAssignedMultipleNCs(t *testing.T) { @@ -745,20 +750,20 @@ func TestIPAMFailToGetIPWhenAllIPsAreAssignedMultipleNCs(t *testing.T) { }, }, } - IPAMFailToGetIPWhenAllIPsAreAssigned(t, ncStates) + ipamFailToGetIPWhenAllIPsAreAssigned(t, ncStates) } -func IPAMFailToGetIPWhenAllIPsAreAssigned(t *testing.T, ncStates []ncState) { - svc := getTestService() +func ipamFailToGetIPWhenAllIPsAreAssigned(t *testing.T, ncStates []ncState) { + svc := getTestService(cns.KubernetesCRD) ipconfigs := make(map[string]cns.IPConfigurationStatus, 0) // Add already assigned pod ip to state for i := range ncStates { - state1, _ := NewPodStateWithOrchestratorContext(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Assigned, prefixes[i], 0, testPod1Info) - state2, _ := NewPodStateWithOrchestratorContext(ncStates[i].ips[1], ipIDs[i][1], ncStates[i].ncID, types.Assigned, prefixes[i], 0, testPod2Info) + state1, _ := newPodStateWithOrchestratorContext(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Assigned, prefixes[i], 0, testPod1Info) + state2, _ := newPodStateWithOrchestratorContext(ncStates[i].ips[1], ipIDs[i][1], ncStates[i].ncID, types.Assigned, prefixes[i], 0, testPod2Info) ipconfigs[state1.ID] = state1 ipconfigs[state2.ID] = state2 - err := UpdatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) + err := updatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) if err != nil { t.Fatalf("Expected to not fail adding IPs to state: %+v", err) } @@ -784,7 +789,7 @@ func TestIPAMRequestThenReleaseThenRequestAgainSingleNC(t *testing.T) { }, }, } - IPAMRequestThenReleaseThenRequestAgain(t, ncStates) + ipamRequestThenReleaseThenRequestAgain(t, ncStates) } func TestIPAMRequestThenReleaseThenRequestAgainMultipleNCs(t *testing.T) { @@ -802,22 +807,22 @@ func TestIPAMRequestThenReleaseThenRequestAgainMultipleNCs(t *testing.T) { }, }, } - IPAMRequestThenReleaseThenRequestAgain(t, ncStates) + ipamRequestThenReleaseThenRequestAgain(t, ncStates) 
} // 10.0.0.1 = PodInfo1 // Request 10.0.0.1 with PodInfo2 (Fail) // Release PodInfo1 // Request 10.0.0.1 with PodInfo2 (Success) -func IPAMRequestThenReleaseThenRequestAgain(t *testing.T, ncStates []ncState) { - svc := getTestService() +func ipamRequestThenReleaseThenRequestAgain(t *testing.T, ncStates []ncState) { + svc := getTestService(cns.KubernetesCRD) // set state as already assigned for i := range ncStates { ipconfigs := make(map[string]cns.IPConfigurationStatus, 0) - state, _ := NewPodStateWithOrchestratorContext(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Assigned, prefixes[i], 0, testPod1Info) + state, _ := newPodStateWithOrchestratorContext(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Assigned, prefixes[i], 0, testPod1Info) ipconfigs[state.ID] = state - err := UpdatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) + err := updatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) if err != nil { t.Fatalf("Expected to not fail adding IPs to state: %+v", err) } @@ -863,7 +868,7 @@ func IPAMRequestThenReleaseThenRequestAgain(t *testing.T, ncStates []ncState) { desiredState := make([]cns.IPConfigurationStatus, len(ncStates)) for i := range ncStates { - state, _ := NewPodStateWithOrchestratorContext(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Assigned, prefixes[i], 0, testPod1Info) + state, _ := newPodStateWithOrchestratorContext(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Assigned, prefixes[i], 0, testPod1Info) // want first available Pod IP State desiredState[i] = state desiredState[i].IPAddress = ncStates[i].ips[0] @@ -893,7 +898,7 @@ func TestIPAMReleaseIPIdempotencySingleNC(t *testing.T) { }, }, } - IPAMReleaseIPIdempotency(t, ncStates) + ipamReleaseIPIdempotency(t, ncStates) } func TestIPAMReleaseIPIdempotencyMultipleNCs(t *testing.T) { @@ -911,17 +916,17 @@ func TestIPAMReleaseIPIdempotencyMultipleNCs(t *testing.T) { }, }, } - IPAMReleaseIPIdempotency(t, ncStates) + ipamReleaseIPIdempotency(t, ncStates) } -func IPAMReleaseIPIdempotency(t *testing.T, ncStates []ncState) { - svc := getTestService() +func ipamReleaseIPIdempotency(t *testing.T, ncStates []ncState) { + svc := getTestService(cns.KubernetesCRD) // set state as already assigned ipconfigs := make(map[string]cns.IPConfigurationStatus, 0) for i := range ncStates { - state, _ := NewPodStateWithOrchestratorContext(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Assigned, prefixes[i], 0, testPod1Info) + state, _ := newPodStateWithOrchestratorContext(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Assigned, prefixes[i], 0, testPod1Info) ipconfigs[state.ID] = state - err := UpdatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) + err := updatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) if err != nil { t.Fatalf("Expected to not fail adding IPs to state: %+v", err) } @@ -949,7 +954,7 @@ func TestIPAMAllocateIPIdempotencySingleNC(t *testing.T) { }, }, } - IPAMAllocateIPIdempotency(t, ncStates) + ipamAllocateIPIdempotency(t, ncStates) } func TestIPAMAllocateIPIdempotencyMultipleNCs(t *testing.T) { @@ -967,22 +972,22 @@ func TestIPAMAllocateIPIdempotencyMultipleNCs(t *testing.T) { }, }, } - IPAMAllocateIPIdempotency(t, ncStates) + ipamAllocateIPIdempotency(t, ncStates) } -func IPAMAllocateIPIdempotency(t *testing.T, ncStates []ncState) { - svc := getTestService() +func ipamAllocateIPIdempotency(t *testing.T, ncStates []ncState) { + svc := getTestService(cns.KubernetesCRD) // set state as already assigned ipconfigs := 
make(map[string]cns.IPConfigurationStatus, 0) for i := range ncStates { - state, _ := NewPodStateWithOrchestratorContext(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Assigned, prefixes[i], 0, testPod1Info) + state, _ := newPodStateWithOrchestratorContext(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Assigned, prefixes[i], 0, testPod1Info) ipconfigs[state.ID] = state - err := UpdatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) + err := updatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) if err != nil { t.Fatalf("Expected to not fail adding IPs to state: %+v", err) } - err = UpdatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) + err = updatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) if err != nil { t.Fatalf("Expected to not fail adding IPs to state: %+v", err) } @@ -1000,7 +1005,7 @@ func TestAvailableIPConfigsSingleNC(t *testing.T) { }, }, } - AvailableIPConfigs(t, ncStates) + availableIPConfigs(t, ncStates) } func TestAvailableIPConfigsMultipleNCs(t *testing.T) { @@ -1022,24 +1027,24 @@ func TestAvailableIPConfigsMultipleNCs(t *testing.T) { }, }, } - AvailableIPConfigs(t, ncStates) + availableIPConfigs(t, ncStates) } -func AvailableIPConfigs(t *testing.T, ncStates []ncState) { - svc := getTestService() +func availableIPConfigs(t *testing.T, ncStates []ncState) { + svc := getTestService(cns.KubernetesCRD) IDsToBeDeleted := make([]string, len(ncStates)) ipconfigs := make(map[string]cns.IPConfigurationStatus, 0) // Add already assigned pod ip to state for i := range ncStates { - state1 := NewPodState(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Available, 0) - state2 := NewPodState(ncStates[i].ips[1], ipIDs[i][1], ncStates[i].ncID, types.Available, 0) - state3 := NewPodState(ncStates[i].ips[2], ipIDs[i][2], ncStates[i].ncID, types.Available, 0) + state1 := newPodState(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Available, 0) + state2 := newPodState(ncStates[i].ips[1], ipIDs[i][1], ncStates[i].ncID, types.Available, 0) + state3 := newPodState(ncStates[i].ips[2], ipIDs[i][2], ncStates[i].ncID, types.Available, 0) IDsToBeDeleted[i] = state1.ID ipconfigs[state1.ID] = state1 ipconfigs[state2.ID] = state2 ipconfigs[state3.ID] = state3 - err := UpdatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) + err := updatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) if err != nil { t.Fatalf("Expected to not fail adding IPs to state: %+v", err) } @@ -1078,7 +1083,7 @@ func AvailableIPConfigs(t *testing.T, ncStates []ncState) { validateIpState(t, availableIps, desiredAvailableIps) for i := range ncStates { - desiredState := NewPodState(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Assigned, 0) + desiredState := newPodState(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Assigned, 0) desiredState.PodInfo = testPod1Info desiredAssignedIPConfigs[desiredState.ID] = desiredState } @@ -1117,7 +1122,7 @@ func TestIPAMMarkIPCountAsPendingSingleNC(t *testing.T) { }, }, } - IPAMMarkIPCountAsPending(t, ncStates) + ipamMarkIPCountAsPending(t, ncStates) } func TestIPAMMarkIPCountAsPendingMultipleNCs(t *testing.T) { @@ -1135,17 +1140,17 @@ func TestIPAMMarkIPCountAsPendingMultipleNCs(t *testing.T) { }, }, } - IPAMMarkIPCountAsPending(t, ncStates) + ipamMarkIPCountAsPending(t, ncStates) } -func IPAMMarkIPCountAsPending(t *testing.T, ncStates []ncState) { - svc := getTestService() +func ipamMarkIPCountAsPending(t *testing.T, ncStates []ncState) { + svc := getTestService(cns.KubernetesCRD) // set state as already 
assigned ipconfigs := make(map[string]cns.IPConfigurationStatus, 0) for i := range ncStates { - state, _ := NewPodStateWithOrchestratorContext(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Available, prefixes[i], 0, testPod1Info) + state, _ := newPodStateWithOrchestratorContext(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Available, prefixes[i], 0, testPod1Info) ipconfigs[state.ID] = state - err := UpdatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) + err := updatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) if err != nil { t.Fatalf("Expected to not fail adding IPs to state: %+v", err) } @@ -1188,7 +1193,7 @@ func IPAMMarkIPCountAsPending(t *testing.T, ncStates []ncState) { } func TestIPAMMarkIPAsPendingWithPendingProgrammingIPs(t *testing.T) { - svc := getTestService() + svc := getTestService(cns.KubernetesCRD) secondaryIPConfigs := make(map[string]cns.SecondaryIPConfig) // Default Programmed NC version is -1, set nc version as 0 will result in pending programming state. @@ -1277,7 +1282,7 @@ func TestIPAMMarkExistingIPConfigAsPendingSingleNC(t *testing.T) { }, }, } - IPAMMarkExistingIPConfigAsPending(t, ncStates) + ipamMarkExistingIPConfigAsPending(t, ncStates) } func TestIPAMMarkExistingIPConfigAsPendingMultipleNCs(t *testing.T) { @@ -1297,21 +1302,21 @@ func TestIPAMMarkExistingIPConfigAsPendingMultipleNCs(t *testing.T) { }, }, } - IPAMMarkExistingIPConfigAsPending(t, ncStates) + ipamMarkExistingIPConfigAsPending(t, ncStates) } -func IPAMMarkExistingIPConfigAsPending(t *testing.T, ncStates []ncState) { - svc := getTestService() +func ipamMarkExistingIPConfigAsPending(t *testing.T, ncStates []ncState) { + svc := getTestService(cns.KubernetesCRD) // Add already assigned pod ip to state for i := range ncStates { ipconfigs := make(map[string]cns.IPConfigurationStatus, 0) svc.PodIPIDByPodInterfaceKey[testPod1Info.Key()] = append(svc.PodIPIDByPodInterfaceKey[testPod1Info.Key()], ncStates[i].ips[0]) - state1, _ := NewPodStateWithOrchestratorContext(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Assigned, prefixes[i], 0, testPod1Info) - state2 := NewPodState(ncStates[i].ips[1], ipIDs[i][1], ncStates[i].ncID, types.Available, 0) + state1, _ := newPodStateWithOrchestratorContext(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Assigned, prefixes[i], 0, testPod1Info) + state2 := newPodState(ncStates[i].ips[1], ipIDs[i][1], ncStates[i].ncID, types.Available, 0) ipconfigs[state1.ID] = state1 ipconfigs[state2.ID] = state2 - err := UpdatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) + err := updatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) if err != nil { t.Fatalf("Expected to not fail adding IPs to state: %+v", err) } @@ -1363,7 +1368,7 @@ func IPAMMarkExistingIPConfigAsPending(t *testing.T, ncStates []ncState) { } func TestIPAMFailToRequestIPsWithNoNCsSpecificIP(t *testing.T) { - svc := getTestService() + svc := getTestService(cns.KubernetesCRD) req := cns.IPConfigsRequest{ PodInterfaceID: testPod1Info.InterfaceID(), InfraContainerID: testPod1Info.InfraContainerID(), @@ -1381,7 +1386,7 @@ func TestIPAMFailToRequestIPsWithNoNCsSpecificIP(t *testing.T) { } func TestIPAMFailToRequestIPsWithNoNCsAnyIP(t *testing.T) { - svc := getTestService() + svc := getTestService(cns.KubernetesCRD) req := cns.IPConfigsRequest{ PodInterfaceID: testPod1Info.InterfaceID(), InfraContainerID: testPod1Info.InfraContainerID(), @@ -1397,20 +1402,20 @@ func TestIPAMFailToRequestIPsWithNoNCsAnyIP(t *testing.T) { } func 
TestIPAMReleaseOneIPWhenExpectedToHaveTwo(t *testing.T) { - svc := getTestService() + svc := getTestService(cns.KubernetesCRD) // set state as already assigned - testState, _ := NewPodStateWithOrchestratorContext(testIP1, testPod1GUID, testNCID, types.Assigned, 24, 0, testPod1Info) + testState, _ := newPodStateWithOrchestratorContext(testIP1, testPod1GUID, testNCID, types.Assigned, 24, 0, testPod1Info) ipconfigs := map[string]cns.IPConfigurationStatus{ testState.ID: testState, } emptyIpconfigs := map[string]cns.IPConfigurationStatus{} - err := UpdatePodIPConfigState(t, svc, ipconfigs, testNCID) + err := updatePodIPConfigState(t, svc, ipconfigs, testNCID) if err != nil { t.Fatalf("Expected to not fail adding IPs to state: %+v", err) } - err = UpdatePodIPConfigState(t, svc, emptyIpconfigs, testNCIDv6) + err = updatePodIPConfigState(t, svc, emptyIpconfigs, testNCIDv6) if err != nil { t.Fatalf("Expected to not fail adding empty NC to state: %+v", err) } @@ -1427,21 +1432,21 @@ func TestIPAMReleaseOneIPWhenExpectedToHaveTwo(t *testing.T) { } func TestIPAMFailToRequestOneIPWhenExpectedToHaveTwo(t *testing.T) { - svc := getTestService() + svc := getTestService(cns.KubernetesCRD) // set state as already assigned - testState := NewPodState(testIP1, ipIDs[0][0], testNCID, types.Available, 0) + testState := newPodState(testIP1, ipIDs[0][0], testNCID, types.Available, 0) ipconfigs := map[string]cns.IPConfigurationStatus{ testState.ID: testState, } emptyIpconfigs := map[string]cns.IPConfigurationStatus{} - err := UpdatePodIPConfigState(t, svc, ipconfigs, testNCID) + err := updatePodIPConfigState(t, svc, ipconfigs, testNCID) if err != nil { t.Fatalf("Expected to not fail adding IPs to state: %+v", err) } - err = UpdatePodIPConfigState(t, svc, emptyIpconfigs, testNCIDv6) + err = updatePodIPConfigState(t, svc, emptyIpconfigs, testNCIDv6) if err != nil { t.Fatalf("Expected to not fail adding empty NC to state: %+v", err) } @@ -1463,23 +1468,23 @@ func TestIPAMFailToRequestOneIPWhenExpectedToHaveTwo(t *testing.T) { } func TestIPAMFailToReleasePartialIPsInPool(t *testing.T) { - svc := getTestService() + svc := getTestService(cns.KubernetesCRD) // set state as already assigned - testState, _ := NewPodStateWithOrchestratorContext(testIP1, testIPID1, testNCID, types.Assigned, 24, 0, testPod1Info) + testState, _ := newPodStateWithOrchestratorContext(testIP1, testIPID1, testNCID, types.Assigned, 24, 0, testPod1Info) ipconfigs := map[string]cns.IPConfigurationStatus{ testState.ID: testState, } - testStatev6, _ := NewPodStateWithOrchestratorContext(testIP1v6, testIPID1v6, testNCIDv6, types.Assigned, 120, 0, testPod1Info) + testStatev6, _ := newPodStateWithOrchestratorContext(testIP1v6, testIPID1v6, testNCIDv6, types.Assigned, 120, 0, testPod1Info) ipconfigsv6 := map[string]cns.IPConfigurationStatus{ testStatev6.ID: testStatev6, } - err := UpdatePodIPConfigState(t, svc, ipconfigs, testNCID) + err := updatePodIPConfigState(t, svc, ipconfigs, testNCID) if err != nil { t.Fatalf("Expected to not fail adding IPs to state: %+v", err) } - err = UpdatePodIPConfigState(t, svc, ipconfigsv6, testNCIDv6) + err = updatePodIPConfigState(t, svc, ipconfigsv6, testNCIDv6) if err != nil { t.Fatalf("Expected to not fail adding empty NC to state: %+v", err) } @@ -1493,23 +1498,23 @@ func TestIPAMFailToReleasePartialIPsInPool(t *testing.T) { } func TestIPAMFailToRequestPartialIPsInPool(t *testing.T) { - svc := getTestService() + svc := getTestService(cns.KubernetesCRD) // set state as already assigned - testState := NewPodState(testIP1, 
testIPID1, testNCID, types.Available, 0) + testState := newPodState(testIP1, testIPID1, testNCID, types.Available, 0) ipconfigs := map[string]cns.IPConfigurationStatus{ testState.ID: testState, } - testStatev6 := NewPodState(testIP1v6, testIPID1v6, testNCIDv6, types.Available, 0) + testStatev6 := newPodState(testIP1v6, testIPID1v6, testNCIDv6, types.Available, 0) ipconfigsv6 := map[string]cns.IPConfigurationStatus{ testStatev6.ID: testStatev6, } - err := UpdatePodIPConfigState(t, svc, ipconfigs, testNCID) + err := updatePodIPConfigState(t, svc, ipconfigs, testNCID) if err != nil { t.Fatalf("Expected to not fail adding IPs to state: %+v", err) } - err = UpdatePodIPConfigState(t, svc, ipconfigsv6, testNCIDv6) + err = updatePodIPConfigState(t, svc, ipconfigsv6, testNCIDv6) if err != nil { t.Fatalf("Expected to not fail adding empty NC to state: %+v", err) } @@ -1533,7 +1538,7 @@ func TestIPAMFailToRequestPartialIPsInPool(t *testing.T) { } func TestIPAMReleaseSWIFTV2PodIPSuccess(t *testing.T) { - svc := getTestService() + svc := getTestService(cns.KubernetesCRD) middleware := middlewares.K8sSWIFTv2Middleware{Cli: mock.NewClient()} svc.AttachIPConfigsHandlerMiddleware(&middleware) @@ -1559,9 +1564,9 @@ func TestIPAMReleaseSWIFTV2PodIPSuccess(t *testing.T) { // Add Available Pod IP to state for i := range ncStates { ipconfigs := make(map[string]cns.IPConfigurationStatus, 0) - state := NewPodState(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Available, 0) + state := newPodState(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Available, 0) ipconfigs[state.ID] = state - err := UpdatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) + err := updatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) if err != nil { t.Fatalf("Expected to not fail adding IPs to state: %+v", err) } @@ -1584,7 +1589,7 @@ func TestIPAMReleaseSWIFTV2PodIPSuccess(t *testing.T) { } func TestIPAMGetK8sSWIFTv2IPSuccess(t *testing.T) { - svc := getTestService() + svc := getTestService(cns.KubernetesCRD) middleware := middlewares.K8sSWIFTv2Middleware{Cli: mock.NewClient()} svc.AttachIPConfigsHandlerMiddleware(&middleware) @@ -1610,9 +1615,9 @@ func TestIPAMGetK8sSWIFTv2IPSuccess(t *testing.T) { // Add Available Pod IP to state for i := range ncStates { ipconfigs := make(map[string]cns.IPConfigurationStatus, 0) - state := NewPodState(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Available, 0) + state := newPodState(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Available, 0) ipconfigs[state.ID] = state - err := UpdatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) + err := updatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) if err != nil { t.Fatalf("Expected to not fail adding IPs to state: %+v", err) } @@ -1647,7 +1652,7 @@ func TestIPAMGetK8sSWIFTv2IPSuccess(t *testing.T) { } func TestIPAMGetK8sSWIFTv2IPFailure(t *testing.T) { - svc := getTestService() + svc := getTestService(cns.KubernetesCRD) middleware := middlewares.K8sSWIFTv2Middleware{Cli: mock.NewClient()} svc.AttachIPConfigsHandlerMiddleware(&middleware) ncStates := []ncState{ @@ -1667,9 +1672,9 @@ func TestIPAMGetK8sSWIFTv2IPFailure(t *testing.T) { // Add Available Pod IP to state for i := range ncStates { ipconfigs := make(map[string]cns.IPConfigurationStatus, 0) - state := NewPodState(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Available, 0) + state := newPodState(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Available, 0) ipconfigs[state.ID] = state - err := 
UpdatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) + err := updatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) if err != nil { t.Fatalf("Expected to not fail adding IPs to state: %+v", err) } @@ -1717,7 +1722,7 @@ func TestIPAMGetK8sSWIFTv2IPFailure(t *testing.T) { } func TestIPAMGetK8sInfinibandSuccess(t *testing.T) { - svc := getTestService() + svc := getTestService(cns.KubernetesCRD) middleware := middlewares.K8sSWIFTv2Middleware{Cli: mock.NewClient()} svc.AttachIPConfigsHandlerMiddleware(&middleware) updatePnpIDMacAddressState(svc) @@ -1744,9 +1749,9 @@ func TestIPAMGetK8sInfinibandSuccess(t *testing.T) { // Add Available Pod IP to state for i := range ncStates { ipconfigs := make(map[string]cns.IPConfigurationStatus, 0) - state := NewPodState(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Available, 0) + state := newPodState(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Available, 0) ipconfigs[state.ID] = state - err := UpdatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) + err := updatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) if err != nil { t.Fatalf("Expected to not fail adding IPs to state: %+v", err) } @@ -1782,7 +1787,7 @@ func TestIPAMGetK8sInfinibandSuccess(t *testing.T) { // Test intednd to check for on single backend nic without the delegaed nic func TestIPAMGetK8sInfinibandSuccessOneNic(t *testing.T) { - svc := getTestService() + svc := getTestService(cns.KubernetesCRD) middleware := middlewares.K8sSWIFTv2Middleware{Cli: mock.NewClient()} svc.AttachIPConfigsHandlerMiddleware(&middleware) updatePnpIDMacAddressState(svc) @@ -1809,9 +1814,9 @@ func TestIPAMGetK8sInfinibandSuccessOneNic(t *testing.T) { // Add Available Pod IP to state for i := range ncStates { ipconfigs := make(map[string]cns.IPConfigurationStatus, 0) - state := NewPodState(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Available, 0) + state := newPodState(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Available, 0) ipconfigs[state.ID] = state - err := UpdatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) + err := updatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) if err != nil { t.Fatalf("Expected to not fail adding IPs to state: %+v", err) } @@ -1842,7 +1847,7 @@ func TestIPAMGetK8sInfinibandSuccessOneNic(t *testing.T) { } func TestIPAMGetK8sInfinibandFailure(t *testing.T) { - svc := getTestService() + svc := getTestService(cns.KubernetesCRD) middleware := middlewares.K8sSWIFTv2Middleware{Cli: mock.NewClient()} svc.AttachIPConfigsHandlerMiddleware(&middleware) updatePnpIDMacAddressState(svc) @@ -1869,9 +1874,9 @@ func TestIPAMGetK8sInfinibandFailure(t *testing.T) { // Add Available Pod IP to state for i := range ncStates { ipconfigs := make(map[string]cns.IPConfigurationStatus, 0) - state := NewPodState(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Available, 0) + state := newPodState(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Available, 0) ipconfigs[state.ID] = state - err := UpdatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) + err := updatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) if err != nil { t.Fatalf("Expected to not fail adding IPs to state: %+v", err) } @@ -1893,3 +1898,386 @@ func TestIPAMGetK8sInfinibandFailure(t *testing.T) { t.Fatalf("Expected failing requesting IPs due to not able to set routes") } } + +func TestIPAMGetStandaloneSWIFTv2(t *testing.T) { + svc := getTestService(cns.ServiceFabric) + middleware := middlewares.StandaloneSWIFTv2Middleware{} + 
svc.AttachIPConfigsHandlerMiddleware(&middleware) + + orchestratorContext, _ := testPod1Info.OrchestratorContext() + mockMACAddress := "00:00:00:00:00:00" + mockGatewayIP := "10.0.0.1" // from mock wireserver gateway calculation on host subnet + + tt := []struct { + name string + req cns.IPConfigsRequest + mockNMAgent *fakes.NMAgentClientFake + expectedResponse *cns.IPConfigsResponse + }{ + { + name: "Successful single IPAM for Standalone SwiftV2 pod, when NMAgent returns error for GetNCVersionList", + req: cns.IPConfigsRequest{ + DesiredIPAddresses: []string{testIP1}, + OrchestratorContext: orchestratorContext, + PodInterfaceID: testPod1Info.InterfaceID(), + InfraContainerID: testPod1Info.InfraContainerID(), + }, + mockNMAgent: &fakes.NMAgentClientFake{ + GetNCVersionListF: func(_ context.Context) (nma.NCVersionList, error) { + // NMAgent returns an error, eg. NC is not programmed + return nma.NCVersionList{ + Containers: []nma.NCVersion{}, + }, errors.New("any NMAgent error") + }, + }, + expectedResponse: &cns.IPConfigsResponse{ + Response: cns.Response{ + ReturnCode: types.Success, + }, + PodIPInfo: []cns.PodIpInfo{ + { + PodIPConfig: cns.IPSubnet{ + IPAddress: testIP1, + }, + NetworkContainerPrimaryIPConfig: cns.IPConfiguration{ + IPSubnet: cns.IPSubnet{ + IPAddress: testIP1, + }, + GatewayIPAddress: mockGatewayIP, + }, + MacAddress: mockMACAddress, + NICType: cns.DelegatedVMNIC, + HostPrimaryIPInfo: cns.HostIPInfo{ + Gateway: mockGatewayIP, + PrimaryIP: fakes.HostPrimaryIP, + Subnet: fakes.HostSubnet, + }, + }, + }, + }, + }, + { + name: "Successful single IPAM for Standalone SwiftV2 pod, when NMAgent returns empty response and no error for GetNCVersionList", + req: cns.IPConfigsRequest{ + DesiredIPAddresses: []string{testIP1}, + OrchestratorContext: orchestratorContext, + PodInterfaceID: testPod1Info.InterfaceID(), + InfraContainerID: testPod1Info.InfraContainerID(), + }, + mockNMAgent: &fakes.NMAgentClientFake{ + GetNCVersionListF: func(_ context.Context) (nma.NCVersionList, error) { + // NMAgent returns an empty response with no error + return nma.NCVersionList{ + Containers: []nma.NCVersion{}, + }, nil + }, + }, + expectedResponse: &cns.IPConfigsResponse{ + Response: cns.Response{ + ReturnCode: types.Success, + }, + PodIPInfo: []cns.PodIpInfo{ + { + PodIPConfig: cns.IPSubnet{ + IPAddress: testIP1, + }, + NetworkContainerPrimaryIPConfig: cns.IPConfiguration{ + IPSubnet: cns.IPSubnet{ + IPAddress: testIP1, + }, + GatewayIPAddress: mockGatewayIP, + }, + MacAddress: mockMACAddress, + NICType: cns.DelegatedVMNIC, + HostPrimaryIPInfo: cns.HostIPInfo{ + Gateway: mockGatewayIP, + PrimaryIP: fakes.HostPrimaryIP, + Subnet: fakes.HostSubnet, + }, + }, + }, + }, + }, + { + name: "Successful single IPAM for Standalone SwiftV2 pod, when NMAgent returns an NC for GetNCVersionList even if it's not programmed", + req: cns.IPConfigsRequest{ + DesiredIPAddresses: []string{testIP1}, + OrchestratorContext: orchestratorContext, + PodInterfaceID: testPod1Info.InterfaceID(), + InfraContainerID: testPod1Info.InfraContainerID(), + }, + mockNMAgent: &fakes.NMAgentClientFake{ + GetNCVersionListF: func(_ context.Context) (nma.NCVersionList, error) { + // NMAgent returns an NC even if it's not programmed + return nma.NCVersionList{ + Containers: []nma.NCVersion{ + { + NetworkContainerID: testNCID, + Version: "0", + }, + }, + }, nil + }, + }, + expectedResponse: &cns.IPConfigsResponse{ + Response: cns.Response{ + ReturnCode: types.Success, + }, + PodIPInfo: []cns.PodIpInfo{ + { + PodIPConfig: cns.IPSubnet{ + 
IPAddress: testIP1, + }, + NetworkContainerPrimaryIPConfig: cns.IPConfiguration{ + IPSubnet: cns.IPSubnet{ + IPAddress: testIP1, + }, + GatewayIPAddress: mockGatewayIP, + }, + MacAddress: mockMACAddress, + NICType: cns.DelegatedVMNIC, + HostPrimaryIPInfo: cns.HostIPInfo{ + Gateway: mockGatewayIP, + PrimaryIP: fakes.HostPrimaryIP, + Subnet: fakes.HostSubnet, + }, + }, + }, + }, + }, + { + name: "Fail validation when orchestrator context can't be unmarshalled", + req: cns.IPConfigsRequest{ + DesiredIPAddresses: []string{testIP1}, + OrchestratorContext: json.RawMessage("invalid"), + PodInterfaceID: testPod1Info.InterfaceID(), + InfraContainerID: testPod1Info.InfraContainerID(), + }, + expectedResponse: &cns.IPConfigsResponse{ + Response: cns.Response{ + ReturnCode: types.UnsupportedOrchestratorContext, + }, + }, + }, + { + name: "Fail validation when orchestrator context is nil", + req: cns.IPConfigsRequest{ + DesiredIPAddresses: []string{testIP1}, + OrchestratorContext: nil, + PodInterfaceID: testPod1Info.InterfaceID(), + InfraContainerID: testPod1Info.InfraContainerID(), + }, + expectedResponse: &cns.IPConfigsResponse{ + Response: cns.Response{ + ReturnCode: types.EmptyOrchestratorContext, + }, + }, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + // setup CNS state with SwiftV2 NC + createAndSaveMockNCRequest(t, svc, testNCID, orchestratorContext, tc.req.DesiredIPAddresses[0], mockGatewayIP, mockMACAddress) + + // IMPORTANT: although SwiftV2 reuses the concept of NCs, NMAgent doesn't program NCs for SwiftV2, but + // instead programs NICs. When getting SwiftV2 NCs, we want the NIC type and MAC address of the NCs. + // TODO: we need another way to verify and sync NMAgent's NIC programming status. currently pending a new NMAgent API or NIC programming status to be passed in the SwiftV2 create NC request. 
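+ // setupMockNMAgent (defined at the end of this test file) swaps the service's NMAgent client for the fake via setMockNMAgent and registers a t.Cleanup that restores the original client; the field being swapped is outside this diff.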
+ setupMockNMAgent(t, svc, tc.mockNMAgent) + + // invoke the SwiftV2 IPAM wrapper handler with the standalone SwiftV2 middleware + wrappedHandler := svc.IPConfigsHandlerMiddleware.IPConfigsRequestHandlerWrapper(svc.requestIPConfigHandlerHelperStandalone, nil) + resp, err := wrappedHandler(context.TODO(), tc.req) + + if tc.expectedResponse.Response.ReturnCode == types.Success { + require.NoError(t, err) + + // assert CNS response code + require.Equal(t, tc.expectedResponse.Response.ReturnCode, resp.Response.ReturnCode) + + expectedPodIPInfo := tc.expectedResponse.PodIPInfo + actualPodIPInfo := resp.PodIPInfo + + for i, expected := range expectedPodIPInfo { + // assert SwiftV2 IP is returned + assert.Len(t, actualPodIPInfo, len(tc.req.DesiredIPAddresses), "Expected list of IPs returned matches the number of desired IPs from CNI IPAM request") + assert.Equal(t, expected.PodIPConfig.IPAddress, actualPodIPInfo[i].PodIPConfig.IPAddress) + assert.Equal(t, expected.MacAddress, actualPodIPInfo[i].MacAddress) + assert.Equal(t, expected.NICType, actualPodIPInfo[i].NICType) + + // assert that PodIPInfo contains interface information + assert.Equal(t, expected.HostPrimaryIPInfo.Gateway, actualPodIPInfo[i].HostPrimaryIPInfo.Gateway) + assert.Equal(t, expected.HostPrimaryIPInfo.PrimaryIP, actualPodIPInfo[i].HostPrimaryIPInfo.PrimaryIP) + assert.Equal(t, expected.HostPrimaryIPInfo.Subnet, actualPodIPInfo[i].HostPrimaryIPInfo.Subnet) + } + } else { + require.Error(t, err) + assert.Equal(t, tc.expectedResponse.Response.ReturnCode, resp.Response.ReturnCode) + } + }) + } +} + +func setupMockNMAgent(t *testing.T, svc *HTTPRestService, mockNMAgent *fakes.NMAgentClientFake) { + t.Helper() + t.Log("Started mock NMAgent") + cleanupNMAgentMock := setMockNMAgent(svc, mockNMAgent) + t.Cleanup(func() { + cleanupNMAgentMock() + t.Log("Stopped mock NMAgent") + }) +} + +func createAndSaveMockNCRequest(t *testing.T, svc *HTTPRestService, ncID string, orchestratorContext json.RawMessage, desiredIP, mockGatewayIP, mockMACAddress string) { + t.Helper() + + createNCReq := &cns.CreateNetworkContainerRequest{ + NetworkContainerType: "Docker", + NetworkContainerid: ncID, + OrchestratorContext: orchestratorContext, + IPConfiguration: cns.IPConfiguration{ + IPSubnet: cns.IPSubnet{ + IPAddress: desiredIP, + PrefixLength: ipPrefixBitsv4, + }, + GatewayIPAddress: mockGatewayIP, + }, + // SwiftV2 NIC info + NetworkInterfaceInfo: cns.NetworkInterfaceInfo{ + NICType: cns.DelegatedVMNIC, + MACAddress: mockMACAddress, + }, + } + err := createNCReq.Validate() + require.NoError(t, err) + + // save SwiftV2 NC state in CNS + returnCode, returnMessage := svc.saveNetworkContainerGoalState(*createNCReq) + require.Equal(t, types.Success, returnCode) + require.Empty(t, returnMessage) +} + +// Validate Statefile in Stateless CNI scenarios +func TestStatelessCNIStateFile(t *testing.T) { + svc := getTestService(cns.KubernetesCRD) + svc.EndpointStateStore = store.NewMockStore("") + // test Case 1 - AKS SingleTenancy + endpointInfo1ContainerID := "0a4917617e15d24dc495e407d8eb5c88e4406e58fa209e4eb75a2c2fb7045eea" + endpointInfo1 := &EndpointInfo{IfnameToIPMap: make(map[string]*IPInfo)} + endpointInfo1.IfnameToIPMap["eth0"] = &IPInfo{IPv4: []net.IPNet{{IP: net.IPv4(10, 0, 0, 1), Mask: net.IPv4Mask(255, 255, 255, 0)}}} + req1 := make(map[string]*IPInfo) + req1["eth0"] = &IPInfo{IPv4: []net.IPNet{{IP: net.IPv4(10, 0, 0, 1), Mask: net.IPv4Mask(255, 255, 255, 0)}}, HnsEndpointID: "5c15cccc-830a-4dff-81f3-4b1e55cb7dcb", NICType: cns.InfraNIC} + testPod1Info =
cns.NewPodInfo(endpointInfo1ContainerID, endpointInfo1ContainerID, "pod1", "default") + req := cns.IPConfigsRequest{ + PodInterfaceID: testPod1Info.InterfaceID(), + InfraContainerID: testPod1Info.InfraContainerID(), + } + // test Case 2 - ACI + endpointInfo2ContainerID := "1b4917617e15d24dc495e407d8eb5c88e4406e58fa209e4eb75a2c2fb7045eea" + endpointInfo2 := &EndpointInfo{IfnameToIPMap: make(map[string]*IPInfo)} + endpointInfo2.IfnameToIPMap["eth2"] = &IPInfo{ + IPv4: nil, + NICType: cns.DelegatedVMNIC, + HnsEndpointID: "5c15cccc-830a-4dff-81f3-4b1e55cb7dcb", + HnsNetworkID: "5c0712cd-824c-4898-b1c0-2fcb16ede4fb", + MacAddress: "7c:1e:52:06:d3:4b", + } + // test cases + tests := []struct { + name string + endpointID string + req map[string]*IPInfo + store store.KeyValueStore + want *EndpointInfo + wantErr bool + }{ + { + name: "single-tenancy: update endpoint without error", + endpointID: endpointInfo1ContainerID, + req: req1, + store: svc.EndpointStateStore, + want: &EndpointInfo{ + PodName: "pod1", PodNamespace: "default", IfnameToIPMap: map[string]*IPInfo{ + "eth0": { + IPv4: []net.IPNet{{IP: net.IPv4(10, 0, 0, 1), Mask: net.IPv4Mask(255, 255, 255, 0)}}, + HnsEndpointID: "5c15cccc-830a-4dff-81f3-4b1e55cb7dcb", + NICType: cns.InfraNIC, + }, + }, + }, + wantErr: false, + }, + { + name: "ACI: update and create absent endpoint without error", + endpointID: endpointInfo2ContainerID, + req: endpointInfo2.IfnameToIPMap, + store: svc.EndpointStateStore, + want: endpointInfo2, + wantErr: false, + }, + } + ncStates := []ncState{ + { + ncID: testNCID, + ips: []string{ + testIP1, + }, + }, + } + + ipconfigs := make(map[string]cns.IPConfigurationStatus, 0) + for i := range ncStates { + state := newPodState(ncStates[i].ips[0], ipIDs[i][0], ncStates[i].ncID, types.Available, 0) + ipconfigs[state.ID] = state + err := updatePodIPConfigState(t, svc, ipconfigs, ncStates[i].ncID) + if err != nil { + t.Fatalf("Expected to not fail update service with config: %+v", err) + } + } + t.Log(ipconfigs) + b, _ := testPod1Info.OrchestratorContext() + req.OrchestratorContext = b + req.Ifname = "eth0" + podIPInfo, err := requestIPConfigsHelper(svc, req) + if err != nil { + t.Fatalf("Expected to not fail getting pod ip info: %+v", err) + } + + ipInfo := &IPInfo{} + for i := range podIPInfo { + ip, ipnet, errIP := net.ParseCIDR(podIPInfo[i].PodIPConfig.IPAddress + "/" + strconv.FormatUint(uint64(podIPInfo[i].PodIPConfig.PrefixLength), 10)) + if errIP != nil { + t.Fatalf("failed to parse pod ip address: %+v", errIP) + } + ipconfig := net.IPNet{IP: ip, Mask: ipnet.Mask} + if ip.To4() == nil { // is an ipv6 address + ipInfo.IPv6 = append(ipInfo.IPv6, ipconfig) + } else { + ipInfo.IPv4 = append(ipInfo.IPv4, ipconfig) + } + } + + // add goalState + err = svc.updateEndpointState(req, testPod1Info, podIPInfo) + if err != nil { + t.Fatalf("Expected to not fail updating endpoint state: %+v", err) + } + // update State + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + err := svc.UpdateEndpointHelper(tt.endpointID, tt.req) + if tt.wantErr { + assert.Error(t, err) + return + } + got, err := svc.GetEndpointHelper(tt.endpointID) + if tt.wantErr { + assert.Error(t, err) + return + } + require.NoError(t, err) + assert.Equal(t, tt.want, got) + }) + } +} diff --git a/cns/restserver/ipusage.go b/cns/restserver/ipusage.go deleted file mode 100644 index e2ae30382b..0000000000 --- a/cns/restserver/ipusage.go +++ /dev/null @@ -1,58 +0,0 @@ -package restserver - -import ( - 
"github.com/Azure/azure-container-networking/cns/logger" - "github.com/Azure/azure-container-networking/cns/types" -) - -type ipState struct { - // allocatedIPs are all the IPs given to CNS by DNC. - allocatedIPs int64 - // assignedIPs are the IPs CNS gives to Pods. - assignedIPs int64 - // availableIPs are the IPs in state "Available". - availableIPs int64 - // programmingIPs are the IPs in state "PendingProgramming". - programmingIPs int64 - // releasingIPs are the IPs in state "PendingReleasr". - releasingIPs int64 -} - -func (service *HTTPRestService) buildIPState() *ipState { - service.Lock() - defer service.Unlock() - - state := ipState{ - allocatedIPs: 0, - assignedIPs: 0, - availableIPs: 0, - programmingIPs: 0, - releasingIPs: 0, - } - - //nolint:gocritic // This has to iterate over the IP Config state to get the counts. - for _, ipConfig := range service.PodIPConfigState { - state.allocatedIPs++ - if ipConfig.GetState() == types.Assigned { - state.assignedIPs++ - } - if ipConfig.GetState() == types.Available { - state.availableIPs++ - } - if ipConfig.GetState() == types.PendingProgramming { - state.programmingIPs++ - } - if ipConfig.GetState() == types.PendingRelease { - state.releasingIPs++ - } - } - - logger.Printf("[IP Usage] Allocated IPs: %d, Assigned IPs: %d, Available IPs: %d, PendingProgramming IPs: %d, PendingRelease IPs: %d", - state.allocatedIPs, - state.assignedIPs, - state.availableIPs, - state.programmingIPs, - state.releasingIPs, - ) - return &state -} diff --git a/cns/restserver/metrics.go b/cns/restserver/metrics.go index 7b9068f83f..b4c0dae8dd 100644 --- a/cns/restserver/metrics.go +++ b/cns/restserver/metrics.go @@ -1,10 +1,13 @@ package restserver import ( + "maps" "net/http" + "sync" "time" "github.com/Azure/azure-container-networking/cns" + "github.com/Azure/azure-container-networking/cns/logger" "github.com/Azure/azure-container-networking/cns/types" "github.com/prometheus/client_golang/prometheus" "sigs.k8s.io/controller-runtime/pkg/metrics" @@ -62,6 +65,12 @@ var ( }, []string{"ok"}, ) + hasNC = prometheus.NewGauge( + prometheus.GaugeOpts{ + Name: "has_networkcontainer", + Help: "Number of Network Containers retrieved from NMA", + }, + ) allocatedIPCount = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Name: "cx_allocated_ips_v2", @@ -111,6 +120,7 @@ func init() { ipConfigStatusStateTransitionTime, syncHostNCVersionCount, syncHostNCVersionLatency, + hasNC, allocatedIPCount, assignedIPCount, availableIPCount, @@ -122,7 +132,6 @@ func init() { // Every http response is 200 so we really want cns response code. // Hard tto do with middleware unless we derserialize the responses but making it an explit header works around it. // if that doesn't work we could have a separate countervec just for response codes. - func NewHandlerFuncWithHistogram(handler http.HandlerFunc, histogram *prometheus.HistogramVec) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { start := time.Now() @@ -142,11 +151,87 @@ func stateTransitionMiddleware(i *cns.IPConfigurationStatus, s types.IPState) { ipConfigStatusStateTransitionTime.WithLabelValues(string(i.GetState()), string(s)).Observe(time.Since(i.LastStateTransition).Seconds()) } -func publishIPStateMetrics(state *ipState) { - labels := []string{} // TODO. ragasthya Add dimensions to the IP Usage metrics. +type ipState struct { + // allocatedIPs are all the IPs given to CNS by DNC. + allocatedIPs int64 + // assignedIPs are the IPs CNS gives to Pods. 
+ assignedIPs int64
+ // availableIPs are the IPs in state "Available".
+ availableIPs int64
+ // programmingIPs are the IPs in state "PendingProgramming".
+ programmingIPs int64
+ // releasingIPs are the IPs in state "PendingRelease".
+ releasingIPs int64
+}
+
+type asyncMetricsRecorder struct {
+ podIPConfigSrc func() map[string]cns.IPConfigurationStatus
+ sig chan struct{}
+ once sync.Once
+}
+
+// singleton recorder
+var recorder asyncMetricsRecorder
+
+// run starts the asyncMetricsRecorder and listens for signals to record the metrics.
+func (a *asyncMetricsRecorder) run() {
+ for range a.sig {
+ a.record()
+ }
+}
+
+// record records the IP Config state metrics to Prometheus.
+func (a *asyncMetricsRecorder) record() {
+ var state ipState
+ for ipConfig := range maps.Values(a.podIPConfigSrc()) {
+ state.allocatedIPs++
+ if ipConfig.GetState() == types.Assigned {
+ state.assignedIPs++
+ }
+ if ipConfig.GetState() == types.Available {
+ state.availableIPs++
+ }
+ if ipConfig.GetState() == types.PendingProgramming {
+ state.programmingIPs++
+ }
+ if ipConfig.GetState() == types.PendingRelease {
+ state.releasingIPs++
+ }
+ }
+
+ logger.Printf("Allocated IPs: %d, Assigned IPs: %d, Available IPs: %d, PendingProgramming IPs: %d, PendingRelease IPs: %d",
+ state.allocatedIPs,
+ state.assignedIPs,
+ state.availableIPs,
+ state.programmingIPs,
+ state.releasingIPs,
+ )
+
+ labels := []string{}
 allocatedIPCount.WithLabelValues(labels...).Set(float64(state.allocatedIPs))
 assignedIPCount.WithLabelValues(labels...).Set(float64(state.assignedIPs))
 availableIPCount.WithLabelValues(labels...).Set(float64(state.availableIPs))
 pendingProgrammingIPCount.WithLabelValues(labels...).Set(float64(state.programmingIPs))
 pendingReleaseIPCount.WithLabelValues(labels...).Set(float64(state.releasingIPs))
 }
+
+// publishIPStateMetrics logs and publishes the IP Config state metrics to Prometheus.
+func (service *HTTPRestService) publishIPStateMetrics() {
+ recorder.once.Do(func() {
+ recorder.podIPConfigSrc = service.PodIPConfigStates
+ recorder.sig = make(chan struct{})
+ go recorder.run()
+ })
+ select {
+ case recorder.sig <- struct{}{}: // signal the recorder to record the metrics
+ default: // drop the signal if the recorder already has an event queued
+ }
+}
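
The recorder above coalesces metric publishes: a sync.Once lazily starts a single worker goroutine, and the non-blocking send on an unbuffered channel means callers never block; signals arriving while a recording is in flight are simply dropped. A minimal, self-contained sketch of the same pattern (the coalescer type and all names here are illustrative, not CNS code):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

// coalescer mirrors asyncMetricsRecorder: one lazily started worker plus a
// non-blocking send, so bursts of Kick calls collapse instead of queueing.
type coalescer struct {
	sig  chan struct{}
	once sync.Once
	work func()
}

func (c *coalescer) Kick() {
	c.once.Do(func() {
		c.sig = make(chan struct{}) // unbuffered, like recorder.sig
		go func() {
			for range c.sig {
				c.work()
			}
		}()
	})
	select {
	case c.sig <- struct{}{}: // worker is idle at the receive: hand it a run
	default: // worker is busy recording: drop the signal
	}
}

func main() {
	var runs atomic.Int64
	c := &coalescer{work: func() {
		runs.Add(1)
		time.Sleep(10 * time.Millisecond) // stand-in for an expensive state scan
	}}
	for i := 0; i < 100; i++ {
		c.Kick()
	}
	time.Sleep(100 * time.Millisecond)
	fmt.Println("runs:", runs.Load()) // far fewer than 100
}

+
+// PodIPConfigStates returns a clone of the IP Config State map.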
+func (service *HTTPRestService) PodIPConfigStates() map[string]cns.IPConfigurationStatus { + // copy state + service.RLock() + defer service.RUnlock() + return maps.Clone(service.PodIPConfigState) +} diff --git a/cns/restserver/nodesubnet.go b/cns/restserver/nodesubnet.go new file mode 100644 index 0000000000..177d4266bc --- /dev/null +++ b/cns/restserver/nodesubnet.go @@ -0,0 +1,64 @@ +package restserver + +import ( + "context" + "net/netip" + + "github.com/Azure/azure-container-networking/cns" + "github.com/Azure/azure-container-networking/cns/logger" + nodesubnet "github.com/Azure/azure-container-networking/cns/nodesubnet" + "github.com/Azure/azure-container-networking/cns/types" + "github.com/pkg/errors" +) + +var _ nodesubnet.IPConsumer = &HTTPRestService{} + +// UpdateIPsForNodeSubnet updates the IP pool of HTTPRestService with newly fetched secondary IPs +func (service *HTTPRestService) UpdateIPsForNodeSubnet(secondaryIPs []netip.Addr) error { + secondaryIPStrs := make([]string, len(secondaryIPs)) + for i, ip := range secondaryIPs { + secondaryIPStrs[i] = ip.String() + } + + networkContainerRequest := nodesubnet.CreateNodeSubnetNCRequest(secondaryIPStrs) + + code, msg := service.saveNetworkContainerGoalState(*networkContainerRequest) + if code != types.Success { + return errors.Errorf("failed to save fetched ips. code: %d, message %s", code, msg) + } + + logger.Debugf("IP change processed successfully") + + // saved NC successfully. UpdateIPsForNodeSubnet is called only when IPs are fetched from NMAgent. + // We now have IPs to serve IPAM requests. Generate conflist to indicate CNS is ready + service.MustGenerateCNIConflistOnce() + return nil +} + +// InitializeNodeSubnet prepares CNS for serving NodeSubnet requests. +// It sets the orchestrator type to KubernetesCRD, reconciles the initial +// CNS state from the statefile, then creates an IP fetcher. +func (service *HTTPRestService) InitializeNodeSubnet(ctx context.Context, podInfoByIPProvider cns.PodInfoByIPProvider) error { + // set orchestrator type + orchestrator := cns.SetOrchestratorTypeRequest{ + OrchestratorType: cns.KubernetesCRD, + } + service.SetNodeOrchestrator(&orchestrator) + + if podInfoByIPProvider == nil { + logger.Printf("PodInfoByIPProvider is nil, this usually means no saved endpoint state. Skipping reconciliation") + } else if _, err := nodesubnet.ReconcileInitialCNSState(ctx, service, podInfoByIPProvider); err != nil { + return errors.Wrap(err, "reconcile initial CNS state") + } + // statefile (if any) is reconciled. Initialize the IP fetcher. Start the IP fetcher only after the service is started, + // because starting the IP fetcher will generate conflist, which should be done only once we are ready to respond to IPAM requests. + service.nodesubnetIPFetcher = nodesubnet.NewIPFetcher(service.nma, service, 0, 0, logger.Log) + + return nil +} + +// StartNodeSubnet starts the IP fetcher for NodeSubnet. This will cause secondary IPs to be fetched periodically. +// After the first successful fetch, conflist will be generated to indicate CNS is ready. 
+func (service *HTTPRestService) StartNodeSubnet(ctx context.Context) {
+ service.nodesubnetIPFetcher.Start(ctx)
+}
diff --git a/cns/restserver/nodesubnet_test.go b/cns/restserver/nodesubnet_test.go
new file mode 100644
index 0000000000..361f5d005b
--- /dev/null
+++ b/cns/restserver/nodesubnet_test.go
@@ -0,0 +1,144 @@
+package restserver_test
+
+import (
+ "context"
+ "net"
+ "testing"
+
+ "github.com/Azure/azure-container-networking/cns/logger"
+ "github.com/Azure/azure-container-networking/cns/restserver"
+ "github.com/Azure/azure-container-networking/cns/stateprovider/cns"
+ "github.com/Azure/azure-container-networking/cns/types"
+ "github.com/Azure/azure-container-networking/store"
+)
+
+// getMockStore creates a mock KeyValueStore with some endpoint state
+func getMockStore() store.KeyValueStore {
+ mockStore := store.NewMockStore("")
+ endpointState := map[string]*restserver.EndpointInfo{
+ "12e65d89e58cb23c784e97840cf76866bfc9902089bdc8e87e9f64032e312b0b": {
+ PodName: "coredns-54b69f46b8-ldmwr",
+ PodNamespace: "kube-system",
+ IfnameToIPMap: map[string]*restserver.IPInfo{
+ "eth0": {
+ IPv4: []net.IPNet{
+ {
+ IP: net.IPv4(10, 0, 0, 52),
+ Mask: net.CIDRMask(24, 32),
+ },
+ },
+ },
+ },
+ },
+ "1fc5176913a3a1a7facfb823dde3b4ded404041134fef4f4a0c8bba140fc0413": {
+ PodName: "load-test-7f7d49687d-wxc9p",
+ PodNamespace: "load-test",
+ IfnameToIPMap: map[string]*restserver.IPInfo{
+ "eth0": {
+ IPv4: []net.IPNet{
+ {
+ IP: net.IPv4(10, 0, 0, 63),
+ Mask: net.CIDRMask(24, 32),
+ },
+ },
+ },
+ },
+ },
+ }
+
+ err := mockStore.Write(restserver.EndpointStoreKey, endpointState)
+ if err != nil {
+ return nil
+ }
+ return mockStore
+}
+
+// Mock implementation of CNIConflistGenerator
+type MockCNIConflistGenerator struct {
+ GenerateCalled chan struct{}
+}
+
+func (m *MockCNIConflistGenerator) Generate() error {
+ close(m.GenerateCalled)
+ return nil
+}
+
+func (m *MockCNIConflistGenerator) Close() error {
+ // Implement the Close method logic here if needed
+ return nil
+}
+
+// TestNodeSubnet tests initialization of NodeSubnet with endpoint info, and verifies that
+// the conflist is generated after fetching secondary IPs
+func TestNodeSubnet(t *testing.T) {
+ podInfoByIPProvider, err := cns.New(getMockStore())
+ if err != nil {
+ t.Fatalf("NewCNSPodInfoProvider returned an error: %v", err)
+ }
+
+ // create a real HTTPRestService object
+ mockCNIConflistGenerator := &MockCNIConflistGenerator{
+ GenerateCalled: make(chan struct{}),
+ }
+ service := restserver.GetRestServiceObjectForNodeSubnetTest(t, mockCNIConflistGenerator)
+ ctx, cancel := testContext(t)
+ defer cancel()
+
+ err = service.InitializeNodeSubnet(ctx, podInfoByIPProvider)
+ if err != nil {
+ t.Fatalf("InitializeNodeSubnet returned an error: %v", err)
+ }
+
+ expectedIPs := map[string]types.IPState{
+ "10.0.0.52": types.Assigned,
+ "10.0.0.63": types.Assigned,
+ }
+
+ checkIPassignment(t, service, expectedIPs)
+
+ service.StartNodeSubnet(ctx)
+
+ if service.GetNodesubnetIPFetcher() == nil {
+ t.Fatal("NodeSubnetIPFetcher is not initialized")
+ }
+
+ select {
+ case <-ctx.Done():
+ t.Errorf("test context done - %s", ctx.Err())
+ return
+ case <-mockCNIConflistGenerator.GenerateCalled:
+ break
+ }
+
+ expectedIPs["10.0.0.45"] = types.Available
+ checkIPassignment(t, service, expectedIPs)
+}
+
+// checkIPassignment checks whether the IP assignment state in the HTTPRestService object matches expectation
+func checkIPassignment(t *testing.T, service *restserver.HTTPRestService, expectedIPs map[string]types.IPState) {
+ if len(service.PodIPConfigState) != len(expectedIPs) {
+ t.Fatalf("expected %d entries in PodIPConfigState, got %d", len(expectedIPs), len(service.PodIPConfigState))
+ }
+
+ for ip := range service.GetPodIPConfigState() {
+ config := service.GetPodIPConfigState()[ip]
+ if assignmentState, exists := expectedIPs[ip]; !exists {
+ t.Fatalf("unexpected IP %s in PodIPConfigState", ip)
+ } else if config.GetState() != assignmentState {
+ t.Fatalf("expected state %s for IP %s, got %s", assignmentState, ip, config.GetState())
+ }
+ }
+}
+
+// testContext creates a context from the provided testing.T that will be
+// canceled if the test suite is terminated.
+func testContext(t *testing.T) (context.Context, context.CancelFunc) {
+ if deadline, ok := t.Deadline(); ok {
+ return context.WithDeadline(context.Background(), deadline)
+ }
+ return context.WithCancel(context.Background())
+}
+
+func init() {
+ logger.InitLogger("testlogs", 0, 0, "./")
+}
diff --git a/cns/restserver/restserver.go b/cns/restserver/restserver.go
index 1e39c9e8cf..c467ab04e2 100644
--- a/cns/restserver/restserver.go
+++ b/cns/restserver/restserver.go
@@ -11,9 +11,9 @@ import (
 "github.com/Azure/azure-container-networking/cns"
 "github.com/Azure/azure-container-networking/cns/common"
 "github.com/Azure/azure-container-networking/cns/dockerclient"
- "github.com/Azure/azure-container-networking/cns/ipamclient"
 "github.com/Azure/azure-container-networking/cns/logger"
 "github.com/Azure/azure-container-networking/cns/networkcontainers"
+ "github.com/Azure/azure-container-networking/cns/nodesubnet"
 "github.com/Azure/azure-container-networking/cns/routes"
 "github.com/Azure/azure-container-networking/cns/types"
 "github.com/Azure/azure-container-networking/cns/types/bounded"
@@ -41,6 +41,7 @@ type nmagentClient interface {
 SupportedAPIs(context.Context) ([]string, error)
 GetNCVersionList(context.Context) (nma.NCVersionList, error)
 GetHomeAz(context.Context) (nma.AzResponse, error)
+ GetInterfaceIPInfo(ctx context.Context) (nma.Interfaces, error)
 }
 
 type wireserverProxy interface {
@@ -53,12 +54,24 @@ type imdsClient interface {
 GetVMUniqueID(ctx context.Context) (string, error)
 }
 
+type iptablesClient interface {
+ ChainExists(table string, chain string) (bool, error)
+ NewChain(table string, chain string) error
+ Append(table string, chain string, rulespec ...string) error
+ Exists(table string, chain string, rulespec ...string) (bool, error)
+ Insert(table string, chain string, pos int, rulespec ...string) error
+}
+
+type iptablesGetter interface {
+ GetIPTables() (iptablesClient, error)
+}
+
 // HTTPRestService represents http listener for CNS - Container Networking Service.
 type HTTPRestService struct {
 *cns.Service
 dockerClient *dockerclient.Client
 wscli interfaceGetter
- ipamClient *ipamclient.IpamClient
+ iptables iptablesGetter
 nma nmagentClient
 wsproxy wireserverProxy
 homeAzMonitor *HomeAzMonitor
@@ -78,6 +91,7 @@ type HTTPRestService struct {
 IPConfigsHandlerMiddleware cns.IPConfigsHandlerMiddleware
 PnpIDByMacAddress map[string]string
 imdsClient imdsClient
+ nodesubnetIPFetcher *nodesubnet.IPFetcher
 }
 
 type CNIConflistGenerator interface {
@@ -166,7 +180,7 @@ type networkInfo struct {
 }
 
 // NewHTTPRestService creates a new HTTP Service object.
-func NewHTTPRestService(config *common.ServiceConfig, wscli interfaceGetter, wsproxy wireserverProxy, nmagentClient nmagentClient, +func NewHTTPRestService(config *common.ServiceConfig, wscli interfaceGetter, wsproxy wireserverProxy, iptg iptablesGetter, nmagentClient nmagentClient, endpointStateStore store.KeyValueStore, gen CNIConflistGenerator, homeAzMonitor *HomeAzMonitor, imdsClient imdsClient, ) (*HTTPRestService, error) { @@ -182,11 +196,6 @@ func NewHTTPRestService(config *common.ServiceConfig, wscli interfaceGetter, wsp return nil, err } - ic, err := ipamclient.NewIpamClient("") - if err != nil { - return nil, err - } - res, err := wscli.GetInterfaces(context.TODO()) // TODO(rbtr): thread context through this client if err != nil { return nil, errors.Wrap(err, "failed to get interfaces from IMDS") @@ -218,7 +227,7 @@ func NewHTTPRestService(config *common.ServiceConfig, wscli interfaceGetter, wsp store: service.Service.Store, dockerClient: dc, wscli: wscli, - ipamClient: ic, + iptables: iptg, nma: nmagentClient, wsproxy: wsproxy, networkContainer: nc, @@ -256,11 +265,7 @@ func (service *HTTPRestService) Init(config *common.ServiceConfig) error { listener.AddHandler(cns.SetEnvironmentPath, service.setEnvironment) listener.AddHandler(cns.CreateNetworkPath, service.createNetwork) listener.AddHandler(cns.DeleteNetworkPath, service.deleteNetwork) - listener.AddHandler(cns.ReserveIPAddressPath, service.reserveIPAddress) - listener.AddHandler(cns.ReleaseIPAddressPath, service.releaseIPAddress) listener.AddHandler(cns.GetHostLocalIPPath, service.getHostLocalIP) - listener.AddHandler(cns.GetIPAddressUtilizationPath, service.getIPAddressUtilization) - listener.AddHandler(cns.GetUnhealthyIPAddressesPath, service.getUnhealthyIPAddresses) listener.AddHandler(cns.CreateOrUpdateNetworkContainer, service.createOrUpdateNetworkContainer) listener.AddHandler(cns.DeleteNetworkContainer, service.deleteNetworkContainer) listener.AddHandler(cns.GetInterfaceForContainer, service.getInterfaceForContainer) @@ -287,20 +292,17 @@ func (service *HTTPRestService) Init(config *common.ServiceConfig) error { listener.AddHandler(cns.NetworkContainersURLPath, service.getOrRefreshNetworkContainers) listener.AddHandler(cns.GetHomeAz, service.getHomeAz) listener.AddHandler(cns.EndpointPath, service.EndpointHandlerAPI) - // This API is only needed for Direct channel mode with Swift v2. + // This API is only needed for Direct channel mode. 
if config.ChannelMode == cns.Direct { listener.AddHandler(cns.GetVMUniqueID, service.getVMUniqueID) + listener.AddHandler(cns.GetNCList, service.nmAgentNCListHandler) } // handlers for v0.2 listener.AddHandler(cns.V2Prefix+cns.SetEnvironmentPath, service.setEnvironment) listener.AddHandler(cns.V2Prefix+cns.CreateNetworkPath, service.createNetwork) listener.AddHandler(cns.V2Prefix+cns.DeleteNetworkPath, service.deleteNetwork) - listener.AddHandler(cns.V2Prefix+cns.ReserveIPAddressPath, service.reserveIPAddress) - listener.AddHandler(cns.V2Prefix+cns.ReleaseIPAddressPath, service.releaseIPAddress) listener.AddHandler(cns.V2Prefix+cns.GetHostLocalIPPath, service.getHostLocalIP) - listener.AddHandler(cns.V2Prefix+cns.GetIPAddressUtilizationPath, service.getIPAddressUtilization) - listener.AddHandler(cns.V2Prefix+cns.GetUnhealthyIPAddressesPath, service.getUnhealthyIPAddresses) listener.AddHandler(cns.V2Prefix+cns.CreateOrUpdateNetworkContainer, service.createOrUpdateNetworkContainer) listener.AddHandler(cns.V2Prefix+cns.DeleteNetworkContainer, service.deleteNetworkContainer) listener.AddHandler(cns.V2Prefix+cns.GetInterfaceForContainer, service.getInterfaceForContainer) @@ -317,9 +319,10 @@ func (service *HTTPRestService) Init(config *common.ServiceConfig) error { listener.AddHandler(cns.V2Prefix+cns.NmAgentSupportedApisPath, service.nmAgentSupportedApisHandler) listener.AddHandler(cns.V2Prefix+cns.GetHomeAz, service.getHomeAz) listener.AddHandler(cns.V2Prefix+cns.EndpointPath, service.EndpointHandlerAPI) - // This API is only needed for Direct channel mode with Swift v2. + // This API is only needed for Direct channel mode. if config.ChannelMode == cns.Direct { listener.AddHandler(cns.V2Prefix+cns.GetVMUniqueID, service.getVMUniqueID) + listener.AddHandler(cns.V2Prefix+cns.GetNCList, service.nmAgentNCListHandler) } // Initialize HTTP client to be reused in CNS diff --git a/cns/restserver/util.go b/cns/restserver/util.go index 327e09c2f0..43d1e1aef9 100644 --- a/cns/restserver/util.go +++ b/cns/restserver/util.go @@ -14,6 +14,7 @@ import ( "github.com/Azure/azure-container-networking/cns/dockerclient" "github.com/Azure/azure-container-networking/cns/logger" "github.com/Azure/azure-container-networking/cns/networkcontainers" + "github.com/Azure/azure-container-networking/cns/nodesubnet" "github.com/Azure/azure-container-networking/cns/types" "github.com/Azure/azure-container-networking/cns/wireserver" acn "github.com/Azure/azure-container-networking/common" @@ -158,6 +159,12 @@ func (service *HTTPRestService) saveNetworkContainerGoalState(req cns.CreateNetw existingSecondaryIPConfigs = existingNCStatus.CreateNetworkContainerRequest.SecondaryIPConfigs vfpUpdateComplete = existingNCStatus.VfpUpdateComplete } + + if req.NetworkContainerid == nodesubnet.NodeSubnetNCID { + hostVersion = nodesubnet.NodeSubnetHostVersion + vfpUpdateComplete = true + } + if hostVersion == "" { // Host version is the NC version from NMAgent, set it -1 to indicate no result from NMAgent yet. // TODO, query NMAgent and with aggresive time out and assign latest host version. 
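
An aside on the restserver.go seam above: the iptablesClient/iptablesGetter pair replaces the deleted ipamclient dependency and keeps iptables access mockable in tests. A hedged sketch of a no-op test double satisfying both interfaces; the fake type names are hypothetical, the method sets are copied from this diff, and the code would live in package restserver next to the interfaces:

// fakeIPTables is a no-op double for the iptablesClient interface.
type fakeIPTables struct{}

func (fakeIPTables) ChainExists(table, chain string) (bool, error)                { return false, nil }
func (fakeIPTables) NewChain(table, chain string) error                           { return nil }
func (fakeIPTables) Append(table, chain string, rulespec ...string) error         { return nil }
func (fakeIPTables) Exists(table, chain string, rulespec ...string) (bool, error) { return false, nil }
func (fakeIPTables) Insert(table, chain string, pos int, rulespec ...string) error {
	return nil
}

// fakeIPTablesGetter hands out the double, mirroring the IPtablesProvider
// that the wiring in cns/service/main.go injects.
type fakeIPTablesGetter struct{}

func (fakeIPTablesGetter) GetIPTables() (iptablesClient, error) { return fakeIPTables{}, nil }

// compile-time assertions that the doubles satisfy the seams.
var (
	_ iptablesClient = fakeIPTables{}
	_ iptablesGetter = fakeIPTablesGetter{}
)
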
@@ -187,9 +194,7 @@ func (service *HTTPRestService) saveNetworkContainerGoalState(req cns.CreateNetw fallthrough case cns.JobObject: fallthrough - case cns.COW, cns.BackendNICNC: - fallthrough - case cns.WebApps: + case cns.COW, cns.BackendNICNC, cns.WebApps: switch service.state.OrchestratorType { case cns.Kubernetes: fallthrough @@ -291,6 +296,7 @@ func (service *HTTPRestService) updateIPConfigsStateUntransacted( if hostNCVersionInInt, err = strconv.Atoi(hostVersion); err != nil { return types.UnsupportedNCVersion, fmt.Sprintf("Invalid hostVersion is %s, err:%s", hostVersion, err) } + service.addIPConfigStateUntransacted(req.NetworkContainerid, hostNCVersionInInt, req.SecondaryIPConfigs, existingSecondaryIPConfigs) diff --git a/cns/restserver/util_test.go b/cns/restserver/util_test.go index 56eede0e4e..f2454fc320 100644 --- a/cns/restserver/util_test.go +++ b/cns/restserver/util_test.go @@ -4,6 +4,7 @@ import ( "context" "testing" + "github.com/Azure/azure-container-networking/cns" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -163,7 +164,7 @@ func TestDeleteNCs(t *testing.T) { } func TestGetPnpIDMapping(t *testing.T) { - svc := getTestService() + svc := getTestService(cns.KubernetesCRD) svc.state.PnpIDByMacAddress = map[string]string{ "macaddress1": "value1", } diff --git a/cns/restserver/v2/server_test.go b/cns/restserver/v2/server_test.go index 109c8e0b55..867377e582 100644 --- a/cns/restserver/v2/server_test.go +++ b/cns/restserver/v2/server_test.go @@ -50,7 +50,7 @@ func startService(cnsPort, cnsURL string) error { nmagentClient := &fakes.NMAgentClientFake{} service, err := restserver.NewHTTPRestService(&config, &fakes.WireserverClientFake{}, - &fakes.WireserverProxyFake{}, nmagentClient, nil, nil, nil, + &fakes.WireserverProxyFake{}, &restserver.IPtablesProvider{}, nmagentClient, nil, nil, nil, fakes.NewMockIMDSClient()) if err != nil { return errors.Wrap(err, "Failed to initialize service") diff --git a/cns/service.go b/cns/service.go index b46dde7041..ab7a0be3c3 100644 --- a/cns/service.go +++ b/cns/service.go @@ -17,7 +17,6 @@ import ( "github.com/Azure/azure-container-networking/cns/logger" acn "github.com/Azure/azure-container-networking/common" "github.com/Azure/azure-container-networking/keyvault" - "github.com/Azure/azure-container-networking/log" localtls "github.com/Azure/azure-container-networking/server/tls" "github.com/Azure/azure-container-networking/store" "github.com/Azure/azure-sdk-for-go/sdk/azidentity" @@ -29,6 +28,8 @@ const ( genericData = "com.microsoft.azure.network.generic" ) +var errTLSConfig = errors.New("unsupported TLS version name from config") + // Service defines Container Networking Service. type Service struct { *common.Service @@ -112,7 +113,7 @@ func (service *Service) AddListener(config *common.ServiceConfig) error { // Start the listener and HTTP and HTTPS server. 
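
The service.go changes that continue below thread a configurable minimum TLS version from TlsSettings.MinTLSVersion into both the file-based and Key Vault-based tls.Config via the new parseTLSVersionName helper (added at the end of this file's diff). A standalone sketch of that mapping; the helper body is restated so the example compiles on its own, and "TLS 1.3" is just an example config value:

package main

import (
	"crypto/tls"
	"fmt"
)

// parseTLSVersionName mirrors the helper this diff adds to cns/service.go.
func parseTLSVersionName(versionName string) (uint16, error) {
	switch versionName {
	case "TLS 1.2":
		return tls.VersionTLS12, nil
	case "TLS 1.3":
		return tls.VersionTLS13, nil
	default:
		return 0, fmt.Errorf("unsupported TLS version name from config: %q", versionName)
	}
}

func main() {
	minVer, err := parseTLSVersionName("TLS 1.3") // stand-in for TlsSettings.MinTLSVersion
	if err != nil {
		panic(err)
	}
	cfg := &tls.Config{MinVersion: minVer, MaxVersion: tls.VersionTLS13}
	fmt.Printf("MinVersion=%#x MaxVersion=%#x\n", cfg.MinVersion, cfg.MaxVersion) // 0x304 is TLS 1.3
}
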
tlsConfig, err := getTLSConfig(config.TLSSettings, config.ErrChan) //nolint if err != nil { - log.Printf("Failed to compose Tls Configuration with error: %+v", err) + logger.Printf("Failed to compose Tls Configuration with error: %+v", err) return errors.Wrap(err, "could not get tls config") } @@ -122,14 +123,14 @@ func (service *Service) AddListener(config *common.ServiceConfig) error { } service.Listener = nodeListener - log.Debugf("[Azure CNS] Successfully initialized a service with config: %+v", config) + logger.Debugf("[Azure CNS] Successfully initialized a service with config: %+v", config) return nil } // Initialize initializes the service and starts the listener. func (service *Service) Initialize(config *common.ServiceConfig) error { - log.Debugf("[Azure CNS] Going to initialize a service with config: %+v", config) + logger.Debugf("[Azure CNS] Going to initialize a service with config: %+v", config) // Initialize the base service. if err := service.Service.Initialize(config); err != nil { @@ -180,10 +181,14 @@ func getTLSConfigFromFile(tlsSettings localtls.TlsSettings) (*tls.Config, error) PrivateKey: privateKey, Leaf: leafCertificate, } + minTLSVersionNumber, err := parseTLSVersionName(tlsSettings.MinTLSVersion) + if err != nil { + return nil, errors.Wrap(err, "parsing MinTLSVersion from config") + } tlsConfig := &tls.Config{ MaxVersion: tls.VersionTLS13, - MinVersion: tls.VersionTLS12, + MinVersion: minTLSVersionNumber, Certificates: []tls.Certificate{ tlsCert, }, @@ -227,8 +232,13 @@ func getTLSConfigFromKeyVault(tlsSettings localtls.TlsSettings, errChan chan<- e errChan <- cr.Refresh(ctx, tlsSettings.KeyVaultCertificateRefreshInterval) }() + minTLSVersionNumber, err := parseTLSVersionName(tlsSettings.MinTLSVersion) + if err != nil { + return nil, errors.Wrap(err, "parsing MinTLSVersion from config") + } + tlsConfig := tls.Config{ - MinVersion: tls.VersionTLS12, + MinVersion: minTLSVersionNumber, MaxVersion: tls.VersionTLS13, GetCertificate: func(_ *tls.ClientHelloInfo) (*tls.Certificate, error) { return cr.GetCertificate(), nil @@ -281,11 +291,11 @@ func mtlsRootCAsFromCertificate(tlsCert *tls.Certificate) (*x509.CertPool, error } func (service *Service) StartListener(config *common.ServiceConfig) error { - log.Debugf("[Azure CNS] Going to start listener: %+v", config) + logger.Debugf("[Azure CNS] Going to start listener: %+v", config) // Initialize the listener. if service.Listener != nil { - log.Debugf("[Azure CNS] Starting listener: %+v", config) + logger.Debugf("[Azure CNS] Starting listener: %+v", config) // Start the listener. // continue to listen on the normal endpoint for http traffic, this will be supported // for sometime until partners migrate fully to https @@ -315,5 +325,18 @@ func (service *Service) ParseOptions(options OptionMap) OptionMap { func (service *Service) SendErrorResponse(w http.ResponseWriter, errMsg error) { resp := errorResponse{errMsg.Error()} err := acn.Encode(w, &resp) - log.Errorf("[%s] %+v %s.", service.Name, &resp, err.Error()) + logger.Errorf("[%s] %+v %s.", service.Name, &resp, err.Error()) +} + +// parseTLSVersionName returns the version number for the provided TLS version name +// (e.g. 
0x0301) +func parseTLSVersionName(versionName string) (uint16, error) { + switch versionName { + case "TLS 1.2": + return tls.VersionTLS12, nil + case "TLS 1.3": + return tls.VersionTLS13, nil + default: + return 0, errors.Wrapf(errTLSConfig, "version name %s", versionName) + } } diff --git a/cns/service/main.go b/cns/service/main.go index 50f2b53d46..df1fc67453 100644 --- a/cns/service/main.go +++ b/cns/service/main.go @@ -19,37 +19,41 @@ import ( "time" "github.com/Azure/azure-container-networking/aitelemetry" - "github.com/Azure/azure-container-networking/cnm/ipam" - "github.com/Azure/azure-container-networking/cnm/network" "github.com/Azure/azure-container-networking/cns" cnsclient "github.com/Azure/azure-container-networking/cns/client" cnscli "github.com/Azure/azure-container-networking/cns/cmd/cli" "github.com/Azure/azure-container-networking/cns/cniconflist" - "github.com/Azure/azure-container-networking/cns/cnireconciler" "github.com/Azure/azure-container-networking/cns/common" "github.com/Azure/azure-container-networking/cns/configuration" + "github.com/Azure/azure-container-networking/cns/deviceplugin" + "github.com/Azure/azure-container-networking/cns/endpointmanager" "github.com/Azure/azure-container-networking/cns/fsnotify" "github.com/Azure/azure-container-networking/cns/grpc" "github.com/Azure/azure-container-networking/cns/healthserver" "github.com/Azure/azure-container-networking/cns/hnsclient" "github.com/Azure/azure-container-networking/cns/imds" "github.com/Azure/azure-container-networking/cns/ipampool" + "github.com/Azure/azure-container-networking/cns/ipampool/metrics" ipampoolv2 "github.com/Azure/azure-container-networking/cns/ipampool/v2" cssctrl "github.com/Azure/azure-container-networking/cns/kubecontroller/clustersubnetstate" mtpncctrl "github.com/Azure/azure-container-networking/cns/kubecontroller/multitenantpodnetworkconfig" nncctrl "github.com/Azure/azure-container-networking/cns/kubecontroller/nodenetworkconfig" podctrl "github.com/Azure/azure-container-networking/cns/kubecontroller/pod" "github.com/Azure/azure-container-networking/cns/logger" + loggerv2 "github.com/Azure/azure-container-networking/cns/logger/v2" "github.com/Azure/azure-container-networking/cns/metric" "github.com/Azure/azure-container-networking/cns/middlewares" "github.com/Azure/azure-container-networking/cns/multitenantcontroller" "github.com/Azure/azure-container-networking/cns/multitenantcontroller/multitenantoperator" "github.com/Azure/azure-container-networking/cns/restserver" restserverv2 "github.com/Azure/azure-container-networking/cns/restserver/v2" + cnipodprovider "github.com/Azure/azure-container-networking/cns/stateprovider/cni" + cnspodprovider "github.com/Azure/azure-container-networking/cns/stateprovider/cns" cnstypes "github.com/Azure/azure-container-networking/cns/types" "github.com/Azure/azure-container-networking/cns/wireserver" acn "github.com/Azure/azure-container-networking/common" "github.com/Azure/azure-container-networking/crd" + "github.com/Azure/azure-container-networking/crd/clustersubnetstate" cssv1alpha1 "github.com/Azure/azure-container-networking/crd/clustersubnetstate/api/v1alpha1" "github.com/Azure/azure-container-networking/crd/multitenancy" mtv1alpha1 "github.com/Azure/azure-container-networking/crd/multitenancy/api/v1alpha1" @@ -64,9 +68,10 @@ import ( "github.com/Azure/azure-container-networking/store" "github.com/Azure/azure-container-networking/telemetry" "github.com/avast/retry-go/v4" + "github.com/go-logr/zapr" + "github.com/google/go-cmp/cmp" 
"github.com/pkg/errors" "go.uber.org/zap" - "go.uber.org/zap/zapcore" "golang.org/x/time/rate" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -81,7 +86,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/healthz" - ctrlzap "sigs.k8s.io/controller-runtime/pkg/log/zap" ctrlmgr "sigs.k8s.io/controller-runtime/pkg/manager" ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics/server" ) @@ -104,9 +108,14 @@ const ( // envVarEnableCNIConflistGeneration enables cni conflist generation if set (value doesn't matter) envVarEnableCNIConflistGeneration = "CNS_ENABLE_CNI_CONFLIST_GENERATION" - cnsReqTimeout = 15 * time.Second - defaultLocalServerIP = "localhost" - defaultLocalServerPort = "10090" + cnsReqTimeout = 15 * time.Second + defaultLocalServerIP = "localhost" + defaultLocalServerPort = "10090" + defaultDevicePluginRetryInterval = 2 * time.Second + defaultNodeInfoCRDPollInterval = 5 * time.Second + defaultDevicePluginMaxRetryCount = 5 + initialVnetNICCount = 0 + initialIBNICCount = 0 ) type cniConflistScenario string @@ -122,7 +131,6 @@ const ( var ( rootCtx context.Context rootErrCh chan error - z *zap.Logger ) // Version is populated by make during build. @@ -212,13 +220,6 @@ var args = acn.ArgumentList{ Type: "string", DefaultValue: "", }, - { - Name: acn.OptStartAzureCNM, - Shorthand: acn.OptStartAzureCNMAlias, - Description: "Start Azure-CNM if flag is set", - Type: "bool", - DefaultValue: false, - }, { Name: acn.OptVersion, Shorthand: acn.OptVersionAlias, @@ -226,6 +227,13 @@ var args = acn.ArgumentList{ Type: "bool", DefaultValue: false, }, + { + Name: acn.OptStoreFileLocation, + Shorthand: acn.OptStoreFileLocationAlias, + Description: "Set store file absolute path", + Type: "string", + DefaultValue: platform.CNMRuntimePath, + }, { Name: acn.OptNetPluginPath, Shorthand: acn.OptNetPluginPathAlias, @@ -243,7 +251,7 @@ var args = acn.ArgumentList{ { Name: acn.OptCreateDefaultExtNetworkType, Shorthand: acn.OptCreateDefaultExtNetworkTypeAlias, - Description: "Create default external network for windows platform with the specified type (l2bridge or l2tunnel)", + Description: "Create default external network for windows platform with the specified type (l2bridge)", Type: "string", DefaultValue: "", }, @@ -268,13 +276,6 @@ var args = acn.ArgumentList{ Type: "int", DefaultValue: "120", }, - { - Name: acn.OptStoreFileLocation, - Shorthand: acn.OptStoreFileLocationAlias, - Description: "Set store file absolute path", - Type: "string", - DefaultValue: platform.CNMRuntimePath, - }, { Name: acn.OptPrivateEndpoint, Shorthand: acn.OptPrivateEndpointAlias, @@ -371,9 +372,9 @@ func init() { // Wait until receiving a signal. 
select { case sig := <-sigCh: - log.Errorf("caught exit signal %v, exiting", sig) + logger.Errorf("caught exit signal %v, exiting", sig) case err := <-rootErrCh: - log.Errorf("unhandled error %v, exiting", err) + logger.Errorf("unhandled error %v, exiting", err) } cancel() }() @@ -434,7 +435,7 @@ func sendRegisterNodeRequest(ctx context.Context, httpClient httpDoer, httpRestS var body bytes.Buffer err := json.NewEncoder(&body).Encode(nodeRegisterRequest) if err != nil { - log.Errorf("[Azure CNS] Failed to register node while encoding json failed with non-retryable err %v", err) + logger.Errorf("Failed to register node while encoding json failed with non-retryable err %v", err) return errors.Wrap(retry.Unrecoverable(err), "failed to sendRegisterNodeRequest") } @@ -460,7 +461,7 @@ func sendRegisterNodeRequest(ctx context.Context, httpClient httpDoer, httpRestS var req cns.SetOrchestratorTypeRequest err = json.NewDecoder(response.Body).Decode(&req) if err != nil { - log.Errorf("[Azure CNS] decoding Node Register response json failed with err %v", err) + logger.Errorf("decoding Node Register response json failed with err %v", err) return errors.Wrap(err, "failed to sendRegisterNodeRequest") } httpRestService.SetNodeOrchestrator(&req) @@ -475,7 +476,7 @@ func startTelemetryService(ctx context.Context) { tb := telemetry.NewTelemetryBuffer(nil) err := tb.CreateAITelemetryHandle(config, false, false, false) if err != nil { - log.Errorf("AI telemetry handle creation failed..:%w", err) + logger.Errorf("AI telemetry handle creation failed: %v", err) return } @@ -484,12 +485,12 @@ func startTelemetryService(ctx context.Context) { tbtemp.Cleanup(telemetry.FdName) err = tb.StartServer() - log.Printf("Telemetry service for CNI started") + logger.Printf("Telemetry service for CNI started") if err != nil { - log.Errorf("Telemetry service failed to start: %w", err) + logger.Errorf("Telemetry service failed to start: %v", err) return } - tb.PushData(rootCtx) + tb.PushData(ctx) } // Main is the entry point for CNS. @@ -497,8 +498,6 @@ func main() { // Initialize and parse command line arguments. 
acn.ParseArgs(&args, printVersion) - environment := acn.GetArg(acn.OptEnvironment).(string) - url := acn.GetArg(acn.OptAPIServerURL).(string) cniPath := acn.GetArg(acn.OptNetPluginPath).(string) cniConfigFile := acn.GetArg(acn.OptNetPluginConfigFile).(string) cnsURL := acn.GetArg(acn.OptCnsURL).(string) @@ -506,10 +505,7 @@ func main() { logLevel := acn.GetArg(acn.OptLogLevel).(int) logTarget := acn.GetArg(acn.OptLogTarget).(int) logDirectory := acn.GetArg(acn.OptLogLocation).(string) - ipamQueryUrl := acn.GetArg(acn.OptIpamQueryUrl).(string) - ipamQueryInterval := acn.GetArg(acn.OptIpamQueryInterval).(int) - startCNM := acn.GetArg(acn.OptStartAzureCNM).(bool) vers := acn.GetArg(acn.OptVersion).(bool) createDefaultExtNetworkType := acn.GetArg(acn.OptCreateDefaultExtNetworkType).(string) telemetryEnabled := acn.GetArg(acn.OptTelemetry).(bool) @@ -589,6 +585,10 @@ func main() { } else { logger.InitAI(aiConfig, ts.DisableTrace, ts.DisableMetric, ts.DisableEvent) } + + if cnsconfig.TelemetrySettings.ConfigSnapshotIntervalInMins > 0 { + go metric.SendCNSConfigSnapshot(rootCtx, cnsconfig) + } } logger.Printf("[Azure CNS] Using config: %+v", cnsconfig) @@ -629,16 +629,31 @@ func main() { } } - // configure zap logger - zconfig := zap.NewProductionConfig() - zconfig.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder - if z, err = zconfig.Build(); err != nil { + // Get host metadata and attach it to logger(v2) appinsights config. + // If this errors, we will not have metadata in the AI logs. Should we exit? + metadata, _ := acn.GetHostMetadata(aitelemetry.MetadataFile) + aifields := loggerv2.MetadataToFields(metadata) + if cnsconfig.Logger.AppInsights != nil { + cnsconfig.Logger.AppInsights.Fields = append(cnsconfig.Logger.AppInsights.Fields, aifields...) + } + + // build the zap logger + z, c, err := loggerv2.New(&cnsconfig.Logger) + defer c() + if err != nil { fmt.Printf("failed to create logger: %v", err) os.Exit(1) } + host, _ := os.Hostname() + z = z.With(zap.String("hostname", host), zap.String("version", version), zap.String("kubernetes_apiserver", os.Getenv("KUBERNETES_SERVICE_HOST"))) + // Set the v2 logger to the global logger if v2 logger enabled. 
+ if cnsconfig.EnableLoggerV2 { + logger.Printf("hotswapping logger v2") //nolint:staticcheck // ignore new deprecation + logger.Log = loggerv2.AsV1(z, c) + } // start the healthz/readyz/metrics server - readyCh := make(chan interface{}) + readyCh := make(chan any) readyChecker := healthz.CheckHandler{ Checker: healthz.Checker(func(*http.Request) error { select { @@ -649,7 +664,13 @@ func main() { return nil }), } - go healthserver.Start(z, cnsconfig.MetricsBindAddress, &healthz.Handler{}, readyChecker) + + healthzHandler, err := healthserver.NewHealthzHandlerWithChecks(&healthserver.Config{PingAPIServer: cnsconfig.EnableAPIServerHealthPing}) + if err != nil { + logger.Errorf("unable to initialize a healthz handler: %v", err) + return + } + go healthserver.Start(z, cnsconfig.MetricsBindAddress, healthzHandler, readyChecker) nmaConfig, err := nmagent.NewConfig(cnsconfig.WireserverIP) if err != nil { @@ -663,16 +684,14 @@ func main() { return } + // copy ChannelMode from cnsconfig to HTTPRemoteRestService config + config.ChannelMode = cnsconfig.ChannelMode if cnsconfig.ChannelMode == cns.Managed { - config.ChannelMode = cns.Managed privateEndpoint = cnsconfig.ManagedSettings.PrivateEndpoint infravnet = cnsconfig.ManagedSettings.InfrastructureNetworkID nodeID = cnsconfig.ManagedSettings.NodeID - } else if cnsconfig.ChannelMode == cns.CRD { - config.ChannelMode = cns.CRD - } else if cnsconfig.ChannelMode == cns.MultiTenantCRD { - config.ChannelMode = cns.MultiTenantCRD - } else if acn.GetArg(acn.OptManaged).(bool) { + } + if isManaged, ok := acn.GetArg(acn.OptManaged).(bool); ok && isManaged { config.ChannelMode = cns.Managed } @@ -685,7 +704,7 @@ func main() { } if telemetryDaemonEnabled { - log.Printf("CNI Telemtry is enabled") + logger.Printf("CNI Telemetry is enabled") go startTelemetryService(rootCtx) } @@ -700,7 +719,7 @@ func main() { lockclient, err := processlock.NewFileLock(platform.CNILockPath + name + store.LockExtension) if err != nil { - log.Printf("Error initializing file lock:%v", err) + logger.Printf("Error initializing file lock:%v", err) return } @@ -714,10 +733,10 @@ func main() { // Initialize endpoint state store if cns is managing endpoint state. 
if cnsconfig.ManageEndpointState { - log.Printf("[Azure CNS] Configured to manage endpoints state") + logger.Printf("[Azure CNS] Configured to manage endpoints state") endpointStoreLock, err := processlock.NewFileLock(platform.CNILockPath + endpointStoreName + store.LockExtension) // nolint if err != nil { - log.Printf("Error initializing endpoint state file lock:%v", err) + logger.Printf("Error initializing endpoint state file lock:%v", err) return } defer endpointStoreLock.Unlock() // nolint @@ -749,8 +768,7 @@ func main() { } imdsClient := imds.NewClient() - - httpRemoteRestService, err := restserver.NewHTTPRestService(&config, wsclient, &wsProxy, nmaClient, + httpRemoteRestService, err := restserver.NewHTTPRestService(&config, wsclient, &wsProxy, &restserver.IPtablesProvider{}, nmaClient, endpointStateStore, conflistGenerator, homeAzMonitor, imdsClient) if err != nil { logger.Errorf("Failed to create CNS object, err:%v.\n", err) @@ -790,6 +808,7 @@ func main() { MSIResourceID: cnsconfig.MSISettings.ResourceID, KeyVaultCertificateRefreshInterval: time.Duration(cnsconfig.KeyVaultSettings.RefreshIntervalInHrs) * time.Hour, UseMTLS: cnsconfig.UseMTLS, + MinTLSVersion: cnsconfig.MinTLSVersion, } } @@ -801,12 +820,12 @@ func main() { } // Setting the remote ARP MAC address to 12-34-56-78-9a-bc on windows for external traffic if HNS is enabled - execClient := platform.NewExecClient(nil) - err = platform.SetSdnRemoteArpMacAddress(execClient) + err = platform.SetSdnRemoteArpMacAddress(rootCtx) if err != nil { logger.Errorf("Failed to set remote ARP MAC address: %v", err) return } + // We are only setting the PriorityVLANTag in 'cns.Direct' mode, because it neatly maps today, to 'isUsingMultitenancy' // In the future, we would want to have a better CNS flag, to explicitly say, this CNS is using multitenancy if cnsconfig.ChannelMode == cns.Direct { @@ -827,33 +846,20 @@ func main() { // Initialze state in if CNS is running in CRD mode // State must be initialized before we start HTTPRestService if config.ChannelMode == cns.CRD { + // Add APIServer FQDN to Log metadata + logger.Log.SetAPIServer(os.Getenv("KUBERNETES_SERVICE_HOST")) // Check the CNI statefile mount, and if the file is empty // stub an empty JSON object - if err := cnireconciler.WriteObjectToCNIStatefile(); err != nil { + if err := cnipodprovider.WriteObjectToCNIStatefile(); err != nil { //nolint:govet //shadow okay logger.Errorf("Failed to write empty object to CNI state: %v", err) return } - // We might be configured to reinitialize state from the CNI instead of the apiserver. - // If so, we should check that the CNI is new enough to support the state commands, - // otherwise we fall back to the existing behavior. - if cnsconfig.InitializeFromCNI { - var isGoodVer bool - isGoodVer, err = cnireconciler.IsDumpStateVer() - if err != nil { - logger.Errorf("error checking CNI ver: %v", err) - } - - // override the prior config flag with the result of the ver check. - cnsconfig.InitializeFromCNI = isGoodVer - - if cnsconfig.InitializeFromCNI { - // Set the PodInfoVersion by initialization type, so that the - // PodInfo maps use the correct key schema - cns.GlobalPodInfoScheme = cns.InterfaceIDPodInfoScheme - } - } + // By default reinitialize state from the CNI. + // Set the PodInfoVersion by initialization type, so that the + // PodInfo maps use the correct key schema + cns.GlobalPodInfoScheme = cns.InterfaceIDPodInfoScheme // If cns manageendpointstate is true, then cns maintains its own state and reconciles from it. 
// in this case, cns maintains state with containerid as key and so in-memory cache can lookup // and update based on container id. @@ -863,7 +869,7 @@ func main() { logger.Printf("Set GlobalPodInfoScheme %v (InitializeFromCNI=%t)", cns.GlobalPodInfoScheme, cnsconfig.InitializeFromCNI) - err = InitializeCRDState(rootCtx, httpRemoteRestService, cnsconfig) + err = InitializeCRDState(rootCtx, z, httpRemoteRestService, cnsconfig) if err != nil { logger.Errorf("Failed to start CRD Controller, err:%v.\n", err) return @@ -884,6 +890,32 @@ func main() { } } + // AzureHost channelmode indicates Nodesubnet. IPs are to be fetched from NMagent. + if config.ChannelMode == cns.AzureHost { + if !cnsconfig.ManageEndpointState { + logger.Errorf("ManageEndpointState must be set to true for AzureHost mode") + return + } + + // If cns manageendpointstate is true, then cns maintains its own state and reconciles from it. + // in this case, cns maintains state with containerid as key and so in-memory cache can lookup + // and update based on container id. + cns.GlobalPodInfoScheme = cns.InfraIDPodInfoScheme + + var podInfoByIPProvider cns.PodInfoByIPProvider + podInfoByIPProvider, err = getPodInfoByIPProvider(rootCtx, cnsconfig, httpRemoteRestService, nil, "") + if err != nil { + logger.Errorf("[Azure CNS] Failed to get PodInfoByIPProvider: %v", err) + return + } + + err = httpRemoteRestService.InitializeNodeSubnet(rootCtx, podInfoByIPProvider) + if err != nil { + logger.Errorf("[Azure CNS] Failed to initialize node subnet: %v", err) + return + } + } + // Initialize multi-tenant controller if the CNS is running in MultiTenantCRD mode. // It must be started before we start HTTPRemoteRestService. if config.ChannelMode == cns.MultiTenantCRD { @@ -894,6 +926,50 @@ func main() { } } + if cnsconfig.EnableSwiftV2 && cnsconfig.EnableK8sDevicePlugin { + // Create device plugin manager instance + pluginManager := deviceplugin.NewPluginManager(z) + pluginManager.AddPlugin(mtv1alpha1.DeviceTypeVnetNIC, initialVnetNICCount) + pluginManager.AddPlugin(mtv1alpha1.DeviceTypeInfiniBandNIC, initialIBNICCount) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Start device plugin manager in a separate goroutine + go func() { + retryCount := 0 + ticker := time.NewTicker(defaultDevicePluginRetryInterval) + // Ensure the ticker is stopped on exit + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + z.Info("Context canceled, stopping plugin manager") + return + case <-ticker.C: + if pluginErr := pluginManager.Run(ctx); pluginErr != nil { + z.Error("plugin manager exited with error", zap.Error(pluginErr)) + retryCount++ + // Implementing a basic circuit breaker + if retryCount >= defaultDevicePluginMaxRetryCount { + z.Error("Max retries reached, stopping plugin manager") + return + } + } else { + return + } + } + } + }() + + // go routine to poll node info crd and update device counts + go func() { + if pollErr := pollNodeInfoCRDAndUpdatePlugin(ctx, z, pluginManager); pollErr != nil { + z.Error("Error in pollNodeInfoCRDAndUpdatePlugin", zap.Error(pollErr)) + } + }() + } + // Conditionally initialize and start the gRPC server if cnsconfig.GRPCSettings.Enable { // Define gRPC server settings @@ -922,6 +998,7 @@ func main() { } // if user provides cns url by -c option, then only start HTTP remote server using this url + logger.Printf("[Azure CNS] Start HTTP Remote server") if httpRemoteRestService != nil { if cnsconfig.EnablePprof { @@ -962,14 +1039,22 @@ func main() { if 
cnsconfig.EnableAsyncPodDelete {
 // Start fs watcher here
- cnsclient, err := cnsclient.New("", cnsReqTimeout) //nolint
+ z.Info("AsyncPodDelete is enabled")
+ logger.Printf("AsyncPodDelete is enabled")
+ cnsclient, err := cnsclient.New("", cnsReqTimeout) // nolint
 if err != nil {
 z.Error("failed to create cnsclient", zap.Error(err))
 }
 go func() {
 _ = retry.Do(func() error {
 z.Info("starting fsnotify watcher to process missed Pod deletes")
- w, err := fsnotify.New(cnsclient, cnsconfig.AsyncPodDeletePath, z)
+ logger.Printf("starting fsnotify watcher to process missed Pod deletes")
+ var endpointCleanup fsnotify.ReleaseIPsClient = cnsclient
+ // use the endpointmanager implementation in the stateless CNI scenario to remove the HNS endpoint alongside the IPs
+ if cnsconfig.IsStalessCNIWindows() {
+ endpointCleanup = endpointmanager.WithPlatformReleaseIPsManager(cnsclient)
+ }
+ w, err := fsnotify.New(endpointCleanup, cnsconfig.AsyncPodDeletePath, z)
 if err != nil {
 z.Error("failed to create fsnotify watcher", zap.Error(err))
 return errors.Wrap(err, "failed to create fsnotify watcher, will retry")
 }
@@ -979,7 +1064,7 @@
 return errors.Wrap(err, "failed to start fsnotify watcher, will retry")
 }
 return nil
- }, retry.DelayType(retry.BackOffDelay), retry.Attempts(0), retry.Context(rootCtx)) // infinite cancellable exponential backoff retrier
+ }, retry.DelayType(retry.BackOffDelay), retry.UntilSucceeded(), retry.Context(rootCtx)) // infinite cancellable exponential backoff retrier
 }()
 }
@@ -1024,63 +1109,11 @@
 }(privateEndpoint, infravnet, nodeID)
 }
 
- var (
- netPlugin network.NetPlugin
- ipamPlugin ipam.IpamPlugin
- lockclientCnm processlock.Interface
- )
-
- if startCNM {
- var pluginConfig acn.PluginConfig
- pluginConfig.Version = version
-
- // Create a channel to receive unhandled errors from the plugins.
- pluginConfig.ErrChan = make(chan error, 1)
-
- // Create network plugin.
- netPlugin, err = network.NewPlugin(&pluginConfig)
- if err != nil {
- logger.Errorf("Failed to create network plugin, err:%v.\n", err)
- return
- }
-
- // Create IPAM plugin.
- ipamPlugin, err = ipam.NewPlugin(&pluginConfig)
- if err != nil {
- logger.Errorf("Failed to create IPAM plugin, err:%v.\n", err)
- return
- }
-
- lockclientCnm, err = processlock.NewFileLock(platform.CNILockPath + pluginName + store.LockExtension)
- if err != nil {
- log.Printf("Error initializing file lock:%v", err)
- return
- }
-
- // Create the key value store.
- pluginStoreFile := storeFileLocation + pluginName + ".json"
- pluginConfig.Store, err = store.NewJsonFileStore(pluginStoreFile, lockclientCnm, nil)
- if err != nil {
- logger.Errorf("Failed to create plugin store file %s, due to error : %v\n", pluginStoreFile, err)
- return
- }
-
- // Set plugin options.
- netPlugin.SetOption(acn.OptAPIServerURL, url)
- logger.Printf("Start netplugin\n")
- if err := netPlugin.Start(&pluginConfig); err != nil {
- logger.Errorf("Failed to create network plugin, err:%v.\n", err)
- return
- }
-
- ipamPlugin.SetOption(acn.OptEnvironment, environment)
- ipamPlugin.SetOption(acn.OptAPIServerURL, url)
- ipamPlugin.SetOption(acn.OptIpamQueryUrl, ipamQueryUrl)
- ipamPlugin.SetOption(acn.OptIpamQueryInterval, ipamQueryInterval)
- if err := ipamPlugin.Start(&pluginConfig); err != nil {
- logger.Errorf("Failed to create IPAM plugin, err:%v.\n", err)
- return
- }
+ if config.ChannelMode == cns.AzureHost {
+ // at this point, rest service is running. We can now start serving new requests.
So call StartNodeSubnet, which + // will fetch secondary IPs and generate conflist. Do not move this all before rest service start - this will cause + // CNI to start sending requests, and if the service doesn't start successfully, the requests will fail. + httpRemoteRestService.StartNodeSubnet(rootCtx) } // mark the service as "ready" @@ -1102,28 +1135,97 @@ func main() { httpRemoteRestService.Stop() } - if startCNM { - logger.Printf("stop cnm plugin") - if netPlugin != nil { - netPlugin.Stop() - } + if err = lockclient.Unlock(); err != nil { + logger.Errorf("lockclient cns unlock error:%v", err) + } - if ipamPlugin != nil { - logger.Printf("stop ipam plugin") - ipamPlugin.Stop() - } + logger.Printf("CNS exited") + logger.Close() +} - if err = lockclientCnm.Unlock(); err != nil { - log.Errorf("lockclient cnm unlock error:%v", err) - } +// Poll CRD until it's set and update PluginManager +func pollNodeInfoCRDAndUpdatePlugin(ctx context.Context, zlog *zap.Logger, pluginManager *deviceplugin.PluginManager) error { + kubeConfig, err := ctrl.GetConfig() + if err != nil { + logger.Errorf("Failed to get kubeconfig for request controller: %v", err) + return errors.Wrap(err, "failed to get kubeconfig") } + kubeConfig.UserAgent = "azure-cns-" + version - if err = lockclient.Unlock(); err != nil { - log.Errorf("lockclient cns unlock error:%v", err) + clientset, err := kubernetes.NewForConfig(kubeConfig) + if err != nil { + return errors.Wrap(err, "failed to build clientset") } - logger.Printf("CNS exited") - logger.Close() + nodeName, err := configuration.NodeName() + if err != nil { + return errors.Wrap(err, "failed to get NodeName") + } + + node, err := clientset.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) + if err != nil { + return errors.Wrapf(err, "failed to get node %s", nodeName) + } + + // check the Node labels for Swift V2 + if _, ok := node.Labels[configuration.LabelNodeSwiftV2]; !ok { + zlog.Info("Node is not labeled for Swift V2, skipping polling nodeinfo crd") + return nil + } + + directcli, err := client.New(kubeConfig, client.Options{Scheme: multitenancy.Scheme}) + if err != nil { + return errors.Wrap(err, "failed to create ctrl client") + } + + nodeInfoCli := multitenancy.NodeInfoClient{ + Cli: directcli, + } + + ticker := time.NewTicker(defaultNodeInfoCRDPollInterval) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + zlog.Info("Polling context canceled, exiting") + return nil + case <-ticker.C: + // Fetch the CRD status + nodeInfo, err := nodeInfoCli.Get(ctx, node.Name) + if err != nil { + zlog.Error("Error fetching nodeinfo CRD", zap.Error(err)) + return errors.Wrap(err, "failed to get nodeinfo crd") + } + + // Check if the status is set + if !cmp.Equal(nodeInfo.Status, mtv1alpha1.NodeInfoStatus{}) && len(nodeInfo.Status.DeviceInfos) > 0 { + // Create a map to count devices by type + deviceCounts := map[mtv1alpha1.DeviceType]int{ + mtv1alpha1.DeviceTypeVnetNIC: 0, + mtv1alpha1.DeviceTypeInfiniBandNIC: 0, + } + + // Aggregate device counts from the CRD + for _, deviceInfo := range nodeInfo.Status.DeviceInfos { + switch deviceInfo.DeviceType { + case mtv1alpha1.DeviceTypeVnetNIC, mtv1alpha1.DeviceTypeInfiniBandNIC: + deviceCounts[deviceInfo.DeviceType]++ + default: + zlog.Error("Unknown device type", zap.String("deviceType", string(deviceInfo.DeviceType))) + } + } + + // Update the plugin manager with device counts + for deviceType, count := range deviceCounts { + pluginManager.TrackDevices(deviceType, count) + } + + // Exit polling loop once the CRD 
status is successfully processed + return nil + } + } + } } func InitializeMultiTenantController(ctx context.Context, httpRestService cns.HTTPService, cnsconfig configuration.CNSConfig) error { @@ -1204,7 +1306,7 @@ type nodeNetworkConfigGetter interface { } type ipamStateReconciler interface { - ReconcileIPAMState(ncRequests []*cns.CreateNetworkContainerRequest, podInfoByIP map[string]cns.PodInfo, nnc *v1alpha.NodeNetworkConfig) cnstypes.ResponseCode + ReconcileIPAMStateForSwift(ncRequests []*cns.CreateNetworkContainerRequest, podInfoByIP map[string]cns.PodInfo, nnc *v1alpha.NodeNetworkConfig) cnstypes.ResponseCode } // TODO(rbtr) where should this live?? @@ -1262,7 +1364,7 @@ func reconcileInitialCNSState(ctx context.Context, cli nodeNetworkConfigGetter, } // Call cnsclient init cns passing those two things. - if err := restserver.ResponseCodeToError(ipamReconciler.ReconcileIPAMState(ncReqs, podInfoByIP, nnc)); err != nil { + if err := restserver.ResponseCodeToError(ipamReconciler.ReconcileIPAMStateForSwift(ncReqs, podInfoByIP, nnc)); err != nil { return errors.Wrap(err, "failed to reconcile CNS IPAM state") } @@ -1270,7 +1372,9 @@ func reconcileInitialCNSState(ctx context.Context, cli nodeNetworkConfigGetter, } // InitializeCRDState builds and starts the CRD controllers. -func InitializeCRDState(ctx context.Context, httpRestService cns.HTTPService, cnsconfig *configuration.CNSConfig) error { +// +//nolint:gocyclo // legacy +func InitializeCRDState(ctx context.Context, z *zap.Logger, httpRestService cns.HTTPService, cnsconfig *configuration.CNSConfig) error { // convert interface type to implementation type httpRestServiceImplementation, ok := httpRestService.(*restserver.HTTPRestService) if !ok { @@ -1329,40 +1433,11 @@ func InitializeCRDState(ctx context.Context, httpRestService cns.HTTPService, cn } } - var podInfoByIPProvider cns.PodInfoByIPProvider - switch { - case cnsconfig.ManageEndpointState: - logger.Printf("Initializing from self managed endpoint store") - podInfoByIPProvider, err = cnireconciler.NewCNSPodInfoProvider(httpRestServiceImplementation.EndpointStateStore) // get reference to endpoint state store from rest server - if err != nil { - if errors.Is(err, store.ErrKeyNotFound) { - logger.Printf("[Azure CNS] No endpoint state found, skipping initializing CNS state") - } else { - return errors.Wrap(err, "failed to create CNS PodInfoProvider") - } - } - case cnsconfig.InitializeFromCNI: - logger.Printf("Initializing from CNI") - podInfoByIPProvider, err = cnireconciler.NewCNIPodInfoProvider() - if err != nil { - return errors.Wrap(err, "failed to create CNI PodInfoProvider") - } - default: - logger.Printf("Initializing from Kubernetes") - podInfoByIPProvider = cns.PodInfoByIPProviderFunc(func() (map[string]cns.PodInfo, error) { - pods, err := clientset.CoreV1().Pods("").List(ctx, metav1.ListOptions{ //nolint:govet // ignore err shadow - FieldSelector: "spec.nodeName=" + nodeName, - }) - if err != nil { - return nil, errors.Wrap(err, "failed to list Pods for PodInfoProvider") - } - podInfo, err := cns.KubePodsToPodInfoByIP(pods.Items) - if err != nil { - return nil, errors.Wrap(err, "failed to convert Pods to PodInfoByIP") - } - return podInfo, nil - }) + podInfoByIPProvider, err := getPodInfoByIPProvider(ctx, cnsconfig, httpRestServiceImplementation, clientset, nodeName) + if err != nil { + return errors.Wrap(err, "failed to initialize ip state") } + // create scoped kube clients. 
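The provider selected above plugs into CNS through a small func-adapter, the same trick as http.HandlerFunc. A minimal sketch of that pattern, assuming the interface shape implied by the PodInfoByIP calls in this diff (not a copy of the repo's definition):

type PodInfoByIPProvider interface {
	PodInfoByIP() (map[string]cns.PodInfo, error)
}

// PodInfoByIPProviderFunc adapts a bare function to the interface, which is
// how the removed Kubernetes-backed branch could express its provider as an
// inline closure.
type PodInfoByIPProviderFunc func() (map[string]cns.PodInfo, error)

func (f PodInfoByIPProviderFunc) PodInfoByIP() (map[string]cns.PodInfo, error) {
	return f()
}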
directcli, err := client.New(kubeConfig, client.Options{Scheme: nodenetworkconfig.Scheme}) if err != nil { @@ -1380,20 +1455,18 @@ func InitializeCRDState(ctx context.Context, httpRestService cns.HTTPService, cn // aks addons to come up so retry a bit more aggresively here. // will retry 10 times maxing out at a minute taking about 8 minutes before it gives up. attempt := 0 - err = retry.Do(func() error { + _ = retry.Do(func() error { attempt++ logger.Printf("reconciling initial CNS state attempt: %d", attempt) err = reconcileInitialCNSState(ctx, directscopedcli, httpRestServiceImplementation, podInfoByIPProvider) if err != nil { logger.Errorf("failed to reconcile initial CNS state, attempt: %d err: %v", attempt, err) + nncInitFailure.Inc() } return errors.Wrap(err, "failed to initialize CNS state") - }, retry.Context(ctx), retry.Delay(initCNSInitalDelay), retry.MaxDelay(time.Minute)) - if err != nil { - return err - } + }, retry.Context(ctx), retry.Delay(initCNSInitalDelay), retry.MaxDelay(time.Minute), retry.UntilSucceeded()) logger.Printf("reconciled initial CNS state after %d attempts", attempt) - + hasNNCInitialized.Set(1) scheme := kuberuntime.NewScheme() if err := corev1.AddToScheme(scheme); err != nil { //nolint:govet // intentional shadow return errors.Wrap(err, "failed to add corev1 to scheme") @@ -1429,11 +1502,19 @@ func InitializeCRDState(ctx context.Context, httpRestService cns.HTTPService, cn } } + if cnsconfig.EnableSubnetScarcity { + cacheOpts.ByObject[&cssv1alpha1.ClusterSubnetState{}] = cache.ByObject{ + Namespaces: map[string]cache.Config{ + "kube-system": {}, + }, + } + } + managerOpts := ctrlmgr.Options{ Scheme: scheme, Metrics: ctrlmetrics.Options{BindAddress: "0"}, Cache: cacheOpts, - Logger: ctrlzap.New(), + Logger: zapr.NewLogger(z), } manager, err := ctrl.NewManager(kubeConfig, managerOpts) @@ -1454,8 +1535,15 @@ func InitializeCRDState(ctx context.Context, httpRestService cns.HTTPService, cn cssCh := make(chan cssv1alpha1.ClusterSubnetState) ipDemandCh := make(chan int) if cnsconfig.EnableIPAMv2 { + cssSrc := func(context.Context) ([]cssv1alpha1.ClusterSubnetState, error) { return nil, nil } + if cnsconfig.EnableSubnetScarcity { + cssSrc = clustersubnetstate.NewClient(manager.GetClient()).List + } nncCh := make(chan v1alpha.NodeNetworkConfig) - poolMonitor = ipampoolv2.NewMonitor(z, httpRestServiceImplementation, cachedscopedcli, ipDemandCh, nncCh, cssCh).AsV1(nncCh) + pmv2 := ipampoolv2.NewMonitor(z, httpRestServiceImplementation, cachedscopedcli, ipDemandCh, nncCh, cssCh) + obs := metrics.NewLegacyMetricsObserver(httpRestService.GetPodIPConfigState, cachedscopedcli.Get, cssSrc) + pmv2.WithLegacyMetricsObserver(obs) + poolMonitor = pmv2.AsV1(nncCh) } else { poolOpts := ipampool.Options{ RefreshDelay: poolIPAMRefreshRateInMilliseconds * time.Millisecond, @@ -1469,7 +1557,10 @@ func InitializeCRDState(ctx context.Context, httpRestService cns.HTTPService, cn nodeIP := configuration.NodeIP() nncReconciler := nncctrl.NewReconciler(httpRestServiceImplementation, poolMonitor, nodeIP) // pass Node to the Reconciler for Controller xref - if err := nncReconciler.SetupWithManager(manager, node); err != nil { //nolint:govet // intentional shadow + // IPAMv1 - reconcile only status changes (where generation doesn't change). + // IPAMv2 - reconcile all updates. 
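For reference, a hedged sketch of how a flag like filterGenerationChange is typically honored with controller-runtime event filters; the repo's actual SetupWithManager body is not shown in this diff, so the builder wiring, function name, and signature below are illustrative only:

import (
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	"github.com/Azure/azure-container-networking/crd/nodenetworkconfig/api/v1alpha"
)

func setupNNC(mgr ctrl.Manager, r reconcile.Reconciler, filterGenerationChange bool) error {
	b := ctrl.NewControllerManagedBy(mgr).For(&v1alpha.NodeNetworkConfig{})
	if filterGenerationChange {
		// IPAMv1: pass only update events where metadata.generation is unchanged,
		// i.e. status-only writes. (Create/delete event handling is elided here.)
		b = b.WithEventFilter(predicate.Not(predicate.GenerationChangedPredicate{}))
	}
	// IPAMv2 path: no filter, reconcile all updates.
	return b.Complete(r)
}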
+ filterGenerationChange := !cnsconfig.EnableIPAMv2 + if err := nncReconciler.SetupWithManager(manager, node, filterGenerationChange); err != nil { //nolint:govet // intentional shadow return errors.Wrapf(err, "failed to setup nnc reconciler with manager") } @@ -1539,13 +1630,14 @@ func InitializeCRDState(ctx context.Context, httpRestService cns.HTTPService, cn // wait for the Reconciler to run once on a NNC that was made for this Node. // the nncReadyCtx has a timeout of 15 minutes, after which we will consider // this false and the NNC Reconciler stuck/failed, log and retry. - nncReadyCtx, _ := context.WithTimeout(ctx, 15*time.Minute) //nolint // it will time out and not leak + nncReadyCtx, cancel := context.WithTimeout(ctx, 15*time.Minute) // nolint // it will time out and not leak if started, err := nncReconciler.Started(nncReadyCtx); !started { - log.Errorf("NNC reconciler has not started, does the NNC exist? err: %v", err) + logger.Errorf("NNC reconciler has not started, does the NNC exist? err: %v", err) nncReconcilerStartFailures.Inc() continue } logger.Printf("NodeNetworkConfig reconciler has started.") + cancel() break } @@ -1569,6 +1661,35 @@ func InitializeCRDState(ctx context.Context, httpRestService cns.HTTPService, cn return nil } +// getPodInfoByIPProvider returns a PodInfoByIPProvider that reads endpoint state from the configured source +func getPodInfoByIPProvider( + ctx context.Context, + cnsconfig *configuration.CNSConfig, + httpRestServiceImplementation *restserver.HTTPRestService, + clientset *kubernetes.Clientset, + nodeName string, +) (podInfoByIPProvider cns.PodInfoByIPProvider, err error) { + switch { + case cnsconfig.ManageEndpointState: + logger.Printf("Initializing from self managed endpoint store") + podInfoByIPProvider, err = cnspodprovider.New(httpRestServiceImplementation.EndpointStateStore) // get reference to endpoint state store from rest server + if err != nil { + if errors.Is(err, store.ErrKeyNotFound) { + logger.Printf("[Azure CNS] No endpoint state found, skipping initializing CNS state") + } else { + return podInfoByIPProvider, errors.Wrap(err, "failed to create CNS PodInfoProvider") + } + } + default: + logger.Printf("Initializing from CNI") + podInfoByIPProvider, err = cnipodprovider.New() + if err != nil { + return podInfoByIPProvider, errors.Wrap(err, "failed to create CNI PodInfoProvider") + } + } + return podInfoByIPProvider, nil +} + // createOrUpdateNodeInfoCRD polls imds to learn the VM Unique ID and then creates or updates the NodeInfo CRD // with that vm unique ID func createOrUpdateNodeInfoCRD(ctx context.Context, restConfig *rest.Config, node *corev1.Node) error { @@ -1610,7 +1731,7 @@ func createOrUpdateNodeInfoCRD(ctx context.Context, restConfig *rest.Config, nod // PopulateCNSEndpointState initilizes CNS Endpoint State by Migrating the CNI state. func PopulateCNSEndpointState(endpointStateStore store.KeyValueStore) error { logger.Printf("State Migration is enabled") - endpointState, err := cnireconciler.MigrateCNISate() + endpointState, err := cnspodprovider.MigrateCNISate() if err != nil { return errors.Wrap(err, "failed to create CNS Endpoint state from CNI") } diff --git a/cns/service/metrics.go b/cns/service/metrics.go index ca4da8a7f4..44f9e9a945 100644 --- a/cns/service/metrics.go +++ b/cns/service/metrics.go @@ -5,31 +5,49 @@ import ( "sigs.k8s.io/controller-runtime/pkg/metrics" ) -// managerStartFailures is a monotic counter which tracks the number of times the controller-runtime -// manager failed to start. 
To drive alerting based on this metric, it is recommended to use the rate
-// of increase over a period of time. A positive rate of change indicates that the CNS is actively
-// failing and retrying.
-var managerStartFailures = prometheus.NewCounter(
-	prometheus.CounterOpts{
-		Name: "cns_ctrlmanager_start_failures_total",
-		Help: "Number of times the controller-runtime manager failed to start.",
-	},
-)
-
-// nncReconcilerStartFailures is a monotic counter which tracks the number of times the NNC reconciler
-// has failed to start within the timeout period. To drive alerting based on this metric, it is
-// recommended to use the rate of increase over a period of time. A positive rate of change indicates
-// that the CNS is actively failing and retrying.
-var nncReconcilerStartFailures = prometheus.NewCounter(
-	prometheus.CounterOpts{
-		Name: "cns_nnc_reconciler_start_failures_total",
-		Help: "Number of times the NNC reconciler has failed to start within the timeout period.",
-	},
+var (
+	// managerStartFailures is a monotonic counter which tracks the number of times the controller-runtime
+	// manager failed to start. To drive alerting based on this metric, it is recommended to use the rate
+	// of increase over a period of time. A positive rate of change indicates that the CNS is actively
+	// failing and retrying.
+	managerStartFailures = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Name: "cns_ctrlmanager_start_failures_total",
+			Help: "Number of times the controller-runtime manager failed to start.",
+		},
+	)
+	// nncReconcilerStartFailures is a monotonic counter which tracks the number of times the NNC reconciler
+	// has failed to start within the timeout period. To drive alerting based on this metric, it is
+	// recommended to use the rate of increase over a period of time. A positive rate of change indicates
+	// that the CNS is actively failing and retrying.
+	nncReconcilerStartFailures = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Name: "cns_nnc_reconciler_start_failures_total",
+			Help: "Number of times the NNC reconciler has failed to start within the timeout period.",
+		},
+	)
+	// nncInitFailure is a monotonic counter which tracks the number of times the initial NNC reconcile
+	// has failed.
+	nncInitFailure = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Name: "cns_nnc_init_failures_total",
+			Help: "Number of times the initial NNC reconcile has failed.",
+		},
+	)
+	// hasNNCInitialized is a gauge which tracks whether the initial NNC reconcile has completed.
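	// (A hedged alerting sketch, not part of this patch: with the metric names above,
	// PromQL such as
	//   rate(cns_nnc_init_failures_total[5m]) > 0   // init actively failing and retrying
	//   cns_nnc_initialized == 0                    // init never completed since process start
	// matches the rate-based guidance in these comments.)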
+ hasNNCInitialized = prometheus.NewGauge( + prometheus.GaugeOpts{ + Name: "cns_nnc_initialized", + Help: "Whether the initial NNC reconcile has completed.", + }, + ) ) func init() { metrics.Registry.MustRegister( managerStartFailures, nncReconcilerStartFailures, + nncInitFailure, + hasNNCInitialized, ) } diff --git a/cns/service_test.go b/cns/service_test.go index 9bf4af8ce7..d20c2ef11a 100644 --- a/cns/service_test.go +++ b/cns/service_test.go @@ -76,6 +76,7 @@ func TestNewService(t *testing.T) { TLSPort: "10091", TLSSubjectName: "localhost", TLSCertificatePath: testCertFilePath, + MinTLSVersion: "TLS 1.2", } svc, err := NewService(config.Name, config.Version, config.ChannelMode, config.Store) @@ -94,10 +95,13 @@ func TestNewService(t *testing.T) { err = svc.StartListener(config) require.NoError(t, err) + minTLSVersionNumber, err := parseTLSVersionName(config.TLSSettings.MinTLSVersion) + require.NoError(t, err) + tlsClient := &http.Client{ Transport: &http.Transport{ TLSClientConfig: &tls.Config{ - MinVersion: tls.VersionTLS12, + MinVersion: minTLSVersionNumber, MaxVersion: tls.VersionTLS13, ServerName: config.TLSSettings.TLSSubjectName, // #nosec G402 for test purposes only @@ -134,6 +138,7 @@ func TestNewService(t *testing.T) { TLSSubjectName: "localhost", TLSCertificatePath: testCertFilePath, UseMTLS: true, + MinTLSVersion: "TLS 1.2", } svc, err := NewService(config.Name, config.Version, config.ChannelMode, config.Store) @@ -322,3 +327,31 @@ func createTestCertificate(t *testing.T) string { return testCertFilePath } + +func TestTLSVersionNumber(t *testing.T) { + t.Run("unsupported ServerSettings.MinTLSVersion TLS 1.0", func(t *testing.T) { + versionNumber, err := parseTLSVersionName("TLS 1.0") + require.Equal(t, uint16(0), versionNumber) + require.Error(t, err) + require.ErrorContains(t, err, "unsupported TLS version name") + }) + + t.Run("unsupported ServerSettings.MinTLSVersion TLS 1.1", func(t *testing.T) { + versionNumber, err := parseTLSVersionName("TLS 1.1") + require.Equal(t, uint16(0), versionNumber) + require.Error(t, err) + require.ErrorContains(t, err, "unsupported TLS version name") + }) + t.Run("unsupported ServerSettings.MinTLSVersion TLS 1.4", func(t *testing.T) { + versionNumber, err := parseTLSVersionName("TLS 1.4") + require.Equal(t, uint16(0), versionNumber) + require.Error(t, err) + require.ErrorContains(t, err, "unsupported TLS version name") + }) + + t.Run("valid ServerSettings.MinTLSVersion", func(t *testing.T) { + versionNumber, err := parseTLSVersionName("TLS 1.2") + require.Equal(t, uint16(tls.VersionTLS12), versionNumber) + require.NoError(t, err) + }) +} diff --git a/cns/stateprovider/cni/podinfoprovider.go b/cns/stateprovider/cni/podinfoprovider.go new file mode 100644 index 0000000000..ee0325e675 --- /dev/null +++ b/cns/stateprovider/cni/podinfoprovider.go @@ -0,0 +1,49 @@ +package cni + +import ( + "fmt" + + "github.com/Azure/azure-container-networking/cni/api" + "github.com/Azure/azure-container-networking/cni/client" + "github.com/Azure/azure-container-networking/cns" + "github.com/pkg/errors" + kexec "k8s.io/utils/exec" +) + +// New returns an implementation of cns.PodInfoByIPProvider +// that execs out to the CNI and uses the response to build the PodInfo map. 
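// A hedged usage sketch (caller names mirror getPodInfoByIPProvider in cns/service above):
//
//	provider, err := cnipodprovider.New() // execs the CNI once and snapshots its endpoint state
//	if err != nil {
//		// CNI missing, or the state dump failed
//	}
//	podsByIP, err := provider.PodInfoByIP() // converts the captured snapshot on each call
//
// Note the snapshot semantics: New invokes the CNI a single time, and later
// PodInfoByIP calls re-read that captured state rather than re-exec'ing the CNI.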
+func New() (cns.PodInfoByIPProvider, error) { + return podInfoProvider(kexec.New()) +} + +func podInfoProvider(exec kexec.Interface) (cns.PodInfoByIPProvider, error) { + cli := client.New(exec) + state, err := cli.GetEndpointState() + if err != nil { + return nil, fmt.Errorf("failed to invoke CNI client.GetEndpointState(): %w", err) + } + return cns.PodInfoByIPProviderFunc(func() (map[string]cns.PodInfo, error) { + return cniStateToPodInfoByIP(state) + }), nil +} + +// cniStateToPodInfoByIP converts an AzureCNIState dumped from a CNI exec +// into a PodInfo map, using the endpoint IPs as keys in the map. +// for pods with multiple IPs (such as in dualstack cases), this means multiple keys in the map +// will point to the same pod information. +func cniStateToPodInfoByIP(state *api.AzureCNIState) (map[string]cns.PodInfo, error) { + podInfoByIP := map[string]cns.PodInfo{} + for _, endpoint := range state.ContainerInterfaces { + for _, epIP := range endpoint.IPAddresses { + podInfo := cns.NewPodInfo(endpoint.ContainerID, endpoint.PodEndpointId, endpoint.PodName, endpoint.PodNamespace) + + ipKey := epIP.IP.String() + if prevPodInfo, ok := podInfoByIP[ipKey]; ok { + return nil, errors.Wrapf(cns.ErrDuplicateIP, "duplicate ip %s found for different pods: pod: %+v, pod: %+v", ipKey, podInfo, prevPodInfo) + } + + podInfoByIP[ipKey] = podInfo + } + } + return podInfoByIP, nil +} diff --git a/cns/cnireconciler/podinfoprovider_test.go b/cns/stateprovider/cni/podinfoprovider_test.go similarity index 67% rename from cns/cnireconciler/podinfoprovider_test.go rename to cns/stateprovider/cni/podinfoprovider_test.go index 8d10b1c586..0928cf2ca0 100644 --- a/cns/cnireconciler/podinfoprovider_test.go +++ b/cns/stateprovider/cni/podinfoprovider_test.go @@ -1,12 +1,9 @@ -package cnireconciler +package cni import ( - "net" "testing" "github.com/Azure/azure-container-networking/cns" - "github.com/Azure/azure-container-networking/cns/restserver" - "github.com/Azure/azure-container-networking/store" testutils "github.com/Azure/azure-container-networking/test/utils" "github.com/stretchr/testify/assert" "k8s.io/utils/exec" @@ -76,54 +73,7 @@ func TestNewCNIPodInfoProvider(t *testing.T) { for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { - got, err := newCNIPodInfoProvider(tt.exec) - if tt.wantErr { - assert.Error(t, err) - return - } - assert.NoError(t, err) - podInfoByIP, _ := got.PodInfoByIP() - assert.Equal(t, tt.want, podInfoByIP) - }) - } -} - -func TestNewCNSPodInfoProvider(t *testing.T) { - goodStore := store.NewMockStore("") - goodEndpointState := make(map[string]*restserver.EndpointInfo) - endpointInfo := &restserver.EndpointInfo{PodName: "goldpinger-deploy-bbbf9fd7c-z8v4l", PodNamespace: "default", IfnameToIPMap: make(map[string]*restserver.IPInfo)} - endpointInfo.IfnameToIPMap["eth0"] = &restserver.IPInfo{IPv4: []net.IPNet{{IP: net.IPv4(10, 241, 0, 65), Mask: net.IPv4Mask(255, 255, 255, 0)}}} - - goodEndpointState["0a4917617e15d24dc495e407d8eb5c88e4406e58fa209e4eb75a2c2fb7045eea"] = endpointInfo - err := goodStore.Write(restserver.EndpointStoreKey, goodEndpointState) - if err != nil { - t.Fatalf("Error writing to store: %v", err) - } - tests := []struct { - name string - store store.KeyValueStore - want map[string]cns.PodInfo - wantErr bool - }{ - { - name: "good", - store: goodStore, - want: map[string]cns.PodInfo{"10.241.0.65": cns.NewPodInfo("0a4917617e15d24dc495e407d8eb5c88e4406e58fa209e4eb75a2c2fb7045eea", - "0a4917617e15d24dc495e407d8eb5c88e4406e58fa209e4eb75a2c2fb7045eea", 
"goldpinger-deploy-bbbf9fd7c-z8v4l", "default")}, - wantErr: false, - }, - { - name: "empty store", - store: store.NewMockStore(""), - want: map[string]cns.PodInfo{}, - wantErr: true, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - got, err := newCNSPodInfoProvider(tt.store) + got, err := podInfoProvider(tt.exec) if tt.wantErr { assert.Error(t, err) return diff --git a/cns/cnireconciler/statefile.go b/cns/stateprovider/cni/statefile.go similarity index 98% rename from cns/cnireconciler/statefile.go rename to cns/stateprovider/cni/statefile.go index 58649b1b57..0caaeec8f7 100644 --- a/cns/cnireconciler/statefile.go +++ b/cns/stateprovider/cni/statefile.go @@ -1,4 +1,4 @@ -package cnireconciler +package cni import ( "encoding/json" diff --git a/cns/cnireconciler/statefile_test.go b/cns/stateprovider/cni/statefile_test.go similarity index 89% rename from cns/cnireconciler/statefile_test.go rename to cns/stateprovider/cni/statefile_test.go index 335ad20918..45a4e3797d 100644 --- a/cns/cnireconciler/statefile_test.go +++ b/cns/stateprovider/cni/statefile_test.go @@ -1,4 +1,4 @@ -package cnireconciler +package cni import ( "os" @@ -12,8 +12,9 @@ import ( func TestWriteObjectToFile(t *testing.T) { name := "testdata/test" - err := os.MkdirAll(path.Dir(name), 0o666) + err := os.MkdirAll(path.Dir(name), 0o755) require.NoError(t, err) + defer os.RemoveAll("testdata") _, err = os.Stat(name) require.ErrorIs(t, err, os.ErrNotExist) diff --git a/cns/cnireconciler/podinfoprovider.go b/cns/stateprovider/cns/podinfoprovider.go similarity index 67% rename from cns/cnireconciler/podinfoprovider.go rename to cns/stateprovider/cns/podinfoprovider.go index 4a19a91d2c..d522728ed5 100644 --- a/cns/cnireconciler/podinfoprovider.go +++ b/cns/stateprovider/cns/podinfoprovider.go @@ -1,4 +1,4 @@ -package cnireconciler +package cns import ( "fmt" @@ -12,20 +12,15 @@ import ( "github.com/Azure/azure-container-networking/cns/restserver" "github.com/Azure/azure-container-networking/store" "github.com/pkg/errors" - "k8s.io/utils/exec" + kexec "k8s.io/utils/exec" ) -// NewCNIPodInfoProvider returns an implementation of cns.PodInfoByIPProvider -// that execs out to the CNI and uses the response to build the PodInfo map. -func NewCNIPodInfoProvider() (cns.PodInfoByIPProvider, error) { - return newCNIPodInfoProvider(exec.New()) +// New returns a PodInfoByIPProvider that reads from CNS statefile endpoint store. 
+func New(endpointStore store.KeyValueStore) (cns.PodInfoByIPProvider, error) { + return podInfoProvider(endpointStore) } -func NewCNSPodInfoProvider(endpointStore store.KeyValueStore) (cns.PodInfoByIPProvider, error) { - return newCNSPodInfoProvider(endpointStore) -} - -func newCNSPodInfoProvider(endpointStore store.KeyValueStore) (cns.PodInfoByIPProvider, error) { +func podInfoProvider(endpointStore store.KeyValueStore) (cns.PodInfoByIPProvider, error) { var state map[string]*restserver.EndpointInfo err := endpointStore.Read(restserver.EndpointStoreKey, &state) if err != nil { @@ -42,38 +37,6 @@ func newCNSPodInfoProvider(endpointStore store.KeyValueStore) (cns.PodInfoByIPPr }), nil } -func newCNIPodInfoProvider(exec exec.Interface) (cns.PodInfoByIPProvider, error) { - cli := client.New(exec) - state, err := cli.GetEndpointState() - if err != nil { - return nil, fmt.Errorf("failed to invoke CNI client.GetEndpointState(): %w", err) - } - return cns.PodInfoByIPProviderFunc(func() (map[string]cns.PodInfo, error) { - return cniStateToPodInfoByIP(state) - }), nil -} - -// cniStateToPodInfoByIP converts an AzureCNIState dumped from a CNI exec -// into a PodInfo map, using the endpoint IPs as keys in the map. -// for pods with multiple IPs (such as in dualstack cases), this means multiple keys in the map -// will point to the same pod information. -func cniStateToPodInfoByIP(state *api.AzureCNIState) (map[string]cns.PodInfo, error) { - podInfoByIP := map[string]cns.PodInfo{} - for _, endpoint := range state.ContainerInterfaces { - for _, epIP := range endpoint.IPAddresses { - podInfo := cns.NewPodInfo(endpoint.ContainerID, endpoint.PodEndpointId, endpoint.PodName, endpoint.PodNamespace) - - ipKey := epIP.IP.String() - if prevPodInfo, ok := podInfoByIP[ipKey]; ok { - return nil, errors.Wrapf(cns.ErrDuplicateIP, "duplicate ip %s found for different pods: pod: %+v, pod: %+v", ipKey, podInfo, prevPodInfo) - } - - podInfoByIP[ipKey] = podInfo - } - } - return podInfoByIP, nil -} - func endpointStateToPodInfoByIP(state map[string]*restserver.EndpointInfo) (map[string]cns.PodInfo, error) { podInfoByIP := map[string]cns.PodInfo{} for containerID, endpointInfo := range state { // for each endpoint @@ -107,11 +70,11 @@ func endpointStateToPodInfoByIP(state map[string]*restserver.EndpointInfo) (map[ // MigrateCNISate returns an endpoint state of CNS by reading the CNI state file func MigrateCNISate() (map[string]*restserver.EndpointInfo, error) { - return migrateCNISate(exec.New()) + return migrateCNISate(kexec.New()) } -func migrateCNISate(exc exec.Interface) (map[string]*restserver.EndpointInfo, error) { - cli := client.New(exc) +func migrateCNISate(exec kexec.Interface) (map[string]*restserver.EndpointInfo, error) { + cli := client.New(exec) state, err := cli.GetEndpointState() if err != nil { return nil, fmt.Errorf("failed to invoke CNI client.GetEndpointState(): %w", err) diff --git a/cns/stateprovider/cns/podinfoprovider_test.go b/cns/stateprovider/cns/podinfoprovider_test.go new file mode 100644 index 0000000000..05689509d6 --- /dev/null +++ b/cns/stateprovider/cns/podinfoprovider_test.go @@ -0,0 +1,58 @@ +package cns + +import ( + "net" + "testing" + + "github.com/Azure/azure-container-networking/cns" + "github.com/Azure/azure-container-networking/cns/restserver" + "github.com/Azure/azure-container-networking/store" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewCNSPodInfoProvider(t *testing.T) { + goodStore := store.NewMockStore("") + 
goodEndpointState := make(map[string]*restserver.EndpointInfo)
+	endpointInfo := &restserver.EndpointInfo{PodName: "goldpinger-deploy-bbbf9fd7c-z8v4l", PodNamespace: "default", IfnameToIPMap: make(map[string]*restserver.IPInfo)}
+	endpointInfo.IfnameToIPMap["eth0"] = &restserver.IPInfo{IPv4: []net.IPNet{{IP: net.IPv4(10, 241, 0, 65), Mask: net.IPv4Mask(255, 255, 255, 0)}}}
+
+	goodEndpointState["0a4917617e15d24dc495e407d8eb5c88e4406e58fa209e4eb75a2c2fb7045eea"] = endpointInfo
+	err := goodStore.Write(restserver.EndpointStoreKey, goodEndpointState)
+	if err != nil {
+		t.Fatalf("Error writing to store: %v", err)
+	}
+	tests := []struct {
+		name    string
+		store   store.KeyValueStore
+		want    map[string]cns.PodInfo
+		wantErr bool
+	}{
+		{
+			name:  "good",
+			store: goodStore,
+			want: map[string]cns.PodInfo{"10.241.0.65": cns.NewPodInfo("0a4917617e15d24dc495e407d8eb5c88e4406e58fa209e4eb75a2c2fb7045eea",
+				"0a4917617e15d24dc495e407d8eb5c88e4406e58fa209e4eb75a2c2fb7045eea", "goldpinger-deploy-bbbf9fd7c-z8v4l", "default")},
+			wantErr: false,
+		},
+		{
+			name:    "empty store",
+			store:   store.NewMockStore(""),
+			want:    map[string]cns.PodInfo{},
+			wantErr: true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, err := podInfoProvider(tt.store)
+			if tt.wantErr {
+				require.Error(t, err)
+				return
+			}
+			require.NoError(t, err)
+			podInfoByIP, _ := got.PodInfoByIP()
+			assert.Equal(t, tt.want, podInfoByIP)
+		})
+	}
+}
diff --git a/cns/types/codes.go b/cns/types/codes.go
index f96680231e..9492e92a59 100644
--- a/cns/types/codes.go
+++ b/cns/types/codes.go
@@ -44,7 +44,9 @@ const (
 	StatusUnauthorized            ResponseCode = 42
 	UnsupportedAPI                ResponseCode = 43
 	FailedToAllocateBackendConfig ResponseCode = 44
+	ConnectionError               ResponseCode = 45
 	UnexpectedError               ResponseCode = 99
+	NmAgentNCVersionListError     ResponseCode = 100
 )

 // nolint:gocyclo
diff --git a/codeql/acn-addipamconfig.ql b/codeql/acn-addipamconfig.ql
new file mode 100644
index 0000000000..7a4c05ee87
--- /dev/null
+++ b/codeql/acn-addipamconfig.ql
@@ -0,0 +1,41 @@
+/**
+ * @name Command Injection From CNS ipam add result / CNS multitenancy ipam add result
+ * @description Flow exists from CNS ipam add result / CNS multitenancy ipam add result (untrusted) to exec command
+ * @kind path-problem
+ * @problem.severity error
+ * @id go/cmd-inject-ipam-add-result
+ * @tags security
+ * @security-severity 9.8
+ * @precision high
+ */
+
+// Detect inputs from CNS add ipam result / CNS multitenancy ipam add result to command injection
+import go
+import lib.ACN
+
+private class Source extends DataFlow::Node {
+  Source() {
+    exists(DataFlow::CallNode c, Method m |
+      //m.hasQualifiedName("github.com/Azure/azure-container-networking/cni/network", "NetPlugin",
+      //  "addIpamInvoker") or // not necessary, since we call GetAllNetworkContainers right next to this (= duplicated results); if that call moves, uncomment this
+      m.hasQualifiedName("github.com/Azure/azure-container-networking/cni/network", "Multitenancy",
+        "GetAllNetworkContainers") and
+      c = m.getACall() and
+      c.getResult(0) = this
+    )
+  }
+}
+
+module MyConfiguration implements DataFlow::ConfigSig {
+  predicate isSink(DataFlow::Node sink) { sink instanceof ACN::CommandSink }
+
+  predicate isSource(DataFlow::Node source) { source instanceof Source }
+}
+
+module Flow = TaintTracking::Global<MyConfiguration>;
+
+import Flow::PathGraph
+
+from Flow::PathNode source, Flow::PathNode sink
+where Flow::flowPath(source, sink)
+select sink.getNode(), source, sink, "potential command injection"
diff --git
a/codeql/acn-cni-args.ql b/codeql/acn-cni-args.ql
new file mode 100644
index 0000000000..0deb372d3c
--- /dev/null
+++ b/codeql/acn-cni-args.ql
@@ -0,0 +1,47 @@
+/**
+ * @name Command Injection From CNI Args
+ * @description Flow exists from CNI Args (untrusted) to exec command
+ * @kind path-problem
+ * @problem.severity error
+ * @id go/cmd-inject-cni
+ * @tags security
+ * @security-severity 9.8
+ * @precision high
+ */
+
+// Detect inputs from CNI ARGS to command injection
+import go
+import lib.ACN
+
+private class Source extends DataFlow::Node {
+  Source() {
+    exists(DataFlow::CallNode c, Method m |
+      (
+        m.hasQualifiedName("github.com/Azure/azure-container-networking/cni/network", "NetPlugin",
+          "Add") or
+        m.hasQualifiedName("github.com/Azure/azure-container-networking/cni/network", "NetPlugin",
+          "Delete") or
+        m.hasQualifiedName("github.com/Azure/azure-container-networking/cni/network", "NetPlugin",
+          "Update") or
+        m.hasQualifiedName("github.com/Azure/azure-container-networking/cni/network", "NetPlugin",
+          "Get")
+      ) and
+      c = m.getACall() and
+      c.getArgument(0) = this
+    )
+  }
+}
+
+module MyConfiguration implements DataFlow::ConfigSig {
+  predicate isSink(DataFlow::Node sink) { sink instanceof ACN::CommandSink }
+
+  predicate isSource(DataFlow::Node source) { source instanceof Source }
+}
+
+module Flow = TaintTracking::Global<MyConfiguration>;
+
+import Flow::PathGraph
+
+from Flow::PathNode source, Flow::PathNode sink
+where Flow::flowPath(source, sink)
+select sink.getNode(), source, sink, "potential command injection"
diff --git a/codeql/acn-cns-invoker.ql b/codeql/acn-cns-invoker.ql
new file mode 100644
index 0000000000..c543893a2e
--- /dev/null
+++ b/codeql/acn-cns-invoker.ql
@@ -0,0 +1,48 @@
+/**
+ * @name Command Injection From CNS Invoker
+ * @description Flow exists from CNS Invoker (untrusted) to exec command
+ * @kind path-problem
+ * @problem.severity error
+ * @id go/cmd-inject-cns-invoker
+ * @tags security
+ * @security-severity 9.8
+ * @precision high
+ */
+
+// Detect inputs from CNS Invoker to command injection
+// Does not detect flow outside the enclosing method (which is why we analyze addIpamInvoker's results too)
+import go
+import lib.ACN
+
+private class Source extends DataFlow::Node {
+  Source() {
+    exists(DataFlow::CallNode c, Method m |
+      (
+        m.hasQualifiedName("github.com/Azure/azure-container-networking/cns/client", "Client",
+          "RequestIPs") or
+        m.hasQualifiedName("github.com/Azure/azure-container-networking/cns/client", "Client",
+          "RequestIPAddress") or
+        m.hasQualifiedName("github.com/Azure/azure-container-networking/cns/client", "Client",
+          "GetNetworkContainer") or
+        m.hasQualifiedName("github.com/Azure/azure-container-networking/cns/client", "Client",
+          "GetAllNetworkContainers")
+      ) and
+      c = m.getACall() and
+      c.getResult(0) = this
+    )
+  }
+}
+
+module MyConfiguration implements DataFlow::ConfigSig {
+  predicate isSink(DataFlow::Node sink) { sink instanceof ACN::CommandSink }
+
+  predicate isSource(DataFlow::Node source) { source instanceof Source }
+}
+
+module Flow = TaintTracking::Global<MyConfiguration>;
+
+import Flow::PathGraph
+
+from Flow::PathNode source, Flow::PathNode sink
+where Flow::flowPath(source, sink)
+select sink.getNode(), source, sink, "potential command injection"
diff --git a/codeql/acn-decode.ql b/codeql/acn-decode.ql
new file mode 100644
index 0000000000..ff6f5a789d
--- /dev/null
+++ b/codeql/acn-decode.ql
@@ -0,0 +1,37 @@
+/**
+ * @name Command Injection From Decode
+ * @description Flow exists from decodes (untrusted) to exec command
+ * @kind
path-problem
+ * @problem.severity error
+ * @id go/cmd-inject-decode
+ * @tags security
+ * @security-severity 9.8
+ * @precision high
+ */
+
+// Detect flow from the Decode method (which decodes http requests) to a command execution
+import go
+import lib.ACN
+
+private class Source extends DataFlow::Node {
+  Source() {
+    exists(DataFlow::CallNode c |
+      c.getTarget().hasQualifiedName("github.com/Azure/azure-container-networking/common", "Decode") and
+      c.getArgument(2) = this
+    )
+  }
+}
+
+module MyConfiguration implements DataFlow::ConfigSig {
+  predicate isSink(DataFlow::Node sink) { sink instanceof ACN::CommandSink }
+
+  predicate isSource(DataFlow::Node source) { source instanceof Source }
+}
+
+module Flow = TaintTracking::Global<MyConfiguration>;
+
+import Flow::PathGraph
+
+from Flow::PathNode source, Flow::PathNode sink
+where Flow::flowPath(source, sink)
+select sink.getNode(), source, sink, "potential command injection"
diff --git a/codeql/codeql-pack.lock.yml b/codeql/codeql-pack.lock.yml
new file mode 100644
index 0000000000..5300427457
--- /dev/null
+++ b/codeql/codeql-pack.lock.yml
@@ -0,0 +1,4 @@
+---
+lockVersion: 1.0.0
+dependencies: {}
+compiled: false
diff --git a/codeql/lib/ACN.qll b/codeql/lib/ACN.qll
new file mode 100644
index 0000000000..e1a2a2ff47
--- /dev/null
+++ b/codeql/lib/ACN.qll
@@ -0,0 +1,35 @@
+import go
+
+module ACN {
+  class CommandSink extends DataFlow::Node {
+    CommandSink() {
+      exists(DataFlow::CallNode c, Method m |
+        (
+          // Detect dangerous usage of command wrappers with the command in the 0th arg position
+          (
+            m.hasQualifiedName("github.com/Azure/azure-container-networking/platform", "execClient",
+              "ExecuteRawCommand") or
+            m.hasQualifiedName("github.com/Azure/azure-container-networking/platform", "execClient",
+              "ExecutePowershellCommand")
+          ) and
+          c.getArgument(0) = this
+          or
+          // Detect dangerous usage of command wrappers with the command in the 1st arg position
+          m.hasQualifiedName("github.com/Azure/azure-container-networking/platform", "execClient",
+            "ExecutePowershellCommandWithContext") and
+          c.getArgument(1) = this
+        ) and
+        c = m.getACall()
+        or
+        // Detect dangerous calls directly to os exec
+        (
+          c.getTarget().hasQualifiedName("os/exec", "CommandContext") and
+          (c.getArgument(2) = this or c.getArgument(1) = this)
+          or
+          c.getTarget().hasQualifiedName("os/exec", "Command") and
+          (c.getArgument(0) = this or c.getArgument(1) = this)
+        )
+      )
+    }
+  }
+}
diff --git a/codeql/qlpack.yml b/codeql/qlpack.yml
new file mode 100644
index 0000000000..833313ee1a
--- /dev/null
+++ b/codeql/qlpack.yml
@@ -0,0 +1,7 @@
+---
+library: false
+warnOnImplicitThis: false
+name: codeql
+version: 0.0.1
+dependencies:
+  codeql/go-all: ^1.1.3
diff --git a/common/config.go b/common/config.go
index 54e2dea276..3434c2e2e1 100644
--- a/common/config.go
+++ b/common/config.go
@@ -49,10 +49,6 @@ const (
 	OptIpamQueryInterval      = "ipam-query-interval"
 	OptIpamQueryIntervalAlias = "i"

-	// Start CNM
-	OptStartAzureCNM      = "start-azure-cnm"
-	OptStartAzureCNMAlias = "startcnm"
-
 	// Interval to send reports to host
 	OptReportToHostInterval      = "report-interval"
 	OptReportToHostIntervalAlias = "hostinterval"
diff --git a/crd/clustersubnetstate/client.go b/crd/clustersubnetstate/client.go
index 7bbe40d005..887b507b5c 100644
--- a/crd/clustersubnetstate/client.go
+++ b/crd/clustersubnetstate/client.go
@@ -103,3 +103,9 @@ func (c *Client) Get(ctx context.Context, key types.NamespacedName) (*v1alpha1.C
 	err := c.cli.Get(ctx, key, clusterSubnetState)
 	return clusterSubnetState, errors.Wrapf(err, "failed
to get css %v", key) } + +func (c *Client) List(ctx context.Context) ([]v1alpha1.ClusterSubnetState, error) { + clusterSubnetStateList := &v1alpha1.ClusterSubnetStateList{} + err := c.cli.List(ctx, clusterSubnetStateList, client.InNamespace("kube-system")) + return clusterSubnetStateList.Items, errors.Wrap(err, "failed to list css") +} diff --git a/crd/clustersubnetstate/manifests/acn.azure.com_clustersubnetstates.yaml b/crd/clustersubnetstate/manifests/acn.azure.com_clustersubnetstates.yaml index 69b7e38c5d..952eb502c8 100644 --- a/crd/clustersubnetstate/manifests/acn.azure.com_clustersubnetstates.yaml +++ b/crd/clustersubnetstate/manifests/acn.azure.com_clustersubnetstates.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.13.0 + controller-gen.kubebuilder.io/version: v0.16.3 name: clustersubnetstates.acn.azure.com spec: group: acn.azure.com @@ -27,14 +27,19 @@ spec: description: ClusterSubnetState is the Schema for the ClusterSubnetState API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object diff --git a/crd/multitenancy/api/v1alpha1/multitenantpodnetworkconfig.go b/crd/multitenancy/api/v1alpha1/multitenantpodnetworkconfig.go index ab481fa496..dba7fdd117 100644 --- a/crd/multitenancy/api/v1alpha1/multitenantpodnetworkconfig.go +++ b/crd/multitenancy/api/v1alpha1/multitenantpodnetworkconfig.go @@ -85,6 +85,9 @@ type MultitenantPodNetworkConfigStatus struct { // InterfaceInfos describes all of the network container goal state for this Pod // +kubebuilder:validation:Optional InterfaceInfos []InterfaceInfo `json:"interfaceInfos,omitempty"` + // DefaultDenyACL bool indicates whether default deny policy will be present on the pods upon pod creation + // +kubebuilder:validation:Optional + DefaultDenyACL bool `json:"defaultDenyACL"` } func init() { diff --git a/crd/multitenancy/api/v1alpha1/nodeinfo.go b/crd/multitenancy/api/v1alpha1/nodeinfo.go index 7ebb4d0d35..5936f072d5 100644 --- a/crd/multitenancy/api/v1alpha1/nodeinfo.go +++ b/crd/multitenancy/api/v1alpha1/nodeinfo.go @@ -32,13 +32,19 @@ type NodeInfoList struct { Items []NodeInfo `json:"items"` } -// NodeInfoSpec defines the desired state of NodeInfo +// NodeInfoSpec defines the desired state of NodeInfo. This is information +// provided by CNS. type NodeInfoSpec struct { // +kubebuilder:validation:Optional VMUniqueID string `json:"vmUniqueID,omitempty"` + + // +kubebuilder:validation:optional + // +kubebuilder:validation:Pattern=`^AZ\d{2}$` + HomeAZ string `json:"homeAZ,omitempty"` } -// NodeInfoStatus defines the observed state of NodeInfo +// NodeInfoStatus defines the observed state of NodeInfo. This is information +// provided by DNC. type NodeInfoStatus struct { // +kubebuilder:validation:Optional DeviceInfos []DeviceInfo `json:"deviceInfos,omitempty"` diff --git a/crd/multitenancy/api/v1alpha1/podnetworkinstance.go b/crd/multitenancy/api/v1alpha1/podnetworkinstance.go index 4a775363ae..0437bee57f 100644 --- a/crd/multitenancy/api/v1alpha1/podnetworkinstance.go +++ b/crd/multitenancy/api/v1alpha1/podnetworkinstance.go @@ -56,6 +56,10 @@ type PodNetworkInstanceSpec struct { // optional for now in case orchestrator uses the deprecated fields // +kubebuilder:validation:Optional PodNetworkConfigs []PodNetworkConfig `json:"podNetworkConfigs"` + // DefaultDenyACL bool indicates whether default deny policy will be present on the pods upon pod creation + // +kubebuilder:default=false + // +kubebuilder:validation:Optional + DefaultDenyACL bool `json:"defaultDenyACL"` } // PodNetworkInstanceStatus defines the observed state of PodNetworkInstance diff --git a/crd/multitenancy/client.go b/crd/multitenancy/client.go index bfd7a0061e..1c2065ad2d 100644 --- a/crd/multitenancy/client.go +++ b/crd/multitenancy/client.go @@ -216,3 +216,12 @@ func (n *NodeInfoClient) CreateOrUpdate(ctx context.Context, nodeInfo *v1alpha1. } return nil } + +// Get retrieves the NodeInfo CRD by name. 
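// For context, this is the getter that the new pollNodeInfoCRDAndUpdatePlugin loop in
// cns/service calls on every tick (condensed from this diff):
//
//	nodeInfoCli := multitenancy.NodeInfoClient{Cli: directcli}
//	nodeInfo, err := nodeInfoCli.Get(ctx, node.Name)
//	if err != nil {
//		return errors.Wrap(err, "failed to get nodeinfo crd")
//	}
//	// proceed once DNC has populated nodeInfo.Status.DeviceInfos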
+func (n *NodeInfoClient) Get(ctx context.Context, name string) (*v1alpha1.NodeInfo, error) { + var nodeInfo v1alpha1.NodeInfo + if err := n.Cli.Get(ctx, client.ObjectKey{Name: name}, &nodeInfo); err != nil { + return nil, errors.Wrap(err, "error getting nodeinfo crd") + } + return &nodeInfo, nil +} diff --git a/crd/multitenancy/manifests/multitenancy.acn.azure.com_multitenantpodnetworkconfigs.yaml b/crd/multitenancy/manifests/multitenancy.acn.azure.com_multitenantpodnetworkconfigs.yaml index 670cd15084..9390424b82 100644 --- a/crd/multitenancy/manifests/multitenancy.acn.azure.com_multitenantpodnetworkconfigs.yaml +++ b/crd/multitenancy/manifests/multitenancy.acn.azure.com_multitenantpodnetworkconfigs.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.13.0 + controller-gen.kubebuilder.io/version: v0.16.3 labels: managed: "" owner: "" @@ -36,14 +36,19 @@ spec: API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -67,6 +72,10 @@ spec: description: MultitenantPodNetworkConfigStatus defines the observed state of PodNetworkConfig properties: + defaultDenyACL: + description: DefaultDenyACL bool indicates whether default deny policy + will be present on the pods upon pod creation + type: boolean gatewayIP: description: Deprecated - use InterfaceInfos type: string diff --git a/crd/multitenancy/manifests/multitenancy.acn.azure.com_nodeinfo.yaml b/crd/multitenancy/manifests/multitenancy.acn.azure.com_nodeinfo.yaml index 27f2979577..a3a1c1dbb7 100644 --- a/crd/multitenancy/manifests/multitenancy.acn.azure.com_nodeinfo.yaml +++ b/crd/multitenancy/manifests/multitenancy.acn.azure.com_nodeinfo.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.13.0 + controller-gen.kubebuilder.io/version: v0.16.3 name: nodeinfo.multitenancy.acn.azure.com spec: group: multitenancy.acn.azure.com @@ -26,25 +26,37 @@ spec: description: NodeInfo is the Schema for the NodeInfo API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. 
Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: NodeInfoSpec defines the desired state of NodeInfo + description: |- + NodeInfoSpec defines the desired state of NodeInfo. This is information + provided by CNS. properties: + homeAZ: + pattern: ^AZ\d{2}$ + type: string vmUniqueID: type: string type: object status: - description: NodeInfoStatus defines the observed state of NodeInfo + description: |- + NodeInfoStatus defines the observed state of NodeInfo. This is information + provided by DNC. properties: deviceInfos: items: diff --git a/crd/multitenancy/manifests/multitenancy.acn.azure.com_podnetworkinstances.yaml b/crd/multitenancy/manifests/multitenancy.acn.azure.com_podnetworkinstances.yaml index 85b0def919..8dbbbe127f 100644 --- a/crd/multitenancy/manifests/multitenancy.acn.azure.com_podnetworkinstances.yaml +++ b/crd/multitenancy/manifests/multitenancy.acn.azure.com_podnetworkinstances.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.13.0 + controller-gen.kubebuilder.io/version: v0.16.3 labels: managed: "" owner: "" @@ -34,28 +34,38 @@ spec: API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: description: PodNetworkInstanceSpec defines the desired state of PodNetworkInstance properties: + defaultDenyACL: + default: false + description: DefaultDenyACL bool indicates whether default deny policy + will be present on the pods upon pod creation + type: boolean podIPReservationSize: default: 0 description: Deprecated - use PodNetworks type: integer podNetworkConfigs: - description: PodNetworkConfigs describes each PodNetwork to attach - to a single Pod optional for now in case orchestrator uses the deprecated - fields + description: |- + PodNetworkConfigs describes each PodNetwork to attach to a single Pod + optional for now in case orchestrator uses the deprecated fields items: description: PodNetworkConfig describes a template for how to attach a PodNetwork to a Pod diff --git a/crd/multitenancy/manifests/multitenancy.acn.azure.com_podnetworks.yaml b/crd/multitenancy/manifests/multitenancy.acn.azure.com_podnetworks.yaml index 6fedbcd8c2..f7aa88bbd9 100644 --- a/crd/multitenancy/manifests/multitenancy.acn.azure.com_podnetworks.yaml +++ b/crd/multitenancy/manifests/multitenancy.acn.azure.com_podnetworks.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.13.0 + controller-gen.kubebuilder.io/version: v0.16.3 name: podnetworks.multitenancy.acn.azure.com spec: group: multitenancy.acn.azure.com @@ -47,14 +47,19 @@ spec: description: PodNetwork is the Schema for the PodNetworks API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object diff --git a/crd/multitenantnetworkcontainer/manifests/networking.azure.com_multitenantnetworkcontainers.yaml b/crd/multitenantnetworkcontainer/manifests/networking.azure.com_multitenantnetworkcontainers.yaml index a65a9b0cd6..8ccaa51c9f 100644 --- a/crd/multitenantnetworkcontainer/manifests/networking.azure.com_multitenantnetworkcontainers.yaml +++ b/crd/multitenantnetworkcontainer/manifests/networking.azure.com_multitenantnetworkcontainers.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.13.0 + controller-gen.kubebuilder.io/version: v0.16.3 name: multitenantnetworkcontainers.networking.azure.com spec: group: networking.azure.com @@ -21,14 +21,19 @@ spec: API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object diff --git a/crd/nodenetworkconfig/api/v1alpha/nodenetworkconfig.go b/crd/nodenetworkconfig/api/v1alpha/nodenetworkconfig.go index 02add068c0..7ded69a387 100644 --- a/crd/nodenetworkconfig/api/v1alpha/nodenetworkconfig.go +++ b/crd/nodenetworkconfig/api/v1alpha/nodenetworkconfig.go @@ -16,6 +16,7 @@ import ( // +kubebuilder:resource:shortName=nnc // +kubebuilder:subresource:status // +kubebuilder:printcolumn:name="Requested IPs",type=integer,priority=1,JSONPath=`.spec.requestedIPCount` +// +kubebuilder:printcolumn:name="Availability Zone",type=string,priority=1,JSONPath=`.spec.availabilityZone` // +kubebuilder:printcolumn:name="Allocated IPs",type=integer,priority=0,JSONPath=`.status.assignedIPCount` // +kubebuilder:printcolumn:name="Subnet",type=string,priority=1,JSONPath=`.status.networkContainers[*].subnetName` // +kubebuilder:printcolumn:name="Subnet CIDR",type=string,priority=1,JSONPath=`.status.networkContainers[*].subnetAddressSpace` @@ -46,6 +47,9 @@ type NodeNetworkConfigSpec struct { // +kubebuilder:validation:Optional RequestedIPCount int64 `json:"requestedIPCount"` IPsNotInUse []string `json:"ipsNotInUse,omitempty"` + // AvailabilityZone contains the Azure availability zone for the virtual machine where network containers are placed. 
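	// (Assumed semantics, for illustration only: Azure availability zone numbers start
	// at 1, so with `omitempty` a zero value serializes to no field at all and can be
	// read as "non-zonal VM".)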
+ // +kubebuilder:validation:Optional + AvailabilityZone uint `json:"availabilityZone,omitempty"` } // Status indicates the NNC reconcile status @@ -115,9 +119,12 @@ type NetworkContainer struct { // +kubebuilder:default=vnet Type NCType `json:"type,omitempty"` PrimaryIP string `json:"primaryIP,omitempty"` + PrimaryIPV6 string `json:"primaryIPV6,omitempty"` SubnetName string `json:"subnetName,omitempty"` IPAssignments []IPAssignment `json:"ipAssignments,omitempty"` DefaultGateway string `json:"defaultGateway,omitempty"` + DefaultGatewayV6 string `json:"defaultGatewayV6,omitempty"` + MacAddress string `json:"macAddress,omitempty"` SubnetAddressSpace string `json:"subnetAddressSpace,omitempty"` // +kubebuilder:default=0 // +kubebuilder:validation:Optional diff --git a/crd/nodenetworkconfig/manifests/acn.azure.com_nodenetworkconfigs.yaml b/crd/nodenetworkconfig/manifests/acn.azure.com_nodenetworkconfigs.yaml index 00f8a8a0d4..64820de18f 100644 --- a/crd/nodenetworkconfig/manifests/acn.azure.com_nodenetworkconfigs.yaml +++ b/crd/nodenetworkconfig/manifests/acn.azure.com_nodenetworkconfigs.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.13.0 + controller-gen.kubebuilder.io/version: v0.16.3 name: nodenetworkconfigs.acn.azure.com spec: group: acn.azure.com @@ -21,6 +21,10 @@ spec: name: Requested IPs priority: 1 type: integer + - jsonPath: .spec.availabilityZone + name: Availability Zone + priority: 1 + type: string - jsonPath: .status.assignedIPCount name: Allocated IPs type: integer @@ -52,20 +56,29 @@ spec: description: NodeNetworkConfig is the Schema for the nodenetworkconfigs API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: description: NodeNetworkConfigSpec defines the desired state of NetworkConfig properties: + availabilityZone: + description: AvailabilityZone contains the Azure availability zone + for the virtual machine where network containers are placed. 
+ type: integer ipsNotInUse: items: type: string @@ -96,6 +109,8 @@ spec: type: string defaultGateway: type: string + defaultGatewayV6: + type: string id: type: string ipAssignments: @@ -109,10 +124,14 @@ spec: type: string type: object type: array + macAddress: + type: string nodeIP: type: string primaryIP: type: string + primaryIPV6: + type: string resourceGroupID: type: string status: diff --git a/crd/overlayextensionconfig/Makefile b/crd/overlayextensionconfig/Makefile new file mode 100644 index 0000000000..c1fd004a43 --- /dev/null +++ b/crd/overlayextensionconfig/Makefile @@ -0,0 +1,19 @@ +.DEFAULT_GOAL = all + +REPO_ROOT = $(shell git rev-parse --show-toplevel) +TOOLS_DIR = $(REPO_ROOT)/build/tools +TOOLS_BIN_DIR = $(REPO_ROOT)/build/tools/bin +CONTROLLER_GEN = $(TOOLS_BIN_DIR)/controller-gen + +all: generate manifests + +generate: $(CONTROLLER_GEN) + $(CONTROLLER_GEN) object paths="./..." + +.PHONY: manifests +manifests: $(CONTROLLER_GEN) + mkdir -p manifests + $(CONTROLLER_GEN) crd paths="./..." output:crd:artifacts:config=manifests/ + +$(CONTROLLER_GEN): + @make -C $(REPO_ROOT) $(CONTROLLER_GEN) diff --git a/crd/overlayextensionconfig/README.md b/crd/overlayextensionconfig/README.md new file mode 100644 index 0000000000..a155b3055b --- /dev/null +++ b/crd/overlayextensionconfig/README.md @@ -0,0 +1,5 @@ +List of included CRDs + +# OverlayExtensionConfig CRD + +OverlayExtensionConfig CRD defines an IP address range (usually an Azure subnet) from which it is possible to reach routing domain IPs (usually pods running in Azure CNI Overlay cluster). diff --git a/crd/overlayextensionconfig/api/v1alpha1/groupversion_info.go b/crd/overlayextensionconfig/api/v1alpha1/groupversion_info.go new file mode 100644 index 0000000000..88c30b03ed --- /dev/null +++ b/crd/overlayextensionconfig/api/v1alpha1/groupversion_info.go @@ -0,0 +1,23 @@ +//go:build !ignore_uncovered +// +build !ignore_uncovered + +// Package v1alpha1 contains API Schema definitions for the acn v1alpha1 API group +// +kubebuilder:object:generate=true +// +groupName=acn.azure.com +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "acn.azure.com", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
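+	// A typical call site is controller setup, e.g. (sketch only; mgr is a
+	// controller-runtime Manager and utilruntime is
+	// k8s.io/apimachinery/pkg/util/runtime):
+	//
+	//	utilruntime.Must(AddToScheme(mgr.GetScheme()))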
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/crd/overlayextensionconfig/api/v1alpha1/overlayextensionconfig_types.go b/crd/overlayextensionconfig/api/v1alpha1/overlayextensionconfig_types.go new file mode 100644 index 0000000000..9d5d220d59 --- /dev/null +++ b/crd/overlayextensionconfig/api/v1alpha1/overlayextensionconfig_types.go @@ -0,0 +1,63 @@ +//go:build !ignore_uncovered +// +build !ignore_uncovered + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// OverlayExtensionConfig is the Schema for the overlayextensionconfigs API +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Namespaced +// +kubebuilder:resource:shortName=oec +// +kubebuilder:printcolumn:name="OverlayExtensionConfig IP range",type=string,priority=1,JSONPath=`.spec.extensionIPRange` +type OverlayExtensionConfig struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec OverlayExtensionConfigSpec `json:"spec,omitempty"` + Status OverlayExtensionConfigStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// OverlayExtensionConfigList contains a list of OverlayExtensionConfig +type OverlayExtensionConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []OverlayExtensionConfig `json:"items"` +} + +// OverlayExtensionConfigSpec defines the desired state of OverlayExtensionConfig. +// +kubebuilder:validation:XValidation:rule="!has(oldSelf.extensionIPRange) || has(self.extensionIPRange)", message="ExtensionIPRange is required once set" +type OverlayExtensionConfigSpec struct { + // ExtensionIPRange field defines a CIDR that should be able to reach routing domain ip addresses. + // +kubebuilder:validation:Optional + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" + // +kubebuilder:validation:MaxLength=43 + // 43 is max length of IPv6 CIDR string + ExtensionIPRange string `json:"extensionIPRange,omitempty"` +} + +type OECState string + +const ( + None OECState = "None" + Pending OECState = "Pending" + Succeeded OECState = "Succeeded" + Failed OECState = "Failed" +) + +// OverlayExtensionConfigStatus defines the observed state of OverlayExtensionConfig +type OverlayExtensionConfigStatus struct { + // +kubebuilder:validation:Enum=None;Pending;Succeeded;Failed + // +kubebuilder:default="None" + State OECState `json:"state,omitempty"` + Message string `json:"message,omitempty"` +} + +func init() { + SchemeBuilder.Register(&OverlayExtensionConfig{}, &OverlayExtensionConfigList{}) +} diff --git a/crd/overlayextensionconfig/api/v1alpha1/zz_generated.deepcopy.go b/crd/overlayextensionconfig/api/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..07d3c48fe4 --- /dev/null +++ b/crd/overlayextensionconfig/api/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,98 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OverlayExtensionConfig) DeepCopyInto(out *OverlayExtensionConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverlayExtensionConfig. 
+func (in *OverlayExtensionConfig) DeepCopy() *OverlayExtensionConfig { + if in == nil { + return nil + } + out := new(OverlayExtensionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OverlayExtensionConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OverlayExtensionConfigList) DeepCopyInto(out *OverlayExtensionConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OverlayExtensionConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverlayExtensionConfigList. +func (in *OverlayExtensionConfigList) DeepCopy() *OverlayExtensionConfigList { + if in == nil { + return nil + } + out := new(OverlayExtensionConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OverlayExtensionConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OverlayExtensionConfigSpec) DeepCopyInto(out *OverlayExtensionConfigSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverlayExtensionConfigSpec. +func (in *OverlayExtensionConfigSpec) DeepCopy() *OverlayExtensionConfigSpec { + if in == nil { + return nil + } + out := new(OverlayExtensionConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OverlayExtensionConfigStatus) DeepCopyInto(out *OverlayExtensionConfigStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverlayExtensionConfigStatus. +func (in *OverlayExtensionConfigStatus) DeepCopy() *OverlayExtensionConfigStatus { + if in == nil { + return nil + } + out := new(OverlayExtensionConfigStatus) + in.DeepCopyInto(out) + return out +} diff --git a/crd/overlayextensionconfig/client.go b/crd/overlayextensionconfig/client.go new file mode 100644 index 0000000000..97f7b16d30 --- /dev/null +++ b/crd/overlayextensionconfig/client.go @@ -0,0 +1,75 @@ +package overlayextensionconfig + +import ( + "context" + "reflect" + + "github.com/Azure/azure-container-networking/crd" + "github.com/Azure/azure-container-networking/crd/overlayextensionconfig/api/v1alpha1" + "github.com/pkg/errors" + v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + typedv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" +) + +// Scheme is a runtime scheme containing the client-go scheme and the OverlayExtensionConfig scheme. 
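+// A minimal consumption sketch (assumes a *rest.Config named cfg, a context
+// named ctx, and sigs.k8s.io/controller-runtime/pkg/client imported as ctrlcli):
+//
+//	cli, err := ctrlcli.New(cfg, ctrlcli.Options{Scheme: Scheme})
+//	if err != nil { /* handle */ }
+//	oec := &v1alpha1.OverlayExtensionConfig{}
+//	err = cli.Get(ctx, ctrlcli.ObjectKey{Namespace: "default", Name: "example"}, oec)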
+var Scheme = runtime.NewScheme()
+
+func init() {
+	_ = scheme.AddToScheme(Scheme)
+	_ = v1alpha1.AddToScheme(Scheme)
+}
+
+// Installer provides methods to manage the lifecycle of the OverlayExtensionConfig resource definition.
+type Installer struct {
+	cli typedv1.CustomResourceDefinitionInterface
+}
+
+func NewInstaller(c *rest.Config) (*Installer, error) {
+	cli, err := crd.NewCRDClientFromConfig(c)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to init crd client")
+	}
+	return &Installer{
+		cli: cli,
+	}, nil
+}
+
+func (i *Installer) create(ctx context.Context, res *v1.CustomResourceDefinition) (*v1.CustomResourceDefinition, error) {
+	res, err := i.cli.Create(ctx, res, metav1.CreateOptions{})
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to create oec crd")
+	}
+	return res, nil
+}
+
+// InstallOrUpdate installs the embedded OverlayExtensionConfig CRD definition in the cluster or updates it if present.
+func (i *Installer) InstallOrUpdate(ctx context.Context) (*v1.CustomResourceDefinition, error) {
+	oec, err := GetOverlayExtensionConfigs()
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to get embedded oec crd")
+	}
+	current, err := i.create(ctx, oec)
+	if !apierrors.IsAlreadyExists(err) {
+		return current, err
+	}
+	if current == nil {
+		current, err = i.cli.Get(ctx, oec.Name, metav1.GetOptions{})
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to get existing oec crd")
+		}
+	}
+	if !reflect.DeepEqual(oec.Spec.Versions, current.Spec.Versions) {
+		oec.SetResourceVersion(current.GetResourceVersion())
+		previous := *current
+		current, err = i.cli.Update(ctx, oec, metav1.UpdateOptions{})
+		if err != nil {
+			return &previous, errors.Wrap(err, "failed to update existing oec crd")
+		}
+	}
+	return current, nil
+}
diff --git a/crd/overlayextensionconfig/embed.go b/crd/overlayextensionconfig/embed.go
new file mode 100644
index 0000000000..d4ab04fee4
--- /dev/null
+++ b/crd/overlayextensionconfig/embed.go
@@ -0,0 +1,26 @@
+package overlayextensionconfig
+
+import (
+	_ "embed"
+
+	// import the manifests package so that callers of this package have the manifests compiled in as a side-effect.
+	_ "github.com/Azure/azure-container-networking/crd/overlayextensionconfig/manifests"
+	"github.com/pkg/errors"
+	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+	"sigs.k8s.io/yaml"
+)
+
+// OverlayExtensionConfigsYAML embeds the CRD YAML for downstream consumers.
+//
+//go:embed manifests/acn.azure.com_overlayextensionconfigs.yaml
+var OverlayExtensionConfigsYAML []byte
+
+// GetOverlayExtensionConfigs parses the raw OverlayExtensionConfigsYAML bytes
+// into a CustomResourceDefinition and returns it or an unmarshalling error.
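+// This is also the manifest that Installer.InstallOrUpdate (client.go) applies;
+// a typical startup sequence is (sketch, error handling elided; cfg is a
+// *rest.Config):
+//
+//	installer, _ := NewInstaller(cfg)
+//	crd, _ := installer.InstallOrUpdate(ctx)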
+func GetOverlayExtensionConfigs() (*apiextensionsv1.CustomResourceDefinition, error) {
+	overlayExtensionConfigs := &apiextensionsv1.CustomResourceDefinition{}
+	if err := yaml.Unmarshal(OverlayExtensionConfigsYAML, &overlayExtensionConfigs); err != nil {
+		return nil, errors.Wrap(err, "error unmarshalling embedded oec")
+	}
+	return overlayExtensionConfigs, nil
+}
diff --git a/crd/overlayextensionconfig/embed_test.go b/crd/overlayextensionconfig/embed_test.go
new file mode 100644
index 0000000000..7084b5f7c3
--- /dev/null
+++ b/crd/overlayextensionconfig/embed_test.go
@@ -0,0 +1,22 @@
+package overlayextensionconfig
+
+import (
+	"os"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+const filename = "manifests/acn.azure.com_overlayextensionconfigs.yaml"
+
+func TestEmbed(t *testing.T) {
+	b, err := os.ReadFile(filename)
+	require.NoError(t, err)
+	assert.Equal(t, b, OverlayExtensionConfigsYAML)
+}
+
+func TestGetOverlayExtensionConfigs(t *testing.T) {
+	_, err := GetOverlayExtensionConfigs()
+	require.NoError(t, err)
+}
diff --git a/crd/overlayextensionconfig/manifests/acn.azure.com_overlayextensionconfigs.yaml b/crd/overlayextensionconfig/manifests/acn.azure.com_overlayextensionconfigs.yaml
new file mode 100644
index 0000000000..418691c2af
--- /dev/null
+++ b/crd/overlayextensionconfig/manifests/acn.azure.com_overlayextensionconfigs.yaml
@@ -0,0 +1,82 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.16.3
+  name: overlayextensionconfigs.acn.azure.com
+spec:
+  group: acn.azure.com
+  names:
+    kind: OverlayExtensionConfig
+    listKind: OverlayExtensionConfigList
+    plural: overlayextensionconfigs
+    shortNames:
+    - oec
+    singular: overlayextensionconfig
+  scope: Namespaced
+  versions:
+  - additionalPrinterColumns:
+    - jsonPath: .spec.extensionIPRange
+      name: OverlayExtensionConfig IP range
+      priority: 1
+      type: string
+    name: v1alpha1
+    schema:
+      openAPIV3Schema:
+        description: OverlayExtensionConfig is the Schema for the overlayextensionconfigs
+          API
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: OverlayExtensionConfigSpec defines the desired state of OverlayExtensionConfig.
+            properties:
+              extensionIPRange:
+                description: |-
+                  ExtensionIPRange field defines a CIDR that should be able to reach routing domain ip addresses.
+ 43 is max length of IPv6 CIDR string + maxLength: 43 + type: string + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf + type: object + x-kubernetes-validations: + - message: ExtensionIPRange is required once set + rule: '!has(oldSelf.extensionIPRange) || has(self.extensionIPRange)' + status: + description: OverlayExtensionConfigStatus defines the observed state of + OverlayExtensionConfig + properties: + message: + type: string + state: + default: None + enum: + - None + - Pending + - Succeeded + - Failed + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd/overlayextensionconfig/manifests/doc.go b/crd/overlayextensionconfig/manifests/doc.go new file mode 100644 index 0000000000..b08acc397f --- /dev/null +++ b/crd/overlayextensionconfig/manifests/doc.go @@ -0,0 +1,3 @@ +// Package manifests exists to allow the rendered CRD manifests to be +// packaged in to dependent components. +package manifests diff --git a/dhcp/dhcp_linux.go b/dhcp/dhcp_linux.go new file mode 100644 index 0000000000..9e7a029c05 --- /dev/null +++ b/dhcp/dhcp_linux.go @@ -0,0 +1,461 @@ +//go:build linux +// +build linux + +package dhcp + +import ( + "bytes" + "context" + "crypto/rand" + "encoding/binary" + "io" + "net" + "time" + + "github.com/pkg/errors" + "go.uber.org/zap" + "golang.org/x/net/ipv4" + "golang.org/x/sys/unix" +) + +const ( + dhcpDiscover = 1 + bootRequest = 1 + ethPAll = 0x0003 + MaxUDPReceivedPacketSize = 8192 + dhcpServerPort = 67 + dhcpClientPort = 68 + dhcpOpCodeReply = 2 + bootpMinLen = 300 + bytesInAddress = 4 // bytes in an ip address + macBytes = 6 // bytes in a mac address + udpProtocol = 17 + + opRequest = 1 + htypeEthernet = 1 + hlenEthernet = 6 + hops = 0 + secs = 0 + flags = 0x8000 // Broadcast flag +) + +// TransactionID represents a 4-byte DHCP transaction ID as defined in RFC 951, +// Section 3. +// +// The TransactionID is used to match DHCP replies to their original request. 
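+// Matching is a plain byte-wise comparison, e.g. (sketch):
+//
+//	var replyXID [4]byte // xid bytes parsed out of a received packet
+//	if TransactionID(replyXID) == requestXID {
+//		// the reply answers our request
+//	}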
+type TransactionID [4]byte
+
+var (
+	magicCookie        = []byte{0x63, 0x82, 0x53, 0x63} // DHCP magic cookie
+	DefaultReadTimeout = 3 * time.Second
+	DefaultTimeout     = 3 * time.Second
+)
+
+type DHCP struct {
+	logger *zap.Logger
+}
+
+func New(logger *zap.Logger) *DHCP {
+	return &DHCP{
+		logger: logger,
+	}
+}
+
+type Socket struct {
+	fd         int
+	remoteAddr unix.SockaddrInet4
+}
+
+// Linux specific
+// returns a writer which should always be closed, even if we return an error
+func NewWriteSocket(ifname string, remoteAddr unix.SockaddrInet4) (io.WriteCloser, error) {
+	fd, err := MakeBroadcastSocket(ifname)
+	ret := &Socket{
+		fd:         fd,
+		remoteAddr: remoteAddr,
+	}
+	if err != nil {
+		return ret, errors.Wrap(err, "could not make dhcp write socket")
+	}
+
+	return ret, nil
+}
+
+func (s *Socket) Write(packetBytes []byte) (int, error) {
+	err := unix.Sendto(s.fd, packetBytes, 0, &s.remoteAddr)
+	if err != nil {
+		return 0, errors.Wrap(err, "failed unix send to")
+	}
+	return len(packetBytes), nil
+}
+
+// returns a reader which should always be closed, even if we return an error
+func NewReadSocket(ifname string, timeout time.Duration) (io.ReadCloser, error) {
+	fd, err := makeListeningSocket(ifname, timeout)
+	ret := &Socket{
+		fd: fd,
+	}
+	if err != nil {
+		return ret, errors.Wrap(err, "could not make dhcp read socket")
+	}
+
+	return ret, nil
+}
+
+func (s *Socket) Read(p []byte) (n int, err error) {
+	n, _, innerErr := unix.Recvfrom(s.fd, p, 0)
+	if innerErr != nil {
+		return 0, errors.Wrap(innerErr, "failed unix recv from")
+	}
+	return n, nil
+}
+
+func (s *Socket) Close() error {
+	// do not attempt to close fd with -1 as they are not valid
+	if s.fd == -1 {
+		return nil
+	}
+	// Ensure the file descriptor is closed when done
+	if err := unix.Close(s.fd); err != nil {
+		return errors.Wrap(err, "error closing dhcp unix socket")
+	}
+	return nil
+}
+
+// GenerateTransactionID generates a random 32-bit number suitable for use as a TransactionID
+func GenerateTransactionID() (TransactionID, error) {
+	var xid TransactionID
+	_, err := rand.Read(xid[:])
+	if err != nil {
+		return xid, errors.Errorf("could not get random number: %v", err)
+	}
+	return xid, nil
+}
+
+func makeListeningSocket(ifname string, timeout time.Duration) (int, error) {
+	// reference: https://manned.org/packet.7
+	// starts listening to the specified protocol, or none if zero
+	// the SockaddrLinklayer also ensures packets for the htons(unix.ETH_P_IP) prot are received
+	fd, err := unix.Socket(unix.AF_PACKET, unix.SOCK_DGRAM, int(htons(unix.ETH_P_IP)))
+	if err != nil {
+		return fd, errors.Wrap(err, "dhcp socket creation failure")
+	}
+	iface, err := net.InterfaceByName(ifname)
+	if err != nil {
+		return fd, errors.Wrap(err, "dhcp failed to get interface")
+	}
+	llAddr := unix.SockaddrLinklayer{
+		Ifindex:  iface.Index,
+		Protocol: htons(unix.ETH_P_IP),
+	}
+	if err := unix.Bind(fd, &llAddr); err != nil {
+		return fd, errors.Wrap(err, "dhcp failed to bind")
+	}
+
+	// set max time waiting without any data received
+	timeval := unix.NsecToTimeval(timeout.Nanoseconds())
+	if innerErr := unix.SetsockoptTimeval(fd, unix.SOL_SOCKET, unix.SO_RCVTIMEO, &timeval); innerErr != nil {
+		return fd, errors.Wrap(innerErr, "could not set timeout on socket")
+	}
+
+	return fd, nil
+}
+
+// MakeBroadcastSocket creates a socket that can be passed to unix.Sendto
+// that will send packets out to the broadcast address.
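+// Because the underlying socket is opened with IP_HDRINCL (see makeRawSocket
+// below), callers are expected to hand it a complete IP+UDP frame, e.g. one
+// built by MakeRawUDPPacket (sketch; remote is a unix.SockaddrInet4):
+//
+//	pkt, _ := MakeRawUDPPacket(dhcpPayload, *raddr, *laddr)
+//	_ = unix.Sendto(fd, pkt, 0, &remote)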
+func MakeBroadcastSocket(ifname string) (int, error) { + fd, err := makeRawSocket(ifname) + if err != nil { + return fd, err + } + // enables broadcast (disabled by default) + err = unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_BROADCAST, 1) + if err != nil { + return fd, errors.Wrap(err, "dhcp failed to set sockopt") + } + return fd, nil +} + +// conversion between host and network byte order +func htons(v uint16) uint16 { + var tmp [2]byte + binary.BigEndian.PutUint16(tmp[:], v) + return binary.LittleEndian.Uint16(tmp[:]) +} + +func BindToInterface(fd int, ifname string) error { + return errors.Wrap(unix.BindToDevice(fd, ifname), "failed to bind to device") +} + +// makeRawSocket creates a socket that can be passed to unix.Sendto. +func makeRawSocket(ifname string) (int, error) { + // AF_INET sends via IPv4, SOCK_RAW means create an ip datagram socket (skips udp transport layer, see below) + fd, err := unix.Socket(unix.AF_INET, unix.SOCK_RAW, unix.IPPROTO_RAW) + if err != nil { + return fd, errors.Wrap(err, "dhcp raw socket creation failure") + } + // Later on when we write to this socket, our packet already contains the header (we create it with MakeRawUDPPacket). + err = unix.SetsockoptInt(fd, unix.IPPROTO_IP, unix.IP_HDRINCL, 1) + if err != nil { + return fd, errors.Wrap(err, "dhcp failed to set hdrincl raw sockopt") + } + err = BindToInterface(fd, ifname) + if err != nil { + return fd, errors.Wrap(err, "dhcp failed to bind to interface") + } + return fd, nil +} + +// Build DHCP Discover Packet +func buildDHCPDiscover(mac net.HardwareAddr, txid TransactionID) ([]byte, error) { + if len(mac) != macBytes { + return nil, errors.Errorf("invalid MAC address length") + } + + var packet bytes.Buffer + + // BOOTP header + packet.WriteByte(opRequest) // op: BOOTREQUEST (1) + packet.WriteByte(htypeEthernet) // htype: Ethernet (1) + packet.WriteByte(hlenEthernet) // hlen: MAC address length (6) + packet.WriteByte(hops) // hops: 0 + packet.Write(txid[:]) // xid: Transaction ID (4 bytes) + err := binary.Write(&packet, binary.BigEndian, uint16(secs)) // secs: Seconds elapsed + if err != nil { + return nil, errors.Wrap(err, "failed to write seconds elapsed") + } + err = binary.Write(&packet, binary.BigEndian, uint16(flags)) // flags: Broadcast flag + if err != nil { + return nil, errors.Wrap(err, "failed to write broadcast flag") + } + + // Client IP address (0.0.0.0) + packet.Write(make([]byte, bytesInAddress)) + // Your IP address (0.0.0.0) + packet.Write(make([]byte, bytesInAddress)) + // Server IP address (0.0.0.0) + packet.Write(make([]byte, bytesInAddress)) + // Gateway IP address (0.0.0.0) + packet.Write(make([]byte, bytesInAddress)) + + // chaddr: Client hardware address (MAC address) + paddingBytes := 10 + packet.Write(mac) // MAC address (6 bytes) + packet.Write(make([]byte, paddingBytes)) // Padding to 16 bytes + + // sname: Server host name (64 bytes) + serverHostNameBytes := 64 + packet.Write(make([]byte, serverHostNameBytes)) + // file: Boot file name (128 bytes) + bootFileNameBytes := 128 + packet.Write(make([]byte, bootFileNameBytes)) + + // Magic cookie (DHCP) + err = binary.Write(&packet, binary.BigEndian, magicCookie) + if err != nil { + return nil, errors.Wrap(err, "failed to write magic cookie") + } + + // DHCP options (minimal required options for DISCOVER) + packet.Write([]byte{ + 53, 1, 1, // Option 53: DHCP Message Type (1 = DHCP Discover) + 55, 3, 1, 3, 6, // Option 55: Parameter Request List (1 = Subnet Mask, 3 = Router, 6 = DNS) + 255, // End option + }) + + // padding 
length to 300 bytes + var value uint8 // default is zero + if packet.Len() < bootpMinLen { + packet.Write(bytes.Repeat([]byte{value}, bootpMinLen-packet.Len())) + } + + return packet.Bytes(), nil +} + +// MakeRawUDPPacket converts a payload (a serialized packet) into a +// raw UDP packet for the specified serverAddr from the specified clientAddr. +func MakeRawUDPPacket(payload []byte, serverAddr, clientAddr net.UDPAddr) ([]byte, error) { + udpBytes := 8 + udp := make([]byte, udpBytes) + binary.BigEndian.PutUint16(udp[:2], uint16(clientAddr.Port)) + binary.BigEndian.PutUint16(udp[2:4], uint16(serverAddr.Port)) + totalLen := uint16(udpBytes + len(payload)) + binary.BigEndian.PutUint16(udp[4:6], totalLen) + binary.BigEndian.PutUint16(udp[6:8], 0) // try to offload the checksum + + headerVersion := 4 + headerLen := 20 + headerTTL := 64 + + h := ipv4.Header{ + Version: headerVersion, // nolint + Len: headerLen, // nolint + TotalLen: headerLen + len(udp) + len(payload), + TTL: headerTTL, + Protocol: udpProtocol, // UDP + Dst: serverAddr.IP, + Src: clientAddr.IP, + } + ret, err := h.Marshal() + if err != nil { + return nil, errors.Wrap(err, "failed to marshal when making udp packet") + } + ret = append(ret, udp...) + ret = append(ret, payload...) + return ret, nil +} + +// Receive DHCP response packet using reader +func (c *DHCP) receiveDHCPResponse(ctx context.Context, reader io.ReadCloser, xid TransactionID) error { + recvErrors := make(chan error, 1) + // Recvfrom is a blocking call, so if something goes wrong with its timeout it won't return. + + // Additionally, the timeout on the socket (on the Read(...)) call is how long until the socket times out and gives an error, + // but it won't error if we do get some sort of data within the time out period. + + // If we get some data (even if it is not the packet we are looking for, like wrong txid, wrong response opcode etc.) + // then we continue in the for loop. We then call recvfrom again which will reset the timeout period + // Without the secondary timeout at the bottom of the function, we could stay stuck in the for loop as long as we receive packets. 
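+	// The same guard could be phrased with a context timeout instead of the
+	// ticker below (rough sketch only, not part of this change):
+	//
+	//	readCtx, cancel := context.WithTimeout(ctx, DefaultReadTimeout)
+	//	defer cancel()
+	//	select {
+	//	case err := <-recvErrors:
+	//		// handle err
+	//	case <-readCtx.Done():
+	//		// timed out waiting for replies
+	//	}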
+ go func(errs chan<- error) { + // loop will only exit if there is an error, context canceled, or we find our reply packet + for { + if ctx.Err() != nil { + errs <- ctx.Err() + return + } + + buf := make([]byte, MaxUDPReceivedPacketSize) + // Blocks until data received or timeout period is reached + n, innerErr := reader.Read(buf) + if innerErr != nil { + errs <- innerErr + return + } + // check header + var iph ipv4.Header + if err := iph.Parse(buf[:n]); err != nil { + // skip non-IP data + continue + } + if iph.Protocol != udpProtocol { + // skip non-UDP packets + continue + } + udph := buf[iph.Len:n] + // source is from dhcp server if receiving + srcPort := int(binary.BigEndian.Uint16(udph[0:2])) + if srcPort != dhcpServerPort { + continue + } + // client is to dhcp client if receiving + dstPort := int(binary.BigEndian.Uint16(udph[2:4])) + if dstPort != dhcpClientPort { + continue + } + // check payload + pLen := int(binary.BigEndian.Uint16(udph[4:6])) + payload := buf[iph.Len+8 : iph.Len+pLen] + + // retrieve opcode from payload + opcode := payload[0] // opcode is first byte + // retrieve txid from payload + txidOffset := 4 // after 4 bytes, the txid starts + // the txid is 4 bytes, so we take four bytes after the offset + txid := payload[txidOffset : txidOffset+4] + + c.logger.Info("Received packet", zap.Int("opCode", int(opcode)), zap.Any("transactionID", TransactionID(txid))) + if opcode != dhcpOpCodeReply { + continue // opcode is not a reply, so continue + } + + if TransactionID(txid) == xid { + break + } + } + // only occurs if we find our reply packet successfully + // a nil error means a reply was found for this txid + recvErrors <- nil + }(recvErrors) + + // sends a message on repeat after timeout, but only the first one matters + ticker := time.NewTicker(DefaultReadTimeout) + defer ticker.Stop() + + select { + case err := <-recvErrors: + if err != nil { + return errors.Wrap(err, "error during receiving") + } + case <-ticker.C: + return errors.New("timed out waiting for replies") + } + return nil +} + +// Issues a DHCP Discover packet from the nic specified by mac and name ifname +// Returns nil if a reply to the transaction was received, or error if time out +// Does not return the DHCP Offer that was received from the DHCP server +func (c *DHCP) DiscoverRequest(ctx context.Context, mac net.HardwareAddr, ifname string) error { + txid, err := GenerateTransactionID() + if err != nil { + return errors.Wrap(err, "failed to generate random transaction id") + } + + // Used in later steps + raddr := &net.UDPAddr{IP: net.IPv4bcast, Port: dhcpServerPort} + laddr := &net.UDPAddr{IP: net.IPv4zero, Port: dhcpClientPort} + var destination [net.IPv4len]byte + copy(destination[:], raddr.IP.To4()) + + // Build a DHCP discover packet + dhcpPacket, err := buildDHCPDiscover(mac, txid) + if err != nil { + return errors.Wrap(err, "failed to build dhcp discover packet") + } + // Make UDP packet from dhcp packet in previous steps + packetToSendBytes, err := MakeRawUDPPacket(dhcpPacket, *raddr, *laddr) + if err != nil { + return errors.Wrap(err, "error making raw udp packet") + } + + // Make writer + remoteAddr := unix.SockaddrInet4{Port: laddr.Port, Addr: destination} + writer, err := NewWriteSocket(ifname, remoteAddr) + defer func() { + // Ensure the file descriptor is closed when done + if err = writer.Close(); err != nil { + c.logger.Error("Error closing dhcp writer socket:", zap.Error(err)) + } + }() + if err != nil { + return errors.Wrap(err, "failed to make broadcast socket") + } + + // Make 
reader + deadline, ok := ctx.Deadline() + if !ok { + return errors.New("no deadline for passed in context") + } + timeout := time.Until(deadline) + // note: if the write/send takes a long time DiscoverRequest might take a bit longer than the deadline + reader, err := NewReadSocket(ifname, timeout) + defer func() { + // Ensure the file descriptor is closed when done + if err = reader.Close(); err != nil { + c.logger.Error("Error closing dhcp reader socket:", zap.Error(err)) + } + }() + if err != nil { + return errors.Wrap(err, "failed to make listening socket") + } + + // Once writer and reader created, start sending and receiving + _, err = writer.Write(packetToSendBytes) + if err != nil { + return errors.Wrap(err, "failed to send dhcp discover packet") + } + + c.logger.Info("DHCP Discover packet was sent successfully", zap.Any("transactionID", txid)) + + // Wait for DHCP response (Offer) + res := c.receiveDHCPResponse(ctx, reader, txid) + return res +} diff --git a/dhcp/dhcp_windows.go b/dhcp/dhcp_windows.go new file mode 100644 index 0000000000..7b23dbeeff --- /dev/null +++ b/dhcp/dhcp_windows.go @@ -0,0 +1,22 @@ +package dhcp + +import ( + "context" + "net" + + "go.uber.org/zap" +) + +type DHCP struct { + logger *zap.Logger +} + +func New(logger *zap.Logger) *DHCP { + return &DHCP{ + logger: logger, + } +} + +func (c *DHCP) DiscoverRequest(_ context.Context, _ net.HardwareAddr, _ string) error { + return nil +} diff --git a/docs/README.md b/docs/README.md index 1cb377b490..438ff3a584 100644 --- a/docs/README.md +++ b/docs/README.md @@ -4,7 +4,6 @@ * [CNI plugin](cni.md) - describes how to setup Azure CNI plugins. * [Azure CNI Powered By Cilium](cilium.md) - describes the next generation of Azure CNI Plugin powered by Cilium dataplane. * [Azure CNI Overlay Mode for AKS](overlay-for-aks.md) - describes a mode of the Azure CNI Plugin to provide a Pod network from an overlay address space with no encapsulation. -* [CNM (libnetwork) plugin](cnm.md) - describes how to setup Azure CNM plugins. * [ACS](acs.md) - describes how to use the plugins with Azure Container Service. * [Network](network.md) - describes container networks created by plugins. * [IPAM](ipam.md) - describes how container IP address management is done by plugins. diff --git a/docs/cnm.md b/docs/cnm.md deleted file mode 100644 index 364dafa9a0..0000000000 --- a/docs/cnm.md +++ /dev/null @@ -1,95 +0,0 @@ -# Microsoft Azure Container Networking - -## Azure VNET CNM (libnetwork) Plugin -The `azure-vnet` libnetwork plugin implements the Docker libnetwork [network](https://github.com/docker/libnetwork/blob/master/docs/remote.md) and [IPAM](https://github.com/docker/libnetwork/blob/master/docs/ipam.md) remote plugin interfaces. - -The plugin is available on both Linux and Windows platforms. - -The network and IPAM plugins are designed to work together. The IPAM plugin can also be used by 3rd party software to manage IP addresses from Azure VNET space. - -This page describes how to setup the CNM plugin manually on Azure IaaS VMs. If you are planning to deploy an ACS cluster, see [ACS](acs.md) instead. - -## Install -Copy the plugin package from the [release](https://github.com/Azure/azure-container-networking/releases) share to your Azure VM, extract the contents and run the plugin in the background. 
- -```bash -# Get the last version from https://github.com/Azure/azure-container-networking/releases -$ PLUGIN_VERSION="v1.x.x" -$ curl -OsSL https://github.com/Azure/azure-container-networking/releases/download/${PLUGIN_VERSION}/azure-vnet-cnm-linux-amd64-${PLUGIN_VERSION}.tgz -$ tar xzvf azure-vnet-cnm-linux-amd64-${PLUGIN_VERSION}.tgz -# Might require sudo if not running as root -$ ./azure-cnm-plugin& -``` - -The `azure-vnet` plugin also requires the ebtables package when running on Linux. This step is not required on Windows. - -```bash -$ apt-get install -y ebtables -``` - -## Build -The plugin can also be built directly from the source code in this repository. - -```bash -$ git clone https://github.com/Azure/azure-container-networking -$ cd azure-container-networking -$ make azure-cnm-plugin -``` - -This builds the plugin and generates a tar archive. The binaries are placed in the `output` directory. - -## Usage -```bash -$ azure-cnm-plugin --help - -Usage: azure-cnm-plugin [OPTIONS] - -Options: - -e, --environment=azure Set the operating environment {azure,mas} - -u, --api-url Set the API server URL - -l, --log-level=info Set the logging level {info,debug} - -t, --log-target=logfile Set the logging target {syslog,stderr,logfile} - -o, --log-location Set the logging directory - -q, --ipam-query-url Set the IPAM query URL - -i, --ipam-query-interval Set the IPAM plugin query interval - -v, --version Print version information - -h, --help Print usage information -``` - -## Examples -To connect your containers to other resources on your Azure VNET, you need to first create a Docker network. A network is a group of uniquely addressable endpoints that can communicate with each other. Pass the plugin name as both the network and IPAM plugin. You also need to specify an Azure VNET subnet for your network. - -Create a network: - -```bash -$ docker network create --driver=azure-vnet --ipam-driver=azure-vnet --subnet=[subnet] azure -``` - -When the command succeeds, it will return the network ID. Confirm that the network was created successfully: - -```bash -$ docker network ls -NETWORK ID NAME DRIVER SCOPE -3159b0528a83 azure azure-vnet local -515779dadc8a bridge bridge local -ed6e704a74ef host host local -b35e3b663cc1 none null local -``` - -Connect containers to your network by specifying the `--net` argument with your network's name when running them: - -```bash -$ docker run -it --rm --net=azure ubuntu:latest /bin/bash -``` - -Finally, once all containers on the network exit, you can delete the network. - -```bash -$ docker network rm azure -``` - -## Outbound Connectivity from container -You have to add following iptable command to allow outbound(internet) connectivity from container -```bash -iptables -t nat -A POSTROUTING -m addrtype ! --dst-type local ! -d -j MASQUERADE -``` diff --git a/dropgz/README.md b/dropgz/README.md deleted file mode 100644 index 7ca59da397..0000000000 --- a/dropgz/README.md +++ /dev/null @@ -1,10 +0,0 @@ -### Running the dropgz locally - -Select the file(for example azure-ipam binary) you want to deploy using the dropgz. - -1. Copy the file (i.e azure-ipam) to the directory `/dropgz/pkg/embed/fs` -2. Add the sha of the file to the sum.txt file.(`sha256sum * > sum.txt`) -3. You need to gzip the file, so run the cmd `gzip --verbose --best --recursive azure-ipam` and rename the output .gz file to original file name. -4. Do the step 3 for `sum.txt` file as well. -5. go to dropgz directory and build it. (`go build .`) -6. 
You can now test the dropgz command locally. (`./dropgz deploy azure-ipam -o ./azure-ipam`) \ No newline at end of file diff --git a/dropgz/build/linux.Dockerfile b/dropgz/build/linux.Dockerfile deleted file mode 100644 index d090971698..0000000000 --- a/dropgz/build/linux.Dockerfile +++ /dev/null @@ -1,47 +0,0 @@ -FROM mcr.microsoft.com/cbl-mariner/base/core:2.0 AS tar -RUN tdnf install -y tar -RUN tdnf upgrade -y && tdnf install -y ca-certificates - -FROM tar AS azure-ipam -ARG AZIPAM_VERSION=v0.0.6 -ARG VERSION -ARG OS -ARG ARCH -WORKDIR /azure-ipam -COPY ./azure-ipam . -RUN curl -LO --cacert /etc/ssl/certs/ca-certificates.crt https://github.com/Azure/azure-container-networking/releases/download/azure-ipam%2F$AZIPAM_VERSION/azure-ipam-$OS-$ARCH-$AZIPAM_VERSION.tgz && tar -xvf azure-ipam-$OS-$ARCH-$AZIPAM_VERSION.tgz - -FROM tar AS azure-vnet -ARG AZCNI_VERSION=v1.6.0 -ARG VERSION -ARG OS -ARG ARCH -WORKDIR /azure-container-networking -COPY . . -RUN curl -LO --cacert /etc/ssl/certs/ca-certificates.crt https://github.com/Azure/azure-container-networking/releases/download/$AZCNI_VERSION/azure-vnet-cni-swift-$OS-$ARCH-$AZCNI_VERSION.tgz && tar -xvf azure-vnet-cni-swift-$OS-$ARCH-$AZCNI_VERSION.tgz - -FROM mcr.microsoft.com/cbl-mariner/base/core:2.0 AS compressor -ARG OS -WORKDIR /dropgz -COPY dropgz . -COPY --from=azure-ipam /azure-ipam/*.conflist pkg/embed/fs -COPY --from=azure-ipam /azure-ipam/azure-ipam pkg/embed/fs -COPY --from=azure-vnet /azure-container-networking/cni/azure-$OS.conflist pkg/embed/fs/azure.conflist -COPY --from=azure-vnet /azure-container-networking/cni/azure-$OS-swift.conflist pkg/embed/fs/azure-swift.conflist -COPY --from=azure-vnet /azure-container-networking/cni/azure-$OS-swift-overlay.conflist pkg/embed/fs/azure-swift-overlay.conflist -COPY --from=azure-vnet /azure-container-networking/cni/azure-$OS-swift-overlay-dualstack.conflist pkg/embed/fs/azure-swift-overlay-dualstack.conflist -COPY --from=azure-vnet /azure-container-networking/azure-vnet pkg/embed/fs -COPY --from=azure-vnet /azure-container-networking/azure-vnet-telemetry pkg/embed/fs -COPY --from=azure-vnet /azure-container-networking/azure-vnet-ipam pkg/embed/fs -RUN cd pkg/embed/fs/ && sha256sum * > sum.txt -RUN gzip --verbose --best --recursive pkg/embed/fs && for f in pkg/embed/fs/*.gz; do mv -- "$f" "${f%%.gz}"; done - -FROM mcr.microsoft.com/oss/go/microsoft/golang:1.21 AS dropgz -ARG VERSION -WORKDIR /dropgz -COPY --from=compressor /dropgz . -RUN CGO_ENABLED=0 go build -a -o bin/dropgz -trimpath -ldflags "-X github.com/Azure/azure-container-networking/dropgz/internal/buildinfo.Version="$VERSION"" -gcflags="-dwarflocationlists=true" main.go - -FROM scratch as linux -COPY --from=dropgz /dropgz/bin/dropgz /dropgz -ENTRYPOINT [ "/dropgz" ] diff --git a/dropgz/build/windows.Dockerfile b/dropgz/build/windows.Dockerfile deleted file mode 100644 index dae5ee3840..0000000000 --- a/dropgz/build/windows.Dockerfile +++ /dev/null @@ -1,39 +0,0 @@ -ARG ARCH -ARG OS_VERSION -FROM --platform=linux/${ARCH} mcr.microsoft.com/cbl-mariner/base/core:2.0 AS tar -RUN tdnf install -y tar -RUN tdnf install -y unzip -RUN tdnf upgrade -y && tdnf install -y ca-certificates - -FROM tar AS azure-vnet -ARG AZCNI_VERSION=v1.6.0 -ARG VERSION -ARG OS -ARG ARCH -WORKDIR /azure-container-networking -COPY . . 
-RUN curl -LO --cacert /etc/ssl/certs/ca-certificates.crt https://github.com/Azure/azure-container-networking/releases/download/$AZCNI_VERSION/azure-vnet-cni-$OS-$ARCH-$AZCNI_VERSION.zip && unzip -o azure-vnet-cni-$OS-$ARCH-$AZCNI_VERSION.zip - -FROM --platform=linux/${ARCH} mcr.microsoft.com/cbl-mariner/base/core:2.0 AS compressor -ARG OS -WORKDIR /dropgz -COPY dropgz . -COPY --from=azure-vnet /azure-container-networking/cni/azure-$OS-swift-overlay.conflist pkg/embed/fs/azure-swift-overlay.conflist -COPY --from=azure-vnet /azure-container-networking/cni/azure-$OS-swift-overlay-dualstack.conflist pkg/embed/fs/azure-swift-overlay-dualstack.conflist -COPY --from=azure-vnet /azure-container-networking/azure-vnet.exe pkg/embed/fs -COPY --from=azure-vnet /azure-container-networking/azure-vnet-stateless.exe pkg/embed/fs -COPY --from=azure-vnet /azure-container-networking/azure-vnet-telemetry.exe pkg/embed/fs -COPY --from=azure-vnet /azure-container-networking/azure-vnet-ipam.exe pkg/embed/fs -COPY --from=azure-vnet /azure-container-networking/azure-vnet-telemetry.config pkg/embed/fs -RUN cd pkg/embed/fs/ && sha256sum * > sum.txt -RUN gzip --verbose --best --recursive pkg/embed/fs && for f in pkg/embed/fs/*.gz; do mv -- "$f" "${f%%.gz}"; done - -FROM --platform=linux/${ARCH} mcr.microsoft.com/oss/go/microsoft/golang:1.21 AS dropgz -ARG VERSION -WORKDIR /dropgz -COPY --from=compressor /dropgz . -RUN GOOS=windows CGO_ENABLED=0 go build -a -o bin/dropgz.exe -trimpath -ldflags "-X github.com/Azure/azure-container-networking/dropgz/internal/buildinfo.Version="$VERSION"" -gcflags="-dwarflocationlists=true" main.go - -FROM mcr.microsoft.com/windows/nanoserver:${OS_VERSION} as windows -COPY --from=dropgz /dropgz/bin/dropgz.exe dropgz.exe -ENTRYPOINT [ "dropgz.exe" ] diff --git a/dropgz/cmd/payload.go b/dropgz/cmd/payload.go index 921591d35a..dab4a23e9c 100644 --- a/dropgz/cmd/payload.go +++ b/dropgz/cmd/payload.go @@ -10,6 +10,12 @@ import ( "go.uber.org/zap" ) +var ( + compression embed.Compression + skipVerify bool + outs []string +) + // list subcommand var list = &cobra.Command{ Use: "list", @@ -32,7 +38,7 @@ func checksum(srcs, dests []string) error { if len(srcs) != len(dests) { return errors.Wrapf(embed.ErrArgsMismatched, "%d and %d", len(srcs), len(dests)) } - rc, err := embed.Extract("sum.txt") + rc, err := embed.Extract("sum.txt", compression) if err != nil { return errors.Wrap(err, "failed to extract checksum file") } @@ -54,11 +60,6 @@ func checksum(srcs, dests []string) error { return nil } -var ( - skipVerify bool - outs []string -) - // deploy subcommand var deploy = &cobra.Command{ Use: "deploy", @@ -73,7 +74,7 @@ var deploy = &cobra.Command{ return errors.Wrapf(embed.ErrArgsMismatched, "%d files, %d outputs", len(srcs), len(outs)) } log := z.With(zap.Strings("sources", srcs), zap.Strings("outputs", outs), zap.String("cmd", "deploy")) - if err := embed.Deploy(log, srcs, outs); err != nil { + if err := embed.Deploy(log, srcs, outs, compression); err != nil { return errors.Wrapf(err, "failed to deploy %s", srcs) } log.Info("successfully wrote files") @@ -120,6 +121,7 @@ func init() { root.AddCommand(verify) deploy.ValidArgs, _ = embed.Contents() // setting this after the command is initialized is required + deploy.Flags().StringVarP((*string)(&compression), "compression", "c", "none", "compression type (default none)") deploy.Flags().BoolVar(&skipVerify, "skip-verify", false, "set to disable checksum validation") deploy.Flags().StringSliceVarP(&outs, "output", "o", []string{}, "output file 
path") root.AddCommand(deploy) diff --git a/dropgz/go.mod b/dropgz/go.mod index a22b9c4c80..b57c12d148 100644 --- a/dropgz/go.mod +++ b/dropgz/go.mod @@ -1,16 +1,18 @@ module github.com/Azure/azure-container-networking/dropgz -go 1.21 +go 1.23 + +toolchain go1.23.2 require ( github.com/jsternberg/zap-logfmt v1.3.0 github.com/pkg/errors v0.9.1 - github.com/spf13/cobra v1.8.1 + github.com/spf13/cobra v1.9.1 go.uber.org/zap v1.27.0 ) require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/pflag v1.0.6 // indirect go.uber.org/multierr v1.11.0 // indirect ) diff --git a/dropgz/go.sum b/dropgz/go.sum index d8c2c42039..7db46b5254 100644 --- a/dropgz/go.sum +++ b/dropgz/go.sum @@ -1,4 +1,4 @@ -github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -10,10 +10,10 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= -github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= +github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= diff --git a/dropgz/pkg/embed/payload.go b/dropgz/pkg/embed/payload.go index fd69d56eb6..10b0dee9f4 100644 --- a/dropgz/pkg/embed/payload.go +++ b/dropgz/pkg/embed/payload.go @@ -21,6 +21,13 @@ const ( var ErrArgsMismatched = errors.New("mismatched argument count") +type Compression string + +const ( + None Compression = "none" + Gzip Compression = "gzip" +) + // embedfs contains the embedded files for deployment, as a read-only FileSystem containing only "embedfs/". // //nolint:typecheck // dir is populated at build. 
@@ -70,20 +77,25 @@ func (c *compoundReadCloser) Close() error { return nil } -func Extract(p string) (*compoundReadCloser, error) { +func Extract(p string, compression Compression) (*compoundReadCloser, error) { f, err := embedfs.Open(path.Join(cwd, p)) if err != nil { return nil, errors.Wrapf(err, "failed to open file %s", p) } - r, err := gzip.NewReader(bufio.NewReader(f)) - if err != nil { - return nil, errors.Wrap(err, "failed to build reader") + var rc io.ReadCloser = f + switch compression { + case Gzip: + rc, err = gzip.NewReader(bufio.NewReader(f)) + if err != nil { + return nil, errors.Wrap(err, "failed to build reader") + } + default: } - return &compoundReadCloser{closer: f, readcloser: r}, nil + return &compoundReadCloser{closer: f, readcloser: rc}, nil } -func deploy(src, dest string) error { - rc, err := Extract(src) +func deploy(src, dest string, compression Compression) error { + rc, err := Extract(src, compression) if err != nil { return err } @@ -104,14 +116,14 @@ func deploy(src, dest string) error { return errors.Wrapf(err, "failed to copy %s to %s", src, dest) } -func Deploy(log *zap.Logger, srcs, dests []string) error { +func Deploy(log *zap.Logger, srcs, dests []string, compression Compression) error { if len(srcs) != len(dests) { return errors.Wrapf(ErrArgsMismatched, "%d and %d", len(srcs), len(dests)) } for i := range srcs { src := srcs[i] dest := dests[i] - if err := deploy(src, dest); err != nil { + if err := deploy(src, dest, compression); err != nil { return err } log.Info("wrote file", zap.String("src", src), zap.String("dest", dest)) diff --git a/go.mod b/go.mod index 68cbd7ce20..5004344b4a 100644 --- a/go.mod +++ b/go.mod @@ -1,158 +1,263 @@ module github.com/Azure/azure-container-networking -go 1.21 +go 1.23.2 require ( - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.2 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1 github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets v0.12.0 github.com/Masterminds/semver v1.5.0 github.com/Microsoft/go-winio v0.6.2 - github.com/Microsoft/hcsshim v0.12.5 + github.com/Microsoft/hcsshim v0.13.0 github.com/avast/retry-go/v3 v3.1.1 - github.com/avast/retry-go/v4 v4.6.0 + github.com/avast/retry-go/v4 v4.6.1 github.com/billgraziano/dpapi v0.5.0 - github.com/containernetworking/cni v1.2.2 - github.com/docker/libnetwork v0.8.0-dev.2.0.20210525090646-64b7a4574d14 - github.com/evanphx/json-patch/v5 v5.7.0 // indirect - github.com/go-logr/zapr v1.2.4 // indirect + github.com/containernetworking/cni v1.3.0 + github.com/evanphx/json-patch/v5 v5.9.0 // indirect + github.com/go-logr/zapr v1.3.0 github.com/golang/mock v1.6.0 github.com/golang/protobuf v1.5.4 github.com/google/gnostic-models v0.6.8 // indirect - github.com/google/go-cmp v0.6.0 + github.com/google/go-cmp v0.7.0 github.com/google/uuid v1.6.0 github.com/gorilla/mux v1.8.1 github.com/hashicorp/go-version v1.7.0 github.com/microsoft/ApplicationInsights-Go v0.4.4 github.com/nxadm/tail v1.4.11 github.com/onsi/ginkgo v1.16.5 - github.com/onsi/gomega v1.33.1 + github.com/onsi/gomega v1.37.0 github.com/patrickmn/go-cache v2.1.0+incompatible github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.20.0 - github.com/prometheus/client_model v0.6.1 - github.com/spf13/cobra v1.8.1 - github.com/spf13/pflag v1.0.5 - github.com/spf13/viper v1.19.0 - github.com/stretchr/testify v1.9.0 + github.com/prometheus/client_golang v1.23.0 + 
 	github.com/prometheus/client_model v0.6.2
+	github.com/spf13/cobra v1.9.1
+	github.com/spf13/pflag v1.0.7
+	github.com/spf13/viper v1.20.1
+	github.com/stretchr/testify v1.10.0
 	go.uber.org/zap v1.27.0
-	golang.org/x/exp v0.0.0-20231006140011-7918f672742d
-	golang.org/x/sys v0.24.0
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect
-	google.golang.org/grpc v1.65.0
-	google.golang.org/protobuf v1.34.2
+	golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
+	golang.org/x/sys v0.34.0
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a // indirect
+	google.golang.org/grpc v1.74.2
+	google.golang.org/protobuf v1.36.6
 	gopkg.in/natefinch/lumberjack.v2 v2.2.1
-	k8s.io/api v0.28.5
-	k8s.io/apiextensions-apiserver v0.28.3
-	k8s.io/apimachinery v0.28.5
-	k8s.io/client-go v0.28.5
+	k8s.io/api v0.30.14
+	k8s.io/apiextensions-apiserver v0.30.1
+	k8s.io/apimachinery v0.30.14
+	k8s.io/client-go v0.30.14
 	k8s.io/klog v1.0.0
 	k8s.io/klog/v2 v2.130.1
-	k8s.io/utils v0.0.0-20230726121419-3b25d923346b
-	sigs.k8s.io/controller-runtime v0.16.5
+	k8s.io/utils v0.0.0-20240310230437-4693a0247e57
+	sigs.k8s.io/controller-runtime v0.18.4
 )
 
 require (
-	code.cloudfoundry.org/clock v1.0.0 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
+	code.cloudfoundry.org/clock v1.41.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect
 	github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 // indirect
-	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
+	github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
-	github.com/coreos/go-iptables v0.7.0
+	github.com/coreos/go-iptables v0.8.0
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
-	github.com/docker/docker v26.1.5+incompatible // indirect
-	github.com/docker/go-connections v0.4.0 // indirect
-	github.com/evanphx/json-patch v5.6.0+incompatible // indirect
-	github.com/fsnotify/fsnotify v1.7.0
-	github.com/go-logr/logr v1.4.1 // indirect
-	github.com/go-openapi/jsonpointer v0.20.0 // indirect
-	github.com/go-openapi/jsonreference v0.20.2 // indirect
-	github.com/go-openapi/swag v0.22.4 // indirect
-	github.com/gofrs/uuid v3.3.0+incompatible // indirect
+	github.com/evanphx/json-patch v5.7.0+incompatible // indirect
+	github.com/fsnotify/fsnotify v1.9.0
+	github.com/go-logr/logr v1.4.3 // indirect
+	github.com/go-openapi/jsonpointer v0.20.2 // indirect
+	github.com/go-openapi/jsonreference v0.20.4 // indirect
+	github.com/go-openapi/swag v0.22.7 // indirect
+	github.com/gofrs/uuid v4.4.0+incompatible // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
-	github.com/hashicorp/hcl v1.0.0 // indirect
 	github.com/hpcloud/tail v1.0.0 // indirect
 	github.com/imdario/mergo v0.3.16 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
-	github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/kylelemons/godebug v1.1.0 // indirect
-	github.com/labstack/echo/v4 v4.12.0
+	github.com/labstack/echo/v4 v4.13.4
 	github.com/labstack/gommon v0.4.2 // indirect
-	github.com/magiconair/properties v1.8.7 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
-	github.com/mattn/go-colorable v0.1.13 // indirect
+	github.com/mattn/go-colorable v0.1.14 // indirect
 	github.com/mattn/go-isatty v0.0.20 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/moby/spdystream v0.2.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
-	github.com/pelletier/go-toml/v2 v2.2.2 // indirect
+	github.com/pelletier/go-toml/v2 v2.2.3 // indirect
 	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
-	github.com/prometheus/common v0.55.0
-	github.com/prometheus/procfs v0.15.1 // indirect
+	github.com/prometheus/common v0.65.0
+	github.com/prometheus/procfs v0.16.1 // indirect
 	github.com/sirupsen/logrus v1.9.3
-	github.com/spf13/afero v1.11.0 // indirect
-	github.com/spf13/cast v1.6.0 // indirect
+	github.com/spf13/afero v1.12.0 // indirect
+	github.com/spf13/cast v1.7.1 // indirect
 	github.com/subosito/gotenv v1.6.0 // indirect
 	github.com/valyala/bytebufferpool v1.0.0 // indirect
 	github.com/valyala/fasttemplate v1.2.2 // indirect
-	github.com/vishvananda/netlink v1.2.1-beta.2
-	github.com/vishvananda/netns v0.0.4
+	github.com/vishvananda/netlink v1.3.1
+	github.com/vishvananda/netns v0.0.5
 	go.opencensus.io v0.24.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
-	golang.org/x/crypto v0.26.0
-	golang.org/x/net v0.27.0 // indirect
-	golang.org/x/oauth2 v0.21.0 // indirect
-	golang.org/x/term v0.23.0 // indirect
-	golang.org/x/text v0.17.0 // indirect
-	golang.org/x/time v0.6.0
+	golang.org/x/crypto v0.40.0
+	golang.org/x/net v0.42.0
+	golang.org/x/oauth2 v0.30.0 // indirect
+	golang.org/x/term v0.33.0 // indirect
+	golang.org/x/text v0.27.0 // indirect
+	golang.org/x/time v0.12.0
 	golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
 	gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
 	gopkg.in/fsnotify.v1 v1.4.7 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
-	gopkg.in/ini.v1 v1.67.0 // indirect
 	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	k8s.io/component-base v0.28.5 // indirect
-	k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect
+	k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
 	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
 )
 
 require (
+	github.com/Azure/azure-container-networking/zapai v0.0.3
 	github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.7.0-beta.1
 	github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/dashboard/armdashboard v1.2.0
 	github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/monitor/armmonitor v0.11.0
 	github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v5 v5.2.0
 	github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0
-	golang.org/x/sync v0.8.0
-	gotest.tools/v3 v3.5.1
+	github.com/cilium/cilium v1.15.16
+	github.com/jsternberg/zap-logfmt v1.3.0
+	golang.org/x/sync v0.16.0
+	gotest.tools/v3 v3.5.2
 	k8s.io/kubectl v0.28.5
-	sigs.k8s.io/yaml v1.4.0
+	k8s.io/kubernetes v1.30.7
+	sigs.k8s.io/yaml v1.6.0
 )
 
 require (
-	github.com/containerd/cgroups/v3 v3.0.2 // indirect
-	github.com/containerd/containerd v1.7.18 // indirect
-	github.com/containerd/errdefs v0.1.0 // indirect
+	cloud.google.com/go/compute/metadata v0.7.0 // indirect
+	github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab // indirect
+	github.com/NYTimes/gziphandler v1.1.1 // indirect
+	github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
+	github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e // indirect
+	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
+	github.com/blang/semver/v4 v4.0.0 // indirect
+	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
+	github.com/checkpoint-restore/go-criu/v6 v6.3.0 // indirect
+	github.com/cilium/ebpf v0.16.0 // indirect
+	github.com/cilium/proxy v0.0.0-20231202123106-38b645b854f3 // indirect
+	github.com/container-storage-interface/spec v1.8.0 // indirect
+	github.com/containerd/cgroups v1.1.0 // indirect
+	github.com/containerd/console v1.0.4 // indirect
+	github.com/containerd/errdefs/pkg v0.3.0 // indirect
 	github.com/containerd/log v0.1.0 // indirect
-	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
-	github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
-	github.com/klauspost/compress v1.17.9 // indirect
-	github.com/sagikazarmark/locafero v0.4.0 // indirect
-	github.com/sagikazarmark/slog-shim v0.1.0 // indirect
+	github.com/containerd/ttrpc v1.2.5 // indirect
+	github.com/containerd/typeurl/v2 v2.2.0 // indirect
+	github.com/coreos/go-semver v0.3.1 // indirect
+	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
+	github.com/cyphar/filepath-securejoin v0.3.5 // indirect
+	github.com/distribution/reference v0.6.0 // indirect
+	github.com/docker/go-units v0.5.0 // indirect
+	github.com/euank/go-kmsg-parser v2.0.0+incompatible // indirect
+	github.com/felixge/httpsnoop v1.0.4 // indirect
+	github.com/go-logr/stdr v1.2.2 // indirect
+	github.com/go-ole/go-ole v1.2.6 // indirect
+	github.com/go-openapi/analysis v0.21.4 // indirect
+	github.com/go-openapi/errors v0.20.4 // indirect
+	github.com/go-openapi/loads v0.21.2 // indirect
+	github.com/go-openapi/runtime v0.26.2 // indirect
+	github.com/go-openapi/spec v0.20.11 // indirect
+	github.com/go-openapi/strfmt v0.21.9 // indirect
+	github.com/go-openapi/validate v0.22.3 // indirect
+	github.com/go-viper/mapstructure/v2 v2.3.0 // indirect
+	github.com/godbus/dbus/v5 v5.1.0 // indirect
+	github.com/google/cadvisor v0.49.0 // indirect
+	github.com/google/cel-go v0.17.8 // indirect
+	github.com/google/gopacket v1.1.19 // indirect
+	github.com/gorilla/websocket v1.5.1 // indirect
+	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
+	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
+	github.com/karrick/godirwalk v1.17.0 // indirect
+	github.com/kr/pretty v0.3.1 // indirect
+	github.com/kr/text v0.2.0 // indirect
+	github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
+	github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible // indirect
+	github.com/moby/sys/mountinfo v0.7.1 // indirect
+	github.com/moby/sys/user v0.3.0 // indirect
+	github.com/moby/sys/userns v0.1.0 // indirect
+	github.com/mrunalp/fileutils v0.5.1 // indirect
+	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
+	github.com/oklog/ulid v1.3.1 // indirect
+	github.com/opencontainers/go-digest v1.0.0 // indirect
+	github.com/opencontainers/runc v1.2.3 // indirect
+	github.com/opencontainers/runtime-spec v1.2.0 // indirect
+	github.com/opencontainers/selinux v1.11.0 // indirect
+	github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b // indirect
+	github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect
+	github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
+	github.com/rogpeppe/go-internal v1.13.1 // indirect
+	github.com/sasha-s/go-deadlock v0.3.1 // indirect
+	github.com/seccomp/libseccomp-golang v0.10.0 // indirect
+	github.com/shirou/gopsutil/v3 v3.23.5 // indirect
+	github.com/stoewer/go-strcase v1.2.0 // indirect
+	github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
+	github.com/tklauser/go-sysconf v0.3.11 // indirect
+	github.com/tklauser/numcpus v0.6.0 // indirect
+	github.com/yusufpapurcu/wmi v1.2.3 // indirect
+	go.etcd.io/etcd/api/v3 v3.5.11 // indirect
+	go.etcd.io/etcd/client/pkg/v3 v3.5.11 // indirect
+	go.etcd.io/etcd/client/v3 v3.5.11 // indirect
+	go.mongodb.org/mongo-driver v1.13.1 // indirect
+	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.42.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect
+	go.opentelemetry.io/otel v1.36.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 // indirect
+	go.opentelemetry.io/otel/metric v1.36.0 // indirect
+	go.opentelemetry.io/otel/sdk v1.36.0 // indirect
+	go.opentelemetry.io/otel/trace v1.36.0 // indirect
+	go.opentelemetry.io/proto/otlp v1.3.1 // indirect
+	go.uber.org/dig v1.17.1 // indirect
+	go.yaml.in/yaml/v2 v2.4.2 // indirect
+	go.yaml.in/yaml/v3 v3.0.4 // indirect
+	go4.org/netipx v0.0.0-20231129151722-fdeea329fbba // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20250528174236-200df99c418a // indirect
+	k8s.io/apiserver v0.30.14 // indirect
+	k8s.io/cloud-provider v0.30.7 // indirect
+	k8s.io/component-base v0.30.14 // indirect
+	k8s.io/component-helpers v0.30.7 // indirect
+	k8s.io/controller-manager v0.30.7 // indirect
+	k8s.io/cri-api v0.30.14 // indirect
+	k8s.io/csi-translation-lib v0.30.14 // indirect
+	k8s.io/dynamic-resource-allocation v0.30.14 // indirect
+	k8s.io/kms v0.30.14 // indirect
+	k8s.io/kube-scheduler v0.30.14 // indirect
+	k8s.io/mount-utils v0.30.14 // indirect
+	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 // indirect
+)
+
+require (
+	github.com/containerd/cgroups/v3 v3.0.3 // indirect
+	github.com/containerd/errdefs v0.3.0 // indirect
+	github.com/emicklei/go-restful/v3 v3.11.2 // indirect
+	github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
+	github.com/sagikazarmark/locafero v0.7.0 // indirect
 	github.com/sourcegraph/conc v0.3.0 // indirect
+	k8s.io/kubelet v0.30.14
 )
 
 replace (
 	github.com/onsi/ginkgo => github.com/onsi/ginkgo v1.12.0
 	github.com/onsi/gomega => github.com/onsi/gomega v1.10.0
 )
+
+retract (
+	v1.16.17 // contains only retractions, new version to retract 1.15.22.
+	v1.16.16 // contains only retractions, has to be newer than 1.16.15.
+	v1.16.15 // typo in the version number.
+	v1.15.22 // typo in the version number.
+)
diff --git a/go.sum b/go.sum
index f511916f71..24a00d50c2 100644
--- a/go.sum
+++ b/go.sum
@@ -1,13 +1,24 @@
+cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY=
+cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw=
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeOCw78U8ytSU=
+cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo=
 code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8=
-code.cloudfoundry.org/clock v1.0.0 h1:kFXWQM4bxYvdBw2X8BbBeXwQNgfoWv1vqAk2ZZyBN2o=
-code.cloudfoundry.org/clock v1.0.0/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 h1:nyQWyZvwGTvunIMxi1Y9uXkcyr+I7TeNrr/foo4Kpk8=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
+code.cloudfoundry.org/clock v1.41.0 h1:YiYQSEqcxswK+YtQ+NRIE31E1VNXkwb53Bb3zRmsoOM=
+code.cloudfoundry.org/clock v1.41.0/go.mod h1:ncX4UpMuVwZooK7Rw7P+fsE2brLasFbPlibOOrZq40w=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
+github.com/Azure/azure-container-networking/zapai v0.0.3 h1:73druF1cnne5Ign/ztiXP99Ss5D+UJ80EL2mzPgNRhk=
+github.com/Azure/azure-container-networking/zapai v0.0.3/go.mod h1:XV/aKJQAV6KqV4HQtZlDyxg2z7LaY9rsX8dqwyWFmUI=
+github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.2 h1:Hr5FTipp7SL07o2FvoVOX9HRiRH3CR3Mj8pxqCcdD5A=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.2/go.mod h1:QyVsSSN64v5TGltphKLQ2sQxe4OBQg0J1eKRcVBnfgE=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1 h1:B+blDbyVIG3WaikNxPnhPiJ1MThR03b3vKGtER95TP4=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1/go.mod h1:JdM5psgjfBf5fo2uWOZhflPWyDBZ/O/CNAH9CtsuZE4=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI=
 github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets v0.12.0 h1:xnO4sFyG8UH2fElBkcqLTOZsAajvKfnSlgBBW8dXYjw=
 github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets v0.12.0/go.mod h1:XD3DIOOVgBCO03OleB1fHjgktVRFxlT++KwKgIOewdM=
 github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 h1:FbH3BbSb4bvGluTesZZ+ttN/MDsnMmQP36OSnDuSXqw=
@@ -28,95 +39,191 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v5 v5.2
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v5 v5.2.0/go.mod h1:UmyOatRyQodVpp55Jr5WJmnkmVW4wKfo85uHFmMEjfM=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 h1:Dd+RhdJn0OTtVGaeDLZpcumkIVCtA/3/Fo42+eoYvVM=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0/go.mod h1:5kakwfW5CjC9KK+Q4wjXAg+ShuIm2mBMua0ZFj2C8PE=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
+github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
+github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab h1:UKkYhof1njT1/xq4SEg5z+VpTgjmNeHwPGRQl7takDI=
+github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab/go.mod h1:3VYc5hodBMJ5+l/7J4xAyMeuM2PNuepvHlGs8yilUCA=
 github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
 github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
 github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
 github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
-github.com/Microsoft/hcsshim v0.12.5 h1:bpTInLlDy/nDRWFVcefDZZ1+U8tS+rz3MxjKgu9boo0=
-github.com/Microsoft/hcsshim v0.12.5/go.mod h1:tIUGego4G1EN5Hb6KC90aDYiUI2dqLSTTOCjVNpOgZ8=
+github.com/Microsoft/hcsshim v0.13.0 h1:/BcXOiS6Qi7N9XqUcv27vkIuVOkBEcWstd2pMlWSeaA=
+github.com/Microsoft/hcsshim v0.13.0/go.mod h1:9KWJ/8DgU+QzYGupX4tzMhRQE8h6w90lH6HAaclpEok=
+github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
+github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
+github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18=
+github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e h1:QEF07wC0T1rKkctt1RINW/+RMTVmiwxETico2l3gxJA=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
+github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
+github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
+github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
 github.com/avast/retry-go/v3 v3.1.1 h1:49Scxf4v8PmiQ/nY0aY3p0hDueqSmc7++cBbtiDGu2g=
 github.com/avast/retry-go/v3 v3.1.1/go.mod h1:6cXRK369RpzFL3UQGqIUp9Q7GDrams+KsYWrfNA1/nQ=
-github.com/avast/retry-go/v4 v4.6.0 h1:K9xNA+KeB8HHc2aWFuLb25Offp+0iVRXEvFx8IinRJA=
-github.com/avast/retry-go/v4 v4.6.0/go.mod h1:gvWlPhBVsvBbLkVGDg/KwvBv0bEkCOLRRSHKIr2PyOE=
-github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/avast/retry-go/v4 v4.6.1 h1:VkOLRubHdisGrHnTu89g08aQEWEgRU7LVEop3GbIcMk=
+github.com/avast/retry-go/v4 v4.6.1/go.mod h1:V6oF8njAwxJ5gRo1Q7Cxab24xs5NCWZBeaHHBklR8mA=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/billgraziano/dpapi v0.5.0 h1:pcxA17vyjbDqYuxCFZbgL9tYIk2xgbRZjRaIbATwh+8=
 github.com/billgraziano/dpapi v0.5.0/go.mod h1:lmEcZjRfLCSbUTsRu8V2ti6Q17MvnKn3N9gQqzDdTh0=
+github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
+github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g=
+github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
 github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
 github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/checkpoint-restore/go-criu/v6 v6.3.0 h1:mIdrSO2cPNWQY1truPg6uHLXyKHk3Z5Odx4wjKOASzA=
+github.com/checkpoint-restore/go-criu/v6 v6.3.0/go.mod h1:rrRTN/uSwY2X+BPRl/gkulo9gsKOSAeVp9/K2tv7xZI=
+github.com/cilium/checkmate v1.0.3 h1:CQC5eOmlAZeEjPrVZY3ZwEBH64lHlx9mXYdUehEwI5w=
+github.com/cilium/checkmate v1.0.3/go.mod h1:KiBTasf39/F2hf2yAmHw21YFl3hcEyP4Yk6filxc12A=
+github.com/cilium/cilium v1.15.16 h1:m27kbvRA0ynOQlm1ay+a+lNVgLCTUW5Inky9WoA3wBM=
+github.com/cilium/cilium v1.15.16/go.mod h1:UuiAb8fmxV/lix5cGRgiJJ7hvhRfcdF48QreqG0xTB4=
+github.com/cilium/ebpf v0.16.0 h1:+BiEnHL6Z7lXnlGUsXQPPAE7+kenAd4ES8MQ5min0Ok=
+github.com/cilium/ebpf v0.16.0/go.mod h1:L7u2Blt2jMM/vLAVgjxluxtBKlz3/GWjB0dMOEngfwE=
+github.com/cilium/proxy v0.0.0-20231202123106-38b645b854f3 h1:fckMszrvhMot1XdF04NUKzmGw2CBJWGc9BCpFhVPKD8=
+github.com/cilium/proxy v0.0.0-20231202123106-38b645b854f3/go.mod h1:cvRtoiPIT40QqsHRR77WyyMSj8prsz0/kaV0s8Q3LIA=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/containerd/cgroups/v3 v3.0.2 h1:f5WFqIVSgo5IZmtTT3qVBo6TzI1ON6sycSBKkymb9L0=
-github.com/containerd/cgroups/v3 v3.0.2/go.mod h1:JUgITrzdFqp42uI2ryGA+ge0ap/nxzYgkGmIcetmErE=
-github.com/containerd/containerd v1.7.18 h1:jqjZTQNfXGoEaZdW1WwPU0RqSn1Bm2Ay/KJPUuO8nao=
-github.com/containerd/containerd v1.7.18/go.mod h1:IYEk9/IO6wAPUz2bCMVUbsfXjzw5UNP5fLz4PsUygQ4=
-github.com/containerd/errdefs v0.1.0 h1:m0wCRBiu1WJT/Fr+iOoQHMQS/eP5myQ8lCv4Dz5ZURM=
-github.com/containerd/errdefs v0.1.0/go.mod h1:YgWiiHtLmSeBrvpw+UfPijzbLaB77mEG1WwJTDETIV0=
+github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 h1:aQ3y1lwWyqYPiWZThqv1aFbZMiM9vblcSArJRf2Irls=
+github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
+github.com/container-storage-interface/spec v1.8.0 h1:D0vhF3PLIZwlwZEf2eNbpujGCNwspwTYf2idJRJx4xI=
+github.com/container-storage-interface/spec v1.8.0/go.mod h1:ROLik+GhPslwwWRNFF1KasPzroNARibH2rfz1rkg4H0=
+github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
+github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
+github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0=
+github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0=
+github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn4ro=
+github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
+github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4=
+github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
+github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
+github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
 github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
 github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
-github.com/containernetworking/cni v1.2.2 h1:9IbP6KJQQxVKo4hhnm8r50YcVKrJbJu3Dqw+Rbt1vYk=
-github.com/containernetworking/cni v1.2.2/go.mod h1:DuLgF+aPd3DzcTQTtp/Nvl1Kim23oFKdm2okJzBQA5M=
-github.com/coreos/go-iptables v0.7.0 h1:XWM3V+MPRr5/q51NuWSgU0fqMad64Zyxs8ZUoMsamr8=
-github.com/coreos/go-iptables v0.7.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q=
-github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/containerd/ttrpc v1.2.5 h1:IFckT1EFQoFBMG4c3sMdT8EP3/aKfumK1msY+Ze4oLU=
+github.com/containerd/ttrpc v1.2.5/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o=
+github.com/containerd/typeurl v1.0.2 h1:Chlt8zIieDbzQFzXzAeBEF92KhExuE4p9p92/QmY7aY=
+github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
+github.com/containerd/typeurl/v2 v2.2.0 h1:6NBDbQzr7I5LHgp34xAXYF5DOTQDn05X58lsPEmzLso=
+github.com/containerd/typeurl/v2 v2.2.0/go.mod h1:8XOOxnyatxSWuG8OfsZXVnAF4iZfedjS/8UHSPJnX4g=
+github.com/containernetworking/cni v1.3.0 h1:v6EpN8RznAZj9765HhXQrtXgX+ECGebEYEmnuFjskwo=
+github.com/containernetworking/cni v1.3.0/go.mod h1:Bs8glZjjFfGPHMw6hQu82RUgEPNGEaBb9KS5KtNMnJ4=
+github.com/coreos/go-iptables v0.8.0 h1:MPc2P89IhuVpLI7ETL/2tx3XZ61VeICZjYqDEgNsPRc=
+github.com/coreos/go-iptables v0.8.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q=
+github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
+github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
+github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/cyphar/filepath-securejoin v0.3.5 h1:L81NHjquoQmcPgXcttUS9qTSR/+bXry6pbSINQGpjj4=
+github.com/cyphar/filepath-securejoin v0.3.5/go.mod h1:edhVd3c6OXKjUmSrVa/tGJRS9joFTxlslFCAyaxigkE=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/docker/docker v26.1.5+incompatible h1:NEAxTwEjxV6VbBMBoGG3zPqbiJosIApZjxlbrG9q3/g=
-github.com/docker/docker v26.1.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
+github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
+github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
+github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI=
+github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
 github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
-github.com/docker/libnetwork v0.8.0-dev.2.0.20210525090646-64b7a4574d14 h1:GZvuJOpa10/Yl2EinacWoMqJ+XtNPbikclDZvNXBNO8=
-github.com/docker/libnetwork v0.8.0-dev.2.0.20210525090646-64b7a4574d14/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8=
+github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
+github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
 github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
-github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
-github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emicklei/go-restful/v3 v3.11.2 h1:1onLa9DcsMYO9P+CXaL0dStDqQ2EHHXLiz+BtnqkLAU=
+github.com/emicklei/go-restful/v3 v3.11.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
-github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch/v5 v5.7.0 h1:nJqP7uwL84RJInrohHfW0Fx3awjbm8qZeFv0nW9SYGc=
-github.com/evanphx/json-patch/v5 v5.7.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
+github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8=
+github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU=
+github.com/euank/go-kmsg-parser v2.0.0+incompatible h1:cHD53+PLQuuQyLZeriD1V/esuG4MuU0Pjs5y6iknohY=
+github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw=
+github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI=
+github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
+github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
 github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
-github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
-github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
+github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
 github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
-github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
-github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
-github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo=
-github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA=
-github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
-github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ=
-github.com/go-openapi/jsonpointer v0.20.0/go.mod h1:6PGzBjjIIumbLYysB73Klnms1mwnU4G3YHOECG3CedA=
-github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
-github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
-github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
-github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
-github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
+github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
+github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
+github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
+github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc=
+github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo=
+github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
+github.com/go-openapi/errors v0.20.4 h1:unTcVm6PispJsMECE3zWgvG4xTiKda1LIR5rCRWLG6M=
+github.com/go-openapi/errors v0.20.4/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk=
+github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q=
+github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs=
+github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo=
+github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU=
+github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4=
+github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro=
+github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw=
+github.com/go-openapi/runtime v0.26.2 h1:elWyB9MacRzvIVgAZCBJmqTi7hBzU0hlKD4IvfX0Zl0=
+github.com/go-openapi/runtime v0.26.2/go.mod h1:O034jyRZ557uJKzngbMDJXkcKJVzXJiymdSfgejrcRw=
+github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA=
+github.com/go-openapi/spec v0.20.11 h1:J/TzFDLTt4Rcl/l1PmyErvkqlJDncGvPTMnCI39I4gY=
+github.com/go-openapi/spec v0.20.11/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA=
+github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg=
+github.com/go-openapi/strfmt v0.21.9 h1:LnEGOO9qyEC1v22Bzr323M98G13paIUGPU7yeJtG9Xs=
+github.com/go-openapi/strfmt v0.21.9/go.mod h1:0k3v301mglEaZRJdDDGSlN6Npq4VMVU69DE0LUyf7uA=
+github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-openapi/swag v0.22.7 h1:JWrc1uc/P9cSomxfnsFSVWoE1FW6bNbrVPmpQYpCcR8=
+github.com/go-openapi/swag v0.22.7/go.mod h1:Gl91UqO+btAM0plGGxHqJcQZ1ZTy6jbmridBTsDy8A0=
+github.com/go-openapi/validate v0.22.3 h1:KxG9mu5HBRYbecRb37KRCihvGGtND2aXziBAv0NNfyI=
+github.com/go-openapi/validate v0.22.3/go.mod h1:kVxh31KbfsxU8ZyoHaDbLBWU5CnMdqBUEtadQ2G4d5M=
+github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI=
+github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow=
 github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
 github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
 github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
-github.com/gofrs/uuid v3.3.0+incompatible h1:8K4tyRfvU1CYPgJsveYFQMhpFd/wXNM7iK6rR7UHz84=
+github.com/go-viper/mapstructure/v2 v2.3.0 h1:27XbWsHIqhbdR5TIC911OfYvgSaW93HM+dX7970Q7jk=
+github.com/go-viper/mapstructure/v2 v2.3.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
+github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA=
+github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
-github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
+github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c=
+github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
+github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
+github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
+github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
@@ -133,8 +240,16 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
 github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
 github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
 github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
 github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
 github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
+github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
+github.com/google/cadvisor v0.49.0 h1:1PYeiORXmcFYi609M4Qvq5IzcvcVaWgYxDt78uH8jYA=
+github.com/google/cadvisor v0.49.0/go.mod h1:s6Fqwb2KiWG6leCegVhw4KW40tf9f7m+SF1aXiE8Wsk=
+github.com/google/cel-go v0.17.8 h1:j9m730pMZt1Fc4oKhCLUHfjj6527LuhYcYw0Rl8gqto=
+github.com/google/cel-go v0.17.8/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
 github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
 github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -142,45 +257,72 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
-github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
 github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg=
-github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
+github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
+github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
+github.com/google/pprof v0.0.0-20250630185457-6e76a2b096b5 h1:xhMrHhTJ6zxu3gA4enFM9MLn9AY7613teCdFnlUVbSQ=
+github.com/google/pprof v0.0.0-20250630185457-6e76a2b096b5/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
 github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
 github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY=
+github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY=
+github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
+github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
 github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
 github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
-github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
+github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
 github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
 github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
 github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
-github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee h1:PAXLXk1heNZ5yokbMBpVLZQxo43wCZxRwl00mX+dd44=
-github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg=
+github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
+github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
 github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
 github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA=
+github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w=
 github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
 github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
+github.com/jsimonetti/rtnetlink/v2 v2.0.1 h1:xda7qaHDSVOsADNouv7ukSuicKZO7GgVUCXxpaIEIlM=
+github.com/jsimonetti/rtnetlink/v2 v2.0.1/go.mod h1:7MoNYNbb3UaDHtF8udiJo/RH6VsTKP1pqKLUTVCvToE=
 github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
 github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/jsternberg/zap-logfmt v1.3.0 h1:z1n1AOHVVydOOVuyphbOKyR4NICDQFiJMn1IK5hVQ5Y=
+github.com/jsternberg/zap-logfmt v1.3.0/go.mod h1:N3DENp9WNmCZxvkBD/eReWwz1149BK6jEN9cQ4fNwZE=
+github.com/karrick/godirwalk v1.17.0 h1:b4kY7nqDdioR/6qnbHQyDvmA17u5G1cZ6J+CZXwSWoI=
+github.com/karrick/godirwalk v1.17.0/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
+github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU=
+github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
-github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
+github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
@@ -189,138 +331,275 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-github.com/labstack/echo/v4 v4.12.0 h1:IKpw49IMryVB2p1a4dzwlhP1O2Tf2E0Ir/450lH+kI0=
-github.com/labstack/echo/v4 v4.12.0/go.mod h1:UP9Cr2DJXbOK3Kr9ONYzNowSh7HP0aG0ShAyycHSJvM=
+github.com/labstack/echo/v4 v4.13.4 h1:oTZZW+T3s9gAu5L8vmzihV7/lkXGZuITzTQkTEhcXEA=
+github.com/labstack/echo/v4 v4.13.4/go.mod h1:g63b33BZ5vZzcIUF8AtRH40DrTlXnx4UMC8rBdndmjQ=
 github.com/labstack/gommon v0.4.2 h1:F8qTUNXgG1+6WQmqoUWnz8WiEU60mXVVw0P4ht1WRA0=
 github.com/labstack/gommon v0.4.2/go.mod h1:QlUFxVM+SNXhDL/Z7YhocGIBYOiwB0mXm1+1bAPHPyU=
-github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
-github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
+github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY=
+github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc=
+github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
+github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
-github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
-github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
-github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
+github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
 github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
 github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g=
+github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw=
+github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U=
+github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA=
 github.com/microsoft/ApplicationInsights-Go v0.4.4 h1:G4+H9WNs6ygSCe6sUyxRc2U81TI5Es90b2t/MwX5KqY=
 github.com/microsoft/ApplicationInsights-Go v0.4.4/go.mod h1:fKRUseBqkw6bDiXTs3ESTiU/4YTIHsQS4W3fP2ieF4U=
+github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible h1:aKW/4cBs+yK6gpqU3K/oIwk9Q/XICqd3zOX/UFuvqmk=
+github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
+github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
 github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
+github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
 github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
 github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
+github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g=
+github.com/moby/sys/mountinfo v0.7.1/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
+github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo=
+github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
+github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g=
+github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
 github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
+github.com/mrunalp/fileutils v0.5.1 h1:F+S7ZlNKnrwHfSwdlgNSkKo67ReVf8o9fel6C3dkm/Q=
+github.com/mrunalp/fileutils v0.5.1/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
+github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
 github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY=
 github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc=
+github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
 github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU=
 github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
-github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
-github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
+github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus=
+github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8=
 github.com/onsi/gomega v1.10.0 h1:Gwkk+PTu/nfOwNMtUB/mRUv0X7ewW5dO4AERT1ThVKo=
 github.com/onsi/gomega v1.10.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
+github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
+github.com/opencontainers/runc v1.2.3 h1:fxE7amCzfZflJO2lHXf4y/y8M1BoAqp+FVmG19oYB80=
+github.com/opencontainers/runc v1.2.3/go.mod h1:nSxcWUydXrsBZVYNSkTjoQ/N6rcyTtn+1SD5D4+kRIM=
+github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
+github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=
+github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
+github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b h1:FfH+VrHHk6Lxt9HdVS0PXzSXFyS2NbZKXv33FYPol0A=
+github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b/go.mod h1:AC62GU6hc0BrNm+9RK9VSiwa/EUe1bkIeFORAMcHvJU=
 github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
 github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
-github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
-github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
+github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=
+github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc=
+github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ=
+github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o=
 github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
 github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v1.20.0 h1:jBzTZ7B099Rg24tny+qngoynol8LtVYlA2bqx3vEloI=
-github.com/prometheus/client_golang v1.20.0/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
+github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
+github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
+github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc=
+github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
-github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
-github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
-github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
-github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
-github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
+github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE=
+github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
+github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
+github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
+github.com/redis/go-redis/v9 v9.8.0 h1:q3nRvjrlge/6UD7eTu/DSg2uYiU2mCL0G/uzBWqhicI=
+github.com/redis/go-redis/v9 v9.8.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
-github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
-github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
-github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
+github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo=
+github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k=
+github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0=
+github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM=
+github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2rWM6M9YjLpY=
+github.com/seccomp/libseccomp-golang v0.10.0/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
+github.com/shirou/gopsutil/v3 v3.23.5 h1:5SgDCeQ0KW0S4N0znjeM/eFHXXOKyv2dVNgRq/c9P6Y=
+github.com/shirou/gopsutil/v3 v3.23.5/go.mod h1:Ng3Maa27Q2KARVJ0SPZF5NdrQSC3XHKP8IIWrHgMeLY=
+github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
+github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k=
 github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
 github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
+github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
 github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
 github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
-github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
-github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
-github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
-github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
-github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
-github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
-github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs=
+github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4=
+github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y=
+github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
+github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM=
+github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
+github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI=
-github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg=
+github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M=
+github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4=
+github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4=
+github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=
+github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
 github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
 github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM= +github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI= +github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms= +github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= +github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= +github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo= github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= -github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= -github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= -github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= -github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= +github.com/vishvananda/netlink v1.3.1 h1:3AEMt62VKqz90r0tmNhog0r/PpWKmrEShJU0wJW6bV0= +github.com/vishvananda/netlink v1.3.1/go.mod h1:ARtKouGSTGchR8aMwmkzC0qiNPrrWO5JS/XMVl45+b4= +github.com/vishvananda/netns v0.0.5 h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zdEY= +github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= +github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= +github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= +github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= +github.com/xiang90/probing 
v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= +github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0= +go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ= +go.etcd.io/etcd/api/v3 v3.5.11 h1:B54KwXbWDHyD3XYAwprxNzTe7vlhR69LuBgZnMVvS7E= +go.etcd.io/etcd/api/v3 v3.5.11/go.mod h1:Ot+o0SWSyT6uHhA56al1oCED0JImsRiU9Dc26+C2a+4= +go.etcd.io/etcd/client/pkg/v3 v3.5.11 h1:bT2xVspdiCj2910T0V+/KHcVKjkUrCZVtk8J2JF2z1A= +go.etcd.io/etcd/client/pkg/v3 v3.5.11/go.mod h1:seTzl2d9APP8R5Y2hFL3NVlD6qC/dOT+3kvrqPyTas4= +go.etcd.io/etcd/client/v2 v2.305.10 h1:MrmRktzv/XF8CvtQt+P6wLUlURaNpSDJHFZhe//2QE4= +go.etcd.io/etcd/client/v2 v2.305.10/go.mod h1:m3CKZi69HzilhVqtPDcjhSGp+kA1OmbNn0qamH80xjA= +go.etcd.io/etcd/client/v3 v3.5.11 h1:ajWtgoNSZJ1gmS8k+icvPtqsqEav+iUorF7b0qozgUU= +go.etcd.io/etcd/client/v3 v3.5.11/go.mod h1:a6xQUEqFJ8vztO1agJh/KQKOMfFI8og52ZconzcDJwE= +go.etcd.io/etcd/pkg/v3 v3.5.10 h1:WPR8K0e9kWl1gAhB5A7gEa5ZBTNkT9NdNWrR8Qpo1CM= +go.etcd.io/etcd/pkg/v3 v3.5.10/go.mod h1:TKTuCKKcF1zxmfKWDkfz5qqYaE3JncKKZPFf8c1nFUs= +go.etcd.io/etcd/raft/v3 v3.5.10 h1:cgNAYe7xrsrn/5kXMSaH8kM/Ky8mAdMqGOxyYwpP0LA= +go.etcd.io/etcd/raft/v3 v3.5.10/go.mod h1:odD6kr8XQXTy9oQnyMPBOr0TVe+gT0neQhElQ6jbGRc= +go.etcd.io/etcd/server/v3 v3.5.10 h1:4NOGyOwD5sUZ22PiWYKmfxqoeh72z6EhYjNosKGLmZg= +go.etcd.io/etcd/server/v3 v3.5.10/go.mod h1:gBplPHfs6YI0L+RpGkTQO7buDbHv5HJGG/Bst0/zIPo= +go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= +go.mongodb.org/mongo-driver v1.13.1 h1:YIc7HTYsKndGK4RFzJ3covLz1byri52x0IoMB0Pt/vk= +go.mongodb.org/mongo-driver v1.13.1/go.mod h1:wcDf1JBCXy2mOW0bWHwO/IOYqdca1MPCwDtFu/Z9+eo= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.42.0 h1:Z6SbqeRZAl2OczfkFOqLx1BeYBDYehNjEnqluD7581Y= +go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.42.0/go.mod h1:XiglO+8SPMqM3Mqh5/rtxR1VHc63o8tb38QrU6tm4mU= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= 
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= +go.opentelemetry.io/contrib/propagators/b3 v1.17.0 h1:ImOVvHnku8jijXqkwCSyYKRDt2YrnGXD4BbhcpfbfJo= +go.opentelemetry.io/contrib/propagators/b3 v1.17.0/go.mod h1:IkfUfMpKWmynvvE0264trz0sf32NRTZL4nuAN9AbWRc= +go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= +go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= +go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= +go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= +go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= +go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY= +go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis= +go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4= +go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= +go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= +go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= +go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= +go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc= +go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M= +go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod 
h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= -golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM= +golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= -golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -331,60 +610,77 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= -golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs= +golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= +golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA= +golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= -golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg= +golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= -golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4= +golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU= +golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= +golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools 
v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo= +golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -398,15 +694,19 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= +google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 h1:ToEetK57OidYuqD4Q5w+vfEnPvPpuTwedCNVohYJfNk= +google.golang.org/genproto v0.0.0-20241118233622-e639e219e697/go.mod h1:JJrvXBWRZaFMxBufik1a4RpFw4HhgVtBBWQeQgUj2cc= +google.golang.org/genproto/googleapis/api v0.0.0-20250528174236-200df99c418a h1:SGktgSolFCo75dnHJF2yMvnns6jCmHFJ0vE4Vn2JKvQ= +google.golang.org/genproto/googleapis/api v0.0.0-20250528174236-200df99c418a/go.mod h1:a77HrdMjoeKbnd2jmgcWdaS++ZLZAEq3orIOAEIKiVw= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a h1:v2PbRU4K3llS09c7zodFpNePeamkAwG3mPrAery9VeE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/grpc v1.74.2 h1:WoosgB65DlWVC9FqI82dGsZhWFNBSLjQ84bjROOpMu4= +google.golang.org/grpc v1.74.2/go.mod 
h1:CtQ+BGjaAIXHs/5YS3i473GqwBBa1zGQNevxdeBEXrM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -416,58 +716,88 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= -gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= -gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.28.5 h1:XIPNr3nBgTEaCdEiwZ+dXaO9SB4NeTOZ2pNDRrFgfb4= -k8s.io/api v0.28.5/go.mod h1:98zkTCc60iSnqqCIyCB1GI7PYDiRDYTSfL0PRIxpM4c= -k8s.io/apiextensions-apiserver v0.28.3 h1:Od7DEnhXHnHPZG+W9I97/fSQkVpVPQx2diy+2EtmY08= -k8s.io/apiextensions-apiserver v0.28.3/go.mod h1:NE1XJZ4On0hS11aWWJUTNkmVB03j9LM7gJSisbRt8Lc= -k8s.io/apimachinery v0.28.5 h1:EEj2q1qdTcv2p5wl88KavAn3VlFRjREgRu8Sm/EuMPY= -k8s.io/apimachinery v0.28.5/go.mod h1:wI37ncBvfAoswfq626yPTe6Bz1c22L7uaJ8dho83mgg= -k8s.io/client-go v0.28.5 h1:6UNmc33vuJhh3+SAOEKku3QnKa+DtPKGnhO2MR0IEbk= -k8s.io/client-go v0.28.5/go.mod h1:+pt086yx1i0HAlHzM9S+RZQDqdlzuXFl4hY01uhpcpA= -k8s.io/component-base v0.28.5 h1:uFCW7USa8Fpme8dVtn2ZrdVaUPBRDwYJ+kNrV9OO1Cc= -k8s.io/component-base v0.28.5/go.mod h1:gw2d8O28okS9RrsPuJnD2mFl2It0HH9neHiGi2xoXcY= +k8s.io/api v0.30.14 h1:iPq9YNOz1vHcSuN9YTmRUt8iPpB1cYPxxjgbY25xfS4= +k8s.io/api v0.30.14/go.mod h1:IdrH4AiKc2bqDDb1FAfwcP1pPRmDdyRIqNk4K8KkEoc= +k8s.io/apiextensions-apiserver v0.30.1 h1:4fAJZ9985BmpJG6PkoxVRpXv9vmPUOVzl614xarePws= +k8s.io/apiextensions-apiserver v0.30.1/go.mod h1:R4GuSrlhgq43oRY9sF2IToFh7PVlF1JjfWdoG3pixk4= +k8s.io/apimachinery v0.30.14 h1:2OvEYwWoWeb25+xzFGP/8gChu+MfRNv24BlCQdnfGzQ= +k8s.io/apimachinery v0.30.14/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= +k8s.io/apiserver v0.30.14 h1:3iafln8nzOOShuTogncNiIM83FXxfqy3EaZMENjNn2o= +k8s.io/apiserver v0.30.14/go.mod h1:X1LOQEPPmPQ4pGg3wWRz4euhDK96gE2mzX3enFE+8PE= +k8s.io/client-go v0.30.14 h1:D81QZvBtv897JU4HRsx4YoaCDnzeZSvB8eApgmbtXVA= +k8s.io/client-go v0.30.14/go.mod h1:9ytP3kKzrz3ZWavlWih4NB0mTdYA0DB1ElBHimq+JqQ= +k8s.io/cloud-provider v0.30.7 h1:RNt+A3W9Mz3J5nb5GTDG4jhbLE+VEHnXky4+HN4Tj8c= +k8s.io/cloud-provider v0.30.7/go.mod h1:yuxkkxKZ9GDOHVGD9b912J13X1miaq08N0sc6hosH3k= +k8s.io/component-base v0.30.14 h1:kDevqj2uEZLJTh8wCsEkpELPUwSRHV64h0zA7N0fe38= +k8s.io/component-base v0.30.14/go.mod h1:1MHb4dOuyJe0u61RO6xQYvZTtFaDg231WdC1agri2TE= +k8s.io/component-helpers v0.30.7 h1:cue1Oq0242BPbST7TSQld/z7gqNQkAhmDVuMMhJu7Lg= +k8s.io/component-helpers v0.30.7/go.mod h1:f7aE0tdjEIaJ/DPH00kqUnhz+jQHthSUlKKlLbJqX2Q= +k8s.io/controller-manager v0.30.7 h1:4Jo6HDjC2bsY4P8NWxEd40tVZDFnTNk1v1HcmcrejwI= +k8s.io/controller-manager v0.30.7/go.mod h1:pJXE3qdo2wO9C505kkk5T8ra0ZzcxIeZX9pwLnCGqDA= +k8s.io/cri-api v0.30.14 h1:2n2nE1BUBpamdEGJDCve7e+7xa1wzdgUhpWrH37W/RY= +k8s.io/cri-api v0.30.14/go.mod h1://4/umPJSW1ISNSNng4OwjpkvswJOQwU8rnkvO8P+xg= +k8s.io/csi-translation-lib v0.30.14 h1:JP08sv4mBZm5iu00AquRx+RT/jqfa18T/BvhOQnd+v4= +k8s.io/csi-translation-lib v0.30.14/go.mod h1:WnJQ/55G6A8o+/mkpq8JSenEKi0wS3OfbyRVoAQuPK4= +k8s.io/dynamic-resource-allocation v0.30.14 h1:tMJ7Ev3IOVBgB0wEjylR5ppjtyC9Inhs9aEqzw95qbM= +k8s.io/dynamic-resource-allocation v0.30.14/go.mod h1:SWEVakmo3XxbeFMUIR57ku+2Cfe/GRf8twIX1Tg/a/c= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.130.1 
h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/kms v0.30.14 h1:+LFOJKFJ8FoN2NG9A5Mnt7RONUQQN9ktRAzWRllWg8k= +k8s.io/kms v0.30.14/go.mod h1:GrMurD0qk3G4yNgGcsCEmepqf9KyyIrTXYR2lyUOJC4= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/kube-scheduler v0.30.14 h1:r/h8IGO5vH6zUViFmv8ADv/+/ILrjvNP/pEwcwJaSoc= +k8s.io/kube-scheduler v0.30.14/go.mod h1:nk7lq6DHtt3DbttKMXZ6FaHn1KrlvV+9IbqEz+TGOdE= k8s.io/kubectl v0.28.5 h1:jq8xtiCCZPR8Cl/Qe1D7bLU0h8KtcunwfROqIekCUeU= k8s.io/kubectl v0.28.5/go.mod h1:9WiwzqeKs3vLiDtEQPbjhqqysX+BIVMLt7C7gN+T5w8= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/controller-runtime v0.16.5 h1:yr1cEJbX08xsTW6XEIzT13KHHmIyX8Umvme2cULvFZw= -sigs.k8s.io/controller-runtime v0.16.5/go.mod h1:j7bialYoSn142nv9sCOJmQgDXQXxnroFU4VnX/brVJ0= +k8s.io/kubelet v0.30.14 h1:RuDEhb+Gr0LsZBZkUTchSMg81CliE8+yoXRnaT6FGP0= +k8s.io/kubelet v0.30.14/go.mod h1:VJdl7458YBOK+pz6bdTLPcdPRosNAuf0h2wINpWt9pE= +k8s.io/kubernetes v1.30.7 h1:02tPr+FPLzY0J+a14SRBIxSMaTkdmkKNHYibrRtwPVQ= +k8s.io/kubernetes v1.30.7/go.mod h1:hV3c+sqOEO0eVqgSo0KW5dOJ6UjGJ2l3Pd9+Qvft8UI= +k8s.io/mount-utils v0.30.14 h1:XfneBnQJQnI7BCDy6CVpT6aV0UqBj7dB5UBRdgdmghU= +k8s.io/mount-utils v0.30.14/go.mod h1:9sCVmwGLcV1MPvbZ+rToMDnl1QcGozy+jBPd0MsQLIo= +k8s.io/utils v0.0.0-20240310230437-4693a0247e57 h1:gbqbevonBh57eILzModw6mrkbwM0gQBEuevE/AaBsHY= +k8s.io/utils v0.0.0-20240310230437-4693a0247e57/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 h1:/U5vjBbQn3RChhv7P11uhYvCSm5G2GaIi5AIGBS6r4c= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0/go.mod h1:z7+wmGM2dfIiLRfrC6jb5kV2Mq/sK1ZP303cxzkV5Y4= +sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHvm5BZw= +sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= -sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/hack/aks/Makefile b/hack/aks/Makefile index d15a79b106..d0ce352528 100644 --- a/hack/aks/Makefile +++ b/hack/aks/Makefile @@ -8,17 +8,24 @@ AZIMG = mcr.microsoft.com/azure-cli AZCLI ?= docker run --rm -v $(AZCFG):/root/.azure -v $(KUBECFG):/root/.kube -v $(SSH):/root/.ssh -v $(PWD):/root/tmpsrc $(AZIMG) az # overrideable defaults -AUTOUPGRADE ?= 
patch -K8S_VER ?= 1.28 -NODE_COUNT ?= 2 -NODE_COUNT_WIN ?= $(NODE_COUNT) -NODEUPGRADE ?= NodeImage -OS ?= linux # Used to signify if you want to bring up a windows nodePool on byocni clusters -OS_SKU ?= Ubuntu -OS_SKU_WIN ?= Windows2022 -REGION ?= westus2 -VM_SIZE ?= Standard_B2s -VM_SIZE_WIN ?= Standard_B2s +AUTOUPGRADE ?= patch +K8S_VER ?= 1.33 +NODE_COUNT ?= 2 +NODE_COUNT_WIN ?= $(NODE_COUNT) +NODEUPGRADE ?= NodeImage +OS ?= linux # Used to signify if you want to bring up a windows nodePool on byocni clusters +OS_SKU ?= Ubuntu +OS_SKU_WIN ?= Windows2022 +REGION ?= westus2 +VM_SIZE ?= Standard_B2s +VM_SIZE_WIN ?= Standard_B2s +IP_TAG ?= FirstPartyUsage=/NonProd +IP_PREFIX ?= serviceTaggedIp +PUBLIC_IP_ID ?= /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/publicIPAddresses +PUBLIC_IPv4 ?= $(PUBLIC_IP_ID)/$(IP_PREFIX)-$(CLUSTER)-v4 +PUBLIC_IPv6 ?= $(PUBLIC_IP_ID)/$(IP_PREFIX)-$(CLUSTER)-v6 +KUBE_PROXY_JSON_PATH ?= ./kube-proxy.json +LTS ?= false # overrideable variables SUB ?= $(AZURE_SUBSCRIPTION) @@ -26,6 +33,24 @@ CLUSTER ?= $(USER)-$(REGION) GROUP ?= $(CLUSTER) VNET ?= $(CLUSTER) +# Long Term Support (LTS) +ifeq ($(LTS),true) + LTS_ARGS=--k8s-support-plan AKSLongTermSupport --tier premium +else + LTS_ARGS= +endif + +# Common az aks create fields +COMMON_AKS_FIELDS = $(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \ + --auto-upgrade-channel $(AUTOUPGRADE) \ + --node-os-upgrade-channel $(NODEUPGRADE) \ + --kubernetes-version $(K8S_VER) \ + --node-count $(NODE_COUNT) \ + --node-vm-size $(VM_SIZE) \ + --no-ssh-key \ + --os-sku $(OS_SKU) \ + $(LTS_ARGS) + ##@ Help help: ## Display this help @@ -42,6 +67,23 @@ azcfg: ## Set the $AZCLI to use aks-preview @$(AZCLI) extension add --name aks-preview --yes @$(AZCLI) extension update --name aks-preview +ip: + $(AZCLI) network public-ip create --name $(IP_PREFIX)-$(CLUSTER)-$(IPVERSION) \ + --resource-group $(GROUP) \ + --allocation-method Static \ + --ip-tags $(IP_TAG) \ + --location $(REGION) \ + --sku Standard \ + --tier Regional \ + --version IP$(IPVERSION) + +ipv4: + @$(MAKE) ip IPVERSION=v4 + +ipv6: + @$(MAKE) ip IPVERSION=v6 + + set-kubeconf: ## Adds the kubeconf for $CLUSTER $(AZCLI) aks get-credentials -n $(CLUSTER) -g $(GROUP) @@ -64,6 +106,9 @@ vars: ## Show the input vars configured for the cluster commands @echo VM_SIZE=$(VM_SIZE) @echo NODE_COUNT=$(NODE_COUNT) @echo VMSS_NAME=$(VMSS_NAME) + @echo K8S_VER=$(K8S_VER) + @echo LTS_ARGS=$(if $(LTS_ARGS),$(LTS_ARGS),$(LTS)) + @echo COMMON_AKS_FIELDS=$(COMMON_AKS_FIELDS) ##@ SWIFT Infra @@ -88,169 +133,134 @@ overlay-net-up: ## Create vnet, nodenet subnets $(AZCLI) network vnet create -g $(GROUP) -l $(REGION) --name $(VNET) --address-prefixes 10.0.0.0/8 -o none $(AZCLI) network vnet subnet create -g $(GROUP) --vnet-name $(VNET) --name nodenet --address-prefix 10.10.0.0/16 -o none - ##@ AKS Clusters byocni-up: swift-byocni-up ## Alias to swift-byocni-up cilium-up: swift-cilium-up ## Alias to swift-cilium-up up: swift-up ## Alias to swift-up -overlay-byocni-up: rg-up overlay-net-up ## Brings up an Overlay BYO CNI cluster - $(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \ - --auto-upgrade-channel $(AUTOUPGRADE) \ - --node-os-upgrade-channel $(NODEUPGRADE) \ - --kubernetes-version $(K8S_VER) \ - --node-count $(NODE_COUNT) \ - --node-vm-size $(VM_SIZE) \ - --load-balancer-sku standard \ + +nodesubnet-byocni-nokubeproxy-up: rg-up ipv4 overlay-net-up ## Brings up a NodeSubnet BYO CNI cluster without kube-proxy + $(COMMON_AKS_FIELDS) \ + --max-pods 
250 \ + --load-balancer-outbound-ips $(PUBLIC_IPv4) \ + --network-plugin none \ + --vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \ + --kube-proxy-config $(KUBE_PROXY_JSON_PATH) \ + --yes + @$(MAKE) set-kubeconf + +overlay-byocni-up: rg-up ipv4 overlay-net-up ## Brings up an Overlay BYO CNI cluster + $(COMMON_AKS_FIELDS) \ + --load-balancer-outbound-ips $(PUBLIC_IPv4) \ --network-plugin none \ --network-plugin-mode overlay \ --pod-cidr 192.168.0.0/16 \ --vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \ - --no-ssh-key \ - --os-sku $(OS_SKU) \ --yes + @$(MAKE) set-kubeconf ifeq ($(OS),windows) - @$(MAKE) windows-nodepool-up + $(MAKE) windows-nodepool-up endif - @$(MAKE) set-kubeconf -overlay-byocni-nokubeproxy-up: rg-up overlay-net-up ## Brings up an Overlay BYO CNI cluster without kube-proxy - $(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \ - --auto-upgrade-channel $(AUTOUPGRADE) \ - --node-os-upgrade-channel $(NODEUPGRADE) \ - --kubernetes-version $(K8S_VER) \ - --node-count $(NODE_COUNT) \ - --node-vm-size $(VM_SIZE) \ - --load-balancer-sku basic \ +overlay-byocni-nokubeproxy-up: rg-up ipv4 overlay-net-up ## Brings up an Overlay BYO CNI cluster without kube-proxy + $(COMMON_AKS_FIELDS) \ + --load-balancer-outbound-ips $(PUBLIC_IPv4) \ --network-plugin none \ --network-plugin-mode overlay \ --pod-cidr 192.168.0.0/16 \ --vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \ - --no-ssh-key \ - --kube-proxy-config ./kube-proxy.json \ + --kube-proxy-config $(KUBE_PROXY_JSON_PATH) \ --yes @$(MAKE) set-kubeconf -overlay-cilium-up: rg-up overlay-net-up ## Brings up an Overlay Cilium cluster - $(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \ - --auto-upgrade-channel $(AUTOUPGRADE) \ - --node-os-upgrade-channel $(NODEUPGRADE) \ - --kubernetes-version $(K8S_VER) \ - --node-count $(NODE_COUNT) \ - --node-vm-size $(VM_SIZE) \ - --load-balancer-sku basic \ +overlay-cilium-up: rg-up ipv4 overlay-net-up ## Brings up an Overlay Cilium cluster + $(COMMON_AKS_FIELDS) \ + --load-balancer-outbound-ips $(PUBLIC_IPv4) \ --network-plugin azure \ --network-dataplane cilium \ --network-plugin-mode overlay \ --pod-cidr 192.168.0.0/16 \ --vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \ - --no-ssh-key \ --yes @$(MAKE) set-kubeconf -overlay-up: rg-up overlay-net-up ## Brings up an Overlay AzCNI cluster - $(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \ - --auto-upgrade-channel $(AUTOUPGRADE) \ - --node-os-upgrade-channel $(NODEUPGRADE) \ - --kubernetes-version $(K8S_VER) \ - --node-count $(NODE_COUNT) \ - --node-vm-size $(VM_SIZE) \ - --load-balancer-sku basic \ +overlay-up: rg-up ipv4 overlay-net-up ## Brings up an Overlay AzCNI cluster + $(COMMON_AKS_FIELDS) \ + --load-balancer-outbound-ips $(PUBLIC_IPv4) \ --network-plugin azure \ --network-plugin-mode overlay \ --pod-cidr 192.168.0.0/16 \ --vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \ - --no-ssh-key \ --yes @$(MAKE) set-kubeconf +ifeq ($(OS),windows) + $(MAKE) windows-nodepool-up +endif -swift-byocni-up: rg-up swift-net-up ## Bring up a SWIFT BYO CNI cluster - $(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \ - 
--auto-upgrade-channel $(AUTOUPGRADE) \ - --node-os-upgrade-channel $(NODEUPGRADE) \ - --kubernetes-version $(K8S_VER) \ - --node-count $(NODE_COUNT) \ - --node-vm-size $(VM_SIZE) \ - --load-balancer-sku standard \ +swift-byocni-up: rg-up ipv4 swift-net-up ## Bring up a SWIFT BYO CNI cluster + $(COMMON_AKS_FIELDS) \ + --load-balancer-outbound-ips $(PUBLIC_IPv4) \ --network-plugin none \ --vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \ --pod-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/podnet \ - --no-ssh-key \ - --os-sku $(OS_SKU) \ --yes ifeq ($(OS),windows) @$(MAKE) windows-swift-nodepool-up endif @$(MAKE) set-kubeconf -swift-byocni-nokubeproxy-up: rg-up swift-net-up ## Bring up a SWIFT BYO CNI cluster without kube-proxy - $(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \ - --auto-upgrade-channel $(AUTOUPGRADE) \ - --node-os-upgrade-channel $(NODEUPGRADE) \ - --kubernetes-version $(K8S_VER) \ - --node-count $(NODE_COUNT) \ - --node-vm-size $(VM_SIZE) \ - --load-balancer-sku basic \ +swift-byocni-nokubeproxy-up: rg-up ipv4 swift-net-up ## Bring up a SWIFT BYO CNI cluster without kube-proxy, add managed identity and public ip + $(COMMON_AKS_FIELDS) \ + --load-balancer-outbound-ips $(PUBLIC_IPv4) \ --network-plugin none \ --vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \ --pod-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/podnet \ - --no-ssh-key \ - --os-sku $(OS_SKU) \ - --kube-proxy-config ./kube-proxy.json \ + --kube-proxy-config $(KUBE_PROXY_JSON_PATH) \ --yes @$(MAKE) set-kubeconf -swift-cilium-up: rg-up swift-net-up ## Bring up a SWIFT Cilium cluster - $(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \ - --auto-upgrade-channel $(AUTOUPGRADE) \ - --node-os-upgrade-channel $(NODEUPGRADE) \ - --kubernetes-version $(K8S_VER) \ - --node-count $(NODE_COUNT) \ - --node-vm-size $(VM_SIZE) \ - --load-balancer-sku basic \ +swift-cilium-up: rg-up ipv4 swift-net-up ## Bring up a SWIFT Cilium cluster + $(COMMON_AKS_FIELDS) \ + --load-balancer-outbound-ips $(PUBLIC_IPv4) \ --network-plugin azure \ --network-dataplane cilium \ --aks-custom-headers AKSHTTPCustomFeatures=Microsoft.ContainerService/CiliumDataplanePreview \ --vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \ --pod-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/podnet \ - --no-ssh-key \ --yes @$(MAKE) set-kubeconf -swift-up: rg-up swift-net-up ## Bring up a SWIFT AzCNI cluster - $(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \ - --auto-upgrade-channel $(AUTOUPGRADE) \ - --node-os-upgrade-channel $(NODEUPGRADE) \ - --kubernetes-version $(K8S_VER) \ - --node-count $(NODE_COUNT) \ - --node-vm-size $(VM_SIZE) \ - --load-balancer-sku basic \ +swift-up: rg-up ipv4 swift-net-up ## Bring up a SWIFT AzCNI cluster + $(COMMON_AKS_FIELDS) \ + --load-balancer-outbound-ips $(PUBLIC_IPv4) \ --network-plugin azure \ --vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \ --pod-subnet-id 
/subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/podnet \ - --no-ssh-key \ --yes @$(MAKE) set-kubeconf -swiftv2-multitenancy-cluster-up: rg-up +swiftv2-multitenancy-cluster-up: rg-up ipv4 $(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \ --network-plugin azure \ --network-plugin-mode overlay \ - --kubernetes-version 1.28 \ + --kubernetes-version $(K8S_VER) \ --nodepool-name "mtapool" \ --node-vm-size $(VM_SIZE) \ --node-count 2 \ + --load-balancer-outbound-ips $(PUBLIC_IPv4) \ --nodepool-tags fastpathenabled=true \ --no-ssh-key \ + $(LTS_ARGS) \ --yes @$(MAKE) set-kubeconf -swiftv2-dummy-cluster-up: rg-up swift-net-up ## Bring up a SWIFT AzCNI cluster +swiftv2-dummy-cluster-up: rg-up ipv4 swift-net-up ## Bring up a SWIFT AzCNI cluster $(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \ --network-plugin azure \ --vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \ --pod-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/podnet \ + --load-balancer-outbound-ips $(PUBLIC_IPv4) \ --no-ssh-key \ --yes @$(MAKE) set-kubeconf @@ -258,168 +268,121 @@ swiftv2-dummy-cluster-up: rg-up swift-net-up ## Bring up a SWIFT AzCNI cluster # The below Vnet Scale clusters are currently only in private preview and available with Kubernetes 1.28 # These AKS clusters can only be created in a limited subscription listed here: # https://dev.azure.com/msazure/CloudNativeCompute/_git/aks-rp?path=/resourceprovider/server/microsoft.com/containerservice/flags/network_flags.go&version=GBmaster&line=134&lineEnd=135&lineStartColumn=1&lineEndColumn=1&lineStyle=plain&_a=contents -vnetscale-swift-byocni-up: rg-up vnetscale-swift-net-up ## Bring up a Vnet Scale SWIFT BYO CNI cluster - $(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \ - --auto-upgrade-channel $(AUTOUPGRADE) \ - --node-os-upgrade-channel $(NODEUPGRADE) \ - --kubernetes-version $(K8S_VER) \ - --node-count $(NODE_COUNT) \ - --node-vm-size $(VM_SIZE) \ - --load-balancer-sku basic \ +vnetscale-swift-byocni-up: rg-up ipv4 vnetscale-swift-net-up ## Bring up a Vnet Scale SWIFT BYO CNI cluster + $(COMMON_AKS_FIELDS) \ + --load-balancer-outbound-ips $(PUBLIC_IPv4) \ --network-plugin none \ --vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \ --pod-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/podnet \ - --no-ssh-key \ - --os-sku $(OS_SKU) \ + --pod-ip-allocation-mode StaticBlock \ --yes @$(MAKE) set-kubeconf -vnetscale-swift-byocni-nokubeproxy-up: rg-up vnetscale-swift-net-up ## Bring up a Vnet Scale SWIFT BYO CNI cluster without kube-proxy - $(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \ - --auto-upgrade-channel $(AUTOUPGRADE) \ - --node-os-upgrade-channel $(NODEUPGRADE) \ - --kubernetes-version $(K8S_VER) \ - --node-count $(NODE_COUNT) \ - --node-vm-size $(VM_SIZE) \ - --load-balancer-sku basic \ +vnetscale-swift-byocni-nokubeproxy-up: rg-up ipv4 vnetscale-swift-net-up ## Bring up a Vnet Scale SWIFT BYO CNI cluster without kube-proxy + $(COMMON_AKS_FIELDS) \ + --load-balancer-outbound-ips $(PUBLIC_IPv4) \ --network-plugin none \ --vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \ --pod-subnet-id 
/subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/podnet \ - --no-ssh-key \ - --os-sku $(OS_SKU) \ - --kube-proxy-config ./kube-proxy.json \ + --kube-proxy-config $(KUBE_PROXY_JSON_PATH) \ + --pod-ip-allocation-mode StaticBlock \ --yes @$(MAKE) set-kubeconf -vnetscale-swift-cilium-up: rg-up vnetscale-swift-net-up ## Bring up a Vnet Scale SWIFT Cilium cluster - $(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \ - --auto-upgrade-channel $(AUTOUPGRADE) \ - --node-os-upgrade-channel $(NODEUPGRADE) \ - --kubernetes-version $(K8S_VER) \ - --node-count $(NODE_COUNT) \ - --node-vm-size $(VM_SIZE) \ - --load-balancer-sku basic \ +vnetscale-swift-cilium-up: rg-up ipv4 vnetscale-swift-net-up ## Bring up a Vnet Scale SWIFT Cilium cluster + $(COMMON_AKS_FIELDS) \ + --load-balancer-outbound-ips $(PUBLIC_IPv4) \ --network-plugin azure \ --network-dataplane cilium \ --aks-custom-headers AKSHTTPCustomFeatures=Microsoft.ContainerService/CiliumDataplanePreview \ --vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \ --pod-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/podnet \ - --no-ssh-key \ + --pod-ip-allocation-mode StaticBlock \ --yes @$(MAKE) set-kubeconf -vnetscale-swift-up: rg-up vnetscale-swift-net-up ## Bring up a Vnet Scale SWIFT AzCNI cluster - $(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \ - --auto-upgrade-channel $(AUTOUPGRADE) \ - --node-os-upgrade-channel $(NODEUPGRADE) \ - --kubernetes-version $(K8S_VER) \ - --node-count $(NODE_COUNT) \ - --node-vm-size $(VM_SIZE) \ - --load-balancer-sku basic \ +vnetscale-swift-up: rg-up ipv4 vnetscale-swift-net-up ## Bring up a Vnet Scale SWIFT AzCNI cluster + $(COMMON_AKS_FIELDS) \ + --load-balancer-outbound-ips $(PUBLIC_IPv4) \ --network-plugin azure \ --vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \ --pod-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/podnet \ - --no-ssh-key \ + --pod-ip-allocation-mode StaticBlock \ --yes @$(MAKE) set-kubeconf -windows-cniv1-up: rg-up overlay-net-up ## Bring up a Windows CNIv1 cluster - $(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \ - --auto-upgrade-channel $(AUTOUPGRADE) \ - --node-os-upgrade-channel $(NODEUPGRADE) \ - --kubernetes-version $(K8S_VER) \ - --node-count $(NODE_COUNT) \ - --node-vm-size $(VM_SIZE) \ +nodesubnet-cilium-up: rg-up ipv4 overlay-net-up ## Bring up a Nodesubnet Cilium cluster + $(COMMON_AKS_FIELDS) \ + --load-balancer-outbound-ips $(PUBLIC_IPv4) \ --network-plugin azure \ - --windows-admin-password $(WINDOWS_PASSWORD) \ - --windows-admin-username $(WINDOWS_USERNAME) \ + --network-dataplane cilium \ --vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \ - --no-ssh-key \ --yes - @$(MAKE) windows-nodepool-up @$(MAKE) set-kubeconf -linux-cniv1-up: rg-up overlay-net-up ## Bring up a Linux CNIv1 cluster - $(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \ - --auto-upgrade-channel $(AUTOUPGRADE) \ - --node-os-upgrade-channel $(NODEUPGRADE) \ - --node-count $(NODE_COUNT) \ - --node-vm-size $(VM_SIZE) \ +cniv1-up: rg-up ipv4 overlay-net-up ## Bring up a CNIv1 cluster + $(COMMON_AKS_FIELDS) \ + --load-balancer-outbound-ips 
$(PUBLIC_IPv4) \ --max-pods 250 \ --network-plugin azure \ --vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \ - --os-sku $(OS_SKU) \ - --no-ssh-key \ --yes @$(MAKE) set-kubeconf +ifeq ($(OS),windows) + $(MAKE) windows-nodepool-up +endif -dualstack-overlay-up: rg-up overlay-net-up ## Brings up an dualstack Overlay cluster with Linux node only - $(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \ - --auto-upgrade-channel $(AUTOUPGRADE) \ - --node-os-upgrade-channel $(NODEUPGRADE) \ - --kubernetes-version $(K8S_VER) \ - --node-count $(NODE_COUNT) \ - --node-vm-size $(VM_SIZE) \ +dualstack-overlay-up: rg-up ipv4 ipv6 overlay-net-up ## Brings up a dualstack Overlay cluster with Linux node only + $(COMMON_AKS_FIELDS) \ + --load-balancer-outbound-ips $(PUBLIC_IPv4),$(PUBLIC_IPv6) \ --network-plugin azure \ --network-plugin-mode overlay \ --subscription $(SUB) \ --ip-families ipv4,ipv6 \ --aks-custom-headers AKSHTTPCustomFeatures=Microsoft.ContainerService/AzureOverlayDualStackPreview \ - --no-ssh-key \ --yes @$(MAKE) set-kubeconf -dualstack-overlay-byocni-up: rg-up overlay-net-up ## Brings up an dualstack Overlay BYO CNI cluster - $(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \ - --auto-upgrade-channel $(AUTOUPGRADE) \ - --node-os-upgrade-channel $(NODEUPGRADE) \ - --kubernetes-version $(K8S_VER) \ - --node-count $(NODE_COUNT) \ - --node-vm-size $(VM_SIZE) \ +dualstack-overlay-byocni-up: rg-up ipv4 ipv6 overlay-net-up ## Brings up a dualstack Overlay BYO CNI cluster + $(COMMON_AKS_FIELDS) \ + --load-balancer-outbound-ips $(PUBLIC_IPv4),$(PUBLIC_IPv6) \ --network-plugin none \ --network-plugin-mode overlay \ --subscription $(SUB) \ --ip-families ipv4,ipv6 \ --aks-custom-headers AKSHTTPCustomFeatures=Microsoft.ContainerService/AzureOverlayDualStackPreview \ - --no-ssh-key \ --yes @$(MAKE) set-kubeconf +ifeq ($(OS),windows) + $(MAKE) windows-nodepool-up +endif -cilium-dualstack-up: rg-up overlay-net-up ## Brings up a Cilium Dualstack Overlay cluster with Linux node only - $(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \ - --auto-upgrade-channel $(AUTOUPGRADE) \ - --node-os-upgrade-channel $(NODEUPGRADE) \ - --kubernetes-version $(K8S_VER) \ - --node-count $(NODE_COUNT) \ - --node-vm-size $(VM_SIZE) \ +cilium-dualstack-up: rg-up ipv4 ipv6 overlay-net-up ## Brings up a Cilium Dualstack Overlay cluster with Linux node only + $(COMMON_AKS_FIELDS) \ + --load-balancer-outbound-ips $(PUBLIC_IPv4),$(PUBLIC_IPv6) \ --network-plugin azure \ --network-plugin-mode overlay \ --network-dataplane cilium \ --subscription $(SUB) \ --ip-families ipv4,ipv6 \ --aks-custom-headers AKSHTTPCustomFeatures=Microsoft.ContainerService/AzureOverlayDualStackPreview \ - --no-ssh-key \ --yes @$(MAKE) set-kubeconf -dualstack-byocni-nokubeproxy-up: rg-up overlay-net-up ## Brings up a Dualstack overlay BYOCNI cluster with Linux node only and no kube-proxy - $(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \ - --auto-upgrade-channel $(AUTOUPGRADE) \ - --node-os-upgrade-channel $(NODEUPGRADE) \ - --kubernetes-version $(K8S_VER) \ - --node-count $(NODE_COUNT) \ - --node-vm-size $(VM_SIZE) \ +dualstack-byocni-nokubeproxy-up: rg-up ipv4 ipv6 overlay-net-up ## Brings up a Dualstack overlay BYOCNI cluster with Linux node only and no kube-proxy + $(COMMON_AKS_FIELDS) \ + --load-balancer-outbound-ips $(PUBLIC_IPv4),$(PUBLIC_IPv6) \ --network-plugin none \ --network-plugin-mode overlay \ --subscription $(SUB) \ 
--ip-families ipv4,ipv6 \ --aks-custom-headers AKSHTTPCustomFeatures=Microsoft.ContainerService/AzureOverlayDualStackPreview \ - --no-ssh-key \ - --kube-proxy-config ./kube-proxy.json \ + --kube-proxy-config $(KUBE_PROXY_JSON_PATH) \ --yes @$(MAKE) set-kubeconf windows-nodepool-up: ## Add windows node pool + AZCLI="az" GROUP="$(GROUP)" CLUSTER="$(CLUSTER)" sh ../scripts/wait-cluster-update.sh $(AZCLI) aks nodepool add -g $(GROUP) -n npwin \ --node-count $(NODE_COUNT_WIN) \ --node-vm-size $(VM_SIZE_WIN) \ @@ -448,5 +411,5 @@ down: ## Delete the cluster restart-vmss: ## Restarts the nodes in the cluster $(AZCLI) vmss restart -g MC_${GROUP}_${CLUSTER}_${REGION} --name $(VMSS_NAME) -scale-vmss: ## Scales the nodes in the cluster - $(AZCLI) vmss scale -g MC_${GROUP}_${CLUSTER}_${REGION} --name $(VMSS_NAME) --new-capacity $(NODE_COUNT) +scale-nodes: ## Scales the nodes in the cluster + $(AZCLI) aks nodepool scale --resource-group $(GROUP) --cluster-name $(CLUSTER) --name $(NODEPOOL) --node-count $(NODE_COUNT) diff --git a/hack/aks/README.md b/hack/aks/README.md index 8e1febd0e4..7e3e89ffa8 100644 --- a/hack/aks/README.md +++ b/hack/aks/README.md @@ -24,6 +24,7 @@ AKS Clusters byocni-up Alias to swift-byocni-up cilium-up Alias to swift-cilium-up up Alias to swift-up + nodesubnet-byocni-nokubeproxy-up Bring up a Nodesubnet BYO CNI cluster. Does not include secondary IP configs. overlay-byocni-up Bring up a Overlay BYO CNI cluster overlay-byocni-nokubeproxy-up Bring up a Overlay BYO CNI cluster without kube-proxy overlay-cilium-up Bring up a Overlay Cilium cluster @@ -36,8 +37,8 @@ AKS Clusters vnetscale-swift-byocni-nokubeproxy-up Bring up a Vnet Scale SWIFT BYO CNI cluster without kube-proxy vnetscale-swift-cilium-up Bring up a Vnet Scale SWIFT Cilium cluster vnetscale-swift-up Bring up a Vnet Scale SWIFT AzCNI cluster - windows-cniv1-up Bring up a Windows AzCNIv1 cluster - linux-cniv1-up Bring up a Linux AzCNIv1 cluster + nodesubnet-cilium-up Bring up a Nodesubnet Cilium cluster + cniv1-up Bring up an AzCNIv1 cluster dualstack-overlay-byocni-up Bring up an dualstack overlay cluster without CNS and CNI installed cilium-dualstack-up Brings up a Cilium Dualstack Overlay cluster with Linux node only dualstack-byocni-nokubeproxy-up Brings up a Dualstack overlay BYOCNI cluster with Linux node only and no kube-proxy diff --git a/hack/manifests/cni-installer.yaml b/hack/manifests/cni-installer.yaml index ef5fbe3ea0..67f4a2eae2 100644 --- a/hack/manifests/cni-installer.yaml +++ b/hack/manifests/cni-installer.yaml @@ -27,7 +27,7 @@ spec: operator: NotIn values: - virtual-kubelet - - key: beta.kubernetes.io/os + - key: kubernetes.io/os operator: In values: - linux diff --git a/hack/manifests/nginx.yaml b/hack/manifests/nginx.yaml new file mode 100644 index 0000000000..2ecfc95a72 --- /dev/null +++ b/hack/manifests/nginx.yaml @@ -0,0 +1,29 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx + labels: + app: nginx + namespace: kube-system +spec: + replicas: 4 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: mcr.microsoft.com/azurelinux/base/nginx:1 + ports: + - containerPort: 80 + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname # KV: Key is hostname, value is each unique nodename + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: nginx \ No newline at end of file diff --git a/hack/scripts/cilium-mtu-validation.sh b/hack/scripts/cilium-mtu-validation.sh new 
file mode 100755 index 0000000000..9e8386b849 --- /dev/null +++ b/hack/scripts/cilium-mtu-validation.sh @@ -0,0 +1,103 @@ +#!/bin/bash +NAMESPACE="kube-system" + +echo "Deploy nginx pods for MTU testing" +kubectl apply -f ../manifests/nginx.yaml +kubectl wait --for=condition=available --timeout=60s -n $NAMESPACE deployment/nginx + +# Check node count +node_count=$(kubectl get nodes --no-headers | wc -l) + +# In the CNI release test scenario, scale the deployment to 3 * node count to get replicas on each node +if [ "$node_count" -gt 1 ]; then + echo "Scaling nginx deployment to $((3 * node_count)) replicas" + kubectl scale deployment nginx --replicas=$((3 * node_count)) -n $NAMESPACE +fi +# Wait for nginx pods to be ready +kubectl wait --for=condition=available --timeout=60s -n $NAMESPACE deployment/nginx + + + +echo "Checking MTU for pods in namespace: $NAMESPACE by comparing Cilium agent, nginx pod, and node MTUs" + +# Get all nodes +nodes=$(kubectl get nodes -o jsonpath='{.items[*].metadata.name}') + +for node in $nodes; do + echo "Checking node: $node" + + # Get the Cilium agent pod running on this node + cilium_pod=$(kubectl get pods -n $NAMESPACE -o wide --field-selector spec.nodeName=$node -l k8s-app=cilium -o jsonpath='{.items[0].metadata.name}') + + if [ -z "$cilium_pod" ]; then + echo "Failed to find Cilium agent pod on node $node" + echo "##[error]Failed to find Cilium agent pod on node $node" + exit 1 + fi + + # Get the MTU of eth0 in the Cilium agent pod + cilium_mtu=$(kubectl exec -n $NAMESPACE $cilium_pod -- cat /sys/class/net/eth0/mtu 2>/dev/null) + + if [ -z "$cilium_mtu" ]; then + echo "Failed to get MTU from Cilium agent pod on node $node" + echo "##[error]Failed to get MTU from Cilium agent pod on node $node" + exit 1 + fi + + echo "Cilium agent eth0 MTU: $cilium_mtu" + + # Get an nginx pod running on this node + nginx_pod=$(kubectl get pods -n $NAMESPACE -o wide --field-selector spec.nodeName=$node -l app=nginx -o jsonpath='{.items[0].metadata.name}') + if [ -z "$nginx_pod" ]; then + echo "Failed to find nginx pod on node $node" + echo "##[error]Failed to find nginx pod on node $node" + exit 1 + fi + # Get the MTU of eth0 in the nginx pod + nginx_mtu=$(kubectl exec -n $NAMESPACE $nginx_pod -- cat /sys/class/net/eth0/mtu 2>/dev/null) + if [ -z "$nginx_mtu" ]; then + echo "Failed to get MTU from nginx pod on node $node" + echo "##[error]Failed to get MTU from nginx pod on node $node" + exit 1 + fi + echo "Nginx pod eth0 MTU: $nginx_mtu" + + # Get the node's eth0 MTU + node_mtu=$(kubectl debug node/$node -it --image=busybox -- sh -c "cat /sys/class/net/eth0/mtu" 2>/dev/null | tail -n 1) + + if [ -z "$node_mtu" ]; then + echo "Failed to get MTU from node $node" + echo "##[error]Failed to get MTU from node $node" + exit 1 + fi + echo "Node eth0 MTU: $node_mtu" + + # Check if the MTUs match + if [ "$cilium_mtu" -eq "$nginx_mtu" ] && [ "$nginx_mtu" -eq "$node_mtu" ]; then + echo "MTU validation passed for node $node" + else + echo "MTU validation failed for node $node" + echo "Cilium agent MTU: $cilium_mtu, Nginx pod MTU: $nginx_mtu, Node MTU: $node_mtu" + echo "##[error]MTU validation failed. MTUs do not match." + exit 1 + fi + + echo "----------------------------------------" + +done + +# Clean up +kubectl delete deployment nginx -n $NAMESPACE +echo "Cleaned up nginx deployment" + +# Clean up the debug pod +debug_pod=$(kubectl get pods -o name | grep "node-debugger") +if [ -n "$debug_pod" ]; then + kubectl delete $debug_pod + kubectl wait --for=delete $debug_pod --timeout=60s + if [ $? 
-ne 0 ]; then + echo "Failed to clean up debug pod $debug_pod" + fi +else + echo "No debug pod found" +fi \ No newline at end of file diff --git a/hack/scripts/wait-cluster-update.sh b/hack/scripts/wait-cluster-update.sh new file mode 100644 index 0000000000..51b9cfd28d --- /dev/null +++ b/hack/scripts/wait-cluster-update.sh @@ -0,0 +1,16 @@ +# wait for cluster to update +while true; do + cluster_state=$($AZCLI aks show \ + --name "$CLUSTER" \ + --resource-group "$GROUP" \ + --query provisioningState) + + if echo "$cluster_state" | grep -q "Updating"; then + echo "Cluster is updating. Sleeping for 30 seconds..." + sleep 30 + else + break + fi +done +# cluster state is always set and visible outside the loop +echo "Cluster state is: $cluster_state" diff --git a/hack/toolbox/Dockerfile.windows b/hack/toolbox/Dockerfile.windows index 12c4867157..5350fd1515 100644 --- a/hack/toolbox/Dockerfile.windows +++ b/hack/toolbox/Dockerfile.windows @@ -1,5 +1,5 @@ # Build cns -FROM mcr.microsoft.com/oss/go/microsoft/golang:1.21 AS builder +FROM mcr.microsoft.com/oss/go/microsoft/golang:1.23 AS builder # Build args ARG VERSION ARG CNS_AI_PATH diff --git a/hack/toolbox/manifests/agents.yaml b/hack/toolbox/manifests/agents.yaml index 68f7d9a383..e82b49398e 100644 --- a/hack/toolbox/manifests/agents.yaml +++ b/hack/toolbox/manifests/agents.yaml @@ -16,7 +16,7 @@ spec: app: agent-pod-8085-tcp-host spec: nodeSelector: - beta.kubernetes.io/os: linux + kubernetes.io/os: linux kubernetes.io/role: agent hostNetwork: true containers: @@ -47,7 +47,7 @@ spec: app: agent-pod-8085-tcp spec: nodeSelector: - beta.kubernetes.io/os: linux + kubernetes.io/os: linux kubernetes.io/role: agent containers: - name: agent-pod-8085-tcp @@ -77,7 +77,7 @@ spec: app: agent-pod-8086-udp spec: nodeSelector: - beta.kubernetes.io/os: linux + kubernetes.io/os: linux kubernetes.io/role: agent containers: - name: agent-pod-8086-udp diff --git a/hack/toolbox/manifests/daemonset.yaml b/hack/toolbox/manifests/daemonset.yaml index e1bb2aac33..aeb1a8466d 100644 --- a/hack/toolbox/manifests/daemonset.yaml +++ b/hack/toolbox/manifests/daemonset.yaml @@ -26,7 +26,7 @@ spec: - key: CriticalAddonsOnly operator: Exists nodeSelector: - beta.kubernetes.io/os: linux + kubernetes.io/os: linux kubernetes.io/role: agent containers: - name: azure-npm diff --git a/hack/toolbox/manifests/master.yaml b/hack/toolbox/manifests/master.yaml index 15e2134b31..842f86c9a3 100644 --- a/hack/toolbox/manifests/master.yaml +++ b/hack/toolbox/manifests/master.yaml @@ -16,7 +16,7 @@ spec: app: master-pod-8085-tcp-host spec: nodeSelector: - beta.kubernetes.io/os: linux + kubernetes.io/os: linux kubernetes.io/role: master hostNetwork: true tolerations: @@ -55,7 +55,7 @@ spec: app: master-pod-8085-tcp spec: nodeSelector: - beta.kubernetes.io/os: linux + kubernetes.io/os: linux kubernetes.io/role: master tolerations: - operator: "Exists" @@ -92,7 +92,7 @@ spec: app: master-pod-8086-udp spec: nodeSelector: - beta.kubernetes.io/os: linux + kubernetes.io/os: linux kubernetes.io/role: master tolerations: - operator: "Exists" diff --git a/hack/toolbox/server/Dockerfile.heavy b/hack/toolbox/server/Dockerfile.heavy index fc0e7b160c..2366cc91bb 100644 --- a/hack/toolbox/server/Dockerfile.heavy +++ b/hack/toolbox/server/Dockerfile.heavy @@ -1,4 +1,4 @@ -FROM mcr.microsoft.com/oss/go/microsoft/golang:1.21 as build +FROM mcr.microsoft.com/oss/go/microsoft/golang:1.23 as build ADD ./ / WORKDIR / RUN CGO_ENABLED=0 GOOS=linux go build -o server . 
diff --git a/hack/toolbox/server/Dockerfile.lite b/hack/toolbox/server/Dockerfile.lite index 6c16f7459e..7ff67d2d26 100644 --- a/hack/toolbox/server/Dockerfile.lite +++ b/hack/toolbox/server/Dockerfile.lite @@ -1,4 +1,4 @@ -FROM mcr.microsoft.com/oss/go/microsoft/golang:1.21 as build +FROM mcr.microsoft.com/oss/go/microsoft/golang:1.23 as build ADD ./server/server.go / ADD ./server/go.mod / WORKDIR / diff --git a/internal/fs/atomic.go b/internal/fs/atomic.go index 03f49c2ce2..65c422e656 100644 --- a/internal/fs/atomic.go +++ b/internal/fs/atomic.go @@ -4,21 +4,26 @@ import ( "io" "io/fs" "os" - "path" + "path/filepath" + "sync" "github.com/pkg/errors" ) type AtomicWriter struct { - filename string - tempFile *os.File + dir, name string + tempFile *os.File + + lock sync.Mutex } var _ io.WriteCloser = &AtomicWriter{} // NewAtomicWriter returns an io.WriteCloser that will write contents to a temp file and move that temp file to the destination filename. If the destination // filename already exists, this constructor will copy the file to -old, truncating that file if it already exists. -func NewAtomicWriter(filename string) (*AtomicWriter, error) { +func NewAtomicWriter(f string) (*AtomicWriter, error) { + filename := filepath.Clean(f) + dir, name := filepath.Split(filename) // if a file already exists, copy it to -old exists := true existingFile, err := os.Open(filename) @@ -52,29 +57,39 @@ func NewAtomicWriter(filename string) (*AtomicWriter, error) { } } - tempFile, err := os.CreateTemp(path.Dir(filename), path.Base(filename)+"*.tmp") - if err != nil { - return nil, errors.Wrap(err, "unable to create temporary file") - } - - return &AtomicWriter{filename: filename, tempFile: tempFile}, nil + return &AtomicWriter{dir: dir, name: name}, nil } -// Close closes the temp file handle and moves the temp file to the final destination +// Close closes the temp file handle and moves the temp file to the final destination. +// Multiple calls to Close will have no effect after the first success. func (a *AtomicWriter) Close() error { - if err := a.tempFile.Close(); err != nil { + a.lock.Lock() + defer a.lock.Unlock() + if a.tempFile == nil { + return nil + } + if err := a.tempFile.Close(); err != nil && !errors.Is(err, os.ErrClosed) { return errors.Wrapf(err, "unable to close temp file %s", a.tempFile.Name()) } - - if err := os.Rename(a.tempFile.Name(), a.filename); err != nil { - return errors.Wrapf(err, "unable to move temp file %s to destination %s", a.tempFile.Name(), a.filename) + if err := os.Rename(a.tempFile.Name(), filepath.Join(a.dir, a.name)); err != nil { + return errors.Wrapf(err, "unable to move temp file %s to destination %s", a.tempFile.Name(), a.name) } - + a.tempFile = nil return nil } -// Write writes the buffer to the temp file. You must call Close() to complete the move from temp file to dest file +// Write writes the buffer to the temp file. You must call Close() to complete the move from temp file to dest file. +// Multiple calls to Write will append to the temp file. 
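+//
+// A minimal usage sketch (path and payload are illustrative; error handling elided):
+//
+//	w, _ := NewAtomicWriter("/tmp/state.json") // no temp file is created yet
+//	_, _ = w.Write([]byte(`{"k":"v"}`))        // first Write lazily creates a temp file in the destination's directory
+//	_ = w.Close()                              // temp file is closed and renamed over /tmp/state.json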
func (a *AtomicWriter) Write(p []byte) (int, error) { + a.lock.Lock() + defer a.lock.Unlock() + if a.tempFile == nil { + tempFile, err := os.CreateTemp(a.dir, a.name+"*.tmp") + if err != nil { + return 0, errors.Wrap(err, "unable to create temporary file") + } + a.tempFile = tempFile + } bs, err := a.tempFile.Write(p) return bs, errors.Wrap(err, "unable to write to temp file") } diff --git a/internal/fs/atomic_test.go b/internal/fs/atomic_test.go index 926ae88660..2f8f1a4260 100644 --- a/internal/fs/atomic_test.go +++ b/internal/fs/atomic_test.go @@ -10,7 +10,7 @@ import ( ) func TestAtomicWriterFileExists(t *testing.T) { - file := "testdata/data.txt" + file := "./testdata/data.txt" w, err := fs.NewAtomicWriter(file) require.NoError(t, err, "error creating atomic writer") @@ -37,7 +37,7 @@ func TestAtomicWriterFileExists(t *testing.T) { } func TestAtomicWriterNewFile(t *testing.T) { - file := "testdata/newdata.txt" + file := "./testdata/newdata.txt" // if the file exists before running this test, remove it err := os.Remove(file) diff --git a/internal/time/duration.go b/internal/time/duration.go new file mode 100644 index 0000000000..f9a56dc0a7 --- /dev/null +++ b/internal/time/duration.go @@ -0,0 +1,30 @@ +package time + +import ( + "encoding/json" + "time" +) + +const Second = time.Second //nolint:revive // it's not a suffix + +type Duration struct { + time.Duration +} + +func (d Duration) MarshalJSON() ([]byte, error) { + return json.Marshal(d.String()) //nolint:wrapcheck // ignore +} + +func (d *Duration) UnmarshalJSON(b []byte) error { + var s string + err := json.Unmarshal(b, &s) + if err != nil { + return err //nolint:wrapcheck // ignore + } + duration, err := time.ParseDuration(s) + if err != nil { + return err //nolint:wrapcheck // ignore + } + d.Duration = duration + return nil +} diff --git a/internal/time/duration_test.go b/internal/time/duration_test.go new file mode 100644 index 0000000000..d35b5772b0 --- /dev/null +++ b/internal/time/duration_test.go @@ -0,0 +1,64 @@ +package time + +import ( + "encoding/json" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestDurationMarshalJSON(t *testing.T) { + tests := []struct { + name string + have Duration + want []byte + wantErr bool + }{ + { + name: "valid", + have: Duration{30 * time.Second}, + want: []byte(`"30s"`), + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := json.Marshal(tt.have) + if tt.wantErr { + require.Error(t, err) + return + } + require.NoError(t, err) + require.Equal(t, tt.want, got) + }) + } +} + +func TestDurationUnmarshalJSON(t *testing.T) { + tests := []struct { + name string + have []byte + want Duration + wantErr bool + }{ + { + name: "valid", + have: []byte(`"30s"`), + want: Duration{30 * time.Second}, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := &Duration{} + err := json.Unmarshal(tt.have, got) + if tt.wantErr { + require.Error(t, err) + return + } + require.NoError(t, err) + require.Equal(t, tt.want, *got) + }) + } +} diff --git a/iptables/iptables.go b/iptables/iptables.go index 76dec97fed..8d82b8e934 100644 --- a/iptables/iptables.go +++ b/iptables/iptables.go @@ -3,6 +3,7 @@ package iptables // This package contains wrapper functions to program iptables rules import ( + "errors" "fmt" "github.com/Azure/azure-container-networking/cni/log" @@ -10,7 +11,10 @@ import ( "go.uber.org/zap" ) -var logger = log.CNILogger.With(zap.String("component", "cni-iptables")) +var ( 
+ logger = log.CNILogger.With(zap.String("component", "cni-iptables")) + errCouldNotValidateRuleExists = errors.New("could not validate iptable rule exists after insertion") +) // cni iptable chains const ( @@ -87,17 +91,20 @@ type IPTableEntry struct { Params string } -type Client struct{} +type Client struct { + pl platform.ExecClient +} func NewClient() *Client { - return &Client{} + return &Client{ + pl: platform.NewExecClient(logger), + } } // Run iptables command func (c *Client) RunCmd(version, params string) error { var cmd string - p := platform.NewExecClient(logger) iptCmd := iptables if version == V6 { iptCmd = ip6tables @@ -109,7 +116,7 @@ func (c *Client) RunCmd(version, params string) error { cmd = fmt.Sprintf("%s -w %d %s", iptCmd, lockTimeout, params) } - if _, err := p.ExecuteRawCommand(cmd); err != nil { + if _, err := c.pl.ExecuteRawCommand(cmd); err != nil { return err } @@ -171,7 +178,14 @@ func (c *Client) InsertIptableRule(version, tableName, chainName, match, target } cmd := c.GetInsertIptableRuleCmd(version, tableName, chainName, match, target) - return c.RunCmd(version, cmd.Params) + err := c.RunCmd(version, cmd.Params) + if err != nil { + return err + } + if !c.RuleExists(version, tableName, chainName, match, target) { + return errCouldNotValidateRuleExists + } + return nil } func (c *Client) GetAppendIptableRuleCmd(version, tableName, chainName, match, target string) IPTableEntry { @@ -189,7 +203,14 @@ func (c *Client) AppendIptableRule(version, tableName, chainName, match, target } cmd := c.GetAppendIptableRuleCmd(version, tableName, chainName, match, target) - return c.RunCmd(version, cmd.Params) + err := c.RunCmd(version, cmd.Params) + if err != nil { + return err + } + if !c.RuleExists(version, tableName, chainName, match, target) { + return errCouldNotValidateRuleExists + } + return nil } // Delete matched iptable rule diff --git a/iptables/iptables_test.go b/iptables/iptables_test.go new file mode 100644 index 0000000000..5352214f0e --- /dev/null +++ b/iptables/iptables_test.go @@ -0,0 +1,260 @@ +package iptables + +import ( + "errors" + "testing" + + "github.com/Azure/azure-container-networking/platform" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type validationCase struct { + cmd string + doErr bool +} + +var ( + errMockPlatform = errors.New("mock pl error") + errExtraneousCalls = errors.New("function called too many times") +) + +// GenerateValidationFunc takes in a slice of expected calls and intended responses for each time the returned function is called. +// For example, if expectedCmds has one validationCase, the first call of the returned func will check that the command +// passed in matches the first validationCase's command (failing the test if not), and will return an error if the first validationCase has doErr set to true. +// The second call will use the second validation case in the slice to check against the cmd passed in, and so on. +// If the returned function is called more times than there are elements in expectedCmds, errExtraneousCalls is returned. +func GenerateValidationFunc(t *testing.T, expectedCmds []validationCase) func(cmd string) (string, error) { + curr := 0 + + ret := func(cmd string) (string, error) { + if curr >= len(expectedCmds) { + return "", errExtraneousCalls + } + expected := expectedCmds[curr] + curr++ + + require.Equal(t, expected.cmd, cmd, "command run does not match expected") + + if expected.doErr { + return "", errMockPlatform + } + return "", nil + } + + return ret +} + +func 
TestGenerateValidationFunc(t *testing.T) { + mockPL := platform.NewMockExecClient(false) + fn := GenerateValidationFunc(t, []validationCase{ + { + cmd: "echo hello", + doErr: true, + }, + }) + mockPL.SetExecRawCommand(fn) + + _, err := mockPL.ExecuteRawCommand("echo hello") + require.Error(t, err) + + _, err = mockPL.ExecuteRawCommand("echo hello") + require.ErrorIs(t, err, errExtraneousCalls) +} + +func TestRunCmd(t *testing.T) { + mockPL := platform.NewMockExecClient(false) + client := &Client{ + pl: mockPL, + } + mockPL.SetExecRawCommand( + GenerateValidationFunc(t, []validationCase{ + { + cmd: "iptables -w 60 -L", + doErr: false, + }, + }), + ) + + err := client.RunCmd(V4, "-L") + require.NoError(t, err) +} + +func TestCreateChain(t *testing.T) { + mockPL := platform.NewMockExecClient(false) + client := &Client{ + pl: mockPL, + } + mockPL.SetExecRawCommand( + GenerateValidationFunc(t, []validationCase{ + { + cmd: "iptables -w 60 -t filter -nL AZURECNIINPUT", + doErr: true, + }, + { + cmd: "iptables -w 60 -t filter -N AZURECNIINPUT", + doErr: false, + }, + }), + ) + + err := client.CreateChain(V4, Filter, CNIInputChain) + require.NoError(t, err) +} + +func TestInsertIptableRule(t *testing.T) { + mockPL := platform.NewMockExecClient(false) + client := &Client{ + pl: mockPL, + } + + mockPL.SetExecRawCommand( + GenerateValidationFunc(t, []validationCase{ + // iptables succeeds + { + cmd: "iptables -w 60 -t filter -C AZURECNIINPUT -p tcp --dport 70 -j ACCEPT", + doErr: true, + }, + { + cmd: "iptables -w 60 -t filter -I AZURECNIINPUT 1 -p tcp --dport 70 -j ACCEPT", + doErr: false, + }, + { + cmd: "iptables -w 60 -t filter -C AZURECNIINPUT -p tcp --dport 70 -j ACCEPT", + doErr: false, + }, + // iptables fails silently + { + cmd: "iptables -w 60 -t filter -C AZURECNIINPUT -p tcp --dport 80 -j ACCEPT", + doErr: true, + }, + { + cmd: "iptables -w 60 -t filter -I AZURECNIINPUT 1 -p tcp --dport 80 -j ACCEPT", + doErr: false, + }, + { + cmd: "iptables -w 60 -t filter -C AZURECNIINPUT -p tcp --dport 80 -j ACCEPT", + doErr: true, + }, + // iptables finds rule already + { + cmd: "iptables -w 60 -t filter -C AZURECNIINPUT -p tcp --dport 90 -j ACCEPT", + doErr: false, + }, + }), + ) + // iptables succeeds + err := client.InsertIptableRule(V4, Filter, CNIInputChain, "-p tcp --dport 70", Accept) + require.NoError(t, err) + // iptables fails silently + err = client.InsertIptableRule(V4, Filter, CNIInputChain, "-p tcp --dport 80", Accept) + require.ErrorIs(t, err, errCouldNotValidateRuleExists) + // iptables finds rule already + err = client.InsertIptableRule(V4, Filter, CNIInputChain, "-p tcp --dport 90", Accept) + require.NoError(t, err) +} + +func TestAppendIptableRule(t *testing.T) { + mockPL := platform.NewMockExecClient(false) + client := &Client{ + pl: mockPL, + } + mockPL.SetExecRawCommand( + GenerateValidationFunc(t, []validationCase{ + // iptables succeeds + { + cmd: "iptables -w 60 -t filter -C AZURECNIINPUT -p tcp --dport 70 -j ACCEPT", + doErr: true, + }, + { + cmd: "iptables -w 60 -t filter -A AZURECNIINPUT -p tcp --dport 70 -j ACCEPT", + doErr: false, + }, + { + cmd: "iptables -w 60 -t filter -C AZURECNIINPUT -p tcp --dport 70 -j ACCEPT", + doErr: false, + }, + // iptables fails silently + { + cmd: "iptables -w 60 -t filter -C AZURECNIINPUT -p tcp --dport 80 -j ACCEPT", + doErr: true, + }, + { + cmd: "iptables -w 60 -t filter -A AZURECNIINPUT -p tcp --dport 80 -j ACCEPT", + doErr: false, + }, + { + cmd: "iptables -w 60 -t filter -C AZURECNIINPUT -p tcp --dport 80 -j ACCEPT", + doErr: true, + 
}, + // iptables finds rule already + { + cmd: "iptables -w 60 -t filter -C AZURECNIINPUT -p tcp --dport 90 -j ACCEPT", + doErr: false, + }, + }), + ) + // iptables succeeds + err := client.AppendIptableRule(V4, Filter, CNIInputChain, "-p tcp --dport 70", Accept) + require.NoError(t, err) + // iptables fails silently + err = client.AppendIptableRule(V4, Filter, CNIInputChain, "-p tcp --dport 80", Accept) + require.ErrorIs(t, err, errCouldNotValidateRuleExists) + // iptables finds rule already + err = client.AppendIptableRule(V4, Filter, CNIInputChain, "-p tcp --dport 90", Accept) + require.NoError(t, err) +} + +func TestDeleteIptableRule(t *testing.T) { + mockPL := platform.NewMockExecClient(false) + client := &Client{ + pl: mockPL, + } + mockPL.SetExecRawCommand( + GenerateValidationFunc(t, []validationCase{ + { + cmd: "iptables -w 60 -t filter -D AZURECNIINPUT -p tcp --dport 80 -j ACCEPT", + doErr: false, + }, + }), + ) + + err := client.DeleteIptableRule(V4, Filter, CNIInputChain, "-p tcp --dport 80", Accept) + require.NoError(t, err) +} + +func TestChainExists(t *testing.T) { + mockPL := platform.NewMockExecClient(false) + client := &Client{ + pl: mockPL, + } + mockPL.SetExecRawCommand( + GenerateValidationFunc(t, []validationCase{ + { + cmd: "iptables -w 60 -t filter -nL AZURECNIINPUT", + doErr: true, + }, + }), + ) + + result := client.ChainExists(V4, Filter, CNIInputChain) + assert.False(t, result) +} + +func TestRuleExists(t *testing.T) { + mockPL := platform.NewMockExecClient(false) + client := &Client{ + pl: mockPL, + } + mockPL.SetExecRawCommand( + GenerateValidationFunc(t, []validationCase{ + { + cmd: "iptables -w 60 -t filter -C AZURECNIINPUT -p tcp --dport 80 -j ACCEPT", + doErr: true, + }, + }), + ) + + result := client.RuleExists(V4, Filter, CNIInputChain, "-p tcp --dport 80", Accept) + assert.False(t, result) +} diff --git a/netlink/mocknetlink.go b/netlink/mocknetlink.go index 9dfc5da1a1..3620bead45 100644 --- a/netlink/mocknetlink.go +++ b/netlink/mocknetlink.go @@ -22,6 +22,7 @@ type MockNetlink struct { errorString string deleteRouteFn routeValidateFn addRouteFn routeValidateFn + DeleteLinkFn func(name string) error } func NewMockNetlink(returnError bool, errorString string) *MockNetlink { @@ -55,6 +56,9 @@ func (f *MockNetlink) SetLinkMTU(name string, mtu int) error { } func (f *MockNetlink) DeleteLink(name string) error { + if f.DeleteLinkFn != nil { + return f.DeleteLinkFn(name) + } return f.error() } diff --git a/netlink/netlink_test.go b/netlink/netlink_test.go index d5586f093a..28f9997b4c 100644 --- a/netlink/netlink_test.go +++ b/netlink/netlink_test.go @@ -9,6 +9,7 @@ package netlink import ( "net" "testing" + "time" "github.com/stretchr/testify/require" "golang.org/x/sys/unix" @@ -285,6 +286,10 @@ func TestAddRemoveStaticArp(t *testing.T) { mac, _ := net.ParseMAC("aa:b3:4d:5e:e2:4a") nl := NewNetlink() + // wait for interface to fully come up + // if it isn't fully up it might wipe the arp entry we're about to add + time.Sleep(time.Millisecond * 100) + linkInfo := LinkInfo{ Name: ifName, IPAddr: ip, @@ -302,6 +307,9 @@ func TestAddRemoveStaticArp(t *testing.T) { MacAddress: mac, } + // ensure arp address remains for a period of time + time.Sleep(time.Millisecond * 100) + err = nl.SetOrRemoveLinkAddress(linkInfo, REMOVE, NUD_INCOMPLETE) if err != nil { t.Errorf("ret val %v", err) diff --git a/network/dhcp.go b/network/dhcp.go new file mode 100644 index 0000000000..82be4bd977 --- /dev/null +++ b/network/dhcp.go @@ -0,0 +1,16 @@ +package network + +import ( + 
"context" + "net" +) + +type dhcpClient interface { + DiscoverRequest(context.Context, net.HardwareAddr, string) error +} + +type mockDHCP struct{} + +func (netns *mockDHCP) DiscoverRequest(context.Context, net.HardwareAddr, string) error { + return nil +} diff --git a/network/endpoint.go b/network/endpoint.go index f9f59e72f7..bd9fa7fc9b 100644 --- a/network/endpoint.go +++ b/network/endpoint.go @@ -66,7 +66,7 @@ type EndpointInfo struct { EndpointID string ContainerID string NetNsPath string - IfName string // value differs during creation vs. deletion flow + IfName string // value differs during creation vs. deletion flow; used in statefile, not necessarily the nic name SandboxKey string IfIndex int MacAddress net.HardwareAddr @@ -93,7 +93,7 @@ type EndpointInfo struct { IPV6Mode string VnetCidrs string ServiceCidrs string - NATInfo []policy.NATInfo + NATInfo []policy.NATInfo // windows only NICType cns.NICType SkipDefaultRoutes bool HNSEndpointID string @@ -138,6 +138,7 @@ type InterfaceInfo struct { HostSubnetPrefix net.IPNet // Move this field from ipamAddResult NCResponse *cns.GetNetworkContainerResponse PnPID string + EndpointPolicies []policy.Policy } type IPConfig struct { @@ -150,15 +151,35 @@ type apipaClient interface { CreateHostNCApipaEndpoint(ctx context.Context, networkContainerID string) (string, error) } +// FormatSliceOfPointersToString takes in a slice of pointers, and for each pointer, dereferences the pointer if not nil +// and then formats it to its string representation, returning a string where each line is a separate item in the slice. +// This is used for convenience to get a string representation of the actual structs and their fields +// in slices of pointers since the default string representation of a slice of pointers is a list of memory addresses. 
+func FormatSliceOfPointersToString[T any](slice []*T) string { + var builder strings.Builder + for _, ptr := range slice { + if ptr != nil { + fmt.Fprintf(&builder, "%+v \n", *ptr) + } + } + return builder.String() +} + func (epInfo *EndpointInfo) PrettyString() string { - return fmt.Sprintf("Id:%s ContainerID:%s NetNsPath:%s IfName:%s IfIndex:%d MacAddr:%s IPAddrs:%v Gateways:%v Data:%+v NICType: %s NetworkContainerID: %s HostIfName: %s NetNs: %s", + return fmt.Sprintf("EndpointID:%s ContainerID:%s NetNsPath:%s IfName:%s IfIndex:%d MacAddr:%s IPAddrs:%v Gateways:%v Data:%+v NICType: %s "+ + "NetworkContainerID: %s HostIfName: %s NetNs: %s Options: %v MasterIfName: %s HNSEndpointID: %s HNSNetworkID: %s", epInfo.EndpointID, epInfo.ContainerID, epInfo.NetNsPath, epInfo.IfName, epInfo.IfIndex, epInfo.MacAddress.String(), epInfo.IPAddresses, - epInfo.Gateways, epInfo.Data, epInfo.NICType, epInfo.NetworkContainerID, epInfo.HostIfName, epInfo.NetNs) + epInfo.Gateways, epInfo.Data, epInfo.NICType, epInfo.NetworkContainerID, epInfo.HostIfName, epInfo.NetNs, epInfo.Options, epInfo.MasterIfName, + epInfo.HNSEndpointID, epInfo.HNSNetworkID) } func (ifInfo *InterfaceInfo) PrettyString() string { - return fmt.Sprintf("Name:%s NICType:%v MacAddr:%s IPConfigs:%+v Routes:%+v DNSInfo:%+v", - ifInfo.Name, ifInfo.NICType, ifInfo.MacAddress.String(), ifInfo.IPConfigs, ifInfo.Routes, ifInfo.DNS) + var ncresponse string + if ifInfo.NCResponse != nil { + ncresponse = fmt.Sprintf("%+v", *ifInfo.NCResponse) + } + return fmt.Sprintf("Name:%s NICType:%v MacAddr:%s IPConfigs:%s Routes:%+v DNSInfo:%+v NCResponse: %s", + ifInfo.Name, ifInfo.NICType, ifInfo.MacAddress.String(), FormatSliceOfPointersToString(ifInfo.IPConfigs), ifInfo.Routes, ifInfo.DNS, ncresponse) } // NewEndpoint creates a new endpoint in the network. @@ -169,6 +190,7 @@ func (nw *network) newEndpoint( netioCli netio.NetIOInterface, nsc NamespaceClientInterface, iptc ipTablesClient, + dhcpc dhcpClient, epInfo *EndpointInfo, ) (*endpoint, error) { var ep *endpoint @@ -182,7 +204,7 @@ func (nw *network) newEndpoint( // Call the platform implementation. // Pass nil for epClient and will be initialized in newendpointImpl - ep, err = nw.newEndpointImpl(apipaCli, nl, plc, netioCli, nil, nsc, iptc, epInfo) + ep, err = nw.newEndpointImpl(apipaCli, nl, plc, netioCli, nil, nsc, iptc, dhcpc, epInfo) if err != nil { return nil, err } @@ -195,7 +217,7 @@ func (nw *network) newEndpoint( // DeleteEndpoint deletes an existing endpoint from the network. func (nw *network) deleteEndpoint(nl netlink.NetlinkInterface, plc platform.ExecClient, nioc netio.NetIOInterface, nsc NamespaceClientInterface, - iptc ipTablesClient, endpointID string, + iptc ipTablesClient, dhcpc dhcpClient, endpointID string, ) error { var err error @@ -215,7 +237,7 @@ func (nw *network) deleteEndpoint(nl netlink.NetlinkInterface, plc platform.Exec // Call the platform implementation. 
// Pass nil for epClient and will be initialized in deleteEndpointImpl - err = nw.deleteEndpointImpl(nl, plc, nil, nioc, nsc, iptc, ep) + err = nw.deleteEndpointImpl(nl, plc, nil, nioc, nsc, iptc, dhcpc, ep) if err != nil { return err } diff --git a/network/endpoint_linux.go b/network/endpoint_linux.go index faca6c4c97..5f57a66d51 100644 --- a/network/endpoint_linux.go +++ b/network/endpoint_linux.go @@ -57,6 +57,7 @@ func (nw *network) newEndpointImpl( testEpClient EndpointClient, nsc NamespaceClientInterface, iptc ipTablesClient, + dhcpclient dhcpClient, epInfo *EndpointInfo, ) (*endpoint, error) { var ( @@ -167,7 +168,7 @@ func (nw *network) newEndpointImpl( epClient = NewLinuxBridgeEndpointClient(nw.extIf, hostIfName, contIfName, nw.Mode, nl, plc) } else if epInfo.NICType == cns.NodeNetworkInterfaceFrontendNIC { logger.Info("Secondary client") - epClient = NewSecondaryEndpointClient(nl, netioCli, plc, nsc, ep) + epClient = NewSecondaryEndpointClient(nl, netioCli, plc, nsc, dhcpclient, ep) } else { logger.Info("Transparent client") epClient = NewTransparentEndpointClient(nw.extIf, hostIfName, contIfName, nw.Mode, nl, netioCli, plc) @@ -265,7 +266,7 @@ func (nw *network) newEndpointImpl( // deleteEndpointImpl deletes an existing endpoint from the network. func (nw *network) deleteEndpointImpl(nl netlink.NetlinkInterface, plc platform.ExecClient, epClient EndpointClient, nioc netio.NetIOInterface, nsc NamespaceClientInterface, - iptc ipTablesClient, ep *endpoint, + iptc ipTablesClient, dhcpc dhcpClient, ep *endpoint, ) error { // Delete the veth pair by deleting one of the peer interfaces. // Deleting the host interface is more convenient since it does not require @@ -278,7 +279,6 @@ func (nw *network) deleteEndpointImpl(nl netlink.NetlinkInterface, plc platform. epInfo := ep.getInfo() if nw.Mode == opModeTransparentVlan { epClient = NewTransparentVlanEndpointClient(nw, epInfo, ep.HostIfName, "", ep.VlanID, ep.LocalIP, nl, plc, nsc, iptc) - } else { epClient = NewOVSEndpointClient(nw, epInfo, ep.HostIfName, "", ep.VlanID, ep.LocalIP, nl, ovsctl.NewOvsctl(), plc, iptc) } @@ -287,7 +287,7 @@ func (nw *network) deleteEndpointImpl(nl netlink.NetlinkInterface, plc platform. 
} else { // delete if secondary interfaces populated or endpoint of type delegated (new way) if len(ep.SecondaryInterfaces) > 0 || ep.NICType == cns.NodeNetworkInterfaceFrontendNIC { - epClient = NewSecondaryEndpointClient(nl, nioc, plc, nsc, ep) + epClient = NewSecondaryEndpointClient(nl, nioc, plc, nsc, dhcpc, ep) epClient.DeleteEndpointRules(ep) //nolint:errcheck // ignore error epClient.DeleteEndpoints(ep) diff --git a/network/endpoint_test.go b/network/endpoint_test.go index 3835af0f26..bc31c3b3ac 100644 --- a/network/endpoint_test.go +++ b/network/endpoint_test.go @@ -22,6 +22,32 @@ func TestEndpoint(t *testing.T) { RunSpecs(t, "Endpoint Suite") } +var _ = Describe("Test FormatStructPointers", func() { + ptrSlice := []*IPConfig{ + { + Gateway: net.ParseIP("10.10.0.1"), + }, + { + Gateway: net.ParseIP("10.10.0.2"), + }, + } + Describe("Test FormatStructPointers", func() { + Context("When passing in a slice of pointers", func() { + It("Should create a pretty printed string of the contents", func() { + result := FormatSliceOfPointersToString(ptrSlice) + Expect(result).To(Equal("{Address:{IP: Mask:} Gateway:10.10.0.1} \n{Address:{IP: Mask:} Gateway:10.10.0.2} \n")) + }) + }) + Context("When passing in nil", func() { + It("Should not error", func() { + var empty []*IPConfig + result := FormatSliceOfPointersToString(empty) + Expect(result).To(Equal("")) + }) + }) + }) +}) + var _ = Describe("Test Endpoint", func() { Describe("Test getEndpoint", func() { Context("When endpoint not exists", func() { @@ -186,7 +212,7 @@ var _ = Describe("Test Endpoint", func() { It("Should be added", func() { // Add endpoint with valid id ep, err := nw.newEndpointImpl(nil, netlink.NewMockNetlink(false, ""), platform.NewMockExecClient(false), - netio.NewMockNetIO(false, 0), NewMockEndpointClient(nil), NewMockNamespaceClient(), iptables.NewClient(), epInfo) + netio.NewMockNetIO(false, 0), NewMockEndpointClient(nil), NewMockNamespaceClient(), iptables.NewClient(), &mockDHCP{}, epInfo) Expect(err).NotTo(HaveOccurred()) Expect(ep).NotTo(BeNil()) Expect(ep.Id).To(Equal(epInfo.EndpointID)) @@ -198,7 +224,7 @@ var _ = Describe("Test Endpoint", func() { extIf: &externalInterface{IPv4Gateway: net.ParseIP("192.168.0.1")}, } ep, err := nw2.newEndpointImpl(nil, netlink.NewMockNetlink(false, ""), platform.NewMockExecClient(false), - netio.NewMockNetIO(false, 0), NewMockEndpointClient(nil), NewMockNamespaceClient(), iptables.NewClient(), epInfo) + netio.NewMockNetIO(false, 0), NewMockEndpointClient(nil), NewMockNamespaceClient(), iptables.NewClient(), &mockDHCP{}, epInfo) Expect(err).NotTo(HaveOccurred()) Expect(ep).NotTo(BeNil()) Expect(ep.Id).To(Equal(epInfo.EndpointID)) @@ -216,7 +242,7 @@ var _ = Describe("Test Endpoint", func() { Expect(err).ToNot(HaveOccurred()) // Adding endpoint with same id should fail and delete should cleanup the state ep2, err := nw.newEndpointImpl(nil, netlink.NewMockNetlink(false, ""), platform.NewMockExecClient(false), - netio.NewMockNetIO(false, 0), mockCli, NewMockNamespaceClient(), iptables.NewClient(), epInfo) + netio.NewMockNetIO(false, 0), mockCli, NewMockNamespaceClient(), iptables.NewClient(), &mockDHCP{}, epInfo) Expect(err).To(HaveOccurred()) Expect(ep2).To(BeNil()) assert.Contains(GinkgoT(), err.Error(), "Endpoint already exists") @@ -226,17 +252,17 @@ var _ = Describe("Test Endpoint", func() { // Adding an endpoint with an id. 
mockCli := NewMockEndpointClient(nil) ep2, err := nw.newEndpointImpl(nil, netlink.NewMockNetlink(false, ""), platform.NewMockExecClient(false), - netio.NewMockNetIO(false, 0), mockCli, NewMockNamespaceClient(), iptables.NewClient(), epInfo) + netio.NewMockNetIO(false, 0), mockCli, NewMockNamespaceClient(), iptables.NewClient(), &mockDHCP{}, epInfo) Expect(err).ToNot(HaveOccurred()) Expect(ep2).ToNot(BeNil()) Expect(len(mockCli.endpoints)).To(Equal(1)) // Deleting the endpoint //nolint:errcheck // ignore error - nw.deleteEndpointImpl(netlink.NewMockNetlink(false, ""), platform.NewMockExecClient(false), mockCli, netio.NewMockNetIO(false, 0), NewMockNamespaceClient(), iptables.NewClient(), ep2) + nw.deleteEndpointImpl(netlink.NewMockNetlink(false, ""), platform.NewMockExecClient(false), mockCli, netio.NewMockNetIO(false, 0), NewMockNamespaceClient(), iptables.NewClient(), &mockDHCP{}, ep2) Expect(len(mockCli.endpoints)).To(Equal(0)) // Deleting same endpoint with same id should not fail //nolint:errcheck // ignore error - nw.deleteEndpointImpl(netlink.NewMockNetlink(false, ""), platform.NewMockExecClient(false), mockCli, netio.NewMockNetIO(false, 0), NewMockNamespaceClient(), iptables.NewClient(), ep2) + nw.deleteEndpointImpl(netlink.NewMockNetlink(false, ""), platform.NewMockExecClient(false), mockCli, netio.NewMockNetIO(false, 0), NewMockNamespaceClient(), iptables.NewClient(), &mockDHCP{}, ep2) Expect(len(mockCli.endpoints)).To(Equal(0)) }) }) @@ -256,7 +282,7 @@ var _ = Describe("Test Endpoint", func() { extIf: &externalInterface{IPv4Gateway: net.ParseIP("192.168.0.1")}, } ep, err := nw2.newEndpointImpl(nil, netlink.NewMockNetlink(false, ""), platform.NewMockExecClient(false), - netio.NewMockNetIO(false, 0), NewMockEndpointClient(nil), NewMockNamespaceClient(), iptables.NewClient(), epInfo) + netio.NewMockNetIO(false, 0), NewMockEndpointClient(nil), NewMockNamespaceClient(), iptables.NewClient(), &mockDHCP{}, epInfo) Expect(err).NotTo(HaveOccurred()) Expect(ep).NotTo(BeNil()) Expect(ep.Id).To(Equal(epInfo.EndpointID)) @@ -267,31 +293,6 @@ var _ = Describe("Test Endpoint", func() { Expect(ep.IfName).To(Equal("masterIfName")) }) }) - Context("When endpoint added accelnet", func() { - epInfo := &EndpointInfo{ - EndpointID: "768e8deb-eth1", - Data: make(map[string]interface{}), - IfName: eth0IfName, - MasterIfName: "accelnetNIC", - NICType: cns.NodeNetworkInterfaceAccelnetFrontendNIC, - } - - It("should have fields set", func() { - nw2 := &network{ - Endpoints: map[string]*endpoint{}, - extIf: &externalInterface{IPv4Gateway: net.ParseIP("192.168.0.1")}, - } - ep, err := nw2.newEndpointImpl(nil, netlink.NewMockNetlink(false, ""), platform.NewMockExecClient(false), - netio.NewMockNetIO(false, 0), NewMockEndpointClient(nil), NewMockNamespaceClient(), iptables.NewClient(), epInfo) - Expect(err).NotTo(HaveOccurred()) - Expect(ep).NotTo(BeNil()) - Expect(ep.Id).To(Equal(epInfo.EndpointID)) - Expect(ep.Gateways).NotTo(BeNil()) - Expect(len(ep.Gateways)).To(Equal(1)) - Expect(ep.Gateways[0].String()).To(Equal("192.168.0.1")) - Expect(ep.IfName).To(Equal("accelnetNIC")) - }) - }) Context("When endpoint add failed", func() { It("Should not be added to the network", func() { nw := &network{ @@ -309,11 +310,11 @@ var _ = Describe("Test Endpoint", func() { } return nil - }), NewMockNamespaceClient(), iptables.NewClient(), epInfo) + }), NewMockNamespaceClient(), iptables.NewClient(), &mockDHCP{}, epInfo) Expect(err).To(HaveOccurred()) Expect(ep).To(BeNil()) ep, err = nw.newEndpointImpl(nil, 
netlink.NewMockNetlink(false, ""), platform.NewMockExecClient(false), - netio.NewMockNetIO(false, 0), NewMockEndpointClient(nil), NewMockNamespaceClient(), iptables.NewClient(), epInfo) + netio.NewMockNetIO(false, 0), NewMockEndpointClient(nil), NewMockNamespaceClient(), iptables.NewClient(), &mockDHCP{}, epInfo) Expect(err).NotTo(HaveOccurred()) Expect(ep).NotTo(BeNil()) Expect(ep.Id).To(Equal(epInfo.EndpointID)) @@ -342,14 +343,14 @@ var _ = Describe("Test Endpoint", func() { It("Should not add endpoint to the network when there is an error", func() { secondaryEpInfo.MacAddress = netio.BadHwAddr // mock netlink will fail to set link state on bad eth ep, err := nw.newEndpointImpl(nil, netlink.NewMockNetlink(false, ""), platform.NewMockExecClient(false), - netio.NewMockNetIO(false, 0), nil, NewMockNamespaceClient(), iptables.NewClient(), secondaryEpInfo) + netio.NewMockNetIO(false, 0), nil, NewMockNamespaceClient(), iptables.NewClient(), &mockDHCP{}, secondaryEpInfo) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(Equal("SecondaryEndpointClient Error: " + netlink.ErrorMockNetlink.Error())) Expect(ep).To(BeNil()) // should not panic or error when going through the unified endpoint impl flow with only the delegated nic type fields secondaryEpInfo.MacAddress = netio.HwAddr ep, err = nw.newEndpointImpl(nil, netlink.NewMockNetlink(false, ""), platform.NewMockExecClient(false), - netio.NewMockNetIO(false, 0), nil, NewMockNamespaceClient(), iptables.NewClient(), secondaryEpInfo) + netio.NewMockNetIO(false, 0), nil, NewMockNamespaceClient(), iptables.NewClient(), &mockDHCP{}, secondaryEpInfo) Expect(err).ToNot(HaveOccurred()) Expect(ep.Id).To(Equal(epInfo.EndpointID)) }) @@ -357,12 +358,12 @@ var _ = Describe("Test Endpoint", func() { It("Should add endpoint when there are no errors", func() { secondaryEpInfo.MacAddress = netio.HwAddr ep, err := nw.newEndpointImpl(nil, netlink.NewMockNetlink(false, ""), platform.NewMockExecClient(false), - netio.NewMockNetIO(false, 0), nil, NewMockNamespaceClient(), iptables.NewClient(), secondaryEpInfo) + netio.NewMockNetIO(false, 0), nil, NewMockNamespaceClient(), iptables.NewClient(), &mockDHCP{}, secondaryEpInfo) Expect(err).ToNot(HaveOccurred()) Expect(ep.Id).To(Equal(epInfo.EndpointID)) ep, err = nw.newEndpointImpl(nil, netlink.NewMockNetlink(false, ""), platform.NewMockExecClient(false), - netio.NewMockNetIO(false, 0), nil, NewMockNamespaceClient(), iptables.NewClient(), epInfo) + netio.NewMockNetIO(false, 0), nil, NewMockNamespaceClient(), iptables.NewClient(), &mockDHCP{}, epInfo) Expect(err).ToNot(HaveOccurred()) Expect(ep.Id).To(Equal(epInfo.EndpointID)) }) diff --git a/network/endpoint_windows.go b/network/endpoint_windows.go index c912552846..edd52327f2 100644 --- a/network/endpoint_windows.go +++ b/network/endpoint_windows.go @@ -150,6 +150,7 @@ func (nw *network) newEndpointImpl( _ EndpointClient, _ NamespaceClientInterface, _ ipTablesClient, + _ dhcpClient, epInfo *EndpointInfo, ) (*endpoint, error) { if epInfo.NICType == cns.BackendNIC { @@ -308,7 +309,7 @@ func (nw *network) configureHcnEndpoint(epInfo *EndpointInfo) (*hcn.HostComputeE // macAddress type for InfraNIC is like "60:45:bd:12:45:65" // if NICType is delegatedVMNIC or AccelnetNIC, convert the macaddress format macAddress := epInfo.MacAddress.String() - if epInfo.NICType == cns.NodeNetworkInterfaceFrontendNIC || epInfo.NICType == cns.NodeNetworkInterfaceAccelnetFrontendNIC { + if epInfo.NICType == cns.NodeNetworkInterfaceFrontendNIC { // convert the format of macAddress that HNS 
can accept, i.e, "60-45-bd-12-45-65" if NIC type is delegated NIC macAddress = strings.Join(strings.Split(macAddress, ":"), "-") } @@ -321,8 +322,8 @@ func (nw *network) configureHcnEndpoint(epInfo *EndpointInfo) (*hcn.HostComputeE return nil, err } - // add hcnEndpoint policy for accelnet - if epInfo.NICType == cns.NodeNetworkInterfaceAccelnetFrontendNIC { + // add hcnEndpoint policy for accelnet for frontendNIC + if epInfo.NICType == cns.NodeNetworkInterfaceFrontendNIC { endpointPolicy, err := policy.AddAccelnetPolicySetting() if err != nil { logger.Error("Failed to set iov endpoint policy", zap.Error(err)) @@ -521,7 +522,7 @@ func (nw *network) newEndpointImplHnsV2(cli apipaClient, epInfo *EndpointInfo) ( // deleteEndpointImpl deletes an existing endpoint from the network. func (nw *network) deleteEndpointImpl(_ netlink.NetlinkInterface, _ platform.ExecClient, _ EndpointClient, _ netio.NetIOInterface, _ NamespaceClientInterface, - _ ipTablesClient, ep *endpoint, + _ ipTablesClient, _ dhcpClient, ep *endpoint, ) error { // endpoint deletion is not required for IB if ep.NICType == cns.BackendNIC { @@ -709,7 +710,7 @@ func getLocationPath(instanceID string, plc platform.ExecClient) (string, error) return locationPath, nil } -// Get PnP device state +// Get PnP device state; PnP device objects represent the mounted/dismounted IB VFs // return devpkeyDeviceIsPresent and devpkeyDeviceProblemCode func getPnpDeviceState(instanceID string, plc platform.ExecClient) (string, string, error) { //nolint // get if device is present diff --git a/network/endpoint_windows_test.go b/network/endpoint_windows_test.go index 4b6588cd36..1dfb414bbc 100644 --- a/network/endpoint_windows_test.go +++ b/network/endpoint_windows_test.go @@ -107,7 +107,8 @@ func TestDeleteEndpointImplHnsV2ForIB(t *testing.T) { } mockCli := NewMockEndpointClient(nil) - err := nw.deleteEndpointImpl(netlink.NewMockNetlink(false, ""), platform.NewMockExecClient(false), mockCli, netio.NewMockNetIO(false, 0), NewMockNamespaceClient(), iptables.NewClient(), &ep) + err := nw.deleteEndpointImpl(netlink.NewMockNetlink(false, ""), platform.NewMockExecClient(false), mockCli, + netio.NewMockNetIO(false, 0), NewMockNamespaceClient(), iptables.NewClient(), &mockDHCP{}, &ep) if err != nil { t.Fatal("endpoint deletion for IB is executed") } @@ -134,7 +135,8 @@ func TestDeleteEndpointImplHnsV2WithEmptyHNSID(t *testing.T) { // should return nil because HnsID is empty mockCli := NewMockEndpointClient(nil) - err := nw.deleteEndpointImpl(netlink.NewMockNetlink(false, ""), platform.NewMockExecClient(false), mockCli, netio.NewMockNetIO(false, 0), NewMockNamespaceClient(), iptables.NewClient(), &ep) + err := nw.deleteEndpointImpl(netlink.NewMockNetlink(false, ""), platform.NewMockExecClient(false), mockCli, + netio.NewMockNetIO(false, 0), NewMockNamespaceClient(), iptables.NewClient(), &mockDHCP{}, &ep) if err != nil { t.Fatal("endpoint deletion gets executed") } @@ -492,7 +494,7 @@ func TestNewEndpointImplHnsv2ForIBHappyPath(t *testing.T) { // Happy Path endpoint, err := nw.newEndpointImpl(nil, netlink.NewMockNetlink(false, ""), platform.NewMockExecClient(false), - netio.NewMockNetIO(false, 0), NewMockEndpointClient(nil), NewMockNamespaceClient(), iptables.NewClient(), epInfo) + netio.NewMockNetIO(false, 0), NewMockEndpointClient(nil), NewMockNamespaceClient(), iptables.NewClient(), &mockDHCP{}, epInfo) if endpoint != nil || err != nil { t.Fatalf("Endpoint is created for IB due to %v", err) @@ -522,7 +524,7 @@ func 
TestNewEndpointImplHnsv2ForIBUnHappyPath(t *testing.T) { // Set UnHappy Path _, err := nw.newEndpointImpl(nil, netlink.NewMockNetlink(false, ""), platform.NewMockExecClient(true), - netio.NewMockNetIO(false, 0), NewMockEndpointClient(nil), NewMockNamespaceClient(), iptables.NewClient(), epInfo) + netio.NewMockNetIO(false, 0), NewMockEndpointClient(nil), NewMockNamespaceClient(), iptables.NewClient(), &mockDHCP{}, epInfo) if err == nil { t.Fatal("Failed to test Endpoint creation for IB with unhappy path") @@ -562,205 +564,13 @@ func TestCreateAndDeleteEndpointImplHnsv2ForDelegatedHappyPath(t *testing.T) { } mockCli := NewMockEndpointClient(nil) - err = nw.deleteEndpointImpl(netlink.NewMockNetlink(false, ""), platform.NewMockExecClient(false), mockCli, netio.NewMockNetIO(false, 0), NewMockNamespaceClient(), iptables.NewClient(), ep) + err = nw.deleteEndpointImpl(netlink.NewMockNetlink(false, ""), platform.NewMockExecClient(false), mockCli, + netio.NewMockNetIO(false, 0), NewMockNamespaceClient(), iptables.NewClient(), &mockDHCP{}, ep) if err != nil { t.Fatalf("Failed to delete endpoint for Delegated NIC due to %v", err) } } -// Test: if hnsID is empty, the endpoint and network cannot be deleted -func TestCreateAndDeleteEndpointStateForAccelnetNICWithEmptyHNSId(t *testing.T) { - nm := &networkManager{ - ExternalInterfaces: map[string]*externalInterface{}, - } - - // this hnsv2 variable overwrites the package level variable in network - // we do this to avoid passing around os specific objects in platform agnostic code - hnsFake := hnswrapper.NewHnsv2wrapperFake() - - Hnsv2 = hnswrapper.Hnsv2wrapperwithtimeout{ - Hnsv2: hnsFake, - HnsCallTimeout: 5 * time.Second, - } - - // create network for AccelnetNIC - accelnetNetwork := &hcn.HostComputeNetwork{ - Id: networkID, - Name: networkName, - } - _, err := Hnsv2.CreateNetwork(accelnetNetwork) - if err != nil { - t.Fatalf("Failed to create network for accelnetNIC due to %v", err) - } - - // make sure two networks are created: - networks := hnsFake.Cache.GetNetworks() - if len(networks) != 1 { - t.Fatal("Failed to create one network for accelnetNIC") - } - - // create endpoint for accelnetNIC - accelnetEndpointID := "accelnetEndpoint" - acclnetEndpoint := &hcn.HostComputeEndpoint{ - Id: accelnetEndpointID, - Name: accelnetEndpointID, - HostComputeNetwork: networkID, - MacAddress: macAddress, - } - - _, err = Hnsv2.CreateEndpoint(acclnetEndpoint) - if err != nil { - t.Fatalf("Failed to create endpoint for accelnetNIC due to %v", err) - } - - // make sure two endpoints are created: - endpoints := hnsFake.Cache.GetEndpoints() - if len(endpoints) != 1 { - t.Fatal("Failed to create an endpoint for accelnetNIC") - } - - accelnetEpInfo := &EndpointInfo{ - EndpointID: accelnetEndpointID, - Data: make(map[string]interface{}), - IfName: "eth1", - NICType: cns.NodeNetworkInterfaceAccelnetFrontendNIC, - MacAddress: net.HardwareAddr(macAddress), - HNSNetworkID: networkID, - } - - // mock DeleteEndpointState() to make sure endpoint and network is deleted from cache - // network and endpoint should be deleted from cache for accelnetnic - err = nm.DeleteEndpointState(networkID, accelnetEpInfo) - if err == nil { - t.Fatal("Successfully delete network when hns ID is empty") - } -} - -// mock to invoke endpointState deletion with InfraNIC + AccelnetNIC -func TestDeleteEndpointStateForInfraAccelnetNIC(t *testing.T) { - nm := &networkManager{ - ExternalInterfaces: map[string]*externalInterface{}, - } - - // this hnsv2 variable overwrites the package level variable in 
network - // we do this to avoid passing around os specific objects in platform agnostic code - hnsFake := hnswrapper.NewHnsv2wrapperFake() - - Hnsv2 = hnswrapper.Hnsv2wrapperwithtimeout{ - Hnsv2: hnsFake, - HnsCallTimeout: 5 * time.Second, - } - - // create network for InfraNIC - infraNetworkID := "azure" - infraNetwork := &hcn.HostComputeNetwork{ - Name: infraNetworkID, - } - - _, err := Hnsv2.CreateNetwork(infraNetwork) - if err != nil { - t.Fatalf("Failed to create network for infraNIC due to %v", err) - } - - // create network for AccelnetNIC - accelnetNetwork := &hcn.HostComputeNetwork{ - Id: networkID, - Name: networkName, - Type: l1vhNetworkType, - } - _, err = Hnsv2.CreateNetwork(accelnetNetwork) - if err != nil { - t.Fatalf("Failed to create network for AccelnetNIC due to %v", err) - } - - // make sure two networks are created: - networks := hnsFake.Cache.GetNetworks() - if len(networks) != 2 { - t.Fatal("Failed to create two networks for infraNIC and AccelnetNIC") - } - - // create endpoint for InfraNIC - infraEndpointID := "infraEndpoint" - infraEndpoint := &hcn.HostComputeEndpoint{ - Id: infraEndpointID, - Name: infraEndpointID, - HostComputeNetwork: infraNetworkID, - } - - _, err = Hnsv2.CreateEndpoint(infraEndpoint) - if err != nil { - t.Fatalf("Failed to create endpoint for infraNIC due to %v", err) - } - - // create endpoint for accelnet NIC - accelnetEndpointID := "accelnetEndpoint" - accelnetEndpoint := &hcn.HostComputeEndpoint{ - Id: accelnetEndpointID, - Name: accelnetEndpointID, - HostComputeNetwork: accelnetEndpointID, - MacAddress: macAddress, - } - - _, err = Hnsv2.CreateEndpoint(accelnetEndpoint) - if err != nil { - t.Fatalf("Failed to create endpoint for AccelnetNIC due to %v", err) - } - - // make sure two endpoints are created: - endpoints := hnsFake.Cache.GetEndpoints() - if len(endpoints) != 2 { - t.Fatal("Failed to create two endpoints for infraNIC and AccelnetNIC") - } - - infraEpInfo := &EndpointInfo{ - EndpointID: infraEndpointID, - Data: make(map[string]interface{}), - IfName: "eth0", - NICType: cns.InfraNIC, - HNSEndpointID: infraEndpointID, - HNSNetworkID: infraNetworkID, - } - - accelnetEpInfo := &EndpointInfo{ - EndpointID: accelnetEndpointID, - Data: make(map[string]interface{}), - IfName: "eth1", - NICType: cns.NodeNetworkInterfaceAccelnetFrontendNIC, - MacAddress: net.HardwareAddr(macAddress), - HNSEndpointID: accelnetEndpointID, - HNSNetworkID: networkID, - } - - // mock DeleteEndpointState() to make sure endpoint and network is deleted from cache - // network and endpoint should be deleted from cache for accelnetNIC - err = nm.DeleteEndpointState(networkID, accelnetEpInfo) - if err != nil { - t.Fatalf("Failed to delete endpoint for accelnetNIC state due to %v", err) - } - - // endpoint should be deleted from cache for accelnetNIC and network is still there - err = nm.DeleteEndpointState(infraNetworkID, infraEpInfo) - if err != nil { - t.Fatalf("Failed to delete endpoint for infraNIC state due to %v", err) - } - - // check cache if endpoints are deleted - endpoints = hnsFake.Cache.GetEndpoints() - if len(endpoints) != 0 { - t.Fatalf("Not all endpoints are deleted, the remaining endpoints are %v", endpoints) - } - - // check cache if accelnet network is deleted and infra network is still there - networks = hnsFake.Cache.GetNetworks() - if len(networks) != 1 { - t.Fatalf("Failed to delete networks") - } - - if _, ok := networks[infraNetworkID]; !ok { - t.Fatal("Network for InfraNIC does not exist") - } -} - // mock to nivoke endpointState deletion 
with InfraNIC + DelegatedNIC func TestDeleteEndpointStateForInfraDelegatedNIC(t *testing.T) { nm := &networkManager{ diff --git a/network/errors.go b/network/errors.go index 95ab57d2ca..c4c808357f 100644 --- a/network/errors.go +++ b/network/errors.go @@ -3,6 +3,9 @@ package network import "errors" var ( - errSubnetV6NotFound = errors.New("Couldn't find ipv6 subnet in network info") - errV6SnatRuleNotSet = errors.New("ipv6 snat rule not set. Might be VM ipv6 address missing") + errSubnetV6NotFound = errors.New("Couldn't find ipv6 subnet in network info") // nolint + errV6SnatRuleNotSet = errors.New("ipv6 snat rule not set. Might be VM ipv6 address missing") // nolint + ErrEndpointStateNotFound = errors.New("endpoint state could not be found in the statefile") + ErrConnectionFailure = errors.New("couldn't connect to CNS") + ErrGetEndpointStateFailure = errors.New("failure to obtain the endpoint state") ) diff --git a/network/manager.go b/network/manager.go index 607ec6cc1e..7bc1441fea 100644 --- a/network/manager.go +++ b/network/manager.go @@ -12,6 +12,7 @@ import ( "github.com/Azure/azure-container-networking/cns" cnsclient "github.com/Azure/azure-container-networking/cns/client" "github.com/Azure/azure-container-networking/cns/restserver" + "github.com/Azure/azure-container-networking/cns/types" "github.com/Azure/azure-container-networking/common" "github.com/Azure/azure-container-networking/log" "github.com/Azure/azure-container-networking/netio" @@ -86,6 +87,7 @@ type networkManager struct { plClient platform.ExecClient nsClient NamespaceClientInterface iptablesClient ipTablesClient + dhcpClient dhcpClient sync.Mutex } @@ -123,7 +125,7 @@ type NetworkManager interface { // Creates a new network manager. func NewNetworkManager(nl netlink.NetlinkInterface, plc platform.ExecClient, netioCli netio.NetIOInterface, nsc NamespaceClientInterface, - iptc ipTablesClient, + iptc ipTablesClient, dhcpc dhcpClient, ) (NetworkManager, error) { nm := &networkManager{ ExternalInterfaces: make(map[string]*externalInterface), @@ -132,6 +134,7 @@ func NewNetworkManager(nl netlink.NetlinkInterface, plc platform.ExecClient, net netio: netioCli, nsClient: nsc, iptablesClient: iptc, + dhcpClient: dhcpc, } return nm, nil @@ -386,7 +389,7 @@ func (nm *networkManager) createEndpoint(cli apipaClient, networkID string, epIn } } - ep, err := nw.newEndpoint(cli, nm.netlink, nm.plClient, nm.netio, nm.nsClient, nm.iptablesClient, epInfo) + ep, err := nw.newEndpoint(cli, nm.netlink, nm.plClient, nm.netio, nm.nsClient, nm.iptablesClient, nm.dhcpClient, epInfo) if err != nil { return nil, err } @@ -395,7 +398,7 @@ func (nm *networkManager) createEndpoint(cli apipaClient, networkID string, epIn if err != nil { logger.Error("Create endpoint failure", zap.Error(err)) logger.Info("Cleanup resources") - delErr := nw.deleteEndpoint(nm.netlink, nm.plClient, nm.netio, nm.nsClient, nm.iptablesClient, ep.Id) + delErr := nw.deleteEndpoint(nm.netlink, nm.plClient, nm.netio, nm.nsClient, nm.iptablesClient, nm.dhcpClient, ep.Id) if delErr != nil { logger.Error("Deleting endpoint after create endpoint failure failed with", zap.Error(delErr)) } @@ -419,13 +422,13 @@ func (nm *networkManager) UpdateEndpointState(eps []*endpoint) error { } ifnameToIPInfoMap := generateCNSIPInfoMap(eps) // key : interface name, value : IPInfo - for _, ipinfo := range ifnameToIPInfoMap { - logger.Info("Update endpoint state", zap.String("hnsEndpointID", ipinfo.HnsEndpointID), zap.String("hnsNetworkID", ipinfo.HnsNetworkID), + for key, ipinfo := range 
ifnameToIPInfoMap { + logger.Info("Update endpoint state", zap.String("ifname", key), zap.String("hnsEndpointID", ipinfo.HnsEndpointID), zap.String("hnsNetworkID", ipinfo.HnsNetworkID), zap.String("hostVethName", ipinfo.HostVethName), zap.String("macAddress", ipinfo.MacAddress), zap.String("nicType", string(ipinfo.NICType))) } - // we assume all endpoints have the same container id cnsEndpointID := eps[0].ContainerID + if err := validateUpdateEndpointState(cnsEndpointID, ifnameToIPInfoMap); err != nil { return errors.Wrap(err, "failed to validate update endpoint state that will be sent to cns") } @@ -436,6 +439,7 @@ func (nm *networkManager) UpdateEndpointState(eps []*endpoint) error { logger.Info("Update endpoint API returend ", zap.String("podname: ", response.ReturnCode.String())) return nil } + func validateUpdateEndpointState(endpointID string, ifNameToIPInfoMap map[string]*restserver.IPInfo) error { if endpointID == "" { return errors.New("endpoint id empty while validating update endpoint state") @@ -454,7 +458,13 @@ func validateUpdateEndpointState(endpointID string, ifNameToIPInfoMap map[string func (nm *networkManager) GetEndpointState(networkID, containerID string) ([]*EndpointInfo, error) { endpointResponse, err := nm.CnsClient.GetEndpoint(context.TODO(), containerID) if err != nil { - return nil, errors.Wrapf(err, "Get endpoint API returned with error") + if endpointResponse.Response.ReturnCode == types.NotFound { + return nil, ErrEndpointStateNotFound + } + if endpointResponse.Response.ReturnCode == types.ConnectionError { + return nil, ErrConnectionFailure + } + return nil, ErrGetEndpointStateFailure } epInfos := cnsEndpointInfotoCNIEpInfos(endpointResponse.EndpointInfo, containerID) @@ -489,7 +499,7 @@ func (nm *networkManager) DeleteEndpoint(networkID, endpointID string, epInfo *E return err } - err = nw.deleteEndpoint(nm.netlink, nm.plClient, nm.netio, nm.nsClient, nm.iptablesClient, endpointID) + err = nw.deleteEndpoint(nm.netlink, nm.plClient, nm.netio, nm.nsClient, nm.iptablesClient, nm.dhcpClient, endpointID) if err != nil { return err } @@ -531,7 +541,7 @@ func (nm *networkManager) DeleteEndpointState(networkID string, epInfo *Endpoint } logger.Info("Deleting endpoint with", zap.String("Endpoint Info: ", epInfo.PrettyString()), zap.String("HNISID : ", ep.HnsId)) - err := nw.deleteEndpointImpl(netlink.NewNetlink(), platform.NewExecClient(logger), nil, nil, nil, nil, ep) + err := nw.deleteEndpointImpl(netlink.NewNetlink(), platform.NewExecClient(logger), nil, nil, nil, nil, nil, ep) if err != nil { return err } diff --git a/network/manager_test.go b/network/manager_test.go index e18b1c1ba0..8c8545b97a 100644 --- a/network/manager_test.go +++ b/network/manager_test.go @@ -486,51 +486,6 @@ var _ = Describe("Test Manager", func() { }, )) }) - It("Should generate the cns endpoint info data from the endpoint structs for infraNIC+AccelnetNIC", func() { - mac1, _ := net.ParseMAC("12:34:56:78:9a:bc") - mac2, _ := net.ParseMAC("22:34:56:78:9a:bc") - endpoints := []*endpoint{ - { - IfName: "eth0", - NICType: cns.InfraNIC, - HnsId: "hnsEndpointID1", - HNSNetworkID: "hnsNetworkID1", - HostIfName: "hostIfName1", - MacAddress: mac1, - }, - { - IfName: "eth1", - NICType: cns.NodeNetworkInterfaceAccelnetFrontendNIC, - HnsId: "hnsEndpointID2", - HNSNetworkID: "hnsNetworkID2", - HostIfName: "hostIfName2", - MacAddress: mac2, - }, - } - cnsEpInfos := generateCNSIPInfoMap(endpoints) - Expect(len(cnsEpInfos)).To(Equal(2)) - - Expect(cnsEpInfos["eth0"]).To(Equal( - &restserver.IPInfo{ - 
NICType: cns.InfraNIC, - HnsEndpointID: "hnsEndpointID1", - HnsNetworkID: "hnsNetworkID1", - HostVethName: "hostIfName1", - MacAddress: "12:34:56:78:9a:bc", - }, - )) - - Expect(cnsEpInfos).To(HaveKey("eth1")) - Expect(cnsEpInfos["eth1"]).To(Equal( - &restserver.IPInfo{ - NICType: cns.NodeNetworkInterfaceAccelnetFrontendNIC, - HnsEndpointID: "hnsEndpointID2", - HnsNetworkID: "hnsNetworkID2", - HostVethName: "hostIfName2", - MacAddress: "22:34:56:78:9a:bc", - }, - )) - }) }) }) }) diff --git a/network/network_windows.go b/network/network_windows.go index 41093eb0fe..a467b20983 100644 --- a/network/network_windows.go +++ b/network/network_windows.go @@ -24,7 +24,6 @@ import ( const ( // HNS network types. hnsL2bridge = "l2bridge" - hnsL2tunnel = "l2tunnel" CnetAddressSpace = "cnetAddressSpace" vEthernetAdapterPrefix = "vEthernet" baseDecimal = 10 @@ -113,6 +112,7 @@ func (nm *networkManager) newNetworkImplHnsV1(nwInfo *EndpointInfo, extIf *exter // Initialize HNS network. hnsNetwork := &hcsshim.HNSNetwork{ Name: nwInfo.NetworkID, + Type: hnsL2bridge, NetworkAdapterName: networkAdapterName, Policies: policy.SerializePolicies(policy.NetworkPolicy, nwInfo.NetworkPolicies, nil, false, false), } @@ -132,16 +132,6 @@ func (nm *networkManager) newNetworkImplHnsV1(nwInfo *EndpointInfo, extIf *exter vlanid = (int)(vlanPolicy.VLAN) } - // Set network mode. - switch nwInfo.Mode { - case opModeBridge: - hnsNetwork.Type = hnsL2bridge - case opModeTunnel: - hnsNetwork.Type = hnsL2tunnel - default: - return nil, errNetworkModeInvalid - } - // Populate subnets. for _, subnet := range nwInfo.Subnets { hnsSubnet := hcsshim.Subnet{ @@ -233,6 +223,7 @@ func (nm *networkManager) configureHcnNetwork(nwInfo *EndpointInfo, extIf *exter // Initialize HNS network. hcnNetwork := &hcn.HostComputeNetwork{ Name: nwInfo.NetworkID, + Type: hcn.L2Bridge, Ipams: []hcn.Ipam{ { Type: hcnIpamTypeStatic, @@ -287,27 +278,9 @@ func (nm *networkManager) configureHcnNetwork(nwInfo *EndpointInfo, extIf *exter vlanid = (int)(vlanID) } - // Set network mode. - switch nwInfo.Mode { - case opModeBridge: - hcnNetwork.Type = hcn.L2Bridge - case opModeTunnel: - hcnNetwork.Type = hcn.L2Tunnel - default: - return nil, errNetworkModeInvalid - } - - // DelegatedNIC flag: hcn.DisableHostPort(1024) - if nwInfo.NICType == cns.NodeNetworkInterfaceFrontendNIC { - hcnNetwork.Type = hcn.Transparent - // set transparent network as non-persistent so that networks will be gone after the node gets rebooted - // hcnNetwork.flags = hcn.DisableHostPort | hcn.EnableNonPersistent (1024 + 8 = 1032) - hcnNetwork.Flags = hcn.DisableHostPort | hcn.EnableNonPersistent - } - - // AccelnetNIC flag: hcn.EnableIov(9216) + // AccelnetNIC flag: hcn.EnableIov(9216) - treat Delegated/FrontendNIC also the same as Accelnet // For L1VH with accelnet, hcn.DisableHostPort and hcn.EnableIov must be configured - if nwInfo.NICType == cns.NodeNetworkInterfaceAccelnetFrontendNIC { + if nwInfo.NICType == cns.NodeNetworkInterfaceFrontendNIC { hcnNetwork.Type = hcn.Transparent // set transparent network as non-persistent so that networks will be gone after the node gets rebooted // hcnNetwork.flags = hcn.DisableHostPort | hcn.EnableIov | hcn.EnableNonPersistent (1024 + 8192 + 8 = 9224) @@ -454,7 +427,7 @@ func (nm *networkManager) newNetworkImpl(nwInfo *EndpointInfo, extIf *externalIn // DeleteNetworkImpl deletes an existing container network. 
func (nm *networkManager) deleteNetworkImpl(nw *network, nicType cns.NICType) error { - if nicType != cns.NodeNetworkInterfaceFrontendNIC && nicType != cns.NodeNetworkInterfaceAccelnetFrontendNIC { //nolint + if nicType != cns.NodeNetworkInterfaceFrontendNIC { //nolint return nil } diff --git a/network/network_windows_test.go b/network/network_windows_test.go index 1aa0cbb670..eabe114648 100644 --- a/network/network_windows_test.go +++ b/network/network_windows_test.go @@ -60,7 +60,6 @@ func TestNewAndDeleteNetworkImplHnsV2(t *testing.T) { } err = nm.deleteNetworkImplHnsV2(network) - if err != nil { fmt.Printf("+%v", err) t.Fatal(err) @@ -95,7 +94,6 @@ func TestSuccesfulNetworkCreationWhenAlreadyExists(t *testing.T) { } _, err = nm.newNetworkImplHnsV2(nwInfo, extInterface) - if err != nil { fmt.Printf("+%v", err) t.Fatal(err) @@ -468,44 +466,6 @@ func TestNewAndDeleteNetworkImplHnsV2ForDelegated(t *testing.T) { } err = nm.deleteNetworkImpl(network, cns.NodeNetworkInterfaceFrontendNIC) - - if err != nil { - fmt.Printf("+%v", err) - t.Fatal(err) - } -} - -// mock hns network creation and deletion for AccelnetNIC -func TestNewAndDeleteNetworkImplHnsV2ForAccelnet(t *testing.T) { - nm := &networkManager{ - ExternalInterfaces: map[string]*externalInterface{}, - } - - // this hnsv2 variable overwrites the package level variable in network - // we do this to avoid passing around os specific objects in platform agnostic code - Hnsv2 = hnswrapper.NewHnsv2wrapperFake() - - nwInfo := &EndpointInfo{ - NetworkID: "d3e97a83-ba4c-45d5-ba88-dc56757ece28", - MasterIfName: "eth0", - Mode: "bridge", - NICType: cns.NodeNetworkInterfaceAccelnetFrontendNIC, - MacAddress: net.HardwareAddr("12:34:56:78:9a:bc"), - } - - extInterface := &externalInterface{ - Name: "eth0", - Subnets: []string{"subnet1", "subnet2"}, - } - - network, err := nm.newNetworkImplHnsV2(nwInfo, extInterface) - if err != nil { - fmt.Printf("+%v", err) - t.Fatal(err) - } - - err = nm.deleteNetworkImpl(network, cns.NodeNetworkInterfaceAccelnetFrontendNIC) - if err != nil { fmt.Printf("+%v", err) t.Fatal(err) @@ -525,40 +485,6 @@ func TestSkipNetworkDeletion(t *testing.T) { } } -func TestTransparentNetworkCreationForAccelnet(t *testing.T) { - nm := &networkManager{ - ExternalInterfaces: map[string]*externalInterface{}, - } - - // this hnsv2 variable overwrites the package level variable in network - // we do this to avoid passing around os specific objects in platform agnostic code - Hnsv2 = hnswrapper.NewHnsv2wrapperFake() - - nwInfo := &EndpointInfo{ - NetworkID: "d3e97a83-ba4c-45d5-ba88-dc56757ece28", - MasterIfName: "eth1", - Mode: "bridge", - NICType: cns.NodeNetworkInterfaceAccelnetFrontendNIC, - } - - extInterface := &externalInterface{ - Name: "eth0", - Subnets: []string{"subnet1", "subnet2"}, - } - - _, err := nm.newNetworkImplHnsV2(nwInfo, extInterface) - if err != nil { - fmt.Printf("+%v", err) - t.Fatal(err) - } - - // create a network again with same name and it should return error for transparent network - _, err = nm.newNetworkImplHnsV2(nwInfo, extInterface) - if err == nil { - t.Fatal("network creation does not return error") - } -} - func TestTransparentNetworkCreationForDelegated(t *testing.T) { nm := &networkManager{ ExternalInterfaces: map[string]*externalInterface{}, @@ -593,44 +519,49 @@ func TestTransparentNetworkCreationForDelegated(t *testing.T) { } } -// Test Configure HCN Network for Swiftv2 DelegatedNIC HostComputeNetwork fields -func TestConfigureHCNNetworkSwiftv2DelegatedNIC(t *testing.T) { - 
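// A minimal sketch of the HCN flag arithmetic cited in the comments above, with
// local constants mirroring the hcn package values the diff names
// (EnableNonPersistent=8, DisableHostPort=1024, EnableIov=8192). The unified
// FrontendNIC path ORs all three, giving the 9224 the comment calls out.
package main

import "fmt"

type networkFlags uint32

const (
	enableNonPersistent networkFlags = 8    // network is not persisted across reboots
	disableHostPort     networkFlags = 1024 // no host port mapping for L1VH
	enableIov           networkFlags = 8192 // SR-IOV acceleration for accelnet
)

func main() {
	flags := disableHostPort | enableIov | enableNonPersistent
	fmt.Println(flags) // 9224, i.e. 1024 + 8192 + 8
}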
expectedSwiftv2NetworkMode := hcn.Transparent - expectedSwifv2NetworkFlags := hcn.EnableNonPersistent | hcn.DisableHostPort +// Test Configure HCN network for infraNIC ensuring the hcn network type is always l2 bridge +func TestConfigureHCNNetworkInfraNIC(t *testing.T) { + expectedHcnNetworkType := hcn.L2Bridge nm := &networkManager{ ExternalInterfaces: map[string]*externalInterface{}, } extIf := externalInterface{ - Name: "eth1", + Name: "eth0", } nwInfo := &EndpointInfo{ - AdapterName: "eth1", + AdapterName: "eth0", NetworkID: "d3e97a83-ba4c-45d5-ba88-dc56757ece28", - MasterIfName: "eth1", - Mode: "bridge", - NICType: cns.NodeNetworkInterfaceFrontendNIC, + MasterIfName: "eth0", + NICType: cns.InfraNIC, + IfIndex: 1, + EndpointID: "753d3fb6-e9b3-49e2-a109-2acc5dda61f1", + ContainerID: "545055c2-1462-42c8-b222-e75d0b291632", + NetNsPath: "fakeNameSpace", + IfName: "eth0", + Data: make(map[string]interface{}), + EndpointDNS: DNSInfo{ + Suffix: "10.0.0.0", + Servers: []string{"10.0.0.1, 10.0.0.2"}, + Options: nil, + }, + HNSNetworkID: "853d3fb6-e9b3-49e2-a109-2acc5dda61f1", } hostComputeNetwork, err := nm.configureHcnNetwork(nwInfo, &extIf) if err != nil { - t.Fatalf("Failed to configure hcn network for delegatedVMNIC interface due to: %v", err) - } - - if hostComputeNetwork.Type != expectedSwiftv2NetworkMode { - t.Fatalf("host network mode is not configured as %v mode when interface NIC type is delegatedVMNIC", expectedSwiftv2NetworkMode) + t.Fatalf("Failed to configure hcn network for infraNIC interface due to: %v", err) } - // make sure network type is transparent and flags is 1032 - if hostComputeNetwork.Flags != expectedSwifv2NetworkFlags { - t.Fatalf("host network flags is not configured as %v when interface NIC type is delegatedVMNIC", expectedSwifv2NetworkFlags) + if hostComputeNetwork.Type != expectedHcnNetworkType { + t.Fatalf("Host network mode is not configured as %v mode when interface NIC type is infraNIC", expectedHcnNetworkType) } } -// Test Configure HCN Network for Swiftv2 AccelnetNIC HostComputeNetwork fields -func TestConfigureHCNNetworkSwiftv2AccelnetNIC(t *testing.T) { +// Test Configure HCN Network for Swiftv2 DelegatedNIC HostComputeNetwork fields +func TestConfigureHCNNetworkSwiftv2DelegatedNIC(t *testing.T) { expectedSwiftv2NetworkMode := hcn.Transparent expectedSwifv2NetworkFlags := hcn.EnableNonPersistent | hcn.DisableHostPort | hcn.EnableIov @@ -647,20 +578,21 @@ func TestConfigureHCNNetworkSwiftv2AccelnetNIC(t *testing.T) { NetworkID: "d3e97a83-ba4c-45d5-ba88-dc56757ece28", MasterIfName: "eth1", Mode: "bridge", - NICType: cns.NodeNetworkInterfaceAccelnetFrontendNIC, + NICType: cns.NodeNetworkInterfaceFrontendNIC, } hostComputeNetwork, err := nm.configureHcnNetwork(nwInfo, &extIf) if err != nil { - t.Fatalf("Failed to configure hcn network for accelnetNIC interface due to: %v", err) + t.Fatalf("Failed to configure hcn network for delegatedVMNIC interface due to: %v", err) } if hostComputeNetwork.Type != expectedSwiftv2NetworkMode { - t.Fatalf("host network mode is not configured as %v mode when interface NIC type is accelnetNIC", expectedSwiftv2NetworkMode) + t.Fatalf("host network mode is not configured as %v mode when interface NIC type is delegatedVMNIC", expectedSwiftv2NetworkMode) } // make sure network type is transparent and flags is 9224 + // TODO: check if this is expected for both delegated&accelnet if hostComputeNetwork.Flags != expectedSwifv2NetworkFlags { - t.Fatalf("host network flags is not configured as %v when interface NIC type is accelnetNIC",
expectedSwifv2NetworkFlags) + t.Fatalf("host network flags is not configured as %v when interface NIC type is delegatedVMNIC", expectedSwifv2NetworkFlags) } } diff --git a/network/ovs_endpoint_snatroute_linux.go b/network/ovs_endpoint_snatroute_linux.go index 12bd40ab80..3f0858af5a 100644 --- a/network/ovs_endpoint_snatroute_linux.go +++ b/network/ovs_endpoint_snatroute_linux.go @@ -33,6 +33,7 @@ func (client *OVSEndpointClient) NewSnatClient(snatBridgeIP, localIP string, epI client.netlink, client.plClient, client.iptablesClient, + client.netioshim, ) } } diff --git a/network/ovs_networkclient_linux_test.go b/network/ovs_networkclient_linux_test.go index 924468b474..928ecb153d 100644 --- a/network/ovs_networkclient_linux_test.go +++ b/network/ovs_networkclient_linux_test.go @@ -28,27 +28,6 @@ func TestAddRoutes(t *testing.T) { } } -func TestCreateBridge(t *testing.T) { - ovsctlClient := ovsctl.NewMockOvsctl(false, "", "") - f, err := os.Create(ovsConfigFile) - if err != nil { - t.Errorf("Unable to create %v before test: %v", ovsConfigFile, err) - return - } - defer f.Close() - if _, err := f.WriteString("FORCE_COREFILES=yes"); err != nil { - t.Errorf("Unable to write to file %v: %v", ovsConfigFile, err) - } - - ovsClient := NewOVSClient(bridgeName, hostIntf, ovsctlClient, - netlink.NewMockNetlink(false, ""), platform.NewMockExecClient(false)) - if err := ovsClient.CreateBridge(); err != nil { - t.Errorf("Error creating OVS bridge: %v", err) - } - - os.Remove(ovsConfigFile) -} - func TestDeleteBridge(t *testing.T) { ovsctlClient := ovsctl.NewMockOvsctl(false, "", "") diff --git a/network/policy/policy_windows_test.go b/network/policy/policy_windows_test.go index 0fa59845f4..1278da5214 100644 --- a/network/policy/policy_windows_test.go +++ b/network/policy/policy_windows_test.go @@ -4,8 +4,10 @@ package policy import ( + "encoding/json" "testing" + "github.com/Microsoft/hcsshim/hcn" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" ) @@ -40,11 +42,104 @@ var _ = Describe("Windows Policies", func() { }}`), } - expected_policy := `{"InboundProxyPort":"15003","OutboundProxyPort":"15001","FilterTuple":{"Protocols":"6"},"UserSID":"S-1-5-32-556","InboundExceptions":{},"OutboundExceptions":{}}` + expectedPolicy := `{"InboundProxyPort":"15003","OutboundProxyPort":"15001","FilterTuple":{"Protocols":"6"},"UserSID":"S-1-5-32-556","InboundExceptions":{},"OutboundExceptions":{}}` generatedPolicy, err := GetHcnL4WFPProxyPolicy(policy) Expect(err).To(BeNil()) - Expect(string(generatedPolicy.Settings)).To(Equal(expected_policy)) + Expect(string(generatedPolicy.Settings)).To(Equal(expectedPolicy)) + }) + }) + + Describe("Test GetHcnACLPolicy", func() { + It("Should raise error for invalid json", func() { + policy := Policy{ + Type: ACLPolicy, + Data: []byte(`invalid json`), + } + + _, err := GetHcnACLPolicy(policy) + Expect(err).NotTo(BeNil()) + }) + + It("Should marshall the ACL policy correctly", func() { + policy := Policy{ + Type: ACLPolicy, + Data: []byte(`{ + "Type": "ACL", + "Protocols": "TCP", + "Direction": "In", + "Action": "Allow" + }`), + } + expectedPolicy := `{"Protocols":"TCP","Action":"Allow","Direction":"In"}` + + generatedPolicy, err := GetHcnACLPolicy(policy) + Expect(err).To(BeNil()) + Expect(string(generatedPolicy.Settings)).To(Equal(expectedPolicy)) + }) + }) + + Describe("Test GetHcnOutBoundNATPolicy", func() { + It("Should raise error for invalid json", func() { + policy := Policy{ + Type: OutBoundNatPolicy, + Data: []byte(`invalid json`), + } + + _, err := GetHcnOutBoundNATPolicy(policy, nil) + Expect(err).NotTo(BeNil()) + }) + + It("Should marshall the OutBoundNAT policy correctly", func() { + policy := Policy{ + Type: OutBoundNatPolicy, + Data: []byte(`{ + "Type": "OutBoundNAT", + "ExceptionList": ["10.240.0.0/16","10.0.0.0/8"] + }`), + } + expectedPolicy := `{"Exceptions":["10.240.0.0/16","10.0.0.0/8"]}` + + generatedPolicy, err := GetHcnOutBoundNATPolicy(policy, nil) + Expect(err).To(BeNil()) + Expect(string(generatedPolicy.Settings)).To(Equal(expectedPolicy)) + + // test getHncOutBoundNATPolicy with epInfoData + expectedPolicy = `{"Exceptions":["10.240.0.0/16","10.0.0.0/8","50.1.1.1","60.1.1.1"]}` + + epInfoData := make(map[string]interface{}) + epInfoData[CnetAddressSpace] = []string{"50.1.1.1", "60.1.1.1"} + generatedPolicy, err = GetHcnOutBoundNATPolicy(policy, epInfoData) + Expect(err).To(BeNil()) + Expect(string(generatedPolicy.Settings)).To(Equal(expectedPolicy)) + }) + }) + + Describe("Test GetHcnRoutePolicy", func() { + It("Should raise error for invalid json", func() { + policy := Policy{ + Type: RoutePolicy, + Data: []byte(`invalid json`), + } + + _, err := GetHcnRoutePolicy(policy) + Expect(err).NotTo(BeNil()) + }) + + It("Should marshall the Route policy correctly", func() { + policy := Policy{ + Type: RoutePolicy, + Data: []byte(`{ + "Type": "ROUTE", + "DestinationPrefix": "10.0.0.0/8", + "NeedEncap": true + }`), + } + expectedPolicy := `{"DestinationPrefix":"10.0.0.0/8","NeedEncap":true}` + + generatedPolicy, err := GetHcnRoutePolicy(policy) + Expect(err).To(BeNil()) + Expect(string(generatedPolicy.Settings)).To(Equal(expectedPolicy)) }) }) @@ -57,4 +152,138 @@ var _ = Describe("Windows Policies", func() { Expect(string(generatedPolicy.Settings)).To(Equal(expectedPolicy)) }) }) + + Describe("Test AddNATPolicyV1", func() { + It("Should marshall the NAT policy v1 correctly", func() { + expectedPolicy := `{"Type":"OutBoundNAT","Destinations":["168.63.129.16"]}` + + 
generatedPolicy, err := AddDnsNATPolicyV1() + Expect(err).To(BeNil()) + Expect(string(generatedPolicy)).To(Equal(expectedPolicy)) + }) + }) + + Describe("Test AddNATPolicyV2", func() { + It("Should marshall the NAT policy v2 correctly", func() { + vip := "vip" + destinations := []string{"192.168.1.1", "192.169.1.1"} + + expectedPolicy := `{"VirtualIP":"vip","Destinations":["192.168.1.1","192.169.1.1"]}` + + generatedPolicy, err := AddNATPolicyV2(vip, destinations) + Expect(err).To(BeNil()) + Expect(string(generatedPolicy.Settings)).To(Equal(expectedPolicy)) + }) + }) + + Describe("Test GetHcnEndpointPolicies", func() { + It("Should marshall the policy correctly", func() { + testPolicies := []Policy{} + + rawPortMappingPolicy, _ := json.Marshal(&hcn.PortMappingPolicySetting{ + ExternalPort: 8008, + InternalPort: 8080, + }) + + portMappingPolicy, _ := json.Marshal(&hcn.EndpointPolicy{ + Type: hcn.PortMapping, + Settings: rawPortMappingPolicy, + }) + + hnsPolicy := Policy{ + Type: PortMappingPolicy, + Data: portMappingPolicy, + } + + testPolicies = append(testPolicies, hnsPolicy) + + generatedPolicy, err := GetHcnEndpointPolicies(PortMappingPolicy, testPolicies, nil, false, true, nil) + Expect(err).To(BeNil()) + Expect(string(generatedPolicy[0].Settings)).To(Equal(string(rawPortMappingPolicy))) + }) + }) + + Describe("Test GetHcnEndpointPolicies with invalid policy type", func() { + It("Should return error with invalid policy type", func() { + testPolicies := []Policy{} + + rawPortMappingPolicy, _ := json.Marshal(&hcn.PortMappingPolicySetting{ + ExternalPort: 8008, + InternalPort: 8080, + }) + + portMappingPolicy, _ := json.Marshal(&hcn.EndpointPolicy{ + Type: "invalidType", // should return error with invalid policy type + Settings: rawPortMappingPolicy, + }) + + hnsPolicy := Policy{ + Type: PortMappingPolicy, + Data: portMappingPolicy, + } + + testPolicies = append(testPolicies, hnsPolicy) + + _, err := GetHcnEndpointPolicies(PortMappingPolicy, testPolicies, nil, false, true, nil) + Expect(err).NotTo(BeNil()) + }) + }) + + Describe("Test GetHcnEndpointPolicies with multiple policies", func() { + It("Should marshall all policies correctly", func() { + testPolicies := []Policy{} + + // add first portMapping policy to testPolicies + rawPortMappingPolicyOne, _ := json.Marshal(&hcn.PortMappingPolicySetting{ + ExternalPort: 8008, + InternalPort: 8080, + }) + + portMappingPolicyOne, _ := json.Marshal(&hcn.EndpointPolicy{ + Type: hcn.PortMapping, + Settings: rawPortMappingPolicyOne, + }) + + portMappinghnsPolicyOne := Policy{ + Type: PortMappingPolicy, + Data: portMappingPolicyOne, + } + + testPolicies = append(testPolicies, portMappinghnsPolicyOne) + + // add second portMapping policy to testPolicies + rawPortMappingPolicyTwo, _ := json.Marshal(&hcn.PortMappingPolicySetting{ + ExternalPort: 9008, + InternalPort: 9090, + }) + + portMappingPolicyTwo, _ := json.Marshal(&hcn.EndpointPolicy{ + Type: hcn.PortMapping, + Settings: rawPortMappingPolicyTwo, + }) + + portMappinghnsPolicyTwo := Policy{ + Type: PortMappingPolicy, + Data: portMappingPolicyTwo, + } + + testPolicies = append(testPolicies, portMappinghnsPolicyTwo) + + generatedPolicy, err := GetHcnEndpointPolicies(PortMappingPolicy, testPolicies, nil, false, true, nil) + Expect(err).To(BeNil()) + + expectedPolicy := []hcn.EndpointPolicy{ + { + Type: "PortMapping", + Settings: []byte(`{"InternalPort":8080,"ExternalPort":8008}`), + }, + { + Type: "PortMapping", + Settings: []byte(`{"InternalPort":9090,"ExternalPort":9008}`), + }, + } + + 
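// A minimal sketch of the two-level marshaling pattern the GetHcnEndpointPolicies
// tests here depend on: the setting struct is marshaled first, then wrapped in an
// EndpointPolicy envelope whose Settings field is raw JSON. The types are trimmed
// stand-ins for the hcn package's own.
package main

import (
	"encoding/json"
	"fmt"
)

type portMappingSetting struct {
	InternalPort uint16 `json:"InternalPort,omitempty"`
	ExternalPort uint16 `json:"ExternalPort,omitempty"`
}

type endpointPolicy struct {
	Type     string          `json:"Type"`
	Settings json.RawMessage `json:"Settings,omitempty"`
}

func main() {
	raw, _ := json.Marshal(portMappingSetting{InternalPort: 8080, ExternalPort: 8008})
	wrapped, _ := json.Marshal(endpointPolicy{Type: "PortMapping", Settings: raw})
	fmt.Println(string(wrapped))
	// {"Type":"PortMapping","Settings":{"InternalPort":8080,"ExternalPort":8008}}
}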
Expect(generatedPolicy).To(Equal(expectedPolicy)) + }) + }) }) diff --git a/network/secondary_endpoint_client_linux.go b/network/secondary_endpoint_client_linux.go index 46fc3c26f4..6d9d5c3230 100644 --- a/network/secondary_endpoint_client_linux.go +++ b/network/secondary_endpoint_client_linux.go @@ -1,8 +1,10 @@ package network import ( + "context" "os" "strings" + "time" "github.com/Azure/azure-container-networking/netio" "github.com/Azure/azure-container-networking/netlink" @@ -11,6 +13,7 @@ import ( "github.com/Azure/azure-container-networking/platform" "github.com/pkg/errors" "go.uber.org/zap" + "k8s.io/kubernetes/pkg/kubelet" ) var errorSecondaryEndpointClient = errors.New("SecondaryEndpointClient Error") @@ -25,6 +28,7 @@ type SecondaryEndpointClient struct { plClient platform.ExecClient netUtilsClient networkutils.NetworkUtils nsClient NamespaceClientInterface + dhcpClient dhcpClient ep *endpoint } @@ -33,6 +37,7 @@ func NewSecondaryEndpointClient( nioc netio.NetIOInterface, plc platform.ExecClient, nsc NamespaceClientInterface, + dhcpClient dhcpClient, endpoint *endpoint, ) *SecondaryEndpointClient { client := &SecondaryEndpointClient{ @@ -41,6 +46,7 @@ func NewSecondaryEndpointClient( plClient: plc, netUtilsClient: networkutils.NewNetworkUtils(nl, plc), nsClient: nsc, + dhcpClient: dhcpClient, ep: endpoint, } @@ -127,6 +133,19 @@ func (client *SecondaryEndpointClient) ConfigureContainerInterfacesAndRoutes(epI ifInfo.Routes = append(ifInfo.Routes, epInfo.Routes...) + // issue dhcp discover packet to ensure mapping created for dns via wireserver to work + // we do not use the response for anything + numSecs := 3 + timeout := time.Duration(numSecs) * time.Second + ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(timeout)) + defer cancel() + logger.Info("Sending DHCP packet", zap.Any("macAddress", epInfo.MacAddress), zap.String("ifName", epInfo.IfName)) + err := client.dhcpClient.DiscoverRequest(ctx, epInfo.MacAddress, epInfo.IfName) + if err != nil { + return errors.Wrap(err, kubelet.NetworkNotReadyErrorMsg+" - failed to issue dhcp discover packet to create mapping in host") + } + logger.Info("Finished configuring container interfaces and routes for secondary endpoint client") + return nil } diff --git a/network/secondary_endpoint_linux_test.go b/network/secondary_endpoint_linux_test.go index 2d8ca05569..5ec6a27fd6 100644 --- a/network/secondary_endpoint_linux_test.go +++ b/network/secondary_endpoint_linux_test.go @@ -4,6 +4,7 @@ package network import ( + "context" "net" "testing" @@ -12,9 +13,18 @@ import ( "github.com/Azure/azure-container-networking/netlink" "github.com/Azure/azure-container-networking/network/networkutils" "github.com/Azure/azure-container-networking/platform" + "github.com/pkg/errors" "github.com/stretchr/testify/require" + "k8s.io/kubernetes/pkg/kubelet" ) +// mockDHCPFail is a mock DHCP client that always returns an error +type mockDHCPFail struct{} + +func (m *mockDHCPFail) DiscoverRequest(context.Context, net.HardwareAddr, string) error { + return errors.New("mock DHCP discover request failed") +} + func TestSecondaryAddEndpoints(t *testing.T) { nl := netlink.NewMockNetlink(false, "") plc := platform.NewMockExecClient(false) @@ -36,6 +46,7 @@ func TestSecondaryAddEndpoints(t *testing.T) { netUtilsClient: networkutils.NewNetworkUtils(nl, plc), netioshim: netio.NewMockNetIO(false, 0), ep: &endpoint{SecondaryInterfaces: make(map[string]*InterfaceInfo)}, + dhcpClient: &mockDHCP{}, }, epInfo: &EndpointInfo{MacAddress: mac}, wantErr: 
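// A minimal sketch of the dhcpClient seam added for secondary endpoints. The
// interface shape is inferred from mockDHCPFail above; discoverWithTimeout is a
// hypothetical wrapper showing the same bounded-deadline pattern used in
// ConfigureContainerInterfacesAndRoutes so a silent network cannot hang CNI add.
package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

type dhcpClient interface {
	DiscoverRequest(ctx context.Context, mac net.HardwareAddr, ifName string) error
}

type noopDHCP struct{}

func (noopDHCP) DiscoverRequest(context.Context, net.HardwareAddr, string) error { return nil }

func discoverWithTimeout(c dhcpClient, mac net.HardwareAddr, ifName string) error {
	// bound the discover; the response is discarded, only the host-side mapping matters
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	return c.DiscoverRequest(ctx, mac, ifName)
}

func main() {
	mac, _ := net.ParseMAC("12:34:56:78:9a:bc")
	fmt.Println(discoverWithTimeout(noopDHCP{}, mac, "eth1")) // <nil>
}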
false, @@ -78,6 +89,7 @@ func TestSecondaryAddEndpoints(t *testing.T) { } else { require.NoError(t, err) require.Equal(t, tt.client.ep.SecondaryInterfaces["eth1"].MacAddress, tt.epInfo.MacAddress) + require.Equal(t, "eth1", tt.epInfo.IfName, "interface name should update based on mac address here before being referenced later") } }) } @@ -255,6 +267,7 @@ func TestSecondaryConfigureContainerInterfacesAndRoutes(t *testing.T) { plClient: platform.NewMockExecClient(false), netUtilsClient: networkutils.NewNetworkUtils(nl, plc), netioshim: netio.NewMockNetIO(false, 0), + dhcpClient: &mockDHCP{}, ep: &endpoint{SecondaryInterfaces: map[string]*InterfaceInfo{"eth1": {Name: "eth1"}}}, }, epInfo: &EndpointInfo{ @@ -280,6 +293,7 @@ func TestSecondaryConfigureContainerInterfacesAndRoutes(t *testing.T) { plClient: platform.NewMockExecClient(false), netUtilsClient: networkutils.NewNetworkUtils(netlink.NewMockNetlink(true, ""), plc), netioshim: netio.NewMockNetIO(false, 0), + dhcpClient: &mockDHCP{}, ep: &endpoint{SecondaryInterfaces: map[string]*InterfaceInfo{"eth1": {Name: "eth1"}}}, }, epInfo: &EndpointInfo{ @@ -301,6 +315,7 @@ func TestSecondaryConfigureContainerInterfacesAndRoutes(t *testing.T) { plClient: platform.NewMockExecClient(false), netUtilsClient: networkutils.NewNetworkUtils(nl, plc), netioshim: netio.NewMockNetIO(true, 1), + dhcpClient: &mockDHCP{}, ep: &endpoint{SecondaryInterfaces: map[string]*InterfaceInfo{"eth1": {Name: "eth1"}}}, }, epInfo: &EndpointInfo{ @@ -327,6 +342,7 @@ func TestSecondaryConfigureContainerInterfacesAndRoutes(t *testing.T) { plClient: platform.NewMockExecClient(false), netUtilsClient: networkutils.NewNetworkUtils(nl, plc), netioshim: netio.NewMockNetIO(false, 0), + dhcpClient: &mockDHCP{}, ep: &endpoint{SecondaryInterfaces: map[string]*InterfaceInfo{"eth1": {Name: "eth1"}}}, }, epInfo: &EndpointInfo{ @@ -348,6 +364,7 @@ func TestSecondaryConfigureContainerInterfacesAndRoutes(t *testing.T) { plClient: platform.NewMockExecClient(false), netUtilsClient: networkutils.NewNetworkUtils(nl, plc), netioshim: netio.NewMockNetIO(false, 0), + dhcpClient: &mockDHCP{}, ep: &endpoint{SecondaryInterfaces: map[string]*InterfaceInfo{"eth1": {Name: "eth1"}}}, }, epInfo: &EndpointInfo{ @@ -356,6 +373,33 @@ func TestSecondaryConfigureContainerInterfacesAndRoutes(t *testing.T) { wantErr: true, wantErrMsg: "SecondaryEndpointClient Error: routes expected for eth1", }, + { + name: "Configure Interface and routes DHCP discover fail", + client: &SecondaryEndpointClient{ + netlink: netlink.NewMockNetlink(false, ""), + plClient: platform.NewMockExecClient(false), + netUtilsClient: networkutils.NewNetworkUtils(nl, plc), + netioshim: netio.NewMockNetIO(false, 0), + dhcpClient: &mockDHCPFail{}, + ep: &endpoint{SecondaryInterfaces: map[string]*InterfaceInfo{"eth1": {Name: "eth1"}}}, + }, + epInfo: &EndpointInfo{ + IfName: "eth1", + IPAddresses: []net.IPNet{ + { + IP: net.ParseIP("192.168.0.4"), + Mask: net.CIDRMask(subnetv4Mask, ipv4Bits), + }, + }, + Routes: []RouteInfo{ + { + Dst: net.IPNet{IP: net.ParseIP("192.168.0.4"), Mask: net.CIDRMask(ipv4FullMask, ipv4Bits)}, + }, + }, + }, + wantErr: true, + wantErrMsg: kubelet.NetworkNotReadyErrorMsg, + }, } for _, tt := range tests { diff --git a/network/snat/snat_linux.go b/network/snat/snat_linux.go index 936ce8ef84..76c541c4f6 100644 --- a/network/snat/snat_linux.go +++ b/network/snat/snat_linux.go @@ -11,6 +11,7 @@ import ( "github.com/Azure/azure-container-networking/cni/log" "github.com/Azure/azure-container-networking/ebtables" 
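// A minimal sketch of why the snat Client now receives a netio.NetIOInterface:
// interface lookup becomes an injectable seam, so the reworked tests below can
// force a lookup failure with NewMockNetIO(true, 1) instead of touching real
// links. The trimmed interface and failing stub here are illustrative only.
package main

import (
	"errors"
	"fmt"
	"net"
)

type netIOLookup interface {
	GetNetworkInterfaceByName(name string) (*net.Interface, error)
}

type failingNetIO struct{}

func (failingNetIO) GetNetworkInterfaceByName(string) (*net.Interface, error) {
	return nil, errors.New("no such network interface")
}

type snatClient struct{ netio netIOLookup }

func (c snatClient) lookupContainerVeth(name string) error {
	if _, err := c.netio.GetNetworkInterfaceByName(name); err != nil {
		// previously net.InterfaceByName's error was silently dropped here
		return fmt.Errorf("could not find container snat veth %q: %w", name, err)
	}
	return nil
}

func main() {
	fmt.Println(snatClient{netio: failingNetIO{}}.lookupContainerVeth("azSnatveth0"))
}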
"github.com/Azure/azure-container-networking/iptables" + "github.com/Azure/azure-container-networking/netio" "github.com/Azure/azure-container-networking/netlink" "github.com/Azure/azure-container-networking/network/networkutils" "github.com/Azure/azure-container-networking/platform" @@ -55,6 +56,7 @@ type Client struct { netlink netlink.NetlinkInterface plClient platform.ExecClient ipTablesClient ipTablesClient + netioClient netio.NetIOInterface } func NewSnatClient(hostIfName string, @@ -67,6 +69,7 @@ func NewSnatClient(hostIfName string, nl netlink.NetlinkInterface, plClient platform.ExecClient, iptc ipTablesClient, + nio netio.NetIOInterface, ) Client { snatClient := Client{ hostSnatVethName: hostIfName, @@ -78,6 +81,7 @@ func NewSnatClient(hostIfName string, netlink: nl, plClient: plClient, ipTablesClient: iptc, + netioClient: nio, } snatClient.SkipAddressesFromBlock = append(snatClient.SkipAddressesFromBlock, skipAddressesFromBlock...) @@ -223,7 +227,11 @@ func (client *Client) AllowInboundFromHostToNC() error { return newErrorSnatClient(err.Error()) } - snatContainerVeth, _ := net.InterfaceByName(client.containerSnatVethName) + snatContainerVeth, err := client.netioClient.GetNetworkInterfaceByName(client.containerSnatVethName) + if err != nil { + logger.Info("Could not find interface", zap.String("containerSnatVethName", client.containerSnatVethName)) + return errors.Wrap(newErrorSnatClient(err.Error()), "could not find container snat veth name for allow host to nc") + } // Add static arp entry for localIP to prevent arp going out of VM logger.Info("Adding static arp entry for ip", zap.Any("containerIP", containerIP), @@ -319,7 +327,11 @@ func (client *Client) AllowInboundFromNCToHost() error { return err } - snatContainerVeth, _ := net.InterfaceByName(client.containerSnatVethName) + snatContainerVeth, err := client.netioClient.GetNetworkInterfaceByName(client.containerSnatVethName) + if err != nil { + logger.Info("Could not find interface", zap.String("containerSnatVethName", client.containerSnatVethName)) + return errors.Wrap(newErrorSnatClient(err.Error()), "could not find container snat veth name for allow nc to host") + } // Add static arp entry for localIP to prevent arp going out of VM logger.Info("Adding static arp entry for ip", zap.Any("containerIP", containerIP), zap.String("HardwareAddr", snatContainerVeth.HardwareAddr.String())) @@ -416,7 +428,7 @@ func (client *Client) DropArpForSnatBridgeApipaRange(snatBridgeIP, azSnatVethIfN // This function creates linux bridge which will be used for outbound connectivity by NCs func (client *Client) createSnatBridge(snatBridgeIP, hostPrimaryMac string) error { - _, err := net.InterfaceByName(SnatBridgeName) + _, err := client.netioClient.GetNetworkInterfaceByName(SnatBridgeName) if err == nil { logger.Info("Snat Bridge already exists") } else { diff --git a/network/snat/snat_linux_test.go b/network/snat/snat_linux_test.go index 0ffee1ebf2..fcf4a861d3 100644 --- a/network/snat/snat_linux_test.go +++ b/network/snat/snat_linux_test.go @@ -4,12 +4,30 @@ import ( "os" "testing" - "github.com/Azure/azure-container-networking/iptables" + "github.com/Azure/azure-container-networking/netio" "github.com/Azure/azure-container-networking/netlink" ) var anyInterface = "dummy" +type mockIPTablesClient struct{} + +func (c mockIPTablesClient) InsertIptableRule(_, _, _, _, _ string) error { + return nil +} + +func (c mockIPTablesClient) AppendIptableRule(_, _, _, _, _ string) error { + return nil +} + +func (c mockIPTablesClient) 
DeleteIptableRule(_, _, _, _, _ string) error { + return nil +} + +func (c mockIPTablesClient) CreateChain(_, _, _ string) error { + return nil +} + func TestMain(m *testing.M) { exitCode := m.Run() @@ -18,16 +36,22 @@ func TestMain(m *testing.M) { os.Exit(exitCode) } -func TestAllowInboundFromHostToNC(t *testing.T) { - nl := netlink.NewNetlink() - iptc := iptables.NewClient() - client := &Client{ +func GetTestClient(nl netlink.NetlinkInterface, iptc ipTablesClient, nio netio.NetIOInterface) *Client { + return &Client{ SnatBridgeIP: "169.254.0.1/16", localIP: "169.254.0.4/16", containerSnatVethName: anyInterface, netlink: nl, ipTablesClient: iptc, + netioClient: nio, } +} + +func TestAllowInboundFromHostToNC(t *testing.T) { + nl := netlink.NewMockNetlink(false, "") + iptc := &mockIPTablesClient{} + nio := netio.NewMockNetIO(false, 0) + client := GetTestClient(nl, iptc, nio) if err := nl.AddLink(&netlink.DummyLink{ LinkInfo: netlink.LinkInfo{ @@ -65,18 +89,18 @@ func TestAllowInboundFromHostToNC(t *testing.T) { if err := nl.DeleteLink(SnatBridgeName); err != nil { t.Errorf("Error removing snat bridge: %v", err) } + + client.netioClient = netio.NewMockNetIO(true, 1) + if err := client.AllowInboundFromHostToNC(); err == nil { + t.Errorf("Expected error when interface not found in allow host to nc but got nil") + } } func TestAllowInboundFromNCToHost(t *testing.T) { - nl := netlink.NewNetlink() - iptc := iptables.NewClient() - client := &Client{ - SnatBridgeIP: "169.254.0.1/16", - localIP: "169.254.0.4/16", - containerSnatVethName: anyInterface, - netlink: nl, - ipTablesClient: iptc, - } + nl := netlink.NewMockNetlink(false, "") + iptc := &mockIPTablesClient{} + nio := netio.NewMockNetIO(false, 0) + client := GetTestClient(nl, iptc, nio) if err := nl.AddLink(&netlink.DummyLink{ LinkInfo: netlink.LinkInfo{ @@ -114,4 +138,9 @@ func TestAllowInboundFromNCToHost(t *testing.T) { if err := nl.DeleteLink(SnatBridgeName); err != nil { t.Errorf("Error removing snat bridge: %v", err) } + + client.netioClient = netio.NewMockNetIO(true, 1) + if err := client.AllowInboundFromNCToHost(); err == nil { + t.Errorf("Expected error when interface not found in allow nc to host but got nil") + } } diff --git a/network/transparent_vlan_endpoint_snatroute_linux.go b/network/transparent_vlan_endpoint_snatroute_linux.go index d997ead960..4c3902ddd3 100644 --- a/network/transparent_vlan_endpoint_snatroute_linux.go +++ b/network/transparent_vlan_endpoint_snatroute_linux.go @@ -21,6 +21,7 @@ func (client *TransparentVlanEndpointClient) NewSnatClient(snatBridgeIP, localIP client.netlink, client.plClient, client.iptablesClient, + client.netioshim, ) } } diff --git a/network/transparent_vlan_endpointclient_linux.go b/network/transparent_vlan_endpointclient_linux.go index 731353c231..fc4399ec3d 100644 --- a/network/transparent_vlan_endpointclient_linux.go +++ b/network/transparent_vlan_endpointclient_linux.go @@ -622,7 +622,7 @@ func (client *TransparentVlanEndpointClient) AddDefaultArp(interfaceName, destMa func (client *TransparentVlanEndpointClient) DeleteEndpoints(ep *endpoint) error { // Vnet NS - err := ExecuteInNS(client.nsClient, client.vnetNSName, func() error { + _ = ExecuteInNS(client.nsClient, client.vnetNSName, func() error { // Passing in functionality to get number of routes after deletion getNumRoutesLeft := func() (int, error) { routes, err := vishnetlink.RouteList(nil, vishnetlink.FAMILY_V4) @@ -632,11 +632,9 @@ func (client *TransparentVlanEndpointClient) DeleteEndpoints(ep *endpoint) error return 
len(routes), nil } - return client.DeleteEndpointsImpl(ep, getNumRoutesLeft) + client.DeleteEndpointsImpl(ep, getNumRoutesLeft) + return nil }) - if err != nil { - return err - } // VM NS if err := client.DeleteSnatEndpoint(); err != nil { @@ -646,16 +644,16 @@ func (client *TransparentVlanEndpointClient) DeleteEndpoints(ep *endpoint) error } // getNumRoutesLeft is a function which gets the current number of routes in the namespace. Namespace: Vnet -func (client *TransparentVlanEndpointClient) DeleteEndpointsImpl(ep *endpoint, _ func() (int, error)) error { +func (client *TransparentVlanEndpointClient) DeleteEndpointsImpl(ep *endpoint, _ func() (int, error)) { routeInfoList := client.GetVnetRoutes(ep.IPAddresses) if err := deleteRoutes(client.netlink, client.netioshim, client.vnetVethName, routeInfoList); err != nil { - return errors.Wrap(err, "failed to remove routes") + logger.Error("Failed to remove routes", zap.Error(err)) } logger.Info("Deleting host veth", zap.String("vnetVethName", client.vnetVethName)) // Delete Host Veth if err := client.netlink.DeleteLink(client.vnetVethName); err != nil { - return errors.Wrapf(err, "deleteLink for %v failed", client.vnetVethName) + logger.Error("Failed to delete link", zap.Error(err), zap.String("vnetVethName", client.vnetVethName)) } // TODO: revist if this require in future. @@ -670,7 +668,6 @@ func (client *TransparentVlanEndpointClient) DeleteEndpointsImpl(ep *endpoint, _ } } */ - return nil } // Helper function that allows executing a function in a VM namespace diff --git a/network/transparent_vlan_endpointclient_linux_test.go b/network/transparent_vlan_endpointclient_linux_test.go index be64142bc5..f21dab9bf4 100644 --- a/network/transparent_vlan_endpointclient_linux_test.go +++ b/network/transparent_vlan_endpointclient_linux_test.go @@ -15,9 +15,11 @@ import ( "github.com/stretchr/testify/require" ) -var errNetnsMock = errors.New("mock netns error") -var errMockNetIOFail = errors.New("netio fail") -var errMockNetIONoIfFail = &net.OpError{Op: "route", Net: "ip+net", Source: nil, Addr: nil, Err: errors.New("no such network interface")} +var ( + errNetnsMock = errors.New("mock netns error") + errMockNetIOFail = errors.New("netio fail") + errMockNetIONoIfFail = &net.OpError{Op: "route", Net: "ip+net", Source: nil, Addr: nil, Err: errors.New("no such network interface")} +) func newNetnsErrorMock(errStr string) error { return errors.Wrap(errNetnsMock, errStr) @@ -631,7 +633,6 @@ func TestTransparentVlanDeleteEndpoints(t *testing.T) { routesLeft: func() (int, error) { return numDefaultRoutes, nil }, - wantErr: false, }, { name: "Delete endpoint do not delete vnet ns it is still in use", @@ -657,7 +658,6 @@ func TestTransparentVlanDeleteEndpoints(t *testing.T) { routesLeft: func() (int, error) { return numDefaultRoutes + 1, nil }, - wantErr: false, }, //nolint gocritic /* { @@ -692,15 +692,47 @@ func TestTransparentVlanDeleteEndpoints(t *testing.T) { for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { - err := tt.client.DeleteEndpointsImpl(tt.ep, tt.routesLeft) - if tt.wantErr { - require.Error(t, err) - require.Contains(t, err.Error(), tt.wantErrMsg, "Expected:%v actual:%v", tt.wantErrMsg, err.Error()) - } else { - require.NoError(t, err) - } + tt.client.DeleteEndpointsImpl(tt.ep, tt.routesLeft) }) } + + t.Run("Delete endpoint runs even if delete routes fails", func(t *testing.T) { + nl := netlink.NewMockNetlink(true, "netlink failure") + // count number of times delete and link and set route are called + // even if deleting 
the routes fail, we should still delete the veth pair in the vnet ns + deleteLinkFlag := 0 + nl.DeleteLinkFn = func(_ string) error { + deleteLinkFlag++ + return errors.New("err mock") + } + errOnDeleteRouteFlag := 0 + nl.SetDeleteRouteValidationFn(func(_ *netlink.Route) error { + errOnDeleteRouteFlag++ + return errors.New("err mock") + }) + + client := TransparentVlanEndpointClient{ + primaryHostIfName: "eth0", + vlanIfName: "eth0.1", + vnetVethName: "A1veth0", + containerVethName: "B1veth0", + vnetNSName: "az_ns_1", + netnsClient: &mockNetns{ + deleteNamed: defaultDeleteNamed, + }, + netlink: nl, + plClient: platform.NewMockExecClient(false), + netUtilsClient: networkutils.NewNetworkUtils(nl, plc), + netioshim: netio.NewMockNetIO(false, 0), + } + ep := &endpoint{ + IPAddresses: IPAddresses, + } + client.DeleteEndpointsImpl(ep, func() (int, error) { return 0, nil }) + + require.Equal(t, 1, errOnDeleteRouteFlag, "error must occur during delete route path") + require.Equal(t, 1, deleteLinkFlag, "delete link must still be called") + }) } func TestTransparentVlanConfigureContainerInterfacesAndRoutes(t *testing.T) { diff --git a/nmagent/client.go b/nmagent/client.go index 8eea299bc6..71a0810978 100644 --- a/nmagent/client.go +++ b/nmagent/client.go @@ -44,9 +44,8 @@ type Client struct { httpClient *http.Client // config - host string - port uint16 - + host string + port uint16 enableTLS bool retrier interface { @@ -284,6 +283,33 @@ func (c *Client) GetHomeAz(ctx context.Context) (AzResponse, error) { return homeAzResponse, nil } +// GetInterfaceIPInfo fetches the node's interface IP information from nmagent +func (c *Client) GetInterfaceIPInfo(ctx context.Context) (Interfaces, error) { + req, err := c.buildRequest(ctx, &GetSecondaryIPsRequest{}) + var out Interfaces + + if err != nil { + return out, errors.Wrap(err, "building request") + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return out, errors.Wrap(err, "submitting request") + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return out, die(resp.StatusCode, resp.Header, resp.Body, req.URL.Path) + } + + err = xml.NewDecoder(resp.Body).Decode(&out) + if err != nil { + return out, errors.Wrap(err, "decoding response") + } + + return out, nil +} + func die(code int, headers http.Header, body io.ReadCloser, path string) error { // nolint:errcheck // make a best effort to return whatever information we can // returning an error here without the code and source would diff --git a/nmagent/client_test.go b/nmagent/client_test.go index c45c46d8eb..8ecbab92b3 100644 --- a/nmagent/client_test.go +++ b/nmagent/client_test.go @@ -3,9 +3,11 @@ package nmagent_test import ( "context" "encoding/json" + "encoding/xml" "fmt" "net/http" "net/http/httptest" + "net/netip" "strings" "testing" @@ -749,7 +751,7 @@ func TestGetHomeAz(t *testing.T) { }{ { "happy path", - nmagent.AzResponse{HomeAz: uint(1)}, + nmagent.AzResponse{HomeAz: uint(1), AppliedFixes: nil}, "/machine/plugins?comp=nmagent&type=GetHomeAz%2Fapi-version%2F1", map[string]interface{}{ "httpStatusCode": "200", "HomeAz": 1, }, false, }, + { + "happy path with new version", + nmagent.AzResponse{HomeAz: uint(1), AppliedFixes: []nmagent.HomeAZFix{nmagent.HomeAZFixIPv6}}, + "/machine/plugins?comp=nmagent&type=GetHomeAz%2Fapi-version%2F1", + map[string]interface{}{ + "httpStatusCode": "200", + "HomeAz": 1,
+ "APIVersion": 2, + }, + false, + }, { "empty response", nmagent.AzResponse{}, @@ -809,3 +822,86 @@ func TestGetHomeAz(t *testing.T) { }) } } + +func TestGetInterfaceIPInfo(t *testing.T) { + tests := []struct { + name string + expURL string + response nmagent.Interfaces + respStr string + }{ + { + "happy path", + "/machine/plugins?comp=nmagent&type=getinterfaceinfov1", + nmagent.Interfaces{ + Entries: []nmagent.Interface{ + { + MacAddress: nmagent.MACAddress{0x00, 0x0D, 0x3A, 0xF9, 0xDC, 0xA6}, + IsPrimary: true, + InterfaceSubnets: []nmagent.InterfaceSubnet{ + { + Prefix: "10.240.0.0/16", + IPAddress: []nmagent.NodeIP{ + { + Address: nmagent.IPAddress(netip.AddrFrom4([4]byte{10, 240, 0, 5})), + IsPrimary: true, + }, + { + Address: nmagent.IPAddress(netip.AddrFrom4([4]byte{10, 240, 0, 6})), + IsPrimary: false, + }, + }, + }, + }, + }, + }, + }, + "" + + "" + + "", + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + var gotURL string + client := nmagent.NewTestClient(&TestTripper{ + RoundTripF: func(req *http.Request) (*http.Response, error) { + gotURL = req.URL.RequestURI() + rr := httptest.NewRecorder() + rr.WriteHeader(http.StatusOK) + err := xml.NewEncoder(rr).Encode(test.response) + if err != nil { + t.Fatal("unexpected error encoding response: err:", err) + } + return rr.Result(), nil + }, + }) + + ctx, cancel := testContext(t) + defer cancel() + + resp, err := client.GetInterfaceIPInfo(ctx) + checkErr(t, err, false) + + if gotURL != test.expURL { + t.Error("received URL differs from expected: got:", gotURL, "exp:", test.expURL) + } + + if got := resp; !cmp.Equal(got, test.response) { + t.Error("response differs from expectation: diff:", cmp.Diff(got, test.response)) + } + + var unmarshaled nmagent.Interfaces + err = xml.Unmarshal([]byte(test.respStr), &unmarshaled) + checkErr(t, err, false) + + if !cmp.Equal(resp, unmarshaled) { + t.Error("response differs from expected decoded string: diff:", cmp.Diff(resp, unmarshaled)) + } + }) + } +} diff --git a/nmagent/equality.go b/nmagent/equality.go new file mode 100644 index 0000000000..67381e9897 --- /dev/null +++ b/nmagent/equality.go @@ -0,0 +1,51 @@ +package nmagent + +// Equal compares two Interfaces objects for equality. +func (i Interfaces) Equal(other Interfaces) bool { + if len(i.Entries) != len(other.Entries) { + return false + } + for idx, entry := range i.Entries { + if !entry.Equal(other.Entries[idx]) { + return false + } + } + return true +} + +// Equal compares two Interface objects for equality. +func (i Interface) Equal(other Interface) bool { + if len(i.InterfaceSubnets) != len(other.InterfaceSubnets) { + return false + } + for idx, subnet := range i.InterfaceSubnets { + if !subnet.Equal(other.InterfaceSubnets[idx]) { + return false + } + } + if i.IsPrimary != other.IsPrimary || !i.MacAddress.Equal(other.MacAddress) { + return false + } + return true +} + +// Equal compares two InterfaceSubnet objects for equality. +func (s InterfaceSubnet) Equal(other InterfaceSubnet) bool { + if len(s.IPAddress) != len(other.IPAddress) { + return false + } + if s.Prefix != other.Prefix { + return false + } + for idx, ip := range s.IPAddress { + if !ip.Equal(other.IPAddress[idx]) { + return false + } + } + return true +} + +// Equal compares two NodeIP objects for equality. 
+func (ip NodeIP) Equal(other NodeIP) bool { + return ip.IsPrimary == other.IsPrimary && ip.Address.Equal(other.Address) +} diff --git a/nmagent/error.go b/nmagent/error.go index 582e4fca31..ef50a969a2 100644 --- a/nmagent/error.go +++ b/nmagent/error.go @@ -11,6 +11,14 @@ import ( pkgerrors "github.com/pkg/errors" ) +type HomeAzAPIVersionError struct { + ReceivedAPIVersion uint +} + +func (h HomeAzAPIVersionError) Error() string { + return fmt.Sprintf("invalid homeaz api version (must be 0 or 2): received %d", h.ReceivedAPIVersion) +} + var deleteNetworkPattern = regexp.MustCompile(`/NetworkManagement/joinedVirtualNetworks/[^/]+/api-version/\d+/method/DELETE`) // ContentError is encountered when an unexpected content type is obtained from diff --git a/nmagent/ipaddress.go b/nmagent/ipaddress.go new file mode 100644 index 0000000000..2090bdbe86 --- /dev/null +++ b/nmagent/ipaddress.go @@ -0,0 +1,52 @@ +package nmagent + +import ( + "encoding/xml" + "net/netip" + + "github.com/pkg/errors" +) + +type IPAddress netip.Addr + +func (h IPAddress) Equal(other IPAddress) bool { + return netip.Addr(h).Compare(netip.Addr(other)) == 0 +} + +func (h *IPAddress) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + var ipStr string + if err := d.DecodeElement(&ipStr, &start); err != nil { + return errors.Wrap(err, "decoding IP address") + } + + ip, err := netip.ParseAddr(ipStr) + if err != nil { + return errors.Wrap(err, "parsing IP address") + } + + *h = IPAddress(ip) + return nil +} + +func (h *IPAddress) UnmarshalXMLAttr(attr xml.Attr) error { + ipStr := attr.Value + ip, err := netip.ParseAddr(ipStr) + if err != nil { + return errors.Wrap(err, "parsing IP address") + } + + *h = IPAddress(ip) + return nil +} + +func (h IPAddress) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + err := e.EncodeElement(netip.Addr(h).String(), start) + return errors.Wrap(err, "encoding IP address") +} + +func (h IPAddress) MarshalXMLAttr(name xml.Name) (xml.Attr, error) { + return xml.Attr{ + Name: name, + Value: netip.Addr(h).String(), + }, nil +} diff --git a/nmagent/macaddress.go b/nmagent/macaddress.go new file mode 100644 index 0000000000..fa81afc7ef --- /dev/null +++ b/nmagent/macaddress.go @@ -0,0 +1,78 @@ +package nmagent + +import ( + "encoding/hex" + "encoding/xml" + "net" + + "github.com/pkg/errors" +) + +const ( + MACAddressSize = 6 +) + +type MACAddress net.HardwareAddr + +func (h MACAddress) Equal(other MACAddress) bool { + if len(h) != len(other) { + return false + } + for i := range h { + if h[i] != other[i] { + return false + } + } + return true +} + +func (h *MACAddress) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + var macStr string + if err := d.DecodeElement(&macStr, &start); err != nil { + return errors.Wrap(err, "decoding MAC address") + } + + // Convert the string (without colons) into a valid MACAddress + mac, err := hex.DecodeString(macStr) + if err != nil { + return &net.ParseError{Type: "MAC address", Text: macStr} + } + + *h = MACAddress(mac) + return nil +} + +func (h *MACAddress) UnmarshalXMLAttr(attr xml.Attr) error { + macStr := attr.Value + mac, err := hex.DecodeString(macStr) + if err != nil { + return &net.ParseError{Type: "MAC address", Text: macStr} + } + + *h = MACAddress(mac) + return nil +} + +func (h MACAddress) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if len(h) != MACAddressSize { + return &net.AddrError{Err: "invalid MAC address", Addr: hex.EncodeToString(h)} + } + + macStr := hex.EncodeToString(h) + err := 
e.EncodeElement(macStr, start) + return errors.Wrap(err, "encoding MAC address") +} + +func (h MACAddress) MarshalXMLAttr(name xml.Name) (xml.Attr, error) { + if len(h) != MACAddressSize { + return xml.Attr{}, &net.AddrError{Err: "invalid MAC address", Addr: hex.EncodeToString(h)} + } + + macStr := hex.EncodeToString(h) + attr := xml.Attr{ + Name: name, + Value: macStr, + } + + return attr, nil +} diff --git a/nmagent/requests.go b/nmagent/requests.go index 01182bfbb3..6a173080fa 100644 --- a/nmagent/requests.go +++ b/nmagent/requests.go @@ -536,3 +536,29 @@ func (g *GetHomeAzRequest) Path() string { func (g *GetHomeAzRequest) Validate() error { return nil } + +var _ Request = &GetSecondaryIPsRequest{} + +type GetSecondaryIPsRequest struct{} + +// Body is a no-op method to satisfy the Request interface while indicating +// that there is no body for a GetSecondaryIPsRequest Request. +func (g *GetSecondaryIPsRequest) Body() (io.Reader, error) { + return nil, nil +} + +// Method indicates that GetSecondaryIPsRequest requests are GET requests. +func (g *GetSecondaryIPsRequest) Method() string { + return http.MethodGet +} + +// Path returns the necessary URI path for invoking a GetSecondaryIPsRequest request. +func (g *GetSecondaryIPsRequest) Path() string { + return "getinterfaceinfov1" +} + +// Validate is a no-op method because parameters are hard coded in the path, +// no customization needed. +func (g *GetSecondaryIPsRequest) Validate() error { + return nil +} diff --git a/nmagent/requests_test.go b/nmagent/requests_test.go index f556efbefe..e1da51a5be 100644 --- a/nmagent/requests_test.go +++ b/nmagent/requests_test.go @@ -568,3 +568,16 @@ func TestNCVersionRequestValidate(t *testing.T) { }) } } + +func TestGetSecondaryIPsRequest(t *testing.T) { + const exp string = "getinterfaceinfov1" + req := nmagent.GetSecondaryIPsRequest{} + + if err := req.Validate(); err != nil { + t.Error("Validation failed on GetSecondaryIpsRequest ", req) + } + + if req.Path() != exp { + t.Error("unexpected path: exp:", exp, "got:", req.Path()) + } +} diff --git a/nmagent/responses.go b/nmagent/responses.go index e5324d59f9..e5a88ae5bc 100644 --- a/nmagent/responses.go +++ b/nmagent/responses.go @@ -1,5 +1,11 @@ package nmagent +import ( + "encoding/json" + + "github.com/pkg/errors" +) + type VirtualNetwork struct { CNetSpace string `json:"cnetSpace"` DefaultGateway string `json:"defaultGateway"` @@ -31,12 +37,101 @@ type NCVersion struct { Version string `json:"version"` // the current network container version } -// NetworkContainerListResponse is a collection of network container IDs mapped +// NCVersionList is a collection of network container IDs mapped // to their current versions. type NCVersionList struct { Containers []NCVersion `json:"networkContainers"` } +// HomeAZFix is an indication that a particular bugfix has been applied to some +// HomeAZ. 
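// A minimal sketch of the colon-less hex convention the MACAddress codec above
// implements: NMAgent transmits "000d3af9dca6" where net.HardwareAddr would
// render "00:0d:3a:f9:dc:a6", hence the hex.DecodeString/EncodeToString pair.
package main

import (
	"encoding/hex"
	"fmt"
	"net"
)

func main() {
	mac := net.HardwareAddr{0x00, 0x0D, 0x3A, 0xF9, 0xDC, 0xA6}
	wire := hex.EncodeToString(mac) // form sent on the wire
	decoded, err := hex.DecodeString(wire)
	if err != nil {
		panic(err)
	}
	fmt.Println(wire, "->", net.HardwareAddr(decoded)) // 000d3af9dca6 -> 00:0d:3a:f9:dc:a6
}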
+type HomeAZFix int + +func (h HomeAZFix) String() string { + switch h { + case HomeAZFixInvalid: + return "HomeAZFixInvalid" + case HomeAZFixIPv6: + return "HomeAZFixIPv6" + default: + return "Unknown HomeAZ Fix" + } +} + +const ( + HomeAZFixInvalid HomeAZFix = iota + HomeAZFixIPv6 +) + type AzResponse struct { - HomeAz uint `json:"homeAz"` + HomeAz uint + AppliedFixes []HomeAZFix +} + +func (az *AzResponse) UnmarshalJSON(in []byte) error { + type resp struct { + HomeAz uint `json:"homeAz"` + APIVersion uint `json:"apiVersion"` + } + + var rsp resp + err := json.Unmarshal(in, &rsp) + if err != nil { + return errors.Wrap(err, "unmarshaling raw home az response") + } + + if rsp.APIVersion != 0 && rsp.APIVersion != 2 { + return HomeAzAPIVersionError{ + ReceivedAPIVersion: rsp.APIVersion, + } + } + + az.HomeAz = rsp.HomeAz + + if rsp.APIVersion == 2 { // nolint:gomnd // ignore magic number 2 + az.AppliedFixes = append(az.AppliedFixes, HomeAZFixIPv6) + } + + return nil +} + +// ContainsFixes reports whether all fixes requested are present in the +// AzResponse returned. +func (az AzResponse) ContainsFixes(requestedFixes ...HomeAZFix) bool { + for _, requested := range requestedFixes { + found := false + for _, present := range az.AppliedFixes { + if requested == present { + found = true + } + } + + if !found { + return false + } + } + return true +} + +type NodeIP struct { + Address IPAddress `xml:"Address,attr"` + IsPrimary bool `xml:"IsPrimary,attr"` +} + +type InterfaceSubnet struct { + IPAddress []NodeIP `xml:"IPAddress"` + Prefix string `xml:"Prefix,attr"` +} + +type Interface struct { + InterfaceSubnets []InterfaceSubnet `xml:"IPSubnet"` + MacAddress MACAddress `xml:"MacAddress,attr"` + IsPrimary bool `xml:"IsPrimary,attr"` +} + +// Response from NMAgent for getinterfaceinfov1 (interface IP information) +// If we change this name, we need to tell the XML encoder to look for +// "Interfaces" in the response. +type Interfaces struct { + Entries []Interface `xml:"Interface"` } diff --git a/nmagent/responses_test.go b/nmagent/responses_test.go new file mode 100644 index 0000000000..86455eebc1 --- /dev/null +++ b/nmagent/responses_test.go @@ -0,0 +1,137 @@ +package nmagent_test + +import ( + "encoding/json" + "testing" + + "github.com/Azure/azure-container-networking/nmagent" + "github.com/google/go-cmp/cmp" +) + +func TestContainsFixes(t *testing.T) { + tests := []struct { + name string + resp nmagent.AzResponse + fixes []nmagent.HomeAZFix + exp bool + }{ + { + "empty", + nmagent.AzResponse{}, + []nmagent.HomeAZFix{}, + true, + }, + { + "one present", + nmagent.AzResponse{ + AppliedFixes: []nmagent.HomeAZFix{ + nmagent.HomeAZFixIPv6, + }, + }, + []nmagent.HomeAZFix{nmagent.HomeAZFixIPv6}, + true, + }, + { + "one absent", + nmagent.AzResponse{ + AppliedFixes: []nmagent.HomeAZFix{}, + }, + []nmagent.HomeAZFix{nmagent.HomeAZFixIPv6}, + false, + }, + { + "one with empty request", + nmagent.AzResponse{ + AppliedFixes: []nmagent.HomeAZFix{ + nmagent.HomeAZFixIPv6, + }, + }, + []nmagent.HomeAZFix{}, + true, + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + t.Parallel() + got := test.resp.ContainsFixes(test.fixes...)
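// A minimal sketch of the version gate in AzResponse.UnmarshalJSON above,
// mirrored locally so it runs standalone: apiVersion 0 (absent) and 2 are
// accepted, 2 additionally records the IPv6 fix, and anything else errors.
package main

import (
	"encoding/json"
	"fmt"
)

type azResponse struct {
	HomeAz    uint
	IPv6Fixed bool
}

func (az *azResponse) UnmarshalJSON(in []byte) error {
	var raw struct {
		HomeAz     uint `json:"homeAz"`
		APIVersion uint `json:"apiVersion"`
	}
	if err := json.Unmarshal(in, &raw); err != nil {
		return err
	}
	if raw.APIVersion != 0 && raw.APIVersion != 2 {
		return fmt.Errorf("invalid homeaz api version (must be 0 or 2): received %d", raw.APIVersion)
	}
	az.HomeAz = raw.HomeAz
	az.IPv6Fixed = raw.APIVersion == 2 // version 2 implies the IPv6 fix is applied
	return nil
}

func main() {
	var az azResponse
	fmt.Println(json.Unmarshal([]byte(`{"homeAz": 42, "apiVersion": 2}`), &az), az)
	fmt.Println(json.Unmarshal([]byte(`{"homeAz": 42, "apiVersion": 42}`), &az))
}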
+ + exp := test.exp + if got != exp { + t.Error("unexpected response from ContainsFixes: exp:", exp, "got:", got) + } + }) + } +} + +func TestUnmarshalAzResponse(t *testing.T) { + tests := []struct { + name string + in string + exp nmagent.AzResponse + shouldErr bool + }{ + { + "empty", + "{}", + nmagent.AzResponse{}, + false, + }, + { + "only homeaz", + `{"homeAz": 42}`, + nmagent.AzResponse{ + HomeAz: 42, + }, + false, + }, + { + "valid apiversion", + `{"homeAz": 42, "apiVersion": 0}`, + nmagent.AzResponse{ + HomeAz: 42, + }, + false, + }, + { + "valid apiversion ipv6", + `{"homeAz": 42, "apiVersion": 2}`, + nmagent.AzResponse{ + HomeAz: 42, + AppliedFixes: []nmagent.HomeAZFix{ + nmagent.HomeAZFixIPv6, + }, + }, + false, + }, + { + "invalid apiversion", + `{"homeAz": 42, "apiVersion": 42}`, + nmagent.AzResponse{}, + true, + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + var got nmagent.AzResponse + err := json.Unmarshal([]byte(test.in), &got) + if err != nil && !test.shouldErr { + t.Fatal("unexpected error unmarshaling JSON: err:", err) + } + + if err == nil && test.shouldErr { + t.Fatal("expected error but received none") + } + + exp := test.exp + if !cmp.Equal(got, exp) { + t.Error("received response differs from expected: diff:", cmp.Diff(got, exp)) + } + }) + } +} diff --git a/npm/azure-npm.yaml b/npm/azure-npm.yaml index 80a7a1a0f7..a19a1b974e 100644 --- a/npm/azure-npm.yaml +++ b/npm/azure-npm.yaml @@ -112,7 +112,6 @@ spec: - name: tmp mountPath: /tmp hostNetwork: true - hostUsers: false nodeSelector: kubernetes.io/os: linux volumes: @@ -173,5 +172,6 @@ data: "PlaceAzureChainFirst": false, "ApplyInBackground": true, "NetPolInBackground": true - } + }, + "LogLevel": "info" } diff --git a/npm/cacheencoder.go b/npm/cacheencoder.go index 208d0fa85e..3c926a1322 100644 --- a/npm/cacheencoder.go +++ b/npm/cacheencoder.go @@ -28,8 +28,9 @@ func CacheEncoder(nodeName string) json.Marshaler { cfg := npmconfig.DefaultConfig cfg.Toggles.EnableHTTPDebugAPI = true cfg.Toggles.EnableV2NPM = false + cfg.Toggles.EnableNPMLite = false // TODO test v2 NPM debug API when it's implemented - npMgr := NewNetworkPolicyManager(cfg, kubeInformer, &dpmocks.MockGenericDataplane{}, exec, npmVersion, fakeK8sVersion) + npMgr := NewNetworkPolicyManager(cfg, kubeInformer, kubeInformer, &dpmocks.MockGenericDataplane{}, exec, npmVersion, fakeK8sVersion) npMgr.NodeName = nodeName return npMgr } diff --git a/npm/cmd/start.go b/npm/cmd/start.go index 223751ba16..3a9e6467ed 100644 --- a/npm/cmd/start.go +++ b/npm/cmd/start.go @@ -20,6 +20,7 @@ import ( "github.com/Azure/azure-container-networking/npm/util" "github.com/spf13/cobra" "github.com/spf13/viper" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" k8sversion "k8s.io/apimachinery/pkg/version" "k8s.io/client-go/informers" @@ -56,7 +57,10 @@ func newStartNPMCmd() *cobra.Command { KubeConfigPath: viper.GetString(flagKubeConfigPath), } - return start(*config, flags) + // start is blocking, unless there's an error + err = start(*config, flags) + metrics.Close() + return err }, } @@ -115,14 +119,37 @@ func start(config npmconfig.Config, flags npmconfig.Flags) error { factor := rand.Float64() + 1 //nolint resyncPeriod := time.Duration(float64(minResyncPeriod.Nanoseconds()) * factor) klog.Infof("Resync period for NPM pod is set to %d.", int(resyncPeriod/time.Minute)) + factory := informers.NewSharedInformerFactory(clientset, resyncPeriod) + podFactory := factory // // 
Separate podFactory for different versions in npm and npm lite. + // npm-lite -> daemon set will listen to pods only in its own node + if config.Toggles.EnableNPMLite { + podFactory = informers.NewSharedInformerFactoryWithOptions( + clientset, + resyncPeriod, + informers.WithTweakListOptions(func(options *metav1.ListOptions) { + // Use field selector to filter pods based on their assigned node + klog.Infof("NPM agent is listening to pods only under its node") + options.FieldSelector = "spec.nodeName=" + models.GetNodeName() + }), + ) + } - k8sServerVersion := k8sServerVersion(clientset) + logLevel := config.LogLevel + if logLevel == "" { + logLevel = npmconfig.DefaultConfig.LogLevel + } + err = metrics.CreateTelemetryHandle(config.NPMVersion(), version, npm.GetAIMetadata(), logLevel) + if err != nil { + klog.Infof("CreateTelemetryHandle failed with error %v. AITelemetry is not initialized.", err) + } var dp dataplane.GenericDataplane stopChannel := wait.NeverStop if config.Toggles.EnableV2NPM { // update the dataplane config + npmV2DataplaneCfg.EnableNPMLite = config.Toggles.EnableNPMLite + npmV2DataplaneCfg.MaxBatchedACLsPerPod = config.MaxBatchedACLsPerPod npmV2DataplaneCfg.NetPolInBackground = config.Toggles.NetPolInBackground @@ -181,11 +208,9 @@ func start(config npmconfig.Config, flags npmconfig.Flags) error { } dp.RunPeriodicTasks() } - npMgr := npm.NewNetworkPolicyManager(config, factory, dp, exec.New(), version, k8sServerVersion) - err = metrics.CreateTelemetryHandle(config.NPMVersion(), version, npm.GetAIMetadata()) - if err != nil { - klog.Infof("CreateTelemetryHandle failed with error %v. AITelemetry is not initialized.", err) - } + + k8sServerVersion := k8sServerVersion(clientset) + npMgr := npm.NewNetworkPolicyManager(config, factory, podFactory, dp, exec.New(), version, k8sServerVersion) go restserver.NPMRestServerListenAndServe(config, npMgr) diff --git a/npm/cmd/start_daemon.go b/npm/cmd/start_daemon.go index d0af8f276e..1067ff0591 100644 --- a/npm/cmd/start_daemon.go +++ b/npm/cmd/start_daemon.go @@ -94,7 +94,11 @@ func startDaemon(config npmconfig.Config) error { return fmt.Errorf("failed to create dataplane: %w", err) } - err = metrics.CreateTelemetryHandle(config.NPMVersion(), version, npm.GetAIMetadata()) + logLevel := config.LogLevel + if logLevel == "" { + logLevel = npmconfig.DefaultConfig.LogLevel + } + err = metrics.CreateTelemetryHandle(config.NPMVersion(), version, npm.GetAIMetadata(), logLevel) if err != nil { klog.Infof("CreateTelemetryHandle failed with error %v. AITelemetry is not initialized.", err) } diff --git a/npm/cmd/start_server.go b/npm/cmd/start_server.go index 6137902d30..fdd863bbe8 100644 --- a/npm/cmd/start_server.go +++ b/npm/cmd/start_server.go @@ -113,7 +113,11 @@ func startControlplane(config npmconfig.Config, flags npmconfig.Flags) error { return fmt.Errorf("failed to create NPM controlplane manager: %w", err) } - err = metrics.CreateTelemetryHandle(config.NPMVersion(), version, npm.GetAIMetadata()) + logLevel := config.LogLevel + if logLevel == "" { + logLevel = npmconfig.DefaultConfig.LogLevel + } + err = metrics.CreateTelemetryHandle(config.NPMVersion(), version, npm.GetAIMetadata(), logLevel) if err != nil { klog.Infof("CreateTelemetryHandle failed with error %v. 
AITelemetry is not initialized.", err) } diff --git a/npm/config/config.go b/npm/config/config.go index 0bd45a35d7..c0a592c969 100644 --- a/npm/config/config.go +++ b/npm/config/config.go @@ -51,7 +51,11 @@ var DefaultConfig = Config{ ApplyInBackground: true, // NetPolInBackground is currently used in Linux to apply NetPol controller Add events in the background NetPolInBackground: true, + EnableNPMLite: false, }, + + // Setting LogLevel to "info" by default. Set to "debug" to get application insight logs (creates a listener that outputs diagnosticMessageWriter logs). + LogLevel: "info", } type GrpcServerConfig struct { @@ -81,6 +85,7 @@ type Config struct { MaxPendingNetPols int `json:"MaxPendingNetPols,omitempty"` NetPolInvervalInMilliseconds int `json:"NetPolInvervalInMilliseconds,omitempty"` Toggles Toggles `json:"Toggles,omitempty"` + LogLevel string `json:"LogLevel,omitempty"` } type Toggles struct { @@ -94,6 +99,7 @@ type Toggles struct { ApplyInBackground bool // NetPolInBackground NetPolInBackground bool + EnableNPMLite bool } type Flags struct { diff --git a/npm/controller/server.go b/npm/controller/server.go index 5ddc2d0c05..8980d8335e 100644 --- a/npm/controller/server.go +++ b/npm/controller/server.go @@ -91,7 +91,7 @@ func NewNetworkPolicyServer( n.NpmNamespaceCacheV2 = &controllersv2.NpmNamespaceCache{NsMap: make(map[string]*common.Namespace)} n.PodControllerV2 = controllersv2.NewPodController(n.PodInformer, dp, n.NpmNamespaceCacheV2) n.NamespaceControllerV2 = controllersv2.NewNamespaceController(n.NsInformer, dp, n.NpmNamespaceCacheV2) - n.NetPolControllerV2 = controllersv2.NewNetworkPolicyController(n.NpInformer, dp) + n.NetPolControllerV2 = controllersv2.NewNetworkPolicyController(n.NpInformer, dp, config.Toggles.EnableNPMLite) return n, nil } diff --git a/npm/examples/azure-npm-lite.yaml b/npm/examples/azure-npm-lite.yaml new file mode 100644 index 0000000000..cbd8666536 --- /dev/null +++ b/npm/examples/azure-npm-lite.yaml @@ -0,0 +1,177 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: azure-npm + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: EnsureExists +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: azure-npm + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: EnsureExists +rules: + - apiGroups: + - "" + resources: + - pods + - nodes + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: azure-npm-binding + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: EnsureExists +subjects: + - kind: ServiceAccount + name: azure-npm + namespace: kube-system +roleRef: + kind: ClusterRole + name: azure-npm + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: azure-npm + namespace: kube-system + labels: + app: azure-npm + addonmanager.kubernetes.io/mode: EnsureExists +spec: + selector: + matchLabels: + k8s-app: azure-npm + template: + metadata: + labels: + k8s-app: azure-npm + annotations: + scheduler.alpha.kubernetes.io/critical-pod: "" + azure.npm/scrapeable: "" + spec: + priorityClassName: system-node-critical + tolerations: + - operator: "Exists" + effect: NoExecute + - operator: "Exists" + effect: NoSchedule + - key: CriticalAddonsOnly + operator: Exists + containers: + - name: azure-npm + image: 
mcr.microsoft.com/containernetworking/azure-npm:v1.4.45.3 + resources: + limits: + cpu: 250m + memory: 300Mi + requests: + cpu: 250m + securityContext: + privileged: false + capabilities: + add: + - NET_ADMIN + readOnlyRootFilesystem: true + env: + - name: HOSTNAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: NPM_CONFIG + value: /etc/azure-npm/azure-npm.json + volumeMounts: + - name: log + mountPath: /var/log + - name: xtables-lock + mountPath: /run/xtables.lock + - name: protocols + mountPath: /etc/protocols + - name: azure-npm-config + mountPath: /etc/azure-npm + - name: tmp + mountPath: /tmp + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + volumes: + - name: log + hostPath: + path: /var/log + type: Directory + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: File + - name: protocols + hostPath: + path: /etc/protocols + type: File + - name: azure-npm-config + configMap: + name: azure-npm-config + - name: tmp + emptyDir: {} + serviceAccountName: azure-npm +--- +apiVersion: v1 +kind: Service +metadata: + name: npm-metrics-cluster-service + namespace: kube-system + labels: + app: npm-metrics +spec: + selector: + k8s-app: azure-npm + ports: + - port: 9000 + targetPort: 10091 +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: azure-npm-config + namespace: kube-system +data: + azure-npm.json: | + { + "ResyncPeriodInMinutes": 15, + "ListeningPort": 10091, + "ListeningAddress": "0.0.0.0", + "ApplyIntervalInMilliseconds": 500, + "ApplyMaxBatches": 100, + "MaxBatchedACLsPerPod": 30, + "NetPolInvervalInMilliseconds": 500, + "MaxPendingNetPols": 100, + "Toggles": { + "EnablePrometheusMetrics": true, + "EnablePprof": true, + "EnableHTTPDebugAPI": true, + "EnableV2NPM": true, + "PlaceAzureChainFirst": false, + "ApplyInBackground": true, + "NetPolInBackground": true, + "EnableNPMLite": true + } + } diff --git a/npm/examples/windows/azure-npm-lite-win.yaml b/npm/examples/windows/azure-npm-lite-win.yaml new file mode 100644 index 0000000000..12552cb45a --- /dev/null +++ b/npm/examples/windows/azure-npm-lite-win.yaml @@ -0,0 +1,159 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: azure-npm + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: EnsureExists +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: azure-npm + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: EnsureExists +rules: + - apiGroups: + - "" + resources: + - pods + - nodes + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: azure-npm-binding + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: EnsureExists +subjects: + - kind: ServiceAccount + name: azure-npm + namespace: kube-system +roleRef: + kind: ClusterRole + name: azure-npm + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: azure-npm-win + namespace: kube-system + labels: + app: azure-npm + addonmanager.kubernetes.io/mode: EnsureExists +spec: + selector: + matchLabels: + k8s-app: azure-npm + template: + metadata: + labels: + k8s-app: azure-npm + annotations: + azure.npm/scrapeable: "" + spec: + priorityClassName: system-node-critical + tolerations: + - operator: "Exists" + effect: NoExecute + - operator: "Exists" + effect: NoSchedule + - key: CriticalAddonsOnly + operator: 
Exists + securityContext: + windowsOptions: + hostProcess: true + runAsUserName: "NT AUTHORITY\\SYSTEM" + hostNetwork: true + containers: + - name: azure-npm + image: mcr.microsoft.com/containernetworking/azure-npm:v1.5.5 + command: ["powershell.exe"] + args: + [ + '$env:CONTAINER_SANDBOX_MOUNT_POINT/npm.exe', + "start", + ] + resources: + limits: + cpu: 250m + memory: 300Mi + requests: + cpu: 250m + env: + - name: HOSTNAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: NPM_CONFIG + value: .\\etc\\azure-npm\\azure-npm.json + volumeMounts: + - name: azure-npm-config + mountPath: .\\etc\\azure-npm + nodeSelector: + kubernetes.io/os: windows + volumes: + - name: azure-npm-config + configMap: + name: azure-npm-config + serviceAccountName: azure-npm +--- +apiVersion: v1 +kind: Service +metadata: + name: npm-metrics-cluster-service + namespace: kube-system + labels: + app: npm-metrics +spec: + selector: + k8s-app: azure-npm + ports: + - port: 9000 + targetPort: 10091 +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: azure-npm-config + namespace: kube-system +data: + azure-npm.json: | + { + "ResyncPeriodInMinutes": 15, + "ListeningPort": 10091, + "ListeningAddress": "0.0.0.0", + "ApplyIntervalInMilliseconds": 500, + "ApplyMaxBatches": 100, + "MaxBatchedACLsPerPod": 30, + "NetPolInvervalInMilliseconds": 500, + "MaxPendingNetPols": 100, + "Toggles": { + "EnablePrometheusMetrics": true, + "EnablePprof": true, + "EnableHTTPDebugAPI": true, + "EnableV2NPM": true, + "PlaceAzureChainFirst": false, + "ApplyIPSetsOnNeed": false, + "ApplyInBackground": true, + "NetPolInBackground": true, + "EnableNPMLite": true + } + } diff --git a/npm/iptm/iptm_test.go b/npm/iptm/iptm_test.go index 912be83495..630035f86e 100644 --- a/npm/iptm/iptm_test.go +++ b/npm/iptm/iptm_test.go @@ -13,95 +13,103 @@ import ( var ( initCalls = []testutils.TestCmd{ - {Cmd: []string{"iptables", "-w", "60", "-N", "AZURE-NPM"}}, - {Cmd: []string{"iptables", "-w", "60", "-N", "AZURE-NPM-ACCEPT"}}, - {Cmd: []string{"iptables", "-w", "60", "-N", "AZURE-NPM-INGRESS"}}, - {Cmd: []string{"iptables", "-w", "60", "-N", "AZURE-NPM-EGRESS"}}, - {Cmd: []string{"iptables", "-w", "60", "-N", "AZURE-NPM-INGRESS-PORT"}}, - {Cmd: []string{"iptables", "-w", "60", "-N", "AZURE-NPM-INGRESS-FROM"}}, - {Cmd: []string{"iptables", "-w", "60", "-N", "AZURE-NPM-EGRESS-PORT"}}, - {Cmd: []string{"iptables", "-w", "60", "-N", "AZURE-NPM-EGRESS-TO"}}, - {Cmd: []string{"iptables", "-w", "60", "-N", "AZURE-NPM-INGRESS-DROPS"}}, - {Cmd: []string{"iptables", "-w", "60", "-N", "AZURE-NPM-EGRESS-DROPS"}}, - {Cmd: []string{"iptables", "-w", "60", "-N", "AZURE-NPM"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-N", "AZURE-NPM"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-N", "AZURE-NPM-ACCEPT"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-N", "AZURE-NPM-INGRESS"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-N", "AZURE-NPM-EGRESS"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-N", "AZURE-NPM-INGRESS-PORT"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-N", "AZURE-NPM-INGRESS-FROM"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-N", "AZURE-NPM-EGRESS-PORT"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-N", "AZURE-NPM-EGRESS-TO"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-N", "AZURE-NPM-INGRESS-DROPS"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-N", "AZURE-NPM-EGRESS-DROPS"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-N", "AZURE-NPM"}}, // NOTE the following grep call stdouts are misleading. 
The first grep returns 3, and the second one returns "" (i.e. line 0) // a fix is coming for fakeexec stdout and exit code problems from piping commands (e.g. what we do with grep) - {Cmd: []string{"iptables", "-t", "filter", "-n", "--list", "FORWARD", "--line-numbers"}, Stdout: "3 "}, // THIS IS THE GREP CALL + {Cmd: []string{"iptables-nft", "-t", "filter", "-n", "--list", "FORWARD", "--line-numbers"}, Stdout: "3 "}, // THIS IS THE GREP CALL {Cmd: []string{"grep", "KUBE-SERVICES"}, Stdout: "4 "}, - {Cmd: []string{"iptables", "-w", "60", "-C", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, - {Cmd: []string{"iptables", "-t", "filter", "-n", "--list", "FORWARD", "--line-numbers"}, Stdout: "3 "}, // THIS IS THE GREP CALL + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, + {Cmd: []string{"iptables-nft", "-t", "filter", "-n", "--list", "FORWARD", "--line-numbers"}, Stdout: "3 "}, // THIS IS THE GREP CALL {Cmd: []string{"grep", "AZURE-NPM"}, Stdout: "4 "}, - {Cmd: []string{"iptables", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, - {Cmd: []string{"iptables", "-w", "60", "-I", "FORWARD", "3", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, - - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM", "-j", "AZURE-NPM-INGRESS"}}, // broken here - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM", "-j", "AZURE-NPM-EGRESS"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM", "-j", "AZURE-NPM-ACCEPT", "-m", "mark", "--mark", "0x3000", "-m", "comment", "--comment", "ACCEPT-on-INGRESS-and-EGRESS-mark-0x3000"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM", "-j", "AZURE-NPM-ACCEPT", "-m", "mark", "--mark", "0x2000", "-m", "comment", "--comment", "ACCEPT-on-INGRESS-mark-0x2000"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM", "-j", "AZURE-NPM-ACCEPT", "-m", "mark", "--mark", "0x1000", "-m", "comment", "--comment", "ACCEPT-on-EGRESS-mark-0x1000"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM", "-m", "state", "--state", "RELATED,ESTABLISHED", "-j", "ACCEPT", "-m", "comment", "--comment", "ACCEPT-on-connection-state"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM-ACCEPT", "-j", "MARK", "--set-mark", "0x0", "-m", "comment", "--comment", "Clear-AZURE-NPM-MARKS"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM-ACCEPT", "-j", "ACCEPT", "-m", "comment", "--comment", "ACCEPT-All-packets"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM-INGRESS", "-j", "AZURE-NPM-INGRESS-PORT"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM-INGRESS", "-j", "RETURN", "-m", "mark", "--mark", "0x2000", "-m", "comment", "--comment", "RETURN-on-INGRESS-mark-0x2000"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM-INGRESS", "-j", "AZURE-NPM-INGRESS-DROPS"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM-INGRESS-PORT", "-j", "RETURN", "-m", "mark", "--mark", "0x2000", "-m", "comment", "--comment", "RETURN-on-INGRESS-mark-0x2000"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-I", "FORWARD", "3", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, + + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM", "-j", "AZURE-NPM-INGRESS"}}, // broken here + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM", "-j", 
"AZURE-NPM-EGRESS"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM", "-j", "AZURE-NPM-ACCEPT", "-m", "mark", "--mark", "0x3000", "-m", "comment", "--comment", "ACCEPT-on-INGRESS-and-EGRESS-mark-0x3000"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM", "-j", "AZURE-NPM-ACCEPT", "-m", "mark", "--mark", "0x2000", "-m", "comment", "--comment", "ACCEPT-on-INGRESS-mark-0x2000"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM", "-j", "AZURE-NPM-ACCEPT", "-m", "mark", "--mark", "0x1000", "-m", "comment", "--comment", "ACCEPT-on-EGRESS-mark-0x1000"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM", "-m", "state", "--state", "RELATED,ESTABLISHED", "-j", "ACCEPT", "-m", "comment", "--comment", "ACCEPT-on-connection-state"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM-ACCEPT", "-j", "MARK", "--set-mark", "0x0", "-m", "comment", "--comment", "Clear-AZURE-NPM-MARKS"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM-ACCEPT", "-j", "ACCEPT", "-m", "comment", "--comment", "ACCEPT-All-packets"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM-INGRESS", "-j", "AZURE-NPM-INGRESS-PORT"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM-INGRESS", "-j", "RETURN", "-m", "mark", "--mark", "0x2000", "-m", "comment", "--comment", "RETURN-on-INGRESS-mark-0x2000"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM-INGRESS", "-j", "AZURE-NPM-INGRESS-DROPS"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM-INGRESS-PORT", "-j", "RETURN", "-m", "mark", "--mark", "0x2000", "-m", "comment", "--comment", "RETURN-on-INGRESS-mark-0x2000"}}, // ///////// - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM-INGRESS-PORT", "-j", "AZURE-NPM-INGRESS-FROM", "-m", "comment", "--comment", "ALL-JUMP-TO-AZURE-NPM-INGRESS-FROM"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM-EGRESS", "-j", "AZURE-NPM-EGRESS-PORT"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM-EGRESS", "-j", "RETURN", "-m", "mark", "--mark", "0x3000", "-m", "comment", "--comment", "RETURN-on-EGRESS-and-INGRESS-mark-0x3000"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM-EGRESS", "-j", "RETURN", "-m", "mark", "--mark", "0x1000", "-m", "comment", "--comment", "RETURN-on-EGRESS-mark-0x1000"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM-EGRESS", "-j", "AZURE-NPM-EGRESS-DROPS"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM-EGRESS-PORT", "-j", "RETURN", "-m", "mark", "--mark", "0x3000", "-m", "comment", "--comment", "RETURN-on-EGRESS-and-INGRESS-mark-0x3000"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM-EGRESS-PORT", "-j", "RETURN", "-m", "mark", "--mark", "0x1000", "-m", "comment", "--comment", "RETURN-on-EGRESS-mark-0x1000"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM-EGRESS-PORT", "-j", "AZURE-NPM-EGRESS-TO", "-m", "comment", "--comment", "ALL-JUMP-TO-AZURE-NPM-EGRESS-TO"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM-INGRESS-DROPS", "-j", "RETURN", "-m", "mark", "--mark", "0x2000", "-m", "comment", "--comment", "RETURN-on-INGRESS-mark-0x2000"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM-EGRESS-DROPS", "-j", "RETURN", "-m", "mark", "--mark", "0x3000", "-m", "comment", "--comment", "RETURN-on-EGRESS-and-INGRESS-mark-0x3000"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM-EGRESS-DROPS", "-j", "RETURN", "-m", "mark", "--mark", "0x1000", "-m", "comment", "--comment", 
"RETURN-on-EGRESS-mark-0x1000"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM-INGRESS-PORT", "-j", "AZURE-NPM-INGRESS-FROM", "-m", "comment", "--comment", "ALL-JUMP-TO-AZURE-NPM-INGRESS-FROM"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM-EGRESS", "-j", "AZURE-NPM-EGRESS-PORT"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM-EGRESS", "-j", "RETURN", "-m", "mark", "--mark", "0x3000", "-m", "comment", "--comment", "RETURN-on-EGRESS-and-INGRESS-mark-0x3000"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM-EGRESS", "-j", "RETURN", "-m", "mark", "--mark", "0x1000", "-m", "comment", "--comment", "RETURN-on-EGRESS-mark-0x1000"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM-EGRESS", "-j", "AZURE-NPM-EGRESS-DROPS"}}, + {Cmd: []string{ + "iptables-nft", "-w", "60", "-C", "AZURE-NPM-EGRESS-PORT", "-j", "RETURN", "-m", "mark", "--mark", "0x3000", "-m", "comment", "--comment", "RETURN-on-EGRESS-and-INGRESS-mark-0x3000", + }}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM-EGRESS-PORT", "-j", "RETURN", "-m", "mark", "--mark", "0x1000", "-m", "comment", "--comment", "RETURN-on-EGRESS-mark-0x1000"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM-EGRESS-PORT", "-j", "AZURE-NPM-EGRESS-TO", "-m", "comment", "--comment", "ALL-JUMP-TO-AZURE-NPM-EGRESS-TO"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM-INGRESS-DROPS", "-j", "RETURN", "-m", "mark", "--mark", "0x2000", "-m", "comment", "--comment", "RETURN-on-INGRESS-mark-0x2000"}}, + {Cmd: []string{ + "iptables-nft", "-w", "60", "-C", "AZURE-NPM-EGRESS-DROPS", "-j", "RETURN", "-m", "mark", "--mark", "0x3000", "-m", "comment", "--comment", "RETURN-on-EGRESS-and-INGRESS-mark-0x3000", + }}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM-EGRESS-DROPS", "-j", "RETURN", "-m", "mark", "--mark", "0x1000", "-m", "comment", "--comment", "RETURN-on-EGRESS-mark-0x1000"}}, } initWithJumpToAzureAtTopCalls = []testutils.TestCmd{ - {Cmd: []string{"iptables", "-w", "60", "-N", "AZURE-NPM"}}, - {Cmd: []string{"iptables", "-w", "60", "-N", "AZURE-NPM-ACCEPT"}}, - {Cmd: []string{"iptables", "-w", "60", "-N", "AZURE-NPM-INGRESS"}}, - {Cmd: []string{"iptables", "-w", "60", "-N", "AZURE-NPM-EGRESS"}}, - {Cmd: []string{"iptables", "-w", "60", "-N", "AZURE-NPM-INGRESS-PORT"}}, - {Cmd: []string{"iptables", "-w", "60", "-N", "AZURE-NPM-INGRESS-FROM"}}, - {Cmd: []string{"iptables", "-w", "60", "-N", "AZURE-NPM-EGRESS-PORT"}}, - {Cmd: []string{"iptables", "-w", "60", "-N", "AZURE-NPM-EGRESS-TO"}}, - {Cmd: []string{"iptables", "-w", "60", "-N", "AZURE-NPM-INGRESS-DROPS"}}, - {Cmd: []string{"iptables", "-w", "60", "-N", "AZURE-NPM-EGRESS-DROPS"}}, - {Cmd: []string{"iptables", "-w", "60", "-N", "AZURE-NPM"}}, - - {Cmd: []string{"iptables", "-w", "60", "-C", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}, ExitCode: 1}, - {Cmd: []string{"iptables", "-w", "60", "-I", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, - - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM", "-j", "AZURE-NPM-INGRESS"}}, // broken here - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM", "-j", "AZURE-NPM-EGRESS"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM", "-j", "AZURE-NPM-ACCEPT", "-m", "mark", "--mark", "0x3000", "-m", "comment", "--comment", "ACCEPT-on-INGRESS-and-EGRESS-mark-0x3000"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM", "-j", "AZURE-NPM-ACCEPT", "-m", "mark", "--mark", 
"0x2000", "-m", "comment", "--comment", "ACCEPT-on-INGRESS-mark-0x2000"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM", "-j", "AZURE-NPM-ACCEPT", "-m", "mark", "--mark", "0x1000", "-m", "comment", "--comment", "ACCEPT-on-EGRESS-mark-0x1000"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM", "-m", "state", "--state", "RELATED,ESTABLISHED", "-j", "ACCEPT", "-m", "comment", "--comment", "ACCEPT-on-connection-state"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM-ACCEPT", "-j", "MARK", "--set-mark", "0x0", "-m", "comment", "--comment", "Clear-AZURE-NPM-MARKS"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM-ACCEPT", "-j", "ACCEPT", "-m", "comment", "--comment", "ACCEPT-All-packets"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM-INGRESS", "-j", "AZURE-NPM-INGRESS-PORT"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM-INGRESS", "-j", "RETURN", "-m", "mark", "--mark", "0x2000", "-m", "comment", "--comment", "RETURN-on-INGRESS-mark-0x2000"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM-INGRESS", "-j", "AZURE-NPM-INGRESS-DROPS"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM-INGRESS-PORT", "-j", "RETURN", "-m", "mark", "--mark", "0x2000", "-m", "comment", "--comment", "RETURN-on-INGRESS-mark-0x2000"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-N", "AZURE-NPM"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-N", "AZURE-NPM-ACCEPT"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-N", "AZURE-NPM-INGRESS"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-N", "AZURE-NPM-EGRESS"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-N", "AZURE-NPM-INGRESS-PORT"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-N", "AZURE-NPM-INGRESS-FROM"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-N", "AZURE-NPM-EGRESS-PORT"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-N", "AZURE-NPM-EGRESS-TO"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-N", "AZURE-NPM-INGRESS-DROPS"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-N", "AZURE-NPM-EGRESS-DROPS"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-N", "AZURE-NPM"}}, + + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}, ExitCode: 1}, + {Cmd: []string{"iptables-nft", "-w", "60", "-I", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, + + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM", "-j", "AZURE-NPM-INGRESS"}}, // broken here + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM", "-j", "AZURE-NPM-EGRESS"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM", "-j", "AZURE-NPM-ACCEPT", "-m", "mark", "--mark", "0x3000", "-m", "comment", "--comment", "ACCEPT-on-INGRESS-and-EGRESS-mark-0x3000"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM", "-j", "AZURE-NPM-ACCEPT", "-m", "mark", "--mark", "0x2000", "-m", "comment", "--comment", "ACCEPT-on-INGRESS-mark-0x2000"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM", "-j", "AZURE-NPM-ACCEPT", "-m", "mark", "--mark", "0x1000", "-m", "comment", "--comment", "ACCEPT-on-EGRESS-mark-0x1000"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM", "-m", "state", "--state", "RELATED,ESTABLISHED", "-j", "ACCEPT", "-m", "comment", "--comment", "ACCEPT-on-connection-state"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM-ACCEPT", "-j", "MARK", "--set-mark", "0x0", "-m", "comment", "--comment", "Clear-AZURE-NPM-MARKS"}}, + {Cmd: 
[]string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM-ACCEPT", "-j", "ACCEPT", "-m", "comment", "--comment", "ACCEPT-All-packets"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM-INGRESS", "-j", "AZURE-NPM-INGRESS-PORT"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM-INGRESS", "-j", "RETURN", "-m", "mark", "--mark", "0x2000", "-m", "comment", "--comment", "RETURN-on-INGRESS-mark-0x2000"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM-INGRESS", "-j", "AZURE-NPM-INGRESS-DROPS"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM-INGRESS-PORT", "-j", "RETURN", "-m", "mark", "--mark", "0x2000", "-m", "comment", "--comment", "RETURN-on-INGRESS-mark-0x2000"}}, // ///////// - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM-INGRESS-PORT", "-j", "AZURE-NPM-INGRESS-FROM", "-m", "comment", "--comment", "ALL-JUMP-TO-AZURE-NPM-INGRESS-FROM"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM-EGRESS", "-j", "AZURE-NPM-EGRESS-PORT"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM-EGRESS", "-j", "RETURN", "-m", "mark", "--mark", "0x3000", "-m", "comment", "--comment", "RETURN-on-EGRESS-and-INGRESS-mark-0x3000"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM-EGRESS", "-j", "RETURN", "-m", "mark", "--mark", "0x1000", "-m", "comment", "--comment", "RETURN-on-EGRESS-mark-0x1000"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM-EGRESS", "-j", "AZURE-NPM-EGRESS-DROPS"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM-EGRESS-PORT", "-j", "RETURN", "-m", "mark", "--mark", "0x3000", "-m", "comment", "--comment", "RETURN-on-EGRESS-and-INGRESS-mark-0x3000"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM-EGRESS-PORT", "-j", "RETURN", "-m", "mark", "--mark", "0x1000", "-m", "comment", "--comment", "RETURN-on-EGRESS-mark-0x1000"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM-EGRESS-PORT", "-j", "AZURE-NPM-EGRESS-TO", "-m", "comment", "--comment", "ALL-JUMP-TO-AZURE-NPM-EGRESS-TO"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM-INGRESS-DROPS", "-j", "RETURN", "-m", "mark", "--mark", "0x2000", "-m", "comment", "--comment", "RETURN-on-INGRESS-mark-0x2000"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM-EGRESS-DROPS", "-j", "RETURN", "-m", "mark", "--mark", "0x3000", "-m", "comment", "--comment", "RETURN-on-EGRESS-and-INGRESS-mark-0x3000"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "AZURE-NPM-EGRESS-DROPS", "-j", "RETURN", "-m", "mark", "--mark", "0x1000", "-m", "comment", "--comment", "RETURN-on-EGRESS-mark-0x1000"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM-INGRESS-PORT", "-j", "AZURE-NPM-INGRESS-FROM", "-m", "comment", "--comment", "ALL-JUMP-TO-AZURE-NPM-INGRESS-FROM"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM-EGRESS", "-j", "AZURE-NPM-EGRESS-PORT"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM-EGRESS", "-j", "RETURN", "-m", "mark", "--mark", "0x3000", "-m", "comment", "--comment", "RETURN-on-EGRESS-and-INGRESS-mark-0x3000"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM-EGRESS", "-j", "RETURN", "-m", "mark", "--mark", "0x1000", "-m", "comment", "--comment", "RETURN-on-EGRESS-mark-0x1000"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM-EGRESS", "-j", "AZURE-NPM-EGRESS-DROPS"}}, + {Cmd: []string{ + "iptables-nft", "-w", "60", "-C", "AZURE-NPM-EGRESS-PORT", "-j", "RETURN", "-m", "mark", "--mark", "0x3000", "-m", "comment", "--comment", 
"RETURN-on-EGRESS-and-INGRESS-mark-0x3000", + }}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM-EGRESS-PORT", "-j", "RETURN", "-m", "mark", "--mark", "0x1000", "-m", "comment", "--comment", "RETURN-on-EGRESS-mark-0x1000"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM-EGRESS-PORT", "-j", "AZURE-NPM-EGRESS-TO", "-m", "comment", "--comment", "ALL-JUMP-TO-AZURE-NPM-EGRESS-TO"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM-INGRESS-DROPS", "-j", "RETURN", "-m", "mark", "--mark", "0x2000", "-m", "comment", "--comment", "RETURN-on-INGRESS-mark-0x2000"}}, + {Cmd: []string{ + "iptables-nft", "-w", "60", "-C", "AZURE-NPM-EGRESS-DROPS", "-j", "RETURN", "-m", "mark", "--mark", "0x3000", "-m", "comment", "--comment", "RETURN-on-EGRESS-and-INGRESS-mark-0x3000", + }}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "AZURE-NPM-EGRESS-DROPS", "-j", "RETURN", "-m", "mark", "--mark", "0x1000", "-m", "comment", "--comment", "RETURN-on-EGRESS-mark-0x1000"}}, } ) @@ -124,69 +132,69 @@ func TestUninitNpmChains(t *testing.T) { { name: "no v2 npm chains exist", calls: []testutils.TestCmd{ - {Cmd: []string{"iptables", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM"}}, - {Cmd: []string{"iptables", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, - {Cmd: []string{"iptables", "-w", "60", "-t", "filter", "-n", "-L"}, PipedToCommand: true}, + {Cmd: []string{"iptables-nft", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-t", "filter", "-n", "-L"}, PipedToCommand: true}, {Cmd: []string{"grep", "Chain AZURE-NPM"}, ExitCode: 1}, - {Cmd: []string{"iptables", "-w", "60", "-F", "AZURE-NPM"}}, - {Cmd: []string{"iptables", "-w", "60", "-F", "AZURE-NPM-ACCEPT"}}, - {Cmd: []string{"iptables", "-w", "60", "-F", "AZURE-NPM-INGRESS"}}, - {Cmd: []string{"iptables", "-w", "60", "-F", "AZURE-NPM-EGRESS"}}, - {Cmd: []string{"iptables", "-w", "60", "-F", "AZURE-NPM-INGRESS-PORT"}}, - {Cmd: []string{"iptables", "-w", "60", "-F", "AZURE-NPM-INGRESS-FROM"}}, - {Cmd: []string{"iptables", "-w", "60", "-F", "AZURE-NPM-EGRESS-PORT"}}, - {Cmd: []string{"iptables", "-w", "60", "-F", "AZURE-NPM-EGRESS-TO"}}, - {Cmd: []string{"iptables", "-w", "60", "-F", "AZURE-NPM-INGRESS-DROPS"}}, - {Cmd: []string{"iptables", "-w", "60", "-F", "AZURE-NPM-EGRESS-DROPS"}}, - {Cmd: []string{"iptables", "-w", "60", "-F", "AZURE-NPM-TARGET-SETS"}}, - {Cmd: []string{"iptables", "-w", "60", "-F", "AZURE-NPM-INRGESS-DROPS"}}, // can we remove this rule now? 
- {Cmd: []string{"iptables", "-w", "60", "-X", "AZURE-NPM"}}, - {Cmd: []string{"iptables", "-w", "60", "-X", "AZURE-NPM-ACCEPT"}}, - {Cmd: []string{"iptables", "-w", "60", "-X", "AZURE-NPM-INGRESS"}}, - {Cmd: []string{"iptables", "-w", "60", "-X", "AZURE-NPM-EGRESS"}}, - {Cmd: []string{"iptables", "-w", "60", "-X", "AZURE-NPM-INGRESS-PORT"}}, - {Cmd: []string{"iptables", "-w", "60", "-X", "AZURE-NPM-INGRESS-FROM"}}, - {Cmd: []string{"iptables", "-w", "60", "-X", "AZURE-NPM-EGRESS-PORT"}}, - {Cmd: []string{"iptables", "-w", "60", "-X", "AZURE-NPM-EGRESS-TO"}}, - {Cmd: []string{"iptables", "-w", "60", "-X", "AZURE-NPM-INGRESS-DROPS"}}, - {Cmd: []string{"iptables", "-w", "60", "-X", "AZURE-NPM-EGRESS-DROPS"}}, - {Cmd: []string{"iptables", "-w", "60", "-X", "AZURE-NPM-TARGET-SETS"}}, - {Cmd: []string{"iptables", "-w", "60", "-X", "AZURE-NPM-INRGESS-DROPS"}}, // can we delete this rule now? + {Cmd: []string{"iptables-nft", "-w", "60", "-F", "AZURE-NPM"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-F", "AZURE-NPM-ACCEPT"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-F", "AZURE-NPM-INGRESS"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-F", "AZURE-NPM-EGRESS"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-F", "AZURE-NPM-INGRESS-PORT"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-F", "AZURE-NPM-INGRESS-FROM"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-F", "AZURE-NPM-EGRESS-PORT"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-F", "AZURE-NPM-EGRESS-TO"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-F", "AZURE-NPM-INGRESS-DROPS"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-F", "AZURE-NPM-EGRESS-DROPS"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-F", "AZURE-NPM-TARGET-SETS"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-F", "AZURE-NPM-INRGESS-DROPS"}}, // can we remove this rule now? + {Cmd: []string{"iptables-nft", "-w", "60", "-X", "AZURE-NPM"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-X", "AZURE-NPM-ACCEPT"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-X", "AZURE-NPM-INGRESS"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-X", "AZURE-NPM-EGRESS"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-X", "AZURE-NPM-INGRESS-PORT"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-X", "AZURE-NPM-INGRESS-FROM"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-X", "AZURE-NPM-EGRESS-PORT"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-X", "AZURE-NPM-EGRESS-TO"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-X", "AZURE-NPM-INGRESS-DROPS"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-X", "AZURE-NPM-EGRESS-DROPS"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-X", "AZURE-NPM-TARGET-SETS"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-X", "AZURE-NPM-INRGESS-DROPS"}}, // can we delete this rule now? 
 			},
 		},
 		{
			name: "v2 chain exists",
 			calls: []testutils.TestCmd{
-				{Cmd: []string{"iptables", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM"}},
-				{Cmd: []string{"iptables", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}},
-				{Cmd: []string{"iptables", "-w", "60", "-t", "filter", "-n", "-L"}, PipedToCommand: true},
+				{Cmd: []string{"iptables-nft", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM"}},
+				{Cmd: []string{"iptables-nft", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}},
+				{Cmd: []string{"iptables-nft", "-w", "60", "-t", "filter", "-n", "-L"}, PipedToCommand: true},
 				{Cmd: []string{"grep", "Chain AZURE-NPM"}, Stdout: "Chain AZURE-NPM-INGRESS-ALLOW-MARK (1 references)\n"},
-				{Cmd: []string{"iptables", "-w", "60", "-F", "AZURE-NPM"}},
-				{Cmd: []string{"iptables", "-w", "60", "-F", "AZURE-NPM-ACCEPT"}},
-				{Cmd: []string{"iptables", "-w", "60", "-F", "AZURE-NPM-INGRESS"}},
-				{Cmd: []string{"iptables", "-w", "60", "-F", "AZURE-NPM-EGRESS"}},
-				{Cmd: []string{"iptables", "-w", "60", "-F", "AZURE-NPM-INGRESS-PORT"}},
-				{Cmd: []string{"iptables", "-w", "60", "-F", "AZURE-NPM-INGRESS-FROM"}},
-				{Cmd: []string{"iptables", "-w", "60", "-F", "AZURE-NPM-EGRESS-PORT"}},
-				{Cmd: []string{"iptables", "-w", "60", "-F", "AZURE-NPM-EGRESS-TO"}},
-				{Cmd: []string{"iptables", "-w", "60", "-F", "AZURE-NPM-INGRESS-DROPS"}},
-				{Cmd: []string{"iptables", "-w", "60", "-F", "AZURE-NPM-EGRESS-DROPS"}},
-				{Cmd: []string{"iptables", "-w", "60", "-F", "AZURE-NPM-TARGET-SETS"}},
-				{Cmd: []string{"iptables", "-w", "60", "-F", "AZURE-NPM-INRGESS-DROPS"}}, // can we remove this rule now?
-				{Cmd: []string{"iptables", "-w", "60", "-F", "AZURE-NPM-INGRESS-ALLOW-MARK"}},
-				{Cmd: []string{"iptables", "-w", "60", "-X", "AZURE-NPM"}},
-				{Cmd: []string{"iptables", "-w", "60", "-X", "AZURE-NPM-ACCEPT"}},
-				{Cmd: []string{"iptables", "-w", "60", "-X", "AZURE-NPM-INGRESS"}},
-				{Cmd: []string{"iptables", "-w", "60", "-X", "AZURE-NPM-EGRESS"}},
-				{Cmd: []string{"iptables", "-w", "60", "-X", "AZURE-NPM-INGRESS-PORT"}},
-				{Cmd: []string{"iptables", "-w", "60", "-X", "AZURE-NPM-INGRESS-FROM"}},
-				{Cmd: []string{"iptables", "-w", "60", "-X", "AZURE-NPM-EGRESS-PORT"}},
-				{Cmd: []string{"iptables", "-w", "60", "-X", "AZURE-NPM-EGRESS-TO"}},
-				{Cmd: []string{"iptables", "-w", "60", "-X", "AZURE-NPM-INGRESS-DROPS"}},
-				{Cmd: []string{"iptables", "-w", "60", "-X", "AZURE-NPM-EGRESS-DROPS"}},
-				{Cmd: []string{"iptables", "-w", "60", "-X", "AZURE-NPM-TARGET-SETS"}},
-				{Cmd: []string{"iptables", "-w", "60", "-X", "AZURE-NPM-INRGESS-DROPS"}}, // can we delete this rule now?
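+				// The grep Stdout above simulates discovering the v2 chain
+				// AZURE-NPM-INGRESS-ALLOW-MARK, so this case expects exactly one
+				// extra -F/-X pair compared to the "no v2 npm chains exist" case.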
- {Cmd: []string{"iptables", "-w", "60", "-X", "AZURE-NPM-INGRESS-ALLOW-MARK"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-F", "AZURE-NPM"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-F", "AZURE-NPM-ACCEPT"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-F", "AZURE-NPM-INGRESS"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-F", "AZURE-NPM-EGRESS"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-F", "AZURE-NPM-INGRESS-PORT"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-F", "AZURE-NPM-INGRESS-FROM"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-F", "AZURE-NPM-EGRESS-PORT"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-F", "AZURE-NPM-EGRESS-TO"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-F", "AZURE-NPM-INGRESS-DROPS"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-F", "AZURE-NPM-EGRESS-DROPS"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-F", "AZURE-NPM-TARGET-SETS"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-F", "AZURE-NPM-INRGESS-DROPS"}}, // can we remove this rule now? + {Cmd: []string{"iptables-nft", "-w", "60", "-F", "AZURE-NPM-INGRESS-ALLOW-MARK"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-X", "AZURE-NPM"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-X", "AZURE-NPM-ACCEPT"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-X", "AZURE-NPM-INGRESS"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-X", "AZURE-NPM-EGRESS"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-X", "AZURE-NPM-INGRESS-PORT"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-X", "AZURE-NPM-INGRESS-FROM"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-X", "AZURE-NPM-EGRESS-PORT"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-X", "AZURE-NPM-EGRESS-TO"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-X", "AZURE-NPM-INGRESS-DROPS"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-X", "AZURE-NPM-EGRESS-DROPS"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-X", "AZURE-NPM-TARGET-SETS"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-X", "AZURE-NPM-INRGESS-DROPS"}}, // can we delete this rule now? 
+ {Cmd: []string{"iptables-nft", "-w", "60", "-X", "AZURE-NPM-INGRESS-ALLOW-MARK"}}, }, }, // currently can't test multiple v2 chains existing because AllCurrentAzureChains() returns a map and fexec needs the exact order of commands @@ -228,9 +236,9 @@ func TestCheckAndAddForwardChain(t *testing.T) { name: "add missing jump to azure at top", args: args{ calls: []testutils.TestCmd{ - {Cmd: []string{"iptables", "-w", "60", "-N", "AZURE-NPM"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}, ExitCode: 1}, // "rule does not exist" - {Cmd: []string{"iptables", "-w", "60", "-I", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-N", "AZURE-NPM"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}, ExitCode: 1}, // "rule does not exist" + {Cmd: []string{"iptables-nft", "-w", "60", "-I", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, }, placeAzureChainFirst: util.PlaceAzureChainFirst, }, @@ -239,11 +247,11 @@ func TestCheckAndAddForwardChain(t *testing.T) { name: "add missing jump to azure after kube services", args: args{ calls: []testutils.TestCmd{ - {Cmd: []string{"iptables", "-w", "60", "-N", "AZURE-NPM"}}, - {Cmd: []string{"iptables", "-t", "filter", "-n", "--list", "FORWARD", "--line-numbers"}, Stdout: "3 "}, // THIS IS THE GREP CALL STDOUT - {Cmd: []string{"grep", "KUBE-SERVICES"}, ExitCode: 1}, // THIS IS THE EXIT CODE FOR CHECK command below ("rule doesn't exist") - {Cmd: []string{"iptables", "-w", "60", "-C", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, - {Cmd: []string{"iptables", "-w", "60", "-I", "FORWARD", "4", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-N", "AZURE-NPM"}}, + {Cmd: []string{"iptables-nft", "-t", "filter", "-n", "--list", "FORWARD", "--line-numbers"}, Stdout: "3 "}, // THIS IS THE GREP CALL STDOUT + {Cmd: []string{"grep", "KUBE-SERVICES"}, ExitCode: 1}, // THIS IS THE EXIT CODE FOR CHECK command below ("rule doesn't exist") + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-I", "FORWARD", "4", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, }, placeAzureChainFirst: util.PlaceAzureChainAfterKubeServices, }, @@ -252,9 +260,9 @@ func TestCheckAndAddForwardChain(t *testing.T) { name: "jump to azure already at top", args: args{ calls: []testutils.TestCmd{ - {Cmd: []string{"iptables", "-w", "60", "-N", "AZURE-NPM"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, - {Cmd: []string{"iptables", "-t", "filter", "-n", "--list", "FORWARD", "--line-numbers"}, Stdout: "1 "}, // THIS IS THE GREP CALL STDOUT + {Cmd: []string{"iptables-nft", "-w", "60", "-N", "AZURE-NPM"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, + {Cmd: []string{"iptables-nft", "-t", "filter", "-n", "--list", "FORWARD", "--line-numbers"}, Stdout: "1 "}, // THIS IS THE GREP CALL STDOUT {Cmd: []string{"grep", "AZURE-NPM"}}, }, placeAzureChainFirst: util.PlaceAzureChainFirst, @@ -264,11 +272,11 @@ func TestCheckAndAddForwardChain(t *testing.T) { name: "jump to azure already after kube services", args: args{ calls: 
[]testutils.TestCmd{ - {Cmd: []string{"iptables", "-w", "60", "-N", "AZURE-NPM"}}, - {Cmd: []string{"iptables", "-t", "filter", "-n", "--list", "FORWARD", "--line-numbers"}, Stdout: "3 "}, // THIS IS THE GREP CALL STDOUT + {Cmd: []string{"iptables-nft", "-w", "60", "-N", "AZURE-NPM"}}, + {Cmd: []string{"iptables-nft", "-t", "filter", "-n", "--list", "FORWARD", "--line-numbers"}, Stdout: "3 "}, // THIS IS THE GREP CALL STDOUT {Cmd: []string{"grep", "KUBE-SERVICES"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}, Stdout: "4 "}, // THIS IS THE GREP CALL STDOUT - {Cmd: []string{"iptables", "-t", "filter", "-n", "--list", "FORWARD", "--line-numbers"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}, Stdout: "4 "}, // THIS IS THE GREP CALL STDOUT + {Cmd: []string{"iptables-nft", "-t", "filter", "-n", "--list", "FORWARD", "--line-numbers"}}, {Cmd: []string{"grep", "AZURE-NPM"}}, }, placeAzureChainFirst: util.PlaceAzureChainAfterKubeServices, @@ -278,12 +286,12 @@ func TestCheckAndAddForwardChain(t *testing.T) { name: "move jump to azure to top", args: args{ calls: []testutils.TestCmd{ - {Cmd: []string{"iptables", "-w", "60", "-N", "AZURE-NPM"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, - {Cmd: []string{"iptables", "-t", "filter", "-n", "--list", "FORWARD", "--line-numbers"}, Stdout: "5 "}, // THIS IS THE GREP CALL STDOUT + {Cmd: []string{"iptables-nft", "-w", "60", "-N", "AZURE-NPM"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, + {Cmd: []string{"iptables-nft", "-t", "filter", "-n", "--list", "FORWARD", "--line-numbers"}, Stdout: "5 "}, // THIS IS THE GREP CALL STDOUT {Cmd: []string{"grep", "AZURE-NPM"}}, - {Cmd: []string{"iptables", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, - {Cmd: []string{"iptables", "-w", "60", "-I", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-I", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, }, placeAzureChainFirst: util.PlaceAzureChainFirst, }, @@ -292,14 +300,14 @@ func TestCheckAndAddForwardChain(t *testing.T) { name: "move jump to azure after kube services", args: args{ calls: []testutils.TestCmd{ - {Cmd: []string{"iptables", "-w", "60", "-N", "AZURE-NPM"}}, - {Cmd: []string{"iptables", "-t", "filter", "-n", "--list", "FORWARD", "--line-numbers"}, Stdout: "3 "}, // THIS IS THE GREP CALL STDOUT + {Cmd: []string{"iptables-nft", "-w", "60", "-N", "AZURE-NPM"}}, + {Cmd: []string{"iptables-nft", "-t", "filter", "-n", "--list", "FORWARD", "--line-numbers"}, Stdout: "3 "}, // THIS IS THE GREP CALL STDOUT {Cmd: []string{"grep", "KUBE-SERVICES"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}, Stdout: "2 "}, // THIS IS THE GREP CALL STDOUT - {Cmd: []string{"iptables", "-t", "filter", "-n", "--list", "FORWARD", "--line-numbers"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}, Stdout: "2 "}, // THIS IS THE GREP CALL STDOUT + {Cmd: []string{"iptables-nft", "-t", "filter", "-n", 
"--list", "FORWARD", "--line-numbers"}}, {Cmd: []string{"grep", "AZURE-NPM"}}, - {Cmd: []string{"iptables", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, - {Cmd: []string{"iptables", "-w", "60", "-I", "FORWARD", "3", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-I", "FORWARD", "3", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, }, placeAzureChainFirst: util.PlaceAzureChainAfterKubeServices, }, @@ -319,7 +327,7 @@ func TestCheckAndAddForwardChain(t *testing.T) { func TestExists(t *testing.T) { calls := []testutils.TestCmd{ - {Cmd: []string{"iptables", "-w", "60", "-C", "FORWARD", "-j", "ACCEPT"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "FORWARD", "-j", "ACCEPT"}}, } fexec := testutils.GetFakeExecWithScripts(calls) @@ -341,7 +349,7 @@ func TestExists(t *testing.T) { func TestAddChain(t *testing.T) { calls := []testutils.TestCmd{ - {Cmd: []string{"iptables", "-w", "60", "-N", "TEST-CHAIN"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-N", "TEST-CHAIN"}}, } fexec := testutils.GetFakeExecWithScripts(calls) @@ -355,8 +363,8 @@ func TestAddChain(t *testing.T) { func TestDeleteChain(t *testing.T) { calls := []testutils.TestCmd{ - {Cmd: []string{"iptables", "-w", "60", "-N", "TEST-CHAIN"}}, - {Cmd: []string{"iptables", "-w", "60", "-X", "TEST-CHAIN"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-N", "TEST-CHAIN"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-X", "TEST-CHAIN"}}, } fexec := testutils.GetFakeExecWithScripts(calls) @@ -374,7 +382,7 @@ func TestDeleteChain(t *testing.T) { func TestAdd(t *testing.T) { calls := []testutils.TestCmd{ - {Cmd: []string{"iptables", "-w", "60", "-I", "FORWARD", "-j", "REJECT"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-I", "FORWARD", "-j", "REJECT"}}, } fexec := testutils.GetFakeExecWithScripts(calls) @@ -420,9 +428,9 @@ func testPrometheusMetrics(t *testing.T, expectedNumACLRules, expectedExecCount func TestDelete(t *testing.T) { calls := []testutils.TestCmd{ - {Cmd: []string{"iptables", "-w", "60", "-I", "FORWARD", "-j", "REJECT"}}, - {Cmd: []string{"iptables", "-w", "60", "-C", "FORWARD", "-j", "REJECT"}}, - {Cmd: []string{"iptables", "-w", "60", "-D", "FORWARD", "-j", "REJECT"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-I", "FORWARD", "-j", "REJECT"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-C", "FORWARD", "-j", "REJECT"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-D", "FORWARD", "-j", "REJECT"}}, } fexec := testutils.GetFakeExecWithScripts(calls) @@ -450,7 +458,7 @@ func TestDelete(t *testing.T) { func TestRun(t *testing.T) { calls := []testutils.TestCmd{ - {Cmd: []string{"iptables", "-w", "60", "-N", "TEST-CHAIN"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-N", "TEST-CHAIN"}}, } fexec := testutils.GetFakeExecWithScripts(calls) @@ -508,7 +516,7 @@ func TestGetChainLineNumber(t *testing.T) { tt := tt t.Run(tt.name, func(t *testing.T) { calls := []testutils.TestCmd{ - {Cmd: []string{"iptables", "-t", "filter", "-n", "--list", "FORWARD", "--line-numbers"}, PipedToCommand: true}, + {Cmd: []string{"iptables-nft", "-t", "filter", "-n", "--list", "FORWARD", "--line-numbers"}, PipedToCommand: true}, {Cmd: []string{"grep", "AZURE-NPM"}, Stdout: tt.stdout, ExitCode: tt.exitCode}, } fexec := testutils.GetFakeExecWithScripts(calls) diff --git a/npm/linux.Dockerfile 
b/npm/linux.Dockerfile
index ef03905016..3f5d05f8f2 100644
--- a/npm/linux.Dockerfile
+++ b/npm/linux.Dockerfile
@@ -1,4 +1,4 @@
-FROM mcr.microsoft.com/oss/go/microsoft/golang:1.21 AS builder
+FROM mcr.microsoft.com/oss/go/microsoft/golang:1.23-azurelinux3.0 AS builder
 ARG VERSION
 ARG NPM_AI_PATH
 ARG NPM_AI_ID
@@ -6,8 +6,8 @@ WORKDIR /usr/local/src
 COPY . .
 RUN CGO_ENABLED=0 go build -v -o /usr/local/bin/azure-npm -ldflags "-X main.version="$VERSION" -X "$NPM_AI_PATH"="$NPM_AI_ID"" -gcflags="-dwarflocationlists=true" npm/cmd/*.go
 
-FROM mcr.microsoft.com/mirror/docker/library/ubuntu:20.04 as linux
+FROM mcr.microsoft.com/mirror/docker/library/ubuntu:24.04 as linux
 COPY --from=builder /usr/local/bin/azure-npm /usr/bin/azure-npm
-RUN apt-get update && apt-get install -y iptables ipset ca-certificates && apt-get autoremove -y && apt-get clean
+RUN apt-get update && apt-get install -y libsystemd0=255.4-1ubuntu8.8 libudev1=255.4-1ubuntu8.8 libpam-modules=1.5.3-5ubuntu5.4 libpam-modules-bin=1.5.3-5ubuntu5.4 libpam-runtime=1.5.3-5ubuntu5.4 libpam0g=1.5.3-5ubuntu5.4 iptables ipset ca-certificates && apt-get autoremove -y && apt-get clean
 RUN chmod +x /usr/bin/azure-npm
 ENTRYPOINT ["/usr/bin/azure-npm", "start"]
diff --git a/npm/metrics/ai-utils.go b/npm/metrics/ai-utils.go
index 20de3009ff..b6b15f1422 100644
--- a/npm/metrics/ai-utils.go
+++ b/npm/metrics/ai-utils.go
@@ -3,6 +3,7 @@ package metrics
 import (
 	"fmt"
 	"strconv"
+	"strings"
 	"time"
 
 	"github.com/Azure/azure-container-networking/aitelemetry"
@@ -11,6 +12,8 @@ import (
 	"k8s.io/klog"
 )
 
+const telemetryCloseWaitTimeSeconds = 10
+
 var (
 	th         aitelemetry.TelemetryHandle
 	npmVersion int
@@ -19,15 +22,18 @@ var (
 )
 
 // CreateTelemetryHandle creates a handler to initialize AI telemetry
-func CreateTelemetryHandle(npmVersionNum int, imageVersion, aiMetadata string) error {
+func CreateTelemetryHandle(npmVersionNum int, imageVersion, aiMetadata, logLevel string) error {
 	npmVersion = npmVersionNum
+	debugMode := strings.EqualFold(logLevel, "debug")
+	klog.Infof("LogLevel is %s. Debug mode is set to %v.", logLevel, debugMode)
+
 	aiConfig := aitelemetry.AIConfig{
 		AppName:                   util.AzureNpmFlag,
 		AppVersion:                imageVersion,
 		BatchSize:                 util.BatchSizeInBytes,
 		BatchInterval:             util.BatchIntervalInSecs,
 		RefreshTimeout:            util.RefreshTimeoutInSecs,
-		DebugMode:                 util.DebugMode,
+		DebugMode:                 debugMode,
 		GetEnvRetryCount:          util.GetEnvRetryCount,
 		GetEnvRetryWaitTimeInSecs: util.GetEnvRetryWaitTimeInSecs,
 	}
@@ -54,6 +60,15 @@ func CreateTelemetryHandle(npmVersionNum int, imageVersion, aiMetadata string) e
 	return nil
 }
 
+// Close cleans up the telemetry handle, which effectively waits for all telemetry data to be sent
+func Close() {
+	if th == nil {
+		return
+	}
+
+	th.Close(telemetryCloseWaitTimeSeconds)
+}
+
 // SendErrorLogAndMetric sends a metric through AI telemetry and sends a log to the Kusto Messages table
 func SendErrorLogAndMetric(operationID int, format string, args ...interface{}) {
 	// Send error metrics
diff --git a/npm/npm.go b/npm/npm.go
index ef3554aa55..914809b437 100644
--- a/npm/npm.go
+++ b/npm/npm.go
@@ -36,6 +36,8 @@ type NetworkPolicyManager struct {
 
 	Dataplane dataplane.GenericDataplane
 
+	NpmLiteToggle bool
+
 	// ipsMgr is shared by all controllers. Thus, only one ipsMgr is created for simple management
 	// and uses a lock to avoid unintentional race conditions in IpsetManager.
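+	// (ipsm is the v1 path; when EnableV2NPM is set, controllers manage
+	// ipsets through the injected GenericDataplane instead.)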
ipsMgr *ipsm.IpsetManager @@ -58,6 +60,7 @@ type NetworkPolicyManager struct { // NewNetworkPolicyManager creates a NetworkPolicyManager func NewNetworkPolicyManager(config npmconfig.Config, informerFactory informers.SharedInformerFactory, + podFactory informers.SharedInformerFactory, dp dataplane.GenericDataplane, exec utilexec.Interface, npmVersion string, @@ -65,13 +68,15 @@ func NewNetworkPolicyManager(config npmconfig.Config, klog.Infof("API server version: %+v AI metadata %+v", k8sServerVersion, aiMetadata) npMgr := &NetworkPolicyManager{ - config: config, - Dataplane: dp, + config: config, + Dataplane: dp, + NpmLiteToggle: config.Toggles.EnableNPMLite, Informers: models.Informers{ - InformerFactory: informerFactory, - PodInformer: informerFactory.Core().V1().Pods(), - NsInformer: informerFactory.Core().V1().Namespaces(), - NpInformer: informerFactory.Networking().V1().NetworkPolicies(), + InformerFactory: informerFactory, + PodInformerFactory: podFactory, + PodInformer: podFactory.Core().V1().Pods(), + NsInformer: informerFactory.Core().V1().Namespaces(), + NpInformer: informerFactory.Networking().V1().NetworkPolicies(), }, AzureConfig: models.AzureConfig{ K8sServerVersion: k8sServerVersion, @@ -87,7 +92,7 @@ func NewNetworkPolicyManager(config npmconfig.Config, npMgr.PodControllerV2 = controllersv2.NewPodController(npMgr.PodInformer, dp, npMgr.NpmNamespaceCacheV2) npMgr.NamespaceControllerV2 = controllersv2.NewNamespaceController(npMgr.NsInformer, dp, npMgr.NpmNamespaceCacheV2) // Question(jungukcho): Is config.Toggles.PlaceAzureChainFirst needed for v2? - npMgr.NetPolControllerV2 = controllersv2.NewNetworkPolicyController(npMgr.NpInformer, dp) + npMgr.NetPolControllerV2 = controllersv2.NewNetworkPolicyController(npMgr.NpInformer, dp, config.Toggles.EnableNPMLite) return npMgr } @@ -187,6 +192,11 @@ func (npMgr *NetworkPolicyManager) Start(config npmconfig.Config, stopCh <-chan // Starts all informers manufactured by npMgr's informerFactory. npMgr.InformerFactory.Start(stopCh) + // npm lite + if npMgr.NpmLiteToggle { + npMgr.PodInformerFactory.Start(stopCh) + } + // Wait for the initial sync of local cache. 
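+	// WaitForCacheSync blocks until the pod informer's initial List has
+	// completed, so controllers never run against a half-populated cache.
+	// A SharedInformerFactory only starts informers it created, which is why
+	// the NPM lite PodInformerFactory needs its own Start call above.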
if !cache.WaitForCacheSync(stopCh, npMgr.PodInformer.Informer().HasSynced) { return fmt.Errorf("Pod informer error: %w", models.ErrInformerSyncFailure) diff --git a/npm/pkg/controlplane/controllers/v2/namespaceController.go b/npm/pkg/controlplane/controllers/v2/namespaceController.go index ca85ea3958..654609ced2 100644 --- a/npm/pkg/controlplane/controllers/v2/namespaceController.go +++ b/npm/pkg/controlplane/controllers/v2/namespaceController.go @@ -164,14 +164,17 @@ func (nsc *NamespaceController) Run(stopCh <-chan struct{}) { defer utilruntime.HandleCrash() defer nsc.workqueue.ShutDown() - klog.Info("Starting Namespace controller\n") - klog.Info("Starting workers") + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Info("Starting Namespace controller\n") + // klog.Info("Starting workers") // Launch workers to process namespace resources go wait.Until(nsc.runWorker, time.Second, stopCh) - klog.Info("Started workers") + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Info("Started workers") <-stopCh - klog.Info("Shutting down workers") + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Info("Shutting down workers") } func (nsc *NamespaceController) runWorker() { @@ -209,7 +212,8 @@ func (nsc *NamespaceController) processNextWorkItem() bool { // Finally, if no error occurs we Forget this item so it does not // get queued again until another change happens. nsc.workqueue.Forget(obj) - klog.Infof("Successfully synced '%s'", key) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("Successfully synced '%s'", key) return nil }(obj) if err != nil { @@ -321,7 +325,8 @@ func (nsc *NamespaceController) syncAddNamespace(nsObj *corev1.Namespace) error // Add the namespace to its label's ipset list. 
for nsLabelKey, nsLabelVal := range nsObj.ObjectMeta.Labels { nsLabelKeyValue := util.GetIpSetFromLabelKV(nsLabelKey, nsLabelVal) - klog.Infof("Adding namespace %s to ipset list %s and %s", nsObj.ObjectMeta.Name, nsLabelKey, nsLabelKeyValue) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("Adding namespace %s to ipset list %s and %s", nsObj.ObjectMeta.Name, nsLabelKey, nsLabelKeyValue) labelIPSets := []*ipsets.IPSetMetadata{ ipsets.NewIPSetMetadata(nsLabelKey, ipsets.KeyLabelOfNamespace), ipsets.NewIPSetMetadata(nsLabelKeyValue, ipsets.KeyValueLabelOfNamespace), @@ -344,7 +349,8 @@ func (nsc *NamespaceController) syncAddNamespace(nsObj *corev1.Namespace) error func (nsc *NamespaceController) syncUpdateNamespace(newNsObj *corev1.Namespace) (metrics.OperationKind, error) { var err error newNsName, newNsLabel := newNsObj.ObjectMeta.Name, newNsObj.ObjectMeta.Labels - klog.Infof("NAMESPACE UPDATING:\n namespace: [%s/%v]", newNsName, newNsLabel) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("NAMESPACE UPDATING:\n namespace: [%s/%v]", newNsName, newNsLabel) // If previous syncAddNamespace failed for some reasons // before caching npm namespace object or syncUpdateNamespace is called due to namespace creation event, @@ -373,7 +379,8 @@ func (nsc *NamespaceController) syncUpdateNamespace(newNsObj *corev1.Namespace) } toBeRemoved := []*ipsets.IPSetMetadata{ipsets.NewIPSetMetadata(newNsName, ipsets.Namespace)} - klog.Infof("Deleting namespace %s from ipset list %s", newNsName, nsLabelVal) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("Deleting namespace %s from ipset list %s", newNsName, nsLabelVal) if err = nsc.dp.RemoveFromList(labelSet, toBeRemoved); err != nil { metrics.SendErrorLogAndMetric(util.NSID, "[UpdateNamespace] Error: failed to delete namespace %s from ipset list %s with err: %v", newNsName, nsLabelVal, err) return metrics.UpdateOp, fmt.Errorf("failed to remove from list during sync update namespace with err %w", err) @@ -389,7 +396,8 @@ func (nsc *NamespaceController) syncUpdateNamespace(newNsObj *corev1.Namespace) // Add the namespace to its label's ipset list. for _, nsLabelVal := range addToIPSets { - klog.Infof("Adding namespace %s to ipset list %s", newNsName, nsLabelVal) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("Adding namespace %s to ipset list %s", newNsName, nsLabelVal) var labelSet []*ipsets.IPSetMetadata if util.IsKeyValueLabelSetName(nsLabelVal) { @@ -422,13 +430,14 @@ func (nsc *NamespaceController) syncUpdateNamespace(newNsObj *corev1.Namespace) // cleanDeletedNamespace handles deleting namespace from ipset. 
func (nsc *NamespaceController) cleanDeletedNamespace(cachedNsKey string) error { - klog.Infof("NAMESPACE DELETING: [%s]", cachedNsKey) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("NAMESPACE DELETING: [%s]", cachedNsKey) cachedNsObj, exists := nsc.npmNamespaceCache.NsMap[cachedNsKey] if !exists { return nil } - - klog.Infof("NAMESPACE DELETING cached labels: [%s/%v]", cachedNsKey, cachedNsObj.LabelsMap) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("NAMESPACE DELETING cached labels: [%s/%v]", cachedNsKey, cachedNsObj.LabelsMap) var err error toBeDeletedNs := []*ipsets.IPSetMetadata{ipsets.NewIPSetMetadata(cachedNsKey, ipsets.Namespace)} @@ -436,7 +445,8 @@ func (nsc *NamespaceController) cleanDeletedNamespace(cachedNsKey string) error for nsLabelKey, nsLabelVal := range cachedNsObj.LabelsMap { labelKey := ipsets.NewIPSetMetadata(nsLabelKey, ipsets.KeyLabelOfNamespace) - klog.Infof("Deleting namespace %s from ipset list %s", cachedNsKey, labelKey) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("Deleting namespace %s from ipset list %s", cachedNsKey, labelKey) if err = nsc.dp.RemoveFromList(labelKey, toBeDeletedNs); err != nil { metrics.SendErrorLogAndMetric(util.NSID, "[DeleteNamespace] Error: failed to delete namespace %s from ipset list %s with err: %v", cachedNsKey, labelKey, err) return fmt.Errorf("failed to clean deleted namespace when deleting key with err %w", err) @@ -444,7 +454,8 @@ func (nsc *NamespaceController) cleanDeletedNamespace(cachedNsKey string) error labelIpsetName := util.GetIpSetFromLabelKV(nsLabelKey, nsLabelVal) labelKeyValue := ipsets.NewIPSetMetadata(labelIpsetName, ipsets.KeyValueLabelOfNamespace) - klog.Infof("Deleting namespace %s from ipset list %s", cachedNsKey, labelIpsetName) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("Deleting namespace %s from ipset list %s", cachedNsKey, labelIpsetName) if err = nsc.dp.RemoveFromList(labelKeyValue, toBeDeletedNs); err != nil { metrics.SendErrorLogAndMetric(util.NSID, "[DeleteNamespace] Error: failed to delete namespace %s from ipset list %s with err: %v", cachedNsKey, labelIpsetName, err) return fmt.Errorf("failed to clean deleted namespace when deleting key value with err %w", err) diff --git a/npm/pkg/controlplane/controllers/v2/networkPolicyController.go b/npm/pkg/controlplane/controllers/v2/networkPolicyController.go index c8baf98efd..31ed83b506 100644 --- a/npm/pkg/controlplane/controllers/v2/networkPolicyController.go +++ b/npm/pkg/controlplane/controllers/v2/networkPolicyController.go @@ -31,10 +31,11 @@ var ( type NetworkPolicyController struct { sync.RWMutex - netPolLister netpollister.NetworkPolicyLister - workqueue workqueue.RateLimitingInterface - rawNpSpecMap map[string]*networkingv1.NetworkPolicySpec // Key is / - dp dataplane.GenericDataplane + netPolLister netpollister.NetworkPolicyLister + workqueue workqueue.RateLimitingInterface + rawNpSpecMap map[string]*networkingv1.NetworkPolicySpec // Key is / + dp dataplane.GenericDataplane + npmLiteToggle bool } func (c *NetworkPolicyController) GetCache() map[string]*networkingv1.NetworkPolicySpec { @@ -43,12 +44,13 @@ func (c *NetworkPolicyController) GetCache() map[string]*networkingv1.NetworkPol return c.rawNpSpecMap } -func NewNetworkPolicyController(npInformer 
networkinginformers.NetworkPolicyInformer, dp dataplane.GenericDataplane) *NetworkPolicyController { +func NewNetworkPolicyController(npInformer networkinginformers.NetworkPolicyInformer, dp dataplane.GenericDataplane, npmLiteToggle bool) *NetworkPolicyController { netPolController := &NetworkPolicyController{ - netPolLister: npInformer.Lister(), - workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "NetworkPolicy"), - rawNpSpecMap: make(map[string]*networkingv1.NetworkPolicySpec), - dp: dp, + netPolLister: npInformer.Lister(), + workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "NetworkPolicy"), + rawNpSpecMap: make(map[string]*networkingv1.NetworkPolicySpec), + dp: dp, + npmLiteToggle: npmLiteToggle, } npInformer.Informer().AddEventHandler( @@ -146,12 +148,15 @@ func (c *NetworkPolicyController) Run(stopCh <-chan struct{}) { defer utilruntime.HandleCrash() defer c.workqueue.ShutDown() - klog.Infof("Starting Network Policy worker") + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("Starting Network Policy worker") go wait.Until(c.runWorker, time.Second, stopCh) - klog.Infof("Started Network Policy worker") + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("Started Network Policy worker") <-stopCh - klog.Info("Shutting down Network Policy workers") + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Info("Shutting down Network Policy workers") } func (c *NetworkPolicyController) runWorker() { @@ -188,7 +193,8 @@ func (c *NetworkPolicyController) processNextWorkItem() bool { // Finally, if no error occurs we Forget this item so it does not // get queued again until another change happens. 
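The recurring TODOs above propose replacing these informational klog calls with debug-level logging under Zap. A hedged sketch of what that might look like; the injected logger field and its wiring are hypothetical, not part of this change.

package main

import "go.uber.org/zap"

// logSynced sketches the debug-level replacement for the commented-out klog lines.
// A *zap.Logger would be plumbed into each controller; that wiring is assumed here.
func logSynced(logger *zap.Logger, key string) {
	// Emitted only when the logger is configured at debug level or lower,
	// so per-item sync logs stop flooding production output.
	logger.Debug("successfully synced", zap.String("key", key))
}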
c.workqueue.Forget(obj) - klog.Infof("Successfully synced '%s'", key) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("Successfully synced '%s'", key) return nil }(obj) if err != nil { @@ -283,7 +289,7 @@ func (c *NetworkPolicyController) syncAddAndUpdateNetPol(netPolObj *networkingv1 } // install translated rules into kernel - npmNetPolObj, err := translation.TranslatePolicy(netPolObj) + npmNetPolObj, err := translation.TranslatePolicy(netPolObj, c.npmLiteToggle) if err != nil { if isUnsupportedWindowsTranslationErr(err) { klog.Warningf("NetworkPolicy %s in namespace %s is not translated because it has unsupported translated features of Windows: %s", diff --git a/npm/pkg/controlplane/controllers/v2/networkPolicyController_test.go b/npm/pkg/controlplane/controllers/v2/networkPolicyController_test.go index 32fadf21f6..d14f6f67f1 100644 --- a/npm/pkg/controlplane/controllers/v2/networkPolicyController_test.go +++ b/npm/pkg/controlplane/controllers/v2/networkPolicyController_test.go @@ -11,6 +11,7 @@ import ( "github.com/Azure/azure-container-networking/npm/metrics/promutil" "github.com/Azure/azure-container-networking/npm/pkg/dataplane" dpmocks "github.com/Azure/azure-container-networking/npm/pkg/dataplane/mocks" + "github.com/Azure/azure-container-networking/npm/util" gomock "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" @@ -32,7 +33,8 @@ type netPolFixture struct { kubeobjects []runtime.Object netPolController *NetworkPolicyController - kubeInformer kubeinformers.SharedInformerFactory + + kubeInformer kubeinformers.SharedInformerFactory } func newNetPolFixture(t *testing.T) *netPolFixture { @@ -44,11 +46,11 @@ func newNetPolFixture(t *testing.T) *netPolFixture { return f } -func (f *netPolFixture) newNetPolController(_ chan struct{}, dp dataplane.GenericDataplane) { +func (f *netPolFixture) newNetPolController(_ chan struct{}, dp dataplane.GenericDataplane, npmLiteToggle bool) { kubeclient := k8sfake.NewSimpleClientset(f.kubeobjects...) 
f.kubeInformer = kubeinformers.NewSharedInformerFactory(kubeclient, noResyncPeriodFunc()) - f.netPolController = NewNetworkPolicyController(f.kubeInformer.Networking().V1().NetworkPolicies(), dp) + f.netPolController = NewNetworkPolicyController(f.kubeInformer.Networking().V1().NetworkPolicies(), dp, npmLiteToggle) for _, netPol := range f.netPolLister { err := f.kubeInformer.Networking().V1().NetworkPolicies().Informer().GetIndexer().Add(netPol) @@ -74,6 +76,11 @@ func createNetPol() *networkingv1.NetworkPolicy { Namespace: "test-nwpolicy", }, Spec: networkingv1.NetworkPolicySpec{ + PolicyTypes: []networkingv1.PolicyType{ + networkingv1.PolicyTypeIngress, + networkingv1.PolicyTypeEgress, + }, + Ingress: []networkingv1.NetworkPolicyIngressRule{ { From: []networkingv1.NetworkPolicyPeer{ @@ -111,6 +118,54 @@ func createNetPol() *networkingv1.NetworkPolicy { } } +func createNetPolNpmLite() *networkingv1.NetworkPolicy { + tcp := corev1.ProtocolTCP + port8000 := intstr.FromInt(8000) + return &networkingv1.NetworkPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "allow-ingress", + Namespace: "test-nwpolicy", + }, + Spec: networkingv1.NetworkPolicySpec{ + PolicyTypes: []networkingv1.PolicyType{ + networkingv1.PolicyTypeIngress, + networkingv1.PolicyTypeEgress, + }, + + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + From: []networkingv1.NetworkPolicyPeer{ + { + IPBlock: &networkingv1.IPBlock{ + CIDR: "0.0.0.0/0", + }, + }, + }, + Ports: []networkingv1.NetworkPolicyPort{{ + Protocol: &tcp, + Port: &port8000, + }}, + }, + }, + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + To: []networkingv1.NetworkPolicyPeer{ + { + IPBlock: &networkingv1.IPBlock{ + CIDR: "0.0.0.0/0", + }, + }, + }, + Ports: []networkingv1.NetworkPolicyPort{{ + Protocol: &tcp, + Port: &intstr.IntOrString{IntVal: 8000}, // namedPort + }}, + }, + }, + }, + } +} + func addNetPol(f *netPolFixture, netPolObj *networkingv1.NetworkPolicy) { // simulate "network policy" add event and add network policy object to sharedInformer cache f.netPolController.addNetworkPolicy(netPolObj) @@ -247,19 +302,28 @@ func TestAddMultipleNetworkPolicies(t *testing.T) { defer ctrl.Finish() dp := dpmocks.NewMockGenericDataplane(ctrl) - f.newNetPolController(stopCh, dp) - - dp.EXPECT().UpdatePolicy(gomock.Any()).Times(2) + f.newNetPolController(stopCh, dp, false) + var testCases []expectedNetPolValues + + if util.IsWindowsDP() { + dp.EXPECT().UpdatePolicy(gomock.Any()).Times(0) + // named ports are not allowed on windows + testCases = []expectedNetPolValues{ + {0, 0, netPolPromVals{0, 0, 0, 0}}, + } + } else { + dp.EXPECT().UpdatePolicy(gomock.Any()).Times(2) + testCases = []expectedNetPolValues{ + {2, 0, netPolPromVals{2, 2, 0, 0}}, + } + } addNetPol(f, netPolObj1) addNetPol(f, netPolObj2) // already exists (will be a no-op) addNetPol(f, netPolObj1) - testCases := []expectedNetPolValues{ - {2, 0, netPolPromVals{2, 2, 0, 0}}, - } checkNetPolTestResult("TestAddMultipleNetPols", f, testCases) } @@ -275,19 +339,55 @@ func TestAddNetworkPolicy(t *testing.T) { defer ctrl.Finish() dp := dpmocks.NewMockGenericDataplane(ctrl) - f.newNetPolController(stopCh, dp) + f.newNetPolController(stopCh, dp, false) + + var testCases []expectedNetPolValues + + if util.IsWindowsDP() { + dp.EXPECT().UpdatePolicy(gomock.Any()).Times(0) + // named ports are not allowed on windows + testCases = []expectedNetPolValues{ + {0, 0, netPolPromVals{0, 0, 0, 0}}, + } + } else { + dp.EXPECT().UpdatePolicy(gomock.Any()).Times(1) + + testCases = []expectedNetPolValues{ + {1, 0, 
netPolPromVals{1, 1, 0, 0}}, + } + } + addNetPol(f, netPolObj) + + checkNetPolTestResult("TestAddNetPol", f, testCases) +} + +func TestAddNetworkPolicyWithNumericPort(t *testing.T) { + netPolObj := createNetPol() + netPolObj.Spec.Egress[0].Ports[0].Port = &intstr.IntOrString{IntVal: 8000} + f := newNetPolFixture(t) + f.netPolLister = append(f.netPolLister, netPolObj) + f.kubeobjects = append(f.kubeobjects, netPolObj) + stopCh := make(chan struct{}) + defer close(stopCh) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + dp := dpmocks.NewMockGenericDataplane(ctrl) + f.newNetPolController(stopCh, dp, false) + + var testCases []expectedNetPolValues dp.EXPECT().UpdatePolicy(gomock.Any()).Times(1) addNetPol(f, netPolObj) - testCases := []expectedNetPolValues{ + testCases = []expectedNetPolValues{ {1, 0, netPolPromVals{1, 1, 0, 0}}, } checkNetPolTestResult("TestAddNetPol", f, testCases) } -func TestDeleteNetworkPolicy(t *testing.T) { +func TestAddNetworkPolicyWithNPMLite_Failure(t *testing.T) { netPolObj := createNetPol() f := newNetPolFixture(t) @@ -299,15 +399,74 @@ func TestDeleteNetworkPolicy(t *testing.T) { defer ctrl.Finish() dp := dpmocks.NewMockGenericDataplane(ctrl) - f.newNetPolController(stopCh, dp) + f.newNetPolController(stopCh, dp, true) + + dp.EXPECT().UpdatePolicy(gomock.Any()).Times(0) + + addNetPol(f, netPolObj) + testCases := []expectedNetPolValues{ + {0, 0, netPolPromVals{0, 0, 0, 0}}, + } + + checkNetPolTestResult("TestAddNetPol", f, testCases) +} + +func TestAddNetworkPolicyWithNPMLite(t *testing.T) { + netPolObj := createNetPolNpmLite() + + f := newNetPolFixture(t) + f.netPolLister = append(f.netPolLister, netPolObj) + f.kubeobjects = append(f.kubeobjects, netPolObj) + stopCh := make(chan struct{}) + defer close(stopCh) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + dp := dpmocks.NewMockGenericDataplane(ctrl) + f.newNetPolController(stopCh, dp, true) dp.EXPECT().UpdatePolicy(gomock.Any()).Times(1) - dp.EXPECT().RemovePolicy(gomock.Any()).Times(1) - deleteNetPol(t, f, netPolObj, DeletedFinalStateknownObject) + addNetPol(f, netPolObj) testCases := []expectedNetPolValues{ - {0, 0, netPolPromVals{0, 1, 0, 1}}, + {1, 0, netPolPromVals{1, 1, 0, 0}}, } + + checkNetPolTestResult("TestAddNetPol", f, testCases) +} + +func TestDeleteNetworkPolicy(t *testing.T) { + netPolObj := createNetPol() + + f := newNetPolFixture(t) + f.netPolLister = append(f.netPolLister, netPolObj) + f.kubeobjects = append(f.kubeobjects, netPolObj) + stopCh := make(chan struct{}) + defer close(stopCh) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + dp := dpmocks.NewMockGenericDataplane(ctrl) + f.newNetPolController(stopCh, dp, false) + + var testCases []expectedNetPolValues + + if util.IsWindowsDP() { + dp.EXPECT().UpdatePolicy(gomock.Any()).Times(0) + dp.EXPECT().RemovePolicy(gomock.Any()).Times(0) + + testCases = []expectedNetPolValues{ + {0, 0, netPolPromVals{0, 0, 0, 0}}, + } + } else { + dp.EXPECT().UpdatePolicy(gomock.Any()).Times(1) + dp.EXPECT().RemovePolicy(gomock.Any()).Times(1) + + testCases = []expectedNetPolValues{ + {0, 0, netPolPromVals{0, 1, 0, 1}}, + } + } + deleteNetPol(t, f, netPolObj, DeletedFinalStateknownObject) checkNetPolTestResult("TestDelNetPol", f, testCases) } @@ -323,7 +482,7 @@ func TestDeleteNetworkPolicyWithTombstone(t *testing.T) { defer ctrl.Finish() dp := dpmocks.NewMockGenericDataplane(ctrl) - f.newNetPolController(stopCh, dp) + f.newNetPolController(stopCh, dp, false) netPolKey := getKey(netPolObj, t) tombstone := 
cache.DeletedFinalStateUnknown{ @@ -350,15 +509,27 @@ func TestDeleteNetworkPolicyWithTombstoneAfterAddingNetworkPolicy(t *testing.T) defer ctrl.Finish() dp := dpmocks.NewMockGenericDataplane(ctrl) - f.newNetPolController(stopCh, dp) + f.newNetPolController(stopCh, dp, false) - dp.EXPECT().UpdatePolicy(gomock.Any()).Times(1) - dp.EXPECT().RemovePolicy(gomock.Any()).Times(1) + var testCases []expectedNetPolValues - deleteNetPol(t, f, netPolObj, DeletedFinalStateUnknownObject) - testCases := []expectedNetPolValues{ - {0, 0, netPolPromVals{0, 1, 0, 1}}, + if util.IsWindowsDP() { + dp.EXPECT().UpdatePolicy(gomock.Any()).Times(0) + dp.EXPECT().RemovePolicy(gomock.Any()).Times(0) + + testCases = []expectedNetPolValues{ + {0, 0, netPolPromVals{0, 0, 0, 0}}, + } + } else { + dp.EXPECT().UpdatePolicy(gomock.Any()).Times(1) + dp.EXPECT().RemovePolicy(gomock.Any()).Times(1) + + testCases = []expectedNetPolValues{ + {0, 0, netPolPromVals{0, 1, 0, 1}}, + } } + deleteNetPol(t, f, netPolObj, DeletedFinalStateUnknownObject) + checkNetPolTestResult("TestDeleteNetworkPolicyWithTombstoneAfterAddingNetworkPolicy", f, testCases) } @@ -376,18 +547,29 @@ func TestUpdateNetworkPolicy(t *testing.T) { defer ctrl.Finish() dp := dpmocks.NewMockGenericDataplane(ctrl) - f.newNetPolController(stopCh, dp) + f.newNetPolController(stopCh, dp, false) newNetPolObj := oldNetPolObj.DeepCopy() // oldNetPolObj.ResourceVersion value is "0" newRV, _ := strconv.Atoi(oldNetPolObj.ResourceVersion) newNetPolObj.ResourceVersion = fmt.Sprintf("%d", newRV+1) - dp.EXPECT().UpdatePolicy(gomock.Any()).Times(1) + var testCases []expectedNetPolValues - updateNetPol(t, f, oldNetPolObj, newNetPolObj) - testCases := []expectedNetPolValues{ - {1, 0, netPolPromVals{1, 1, 0, 0}}, + if util.IsWindowsDP() { + dp.EXPECT().UpdatePolicy(gomock.Any()).Times(0) + + testCases = []expectedNetPolValues{ + {0, 0, netPolPromVals{0, 0, 0, 0}}, + } + } else { + dp.EXPECT().UpdatePolicy(gomock.Any()).Times(1) + + testCases = []expectedNetPolValues{ + {1, 0, netPolPromVals{1, 1, 0, 0}}, + } } + updateNetPol(t, f, oldNetPolObj, newNetPolObj) + checkNetPolTestResult("TestUpdateNetPol", f, testCases) } @@ -403,7 +585,7 @@ func TestLabelUpdateNetworkPolicy(t *testing.T) { defer ctrl.Finish() dp := dpmocks.NewMockGenericDataplane(ctrl) - f.newNetPolController(stopCh, dp) + f.newNetPolController(stopCh, dp, false) newNetPolObj := oldNetPolObj.DeepCopy() // update podSelctor in a new network policy field @@ -416,12 +598,23 @@ func TestLabelUpdateNetworkPolicy(t *testing.T) { // oldNetPolObj.ResourceVersion value is "0" newRV, _ := strconv.Atoi(oldNetPolObj.ResourceVersion) newNetPolObj.ResourceVersion = fmt.Sprintf("%d", newRV+1) - dp.EXPECT().UpdatePolicy(gomock.Any()).Times(2) - updateNetPol(t, f, oldNetPolObj, newNetPolObj) + var testCases []expectedNetPolValues - testCases := []expectedNetPolValues{ - {1, 0, netPolPromVals{1, 1, 1, 0}}, + if util.IsWindowsDP() { + dp.EXPECT().UpdatePolicy(gomock.Any()).Times(0) + + testCases = []expectedNetPolValues{ + {0, 0, netPolPromVals{0, 0, 0, 0}}, + } + } else { + dp.EXPECT().UpdatePolicy(gomock.Any()).Times(2) + + testCases = []expectedNetPolValues{ + {1, 0, netPolPromVals{1, 1, 1, 0}}, + } } + updateNetPol(t, f, oldNetPolObj, newNetPolObj) + checkNetPolTestResult("TestUpdateNetPol", f, testCases) } diff --git a/npm/pkg/controlplane/controllers/v2/podController.go b/npm/pkg/controlplane/controllers/v2/podController.go index 47b4f0c296..3a3e193058 100644 --- a/npm/pkg/controlplane/controllers/v2/podController.go +++ 
b/npm/pkg/controlplane/controllers/v2/podController.go @@ -170,7 +170,8 @@ func (c *PodController) deletePod(obj interface{}) { } } - klog.Infof("[POD DELETE EVENT] for %s in %s", podObj.Name, podObj.Namespace) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("[POD DELETE EVENT] for %s in %s", podObj.Name, podObj.Namespace) if isHostNetworkPod(podObj) { return } @@ -191,12 +192,15 @@ func (c *PodController) Run(stopCh <-chan struct{}) { defer utilruntime.HandleCrash() defer c.workqueue.ShutDown() - klog.Infof("Starting Pod worker") + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("Starting Pod worker") go wait.Until(c.runWorker, time.Second, stopCh) - klog.Info("Started Pod workers") + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Info("Started Pod workers") <-stopCh - klog.Info("Shutting down Pod workers") + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Info("Shutting down Pod workers") } func (c *PodController) runWorker() { @@ -234,7 +238,8 @@ func (c *PodController) processNextWorkItem() bool { // Finally, if no error occurs we Forget this item so it does not // get queued again until another change happens. c.workqueue.Forget(obj) - klog.Infof("Successfully synced '%s'", key) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("Successfully synced '%s'", key) return nil }(obj) if err != nil { @@ -347,8 +352,9 @@ func (c *PodController) syncPod(key string) error { } func (c *PodController) syncAddedPod(podObj *corev1.Pod) error { - klog.Infof("POD CREATING: [%s/%s/%s/%s/%+v/%s]", string(podObj.GetUID()), podObj.Namespace, - podObj.Name, podObj.Spec.NodeName, podObj.Labels, podObj.Status.PodIP) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("POD CREATING: [%s/%s/%s/%s/%+v/%s]", string(podObj.GetUID()), podObj.Namespace, + // podObj.Name, podObj.Spec.NodeName, podObj.Labels, podObj.Status.PodIP) if !util.IsIPV4(podObj.Status.PodIP) { msg := fmt.Sprintf("[syncAddedPod] warning: ADD POD [%s/%s/%s/%+v] ignored as the PodIP is not valid ipv4 address. ip: [%s]", podObj.Namespace, @@ -369,7 +375,8 @@ func (c *PodController) syncAddedPod(podObj *corev1.Pod) error { namespaceSet := []*ipsets.IPSetMetadata{ipsets.NewIPSetMetadata(podObj.Namespace, ipsets.Namespace)} // Add the pod ip information into namespace's ipset. 
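A small sketch of the dataplane call this sync path drives, using the metadata constructors visible elsewhere in this diff; the pod key, IP, and node name are made-up values.

package main

import (
	"fmt"

	"github.com/Azure/azure-container-networking/npm/pkg/dataplane"
	"github.com/Azure/azure-container-networking/npm/pkg/dataplane/ipsets"
)

// addPodToNamespaceSet mirrors the AddToSets call pattern in syncAddedPod.
func addPodToNamespaceSet(dp dataplane.GenericDataplane) error {
	// NewPodMetadata takes (podKey, podIP, nodeName), as used in this file.
	podMetadata := dataplane.NewPodMetadata("default/my-pod", "10.240.0.5", "aks-node-1")
	namespaceSet := []*ipsets.IPSetMetadata{ipsets.NewIPSetMetadata("default", ipsets.Namespace)}
	if err := dp.AddToSets(namespaceSet, podMetadata); err != nil {
		return fmt.Errorf("failed to add pod to namespace ipset: %w", err)
	}
	return nil
}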
- klog.Infof("Adding pod %s (ip : %s) to ipset %s", podKey, podObj.Status.PodIP, podObj.Namespace) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("Adding pod %s (ip : %s) to ipset %s", podKey, podObj.Status.PodIP, podObj.Namespace) if err = c.dp.AddToSets(namespaceSet, podMetadata); err != nil { return fmt.Errorf("[syncAddedPod] Error: failed to add pod to namespace ipset with err: %w", err) } @@ -387,8 +394,9 @@ func (c *PodController) syncAddedPod(podObj *corev1.Pod) error { targetSetKeyValue := ipsets.NewIPSetMetadata(labelKeyValue, ipsets.KeyValueLabelOfPod) allSets := []*ipsets.IPSetMetadata{targetSetKey, targetSetKeyValue} - klog.Infof("Creating ipsets %+v and %+v if they do not exist", targetSetKey, targetSetKeyValue) - klog.Infof("Adding pod %s (ip : %s) to ipset %s and %s", podKey, npmPodObj.PodIP, labelKey, labelKeyValue) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("Creating ipsets %+v and %+v if they do not exist", targetSetKey, targetSetKeyValue) + // klog.Infof("Adding pod %s (ip : %s) to ipset %s and %s", podKey, npmPodObj.PodIP, labelKey, labelKeyValue) if err = c.dp.AddToSets(allSets, podMetadata); err != nil { return fmt.Errorf("[syncAddedPod] Error: failed to add pod to label ipset with err: %w", err) } @@ -396,7 +404,8 @@ func (c *PodController) syncAddedPod(podObj *corev1.Pod) error { } // Add pod's named ports from its ipset. - klog.Infof("Adding named port ipsets") + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("Adding named port ipsets") containerPorts := common.GetContainerPortList(podObj) if err = c.manageNamedPortIpsets(containerPorts, podKey, npmPodObj.PodIP, podObj.Spec.NodeName, addNamedPort); err != nil { return fmt.Errorf("[syncAddedPod] Error: failed to add pod to named port ipset with err: %w", err) @@ -430,7 +439,8 @@ func (c *PodController) syncAddAndUpdatePod(newPodObj *corev1.Pod) (metrics.Oper c.npmNamespaceCache.Unlock() cachedNpmPod, exists := c.podMap[podKey] - klog.Infof("[syncAddAndUpdatePod] updating Pod with key %s", podKey) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("[syncAddAndUpdatePod] updating Pod with key %s", podKey) // No cached npmPod exists. start adding the pod in a cache if !exists { return metrics.CreateOp, c.syncAddedPod(newPodObj) @@ -446,15 +456,18 @@ func (c *PodController) syncAddAndUpdatePod(newPodObj *corev1.Pod) (metrics.Oper // NPM should clean up existing references of cached pod obj and its IP. // then, re-add new pod obj. 
if cachedNpmPod.PodIP != newPodObj.Status.PodIP { - klog.Infof("Pod (Namespace:%s, Name:%s, newUid:%s), has cachedPodIp:%s which is different from PodIp:%s", - newPodObj.Namespace, newPodObj.Name, string(newPodObj.UID), cachedNpmPod.PodIP, newPodObj.Status.PodIP) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("Pod (Namespace:%s, Name:%s, newUid:%s), has cachedPodIp:%s which is different from PodIp:%s", + // newPodObj.Namespace, newPodObj.Name, string(newPodObj.UID), cachedNpmPod.PodIP, newPodObj.Status.PodIP) - klog.Infof("Deleting cached Pod with key:%s first due to IP Mistmatch", podKey) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("Deleting cached Pod with key:%s first due to IP Mistmatch", podKey) if er := c.cleanUpDeletedPod(podKey); er != nil { return metrics.UpdateOp, er } - klog.Infof("Adding back Pod with key:%s after IP Mistmatch", podKey) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("Adding back Pod with key:%s after IP Mistmatch", podKey) return metrics.UpdateOp, c.syncAddedPod(newPodObj) } @@ -468,7 +481,8 @@ func (c *PodController) syncAddAndUpdatePod(newPodObj *corev1.Pod) (metrics.Oper cachedPodMetadata := dataplane.NewPodMetadata(podKey, cachedNpmPod.PodIP, newPodMetadata.NodeName) // Delete the pod from its label's ipset. for _, removeIPSetName := range deleteFromIPSets { - klog.Infof("Deleting pod %s (ip : %s) from ipset %s", podKey, cachedNpmPod.PodIP, removeIPSetName) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("Deleting pod %s (ip : %s) from ipset %s", podKey, cachedNpmPod.PodIP, removeIPSetName) var toRemoveSet *ipsets.IPSetMetadata if util.IsKeyValueLabelSetName(removeIPSetName) { @@ -490,7 +504,8 @@ func (c *PodController) syncAddAndUpdatePod(newPodObj *corev1.Pod) (metrics.Oper // Add the pod to its label's ipset. 
for _, addIPSetName := range addToIPSets { - klog.Infof("Creating ipset %s if it doesn't already exist", addIPSetName) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("Creating ipset %s if it doesn't already exist", addIPSetName) var toAddSet *ipsets.IPSetMetadata if util.IsKeyValueLabelSetName(addIPSetName) { @@ -499,7 +514,8 @@ func (c *PodController) syncAddAndUpdatePod(newPodObj *corev1.Pod) (metrics.Oper toAddSet = ipsets.NewIPSetMetadata(addIPSetName, ipsets.KeyLabelOfPod) } - klog.Infof("Adding pod %s (ip : %s) to ipset %s", podKey, newPodObj.Status.PodIP, addIPSetName) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("Adding pod %s (ip : %s) to ipset %s", podKey, newPodObj.Status.PodIP, addIPSetName) if err = c.dp.AddToSets([]*ipsets.IPSetMetadata{toAddSet}, newPodMetadata); err != nil { return metrics.UpdateOp, fmt.Errorf("[syncAddAndUpdatePod] Error: failed to add pod to label ipset with err: %w", err) } @@ -542,7 +558,8 @@ func (c *PodController) syncAddAndUpdatePod(newPodObj *corev1.Pod) (metrics.Oper // cleanUpDeletedPod cleans up all ipset associated with this pod func (c *PodController) cleanUpDeletedPod(cachedNpmPodKey string) error { - klog.Infof("[cleanUpDeletedPod] deleting Pod with key %s", cachedNpmPodKey) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("[cleanUpDeletedPod] deleting Pod with key %s", cachedNpmPodKey) // If cached npmPod does not exist, return nil cachedNpmPod, exist := c.podMap[cachedNpmPodKey] if !exist { @@ -562,7 +579,8 @@ func (c *PodController) cleanUpDeletedPod(cachedNpmPodKey string) error { // Get lists of podLabelKey and podLabelKey + podLavelValue ,and then start deleting them from ipsets for labelKey, labelVal := range cachedNpmPod.Labels { labelKeyValue := util.GetIpSetFromLabelKV(labelKey, labelVal) - klog.Infof("Deleting pod %s (ip : %s) from ipsets %s and %s", cachedNpmPodKey, cachedNpmPod.PodIP, labelKey, labelKeyValue) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("Deleting pod %s (ip : %s) from ipsets %s and %s", cachedNpmPodKey, cachedNpmPod.PodIP, labelKey, labelKeyValue) if err = c.dp.RemoveFromSets( []*ipsets.IPSetMetadata{ ipsets.NewIPSetMetadata(labelKey, ipsets.KeyLabelOfPod), @@ -595,7 +613,8 @@ func (c *PodController) manageNamedPortIpsets(portList []corev1.ContainerPort, p return nil } for _, port := range portList { - klog.Infof("port is %+v", port) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("port is %+v", port) if port.Name == "" { continue } diff --git a/npm/pkg/controlplane/translation/translatePolicy.go b/npm/pkg/controlplane/translation/translatePolicy.go index 6cd5e74014..9b029b4616 100644 --- a/npm/pkg/controlplane/translation/translatePolicy.go +++ b/npm/pkg/controlplane/translation/translatePolicy.go @@ -34,6 +34,8 @@ var ( ) // ErrUnsupportedIPAddress is returned when an unsupported IP address, such as IPV6, is used ErrUnsupportedIPAddress = errors.New("unsupported IP address") + // ErrUnsupportedNonCIDR is returned when non-CIDR blocks are passed in with NPM Lite enabled. 
NPM Lite allows deny-all and allow-all policies + ErrUnsupportedNonCIDR = errors.New("Non-CIDR blocks, named ports, and ingress/egress namespace/pod selectors are not supported when NPM Lite is enabled, allowing only CIDR-based policies") ) type podSelectorResult struct { @@ -222,7 +224,8 @@ func ipBlockIPSet(policyName, ns string, direction policies.Direction, ipBlockSe // ipBlockRule translates IPBlock field in networkpolicy object to translatedIPSet and SetInfo. // ipBlockSetIndex parameter is used to diffentiate ipBlock fields in one networkpolicy object. func ipBlockRule(policyName, ns string, direction policies.Direction, matchType policies.MatchType, ipBlockSetIndex, ipBlockPeerIndex int, - ipBlockRule *networkingv1.IPBlock) (*ipsets.TranslatedIPSet, policies.SetInfo, error) { //nolint // gofumpt + ipBlockRule *networkingv1.IPBlock, +) (*ipsets.TranslatedIPSet, policies.SetInfo, error) { //nolint // gofumpt if ipBlockRule == nil || ipBlockRule.CIDR == "" { return nil, policies.SetInfo{}, nil } @@ -332,7 +335,7 @@ func ruleExists(ports []networkingv1.NetworkPolicyPort, peer []networkingv1.Netw // peerAndPortRule deals with composite rules including ports and peers // (e.g., IPBlock, podSelector, namespaceSelector, or both podSelector and namespaceSelector). -func peerAndPortRule(npmNetPol *policies.NPMNetworkPolicy, direction policies.Direction, ports []networkingv1.NetworkPolicyPort, setInfo []policies.SetInfo) error { +func peerAndPortRule(npmNetPol *policies.NPMNetworkPolicy, direction policies.Direction, ports []networkingv1.NetworkPolicyPort, setInfo []policies.SetInfo, npmLiteToggle bool) error { if len(ports) == 0 { acl := policies.NewACLPolicy(policies.Allowed, direction) acl.AddSetInfo(setInfo) @@ -346,6 +349,11 @@ func peerAndPortRule(npmNetPol *policies.NPMNetworkPolicy, direction policies.Di return err } + err = checkForNamedPortType(portKind, npmLiteToggle) + if err != nil { + return err + } + acl := policies.NewACLPolicy(policies.Allowed, direction) acl.AddSetInfo(setInfo) npmNetPol.RuleIPSets = portRule(npmNetPol.RuleIPSets, acl, &ports[i], portKind) @@ -355,8 +363,15 @@ func peerAndPortRule(npmNetPol *policies.NPMNetworkPolicy, direction policies.Di } // translateRule translates ingress or egress rules and update npmNetPol object. -func translateRule(npmNetPol *policies.NPMNetworkPolicy, netPolName string, direction policies.Direction, matchType policies.MatchType, ruleIndex int, - ports []networkingv1.NetworkPolicyPort, peers []networkingv1.NetworkPolicyPeer) error { +func translateRule(npmNetPol *policies.NPMNetworkPolicy, + netPolName string, + direction policies.Direction, + matchType policies.MatchType, + ruleIndex int, + ports []networkingv1.NetworkPolicyPort, + peers []networkingv1.NetworkPolicyPeer, + npmLiteToggle bool, +) error { // TODO(jungukcho): need to clean up it. // Leave allowExternal variable now while the condition is checked before calling this function. allowExternal, portRuleExists, peerRuleExists := ruleExists(ports, peers) @@ -365,6 +380,9 @@ func translateRule(npmNetPol *policies.NPMNetworkPolicy, netPolName string, dire // The code inside if condition is to handle allowing all internal traffic, but the case is handled in #2.4. // So, this code may not execute. After confirming this, need to delete it. 
if !portRuleExists && !peerRuleExists && !allowExternal { + if npmLiteToggle { + return ErrUnsupportedNonCIDR + } acl := policies.NewACLPolicy(policies.Allowed, direction) ruleIPSets, allowAllInternalSetInfo := allowAllInternal(matchType) npmNetPol.RuleIPSets = append(npmNetPol.RuleIPSets, ruleIPSets) @@ -373,22 +391,17 @@ func translateRule(npmNetPol *policies.NPMNetworkPolicy, netPolName string, dire return nil } - // #1. Only Ports fields exist in rule - if portRuleExists && !peerRuleExists && !allowExternal { - for i := range ports { - portKind, err := portType(ports[i]) - if err != nil { - return err - } - - portACL := policies.NewACLPolicy(policies.Allowed, direction) - npmNetPol.RuleIPSets = portRule(npmNetPol.RuleIPSets, portACL, &ports[i], portKind) - npmNetPol.ACLs = append(npmNetPol.ACLs, portACL) - } + err := checkOnlyPortRuleExists(portRuleExists, peerRuleExists, allowExternal, ports, npmLiteToggle, direction, npmNetPol) + if err != nil { + return err } // #2. From or To fields exist in rule for peerIdx, peer := range peers { + // NPM Lite is enabled and peer is non-cidr block + if npmLiteToggle && peer.IPBlock == nil { + return ErrUnsupportedNonCIDR + } // #2.1 Handle IPBlock and port if exist if peer.IPBlock != nil { if len(peer.IPBlock.CIDR) > 0 { @@ -398,13 +411,21 @@ func translateRule(npmNetPol *policies.NPMNetworkPolicy, netPolName string, dire } npmNetPol.RuleIPSets = append(npmNetPol.RuleIPSets, ipBlockIPSet) - err = peerAndPortRule(npmNetPol, direction, ports, []policies.SetInfo{ipBlockSetInfo}) + err = peerAndPortRule(npmNetPol, direction, ports, []policies.SetInfo{ipBlockSetInfo}, npmLiteToggle) if err != nil { return err } } + + // if npm lite is configured, check network policy only consists of CIDR blocks + err := npmLiteValidPolicy(peer, npmLiteToggle) + if err != nil { + return err + } + // Do not need to run below code to translate PodSelector and NamespaceSelector // since IPBlock field is exclusive in NetworkPolicyPeer (i.e., peer in this code). + continue } @@ -425,7 +446,7 @@ func translateRule(npmNetPol *policies.NPMNetworkPolicy, netPolName string, dire for i := range flattenNSSelector { nsSelectorIPSets, nsSelectorList := nameSpaceSelector(matchType, &flattenNSSelector[i]) npmNetPol.RuleIPSets = append(npmNetPol.RuleIPSets, nsSelectorIPSets...) - err := peerAndPortRule(npmNetPol, direction, ports, nsSelectorList) + err := peerAndPortRule(npmNetPol, direction, ports, nsSelectorList, npmLiteToggle) if err != nil { return err } @@ -441,7 +462,7 @@ func translateRule(npmNetPol *policies.NPMNetworkPolicy, netPolName string, dire } npmNetPol.RuleIPSets = append(npmNetPol.RuleIPSets, psResult.psSets...) npmNetPol.RuleIPSets = append(npmNetPol.RuleIPSets, psResult.childPSSets...) - err = peerAndPortRule(npmNetPol, direction, ports, psResult.psList) + err = peerAndPortRule(npmNetPol, direction, ports, psResult.psList, npmLiteToggle) if err != nil { return err } @@ -467,7 +488,7 @@ func translateRule(npmNetPol *policies.NPMNetworkPolicy, netPolName string, dire nsSelectorIPSets, nsSelectorList := nameSpaceSelector(matchType, &flattenNSSelector[i]) npmNetPol.RuleIPSets = append(npmNetPol.RuleIPSets, nsSelectorIPSets...) nsSelectorList = append(nsSelectorList, psResult.psList...) 
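To make the NPM Lite constraint concrete, a sketch of a policy that translates successfully because its only peer is an IPBlock; the object values are illustrative. Adding a PodSelector or NamespaceSelector peer, or a named port, to the same rule would instead fail with ErrUnsupportedNonCIDR.

package main

import (
	"fmt"

	"github.com/Azure/azure-container-networking/npm/pkg/controlplane/translation"
	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	np := &networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "allow-cidr", Namespace: "default"},
		Spec: networkingv1.NetworkPolicySpec{
			PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress},
			Ingress: []networkingv1.NetworkPolicyIngressRule{{
				From: []networkingv1.NetworkPolicyPeer{{
					IPBlock: &networkingv1.IPBlock{CIDR: "10.0.0.0/8"},
				}},
			}},
		},
	}
	// CIDR-only peers are accepted with the lite toggle on.
	if _, err := translation.TranslatePolicy(np, true); err != nil {
		fmt.Println("unexpected error:", err)
	}
}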
- err := peerAndPortRule(npmNetPol, direction, ports, nsSelectorList) + err := peerAndPortRule(npmNetPol, direction, ports, nsSelectorList, npmLiteToggle) if err != nil { return err } @@ -502,7 +523,7 @@ func isAllowAllToIngress(ingress []networkingv1.NetworkPolicyIngressRule) bool { // ingressPolicy traslates NetworkPolicyIngressRule in NetworkPolicy object // to NPMNetworkPolicy object. -func ingressPolicy(npmNetPol *policies.NPMNetworkPolicy, netPolName string, ingress []networkingv1.NetworkPolicyIngressRule) error { +func ingressPolicy(npmNetPol *policies.NPMNetworkPolicy, netPolName string, ingress []networkingv1.NetworkPolicyIngressRule, npmLiteToggle bool) error { // #1. Allow all traffic from both internal and external. // In yaml file, it is specified with '{}'. if isAllowAllToIngress(ingress) { @@ -521,7 +542,7 @@ func ingressPolicy(npmNetPol *policies.NPMNetworkPolicy, netPolName string, ingr // #3. Ingress rule is not AllowAll (including internal and external) and DenyAll policy. // So, start translating ingress policy. for i, rule := range ingress { - if err := translateRule(npmNetPol, netPolName, policies.Ingress, policies.SrcMatch, i, rule.Ports, rule.From); err != nil { + if err := translateRule(npmNetPol, netPolName, policies.Ingress, policies.SrcMatch, i, rule.Ports, rule.From, npmLiteToggle); err != nil { return err } } @@ -545,7 +566,7 @@ func isAllowAllToEgress(egress []networkingv1.NetworkPolicyEgressRule) bool { // egressPolicy traslates NetworkPolicyEgressRule in networkpolicy object // to NPMNetworkPolicy object. -func egressPolicy(npmNetPol *policies.NPMNetworkPolicy, netPolName string, egress []networkingv1.NetworkPolicyEgressRule) error { +func egressPolicy(npmNetPol *policies.NPMNetworkPolicy, netPolName string, egress []networkingv1.NetworkPolicyEgressRule, npmLiteToggle bool) error { // #1. Allow all traffic to both internal and external. // In yaml file, it is specified with '{}'. if isAllowAllToEgress(egress) { @@ -564,7 +585,7 @@ func egressPolicy(npmNetPol *policies.NPMNetworkPolicy, netPolName string, egres // #3. Egress rule is not AllowAll (including internal and external) and DenyAll. // So, start translating egress policy. for i, rule := range egress { - err := translateRule(npmNetPol, netPolName, policies.Egress, policies.DstMatch, i, rule.Ports, rule.To) + err := translateRule(npmNetPol, netPolName, policies.Egress, policies.DstMatch, i, rule.Ports, rule.To, npmLiteToggle) if err != nil { return err } @@ -579,7 +600,7 @@ func egressPolicy(npmNetPol *policies.NPMNetworkPolicy, netPolName string, egres // TranslatePolicy translates networkpolicy object to NPMNetworkPolicy object // and returns the NPMNetworkPolicy object. -func TranslatePolicy(npObj *networkingv1.NetworkPolicy) (*policies.NPMNetworkPolicy, error) { +func TranslatePolicy(npObj *networkingv1.NetworkPolicy, npmLiteToggle bool) (*policies.NPMNetworkPolicy, error) { netPolName := npObj.Name npmNetPol := policies.NewNPMNetworkPolicy(netPolName, npObj.Namespace) @@ -598,12 +619,12 @@ func TranslatePolicy(npObj *networkingv1.NetworkPolicy) (*policies.NPMNetworkPol // and Egress will be set if the NetworkPolicy has any egress rules. 
for _, ptype := range npObj.Spec.PolicyTypes { if ptype == networkingv1.PolicyTypeIngress { - err := ingressPolicy(npmNetPol, netPolName, npObj.Spec.Ingress) + err := ingressPolicy(npmNetPol, netPolName, npObj.Spec.Ingress, npmLiteToggle) if err != nil { return nil, err } } else { - err := egressPolicy(npmNetPol, netPolName, npObj.Spec.Egress) + err := egressPolicy(npmNetPol, netPolName, npObj.Spec.Egress, npmLiteToggle) if err != nil { return nil, err } @@ -620,3 +641,46 @@ func TranslatePolicy(npObj *networkingv1.NetworkPolicy) (*policies.NPMNetworkPol } return npmNetPol, nil } + +// validates only CIDR based peer is present + no combination of CIDR with pod/namespace selectors are present +func npmLiteValidPolicy(peer networkingv1.NetworkPolicyPeer, npmLiteEnabled bool) error { + if npmLiteEnabled && (peer.PodSelector != nil || peer.NamespaceSelector != nil) { + return ErrUnsupportedNonCIDR + } + return nil +} + +func checkForNamedPortType(portKind netpolPortType, npmLiteToggle bool) error { + if npmLiteToggle && portKind == namedPortType { + return ErrUnsupportedNonCIDR + } + return nil +} + +func checkOnlyPortRuleExists( + portRuleExists, + peerRuleExists, + allowExternal bool, + ports []networkingv1.NetworkPolicyPort, + npmLiteToggle bool, + direction policies.Direction, + npmNetPol *policies.NPMNetworkPolicy, +) error { + // #1. Only Ports fields exist in rule + if portRuleExists && !peerRuleExists && !allowExternal { + for i := range ports { + portKind, err := portType(ports[i]) + if err != nil { + return err + } + err = checkForNamedPortType(portKind, npmLiteToggle) + if err != nil { + return err + } + portACL := policies.NewACLPolicy(policies.Allowed, direction) + npmNetPol.RuleIPSets = portRule(npmNetPol.RuleIPSets, portACL, &ports[i], portKind) + npmNetPol.ACLs = append(npmNetPol.ACLs, portACL) + } + } + return nil +} diff --git a/npm/pkg/controlplane/translation/translatePolicy_test.go b/npm/pkg/controlplane/translation/translatePolicy_test.go index 2129017a8f..dc49c7bec3 100644 --- a/npm/pkg/controlplane/translation/translatePolicy_test.go +++ b/npm/pkg/controlplane/translation/translatePolicy_test.go @@ -1436,6 +1436,7 @@ func TestPeerAndPortRule(t *testing.T) { for i, tt := range tests { tt := tt setInfo := setInfos[i] + npmLiteToggle := false t.Run(tt.name, func(t *testing.T) { t.Parallel() for _, acl := range tt.npmNetPol.ACLs { @@ -1446,7 +1447,7 @@ func TestPeerAndPortRule(t *testing.T) { PolicyKey: tt.npmNetPol.PolicyKey, ACLPolicyID: tt.npmNetPol.ACLPolicyID, } - err := peerAndPortRule(npmNetPol, policies.Ingress, tt.ports, setInfo) + err := peerAndPortRule(npmNetPol, policies.Ingress, tt.ports, setInfo, npmLiteToggle) if tt.skipWindows && util.IsWindowsDP() { require.Error(t, err) } else { @@ -2178,7 +2179,7 @@ func TestIngressPolicy(t *testing.T) { npmNetPol.PodSelectorList = psResult.psList splitPolicyKey := strings.Split(npmNetPol.PolicyKey, "/") require.Len(t, splitPolicyKey, 2, "policy key must include name") - err = ingressPolicy(npmNetPol, splitPolicyKey[1], tt.rules) + err = ingressPolicy(npmNetPol, splitPolicyKey[1], tt.rules, false) if tt.wantErr || (tt.skipWindows && util.IsWindowsDP()) { require.Error(t, err) } else { @@ -2909,7 +2910,7 @@ func TestEgressPolicy(t *testing.T) { npmNetPol.PodSelectorList = psResult.psList splitPolicyKey := strings.Split(npmNetPol.PolicyKey, "/") require.Len(t, splitPolicyKey, 2, "policy key must include name") - err = egressPolicy(npmNetPol, splitPolicyKey[1], tt.rules) + err = egressPolicy(npmNetPol, splitPolicyKey[1], 
tt.rules, false) if tt.wantErr || (tt.skipWindows && util.IsWindowsDP()) { require.Error(t, err) } else { @@ -2919,3 +2920,220 @@ func TestEgressPolicy(t *testing.T) { }) } } + +func TestNpmLiteCidrPolicy(t *testing.T) { + // Test 1) Npm lite enabled, CIDR + Namespace label Peers, returns error + // Test 2) NPM lite disabled, CIDR + Namespace label Peers, returns no error + // Test 3) Npm Lite enabled, CIDR Peers , returns no error + // Test 4) NPM Lite enabled, Combination of CIDR + Label in same peer, returns an error + // test 5) NPM Lite enabled, no peer, returns no error + // test 6) NPM Lite enabled, no cidr, no peer, only ports + protocol + + port8000 := intstr.FromInt(8000) + tcp := v1.ProtocolTCP + tests := []struct { + name string + targetSelector *metav1.LabelSelector + ports []networkingv1.NetworkPolicyPort + peersFrom []networkingv1.NetworkPolicyPeer + peersTo []networkingv1.NetworkPolicyPeer + npmLiteEnabled bool + wantErr bool + }{ + { + name: "CIDR + port + namespace", + targetSelector: nil, + ports: []networkingv1.NetworkPolicyPort{ + { + Protocol: &tcp, + Port: &port8000, + }, + }, + peersFrom: []networkingv1.NetworkPolicyPeer{ + { + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "peer-nsselector-kay": "peer-nsselector-value", + }, + }, + }, + { + IPBlock: &networkingv1.IPBlock{ + CIDR: "172.17.0.0/16", + Except: []string{"172.17.1.0/24", "172.17.2.0/24"}, + }, + }, + { + IPBlock: &networkingv1.IPBlock{ + CIDR: "172.17.0.0/16", + }, + }, + }, + peersTo: []networkingv1.NetworkPolicyPeer{}, + npmLiteEnabled: true, + wantErr: true, + }, + { + name: "cidr + namespace label + disabledLite ", + targetSelector: nil, + peersFrom: []networkingv1.NetworkPolicyPeer{ + { + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "peer-nsselector-kay": "peer-nsselector-value", + }, + }, + }, + { + IPBlock: &networkingv1.IPBlock{ + CIDR: "172.17.0.0/16", + Except: []string{"172.17.1.0/24", "172.17.2.0/24"}, + }, + }, + { + IPBlock: &networkingv1.IPBlock{ + CIDR: "172.17.0.0/16", + }, + }, + }, + peersTo: []networkingv1.NetworkPolicyPeer{}, + npmLiteEnabled: false, + wantErr: false, + }, + { + name: "CIDR Only", + targetSelector: nil, + peersFrom: []networkingv1.NetworkPolicyPeer{ + { + IPBlock: &networkingv1.IPBlock{ + CIDR: "172.17.0.0/16", + Except: []string{"172.17.1.0/24", "172.17.2.0/24"}, + }, + }, + { + IPBlock: &networkingv1.IPBlock{ + CIDR: "172.17.0.0/16", + }, + }, + }, + peersTo: []networkingv1.NetworkPolicyPeer{}, + npmLiteEnabled: true, + wantErr: false, + }, + { + name: "CIDR + namespace labels", + targetSelector: nil, + peersFrom: []networkingv1.NetworkPolicyPeer{ + { + IPBlock: &networkingv1.IPBlock{ + CIDR: "172.17.0.0/17", + Except: []string{"172.17.1.0/24", "172.17.2.0/24"}, + }, + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "peer-nsselector-kay": "peer-nsselector-value", + }, + }, + }, + }, + peersTo: []networkingv1.NetworkPolicyPeer{}, + npmLiteEnabled: true, + wantErr: true, + }, + { + name: "no peers", + targetSelector: nil, + peersFrom: []networkingv1.NetworkPolicyPeer{}, + peersTo: []networkingv1.NetworkPolicyPeer{}, + npmLiteEnabled: true, + wantErr: false, + }, + { + name: "port only", + targetSelector: nil, + ports: []networkingv1.NetworkPolicyPort{ + { + Protocol: &tcp, + Port: &port8000, + }, + }, + peersFrom: []networkingv1.NetworkPolicyPeer{}, + peersTo: []networkingv1.NetworkPolicyPeer{}, + npmLiteEnabled: true, + wantErr: false, + }, + } + + for _, tt := range tests 
{ + tt := tt + t.Run(tt.name, func(t *testing.T) { + // run the function passing in peers and a flag indicating whether npm lite is enabled + var err error + for _, peer := range tt.peersFrom { + err = npmLiteValidPolicy(peer, tt.npmLiteEnabled) + if err != nil { + break + } + } + if tt.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestCheckForNamedPortType(t *testing.T) { + port8000 := intstr.FromInt(8000) + namedPort := intstr.FromString("namedPort") + tcp := v1.ProtocolTCP + tests := []struct { + name string + targetSelector *metav1.LabelSelector + ports []networkingv1.NetworkPolicyPort + portKind netpolPortType + npmLiteEnabled bool + wantErr bool + }{ + { + name: "unnamedPortOnly", + targetSelector: nil, + ports: []networkingv1.NetworkPolicyPort{ + { + Protocol: &tcp, + Port: &port8000, + }, + }, + portKind: numericPortType, + npmLiteEnabled: true, + wantErr: false, + }, + { + name: "namedPortOnly", + targetSelector: nil, + ports: []networkingv1.NetworkPolicyPort{ + { + Protocol: &tcp, + Port: &namedPort, + }, + }, + portKind: namedPortType, + npmLiteEnabled: true, + wantErr: true, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + // run the function passing in peers and a flag indicating whether npm lite is enabled + err := checkForNamedPortType(tt.portKind, tt.npmLiteEnabled) + if tt.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/npm/pkg/dataplane/dataplane.go b/npm/pkg/dataplane/dataplane.go index 8958d58321..e41a54a4a1 100644 --- a/npm/pkg/dataplane/dataplane.go +++ b/npm/pkg/dataplane/dataplane.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "strings" + "sync" "time" "github.com/Azure/azure-container-networking/common" @@ -18,11 +19,12 @@ import ( const ( reconcileDuration = time.Duration(5 * time.Minute) - contextBackground = "BACKGROUND" - contextApplyDP = "APPLY-DP" - contextAddNetPol = "ADD-NETPOL" - contextAddNetPolBootup = "BOOTUP-ADD-NETPOL" - contextDelNetPol = "DEL-NETPOL" + contextBackground = "BACKGROUND" + contextApplyDP = "APPLY-DP" + contextAddNetPol = "ADD-NETPOL" + contextAddNetPolBootup = "BOOTUP-ADD-NETPOL" + contextAddNetPolPrecaution = "ADD-NETPOL-PRECAUTION" + contextDelNetPol = "DEL-NETPOL" ) var ( @@ -43,10 +45,16 @@ type Config struct { NetPolInBackground bool MaxPendingNetPols int NetPolInterval time.Duration + EnableNPMLite bool *ipsets.IPSetManagerCfg *policies.PolicyManagerCfg } +type removePolicyInfo struct { + sync.Mutex + previousRemovePolicyIPSetsFailed bool +} + type DataPlane struct { *Config applyInBackground bool @@ -57,13 +65,17 @@ type DataPlane struct { nodeName string // endpointCache stores all endpoints of the network (including off-node) // Key is PodIP - endpointCache *endpointCache - ioShim *common.IOShim - updatePodCache *updatePodCache - endpointQuery *endpointQuery - applyInfo *applyInfo - netPolQueue *netPolQueue - stopChannel <-chan struct{} + endpointCache *endpointCache + ioShim *common.IOShim + updatePodCache *updatePodCache + endpointQuery *endpointQuery + endpointQueryAttachedState *endpointQuery // windows -> filter for state 2 (attached) endpoints in l1vh + applyInfo *applyInfo + netPolQueue *netPolQueue + // removePolicyInfo tracks when a policy was removed yet had ApplyIPSet failures. + // This field is only relevant for Linux. 
+ removePolicyInfo removePolicyInfo + stopChannel <-chan struct{} } func NewDataPlane(nodeName string, ioShim *common.IOShim, cfg *Config, stopChannel <-chan struct{}) (*DataPlane, error) { @@ -78,11 +90,12 @@ func NewDataPlane(nodeName string, ioShim *common.IOShim, cfg *Config, stopChann policyMgr: policies.NewPolicyManager(ioShim, cfg.PolicyManagerCfg), ipsetMgr: ipsets.NewIPSetManager(cfg.IPSetManagerCfg, ioShim), // networkID is set when initializing Windows dataplane - networkID: "", - endpointCache: newEndpointCache(), - nodeName: nodeName, - ioShim: ioShim, - endpointQuery: new(endpointQuery), + networkID: "", + endpointCache: newEndpointCache(), + nodeName: nodeName, + ioShim: ioShim, + endpointQuery: new(endpointQuery), + endpointQueryAttachedState: new(endpointQuery), applyInfo: &applyInfo{ inBootupPhase: true, }, @@ -118,7 +131,6 @@ func NewDataPlane(nodeName string, ioShim *common.IOShim, cfg *Config, stopChann } else { metrics.SendLog(util.DaemonDataplaneID, "[DataPlane] dataplane configured to NOT add netpols in background", true) } - return dp, nil } @@ -245,7 +257,8 @@ func (dp *DataPlane) AddToSets(setNames []*ipsets.IPSetMetadata, podMetadata *Po } if dp.shouldUpdatePod() && podMetadata.NodeName == dp.nodeName { - klog.Infof("[DataPlane] Updating Sets to Add for pod key %s", podMetadata.PodKey) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("[DataPlane] Updating Sets to Add for pod key %s", podMetadata.PodKey) // lock updatePodCache while reading/modifying or setting the updatePod in the cache dp.updatePodCache.Lock() @@ -267,7 +280,8 @@ func (dp *DataPlane) RemoveFromSets(setNames []*ipsets.IPSetMetadata, podMetadat } if dp.shouldUpdatePod() && podMetadata.NodeName == dp.nodeName { - klog.Infof("[DataPlane] Updating Sets to Remove for pod key %s", podMetadata.PodKey) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("[DataPlane] Updating Sets to Remove for pod key %s", podMetadata.PodKey) // lock updatePodCache while reading/modifying or setting the updatePod in the cache dp.updatePodCache.Lock() @@ -316,10 +330,12 @@ func (dp *DataPlane) ApplyDataPlane() error { newCount := dp.applyInfo.numBatches dp.applyInfo.Unlock() - klog.Infof("[DataPlane] [%s] new batch count: %d", contextApplyDP, newCount) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("[DataPlane] [%s] new batch count: %d", contextApplyDP, newCount) if newCount >= dp.ApplyMaxBatches { - klog.Infof("[DataPlane] [%s] applying now since reached maximum batch count: %d", contextApplyDP, newCount) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("[DataPlane] [%s] applying now since reached maximum batch count: %d", contextApplyDP, newCount) return dp.applyDataPlaneNow(contextApplyDP) } @@ -327,12 +343,17 @@ func (dp *DataPlane) ApplyDataPlane() error { } func (dp *DataPlane) applyDataPlaneNow(context string) error { - klog.Infof("[DataPlane] [ApplyDataPlane] [%s] starting to apply ipsets", context) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("[DataPlane] [ApplyDataPlane] [%s] starting to apply ipsets", context) err := dp.ipsetMgr.ApplyIPSets() if err != nil { return fmt.Errorf("[DataPlane] [%s] error while applying IPSets: %w", context, err) } - klog.Infof("[DataPlane] [ApplyDataPlane] [%s] 
finished applying ipsets", context) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("[DataPlane] [ApplyDataPlane] [%s] finished applying ipsets", context) + + // see comment in RemovePolicy() for why this is here + dp.setRemovePolicyFailure(false) if dp.applyInBackground { dp.applyInfo.Lock() @@ -350,7 +371,8 @@ func (dp *DataPlane) applyDataPlaneNow(context string) error { } dp.updatePodCache.Unlock() - klog.Infof("[DataPlane] [ApplyDataPlane] [%s] refreshing endpoints before updating pods", context) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("[DataPlane] [ApplyDataPlane] [%s] refreshing endpoints before updating pods", context) err := dp.refreshPodEndpoints() if err != nil { @@ -359,14 +381,16 @@ func (dp *DataPlane) applyDataPlaneNow(context string) error { return nil } - klog.Infof("[DataPlane] [ApplyDataPlane] [%s] refreshed endpoints", context) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("[DataPlane] [ApplyDataPlane] [%s] refreshed endpoints", context) // lock updatePodCache while driving goal state to kernel // prevents another ApplyDataplane call from updating the same pods dp.updatePodCache.Lock() defer dp.updatePodCache.Unlock() - klog.Infof("[DataPlane] [ApplyDataPlane] [%s] starting to update pods", context) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("[DataPlane] [ApplyDataPlane] [%s] starting to update pods", context) for !dp.updatePodCache.isEmpty() { pod := dp.updatePodCache.dequeue() if pod == nil { @@ -384,14 +408,16 @@ func (dp *DataPlane) applyDataPlaneNow(context string) error { } } - klog.Infof("[DataPlane] [ApplyDataPlane] [%s] finished updating pods", context) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("[DataPlane] [ApplyDataPlane] [%s] finished updating pods", context) } return nil } // AddPolicy takes in a translated NPMNetworkPolicy object and applies on dataplane func (dp *DataPlane) AddPolicy(policy *policies.NPMNetworkPolicy) error { - klog.Infof("[DataPlane] Add Policy called for %s", policy.PolicyKey) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("[DataPlane] Add Policy called for %s", policy.PolicyKey) if !dp.netPolInBackground { return dp.addPolicies([]*policies.NPMNetworkPolicy{policy}) @@ -405,10 +431,12 @@ func (dp *DataPlane) AddPolicy(policy *policies.NPMNetworkPolicy) error { dp.netPolQueue.enqueue(policy) newCount := dp.netPolQueue.len() - klog.Infof("[DataPlane] [%s] new pending netpol count: %d", contextAddNetPol, newCount) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("[DataPlane] [%s] new pending netpol count: %d", contextAddNetPol, newCount) if newCount >= dp.MaxPendingNetPols { - klog.Infof("[DataPlane] [%s] applying now since reached maximum batch count: %d", contextAddNetPol, newCount) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("[DataPlane] [%s] applying now since reached maximum batch count: %d", contextAddNetPol, newCount) dp.addPoliciesWithRetry(contextAddNetPol) } return nil @@ -418,12 +446,14 @@ func (dp *DataPlane) AddPolicy(policy *policies.NPMNetworkPolicy) error { // The caller must lock 
netPolQueue. func (dp *DataPlane) addPoliciesWithRetry(context string) { netPols := dp.netPolQueue.dump() - klog.Infof("[DataPlane] adding policies %+v", netPols) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("[DataPlane] adding policies %+v", netPols) err := dp.addPolicies(netPols) if err == nil { // clear queue and return on success - klog.Infof("[DataPlane] [%s] added policies successfully", context) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("[DataPlane] [%s] added policies successfully", context) dp.netPolQueue.clear() return } @@ -436,7 +466,8 @@ func (dp *DataPlane) addPoliciesWithRetry(context string) { err = dp.addPolicies([]*policies.NPMNetworkPolicy{netPol}) if err == nil { // remove from queue on success - klog.Infof("[DataPlane] [%s] added policy successfully one at a time. policyKey: %s", context, netPol.PolicyKey) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("[DataPlane] [%s] added policy successfully one at a time. policyKey: %s", context, netPol.PolicyKey) dp.netPolQueue.delete(netPol.PolicyKey) } else { // keep in queue on failure @@ -454,7 +485,8 @@ func (dp *DataPlane) addPolicies(netPols []*policies.NPMNetworkPolicy) error { } if len(netPols) == 0 { - klog.Infof("[DataPlane] expected to have at least one NetPol in dp.addPolicies()") + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("[DataPlane] expected to have at least one NetPol in dp.addPolicies()") return nil } @@ -471,6 +503,20 @@ func (dp *DataPlane) addPolicies(netPols []*policies.NPMNetworkPolicy) error { } } + if dp.hadRemovePolicyFailure() { + if inBootupPhase { + // this should never happen because bootup phase is for windows, but just in case, we don't want to applyDataplaneNow() or else there will be a deadlock on dp.applyInfo + msg := fmt.Sprintf("[DataPlane] [%s] at risk of improperly applying a policy which is removed then readded", contextAddNetPolPrecaution) + klog.Warning(msg) + metrics.SendErrorLogAndMetric(util.DaemonDataplaneID, msg) + } else { + // prevent #2977 + if err := dp.applyDataPlaneNow(contextAddNetPolPrecaution); err != nil { + return err // nolint:wrapcheck // unnecessary to wrap error since the provided context is included in the error + } + } + } + // 1. Add IPSets and apply for each NetPol. 
// Apply IPSets after each NetworkPolicy unless ApplyInBackground=true and we're in the bootup phase (only happens for Windows currently) for _, netPol := range netPols { @@ -497,15 +543,21 @@ func (dp *DataPlane) addPolicies(netPols []*policies.NPMNetworkPolicy) error { // increment batch and apply IPSets if needed dp.applyInfo.numBatches++ newCount := dp.applyInfo.numBatches - klog.Infof("[DataPlane] [%s] new batch count: %d", contextAddNetPolBootup, newCount) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("[DataPlane] [%s] new batch count: %d", contextAddNetPolBootup, newCount) if newCount >= dp.ApplyMaxBatches { - klog.Infof("[DataPlane] [%s] applying now since reached maximum batch count: %d", contextAddNetPolBootup, newCount) - klog.Infof("[DataPlane] [%s] starting to apply ipsets", contextAddNetPolBootup) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("[DataPlane] [%s] applying now since reached maximum batch count: %d", contextAddNetPolBootup, newCount) + // klog.Infof("[DataPlane] [%s] starting to apply ipsets", contextAddNetPolBootup) err = dp.ipsetMgr.ApplyIPSets() if err != nil { return fmt.Errorf("[DataPlane] [%s] error while applying IPSets: %w", contextAddNetPolBootup, err) } - klog.Infof("[DataPlane] [%s] finished applying ipsets", contextAddNetPolBootup) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("[DataPlane] [%s] finished applying ipsets", contextAddNetPolBootup) + + // see comment in RemovePolicy() for why this is here + dp.setRemovePolicyFailure(false) dp.applyInfo.numBatches = 0 } @@ -542,7 +594,8 @@ func (dp *DataPlane) addPolicies(netPols []*policies.NPMNetworkPolicy) error { // RemovePolicy takes in network policyKey (namespace/name of network policy) and removes it from dataplane and cache func (dp *DataPlane) RemovePolicy(policyKey string) error { - klog.Infof("[DataPlane] Remove Policy called for %s", policyKey) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("[DataPlane] Remove Policy called for %s", policyKey) if dp.netPolInBackground { // make sure to not add this NetPol if we're deleting it @@ -603,16 +656,27 @@ func (dp *DataPlane) RemovePolicy(policyKey string) error { return err } - return dp.applyDataPlaneNow(contextApplyDP) + if err := dp.applyDataPlaneNow(contextDelNetPol); err != nil { + // Failed to apply IPSets while removing this policy. + // Consider this removepolicy call a failure until apply IPSets is successful. + // Related to #2977 + klog.Info("[DataPlane] remove policy has failed to apply ipsets. 
setting remove policy failure") + dp.setRemovePolicyFailure(true) + return err // nolint:wrapcheck // unnecessary to wrap error since the provided context is included in the error + } + + return nil } // UpdatePolicy takes in updated policy object, calculates the delta and applies changes // onto dataplane accordingly func (dp *DataPlane) UpdatePolicy(policy *policies.NPMNetworkPolicy) error { - klog.Infof("[DataPlane] Update Policy called for %s", policy.PolicyKey) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("[DataPlane] Update Policy called for %s", policy.PolicyKey) ok := dp.policyMgr.PolicyExists(policy.PolicyKey) if !ok { - klog.Infof("[DataPlane] Policy %s is not found.", policy.PolicyKey) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("[DataPlane] Policy %s is not found.", policy.PolicyKey) return dp.AddPolicy(policy) } @@ -725,3 +789,23 @@ func (dp *DataPlane) deleteIPSetsAndReferences(sets []*ipsets.TranslatedIPSet, n } return nil } + +func (dp *DataPlane) setRemovePolicyFailure(failed bool) { + if util.IsWindowsDP() { + return + } + + dp.removePolicyInfo.Lock() + defer dp.removePolicyInfo.Unlock() + dp.removePolicyInfo.previousRemovePolicyIPSetsFailed = failed +} + +func (dp *DataPlane) hadRemovePolicyFailure() bool { + if util.IsWindowsDP() { + return false + } + + dp.removePolicyInfo.Lock() + defer dp.removePolicyInfo.Unlock() + return dp.removePolicyInfo.previousRemovePolicyIPSetsFailed +} diff --git a/npm/pkg/dataplane/dataplane_linux.go b/npm/pkg/dataplane/dataplane_linux.go index 368e499d24..99769bf0ae 100644 --- a/npm/pkg/dataplane/dataplane_linux.go +++ b/npm/pkg/dataplane/dataplane_linux.go @@ -2,7 +2,6 @@ package dataplane import ( "github.com/Azure/azure-container-networking/npm/pkg/dataplane/policies" - "github.com/Azure/azure-container-networking/npm/util" npmerrors "github.com/Azure/azure-container-networking/npm/util/errors" ) @@ -21,8 +20,6 @@ func (dp *DataPlane) updatePod(pod *updateNPMPod) error { } func (dp *DataPlane) bootupDataPlane() error { - util.DetectIptablesVersion(dp.ioShim) - // It is important to keep order to clean-up ACLs before ipsets. Otherwise we won't be able to delete ipsets referenced by ACLs if err := dp.policyMgr.Bootup(nil); err != nil { return npmerrors.ErrorWrapper(npmerrors.BootupDataplane, false, "failed to reset policy dataplane", err) diff --git a/npm/pkg/dataplane/dataplane_linux_test.go b/npm/pkg/dataplane/dataplane_linux_test.go index 7c48ac1d3b..9cb22138ad 100644 --- a/npm/pkg/dataplane/dataplane_linux_test.go +++ b/npm/pkg/dataplane/dataplane_linux_test.go @@ -1,7 +1,6 @@ package dataplane import ( - "fmt" "testing" "time" @@ -12,6 +11,7 @@ import ( "github.com/Azure/azure-container-networking/npm/util" testutils "github.com/Azure/azure-container-networking/test/utils" "github.com/stretchr/testify/require" + "k8s.io/klog" ) var netpolInBackgroundCfg = &Config{ @@ -74,25 +74,29 @@ func TestNetPolInBackgroundUpdatePolicy(t *testing.T) { calls := append(getBootupTestCalls(), getAddPolicyTestCallsForDP(&testPolicyobj)...) calls = append(calls, getRemovePolicyTestCallsForDP(&testPolicyobj)...) calls = append(calls, getAddPolicyTestCallsForDP(&updatedTestPolicyobj)...) 
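
For readers following the remove-policy precaution above (setRemovePolicyFailure / hadRemovePolicyFailure, plus the re-apply in addPolicies), here is a minimal standalone sketch of the pattern. The names of the flag type and its accessors mirror the diff; the dataplane struct and the applyIPSets func field are simplified stand-ins for illustration, not the actual NPM implementation.

// Sketch: a mutex-guarded failure flag that forces ipsets to be re-applied
// before the next policy add (the #2977 scenario). Stand-ins are noted above.
package main

import (
	"errors"
	"fmt"
	"sync"
)

type removePolicyInfo struct {
	sync.Mutex
	previousRemovePolicyIPSetsFailed bool
}

type dataplane struct {
	removePolicyInfo removePolicyInfo
	applyIPSets      func() error // hypothetical stand-in for ipsetMgr.ApplyIPSets
}

func (dp *dataplane) setRemovePolicyFailure(failed bool) {
	dp.removePolicyInfo.Lock()
	defer dp.removePolicyInfo.Unlock()
	dp.removePolicyInfo.previousRemovePolicyIPSetsFailed = failed
}

func (dp *dataplane) hadRemovePolicyFailure() bool {
	dp.removePolicyInfo.Lock()
	defer dp.removePolicyInfo.Unlock()
	return dp.removePolicyInfo.previousRemovePolicyIPSetsFailed
}

func (dp *dataplane) removePolicy() error {
	if err := dp.applyIPSets(); err != nil {
		// remember the failure so the next add re-drives ipsets first
		dp.setRemovePolicyFailure(true)
		return err
	}
	return nil
}

func (dp *dataplane) addPolicy() error {
	if dp.hadRemovePolicyFailure() {
		// re-apply ipsets before adding, so a removed-then-readded policy
		// is not programmed against stale kernel state
		if err := dp.applyIPSets(); err != nil {
			return err
		}
		dp.setRemovePolicyFailure(false)
	}
	return nil
}

func main() {
	fail := true
	dp := &dataplane{applyIPSets: func() error {
		if fail {
			return errors.New("ipset restore failed")
		}
		return nil
	}}
	fmt.Println("remove:", dp.removePolicy()) // fails, flag is set
	fail = false
	fmt.Println("add:", dp.addPolicy()) // re-applies first, then succeeds
}
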
- for _, call := range calls { - fmt.Println(call) - } ioshim := common.NewMockIOShim(calls) defer ioshim.VerifyCalls(t, calls) - dp, err := NewDataPlane("testnode", ioshim, netpolInBackgroundCfg, nil) + + stopCh := make(chan struct{}, 1) + dp, err := NewDataPlane("testnode", ioshim, netpolInBackgroundCfg, stopCh) require.NoError(t, err) + defer func() { + stopCh <- struct{}{} + time.Sleep(2000 * time.Millisecond) + klog.Info("defer for TestNetPolInBackgroundUpdatePolicy finished") + }() dp.RunPeriodicTasks() err = dp.AddPolicy(&testPolicyobj) require.NoError(t, err) - time.Sleep(100 * time.Millisecond) + time.Sleep(2000 * time.Millisecond) err = dp.UpdatePolicy(&updatedTestPolicyobj) require.NoError(t, err) - time.Sleep(100 * time.Millisecond) + time.Sleep(2000 * time.Millisecond) linuxPromVals{2, 1, 0, 0, 1}.assert(t) } @@ -103,8 +107,14 @@ func TestNetPolInBackgroundSkipAddAfterRemove(t *testing.T) { calls := getBootupTestCalls() ioshim := common.NewMockIOShim(calls) defer ioshim.VerifyCalls(t, calls) - dp, err := NewDataPlane("testnode", ioshim, netpolInBackgroundCfg, nil) + stopCh := make(chan struct{}, 1) + dp, err := NewDataPlane("testnode", ioshim, netpolInBackgroundCfg, stopCh) require.NoError(t, err) + defer func() { + stopCh <- struct{}{} + time.Sleep(100 * time.Millisecond) + klog.Info("defer for TestNetPolInBackgroundSkipAddAfterRemove finished") + }() require.NoError(t, dp.AddPolicy(&testPolicyobj)) require.NoError(t, dp.RemovePolicy(testPolicyobj.PolicyKey)) @@ -133,38 +143,43 @@ func TestNetPolInBackgroundFailureToAddFirstTime(t *testing.T) { }, // restore will try twice per pMgr.AddPolicies() call testutils.TestCmd{ - Cmd: []string{"iptables-restore", "-w", "60", "-T", "filter", "--noflush"}, + Cmd: []string{"iptables-nft-restore", "-w", "60", "-T", "filter", "--noflush"}, ExitCode: 1, }, testutils.TestCmd{ - Cmd: []string{"iptables-restore", "-w", "60", "-T", "filter", "--noflush"}, + Cmd: []string{"iptables-nft-restore", "-w", "60", "-T", "filter", "--noflush"}, ExitCode: 1, }, // first policy succeeds testutils.TestCmd{ - Cmd: []string{"iptables-restore", "-w", "60", "-T", "filter", "--noflush"}, + Cmd: []string{"iptables-nft-restore", "-w", "60", "-T", "filter", "--noflush"}, ExitCode: 0, }, // second policy succeeds testutils.TestCmd{ - Cmd: []string{"iptables-restore", "-w", "60", "-T", "filter", "--noflush"}, + Cmd: []string{"iptables-nft-restore", "-w", "60", "-T", "filter", "--noflush"}, ExitCode: 0, }, // third policy fails // restore will try twice per pMgr.AddPolicies() call testutils.TestCmd{ - Cmd: []string{"iptables-restore", "-w", "60", "-T", "filter", "--noflush"}, + Cmd: []string{"iptables-nft-restore", "-w", "60", "-T", "filter", "--noflush"}, ExitCode: 1, }, testutils.TestCmd{ - Cmd: []string{"iptables-restore", "-w", "60", "-T", "filter", "--noflush"}, + Cmd: []string{"iptables-nft-restore", "-w", "60", "-T", "filter", "--noflush"}, ExitCode: 1, }, ) ioshim := common.NewMockIOShim(calls) defer ioshim.VerifyCalls(t, calls) - dp, err := NewDataPlane("testnode", ioshim, netpolInBackgroundCfg, nil) + stopCh := make(chan struct{}, 1) + dp, err := NewDataPlane("testnode", ioshim, netpolInBackgroundCfg, stopCh) require.NoError(t, err) + defer func() { + stopCh <- struct{}{} + time.Sleep(100 * time.Millisecond) + }() require.NoError(t, dp.AddPolicy(&testPolicyobj)) require.NoError(t, dp.AddPolicy(&testPolicy2)) diff --git a/npm/pkg/dataplane/dataplane_test.go b/npm/pkg/dataplane/dataplane_test.go index c5bc3c4d37..be89796949 100644 --- 
a/npm/pkg/dataplane/dataplane_test.go +++ b/npm/pkg/dataplane/dataplane_test.go @@ -1,8 +1,8 @@ package dataplane import ( - "fmt" "testing" + "time" "github.com/Azure/azure-container-networking/common" "github.com/Azure/azure-container-networking/npm/metrics" @@ -80,8 +80,13 @@ func TestNewDataPlane(t *testing.T) { calls := getBootupTestCalls() ioshim := common.NewMockIOShim(calls) defer ioshim.VerifyCalls(t, calls) - dp, err := NewDataPlane("testnode", ioshim, dpCfg, nil) + stopCh := make(chan struct{}, 1) + dp, err := NewDataPlane("testnode", ioshim, dpCfg, stopCh) require.NoError(t, err) + defer func() { + stopCh <- struct{}{} + time.Sleep(100 * time.Millisecond) + }() assert.NotNil(t, dp) } @@ -91,8 +96,13 @@ func TestCreateAndDeleteIpSets(t *testing.T) { calls := getBootupTestCalls() ioshim := common.NewMockIOShim(calls) defer ioshim.VerifyCalls(t, calls) - dp, err := NewDataPlane("testnode", ioshim, dpCfg, nil) + stopCh := make(chan struct{}, 1) + dp, err := NewDataPlane("testnode", ioshim, dpCfg, stopCh) require.NoError(t, err) + defer func() { + stopCh <- struct{}{} + time.Sleep(100 * time.Millisecond) + }() assert.NotNil(t, dp) setsTocreate := []*ipsets.IPSetMetadata{ { @@ -133,8 +143,13 @@ func TestAddToSet(t *testing.T) { calls := getBootupTestCalls() ioshim := common.NewMockIOShim(calls) defer ioshim.VerifyCalls(t, calls) - dp, err := NewDataPlane("testnode", ioshim, dpCfg, nil) + stopCh := make(chan struct{}, 1) + dp, err := NewDataPlane("testnode", ioshim, dpCfg, stopCh) require.NoError(t, err) + defer func() { + stopCh <- struct{}{} + time.Sleep(100 * time.Millisecond) + }() setsTocreate := []*ipsets.IPSetMetadata{ { @@ -197,8 +212,13 @@ func TestApplyPolicy(t *testing.T) { calls := append(getBootupTestCalls(), getAddPolicyTestCallsForDP(&testPolicyobj)...) ioshim := common.NewMockIOShim(calls) defer ioshim.VerifyCalls(t, calls) - dp, err := NewDataPlane("testnode", ioshim, dpCfg, nil) + stopCh := make(chan struct{}, 1) + dp, err := NewDataPlane("testnode", ioshim, dpCfg, stopCh) require.NoError(t, err) + defer func() { + stopCh <- struct{}{} + time.Sleep(100 * time.Millisecond) + }() err = dp.AddPolicy(&testPolicyobj) require.NoError(t, err) @@ -211,8 +231,13 @@ func TestRemovePolicy(t *testing.T) { calls = append(calls, getRemovePolicyTestCallsForDP(&testPolicyobj)...) ioshim := common.NewMockIOShim(calls) defer ioshim.VerifyCalls(t, calls) - dp, err := NewDataPlane("testnode", ioshim, dpCfg, nil) + stopCh := make(chan struct{}, 1) + dp, err := NewDataPlane("testnode", ioshim, dpCfg, stopCh) require.NoError(t, err) + defer func() { + stopCh <- struct{}{} + time.Sleep(100 * time.Millisecond) + }() err = dp.AddPolicy(&testPolicyobj) require.NoError(t, err) @@ -221,6 +246,38 @@ func TestRemovePolicy(t *testing.T) { require.NoError(t, err) } +func TestHandle2977(t *testing.T) { + if util.IsWindowsDP() { + return + } + + metrics.InitializeAll() + + calls := append(getBootupTestCalls(), getAddPolicyTestCallsForDP(&testPolicyobj)...) + calls = append(calls, policies.GetRemovePolicyTestCalls(&testPolicyobj)...) + calls = append(calls, ipsets.GetApplyIPSetsFailureTestCalls()...) + calls = append(calls, ipsets.GetApplyIPSetsTestCalls(nil, getAffectedIPSets(&testPolicyobj))...) + calls = append(calls, getAddPolicyTestCallsForDP(&testPolicyobj)...) 
+ ioshim := common.NewMockIOShim(calls) + defer ioshim.VerifyCalls(t, calls) + stopCh := make(chan struct{}, 1) + dp, err := NewDataPlane("testnode", ioshim, dpCfg, stopCh) + require.NoError(t, err) + defer func() { + stopCh <- struct{}{} + time.Sleep(100 * time.Millisecond) + }() + + err = dp.AddPolicy(&testPolicyobj) + require.NoError(t, err) + + err = dp.RemovePolicy(testPolicyobj.PolicyKey) + require.Error(t, err) + + err = dp.AddPolicy(&testPolicyobj) + require.NoError(t, err) +} + func TestUpdatePolicy(t *testing.T) { metrics.InitializeAll() @@ -235,13 +292,15 @@ func TestUpdatePolicy(t *testing.T) { calls := append(getBootupTestCalls(), getAddPolicyTestCallsForDP(&testPolicyobj)...) calls = append(calls, getRemovePolicyTestCallsForDP(&testPolicyobj)...) calls = append(calls, getAddPolicyTestCallsForDP(&updatedTestPolicyobj)...) - for _, call := range calls { - fmt.Println(call) - } ioshim := common.NewMockIOShim(calls) defer ioshim.VerifyCalls(t, calls) - dp, err := NewDataPlane("testnode", ioshim, dpCfg, nil) + stopCh := make(chan struct{}, 1) + dp, err := NewDataPlane("testnode", ioshim, dpCfg, stopCh) require.NoError(t, err) + defer func() { + stopCh <- struct{}{} + time.Sleep(100 * time.Millisecond) + }() err = dp.AddPolicy(&testPolicyobj) require.NoError(t, err) @@ -393,7 +452,7 @@ func TestUpdatePodCache(t *testing.T) { } func getBootupTestCalls() []testutils.TestCmd { - return append(policies.GetBootupTestCalls(true), ipsets.GetResetTestCalls()...) + return append(policies.GetBootupTestCalls(), ipsets.GetResetTestCalls()...) } func getAddPolicyTestCallsForDP(networkPolicy *policies.NPMNetworkPolicy) []testutils.TestCmd { diff --git a/npm/pkg/dataplane/dataplane_windows.go b/npm/pkg/dataplane/dataplane_windows.go index 701f759905..43af8bef6b 100644 --- a/npm/pkg/dataplane/dataplane_windows.go +++ b/npm/pkg/dataplane/dataplane_windows.go @@ -2,7 +2,6 @@ package dataplane import ( "encoding/json" - "errors" "fmt" "strings" "time" @@ -12,6 +11,7 @@ import ( "github.com/Azure/azure-container-networking/npm/util" npmerrors "github.com/Azure/azure-container-networking/npm/util/errors" "github.com/Microsoft/hcsshim/hcn" + "github.com/pkg/errors" "k8s.io/klog" ) @@ -50,14 +50,31 @@ func (dp *DataPlane) initializeDataPlane() error { }, Flags: hcn.HostComputeQueryFlagsNone, } + // Initialize Endpoint query used to filter healthy endpoints (vNIC) of Windows pods on L1VH Node + dp.endpointQueryAttachedState.query = hcn.HostComputeQuery{ + SchemaVersion: hcn.SchemaVersion{ + Major: hcnSchemaMajorVersion, + Minor: hcnSchemaMinorVersion, + }, + Flags: hcn.HostComputeQueryFlagsNone, + } + // Filter out any endpoints that are not in "AttachedShared" State. All running Windows pods with networking must be in this state. filterMap := map[string]uint16{"State": hcnEndpointStateAttachedSharing} filter, err := json.Marshal(filterMap) if err != nil { - return npmerrors.SimpleErrorWrapper("failed to marshal endpoint filter map", err) + return errors.Wrap(err, "failed to marshal endpoint filter map for attachedsharing state") } dp.endpointQuery.query.Filter = string(filter) + // Filter out any endpoints that are not in "Attached" State. All running Windows pods on L1VH with networking must be in this state. 
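
The hunk continuing below issues a second HCN endpoint query (state Attached, for L1VH nodes) and merges its results with the AttachedSharing query via GetUniqueEndpoints, a map-based union keyed on endpoint ID. As a side note, the same idea could be expressed as a reusable helper; a sketch assuming Go 1.18+ generics, illustration only and not part of the diff:

// Sketch: map-based union of two slices, de-duplicated by a key function.
// The endpoint type and unionBy helper here are hypothetical.
package main

import "fmt"

type endpoint struct{ Id string }

// unionBy keeps all of a, plus any element of b whose key is not already in a.
func unionBy[T any, K comparable](a, b []T, key func(T) K) []T {
	seen := make(map[K]struct{}, len(a))
	for _, v := range a {
		seen[key(v)] = struct{}{}
	}
	for _, v := range b {
		if _, ok := seen[key(v)]; !ok {
			a = append(a, v)
		}
	}
	return a
}

func main() {
	attachedSharing := []endpoint{{"456901"}, {"123456"}}
	attached := []endpoint{{"567890"}, {"123456"}}
	merged := unionBy(attachedSharing, attached, func(e endpoint) string { return e.Id })
	fmt.Println(merged) // [{456901} {123456} {567890}]
}
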
+ filterMapAttached := map[string]uint16{"State": hcnEndpointStateAttached} + filterAttached, err := json.Marshal(filterMapAttached) + if err != nil { + return errors.Wrap(err, "failed to marshal endpoint filter map for attached state") + } + dp.endpointQueryAttachedState.query.Filter = string(filterAttached) + // reset endpoint cache so that netpol references are removed for all endpoints while refreshing pod endpoints // no need to lock endpointCache at boot up dp.endpointCache.cache = make(map[string]*npmEndpoint) @@ -97,8 +114,7 @@ func (dp *DataPlane) bootupDataPlane() error { return npmerrors.SimpleErrorWrapper("failed to initialize dataplane", err) } - // for backwards compatibility, get remote allEndpoints to delete as well - allEndpoints, err := dp.getAllPodEndpoints() + allEndpoints, err := dp.getLocalPodEndpoints() if err != nil { return err } @@ -328,33 +344,30 @@ func (dp *DataPlane) getEndpointsToApplyPolicies(netPols []*policies.NPMNetworkP return endpointList, nil } -func (dp *DataPlane) getAllPodEndpoints() ([]*hcn.HostComputeEndpoint, error) { - klog.Infof("getting all endpoints for network ID %s", dp.networkID) +func (dp *DataPlane) getLocalPodEndpoints() ([]*hcn.HostComputeEndpoint, error) { + klog.Info("getting local endpoints") + + // Gets endpoints in state: Attached timer := metrics.StartNewTimer() - endpoints, err := dp.ioShim.Hns.ListEndpointsOfNetwork(dp.networkID) + endpointsAttached, err := dp.ioShim.Hns.ListEndpointsQuery(dp.endpointQueryAttachedState.query) metrics.RecordListEndpointsLatency(timer) if err != nil { metrics.IncListEndpointsFailures() - return nil, npmerrors.SimpleErrorWrapper("failed to get all pod endpoints", err) - } - - epPointers := make([]*hcn.HostComputeEndpoint, 0, len(endpoints)) - for k := range endpoints { - epPointers = append(epPointers, &endpoints[k]) + return nil, errors.Wrap(err, "failed to get local pod endpoints in state: attached") } - return epPointers, nil -} -func (dp *DataPlane) getLocalPodEndpoints() ([]*hcn.HostComputeEndpoint, error) { - klog.Info("getting local endpoints") - timer := metrics.StartNewTimer() + // Gets endpoints in state: AttachedSharing + timer = metrics.StartNewTimer() endpoints, err := dp.ioShim.Hns.ListEndpointsQuery(dp.endpointQuery.query) metrics.RecordListEndpointsLatency(timer) if err != nil { metrics.IncListEndpointsFailures() - return nil, npmerrors.SimpleErrorWrapper("failed to get local pod endpoints", err) + return nil, errors.Wrap(err, "failed to get local pod endpoints in state: attachedSharing") } + // Union the two result sets, de-duplicating by endpoint ID + endpoints = GetUniqueEndpoints(endpoints, endpointsAttached) + epPointers := make([]*hcn.HostComputeEndpoint, 0, len(endpoints)) for k := range endpoints { epPointers = append(epPointers, &endpoints[k]) @@ -362,6 +375,24 @@ func (dp *DataPlane) getLocalPodEndpoints() ([]*hcn.HostComputeEndpoint, error) return epPointers, nil } +func GetUniqueEndpoints(endpoints, endpointsAttached []hcn.HostComputeEndpoint) []hcn.HostComputeEndpoint { + // Store IDs of the endpoints list in a map for quick lookup + idMap := make(map[string]struct{}, len(endpoints)) + for i := 0; i < len(endpoints); i++ { + ep := endpoints[i] + idMap[ep.Id] = struct{}{} + } + + // Append endpoints from endpointsAttached that are not already in the endpoints list + for i := 0; i < len(endpointsAttached); i++ { + ep := endpointsAttached[i] + if _, ok := idMap[ep.Id]; !ok { + endpoints = append(endpoints, ep) + } + } + return endpoints +} + // refreshPodEndpoints will 
refresh all the pod endpoints and create empty netpol references for new endpoints /* Key Assumption: a new pod event (w/ IP) cannot come before HNS knows (and can tell us) about the endpoint. diff --git a/npm/pkg/dataplane/dataplane_windows_test.go b/npm/pkg/dataplane/dataplane_windows_test.go index 5cd69a23c2..97be23ae67 100644 --- a/npm/pkg/dataplane/dataplane_windows_test.go +++ b/npm/pkg/dataplane/dataplane_windows_test.go @@ -10,6 +10,8 @@ import ( "github.com/Azure/azure-container-networking/npm/metrics" "github.com/Azure/azure-container-networking/npm/pkg/dataplane/ipsets" dptestutils "github.com/Azure/azure-container-networking/npm/pkg/dataplane/testutils" + "github.com/Microsoft/hcsshim/hcn" + "github.com/google/go-cmp/cmp" "github.com/pkg/errors" "github.com/stretchr/testify/require" ) @@ -26,9 +28,14 @@ func TestMetrics(t *testing.T) { hns := ipsets.GetHNSFake(t, cfg.NetworkName) hns.Delay = defaultHNSLatency io := common.NewMockIOShimWithFakeHNS(hns) - dp, err := NewDataPlane(thisNode, io, cfg, nil) + stopCh := make(chan struct{}, 1) + dp, err := NewDataPlane(thisNode, io, cfg, stopCh) require.NoError(t, err, "failed to initialize dp") require.NotNil(t, dp, "failed to initialize dp (nil)") + defer func() { + stopCh <- struct{}{} + time.Sleep(100 * time.Millisecond) + }() count, err := metrics.TotalGetNetworkLatencyCalls() require.Nil(t, err, "failed to get metric") @@ -40,14 +47,14 @@ func TestMetrics(t *testing.T) { count, err = metrics.TotalListEndpointsLatencyCalls() require.Nil(t, err, "failed to get metric") - require.Equal(t, 1, count, "should have listed endpoints once") + require.Equal(t, 2, count, "should have listed endpoints twice") err = dp.refreshPodEndpoints() require.Nil(t, err, "failed to refresh pod endpoints") count, err = metrics.TotalListEndpointsLatencyCalls() require.Nil(t, err, "failed to get metric") - require.Equal(t, 2, count, "should have listed endpoints twice") + require.Equal(t, 4, count, "should have listed endpoints four times") count, err = metrics.TotalListEndpointsFailures() require.Nil(t, err, "failed to get metric") @@ -86,6 +93,68 @@ func TestMultiJobApplyInBackground(t *testing.T) { testMultiJobCases(t, multiJobApplyInBackgroundTests(), time.Duration(1*time.Second)) } +func TestRemoveCommonEndpoints(t *testing.T) { + tests := []struct { + name string + endpoints []hcn.HostComputeEndpoint + endpointsAttached []hcn.HostComputeEndpoint + expected []hcn.HostComputeEndpoint + }{ + { + name: "1 value same", + endpoints: []hcn.HostComputeEndpoint{{Id: "456901"}, {Id: "123456"}, {Id: "560971"}}, + endpointsAttached: []hcn.HostComputeEndpoint{{Id: "567890"}, {Id: "123456"}, {Id: "789012"}}, + expected: []hcn.HostComputeEndpoint{{Id: "456901"}, {Id: "123456"}, {Id: "560971"}, {Id: "567890"}, {Id: "789012"}}, + }, + { + name: "no values same", + endpoints: []hcn.HostComputeEndpoint{{Id: "456901"}, {Id: "560971"}}, + endpointsAttached: []hcn.HostComputeEndpoint{{Id: "567890"}, {Id: "789012"}}, + expected: []hcn.HostComputeEndpoint{{Id: "456901"}, {Id: "560971"}, {Id: "567890"}, {Id: "789012"}}, + }, + { + name: "1 value same", + endpoints: []hcn.HostComputeEndpoint{{Id: "456901"}, {Id: "123456"}, {Id: "560971"}}, + endpointsAttached: []hcn.HostComputeEndpoint{{Id: "567890"}, {Id: "123456"}, {Id: "789012"}}, + expected: []hcn.HostComputeEndpoint{{Id: "456901"}, {Id: "123456"}, {Id: "560971"}, {Id: "567890"}, {Id: "789012"}}, + }, + { + name: "two values same", + endpoints: []hcn.HostComputeEndpoint{{Id: "456901"}, {Id: "560971"}, {Id: "123456"}, {Id: 
"789012"}}, + endpointsAttached: []hcn.HostComputeEndpoint{{Id: "567890"}, {Id: "789012"}, {Id: "123456"}}, + expected: []hcn.HostComputeEndpoint{{Id: "456901"}, {Id: "560971"}, {Id: "123456"}, {Id: "789012"}, {Id: "567890"}}, + }, + { + name: "no values", + endpoints: []hcn.HostComputeEndpoint{}, + endpointsAttached: []hcn.HostComputeEndpoint{}, + expected: []hcn.HostComputeEndpoint{}, + }, + { + name: "1 value - same", + endpoints: []hcn.HostComputeEndpoint{{Id: "456901"}}, + endpointsAttached: []hcn.HostComputeEndpoint{{Id: "456901"}}, + expected: []hcn.HostComputeEndpoint{{Id: "456901"}}, + }, + { + name: "1 value - different", + endpoints: []hcn.HostComputeEndpoint{{Id: "456901"}}, + endpointsAttached: []hcn.HostComputeEndpoint{}, + expected: []hcn.HostComputeEndpoint{{Id: "456901"}}, + }, + } + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + result := GetUniqueEndpoints(tt.endpoints, tt.endpointsAttached) + if !cmp.Equal(tt.expected, result) { + t.Errorf("Test %s failed: expected %v, got %v", tt.name, tt.expected, result) + } + }) + } +} + func testSerialCases(t *testing.T, tests []*SerialTestCase, finalSleep time.Duration) { for i, tt := range tests { i := i @@ -102,9 +171,14 @@ func testSerialCases(t *testing.T, tests []*SerialTestCase, finalSleep time.Dura require.Nil(t, err, "failed to create initial endpoint %+v", ep) } - dp, err := NewDataPlane(thisNode, io, tt.DpCfg, nil) + stopCh := make(chan struct{}, 1) + dp, err := NewDataPlane(thisNode, io, tt.DpCfg, stopCh) require.NoError(t, err, "failed to initialize dp") require.NotNil(t, dp, "failed to initialize dp (nil)") + defer func() { + stopCh <- struct{}{} + time.Sleep(100 * time.Millisecond) + }() dp.RunPeriodicTasks() @@ -142,8 +216,13 @@ func testMultiJobCases(t *testing.T, tests []*MultiJobTestCase, finalSleep time. 
} // the dp is necessary for NPM tests - dp, err := NewDataPlane(thisNode, io, tt.DpCfg, nil) + stopCh := make(chan struct{}, 1) + dp, err := NewDataPlane(thisNode, io, tt.DpCfg, stopCh) require.NoError(t, err, "failed to initialize dp") + defer func() { + stopCh <- struct{}{} + time.Sleep(100 * time.Millisecond) + }() dp.RunPeriodicTasks() diff --git a/npm/pkg/dataplane/ipsets/ipsetmanager.go b/npm/pkg/dataplane/ipsets/ipsetmanager.go index dc80dfae56..8f85810000 100644 --- a/npm/pkg/dataplane/ipsets/ipsetmanager.go +++ b/npm/pkg/dataplane/ipsets/ipsetmanager.go @@ -51,6 +51,9 @@ type IPSetManager struct { setMap map[string]*IPSet dirtyCache dirtyCacheInterface ioShim *common.IOShim + // consecutiveApplyFailures is used in Linux to count the number of consecutive failures to apply ipsets + // if this count exceeds a threshold, we will panic + consecutiveApplyFailures int sync.RWMutex } @@ -71,6 +74,8 @@ func NewIPSetManager(iMgrCfg *IPSetManagerCfg, ioShim *common.IOShim) *IPSetMana setMap: make(map[string]*IPSet), dirtyCache: newDirtyCache(), ioShim: ioShim, + // set to 0 to avoid lint error for windows + consecutiveApplyFailures: 0, } } @@ -88,7 +93,8 @@ func (iMgr *IPSetManager) Reconcile() { } numRemovedSets := originalNumSets - len(iMgr.setMap) if numRemovedSets > 0 { - klog.Infof("[IPSetManager] removed %d empty/unreferenced ipsets, updating toDeleteCache to: %+v", numRemovedSets, iMgr.dirtyCache.printDeleteCache()) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("[IPSetManager] removed %d empty/unreferenced ipsets, updating toDeleteCache to: %+v", numRemovedSets, iMgr.dirtyCache.printDeleteCache()) } } @@ -303,10 +309,11 @@ func (iMgr *IPSetManager) RemoveFromSets(removeFromSets []*IPSetMetadata, ip, po } // in case the IP belongs to a new Pod, then ignore this Delete call as this might be stale if cachedPodKey != podKey { - klog.Infof( - "[IPSetManager] DeleteFromSet: PodOwner has changed for Ip: %s, setName:%s, Old podKey: %s, new podKey: %s. Ignore the delete as this is stale update", - ip, prefixedName, cachedPodKey, podKey, - ) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof( + // "[IPSetManager] DeleteFromSet: PodOwner has changed for Ip: %s, setName:%s, Old podKey: %s, new podKey: %s. Ignore the delete as this is stale update", + // ip, prefixedName, cachedPodKey, podKey, + // ) continue } @@ -448,14 +455,16 @@ func (iMgr *IPSetManager) ApplyIPSets() error { defer iMgr.Unlock() if iMgr.dirtyCache.numSetsToAddOrUpdate() == 0 && iMgr.dirtyCache.numSetsToDelete() == 0 { - klog.Info("[IPSetManager] No IPSets to apply") + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Info("[IPSetManager] No IPSets to apply") return nil } - klog.Infof( - "[IPSetManager] dirty caches. toAddUpdateCache: %s, toDeleteCache: %s", - iMgr.dirtyCache.printAddOrUpdateCache(), iMgr.dirtyCache.printDeleteCache(), - ) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof( + // "[IPSetManager] dirty caches. 
toAddUpdateCache: %s, toDeleteCache: %s", + // iMgr.dirtyCache.printAddOrUpdateCache(), iMgr.dirtyCache.printDeleteCache(), + // ) iMgr.sanitizeDirtyCache() // Call the appropriate apply ipsets diff --git a/npm/pkg/dataplane/ipsets/ipsetmanager_linux.go b/npm/pkg/dataplane/ipsets/ipsetmanager_linux.go index 8a7a8adbf0..4c57cfa20a 100644 --- a/npm/pkg/dataplane/ipsets/ipsetmanager_linux.go +++ b/npm/pkg/dataplane/ipsets/ipsetmanager_linux.go @@ -54,6 +54,8 @@ const ( destroySectionPrefix = "delete" addOrUpdateSectionPrefix = "add/update" ipsetRestoreLineFailurePattern = "Error in line (\\d+):" + + maxConsecutiveFailures = 100 ) var ( @@ -65,36 +67,38 @@ var ( ) /* - based on ipset list output with azure-npm- prefix, create an ipset restore file where we flush all sets first, then destroy all sets - - NOTE: the behavior has changed to run two separate restore files. The first to flush all, the second to destroy all. In between restores, - we determine if there are any sets with leaked ipset reference counts. We ignore destroys for those sets in-line with v1. - - overall error handling: - - if flush fails because the set doesn't exist (should never happen because we're listing sets right before), then ignore it and the destroy - - if flush fails otherwise, then add to destroyFailureCount and continue (aborting the destroy too) - - if destroy fails because the set doesn't exist (should never happen since the flush operation would have worked), then ignore it - - if destroy fails for another reason, then ignore it and add to destroyFailureCount and mark for reconcile (TODO) - - example: - grep output: - azure-npm-123456 - azure-npm-987654 - azure-npm-777777 - - example restore file [flag meanings: -F (flush), -X (destroy)]: - -F azure-npm-123456 - -F azure-npm-987654 - -F azure-npm-777777 - -X azure-npm-123456 - -X azure-npm-987654 - -X azure-npm-777777 - - prometheus metrics: - After this function, NumIPSets should be 0 or the number of NPM IPSets that existed and failed to be destroyed. - When NPM restarts, Prometheus metrics will initialize at 0, but NPM IPSets may exist. - We will reset ipset entry metrics if the restore succeeds whether or not some flushes/destroys failed (NOTE: this is different behavior than v1). - If a flush fails, we could update the num entries for that set, but that would be a lot of overhead. +based on ipset list output with azure-npm- prefix, create an ipset restore file where we flush all sets first, then destroy all sets + +NOTE: the behavior has changed to run two separate restore files. The first to flush all, the second to destroy all. In between restores, +we determine if there are any sets with leaked ipset reference counts. We ignore destroys for those sets in-line with v1. 
+ +overall error handling: +- if flush fails because the set doesn't exist (should never happen because we're listing sets right before), then ignore it and the destroy +- if flush fails otherwise, then add to destroyFailureCount and continue (aborting the destroy too) +- if destroy fails because the set doesn't exist (should never happen since the flush operation would have worked), then ignore it +- if destroy fails for another reason, then ignore it and add to destroyFailureCount and mark for reconcile (TODO) + +example: + + grep output: + azure-npm-123456 + azure-npm-987654 + azure-npm-777777 + + example restore file [flag meanings: -F (flush), -X (destroy)]: + -F azure-npm-123456 + -F azure-npm-987654 + -F azure-npm-777777 + -X azure-npm-123456 + -X azure-npm-987654 + -X azure-npm-777777 + +prometheus metrics: + + After this function, NumIPSets should be 0 or the number of NPM IPSets that existed and failed to be destroyed. + When NPM restarts, Prometheus metrics will initialize at 0, but NPM IPSets may exist. + We will reset ipset entry metrics if the restore succeeds whether or not some flushes/destroys failed (NOTE: this is different behavior than v1). + If a flush fails, we could update the num entries for that set, but that would be a lot of overhead. */ func (iMgr *IPSetManager) resetIPSets() error { if success := iMgr.resetWithoutRestore(); success { @@ -104,7 +108,8 @@ func (iMgr *IPSetManager) resetIPSets() error { // get current NPM ipsets listNamesCommand := iMgr.ioShim.Exec.Command(ipsetCommand, ipsetListFlag, ipsetNameFlag) grepCommand := iMgr.ioShim.Exec.Command(ioutil.Grep, azureNPMPrefix) - klog.Infof("running this command while resetting ipsets: [%s %s %s | %s %s]", ipsetCommand, ipsetListFlag, ipsetNameFlag, ioutil.Grep, azureNPMRegex) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("running this command while resetting ipsets: [%s %s %s | %s %s]", ipsetCommand, ipsetListFlag, ipsetNameFlag, ioutil.Grep, azureNPMRegex) azureIPSets, haveAzureNPMIPSets, commandError := ioutil.PipeCommandToGrep(listNamesCommand, grepCommand) if commandError != nil { return npmerrors.SimpleErrorWrapper("failed to run ipset list for resetting IPSets (prometheus metrics may be off now)", commandError) @@ -141,8 +146,9 @@ func (iMgr *IPSetManager) resetIPSets() error { func (iMgr *IPSetManager) resetWithoutRestore() bool { listNamesCommand := iMgr.ioShim.Exec.Command(ipsetCommand, ipsetListFlag, ipsetNameFlag) grepCommand := iMgr.ioShim.Exec.Command(ioutil.Grep, ioutil.GrepQuietFlag, ioutil.GrepAntiMatchFlag, azureNPMPrefix) - commandString := fmt.Sprintf(" [%s %s %s | %s %s %s %s]", ipsetCommand, ipsetListFlag, ipsetNameFlag, ioutil.Grep, ioutil.GrepQuietFlag, ioutil.GrepAntiMatchFlag, azureNPMPrefix) - klog.Infof("running this command while resetting ipsets: [%s]", commandString) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // commandString := fmt.Sprintf(" [%s %s %s | %s %s %s %s]", ipsetCommand, ipsetListFlag, ipsetNameFlag, ioutil.Grep, ioutil.GrepQuietFlag, ioutil.GrepAntiMatchFlag, azureNPMPrefix) + // klog.Infof("running this command while resetting ipsets: [%s]", commandString) _, haveNonAzureNPMIPSets, commandError := ioutil.PipeCommandToGrep(listNamesCommand, grepCommand) if commandError != nil { metrics.SendErrorLogAndMetric(util.IpsmID, "failed to determine if there were non-azure sets while resetting. 
err: %v", commandError) @@ -153,7 +159,8 @@ func (iMgr *IPSetManager) resetWithoutRestore() bool { } flushAndDestroy := iMgr.ioShim.Exec.Command(util.BashCommand, util.BashCommandFlag, ipsetFlushAndDestroyString) - klog.Infof("running this command while resetting ipsets: [%s %s '%s']", util.BashCommand, util.BashCommandFlag, ipsetFlushAndDestroyString) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("running this command while resetting ipsets: [%s %s '%s']", util.BashCommand, util.BashCommandFlag, ipsetFlushAndDestroyString) output, err := flushAndDestroy.CombinedOutput() if err != nil { exitCode := -1 @@ -219,16 +226,24 @@ func (iMgr *IPSetManager) setsWithReferences() map[string]struct{} { listAllCommand := iMgr.ioShim.Exec.Command(ipsetCommand, ipsetListFlag) grep1 := iMgr.ioShim.Exec.Command(ioutil.Grep, ioutil.GrepBeforeFlag, referenceGrepLookBack, ioutil.GrepRegexFlag, positiveRefsRegex) grep2 := iMgr.ioShim.Exec.Command(ioutil.Grep, ioutil.GrepOnlyMatchingFlag, ioutil.GrepRegexFlag, azureNPMRegex) - klog.Infof("running this command while resetting ipsets: [%s %s | %s %s %s %s %s | %s %s %s %s]", ipsetCommand, ipsetListFlag, - ioutil.Grep, ioutil.GrepBeforeFlag, referenceGrepLookBack, ioutil.GrepRegexFlag, positiveRefsRegex, - ioutil.Grep, ioutil.GrepOnlyMatchingFlag, ioutil.GrepRegexFlag, azureNPMRegex) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("running this command while resetting ipsets: [%s %s | %s %s %s %s %s | %s %s %s %s]", ipsetCommand, ipsetListFlag, + // ioutil.Grep, ioutil.GrepBeforeFlag, referenceGrepLookBack, ioutil.GrepRegexFlag, positiveRefsRegex, + // ioutil.Grep, ioutil.GrepOnlyMatchingFlag, ioutil.GrepRegexFlag, azureNPMRegex) setsWithReferencesBytes, haveRefsStill, err := ioutil.DoublePipeToGrep(listAllCommand, grep1, grep2) var setsWithReferences map[string]struct{} if haveRefsStill { setsWithReferences = readByteLinesToMap(setsWithReferencesBytes) + subset := make(map[string]struct{}, maxLinesToPrint) + for key := range setsWithReferences { + subset[key] = struct{}{} + if len(subset) >= maxLinesToPrint { + break + } + } metrics.SendErrorLogAndMetric(util.IpsmID, "error: found leaked reference counts in kernel. ipsets (max %d): %+v. err: %v", - maxLinesToPrint, setsWithReferences, err) + maxLinesToPrint, subset, err) } return setsWithReferences @@ -376,6 +391,7 @@ func (iMgr *IPSetManager) applyIPSetsWithSaveFile() error { See error handling in applyIPSetsWithSaveFile(). overall format for ipset restore file: + [creates] (random order) [deletes and adds] (sets in random order, where each set has deletes first (random order), then adds (random order)) [flushes] (random order) @@ -408,8 +424,20 @@ func (iMgr *IPSetManager) applyIPSets() error { creator := iMgr.fileCreatorForApply(maxTryCount) restoreError := creator.RunCommandWithFile(ipsetCommand, ipsetRestoreFlag) if restoreError != nil { + iMgr.consecutiveApplyFailures++ + if iMgr.consecutiveApplyFailures >= maxConsecutiveFailures { + msg := fmt.Sprintf("exceeded max consecutive failures (%d) when applying ipsets. 
final error: %s", maxConsecutiveFailures, restoreError.Error()) + klog.Error(msg) + metrics.SendErrorLogAndMetric(util.IpsmID, msg) + metrics.Close() + panic(msg) + } + return npmerrors.SimpleErrorWrapper("ipset restore failed when applying ipsets", restoreError) } + + iMgr.consecutiveApplyFailures = 0 + return nil } @@ -834,9 +862,6 @@ func readByteLinesToMap(output []byte) map[string]struct{} { line, readIndex = parse.Line(readIndex, output) hashedSetName := strings.Trim(string(line), "\n") lines[hashedSetName] = struct{}{} - if len(lines) > maxLinesToPrint { - break - } } return lines } diff --git a/npm/pkg/dataplane/ipsets/ipsetmanager_windows_test.go b/npm/pkg/dataplane/ipsets/ipsetmanager_windows_test.go index 252304cc8d..39f938d1ed 100644 --- a/npm/pkg/dataplane/ipsets/ipsetmanager_windows_test.go +++ b/npm/pkg/dataplane/ipsets/ipsetmanager_windows_test.go @@ -1,7 +1,8 @@ package ipsets import ( - "fmt" + "sort" + "strings" "testing" "github.com/Azure/azure-container-networking/common" @@ -375,11 +376,25 @@ func TestFailureOnDeletion(t *testing.T) { verifyDeletedHNSCache(t, toDeleteSetNames, hns) } +// sorts the Values field of the hcn set policy setting and returns a copy with sorted values +func getSortedHnsPolicySetting(setting *hcn.SetPolicySetting) hcn.SetPolicySetting { + members := strings.Split(setting.Values, ",") + sort.Strings(members) + copyOfSetting := *setting + copyOfSetting.Values = strings.Join(members, ",") + return copyOfSetting +} + func verifyHNSCache(t *testing.T, expected map[string]hcn.SetPolicySetting, hns *hnswrapper.Hnsv2wrapperFake) { for setName, setObj := range expected { cacheObj := hns.Cache.SetPolicy(setObj.Id) require.NotNil(t, cacheObj) - require.Equal(t, setObj, *cacheObj, fmt.Sprintf("%s mismatch in cache", setName)) + + // make values always sorted for testing consistency + copyOfCachedObj := getSortedHnsPolicySetting(cacheObj) + copyOfExpectedObj := getSortedHnsPolicySetting(&setObj) + + require.Equal(t, copyOfExpectedObj, copyOfCachedObj, setName+" mismatch in cache") } } diff --git a/npm/pkg/dataplane/ipsets/testutils_linux.go b/npm/pkg/dataplane/ipsets/testutils_linux.go index 94a59f4f45..3f0ef0117d 100644 --- a/npm/pkg/dataplane/ipsets/testutils_linux.go +++ b/npm/pkg/dataplane/ipsets/testutils_linux.go @@ -11,6 +11,12 @@ var ( Stdout: "success", ExitCode: 0, } + + fakeRestoreFailureCommand = testutils.TestCmd{ + Cmd: ipsetRestoreStringSlice, + Stdout: "failure", + ExitCode: 1, + } ) func GetApplyIPSetsTestCalls(toAddOrUpdateIPSets, toDeleteIPSets []*IPSetMetadata) []testutils.TestCmd { @@ -20,6 +26,16 @@ func GetApplyIPSetsTestCalls(toAddOrUpdateIPSets, toDeleteIPSets []*IPSetMetadat return []testutils.TestCmd{fakeRestoreSuccessCommand} } +func GetApplyIPSetsFailureTestCalls() []testutils.TestCmd { + return []testutils.TestCmd{ + fakeRestoreFailureCommand, + fakeRestoreFailureCommand, + fakeRestoreFailureCommand, + fakeRestoreFailureCommand, + fakeRestoreFailureCommand, + } +} + func GetResetTestCalls() []testutils.TestCmd { return []testutils.TestCmd{ {Cmd: []string{"ipset", "list", "--name"}, PipedToCommand: true}, diff --git a/npm/pkg/dataplane/ipsets/testutils_windows.go b/npm/pkg/dataplane/ipsets/testutils_windows.go index f9235beffe..1a14d4667a 100644 --- a/npm/pkg/dataplane/ipsets/testutils_windows.go +++ b/npm/pkg/dataplane/ipsets/testutils_windows.go @@ -27,6 +27,10 @@ func GetApplyIPSetsTestCalls(_, _ []*IPSetMetadata) []testutils.TestCmd { return []testutils.TestCmd{} } +func GetApplyIPSetsFailureTestCalls() []testutils.TestCmd 
{ + return []testutils.TestCmd{} +} + func GetResetTestCalls() []testutils.TestCmd { return []testutils.TestCmd{} } diff --git a/npm/pkg/dataplane/parse/parser_test.go b/npm/pkg/dataplane/parse/parser_test.go index a5d880e644..daa58c2a42 100644 --- a/npm/pkg/dataplane/parse/parser_test.go +++ b/npm/pkg/dataplane/parse/parser_test.go @@ -30,7 +30,7 @@ func TestParseIptablesObjectFileV2(t *testing.T) { func TestParseIptablesObject(t *testing.T) { calls := []testutils.TestCmd{ - {Cmd: []string{"iptables-save", "-t", "filter"}}, + {Cmd: []string{"iptables-nft-save", "-t", "filter"}}, } parser := IPTablesParser{ diff --git a/npm/pkg/dataplane/policies/chain-management_linux.go b/npm/pkg/dataplane/policies/chain-management_linux.go index 0ad7c0ae7d..734443929a 100644 --- a/npm/pkg/dataplane/policies/chain-management_linux.go +++ b/npm/pkg/dataplane/policies/chain-management_linux.go @@ -86,6 +86,11 @@ var ( util.IptablesJumpFlag, util.IptablesAzureChain, } + + listHintChainArgs = []string{"KUBE-IPTABLES-HINT", util.IptablesTableFlag, util.IptablesMangleTable, util.IptablesNumericFlag} + listCanaryChainArgs = []string{"KUBE-KUBELET-CANARY", util.IptablesTableFlag, util.IptablesMangleTable, util.IptablesNumericFlag} + + errDetectingIptablesVersion = errors.New("unable to locate which iptables version kube proxy is using") ) type exitErrorInfo struct { @@ -162,6 +167,8 @@ func isBaseChain(chain string) bool { Called once at startup. Like the rest of PolicyManager, minimizes the number of OS calls by consolidating all possible actions into one iptables-restore call. +0.1. Detect iptables version. +0.2. Clean up legacy tables if using nft and vice versa. 1. Delete the deprecated jump from FORWARD to AZURE-NPM chain (if it exists). 2. Cleanup old NPM chains, and configure base chains and their rules. 1. Do the following via iptables-restore --noflush: @@ -181,87 +188,29 @@ TODO: could use one grep call instead of separate calls for getting jump line nu func (pMgr *PolicyManager) bootup(_ []string) error { klog.Infof("booting up iptables Azure chains") + // 0.1. Detect iptables version + if err := pMgr.detectIptablesVersion(); err != nil { + return npmerrors.SimpleErrorWrapper("failed to detect iptables version", err) + } + // Stop reconciling so we don't contend for iptables, and so we don't update the staleChains at the same time as reconcile() // Reconciling would only be happening if this function were called to reset iptables well into the azure-npm pod lifecycle. pMgr.reconcileManager.forceLock() defer pMgr.reconcileManager.forceUnlock() - if strings.Contains(util.Iptables, "nft") { - klog.Info("detected nft iptables. cleaning up legacy iptables") - util.Iptables = util.IptablesLegacy - util.IptablesSave = util.IptablesSaveLegacy - util.IptablesRestore = util.IptablesRestoreLegacy - - // 0. delete the deprecated jump to deprecated AZURE-NPM in legacy iptables - deprecatedErrCode, deprecatedErr := pMgr.ignoreErrorsAndRunIPTablesCommand(removeDeprecatedJumpIgnoredErrors, util.IptablesDeletionFlag, deprecatedJumpFromForwardToAzureChainArgs...) - if deprecatedErrCode == 0 { - klog.Infof("deleted deprecated jump rule from FORWARD chain to AZURE-NPM chain") - } else if deprecatedErr != nil { - metrics.SendErrorLogAndMetric(util.IptmID, - "failed to delete deprecated jump rule from FORWARD chain to AZURE-NPM chain for unexpected reason with exit code %d and error: %s", - deprecatedErrCode, deprecatedErr.Error()) - } - - // 0. 
delete the deprecated jump to current AZURE-NPM in legacy iptables - deprecatedErrCode, deprecatedErr = pMgr.ignoreErrorsAndRunIPTablesCommand(removeDeprecatedJumpIgnoredErrors, util.IptablesDeletionFlag, jumpFromForwardToAzureChainArgs...) - if deprecatedErrCode == 0 { - klog.Infof("deleted deprecated jump rule from FORWARD chain to AZURE-NPM chain") - } else if deprecatedErr != nil { - metrics.SendErrorLogAndMetric(util.IptmID, - "failed to delete deprecated jump rule from FORWARD chain to AZURE-NPM chain for unexpected reason with exit code %d and error: %s", - deprecatedErrCode, deprecatedErr.Error()) - } - - // clean up current chains in legacy iptables - currentChains, err := ioutil.AllCurrentAzureChains(pMgr.ioShim.Exec, util.IptablesDefaultWaitTime) - if err != nil { - return npmerrors.SimpleErrorWrapper("failed to get current chains for bootup", err) - } - - // We have only one chance to clean existing legacy iptables chains. - // So flush all the chains and then destroy them - var aggregateError error - for chain := range currentChains { - errCode, err := pMgr.runIPTablesCommand(util.IptablesFlushFlag, chain) - if err != nil && errCode != doesNotExistErrorCode { - // add to staleChains if it's not one of the iptablesAzureChains - pMgr.staleChains.add(chain) - currentErrString := fmt.Sprintf("failed to flush chain %s with err [%v]", chain, err) - if aggregateError == nil { - aggregateError = npmerrors.SimpleError(currentErrString) - } else { - aggregateError = npmerrors.SimpleErrorWrapper(fmt.Sprintf("%s and had previous error", currentErrString), aggregateError) - } - } - } - - for chain := range currentChains { - errCode, err := pMgr.runIPTablesCommand(util.IptablesDestroyFlag, chain) - if err != nil && errCode != doesNotExistErrorCode { - // add to staleChains if it's not one of the iptablesAzureChains - pMgr.staleChains.add(chain) - currentErrString := fmt.Sprintf("failed to delete chain %s with err [%v]", chain, err) - if aggregateError == nil { - aggregateError = npmerrors.SimpleError(currentErrString) - } else { - aggregateError = npmerrors.SimpleErrorWrapper(fmt.Sprintf("%s and had previous error", currentErrString), aggregateError) - } - } - } - - if aggregateError != nil { - metrics.SendErrorLogAndMetric(util.IptmID, - "failed to flush and delete stale chain in legacy iptables with error: %s", - aggregateError.Error()) - } + // 0.2. cleanup + if err := pMgr.cleanupOtherIptables(); err != nil { + return npmerrors.SimpleErrorWrapper("failed to cleanup other iptables chains", err) + } - util.Iptables = util.IptablesNft - util.IptablesSave = util.IptablesSaveNft - util.IptablesRestore = util.IptablesRestoreNft + if err := pMgr.bootupAfterDetectAndCleanup(); err != nil { + return err } - klog.Info("cleaning up default iptables") + return nil +} +func (pMgr *PolicyManager) bootupAfterDetectAndCleanup() error { // 1. delete the deprecated jump to AZURE-NPM deprecatedErrCode, deprecatedErr := pMgr.ignoreErrorsAndRunIPTablesCommand(removeDeprecatedJumpIgnoredErrors, util.IptablesDeletionFlag, deprecatedJumpFromForwardToAzureChainArgs...) if deprecatedErrCode == 0 { @@ -294,6 +243,214 @@ func (pMgr *PolicyManager) bootup(_ []string) error { return nil } +// detectIptablesVersion sets the global iptables variable to nft if detected or legacy if detected. +// NPM will crash if it fails to detect either. +// This global variable is referenced in all iptables related functions. +// NPM should use the same iptables version as kube-proxy. 
+// kube-proxy creates an iptables chain as a hint for which version it uses. +// For more details, see: https://kubernetes.io/blog/2022/09/07/iptables-chains-not-api/#use-case-iptables-mode +func (pMgr *PolicyManager) detectIptablesVersion() error { + klog.Info("first attempt detecting iptables version. looking for hint/canary chain in iptables-nft") + if pMgr.hintOrCanaryChainExist(util.IptablesNft) { + util.SetIptablesToNft() + return nil + } + + klog.Info("second attempt detecting iptables version. looking for hint/canary chain in iptables-legacy") + if pMgr.hintOrCanaryChainExist(util.IptablesLegacy) { + util.SetIptablesToLegacy() + return nil + } + + return errDetectingIptablesVersion +} + +func (pMgr *PolicyManager) hintOrCanaryChainExist(iptablesCmd string) bool { + // hint chain should exist since k8s 1.24 (see https://kubernetes.io/blog/2022/09/07/iptables-chains-not-api/#use-case-iptables-mode) + prevIptables := util.Iptables + util.Iptables = iptablesCmd + defer func() { + util.Iptables = prevIptables + }() + + _, hintErr := pMgr.runIPTablesCommand(util.IptablesListFlag, listHintChainArgs...) + if hintErr == nil { + metrics.SendLog(util.IptmID, "found hint chain. will use iptables version: "+iptablesCmd, metrics.DonotPrint) + return true + } + + // check for canary chain + _, canaryErr := pMgr.runIPTablesCommand(util.IptablesListFlag, listCanaryChainArgs...) + if canaryErr != nil { + return false + } + + metrics.SendLog(util.IptmID, "found canary chain. will use iptables version: "+iptablesCmd, metrics.DonotPrint) + return true +} + +// cleanupOtherIptables cleans up legacy tables if using nft and vice versa. +// It will only return an error if it fails to delete a jump rule and flush the AZURE-NPM chain (see comment about #3088 below). +// Cleanup logic: +// 1. delete jump rules to AZURE-NPM +// 2. flush all chains +// 3. delete all chains +func (pMgr *PolicyManager) cleanupOtherIptables() error { + hadNFT := util.Iptables == util.IptablesNft + if hadNFT { + klog.Info("detected nft iptables. cleaning up legacy iptables") + util.SetIptablesToLegacy() + } else { + klog.Info("detected legacy iptables. cleaning up nft iptables") + util.SetIptablesToNft() + } + + defer func() { + if hadNFT { + klog.Info("cleaned up legacy iptables") + util.SetIptablesToNft() + } else { + klog.Info("cleaned up nft iptables") + util.SetIptablesToLegacy() + } + }() + + deletedJumpRule := false + + // 1.1. delete the deprecated jump to AZURE-NPM + errCode, err := pMgr.ignoreErrorsAndRunIPTablesCommand(removeDeprecatedJumpIgnoredErrors, util.IptablesDeletionFlag, deprecatedJumpFromForwardToAzureChainArgs...) + if errCode == 0 { + klog.Infof("[cleanup] deleted deprecated jump rule from FORWARD chain to AZURE-NPM chain") + deletedJumpRule = true + } else if err != nil { + metrics.SendErrorLogAndMetric(util.IptmID, + "[cleanup] failed to delete deprecated jump rule from FORWARD chain to AZURE-NPM chain for unexpected reason with exit code %d and error: %s", + errCode, err.Error()) + } + + // 1.2. delete the jump to AZURE-NPM + errCode, err = pMgr.ignoreErrorsAndRunIPTablesCommand(removeDeprecatedJumpIgnoredErrors, util.IptablesDeletionFlag, jumpFromForwardToAzureChainArgs...) 
+    if errCode == 0 {
+        deletedJumpRule = true
+        klog.Infof("[cleanup] deleted jump rule from FORWARD chain to AZURE-NPM chain")
+    } else if err != nil {
+        metrics.SendErrorLogAndMetric(util.IptmID,
+            "[cleanup] failed to delete jump rule from FORWARD chain to AZURE-NPM chain for unexpected reason with exit code %d and error: %s",
+            errCode, err.Error())
+    }
+
+    // 2. get current chains
+    currentChains, err := ioutil.AllCurrentAzureChains(pMgr.ioShim.Exec, util.IptablesDefaultWaitTime)
+    if err != nil {
+        return npmerrors.SimpleErrorWrapper("[cleanup] failed to get current chains for bootup", err)
+    }
+
+    if len(currentChains) == 0 {
+        klog.Info("no chains to cleanup")
+        return nil
+    }
+
+    klog.Infof("[cleanup] %d chains to clean up", len(currentChains))
+
+    // 3.1. try to flush all chains at once
+    chains := make([]string, 0, len(currentChains))
+    _, hasAzureChain := currentChains[util.IptablesAzureChain]
+    if hasAzureChain {
+        // putting AZURE-NPM chain first is required for proper unit testing (for determinism in destroying chains)
+        chains = append(chains, util.IptablesAzureChain)
+    }
+    for chain := range currentChains {
+        if chain == util.IptablesAzureChain {
+            // putting AZURE-NPM chain first is required for proper unit testing (for determinism in destroying chains)
+            continue
+        }
+        chains = append(chains, chain)
+    }
+
+    creator := pMgr.creatorForCleanup(chains)
+    if err := restore(creator); err != nil {
+        msg := "[cleanup] failed to flush all chains with error: %s"
+        klog.Infof(msg, err.Error())
+        metrics.SendErrorLogAndMetric(util.IptmID, msg, err.Error())
+
+        // 3.2. if we failed to flush all chains at once, then try to flush them one by one (they are destroyed one by one in step 4 below)
+        var aggregateError error
+        if _, ok := currentChains[util.IptablesAzureChain]; ok {
+            _, err := pMgr.runIPTablesCommand(util.IptablesFlushFlag, util.IptablesAzureChain)
+            aggregateError = err
+            if err != nil && !deletedJumpRule {
+                // fixes #3088
+                // if we failed to delete a jump rule to AZURE-NPM and also failed to flush the AZURE-NPM chain,
+                // then a jump rule to AZURE-NPM may still exist, and the chain may still contain rules that allow or drop packets.
+                // We have failed to clean up the other iptables rules, and there is no guarantee that packets will be processed correctly now.
+                // So we must crash and retry.
+                return npmerrors.SimpleErrorWrapper("[cleanup] must crash and retry. failed to delete jump rule and flush AZURE-NPM chain with error", err)
+            }
+        }
+
+        for chain := range currentChains {
+            if chain == util.IptablesAzureChain {
+                // already flushed above
+                continue
+            }
+
+            errCode, err := pMgr.runIPTablesCommand(util.IptablesFlushFlag, chain)
+            if err != nil && errCode != doesNotExistErrorCode {
+                // NOTE: if we fail to flush or delete the chain, then we will never clean it up in the future.
+                // This has been the behavior ever since NPM first supported nft (we used to mark the chain stale, but this would not have worked as expected).
+                // NPM currently has no mechanism for retrying flush/delete for a chain from the other iptables version (other than the AZURE-NPM chain, which is handled above).
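+                // Aggregate the failure but keep flushing the remaining chains so one bad chain doesn't block the rest of cleanup.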
+                currentErrString := fmt.Sprintf("failed to flush chain %s with err [%v]", chain, err)
+                if aggregateError == nil {
+                    aggregateError = npmerrors.SimpleError(currentErrString)
+                } else {
+                    aggregateError = npmerrors.SimpleErrorWrapper(currentErrString+" and had previous error", aggregateError)
+                }
+            }
+        }
+
+        if aggregateError != nil {
+            metrics.SendErrorLogAndMetric(util.IptmID,
+                "[cleanup] benign failure to flush chains with error: %s",
+                aggregateError.Error())
+        }
+    }
+
+    // 4. delete all chains
+    var aggregateError error
+    for _, chain := range chains {
+        errCode, err := pMgr.runIPTablesCommand(util.IptablesDestroyFlag, chain)
+        if err != nil && errCode != doesNotExistErrorCode {
+            // NOTE: if we fail to flush or delete the chain, then we will never clean it up in the future.
+            // This has been the behavior ever since NPM first supported nft (we used to mark the chain stale, but this would not have worked as expected).
+            // NPM currently has no mechanism for retrying flush/delete for a chain from the other iptables version (other than the AZURE-NPM chain, which is handled above).
+            currentErrString := fmt.Sprintf("failed to delete chain %s with err [%v]", chain, err)
+            if aggregateError == nil {
+                aggregateError = npmerrors.SimpleError(currentErrString)
+            } else {
+                aggregateError = npmerrors.SimpleErrorWrapper(currentErrString+" and had previous error", aggregateError)
+            }
+        }
+    }
+
+    if aggregateError != nil {
+        metrics.SendErrorLogAndMetric(util.IptmID,
+            "[cleanup] benign failure to delete chains with error: %s",
+            aggregateError.Error())
+    }
+
+    return nil
+}
+
+func (pMgr *PolicyManager) creatorForCleanup(chains []string) *ioutil.FileCreator {
+    // pass nil since lines like ":CHAIN-NAME - -" are only needed when creating chains
+    creator := pMgr.newCreatorWithChains(nil)
+    for _, chain := range chains {
+        creator.AddLine("", nil, "-F "+chain)
+    }
+    creator.AddLine("", nil, util.IptablesRestoreCommit)
+    return creator
+}
+
 // reconcile does the following:
 // - creates the jump rule from FORWARD chain to AZURE-NPM chain (if it does not exist) and makes sure it's after the jumps to KUBE-FORWARD & KUBE-SERVICES chains (if they exist).
 // - cleans up stale policy chains. It can be forced to stop this process if reconcileManager.forceLock() is called.
@@ -363,7 +520,7 @@ func (pMgr *PolicyManager) ignoreErrorsAndRunIPTablesCommand(ignored []*exitErro
     allArgs := []string{util.IptablesWaitFlag, util.IptablesDefaultWaitTime, operationFlag}
     allArgs = append(allArgs, args...)
 
-    klog.Infof("Executing iptables command with args %v", allArgs)
+    klog.Infof("executing iptables command [%s] with args %v", util.Iptables, allArgs)
 
     command := pMgr.ioShim.Exec.Command(util.Iptables, allArgs...)
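+    // CombinedOutput (below) captures stdout and stderr together, so iptables' message text
+    // (e.g. "No chain/target/match by that name") is available when deciding whether an error is on the ignored list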
output, err := command.CombinedOutput() diff --git a/npm/pkg/dataplane/policies/chain-management_linux_test.go b/npm/pkg/dataplane/policies/chain-management_linux_test.go index 75fa6994b5..d78576950f 100644 --- a/npm/pkg/dataplane/policies/chain-management_linux_test.go +++ b/npm/pkg/dataplane/policies/chain-management_linux_test.go @@ -26,8 +26,7 @@ Chain AZURE-NPM-INGRESS (1 references) Chain AZURE-NPM-INGRESS-ALLOW-MARK (1 references) ` - grepOutputAzureV1Chains = `Chain AZURE-NPM -Chain AZURE-NPM (1 references) + grepOutputAzureV1Chains = `Chain AZURE-NPM (1 references) Chain AZURE-NPM-INGRESS (1 references) Chain AZURE-NPM-INGRESS-DROPS (1 references) Chain AZURE-NPM-INGRESS-TO (1 references) @@ -38,28 +37,12 @@ Chain AZURE-NPM-EGRESS-FROM (1 references) Chain AZURE-NPM-EGRESS-PORTS (1 references) Chain AZURE-NPM-ACCEPT (1 references) ` -) -// similar to TestBootup in policymanager.go except an error occurs -func TestBootupFailure(t *testing.T) { - metrics.ReinitializeAll() - calls := []testutils.TestCmd{ - {Cmd: []string{"iptables", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM"}, ExitCode: 2}, //nolint // AZURE-NPM chain didn't exist - {Cmd: listAllCommandStrings, PipedToCommand: true, HasStartError: true, ExitCode: 1}, - {Cmd: []string{"grep", "Chain AZURE-NPM"}}, - } - ioshim := common.NewMockIOShim(calls) - defer ioshim.VerifyCalls(t, calls) - pMgr := NewPolicyManager(ioshim, ipsetConfig) - - metrics.IncNumACLRules() - metrics.IncNumACLRules() - - require.Error(t, pMgr.Bootup(nil)) - - // make sure that the metrics were reset - promVals{0, 0}.testPrometheusMetrics(t) -} + // pMgr.cleanupOtherIptables() can't be tested deterministically for more than two chains + grepOutputTwoAzureChains = `Chain AZURE-NPM (1 references) +Chain AZURE-NPM-INGRESS (1 references) +` +) func TestStaleChainsForceLock(t *testing.T) { testChains := []string{} @@ -364,25 +347,36 @@ func TestBootupLinux(t *testing.T) { // all tests with "no NPM prior" work for any situation (with v1 or v2 prior), // but the fake command exit codes and stdouts are in line with having no NPM prior { - name: "success (no NPM prior)", - calls: GetBootupTestCalls(false), + name: "success (no NPM prior)", + calls: []testutils.TestCmd{ + {Cmd: []string{"iptables-nft", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM"}, ExitCode: 2}, //nolint // AZURE-NPM chain didn't exist + {Cmd: []string{"iptables-nft", "-w", "60", "-t", "filter", "-n", "-L"}, PipedToCommand: true}, + { + Cmd: []string{"grep", "Chain AZURE-NPM"}, + ExitCode: 1, + }, + fakeIPTablesRestoreCommand, + {Cmd: listLineNumbersCommandStrings, PipedToCommand: true}, + {Cmd: []string{"grep", "AZURE-NPM"}, ExitCode: 1}, + {Cmd: []string{"iptables-nft", "-w", "60", "-I", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, + }, wantErr: false, }, { name: "success after restore failure (no NPM prior)", calls: []testutils.TestCmd{ { - Cmd: []string{"iptables", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM"}, + Cmd: []string{"iptables-nft", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM"}, ExitCode: 2, Stdout: "iptables v1.8.4 (legacy): Couldn't load target `AZURE-NPM':No such file or directory", }, // AZURE-NPM chain didn't exist - {Cmd: listAllCommandStrings, PipedToCommand: true}, + {Cmd: []string{"iptables-nft", "-w", "60", "-t", "filter", "-n", "-L"}, PipedToCommand: true}, {Cmd: []string{"grep", "Chain AZURE-NPM"}, ExitCode: 1}, fakeIPTablesRestoreFailureCommand, // e.g. xtables lock held by another app. 
Currently the stdout doesn't matter for retrying fakeIPTablesRestoreCommand, {Cmd: listLineNumbersCommandStrings, PipedToCommand: true}, {Cmd: []string{"grep", "AZURE-NPM"}, ExitCode: 1}, - {Cmd: []string{"iptables", "-w", "60", "-I", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-I", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, }, wantErr: false, }, @@ -390,11 +384,11 @@ func TestBootupLinux(t *testing.T) { name: "success: v2 existed prior", calls: []testutils.TestCmd{ { - Cmd: []string{"iptables", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM"}, + Cmd: []string{"iptables-nft", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM"}, ExitCode: 1, Stdout: "No chain/target/match by that name", }, // deprecated rule did not exist - {Cmd: listAllCommandStrings, PipedToCommand: true}, + {Cmd: []string{"iptables-nft", "-w", "60", "-t", "filter", "-n", "-L"}, PipedToCommand: true}, { Cmd: []string{"grep", "Chain AZURE-NPM"}, Stdout: grepOutputAzureChainsWithoutPolicies, @@ -402,15 +396,15 @@ func TestBootupLinux(t *testing.T) { fakeIPTablesRestoreCommand, {Cmd: listLineNumbersCommandStrings, PipedToCommand: true}, {Cmd: []string{"grep", "AZURE-NPM"}, ExitCode: 1}, - {Cmd: []string{"iptables", "-w", "60", "-I", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-I", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, }, wantErr: false, }, { name: "v1 existed prior: successfully delete deprecated jump", calls: []testutils.TestCmd{ - {Cmd: []string{"iptables", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM"}}, // deprecated rule existed - {Cmd: listAllCommandStrings, PipedToCommand: true}, + {Cmd: []string{"iptables-nft", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM"}}, // deprecated rule existed + {Cmd: []string{"iptables-nft", "-w", "60", "-t", "filter", "-n", "-L"}, PipedToCommand: true}, { Cmd: []string{"grep", "Chain AZURE-NPM"}, Stdout: grepOutputAzureV1Chains, @@ -418,15 +412,15 @@ func TestBootupLinux(t *testing.T) { fakeIPTablesRestoreCommand, {Cmd: listLineNumbersCommandStrings, PipedToCommand: true}, {Cmd: []string{"grep", "AZURE-NPM"}, ExitCode: 1}, - {Cmd: []string{"iptables", "-w", "60", "-I", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-I", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, }, wantErr: false, }, { name: "v1 existed prior: unknown error while deleting deprecated jump", calls: []testutils.TestCmd{ - {Cmd: []string{"iptables", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM"}, ExitCode: 3}, // unknown error - {Cmd: listAllCommandStrings, PipedToCommand: true}, + {Cmd: []string{"iptables-nft", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM"}, ExitCode: 3}, // unknown error + {Cmd: []string{"iptables-nft", "-w", "60", "-t", "filter", "-n", "-L"}, PipedToCommand: true}, { Cmd: []string{"grep", "Chain AZURE-NPM"}, Stdout: grepOutputAzureV1Chains, @@ -434,15 +428,15 @@ func TestBootupLinux(t *testing.T) { fakeIPTablesRestoreCommand, {Cmd: listLineNumbersCommandStrings, PipedToCommand: true}, {Cmd: []string{"grep", "AZURE-NPM"}, ExitCode: 1}, - {Cmd: []string{"iptables", "-w", "60", "-I", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-I", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, }, wantErr: false, }, { name: "failure 
while finding current chains (no NPM prior)", calls: []testutils.TestCmd{ - {Cmd: []string{"iptables", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM"}, ExitCode: 2}, // AZURE-NPM chain didn't exist - {Cmd: listAllCommandStrings, PipedToCommand: true, HasStartError: true, ExitCode: 1}, + {Cmd: []string{"iptables-nft", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM"}, ExitCode: 2}, // AZURE-NPM chain didn't exist + {Cmd: []string{"iptables-nft", "-w", "60", "-t", "filter", "-n", "-L"}, PipedToCommand: true, HasStartError: true, ExitCode: 1}, {Cmd: []string{"grep", "Chain AZURE-NPM"}}, }, wantErr: true, @@ -450,8 +444,8 @@ func TestBootupLinux(t *testing.T) { { name: "failure twice on restore (no NPM prior)", calls: []testutils.TestCmd{ - {Cmd: []string{"iptables", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM"}, ExitCode: 2}, // AZURE-NPM chain didn't exist - {Cmd: listAllCommandStrings, PipedToCommand: true}, + {Cmd: []string{"iptables-nft", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM"}, ExitCode: 2}, // AZURE-NPM chain didn't exist + {Cmd: []string{"iptables-nft", "-w", "60", "-t", "filter", "-n", "-L"}, PipedToCommand: true}, {Cmd: []string{"grep", "Chain AZURE-NPM"}, ExitCode: 1}, fakeIPTablesRestoreFailureCommand, fakeIPTablesRestoreFailureCommand, @@ -461,8 +455,8 @@ func TestBootupLinux(t *testing.T) { { name: "failure on position (no NPM prior)", calls: []testutils.TestCmd{ - {Cmd: []string{"iptables", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM"}, ExitCode: 2}, // AZURE-NPM chain didn't exist - {Cmd: listAllCommandStrings, PipedToCommand: true}, + {Cmd: []string{"iptables-nft", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM"}, ExitCode: 2}, // AZURE-NPM chain didn't exist + {Cmd: []string{"iptables-nft", "-w", "60", "-t", "filter", "-n", "-L"}, PipedToCommand: true}, { Cmd: []string{"grep", "Chain AZURE-NPM"}, Stdout: grepOutputAzureChainsWithoutPolicies, @@ -471,7 +465,7 @@ func TestBootupLinux(t *testing.T) { {Cmd: listLineNumbersCommandStrings, PipedToCommand: true}, {Cmd: []string{"grep", "AZURE-NPM"}, ExitCode: 1}, { - Cmd: []string{"iptables", "-w", "60", "-I", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}, + Cmd: []string{"iptables-nft", "-w", "60", "-I", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}, ExitCode: 1, }, }, @@ -484,7 +478,7 @@ func TestBootupLinux(t *testing.T) { ioshim := common.NewMockIOShim(tt.calls) defer ioshim.VerifyCalls(t, tt.calls) pMgr := NewPolicyManager(ioshim, ipsetConfig) - err := pMgr.bootup(nil) + err := pMgr.bootupAfterDetectAndCleanup() if tt.wantErr { require.Error(t, err) } else { @@ -506,7 +500,7 @@ func TestPositionAzureChainJumpRule(t *testing.T) { calls: []testutils.TestCmd{ {Cmd: listLineNumbersCommandStrings, PipedToCommand: true}, {Cmd: []string{"grep", "AZURE-NPM"}, ExitCode: 1}, - {Cmd: []string{"iptables", "-w", "60", "-I", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-I", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, }, placeAzureChainFirst: util.PlaceAzureChainFirst, wantErr: false, @@ -516,7 +510,7 @@ func TestPositionAzureChainJumpRule(t *testing.T) { calls: []testutils.TestCmd{ {Cmd: listLineNumbersCommandStrings, PipedToCommand: true}, {Cmd: []string{"grep", "AZURE-NPM"}, ExitCode: 1}, - {Cmd: []string{"iptables", "-w", "60", "-I", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}, ExitCode: 1}, + {Cmd: []string{"iptables-nft", "-w", "60", "-I", "FORWARD", "-j", 
"AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}, ExitCode: 1}, }, placeAzureChainFirst: util.PlaceAzureChainFirst, wantErr: true, @@ -550,8 +544,8 @@ func TestPositionAzureChainJumpRule(t *testing.T) { Cmd: []string{"grep", "AZURE-NPM"}, Stdout: "2 AZURE-NPM all -- 0.0.0.0/0 0.0.0.0/0 ...", }, - {Cmd: []string{"iptables", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, - {Cmd: []string{"iptables", "-w", "60", "-I", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-I", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, }, placeAzureChainFirst: util.PlaceAzureChainFirst, wantErr: false, @@ -564,7 +558,7 @@ func TestPositionAzureChainJumpRule(t *testing.T) { Cmd: []string{"grep", "AZURE-NPM"}, Stdout: "2 AZURE-NPM all -- 0.0.0.0/0 0.0.0.0/0 ...", }, - {Cmd: []string{"iptables", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}, ExitCode: 1}, + {Cmd: []string{"iptables-nft", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}, ExitCode: 1}, }, placeAzureChainFirst: util.PlaceAzureChainFirst, wantErr: true, @@ -577,8 +571,8 @@ func TestPositionAzureChainJumpRule(t *testing.T) { Cmd: []string{"grep", "AZURE-NPM"}, Stdout: "2 AZURE-NPM all -- 0.0.0.0/0 0.0.0.0/0 ...", }, - {Cmd: []string{"iptables", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, - {Cmd: []string{"iptables", "-w", "60", "-I", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}, ExitCode: 1}, + {Cmd: []string{"iptables-nft", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-I", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}, ExitCode: 1}, }, placeAzureChainFirst: util.PlaceAzureChainFirst, wantErr: true, @@ -590,7 +584,7 @@ func TestPositionAzureChainJumpRule(t *testing.T) { {Cmd: []string{"grep", "AZURE-NPM"}, ExitCode: 1}, {Cmd: listLineNumbersCommandStrings, PipedToCommand: true}, {Cmd: []string{"grep", "KUBE-SERVICES"}, ExitCode: 1}, - {Cmd: []string{"iptables", "-w", "60", "-I", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-I", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, }, placeAzureChainFirst: util.PlaceAzureChainAfterKubeServices, wantErr: false, @@ -605,7 +599,7 @@ func TestPositionAzureChainJumpRule(t *testing.T) { Cmd: []string{"grep", "KUBE-SERVICES"}, Stdout: "3 KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 ...", }, - {Cmd: []string{"iptables", "-w", "60", "-I", "FORWARD", "4", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-I", "FORWARD", "4", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, }, placeAzureChainFirst: util.PlaceAzureChainAfterKubeServices, wantErr: false, @@ -620,8 +614,8 @@ func TestPositionAzureChainJumpRule(t *testing.T) { }, {Cmd: listLineNumbersCommandStrings, PipedToCommand: true}, {Cmd: []string{"grep", "KUBE-SERVICES"}, ExitCode: 1}, - {Cmd: []string{"iptables", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, - {Cmd: []string{"iptables", "-w", "60", "-I", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", 
"--ctstate", "NEW"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-I", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, }, placeAzureChainFirst: util.PlaceAzureChainAfterKubeServices, wantErr: false, @@ -636,8 +630,8 @@ func TestPositionAzureChainJumpRule(t *testing.T) { }, {Cmd: listLineNumbersCommandStrings, PipedToCommand: true}, {Cmd: []string{"grep", "KUBE-SERVICES"}, ExitCode: 1}, - {Cmd: []string{"iptables", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, - {Cmd: []string{"iptables", "-w", "60", "-I", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-I", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, }, placeAzureChainFirst: util.PlaceAzureChainAfterKubeServices, wantErr: false, @@ -672,8 +666,8 @@ func TestPositionAzureChainJumpRule(t *testing.T) { Cmd: []string{"grep", "KUBE-SERVICES"}, Stdout: "3 KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 ...", }, - {Cmd: []string{"iptables", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, - {Cmd: []string{"iptables", "-w", "60", "-I", "FORWARD", "4", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-I", "FORWARD", "4", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, }, placeAzureChainFirst: util.PlaceAzureChainAfterKubeServices, wantErr: false, @@ -691,8 +685,8 @@ func TestPositionAzureChainJumpRule(t *testing.T) { Cmd: []string{"grep", "KUBE-SERVICES"}, Stdout: "3 KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 ...", }, - {Cmd: []string{"iptables", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, - {Cmd: []string{"iptables", "-w", "60", "-I", "FORWARD", "3", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-I", "FORWARD", "3", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, }, placeAzureChainFirst: util.PlaceAzureChainAfterKubeServices, wantErr: false, @@ -719,7 +713,7 @@ func TestPositionAzureChainJumpRule(t *testing.T) { Cmd: []string{"grep", "KUBE-SERVICES"}, Stdout: "3 KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 ...", }, - {Cmd: []string{"iptables", "-w", "60", "-I", "FORWARD", "4", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}, ExitCode: 1}, + {Cmd: []string{"iptables-nft", "-w", "60", "-I", "FORWARD", "4", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}, ExitCode: 1}, }, placeAzureChainFirst: util.PlaceAzureChainAfterKubeServices, wantErr: true, @@ -737,7 +731,7 @@ func TestPositionAzureChainJumpRule(t *testing.T) { Cmd: []string{"grep", "KUBE-SERVICES"}, Stdout: "3 KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 ...", }, - {Cmd: []string{"iptables", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}, ExitCode: 1}, + {Cmd: []string{"iptables-nft", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}, ExitCode: 1}, }, 
placeAzureChainFirst: util.PlaceAzureChainAfterKubeServices, wantErr: true, @@ -755,8 +749,8 @@ func TestPositionAzureChainJumpRule(t *testing.T) { Cmd: []string{"grep", "KUBE-SERVICES"}, Stdout: "3 KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 ...", }, - {Cmd: []string{"iptables", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, - {Cmd: []string{"iptables", "-w", "60", "-I", "FORWARD", "3", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}, ExitCode: 1}, + {Cmd: []string{"iptables-nft", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, + {Cmd: []string{"iptables-nft", "-w", "60", "-I", "FORWARD", "3", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}, ExitCode: 1}, }, placeAzureChainFirst: util.PlaceAzureChainAfterKubeServices, wantErr: true, @@ -772,6 +766,7 @@ func TestPositionAzureChainJumpRule(t *testing.T) { PlaceAzureChainFirst: tt.placeAzureChainFirst, } pMgr := NewPolicyManager(ioshim, cfg) + err := pMgr.positionAzureChainJumpRule() if tt.wantErr { require.Error(t, err) @@ -863,6 +858,7 @@ func TestChainLineNumber(t *testing.T) { ioshim := common.NewMockIOShim(tt.calls) defer ioshim.VerifyCalls(t, tt.calls) pMgr := NewPolicyManager(ioshim, ipsetConfig) + lineNum, err := pMgr.chainLineNumber(testChainName) if tt.wantErr { require.Error(t, err) @@ -875,7 +871,7 @@ func TestChainLineNumber(t *testing.T) { } func getFakeDestroyCommand(chain string) testutils.TestCmd { - return testutils.TestCmd{Cmd: []string{"iptables", "-w", "60", "-X", chain}} + return testutils.TestCmd{Cmd: []string{"iptables-nft", "-w", "60", "-X", chain}} } func getFakeDestroyCommandWithExitCode(chain string, exitCode int) testutils.TestCmd { @@ -894,3 +890,514 @@ func stringsToMap(items []string) map[string]struct{} { } return m } + +func TestDetectIptablesVersion(t *testing.T) { + type args struct { + name string + calls []testutils.TestCmd + expectedIptablesVersion string + expectedErr bool + } + + tests := []args{ + { + name: "nft has hint chain", + calls: []testutils.TestCmd{ + { + Cmd: []string{"iptables-nft", "-w", "60", "-L", "KUBE-IPTABLES-HINT", "-t", "mangle", "-n"}, + ExitCode: 0, + }, + }, + expectedIptablesVersion: util.IptablesNft, + }, + { + name: "nft has only canary chain", + calls: []testutils.TestCmd{ + { + Cmd: []string{"iptables-nft", "-w", "60", "-L", "KUBE-IPTABLES-HINT", "-t", "mangle", "-n"}, + ExitCode: 1, + }, + { + Cmd: []string{"iptables-nft", "-w", "60", "-L", "KUBE-KUBELET-CANARY", "-t", "mangle", "-n"}, + ExitCode: 0, + }, + }, + expectedIptablesVersion: util.IptablesNft, + }, + { + name: "legacy has hint chain", + calls: []testutils.TestCmd{ + { + Cmd: []string{"iptables-nft", "-w", "60", "-L", "KUBE-IPTABLES-HINT", "-t", "mangle", "-n"}, + ExitCode: 1, + }, + { + Cmd: []string{"iptables-nft", "-w", "60", "-L", "KUBE-KUBELET-CANARY", "-t", "mangle", "-n"}, + ExitCode: 1, + }, + { + Cmd: []string{"iptables-legacy", "-w", "60", "-L", "KUBE-IPTABLES-HINT", "-t", "mangle", "-n"}, + ExitCode: 0, + }, + }, + expectedIptablesVersion: util.IptablesLegacy, + }, + { + name: "no kube chains: error", + calls: []testutils.TestCmd{ + { + Cmd: []string{"iptables-nft", "-w", "60", "-L", "KUBE-IPTABLES-HINT", "-t", "mangle", "-n"}, + ExitCode: 1, + }, + { + Cmd: []string{"iptables-nft", "-w", "60", "-L", "KUBE-KUBELET-CANARY", "-t", "mangle", "-n"}, + ExitCode: 1, + }, + { + Cmd: []string{"iptables-legacy", "-w", "60", "-L", "KUBE-IPTABLES-HINT", "-t", "mangle", "-n"}, + ExitCode: 1, + }, + { + Cmd: 
[]string{"iptables-legacy", "-w", "60", "-L", "KUBE-KUBELET-CANARY", "-t", "mangle", "-n"}, + ExitCode: 1, + }, + }, + expectedErr: true, + }, + { + name: "nft and legacy both fail: error", + calls: []testutils.TestCmd{ + { + Cmd: []string{"iptables-nft", "-w", "60", "-L", "KUBE-IPTABLES-HINT", "-t", "mangle", "-n"}, + ExitCode: 2, + }, + { + Cmd: []string{"iptables-nft", "-w", "60", "-L", "KUBE-KUBELET-CANARY", "-t", "mangle", "-n"}, + ExitCode: 2, + }, + { + Cmd: []string{"iptables-legacy", "-w", "60", "-L", "KUBE-IPTABLES-HINT", "-t", "mangle", "-n"}, + ExitCode: 2, + }, + { + Cmd: []string{"iptables-legacy", "-w", "60", "-L", "KUBE-KUBELET-CANARY", "-t", "mangle", "-n"}, + ExitCode: 2, + }, + }, + expectedErr: true, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + metrics.InitializeAll() + + ioshim := common.NewMockIOShim(tt.calls) + defer ioshim.VerifyCalls(t, tt.calls) + cfg := &PolicyManagerCfg{ + NodeIP: "6.7.8.9", + PolicyMode: IPSetPolicyMode, + PlaceAzureChainFirst: util.PlaceAzureChainFirst, + } + pMgr := NewPolicyManager(ioshim, cfg) + + err := pMgr.detectIptablesVersion() + if tt.expectedErr { + require.Error(t, err) + } else { + require.NoError(t, err) + require.Equal(t, tt.expectedIptablesVersion, util.Iptables) + } + }) + } +} + +func TestCleanupOtherChains(t *testing.T) { + type args struct { + name string + startWithNft bool + calls []testutils.TestCmd + expectedErr bool + } + + tests := []args{ + { + name: "cleanup legacy jump no chains", + startWithNft: true, + calls: []testutils.TestCmd{ + {Cmd: []string{"iptables-legacy", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM"}}, // deprecated rule existed + { + Cmd: []string{"iptables-legacy", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}, + ExitCode: 1, + }, + {Cmd: []string{"iptables-legacy", "-w", "60", "-t", "filter", "-n", "-L"}, PipedToCommand: true}, + { + Cmd: []string{"grep", "Chain AZURE-NPM"}, + ExitCode: 1, + }, + }, + expectedErr: false, + }, + { + name: "cleanup legacy jump and chains", + startWithNft: true, + calls: []testutils.TestCmd{ + {Cmd: []string{"iptables-legacy", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM"}}, // deprecated rule existed + { + Cmd: []string{"iptables-legacy", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}, + ExitCode: 1, + }, + {Cmd: []string{"iptables-legacy", "-w", "60", "-t", "filter", "-n", "-L"}, PipedToCommand: true}, + { + Cmd: []string{"grep", "Chain AZURE-NPM"}, + Stdout: grepOutputTwoAzureChains, + }, + {Cmd: []string{"iptables-legacy-restore", "-w", "60", "-T", "filter", "--noflush"}}, + {Cmd: []string{"iptables-legacy", "-w", "60", "-X", "AZURE-NPM"}}, + {Cmd: []string{"iptables-legacy", "-w", "60", "-X", "AZURE-NPM-INGRESS"}}, + }, + expectedErr: false, + }, + { + name: "cleanup legacy retry flushes", + startWithNft: true, + calls: []testutils.TestCmd{ + { + Cmd: []string{"iptables-legacy", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM"}, + ExitCode: 1, + }, + { + Cmd: []string{"iptables-legacy", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}, + ExitCode: 1, + }, + {Cmd: []string{"iptables-legacy", "-w", "60", "-t", "filter", "-n", "-L"}, PipedToCommand: true}, + { + Cmd: []string{"grep", "Chain AZURE-NPM"}, + Stdout: grepOutputTwoAzureChains, + }, + { + Cmd: []string{"iptables-legacy-restore", "-w", "60", "-T", "filter", "--noflush"}, + ExitCode: 1, + }, + { + Cmd: []string{"iptables-legacy-restore", "-w", 
"60", "-T", "filter", "--noflush"}, + ExitCode: 1, + }, + {Cmd: []string{"iptables-legacy", "-w", "60", "-F", "AZURE-NPM"}}, + {Cmd: []string{"iptables-legacy", "-w", "60", "-F", "AZURE-NPM-INGRESS"}}, + {Cmd: []string{"iptables-legacy", "-w", "60", "-X", "AZURE-NPM"}}, + {Cmd: []string{"iptables-legacy", "-w", "60", "-X", "AZURE-NPM-INGRESS"}}, + }, + expectedErr: false, + }, + { + name: "cleanup legacy error: delete/flush errors", + startWithNft: true, + calls: []testutils.TestCmd{ + { + Cmd: []string{"iptables-legacy", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM"}, + ExitCode: 1, + }, + { + Cmd: []string{"iptables-legacy", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}, + ExitCode: 1, + }, + {Cmd: []string{"iptables-legacy", "-w", "60", "-t", "filter", "-n", "-L"}, PipedToCommand: true}, + { + Cmd: []string{"grep", "Chain AZURE-NPM"}, + Stdout: grepOutputTwoAzureChains, + }, + { + Cmd: []string{"iptables-legacy-restore", "-w", "60", "-T", "filter", "--noflush"}, + ExitCode: 1, + }, + { + Cmd: []string{"iptables-legacy-restore", "-w", "60", "-T", "filter", "--noflush"}, + ExitCode: 1, + }, + { + Cmd: []string{"iptables-legacy", "-w", "60", "-F", "AZURE-NPM"}, + ExitCode: 1, + }, + }, + expectedErr: true, + }, + { + name: "don't flush azure chain if it isn't there", + startWithNft: true, + calls: []testutils.TestCmd{ + { + Cmd: []string{"iptables-legacy", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM"}, + ExitCode: 1, + }, + { + Cmd: []string{"iptables-legacy", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}, + ExitCode: 1, + }, + {Cmd: []string{"iptables-legacy", "-w", "60", "-t", "filter", "-n", "-L"}, PipedToCommand: true}, + { + Cmd: []string{"grep", "Chain AZURE-NPM"}, + Stdout: "Chain AZURE-NPM-INGRESS (1 references)\n", + }, + { + Cmd: []string{"iptables-legacy-restore", "-w", "60", "-T", "filter", "--noflush"}, + ExitCode: 1, + }, + { + Cmd: []string{"iptables-legacy-restore", "-w", "60", "-T", "filter", "--noflush"}, + ExitCode: 1, + }, + {Cmd: []string{"iptables-legacy", "-w", "60", "-F", "AZURE-NPM-INGRESS"}}, + {Cmd: []string{"iptables-legacy", "-w", "60", "-X", "AZURE-NPM-INGRESS"}}, + }, + expectedErr: false, + }, + { + name: "cleanup legacy errors ok if deleted jump (non-deprecated)", + startWithNft: true, + calls: []testutils.TestCmd{ + { + Cmd: []string{"iptables-legacy", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM"}, + ExitCode: 1, + }, + {Cmd: []string{"iptables-legacy", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, + {Cmd: []string{"iptables-legacy", "-w", "60", "-t", "filter", "-n", "-L"}, PipedToCommand: true}, + { + Cmd: []string{"grep", "Chain AZURE-NPM"}, + Stdout: grepOutputTwoAzureChains, + }, + { + Cmd: []string{"iptables-legacy-restore", "-w", "60", "-T", "filter", "--noflush"}, + ExitCode: 1, + }, + { + Cmd: []string{"iptables-legacy-restore", "-w", "60", "-T", "filter", "--noflush"}, + ExitCode: 1, + }, + { + Cmd: []string{"iptables-legacy", "-w", "60", "-F", "AZURE-NPM"}, + ExitCode: 1, + }, + { + Cmd: []string{"iptables-legacy", "-w", "60", "-F", "AZURE-NPM-INGRESS"}, + ExitCode: 1, + }, + { + Cmd: []string{"iptables-legacy", "-w", "60", "-X", "AZURE-NPM"}, + ExitCode: 1, + }, + { + Cmd: []string{"iptables-legacy", "-w", "60", "-X", "AZURE-NPM-INGRESS"}, + ExitCode: 1, + }, + }, + expectedErr: false, + }, + { + name: "cleanup legacy errors ok if deleted jump (deprecated)", + startWithNft: true, + calls: []testutils.TestCmd{ + {Cmd: 
[]string{"iptables-legacy", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM"}}, + { + Cmd: []string{"iptables-legacy", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}, + ExitCode: 1, + }, + {Cmd: []string{"iptables-legacy", "-w", "60", "-t", "filter", "-n", "-L"}, PipedToCommand: true}, + { + Cmd: []string{"grep", "Chain AZURE-NPM"}, + Stdout: grepOutputTwoAzureChains, + }, + { + Cmd: []string{"iptables-legacy-restore", "-w", "60", "-T", "filter", "--noflush"}, + ExitCode: 1, + }, + { + Cmd: []string{"iptables-legacy-restore", "-w", "60", "-T", "filter", "--noflush"}, + ExitCode: 1, + }, + { + Cmd: []string{"iptables-legacy", "-w", "60", "-F", "AZURE-NPM"}, + ExitCode: 2, + }, + { + Cmd: []string{"iptables-legacy", "-w", "60", "-F", "AZURE-NPM-INGRESS"}, + ExitCode: 2, + }, + { + Cmd: []string{"iptables-legacy", "-w", "60", "-X", "AZURE-NPM"}, + ExitCode: 2, + }, + { + Cmd: []string{"iptables-legacy", "-w", "60", "-X", "AZURE-NPM-INGRESS"}, + ExitCode: 2, + }, + }, + expectedErr: false, + }, + { + name: "cleanup legacy other flush errors ok", + startWithNft: true, + calls: []testutils.TestCmd{ + {Cmd: []string{"iptables-legacy", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM"}}, + { + Cmd: []string{"iptables-legacy", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}, + ExitCode: 1, + }, + { + Cmd: []string{"iptables-legacy", "-w", "60", "-t", "filter", "-n", "-L"}, PipedToCommand: true, + ExitCode: 1, + }, + { + Cmd: []string{"grep", "Chain AZURE-NPM"}, + Stdout: grepOutputTwoAzureChains, + }, + { + Cmd: []string{"iptables-legacy-restore", "-w", "60", "-T", "filter", "--noflush"}, + ExitCode: 1, + }, + { + Cmd: []string{"iptables-legacy-restore", "-w", "60", "-T", "filter", "--noflush"}, + ExitCode: 1, + }, + {Cmd: []string{"iptables-legacy", "-w", "60", "-F", "AZURE-NPM"}}, + { + Cmd: []string{"iptables-legacy", "-w", "60", "-F", "AZURE-NPM-INGRESS"}, + ExitCode: 1, + }, + {Cmd: []string{"iptables-legacy", "-w", "60", "-X", "AZURE-NPM"}}, + { + Cmd: []string{"iptables-legacy", "-w", "60", "-X", "AZURE-NPM-INGRESS"}, + ExitCode: 1, + }, + }, + expectedErr: false, + }, + { + name: "cleanup legacy error: list error", + startWithNft: true, + calls: []testutils.TestCmd{ + { + Cmd: []string{"iptables-legacy", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM"}, + ExitCode: 1, + }, + { + Cmd: []string{"iptables-legacy", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}, + ExitCode: 1, + }, + { + Cmd: []string{"iptables-legacy", "-w", "60", "-t", "filter", "-n", "-L"}, PipedToCommand: true, HasStartError: true, + ExitCode: 1, + }, + {Cmd: []string{"grep", "Chain AZURE-NPM"}}, + }, + expectedErr: true, + }, + { + name: "cleanup nft", + startWithNft: false, + calls: []testutils.TestCmd{ + {Cmd: []string{"iptables-nft", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM"}}, // deprecated rule existed + { + Cmd: []string{"iptables-nft", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}, + ExitCode: 1, + }, + {Cmd: []string{"iptables-nft", "-w", "60", "-t", "filter", "-n", "-L"}, PipedToCommand: true}, + { + Cmd: []string{"grep", "Chain AZURE-NPM"}, + ExitCode: 1, + }, + }, + expectedErr: false, + }, + { + name: "cleanup nft error", + startWithNft: false, + calls: []testutils.TestCmd{ + { + Cmd: []string{"iptables-nft", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM"}, + ExitCode: 1, + }, + { + Cmd: []string{"iptables-nft", "-w", "60", "-D", "FORWARD", "-j", 
"AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}, + ExitCode: 1, + }, + {Cmd: []string{"iptables-nft", "-w", "60", "-t", "filter", "-n", "-L"}, PipedToCommand: true}, + { + Cmd: []string{"grep", "Chain AZURE-NPM"}, + Stdout: grepOutputTwoAzureChains, + }, + { + Cmd: []string{"iptables-nft-restore", "-w", "60", "-T", "filter", "--noflush"}, + ExitCode: 1, + }, + { + Cmd: []string{"iptables-nft-restore", "-w", "60", "-T", "filter", "--noflush"}, + ExitCode: 1, + }, + {Cmd: []string{"iptables-nft", "-w", "60", "-F", "AZURE-NPM"}, ExitCode: 1}, + }, + expectedErr: true, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + ioshim := common.NewMockIOShim(tt.calls) + defer ioshim.VerifyCalls(t, tt.calls) + pMgr := NewPolicyManager(ioshim, ipsetConfig) + + if tt.startWithNft { + util.SetIptablesToNft() + } else { + util.SetIptablesToLegacy() + // set back to default + defer util.SetIptablesToNft() + } + + err := pMgr.cleanupOtherIptables() + if tt.expectedErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + + if tt.startWithNft { + require.Equal(t, util.IptablesNft, util.Iptables) + } else { + require.Equal(t, util.IptablesLegacy, util.Iptables) + } + }) + } +} + +func TestCreatorForCleanup(t *testing.T) { + chains := []string{ + "AZURE-NPM", + "AZURE-NPM-INGRESS", + "AZURE-NPM-EGRESS", + "AZURE-NPM-ACCEPT", + } + + expectedLines := []string{ + "*filter", + "-F AZURE-NPM", + "-F AZURE-NPM-INGRESS", + "-F AZURE-NPM-EGRESS", + "-F AZURE-NPM-ACCEPT", + "COMMIT", + "", + } + + ioshim := common.NewMockIOShim(nil) + defer ioshim.VerifyCalls(t, nil) + pMgr := NewPolicyManager(ioshim, ipsetConfig) + creator := pMgr.creatorForCleanup(chains) + actualLines := strings.Split(creator.ToString(), "\n") + sortedActualLines := sortFlushes(actualLines) + sortedExpectedLines := sortFlushes(expectedLines) + dptestutils.AssertEqualLines(t, sortedExpectedLines, sortedActualLines) + assertStaleChainsContain(t, pMgr.staleChains, []string{}...) +} diff --git a/npm/pkg/dataplane/policies/policy.go b/npm/pkg/dataplane/policies/policy.go index b411a7407b..646a03633a 100644 --- a/npm/pkg/dataplane/policies/policy.go +++ b/npm/pkg/dataplane/policies/policy.go @@ -42,6 +42,15 @@ func NewNPMNetworkPolicy(netPolName, netPolNamespace string) *NPMNetworkPolicy { } } +func (netPol *NPMNetworkPolicy) HasCIDRRules() bool { + for _, set := range netPol.RuleIPSets { + if set.Metadata.Type == ipsets.CIDRBlocks { + return true + } + } + return false +} + func (netPol *NPMNetworkPolicy) AllPodSelectorIPSets() []*ipsets.TranslatedIPSet { return append(netPol.PodSelectorIPSets, netPol.ChildPodSelectorIPSets...) 
} diff --git a/npm/pkg/dataplane/policies/policymanager_test.go b/npm/pkg/dataplane/policies/policymanager_test.go index 793dcda3f0..22add8eea0 100644 --- a/npm/pkg/dataplane/policies/policymanager_test.go +++ b/npm/pkg/dataplane/policies/policymanager_test.go @@ -101,15 +101,23 @@ func (p promVals) testPrometheusMetrics(t *testing.T) { // see chain-management_linux_test.go for testing when an error occurs func TestBootup(t *testing.T) { metrics.ReinitializeAll() - calls := GetBootupTestCalls(false) + calls := GetBootupTestCalls() ioshim := common.NewMockIOShim(calls) defer ioshim.VerifyCalls(t, calls) pMgr := NewPolicyManager(ioshim, ipsetConfig) + // verify that the iptables is explicitly set to nft during bootup + util.SetIptablesToLegacy() + // set back to default + defer util.SetIptablesToNft() + metrics.IncNumACLRules() metrics.IncNumACLRules() require.NoError(t, pMgr.Bootup(epIDs)) + if !util.IsWindowsDP() { + require.Equal(t, util.IptablesNft, util.Iptables) + } expectedNumACLs := 11 if util.IsWindowsDP() { @@ -126,7 +134,7 @@ func TestAddPolicy(t *testing.T) { ioshim := common.NewMockIOShim(calls) defer ioshim.VerifyCalls(t, calls) pMgr := NewPolicyManager(ioshim, ipsetConfig) - + util.SetIptablesToNft() require.NoError(t, pMgr.AddPolicies([]*NPMNetworkPolicy{testNetPol}, epList)) _, ok := pMgr.GetPolicy(testNetPol.PolicyKey) require.True(t, ok) @@ -144,6 +152,7 @@ func TestAddEmptyPolicy(t *testing.T) { testNetPol := testNetworkPolicy() ioshim := common.NewMockIOShim(nil) pMgr := NewPolicyManager(ioshim, ipsetConfig) + util.SetIptablesToNft() require.NoError(t, pMgr.AddPolicies([]*NPMNetworkPolicy{ { Namespace: "x", @@ -173,7 +182,7 @@ func TestGetPolicy(t *testing.T) { ioshim := common.NewMockIOShim(calls) defer ioshim.VerifyCalls(t, calls) pMgr := NewPolicyManager(ioshim, ipsetConfig) - + util.SetIptablesToNft() require.NoError(t, pMgr.AddPolicies([]*NPMNetworkPolicy{netpol}, epList)) require.True(t, pMgr.PolicyExists("x/test-netpol")) @@ -190,6 +199,7 @@ func TestRemovePolicy(t *testing.T) { ioshim := common.NewMockIOShim(calls) defer ioshim.VerifyCalls(t, calls) pMgr := NewPolicyManager(ioshim, ipsetConfig) + util.SetIptablesToNft() require.NoError(t, pMgr.AddPolicies([]*NPMNetworkPolicy{testNetPol}, epList)) require.NoError(t, pMgr.RemovePolicy(testNetPol.PolicyKey)) _, ok := pMgr.GetPolicy(testNetPol.PolicyKey) @@ -202,6 +212,7 @@ func TestRemoveNonexistentPolicy(t *testing.T) { metrics.ReinitializeAll() ioshim := common.NewMockIOShim(nil) pMgr := NewPolicyManager(ioshim, ipsetConfig) + util.SetIptablesToNft() require.NoError(t, pMgr.RemovePolicy("wrong-policy-key")) promVals{0, 0}.testPrometheusMetrics(t) } diff --git a/npm/pkg/dataplane/policies/testutils_linux.go b/npm/pkg/dataplane/policies/testutils_linux.go index caf8f51ddc..01d0a22620 100644 --- a/npm/pkg/dataplane/policies/testutils_linux.go +++ b/npm/pkg/dataplane/policies/testutils_linux.go @@ -8,11 +8,10 @@ import ( ) var ( - fakeIPTablesRestoreCommand = testutils.TestCmd{Cmd: []string{"iptables-restore", "-w", "60", "-T", "filter", "--noflush"}} - fakeIPTablesRestoreFailureCommand = testutils.TestCmd{Cmd: []string{"iptables-restore", "-w", "60", "-T", "filter", "--noflush"}, ExitCode: 1} + fakeIPTablesRestoreCommand = testutils.TestCmd{Cmd: []string{"iptables-nft-restore", "-w", "60", "-T", "filter", "--noflush"}} + fakeIPTablesRestoreFailureCommand = testutils.TestCmd{Cmd: []string{"iptables-nft-restore", "-w", "60", "-T", "filter", "--noflush"}, ExitCode: 1} - listLineNumbersCommandStrings = 
[]string{"iptables", "-w", "60", "-t", "filter", "-n", "-L", "FORWARD", "--line-numbers"} - listAllCommandStrings = []string{"iptables", "-w", "60", "-t", "filter", "-n", "-L"} + listLineNumbersCommandStrings = []string{"iptables-nft", "-w", "60", "-t", "filter", "-n", "-L", "FORWARD", "--line-numbers"} ) func GetAddPolicyTestCalls(_ *NPMNetworkPolicy) []testutils.TestCmd { @@ -27,12 +26,12 @@ func GetRemovePolicyTestCalls(policy *NPMNetworkPolicy) []testutils.TestCmd { calls := []testutils.TestCmd{} hasIngress, hasEgress := policy.hasIngressAndEgress() if hasIngress { - deleteIngressJumpSpecs := []string{"iptables", "-w", "60", "-D", util.IptablesAzureIngressChain} + deleteIngressJumpSpecs := []string{"iptables-nft", "-w", "60", "-D", util.IptablesAzureIngressChain} deleteIngressJumpSpecs = append(deleteIngressJumpSpecs, ingressJumpSpecs(policy)...) calls = append(calls, testutils.TestCmd{Cmd: deleteIngressJumpSpecs}) } if hasEgress { - deleteEgressJumpSpecs := []string{"iptables", "-w", "60", "-D", util.IptablesAzureEgressChain} + deleteEgressJumpSpecs := []string{"iptables-nft", "-w", "60", "-D", util.IptablesAzureEgressChain} deleteEgressJumpSpecs = append(deleteEgressJumpSpecs, egressJumpSpecs(policy)...) calls = append(calls, testutils.TestCmd{Cmd: deleteEgressJumpSpecs}) } @@ -50,24 +49,28 @@ func GetRemovePolicyFailureTestCalls(policy *NPMNetworkPolicy) []testutils.TestC return append(calls, fakeIPTablesRestoreFailureCommand) } -func GetBootupTestCalls(addDetectCalls bool) []testutils.TestCmd { - detectIptable := []testutils.TestCmd{ - {Cmd: []string{"iptables-nft-save", "-t", "mangle"}, Stdout: ""}, //nolint // AZURE-NPM chain didn't exist - {Cmd: []string{"iptables-save", "-t", "mangle"}, Stdout: `# Generated by iptables-save v1.8.7 on Wed May 3 01:35:24 2023 - *mangle - :PREROUTING ACCEPT [0:0] - :INPUT ACCEPT [0:0] - :FORWARD ACCEPT [0:0] - :OUTPUT ACCEPT [0:0] - :POSTROUTING ACCEPT [0:0] - :KUBE-IPTABLES-HINT - [0:0] - :KUBE-KUBELET-CANARY - [0:0] - :KUBE-PROXY-CANARY - [0:0] - COMMIT`}, //nolint // AZURE-NPM chain didn't exist - } +func GetBootupTestCalls() []testutils.TestCmd { bootUp := []testutils.TestCmd{ - {Cmd: []string{"iptables", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM"}, ExitCode: 2}, //nolint // AZURE-NPM chain didn't exist - {Cmd: listAllCommandStrings, PipedToCommand: true}, + // detect iptables version to be nft + { + Cmd: []string{"iptables-nft", "-w", "60", "-L", "KUBE-IPTABLES-HINT", "-t", "mangle", "-n"}, + ExitCode: 0, + }, + // legacy clean up + {Cmd: []string{"iptables-legacy", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM"}, ExitCode: 2}, //nolint // AZURE-NPM chain didn't exist + {Cmd: []string{"iptables-legacy", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}, ExitCode: 2}, //nolint // AZURE-NPM chain didn't exist + {Cmd: []string{"iptables-legacy", "-w", "60", "-t", "filter", "-n", "-L"}, PipedToCommand: true}, + { + // 1 AZURE-NPM chain + Cmd: []string{"grep", "Chain AZURE-NPM"}, + Stdout: `Chain AZURE-NPM (0 references) +`, + }, + {Cmd: []string{"iptables-legacy-restore", "-w", "60", "-T", "filter", "--noflush"}}, + {Cmd: []string{"iptables-legacy", "-w", "60", "-X", "AZURE-NPM"}}, + // nft bootup + {Cmd: []string{"iptables-nft", "-w", "60", "-D", "FORWARD", "-j", "AZURE-NPM"}, ExitCode: 2}, //nolint // AZURE-NPM chain didn't exist + {Cmd: []string{"iptables-nft", "-w", "60", "-t", "filter", "-n", "-L"}, PipedToCommand: true}, { Cmd: []string{"grep", "Chain AZURE-NPM"}, ExitCode: 1, @@ -75,17 +78,13 @@ 
func GetBootupTestCalls(addDetectCalls bool) []testutils.TestCmd { fakeIPTablesRestoreCommand, {Cmd: listLineNumbersCommandStrings, PipedToCommand: true}, {Cmd: []string{"grep", "AZURE-NPM"}, ExitCode: 1}, - {Cmd: []string{"iptables", "-w", "60", "-I", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, - } - - if addDetectCalls { - return append(detectIptable, bootUp...) + {Cmd: []string{"iptables-nft", "-w", "60", "-I", "FORWARD", "-j", "AZURE-NPM", "-m", "conntrack", "--ctstate", "NEW"}}, } return bootUp } func getFakeDeleteJumpCommand(chainName, jumpRule string) testutils.TestCmd { - args := []string{"iptables", "-w", "60", "-D", chainName} + args := []string{"iptables-nft", "-w", "60", "-D", chainName} args = append(args, strings.Split(jumpRule, " ")...) return testutils.TestCmd{Cmd: args} } diff --git a/npm/pkg/dataplane/policies/testutils_windows.go b/npm/pkg/dataplane/policies/testutils_windows.go index 914ea76986..dfc25dab94 100644 --- a/npm/pkg/dataplane/policies/testutils_windows.go +++ b/npm/pkg/dataplane/policies/testutils_windows.go @@ -10,6 +10,6 @@ func GetRemovePolicyTestCalls(_ *NPMNetworkPolicy) []testutils.TestCmd { return []testutils.TestCmd{} } -func GetBootupTestCalls(_ bool) []testutils.TestCmd { +func GetBootupTestCalls() []testutils.TestCmd { return []testutils.TestCmd{} } diff --git a/npm/pkg/dataplane/types.go b/npm/pkg/dataplane/types.go index 2b06c5c079..dd17c714dc 100644 --- a/npm/pkg/dataplane/types.go +++ b/npm/pkg/dataplane/types.go @@ -144,7 +144,8 @@ func (c *updatePodCache) enqueue(m *PodMetadata) *updateNPMPod { } if !ok { - klog.Infof("[DataPlane] pod key %s not found in updatePodCache. creating a new obj", m.PodKey) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("[DataPlane] pod key %s not found in updatePodCache. creating a new obj", m.PodKey) pod = newUpdateNPMPod(m) c.cache[m.PodKey] = pod @@ -157,7 +158,8 @@ func (c *updatePodCache) enqueue(m *PodMetadata) *updateNPMPod { // dequeue returns the first pod in the queue and removes it from the queue. func (c *updatePodCache) dequeue() *updateNPMPod { if c.isEmpty() { - klog.Infof("[DataPlane] updatePodCache is empty. returning nil for dequeue()") + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("[DataPlane] updatePodCache is empty. returning nil for dequeue()") return nil } @@ -177,7 +179,8 @@ func (c *updatePodCache) dequeue() *updateNPMPod { func (c *updatePodCache) requeue(pod *updateNPMPod) { if _, ok := c.cache[pod.PodKey]; ok { // should not happen - klog.Infof("[DataPlane] pod key %s already exists in updatePodCache. skipping requeue", pod.PodKey) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("[DataPlane] pod key %s already exists in updatePodCache. skipping requeue", pod.PodKey) return } @@ -208,11 +211,12 @@ func (q *netPolQueue) len() int { // enqueue adds a NetPol to the queue. If the NetPol already exists in the queue, the NetPol object is updated. func (q *netPolQueue) enqueue(policy *policies.NPMNetworkPolicy) { - if _, ok := q.toAdd[policy.PolicyKey]; ok { - klog.Infof("[DataPlane] policy %s exists in netPolQueue. 
updating", policy.PolicyKey) - } else { - klog.Infof("[DataPlane] enqueuing policy %s in netPolQueue", policy.PolicyKey) - } + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // if _, ok := q.toAdd[policy.PolicyKey]; ok { + // klog.Infof("[DataPlane] policy %s exists in netPolQueue. updating", policy.PolicyKey) + // } else { + // klog.Infof("[DataPlane] enqueuing policy %s in netPolQueue", policy.PolicyKey) + // } q.toAdd[policy.PolicyKey] = policy } diff --git a/npm/pkg/dataplane/types_windows_test.go b/npm/pkg/dataplane/types_windows_test.go index 7e52d5e1be..ea51bb24eb 100644 --- a/npm/pkg/dataplane/types_windows_test.go +++ b/npm/pkg/dataplane/types_windows_test.go @@ -425,7 +425,7 @@ func UpdatePolicy(policy *networkingv1.NetworkPolicy) *Action { // Do models policy updates in the NetworkPolicyController func (p *PolicyUpdateAction) Do(dp *DataPlane) error { - npmNetPol, err := translation.TranslatePolicy(p.Policy) + npmNetPol, err := translation.TranslatePolicy(p.Policy, false) if err != nil { return errors.Wrapf(err, "[PolicyUpdateAction] failed to translate policy with key %s/%s", p.Policy.Namespace, p.Policy.Name) } diff --git a/npm/pkg/models/types.go b/npm/pkg/models/types.go index 28d3ffb80e..e42c7b51b5 100644 --- a/npm/pkg/models/types.go +++ b/npm/pkg/models/types.go @@ -38,10 +38,11 @@ type K8SControllersV2 struct { // Informers are the informers for the k8s controllers type Informers struct { - InformerFactory informers.SharedInformerFactory //nolint:structcheck //ignore this error - PodInformer coreinformers.PodInformer //nolint:structcheck // false lint error - NsInformer coreinformers.NamespaceInformer //nolint:structcheck // false lint error - NpInformer networkinginformers.NetworkPolicyInformer //nolint:structcheck // false lint error + InformerFactory informers.SharedInformerFactory //nolint:structcheck //ignore this error + PodInformerFactory informers.SharedInformerFactory + PodInformer coreinformers.PodInformer //nolint:structcheck // false lint error + NsInformer coreinformers.NamespaceInformer //nolint:structcheck // false lint error + NpInformer networkinginformers.NetworkPolicyInformer //nolint:structcheck // false lint error } // AzureConfig captures the Azure specific configurations and fields diff --git a/npm/profiles/v2-background.yaml b/npm/profiles/v2-background.yaml index 0594c3375a..6932b37a5b 100644 --- a/npm/profiles/v2-background.yaml +++ b/npm/profiles/v2-background.yaml @@ -6,18 +6,18 @@ metadata: data: azure-npm.json: | { - "ResyncPeriodInMinutes": 15, - "ListeningPort": 10091, - "ListeningAddress": "0.0.0.0", - "NetPolInvervalInMilliseconds": 500, - "MaxPendingNetPols": 100, - "Toggles": { - "EnablePrometheusMetrics": true, - "EnablePprof": true, - "EnableHTTPDebugAPI": true, - "EnableV2NPM": true, - "PlaceAzureChainFirst": true, - "ApplyIPSetsOnNeed": false, - "NetPolInBackground": true + "ResyncPeriodInMinutes": 15, + "ListeningPort": 10091, + "ListeningAddress": "0.0.0.0", + "NetPolInvervalInMilliseconds": 500, + "MaxPendingNetPols": 100, + "Toggles": { + "EnablePrometheusMetrics": true, + "EnablePprof": true, + "EnableHTTPDebugAPI": true, + "EnableV2NPM": true, + "PlaceAzureChainFirst": false, + "ApplyIPSetsOnNeed": false, + "NetPolInBackground": true } } diff --git a/npm/profiles/v2-place-first.yaml b/npm/profiles/v2-place-first.yaml index d487a25f69..aabab52304 100644 --- a/npm/profiles/v2-place-first.yaml +++ b/npm/profiles/v2-place-first.yaml @@ -6,16 +6,18 @@ metadata: data: 
azure-npm.json: | { - "ResyncPeriodInMinutes": 15, - "ListeningPort": 10091, - "ListeningAddress": "0.0.0.0", + "ResyncPeriodInMinutes": 15, + "ListeningPort": 10091, + "ListeningAddress": "0.0.0.0", + "NetPolInvervalInMilliseconds": 500, + "MaxPendingNetPols": 100, "Toggles": { - "EnablePrometheusMetrics": true, - "EnablePprof": false, - "EnableHTTPDebugAPI": true, - "EnableV2NPM": true, - "PlaceAzureChainFirst": true, - "ApplyIPSetsOnNeed": true, - "NetPolInBackground": false - } + "EnablePrometheusMetrics": true, + "EnablePprof": true, + "EnableHTTPDebugAPI": true, + "EnableV2NPM": true, + "PlaceAzureChainFirst": true, + "ApplyIPSetsOnNeed": false, + "NetPolInBackground": true + } } diff --git a/npm/util/const.go b/npm/util/const.go index 38885935be..e323d618b0 100644 --- a/npm/util/const.go +++ b/npm/util/const.go @@ -2,13 +2,7 @@ // MIT License package util -import ( - "bytes" - "fmt" - "strings" - - "github.com/Azure/azure-container-networking/common" -) +import "k8s.io/klog" // kubernetes related constants. const ( @@ -28,10 +22,10 @@ const ( ) var ( - Iptables = IptablesLegacy + Iptables = IptablesNft Ip6tables = Ip6tablesLegacy //nolint (avoid warning to capitalize this p) - IptablesSave = IptablesSaveLegacy - IptablesRestore = IptablesRestoreLegacy + IptablesSave = IptablesSaveNft + IptablesRestore = IptablesRestoreNft ) // iptables related constants. @@ -43,9 +37,9 @@ const ( Ip6tablesLegacy string = "ip6tables" //nolint (avoid warning to capitalize this p) IptablesSaveNft string = "iptables-nft-save" IptablesRestoreNft string = "iptables-nft-restore" - IptablesLegacy string = "iptables" - IptablesSaveLegacy string = "iptables-save" - IptablesRestoreLegacy string = "iptables-restore" + IptablesLegacy string = "iptables-legacy" + IptablesSaveLegacy string = "iptables-legacy-save" + IptablesRestoreLegacy string = "iptables-legacy-restore" IptablesRestoreNoFlushFlag string = "--noflush" IptablesRestoreTableFlag string = "-T" IptablesRestoreCommit string = "COMMIT" @@ -91,6 +85,7 @@ const ( IptablesEstablishedState string = "ESTABLISHED" IptablesNewState string = "NEW" IptablesFilterTable string = "filter" + IptablesMangleTable string = "mangle" IptablesCommentModuleFlag string = "comment" IptablesCommentFlag string = "--comment" IptablesAddCommentFlag @@ -243,8 +238,6 @@ const ( AiInitializeRetryCount int = 3 AiInitializeRetryInMin int = 1 - DebugMode bool = true - ErrorValue float64 = 1 ) @@ -272,70 +265,16 @@ const ( FanOutServerID // for v2 ) -func DetectIptablesVersion(ioShim *common.IOShim) { - cmd := ioShim.Exec.Command(IptablesSaveNft, "-t", "mangle") - - output, err := cmd.CombinedOutput() - if err != nil { - fmt.Printf("Error running iptables-nft-save: %s", err) - return - } - - if strings.Contains(string(output), "KUBE-IPTABLES-HINT") || strings.Contains(string(output), "KUBE-KUBELET-CANARY") { - Iptables = IptablesNft - IptablesSave = IptablesSaveNft - IptablesRestore = IptablesRestoreNft - } else { - lCmd := ioShim.Exec.Command(IptablesSaveLegacy, "-t", "mangle") - - loutput, err := lCmd.CombinedOutput() - if err != nil { - fmt.Printf("Error running iptables-legacy-save: %s", err) - return - } - - if strings.Contains(string(loutput), "KUBE-IPTABLES-HINT") || strings.Contains(string(loutput), "KUBE-KUBELET-CANARY") { - Iptables = IptablesLegacy - IptablesSave = IptablesSaveLegacy - IptablesRestore = IptablesRestoreLegacy - } else { - lsavecmd := ioShim.Exec.Command(IptablesSaveNft) - lsaveoutput, err := lsavecmd.CombinedOutput() - if err != nil { - fmt.Printf("Error 
running iptables-nft-save: %s", err) - return - } - - lcount := countLines(lsaveoutput) - - savecmd := ioShim.Exec.Command(IptablesSaveLegacy) - saveoutput, err := savecmd.CombinedOutput() - if err != nil { - fmt.Printf("Error running iptables-legacy-save: %s", err) - return - } - - count := countLines(saveoutput) - - if lcount > count { - Iptables = IptablesLegacy - IptablesSave = IptablesSaveLegacy - IptablesRestore = IptablesRestoreLegacy - } else { - Iptables = IptablesNft - IptablesSave = IptablesSaveNft - IptablesRestore = IptablesRestoreNft - } - } - } +func SetIptablesToNft() { + klog.Info("setting iptables to nft") + Iptables = IptablesNft + IptablesSave = IptablesSaveNft + IptablesRestore = IptablesRestoreNft } -func countLines(output []byte) int { - count := 0 - for _, x := range bytes.Split(output, []byte("\n")) { - if len(x) >= 1 && x[0] == '-' { - count++ - } - } - return count +func SetIptablesToLegacy() { + klog.Info("setting iptables to legacy") + Iptables = IptablesLegacy + IptablesSave = IptablesSaveLegacy + IptablesRestore = IptablesRestoreLegacy } diff --git a/npm/util/ioutil/current-azure-chains_test.go b/npm/util/ioutil/current-azure-chains_test.go index f2640d7884..107311394b 100644 --- a/npm/util/ioutil/current-azure-chains_test.go +++ b/npm/util/ioutil/current-azure-chains_test.go @@ -17,7 +17,7 @@ Chain AZURE-NPM-INGRESS-123456 (1 references) Chain AZURE-NPM-INGRESS-ALLOW-MARK (1 references) ` -var listAllCommandStrings = []string{"iptables", "-w", "60", "-t", "filter", "-n", "-L"} +var listAllCommandStrings = []string{"iptables-nft", "-w", "60", "-t", "filter", "-n", "-L"} func TestAllCurrentAzureChains(t *testing.T) { tests := []struct { @@ -41,7 +41,7 @@ func TestAllCurrentAzureChains(t *testing.T) { { name: "ignore missing newline at end of grep result", calls: []testutils.TestCmd{ - {Cmd: []string{"iptables", "-w", "60", "-t", "filter", "-n", "-L"}, PipedToCommand: true}, + {Cmd: []string{"iptables-nft", "-w", "60", "-t", "filter", "-n", "-L"}, PipedToCommand: true}, { Cmd: []string{"grep", "Chain AZURE-NPM"}, Stdout: `Chain AZURE-NPM (1 references) @@ -54,7 +54,7 @@ Chain AZURE-NPM-INGRESS (1 references)`, { name: "ignore unexpected grep line (chain name too short)", calls: []testutils.TestCmd{ - {Cmd: []string{"iptables", "-w", "60", "-t", "filter", "-n", "-L"}, PipedToCommand: true}, + {Cmd: []string{"iptables-nft", "-w", "60", "-t", "filter", "-n", "-L"}, PipedToCommand: true}, { Cmd: []string{"grep", "Chain AZURE-NPM"}, Stdout: `Chain AZURE-NPM (1 references) @@ -69,7 +69,7 @@ Chain AZURE-NPM-INGRESS (1 references) { name: "ignore unexpected grep line (no space)", calls: []testutils.TestCmd{ - {Cmd: []string{"iptables", "-w", "60", "-t", "filter", "-n", "-L"}, PipedToCommand: true}, + {Cmd: []string{"iptables-nft", "-w", "60", "-t", "filter", "-n", "-L"}, PipedToCommand: true}, { Cmd: []string{"grep", "Chain AZURE-NPM"}, Stdout: `Chain AZURE-NPM (1 references) @@ -83,7 +83,7 @@ Chain AZURE-NPM-INGRESS (1 references) { name: "success with no chains", calls: []testutils.TestCmd{ - {Cmd: []string{"iptables", "-w", "60", "-t", "filter", "-n", "-L"}, PipedToCommand: true}, + {Cmd: []string{"iptables-nft", "-w", "60", "-t", "filter", "-n", "-L"}, PipedToCommand: true}, {Cmd: []string{"grep", "Chain AZURE-NPM"}, ExitCode: 1}, }, expectedChains: nil, @@ -92,7 +92,7 @@ Chain AZURE-NPM-INGRESS (1 references) { name: "grep failure", calls: []testutils.TestCmd{ - {Cmd: []string{"iptables", "-w", "60", "-t", "filter", "-n", "-L"}, PipedToCommand: true, 
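// ---------------------------------------------------------------------------
// Illustrative aside, not part of this diff: DetectIptablesVersion is removed
// above, so picking the iptables backend is now the caller's job via the new
// exported SetIptablesToNft / SetIptablesToLegacy helpers. A simplified
// caller-side sketch follows; it probes only the nft mangle table for the
// same kubelet hint chains the removed logic looked for, whereas the removed
// code additionally probed the legacy tables and compared rule counts. The
// package layout and function names here are hypothetical.

package main

import (
	"os/exec"
	"strings"

	"github.com/Azure/azure-container-networking/npm/util"
)

// chooseIptablesBackend selects the nft backend when the kubelet hint chains
// appear in the nft mangle table, and falls back to legacy otherwise.
func chooseIptablesBackend() {
	out, err := exec.Command("iptables-nft-save", "-t", "mangle").CombinedOutput()
	if err == nil && (strings.Contains(string(out), "KUBE-IPTABLES-HINT") ||
		strings.Contains(string(out), "KUBE-KUBELET-CANARY")) {
		util.SetIptablesToNft()
		return
	}
	util.SetIptablesToLegacy()
}

func main() { chooseIptablesBackend() }
// ---------------------------------------------------------------------------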
HasStartError: true, ExitCode: 1}, + {Cmd: []string{"iptables-nft", "-w", "60", "-t", "filter", "-n", "-L"}, PipedToCommand: true, HasStartError: true, ExitCode: 1}, {Cmd: []string{"grep", "Chain AZURE-NPM"}}, }, expectedChains: nil, @@ -101,7 +101,7 @@ Chain AZURE-NPM-INGRESS (1 references) { name: "invalid grep result", calls: []testutils.TestCmd{ - {Cmd: []string{"iptables", "-w", "60", "-t", "filter", "-n", "-L"}, PipedToCommand: true}, + {Cmd: []string{"iptables-nft", "-w", "60", "-t", "filter", "-n", "-L"}, PipedToCommand: true}, { Cmd: []string{"grep", "Chain AZURE-NPM"}, Stdout: "", diff --git a/npm/util/ioutil/restore.go b/npm/util/ioutil/restore.go index 9255b01ee9..21933ca29b 100644 --- a/npm/util/ioutil/restore.go +++ b/npm/util/ioutil/restore.go @@ -185,7 +185,8 @@ func (creator *FileCreator) runCommandOnceWithFile(fileString, cmd string, args return false, nil } - klog.Infof("running this restore command: [%s]", commandString) + // TODO: Refactor non-error/warning klogs with Zap and set the following logs to "debug" level + // klog.Infof("running this restore command: [%s]", commandString) if creator.verbose { creator.logLines(commandString) diff --git a/npm/windows.Dockerfile b/npm/windows.Dockerfile index 06d4ff149e..ffee28af12 100644 --- a/npm/windows.Dockerfile +++ b/npm/windows.Dockerfile @@ -1,5 +1,5 @@ ARG OS_VERSION -FROM --platform=linux/amd64 mcr.microsoft.com/oss/go/microsoft/golang:1.21 AS builder +FROM --platform=linux/amd64 mcr.microsoft.com/oss/go/microsoft/golang:1.23-azurelinux3.0 AS builder ARG VERSION ARG NPM_AI_PATH ARG NPM_AI_ID @@ -8,9 +8,7 @@ COPY . . RUN GOOS=windows CGO_ENABLED=0 go build -v -o /usr/local/bin/azure-npm.exe -ldflags "-X main.version="$VERSION" -X "$NPM_AI_PATH"="$NPM_AI_ID"" -gcflags="-dwarflocationlists=true" npm/cmd/*.go # intermediate for win-ltsc2022 -FROM mcr.microsoft.com/windows/servercore@sha256:45952938708fbde6ec0b5b94de68bcdec3f8c838be018536b1e9e5bd95e6b943 as ltsc2022 - -FROM ${OS_VERSION} as windows +FROM mcr.microsoft.com/windows/servercore@sha256:45952938708fbde6ec0b5b94de68bcdec3f8c838be018536b1e9e5bd95e6b943 as windows COPY --from=builder /usr/local/src/npm/examples/windows/kubeconfigtemplate.yaml kubeconfigtemplate.yaml COPY --from=builder /usr/local/src/npm/examples/windows/setkubeconfigpath.ps1 setkubeconfigpath.ps1 COPY --from=builder /usr/local/src/npm/examples/windows/setkubeconfigpath-capz.ps1 setkubeconfigpath-capz.ps1 diff --git a/platform/os_linux.go b/platform/os_linux.go index 9e659b06a8..129a7f1962 100644 --- a/platform/os_linux.go +++ b/platform/os_linux.go @@ -179,7 +179,7 @@ func (p *execClient) KillProcessByName(processName string) error { // SetSdnRemoteArpMacAddress sets the regkey for SDNRemoteArpMacAddress needed for multitenancy // This operation is specific to windows OS -func SetSdnRemoteArpMacAddress(_ ExecClient) error { +func SetSdnRemoteArpMacAddress(context.Context) error { return nil } diff --git a/platform/os_windows.go b/platform/os_windows.go index 63900c6e5d..6b99f4634d 100644 --- a/platform/os_windows.go +++ b/platform/os_windows.go @@ -17,9 +17,13 @@ import ( "github.com/Azure/azure-container-networking/log" "github.com/Azure/azure-container-networking/platform/windows/adapter" "github.com/Azure/azure-container-networking/platform/windows/adapter/mellanox" + "github.com/avast/retry-go/v4" "github.com/pkg/errors" "go.uber.org/zap" "golang.org/x/sys/windows" + "golang.org/x/sys/windows/registry" + "golang.org/x/sys/windows/svc" + "golang.org/x/sys/windows/svc/mgr" ) const ( @@ -61,24 
+65,10 @@ const ( // for vlan tagged arp requests SDNRemoteArpMacAddress = "12-34-56-78-9a-bc" - // Command to get SDNRemoteArpMacAddress registry key - GetSdnRemoteArpMacAddressCommand = "(Get-ItemProperty " + - "-Path HKLM:\\SYSTEM\\CurrentControlSet\\Services\\hns\\State -Name SDNRemoteArpMacAddress).SDNRemoteArpMacAddress" - - // Command to set SDNRemoteArpMacAddress registry key - SetSdnRemoteArpMacAddressCommand = "Set-ItemProperty " + - "-Path HKLM:\\SYSTEM\\CurrentControlSet\\Services\\hns\\State -Name SDNRemoteArpMacAddress -Value \"12-34-56-78-9a-bc\"" - - // Command to check if system has hns state path or not - CheckIfHNSStatePathExistsCommand = "Test-Path " + - "-Path HKLM:\\SYSTEM\\CurrentControlSet\\Services\\hns\\State" - // Command to fetch netadapter and pnp id + // TODO: can we replace this (and things in endpoint_windows) with other utils from "golang.org/x/sys/windows"? GetMacAddressVFPPnpIDMapping = "Get-NetAdapter | Select-Object MacAddress, PnpDeviceID| Format-Table -HideTableHeaders" - // Command to restart HNS service - RestartHnsServiceCommand = "Restart-Service -Name hns" - // Interval between successive checks for mellanox adapter's PriorityVLANTag value defaultMellanoxMonitorInterval = 30 * time.Second @@ -257,41 +247,163 @@ func (p *execClient) ExecutePowershellCommandWithContext(ctx context.Context, co } // SetSdnRemoteArpMacAddress sets the regkey for SDNRemoteArpMacAddress needed for multitenancy if hns is enabled -func SetSdnRemoteArpMacAddress(execClient ExecClient) error { - exists, err := execClient.ExecutePowershellCommand(CheckIfHNSStatePathExistsCommand) +func SetSdnRemoteArpMacAddress(ctx context.Context) error { + changed, err := setSDNRemoteARPRegKey() if err != nil { - errMsg := fmt.Sprintf("Failed to check the existent of hns state path due to error %s", err.Error()) - log.Printf(errMsg) - return errors.Errorf(errMsg) + return err } - if strings.EqualFold(exists, "false") { - log.Printf("hns state path does not exist, skip setting SdnRemoteArpMacAddress") + if !changed { + log.Printf("SDNRemoteArpMacAddress regKey already set, skipping HNS restart") return nil } - if sdnRemoteArpMacAddressSet == false { - result, err := execClient.ExecutePowershellCommand(GetSdnRemoteArpMacAddressCommand) - if err != nil { - return err + log.Printf("SDNRemoteArpMacAddress regKey set successfully") + if err := restartHNS(ctx); err != nil { + return err + } + log.Printf("HNS service restarted successfully") + return nil +} + +// setSDNRemoteARPRegKey sets the SDNRemoteArpMacAddress registry key +// returns true if the key was changed, false if unchanged +func setSDNRemoteARPRegKey() (bool, error) { + log.Printf("Setting SDNRemoteArpMacAddress regKey") + // open the registry key + k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SYSTEM\CurrentControlSet\Services\hns\State`, registry.READ|registry.SET_VALUE) + if err != nil { + if errors.Is(err, registry.ErrNotExist) { + return false, nil } + return false, errors.Wrap(err, "could not open registry key") + } + defer k.Close() + // check the key value + if v, _, _ := k.GetStringValue("SDNRemoteArpMacAddress"); v == SDNRemoteArpMacAddress { + log.Printf("SDNRemoteArpMacAddress regKey already set") + return false, nil // already set + } + if err = k.SetStringValue("SDNRemoteArpMacAddress", SDNRemoteArpMacAddress); err != nil { + return false, errors.Wrap(err, "could not set registry key") + } + return true, nil +} - // Set the reg key if not already set or has incorrect value - if result != SDNRemoteArpMacAddress { 
- if _, err = execClient.ExecutePowershellCommand(SetSdnRemoteArpMacAddressCommand); err != nil { - log.Printf("Failed to set SDNRemoteArpMacAddress due to error %s", err.Error()) - return err - } +func restartHNS(ctx context.Context) error { + log.Printf("Restarting HNS service") + // connect to the service manager + m, err := mgr.Connect() + if err != nil { + return errors.Wrap(err, "could not connect to service manager") + } + defer m.Disconnect() //nolint:errcheck // ignore error + // open the HNS service + service, err := m.OpenService("hns") + if err != nil { + return errors.Wrap(err, "could not access service") + } + defer service.Close() + // Stop the service + log.Printf("Stopping HNS service") + _ = retry.Do( + tryStopServiceFn(ctx, service), + retry.UntilSucceeded(), + retry.Context(ctx), + retry.DelayType(retry.BackOffDelay), + ) + // Start the service again + log.Printf("Starting HNS service") + _ = retry.Do( + tryStartServiceFn(ctx, service), + retry.UntilSucceeded(), + retry.Context(ctx), + retry.DelayType(retry.BackOffDelay), + ) + log.Printf("HNS service started") + return nil +} + +type managedService interface { + Control(control svc.Cmd) (svc.Status, error) + Query() (svc.Status, error) + Start(args ...string) error +} - log.Printf("[Azure CNS] SDNRemoteArpMacAddress regKey set successfully. Restarting hns service.") - if _, err := execClient.ExecutePowershellCommand(RestartHnsServiceCommand); err != nil { - log.Printf("Failed to Restart HNS Service due to error %s", err.Error()) - return err +func tryStartServiceFn(ctx context.Context, service managedService) func() error { + shouldStart := func(state svc.State) bool { + return !(state == svc.Running || state == svc.StartPending) + } + return func() error { + status, err := service.Query() + if err != nil { + return errors.Wrap(err, "could not query service status") + } + if shouldStart(status.State) { + err = service.Start() + if err != nil { + return errors.Wrap(err, "could not start service") } } - - sdnRemoteArpMacAddressSet = true + // Wait for the service to start + deadline, cancel := context.WithTimeout(ctx, 90*time.Second) + defer cancel() + ticker := time.NewTicker(500 * time.Millisecond) //nolint:gomnd // 500ms + defer ticker.Stop() + for { + status, err := service.Query() + if err != nil { + return errors.Wrap(err, "could not query service status") + } + if status.State == svc.Running { + log.Printf("service started") + break + } + select { + case <-deadline.Done(): + return deadline.Err() //nolint:wrapcheck // error has sufficient context + case <-ticker.C: + } + } + return nil } +} - return nil +func tryStopServiceFn(ctx context.Context, service managedService) func() error { + shouldStop := func(state svc.State) bool { + return !(state == svc.Stopped || state == svc.StopPending) + } + return func() error { + status, err := service.Query() + if err != nil { + return errors.Wrap(err, "could not query service status") + } + if shouldStop(status.State) { + _, err = service.Control(svc.Stop) + if err != nil { + return errors.Wrap(err, "could not stop service") + } + } + // Wait for the service to stop + deadline, cancel := context.WithTimeout(ctx, 90*time.Second) + defer cancel() + ticker := time.NewTicker(500 * time.Millisecond) //nolint:gomnd // 500ms + defer ticker.Stop() + for { + status, err := service.Query() + if err != nil { + return errors.Wrap(err, "could not query service status") + } + if status.State == svc.Stopped { + log.Printf("service stopped") + break + } + select { + case <-deadline.Done(): 
+ return deadline.Err() //nolint:wrapcheck // error has sufficient context + case <-ticker.C: + } + } + return nil + } } func HasMellanoxAdapter() bool { diff --git a/platform/os_windows_test.go b/platform/os_windows_test.go index 5cb5dacc12..805c0a83aa 100644 --- a/platform/os_windows_test.go +++ b/platform/os_windows_test.go @@ -4,7 +4,6 @@ import ( "context" "errors" "os/exec" - "strings" "testing" "time" @@ -12,6 +11,7 @@ import ( "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "golang.org/x/sys/windows/svc" ) var errTestFailure = errors.New("test failure") @@ -115,41 +115,6 @@ func TestExecuteCommandError(t *testing.T) { require.ErrorIs(t, err, exec.ErrNotFound) } -func TestSetSdnRemoteArpMacAddress_hnsNotEnabled(t *testing.T) { - mockExecClient := NewMockExecClient(false) - // testing skip setting SdnRemoteArpMacAddress when hns not enabled - mockExecClient.SetPowershellCommandResponder(func(_ string) (string, error) { - return "False", nil - }) - err := SetSdnRemoteArpMacAddress(mockExecClient) - assert.NoError(t, err) - assert.Equal(t, false, sdnRemoteArpMacAddressSet) - - // testing the scenario when there is an error in checking if hns is enabled or not - mockExecClient.SetPowershellCommandResponder(func(_ string) (string, error) { - return "", errTestFailure - }) - err = SetSdnRemoteArpMacAddress(mockExecClient) - assert.ErrorAs(t, err, &errTestFailure) - assert.Equal(t, false, sdnRemoteArpMacAddressSet) -} - -func TestSetSdnRemoteArpMacAddress_hnsEnabled(t *testing.T) { - mockExecClient := NewMockExecClient(false) - // happy path - mockExecClient.SetPowershellCommandResponder(func(cmd string) (string, error) { - if strings.Contains(cmd, "Test-Path") { - return "True", nil - } - return "", nil - }) - err := SetSdnRemoteArpMacAddress(mockExecClient) - assert.NoError(t, err) - assert.Equal(t, true, sdnRemoteArpMacAddressSet) - // reset sdnRemoteArpMacAddressSet - sdnRemoteArpMacAddressSet = false -} - func TestFetchPnpIDMapping(t *testing.T) { mockExecClient := NewMockExecClient(false) // happy path @@ -182,3 +147,205 @@ func TestExecuteCommandTimeout(t *testing.T) { _, err := client.ExecuteCommand(context.Background(), "ping", "-t", "localhost") require.Error(t, err) } + +type mockManagedService struct { + queryFuncs []func() (svc.Status, error) + controlFunc func(svc.Cmd) (svc.Status, error) + startFunc func(args ...string) error +} + +func (m *mockManagedService) Query() (svc.Status, error) { + queryFunc := m.queryFuncs[0] + m.queryFuncs = m.queryFuncs[1:] + return queryFunc() +} + +func (m *mockManagedService) Control(cmd svc.Cmd) (svc.Status, error) { + return m.controlFunc(cmd) +} + +func (m *mockManagedService) Start(args ...string) error { + return m.startFunc(args...) 
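// ---------------------------------------------------------------------------
// Illustrative aside, not part of this diff: a minimal caller-side sketch of
// the rewritten SetSdnRemoteArpMacAddress above, which now talks to the
// registry and service manager directly instead of shelling out to
// PowerShell. The import path matches this repo; the 5-minute timeout is an
// arbitrary example (the internal stop/start loops each poll the service
// state with their own 90-second deadlines).

package main

import (
	"context"
	"log"
	"time"

	"github.com/Azure/azure-container-networking/platform"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()
	// On Windows this sets the regkey and restarts HNS only when the value
	// actually changed; the Linux build of the function is a no-op.
	if err := platform.SetSdnRemoteArpMacAddress(ctx); err != nil {
		log.Fatalf("failed to set SDNRemoteArpMacAddress: %v", err)
	}
}
// ---------------------------------------------------------------------------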
+} + +func TestTryStopServiceFn(t *testing.T) { + tests := []struct { + name string + queryFuncs []func() (svc.Status, error) + controlFunc func(svc.Cmd) (svc.Status, error) + expectError bool + }{ + { + name: "Service already stopped", + queryFuncs: []func() (svc.Status, error){ + func() (svc.Status, error) { + return svc.Status{State: svc.Stopped}, nil + }, + func() (svc.Status, error) { + return svc.Status{State: svc.Stopped}, nil + }, + }, + controlFunc: nil, + expectError: false, + }, + { + name: "Service running and stops successfully", + queryFuncs: []func() (svc.Status, error){ + func() (svc.Status, error) { + return svc.Status{State: svc.Running}, nil + }, + func() (svc.Status, error) { + return svc.Status{State: svc.Stopped}, nil + }, + }, + controlFunc: func(svc.Cmd) (svc.Status, error) { + return svc.Status{State: svc.Stopped}, nil + }, + expectError: false, + }, + { + name: "Service running and stops after multiple attempts", + queryFuncs: []func() (svc.Status, error){ + func() (svc.Status, error) { + return svc.Status{State: svc.Running}, nil + }, + func() (svc.Status, error) { + return svc.Status{State: svc.Running}, nil + }, + func() (svc.Status, error) { + return svc.Status{State: svc.Running}, nil + }, + func() (svc.Status, error) { + return svc.Status{State: svc.Stopped}, nil + }, + }, + controlFunc: func(svc.Cmd) (svc.Status, error) { + return svc.Status{State: svc.Stopped}, nil + }, + expectError: false, + }, + { + name: "Service running and fails to stop", + queryFuncs: []func() (svc.Status, error){ + func() (svc.Status, error) { + return svc.Status{State: svc.Running}, nil + }, + }, + controlFunc: func(svc.Cmd) (svc.Status, error) { + return svc.Status{State: svc.Running}, errors.New("failed to stop service") //nolint:err113 // test error + }, + expectError: true, + }, + { + name: "Service query fails", + queryFuncs: []func() (svc.Status, error){ + func() (svc.Status, error) { + return svc.Status{}, errors.New("failed to query service status") //nolint:err113 // test error + }, + }, + controlFunc: nil, + expectError: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + service := &mockManagedService{ + queryFuncs: tt.queryFuncs, + controlFunc: tt.controlFunc, + } + err := tryStopServiceFn(context.Background(), service)() + if tt.expectError { + assert.Error(t, err) + return + } + assert.NoError(t, err) + }) + } +} + +func TestTryStartServiceFn(t *testing.T) { + tests := []struct { + name string + queryFuncs []func() (svc.Status, error) + startFunc func(...string) error + expectError bool + }{ + { + name: "Service already running", + queryFuncs: []func() (svc.Status, error){ + func() (svc.Status, error) { + return svc.Status{State: svc.Running}, nil + }, + func() (svc.Status, error) { + return svc.Status{State: svc.Running}, nil + }, + }, + startFunc: nil, + expectError: false, + }, + { + name: "Service already starting", + queryFuncs: []func() (svc.Status, error){ + func() (svc.Status, error) { + return svc.Status{State: svc.StartPending}, nil + }, + func() (svc.Status, error) { + return svc.Status{State: svc.Running}, nil + }, + }, + startFunc: nil, + expectError: false, + }, + { + name: "Service starts successfully", + queryFuncs: []func() (svc.Status, error){ + func() (svc.Status, error) { + return svc.Status{State: svc.Stopped}, nil + }, + func() (svc.Status, error) { + return svc.Status{State: svc.Running}, nil + }, + }, + startFunc: func(...string) error { + return nil + }, + expectError: false, + }, + { + name: "Service fails to 
start", + queryFuncs: []func() (svc.Status, error){ + func() (svc.Status, error) { + return svc.Status{State: svc.Stopped}, nil + }, + }, + startFunc: func(...string) error { + return errors.New("failed to start service") //nolint:err113 // test error + }, + expectError: true, + }, + { + name: "Service query fails", + queryFuncs: []func() (svc.Status, error){ + func() (svc.Status, error) { + return svc.Status{}, errors.New("failed to query service status") //nolint:err113 // test error + }, + }, + startFunc: nil, + expectError: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + service := &mockManagedService{ + queryFuncs: tt.queryFuncs, + startFunc: tt.startFunc, + } + err := tryStartServiceFn(context.Background(), service)() + if tt.expectError { + assert.Error(t, err) + return + } + assert.NoError(t, err) + }) + } +} diff --git a/refresh/equaler.go b/refresh/equaler.go new file mode 100644 index 0000000000..96a42f413e --- /dev/null +++ b/refresh/equaler.go @@ -0,0 +1,5 @@ +package refresh + +type equaler[T any] interface { + Equal(T) bool +} diff --git a/refresh/fetcher.go b/refresh/fetcher.go new file mode 100644 index 0000000000..a509e0dc8c --- /dev/null +++ b/refresh/fetcher.go @@ -0,0 +1,114 @@ +package refresh + +import ( + "context" + "time" +) + +const ( + DefaultMinInterval = 4 * time.Second + DefaultMaxInterval = 1024 * time.Second +) + +// Fetcher fetches data at regular intervals. The interval will vary within the range of minInterval and +// maxInterval. When no diff is observed after a fetch, the interval doubles (subject to the maximum interval). +// When a diff is observed, the interval resets to the minimum. The interval can be made unchanging by setting +// minInterval and maxInterval to the same desired value. + +type Fetcher[T equaler[T]] struct { + fetchFunc func(context.Context) (T, error) + cache T + minInterval time.Duration + maxInterval time.Duration + currentInterval time.Duration + ticker TickProvider + consumeFunc func(T) error + logger Logger +} + +// NewFetcher creates a new Fetcher. If minInterval is 0, it will default to 4 seconds. 
+func NewFetcher[T equaler[T]]( + fetchFunc func(context.Context) (T, error), + minInterval time.Duration, + maxInterval time.Duration, + consumeFunc func(T) error, + logger Logger, +) *Fetcher[T] { + if minInterval == 0 { + minInterval = DefaultMinInterval + } + + if maxInterval == 0 { + maxInterval = DefaultMaxInterval + } + + maxInterval = max(minInterval, maxInterval) + + return &Fetcher[T]{ + fetchFunc: fetchFunc, + minInterval: minInterval, + maxInterval: maxInterval, + currentInterval: minInterval, + consumeFunc: consumeFunc, + logger: logger, + } +} + +func (f *Fetcher[T]) Start(ctx context.Context) { + go func() { + // do an initial fetch + res, err := f.fetchFunc(ctx) + if err != nil { + f.logger.Printf("Error invoking fetch: %v", err) + } + + f.cache = res + if f.consumeFunc != nil { + if err := f.consumeFunc(res); err != nil { + f.logger.Errorf("Error consuming data: %v", err) + } + } + + if f.ticker == nil { + f.ticker = NewTimedTickProvider(f.currentInterval) + } + + defer f.ticker.Stop() + + for { + select { + case <-ctx.Done(): + f.logger.Printf("Fetcher stopped") + return + case <-f.ticker.C(): + result, err := f.fetchFunc(ctx) + if err != nil { + f.logger.Errorf("Error fetching data: %v", err) + } else { + if result.Equal(f.cache) { + f.updateFetchIntervalForNoObservedDiff() + f.logger.Printf("No diff observed in fetch, not invoking the consumer") + } else { + f.cache = result + f.updateFetchIntervalForObservedDiff() + if f.consumeFunc != nil { + if err := f.consumeFunc(result); err != nil { + f.logger.Errorf("Error consuming data: %v", err) + } + } + } + } + + f.ticker.Reset(f.currentInterval) + } + } + }() +} + +func (f *Fetcher[T]) updateFetchIntervalForNoObservedDiff() { + f.currentInterval = min(f.currentInterval*2, f.maxInterval) // nolint:gomnd // doubling logic +} + +func (f *Fetcher[T]) updateFetchIntervalForObservedDiff() { + f.currentInterval = f.minInterval +} diff --git a/refresh/fetcher_test.go b/refresh/fetcher_test.go new file mode 100644 index 0000000000..0e686a358e --- /dev/null +++ b/refresh/fetcher_test.go @@ -0,0 +1,161 @@ +package refresh_test + +import ( + "context" + "fmt" + "net/netip" + "sync" + "testing" + + "github.com/Azure/azure-container-networking/cns/logger" + "github.com/Azure/azure-container-networking/cns/nodesubnet" + "github.com/Azure/azure-container-networking/nmagent" + "github.com/Azure/azure-container-networking/refresh" +) + +// Mock client that simply tracks if refresh has been called +type TestClient struct { + refreshCount int + responses []nmagent.Interfaces + mu sync.Mutex +} + +// FetchRefreshCount atomically fetches the refresh count +func (c *TestClient) FetchRefreshCount() int { + c.mu.Lock() + defer c.mu.Unlock() + return c.refreshCount +} + +// UpdateRefreshCount atomically updates the refresh count +func (c *TestClient) UpdateRefreshCount() { + c.mu.Lock() + defer c.mu.Unlock() + c.refreshCount++ +} + +// Mock refresh +func (c *TestClient) GetInterfaceIPInfo(_ context.Context) (nmagent.Interfaces, error) { + defer c.UpdateRefreshCount() + + if c.refreshCount >= len(c.responses) { + return c.responses[len(c.responses)-1], nil + } + + return c.responses[c.refreshCount], nil +} + +var _ nodesubnet.InterfaceRetriever = &TestClient{} + +// Mock client that simply consumes fetched IPs +type TestConsumer struct { + consumeCount int + mu sync.Mutex +} + +// FetchConsumeCount atomically fetches the consume count +func (c *TestConsumer) FetchConsumeCount() int { + c.mu.Lock() + defer c.mu.Unlock() + return c.consumeCount +} + 
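// ---------------------------------------------------------------------------
// Illustrative aside, not part of this diff: the nmagent.Interfaces wiring in
// this test is one concrete use of the generic Fetcher; the sketch below
// shows the minimal shape any type needs. The snapshot type, its Equal
// method, and the fetch/consume funcs are hypothetical; the constraint only
// requires Equal(T) bool (see refresh/equaler.go above).

package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-container-networking/refresh"
)

type snapshot struct{ value string }

// Equal satisfies the Fetcher's generic constraint.
func (s snapshot) Equal(o snapshot) bool { return s.value == o.value }

func startPoller(ctx context.Context, logger refresh.Logger) {
	fetch := func(context.Context) (snapshot, error) { return snapshot{value: "v1"}, nil }
	consume := func(s snapshot) error { fmt.Println("observed diff:", s.value); return nil }
	// Zero intervals fall back to the 4s minimum / 1024s maximum defaults;
	// the interval doubles after every unchanged fetch and resets to the
	// minimum whenever a diff is consumed.
	f := refresh.NewFetcher[snapshot](fetch, 0, 0, consume, logger)
	f.Start(ctx)
}
// ---------------------------------------------------------------------------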
+// UpdateConsumeCount atomically updates the consume count +func (c *TestConsumer) UpdateConsumeCount() { + c.mu.Lock() + defer c.mu.Unlock() + c.consumeCount++ +} + +// Mock IP update +func (c *TestConsumer) ConsumeInterfaces(intfs nmagent.Interfaces) error { + fmt.Printf("Consumed interfaces: %v\n", intfs) + c.UpdateConsumeCount() + return nil +} + +func TestRefresh(t *testing.T) { + clientPtr := &TestClient{ + refreshCount: 0, + responses: []nmagent.Interfaces{ + { + Entries: []nmagent.Interface{ + { + MacAddress: nmagent.MACAddress{0x00, 0x0D, 0x3A, 0xF9, 0xDC, 0xA6}, + IsPrimary: true, + InterfaceSubnets: []nmagent.InterfaceSubnet{ + { + Prefix: "10.240.0.0/16", + IPAddress: []nmagent.NodeIP{ + { + Address: nmagent.IPAddress(netip.AddrFrom4([4]byte{10, 240, 0, 5})), + IsPrimary: true, + }, + { + Address: nmagent.IPAddress(netip.AddrFrom4([4]byte{10, 240, 0, 6})), + IsPrimary: false, + }, + }, + }, + }, + }, + }, + }, + { + Entries: []nmagent.Interface{ + { + MacAddress: nmagent.MACAddress{0x00, 0x0D, 0x3A, 0xF9, 0xDC, 0xA6}, + IsPrimary: true, + InterfaceSubnets: []nmagent.InterfaceSubnet{ + { + Prefix: "10.240.0.0/16", + IPAddress: []nmagent.NodeIP{ + { + Address: nmagent.IPAddress(netip.AddrFrom4([4]byte{10, 240, 0, 5})), + IsPrimary: true, + }, + }, + }, + }, + }, + }, + }, + }, + mu: sync.Mutex{}, + } + + consumerPtr := &TestConsumer{} + fetcher := refresh.NewFetcher[nmagent.Interfaces](clientPtr.GetInterfaceIPInfo, 0, 0, consumerPtr.ConsumeInterfaces, logger.Log) + ticker := refresh.NewMockTickProvider() + fetcher.SetTicker(ticker) + ctx, cancel := testContext(t) + defer cancel() + fetcher.Start(ctx) + ticker.Tick() // Trigger a refresh + ticker.Tick() // This tick will be read only after previous refresh is done + ticker.Tick() // This call will block until the previous tick is read + + // At least 2 refreshes - one initial and one after the first tick should be done + if clientPtr.FetchRefreshCount() < 2 { + t.Error("Not enough refreshes") + } + + // Exactly 2 consumes - one initial and one after the first tick should be done (responses are different). + // Then no more, since the response is unchanged + if consumerPtr.FetchConsumeCount() != 2 { + t.Error("Exactly two consumes expected (for two different responses)") + } +} + +// testContext creates a context from the provided testing.T that will be +// canceled if the test suite is terminated.
+func testContext(t *testing.T) (context.Context, context.CancelFunc) { + if deadline, ok := t.Deadline(); ok { + return context.WithDeadline(context.Background(), deadline) + } + return context.WithCancel(context.Background()) +} + +func init() { + logger.InitLogger("testlogs", 0, 0, "./") +} diff --git a/refresh/helper_for_fetcher_test.go b/refresh/helper_for_fetcher_test.go new file mode 100644 index 0000000000..fa6a6554eb --- /dev/null +++ b/refresh/helper_for_fetcher_test.go @@ -0,0 +1,5 @@ +package refresh + +func (f *Fetcher[T]) SetTicker(t TickProvider) { + f.ticker = t +} diff --git a/refresh/logger.go b/refresh/logger.go new file mode 100644 index 0000000000..d3a8ea66d0 --- /dev/null +++ b/refresh/logger.go @@ -0,0 +1,8 @@ +package refresh + +type Logger interface { + Debugf(format string, v ...interface{}) + Printf(format string, v ...interface{}) + Warnf(format string, v ...interface{}) + Errorf(format string, v ...interface{}) +} diff --git a/refresh/mocktickprovider.go b/refresh/mocktickprovider.go new file mode 100644 index 0000000000..34b4190b50 --- /dev/null +++ b/refresh/mocktickprovider.go @@ -0,0 +1,41 @@ +package refresh + +import "time" + +// MockTickProvider is a mock implementation of the TickProvider interface +type MockTickProvider struct { + tickChan chan time.Time + currentDuration time.Duration +} + +// NewMockTickProvider creates a new MockTickProvider +func NewMockTickProvider() *MockTickProvider { + return &MockTickProvider{ + tickChan: make(chan time.Time, 1), + } +} + +// C returns the channel on which ticks are delivered +func (m *MockTickProvider) C() <-chan time.Time { + return m.tickChan +} + +// Stop stops the ticker +func (m *MockTickProvider) Stop() { + close(m.tickChan) +} + +// Tick manually sends a tick to the channel +func (m *MockTickProvider) Tick() { + m.tickChan <- time.Now() +} + +func (m *MockTickProvider) Reset(d time.Duration) { + m.currentDuration = d +} + +func (m *MockTickProvider) GetCurrentDuration() time.Duration { + return m.currentDuration +} + +var _ TickProvider = &MockTickProvider{} diff --git a/refresh/refreshticker.go b/refresh/refreshticker.go new file mode 100644 index 0000000000..20ad268718 --- /dev/null +++ b/refresh/refreshticker.go @@ -0,0 +1,37 @@ +package refresh + +import "time" + +// TickProvider defines an interface for a type that provides a channel that ticks at a regular interval +type TickProvider interface { + Stop() + Reset(d time.Duration) + C() <-chan time.Time +} + +// TimedTickProvider wraps a time.Ticker to implement TickProvider +type TimedTickProvider struct { + ticker *time.Ticker +} + +var _ TickProvider = &TimedTickProvider{} + +// NewTimedTickProvider creates a new TimedTickProvider +func NewTimedTickProvider(d time.Duration) *TimedTickProvider { + return &TimedTickProvider{ticker: time.NewTicker(d)} +} + +// Stop stops the ticker +func (tw *TimedTickProvider) Stop() { + tw.ticker.Stop() +} + +// Reset resets the ticker with a new duration +func (tw *TimedTickProvider) Reset(d time.Duration) { + tw.ticker.Reset(d) +} + +// C returns the ticker's channel +func (tw *TimedTickProvider) C() <-chan time.Time { + return tw.ticker.C +} diff --git a/server/tls/tlscertificate_retriever.go b/server/tls/tlscertificate_retriever.go index d3037815be..a22a7336b7 100644 --- a/server/tls/tlscertificate_retriever.go +++ b/server/tls/tlscertificate_retriever.go @@ -14,6 +14,7 @@ type TlsSettings struct { MSIResourceID string KeyVaultCertificateRefreshInterval time.Duration UseMTLS bool + MinTLSVersion string } func 
GetTlsCertificateRetriever(settings TlsSettings) (TlsCertificateRetriever, error) { diff --git a/telemetry/aiwrapper_test.go b/telemetry/aiwrapper_test.go index c10e39fe70..326e99f55d 100644 --- a/telemetry/aiwrapper_test.go +++ b/telemetry/aiwrapper_test.go @@ -17,15 +17,7 @@ func TestCreateAITelemetryHandle(t *testing.T) { wantErr bool }{ { - name: "disable telemetry", - aiConfig: aitelemetry.AIConfig{}, - disableAll: false, - disableMetric: true, - disableTrace: true, - wantErr: true, - }, - { - name: "empty aiconfig", + name: "disabled telemetry with empty aiconfig", aiConfig: aitelemetry.AIConfig{}, disableAll: true, disableMetric: true, diff --git a/telemetry/telemetry.go b/telemetry/telemetry.go index dfa052794e..04313631f6 100644 --- a/telemetry/telemetry.go +++ b/telemetry/telemetry.go @@ -79,8 +79,6 @@ type CNIReport struct { VnetAddressSpace []string OSDetails OSInfo SystemDetails SystemInfo - InterfaceDetails InterfaceInfo - BridgeDetails BridgeInfo Metadata common.Metadata `json:"compute"` Logger *zap.Logger } @@ -91,9 +89,7 @@ type AIMetric struct { // ReportManager structure. type ReportManager struct { - HostNetAgentURL string - ContentType string - Report interface{} + Report interface{} } // GetReport retrieves orchestrator, system, OS and Interface details and create a report structure. diff --git a/telemetry/telemetry_client.go b/telemetry/telemetry_client.go new file mode 100644 index 0000000000..54241c42eb --- /dev/null +++ b/telemetry/telemetry_client.go @@ -0,0 +1,113 @@ +package telemetry + +import ( + "fmt" + "os" + "sync" + + "github.com/Azure/azure-container-networking/aitelemetry" + "go.uber.org/zap" +) + +const ( + telemetryNumberRetries = 5 + telemetryWaitTimeInMilliseconds = 200 +) + +type Client struct { + cniReportSettings *CNIReport + tb *TelemetryBuffer + logger *zap.Logger + lock sync.Mutex +} + +// package level variable for application insights telemetry +var AIClient = NewClient() + +func NewClient() *Client { + return &Client{ + cniReportSettings: &CNIReport{}, + } +} + +// Settings gets a pointer to the cni report struct, used to modify individual fields +func (c *Client) Settings() *CNIReport { + return c.cniReportSettings +} + +// SetSettings REPLACES the pointer to the cni report struct and should only be used on startup +func (c *Client) SetSettings(settings *CNIReport) { + c.cniReportSettings = settings +} + +func (c *Client) IsConnected() bool { + return c.tb != nil && c.tb.Connected +} + +func (c *Client) ConnectTelemetry(logger *zap.Logger) { + c.tb = NewTelemetryBuffer(logger) + c.tb.ConnectToTelemetry() + c.logger = logger +} + +func (c *Client) StartAndConnectTelemetry(logger *zap.Logger) { + c.tb = NewTelemetryBuffer(logger) + c.tb.ConnectToTelemetryService(telemetryNumberRetries, telemetryWaitTimeInMilliseconds) + c.logger = logger +} + +func (c *Client) DisconnectTelemetry() { + if c.tb == nil { + return + } + c.tb.Close() +} + +func (c *Client) sendEvent(msg string) { + if c.tb == nil { + return + } + c.lock.Lock() + defer c.lock.Unlock() + eventMsg := fmt.Sprintf("[%d] %s", os.Getpid(), msg) + c.cniReportSettings.EventMessage = eventMsg + SendCNIEvent(c.tb, c.cniReportSettings) +} + +func (c *Client) sendLog(msg string) { + if c.logger == nil { + return + } + c.logger.Info("Telemetry Event", zap.String("message", msg)) +} + +func (c *Client) SendEvent(msg string) { + c.sendEvent(msg) +} + +func (c *Client) SendError(err error) { + if err == nil { + return + } + // when the cni report reaches the telemetry service, the ai log message 
+ // is set to either the cni report's event message or error message, + // whichever is not empty, so we can always just set the event message + c.sendEvent(err.Error()) +} + +func (c *Client) SendMetric(name string, value float64, customDims map[string]string) { + if c.tb == nil { + return + } + err := SendCNIMetric(&AIMetric{ + aitelemetry.Metric{ + Name: name, + Value: value, + AppVersion: c.Settings().Version, + CustomDimensions: customDims, + }, + }, c.tb) + if err != nil { + c.sendLog("Couldn't send metric: " + err.Error()) + } +} diff --git a/telemetry/telemetry_client_test.go b/telemetry/telemetry_client_test.go new file mode 100644 index 0000000000..2c33f6540d --- /dev/null +++ b/telemetry/telemetry_client_test.go @@ -0,0 +1,65 @@ +package telemetry + +import ( + "errors" + "regexp" + "testing" + + "github.com/stretchr/testify/require" + "go.uber.org/zap" +) + +var errMockTelemetryClient = errors.New("mock telemetry client error") + +func TestClient(t *testing.T) { + allowedErrorMsg := regexp.MustCompile(`^\[\d+\] mock telemetry client error`) + allowedEventMsg := regexp.MustCompile(`^\[\d+\] telemetry event`) + + emptyClient := NewClient() + + // an empty client should not cause panics + require.NotPanics(t, func() { emptyClient.SendEvent("no errors") }) + + require.NotPanics(t, func() { emptyClient.SendError(errMockTelemetryClient) }) + + require.NotPanics(t, func() { emptyClient.DisconnectTelemetry() }) + + require.NotPanics(t, func() { emptyClient.sendLog("no errors") }) + + require.NotPanics(t, func() { emptyClient.sendEvent("no errors") }) + + logger, err := zap.NewDevelopment() + require.NoError(t, err) + + // should not panic if connecting telemetry fails or succeeds + require.NotPanics(t, func() { emptyClient.ConnectTelemetry(logger) }) + + // should set logger during connection + require.Equal(t, logger, emptyClient.logger) + + // for testing, we create a new telemetry buffer and assign it + emptyClient.tb = &TelemetryBuffer{} + + // test sending error + require.NotPanics(t, func() { emptyClient.SendError(errMockTelemetryClient) }) + require.Regexp(t, allowedErrorMsg, emptyClient.Settings().EventMessage) + + // test sending event, error is empty + require.NotPanics(t, func() { emptyClient.SendEvent("telemetry event") }) + require.Regexp(t, allowedEventMsg, emptyClient.Settings().EventMessage) + require.Equal(t, "", emptyClient.Settings().ErrorMessage) + + // test sending aimetrics doesn't panic... 
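// ---------------------------------------------------------------------------
// Illustrative aside, not part of this diff: typical plugin-side use of the
// package-level AIClient introduced in telemetry_client.go above. The zap
// logger construction, version string, metric name, and dimensions are
// hypothetical examples.

package main

import (
	"github.com/Azure/azure-container-networking/telemetry"
	"go.uber.org/zap"
)

func main() {
	logger, _ := zap.NewProduction()
	// ConnectTelemetry attempts to reach the telemetry buffer; the test in
	// this file exercises that Send* calls do not panic even when no
	// connection was established.
	telemetry.AIClient.ConnectTelemetry(logger)
	defer telemetry.AIClient.DisconnectTelemetry()

	telemetry.AIClient.Settings().Version = "v0.0.0-example" // startup-time field edit
	telemetry.AIClient.SendEvent("plugin started")
	telemetry.AIClient.SendMetric("exampleLatencyMs", 42, map[string]string{"os": "linux"})
}
// ---------------------------------------------------------------------------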
+ require.NotPanics(t, func() { emptyClient.SendMetric("", 0, nil) }) + // ...and doesn't affect the cni report + require.Regexp(t, allowedEventMsg, emptyClient.Settings().EventMessage) + require.Equal(t, "", emptyClient.Settings().ErrorMessage) + + emptyClient.Settings().Context = "abc" + require.Equal(t, "abc", emptyClient.Settings().Context) + + myClient := &Client{ + tb: &TelemetryBuffer{}, + } + require.NotPanics(t, func() { myClient.DisconnectTelemetry() }) +} diff --git a/telemetry/telemetrybuffer.go b/telemetry/telemetrybuffer.go index ce30e642c2..c44a6988a4 100644 --- a/telemetry/telemetrybuffer.go +++ b/telemetry/telemetrybuffer.go @@ -308,7 +308,7 @@ func (tb *TelemetryBuffer) StartTelemetryService(path string, args []string) err err := tb.plc.KillProcessByName(TelemetryServiceProcessName) if err != nil { if tb.logger != nil { - tb.logger.Error("Failed to kill process by", zap.String("TelemetryServiceProcessName", TelemetryServiceProcessName), zap.Error(err)) + tb.logger.Warn("Failed to kill process by", zap.String("TelemetryServiceProcessName", TelemetryServiceProcessName), zap.Error(err)) } else { log.Logf("[Telemetry] Failed to kill process by telemetryServiceProcessName %s due to %v", TelemetryServiceProcessName, err) } diff --git a/telemetry/telemetrybuffer_test.go b/telemetry/telemetrybuffer_test.go index 19b3abe495..8cd6e307bc 100644 --- a/telemetry/telemetrybuffer_test.go +++ b/telemetry/telemetrybuffer_test.go @@ -188,3 +188,22 @@ func TestStartTelemetryService(t *testing.T) { err := tb.StartTelemetryService("", nil) require.Error(t, err) } + +// TestExtraneousClose checks that closing potentially multiple times after a failed connect won't panic +func TestExtraneousClose(_ *testing.T) { + tb := NewTelemetryBuffer(nil) + + tb.Close() + tb.Close() + + tb.ConnectToTelemetry() + + tb.Close() + tb.Close() + + tb = NewTelemetryBuffer(nil) + tb.ConnectToTelemetryService(telemetryNumberRetries, telemetryWaitTimeInMilliseconds) + + tb.Close() + tb.Close() +} diff --git a/test/e2e/framework/kubernetes/port-forward.go b/test/e2e/framework/kubernetes/port-forward.go index 2693c8cdbf..576caf4c93 100644 --- a/test/e2e/framework/kubernetes/port-forward.go +++ b/test/e2e/framework/kubernetes/port-forward.go @@ -86,7 +86,7 @@ func (p *PortForward) Run() error { log.Printf("attempting port forward to pod name \"%s\" with label \"%s\", in namespace \"%s\"...\n", targetPodName, p.LabelSelector, p.Namespace) - p.pf, err = k8s.NewPortForwarder(config, &logger{}, opts) + p.pf, err = k8s.NewPortForwarder(config, opts) if err != nil { return fmt.Errorf("could not create port forwarder: %w", err) } @@ -161,9 +161,3 @@ func (p *PortForward) Stop() error { p.pf.Stop() return nil } - -type logger struct{} - -func (l *logger) Logf(format string, args ...interface{}) { - log.Printf(format, args...) -} diff --git a/test/integration/cilium-nodesubnet/ipconfigupdate.go b/test/integration/cilium-nodesubnet/ipconfigupdate.go new file mode 100644 index 0000000000..214f4655f7 --- /dev/null +++ b/test/integration/cilium-nodesubnet/ipconfigupdate.go @@ -0,0 +1,144 @@ +package main + +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "os/exec" + "strconv" + "strings" + + "github.com/pkg/errors" +) + +func runAzCommand(params ...string) (string, error) { + var out bytes.Buffer + var stderr bytes.Buffer + var err error + fmt.Println("Running Azure CLI command ", strings.Join(params, " ")) + for i := 0; i < 3; i++ { + cmd := exec.Command("az", params...) 
+ cmd.Stdout = &out + cmd.Stderr = &stderr + err = cmd.Run() + if err == nil { + break + } + } + + if err != nil { + return "", errors.Wrap(err, "command failed "+stderr.String()) + } + + return out.String(), nil +} + +func contains(slice []string, item string) bool { + for _, s := range slice { + if s == item { + return true + } + } + return false +} + +func main() { + resourceGroup := os.Getenv("RESOURCE_GROUP") + if resourceGroup == "" { + fmt.Println("RESOURCE_GROUP environment variable is required") + os.Exit(1) + } + + secondaryConfigCountStr := os.Getenv("SECONDARY_CONFIG_COUNT") + if secondaryConfigCountStr == "" { + secondaryConfigCountStr = "64" + } + + secondaryConfigCount, err := strconv.Atoi(secondaryConfigCountStr) + if err != nil { + fmt.Printf("Invalid value for SECONDARY_CONFIG_COUNT: %s\n", secondaryConfigCountStr) + os.Exit(1) + } + + result, err := runAzCommand("vmss", "list", "-g", resourceGroup, "--query", "[0].name", "-o", "tsv") + if err != nil { + fmt.Printf("Command failed with error: %s\n", err) + os.Exit(1) + } + vmssName := strings.TrimSpace(result) + + result, err = runAzCommand("vmss", "show", "-g", resourceGroup, "-n", vmssName) + if err != nil { + fmt.Printf("Command failed with error: %s\n", err) + os.Exit(1) + } + + var vmssInfo map[string]interface{} + err = json.Unmarshal([]byte(result), &vmssInfo) + if err != nil { + fmt.Printf("Failed to parse JSON: %s\n", err) + os.Exit(1) + } + + networkProfile := vmssInfo["virtualMachineProfile"].(map[string]interface{})["networkProfile"].(map[string]interface{}) + networkInterfaceConfigurations := networkProfile["networkInterfaceConfigurations"].([]interface{}) + + var usedIPConfigNames []string + var secondaryConfigs []interface{} + + for _, nicConfig := range networkInterfaceConfigurations { + nicConfigMap := nicConfig.(map[string]interface{}) + ipConfigurations := nicConfigMap["ipConfigurations"].([]interface{}) + var primaryIPConfig map[string]interface{} + for _, ipConfig := range ipConfigurations { + ipConfigMap := ipConfig.(map[string]interface{}) + usedIPConfigNames = append(usedIPConfigNames, ipConfigMap["name"].(string)) + if ipConfigMap["primary"].(bool) { + primaryIPConfig = ipConfigMap + } + } + + if primaryIPConfig != nil { + for i := 2; i <= secondaryConfigCount+1; i++ { + ipConfig := make(map[string]interface{}) + for k, v := range primaryIPConfig { + // only the primary config needs loadBalancerBackendAddressPools. Azure doesn't allow + // secondary IP configs to be associated with load balancer backend pools. + if k == "loadBalancerBackendAddressPools" { + continue + } + ipConfig[k] = v + } + + ipConfigName := fmt.Sprintf("ipconfig%d", i) + if !contains(usedIPConfigNames, ipConfigName) { + ipConfig["name"] = ipConfigName + ipConfig["primary"] = false + usedIPConfigNames = append(usedIPConfigNames, ipConfigName) + secondaryConfigs = append(secondaryConfigs, ipConfig) + } + } + } + + nicConfigMap["ipConfigurations"] = append(ipConfigurations, secondaryConfigs...)
+ } + + networkProfileJSON, err := json.Marshal(networkProfile) + if err != nil { + fmt.Printf("Failed to marshal JSON: %s\n", err) + os.Exit(1) + } + + _, err = runAzCommand("vmss", "update", "-g", resourceGroup, "-n", vmssName, "--set", fmt.Sprintf("virtualMachineProfile.networkProfile=%s", networkProfileJSON)) + if err != nil { + fmt.Printf("Command failed with error: %s\n", err) + os.Exit(1) + } + + _, err = runAzCommand("vmss", "update-instances", "-g", resourceGroup, "-n", vmssName, "--instance-ids", "*") + if err != nil { + fmt.Printf("Command failed with error: %s\n", err) + os.Exit(1) + } +} diff --git a/test/integration/datapath/datapath_linux_test.go b/test/integration/datapath/datapath_linux_test.go index 3309a297c0..5ac41ca524 100644 --- a/test/integration/datapath/datapath_linux_test.go +++ b/test/integration/datapath/datapath_linux_test.go @@ -221,7 +221,7 @@ func TestDatapathLinux(t *testing.T) { DestPort: 8080, } - pf, err := k8s.NewPortForwarder(restConfig, t, pfOpts) + pf, err := k8s.NewPortForwarder(restConfig, pfOpts) if err != nil { t.Fatal(err) } diff --git a/test/integration/k8s_test.go b/test/integration/k8s_test.go index 8eeddf3f11..71a588fd6e 100644 --- a/test/integration/k8s_test.go +++ b/test/integration/k8s_test.go @@ -174,7 +174,7 @@ func TestPodScaling(t *testing.T) { } pingCheckFn := func() error { - pf, err := NewPortForwarder(restConfig, t, pfOpts) + pf, err := NewPortForwarder(restConfig, pfOpts) if err != nil { t.Fatalf("could not build port forwarder: %v", err) } diff --git a/test/integration/lrp/lrp_fqdn_test.go b/test/integration/lrp/lrp_fqdn_test.go new file mode 100644 index 0000000000..93bca2439b --- /dev/null +++ b/test/integration/lrp/lrp_fqdn_test.go @@ -0,0 +1,108 @@ +//go:build lrp + +package lrp + +import ( + "context" + "testing" + + "github.com/Azure/azure-container-networking/test/internal/kubernetes" + ciliumClientset "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned" + "github.com/stretchr/testify/require" +) + +var ( + fqdnCNPPath = ciliumManifestsDir + "fqdn-cnp.yaml" + enableFQDNFlag = "enable-l7-proxy" +) + +// TestLRPFQDN tests if the local redirect policy in a cilium cluster is functioning with a +// FQDN Cilium Network Policy. As such, enable-l7-proxy should be enabled in the config +// The test assumes the current kubeconfig points to a cluster with cilium, cns, +// and kube-dns already installed. 
The lrp feature flag should also be enabled in the cilium config +// Does not check if cluster is in a stable state +// Resources created are automatically cleaned up +// From the lrp folder, run: go test ./ -v -tags "lrp" -run ^TestLRPFQDN$ +func TestLRPFQDN(t *testing.T) { + ctx := context.Background() + + selectedPod, cleanupFn := setupLRP(t, ctx) + defer cleanupFn() + require.NotNil(t, selectedPod) + + cs := kubernetes.MustGetClientset() + config := kubernetes.MustGetRestConfig() + ciliumCS, err := ciliumClientset.NewForConfig(config) + require.NoError(t, err) + + // ensure enable l7 proxy flag is enabled + ciliumCM, err := kubernetes.GetConfigmap(ctx, cs, kubeSystemNamespace, ciliumConfigmapName) + require.NoError(t, err) + require.Equal(t, "true", ciliumCM.Data[enableFQDNFlag], "enable-l7-proxy not set to true in cilium-config") + + _, cleanupCNP := kubernetes.MustSetupCNP(ctx, ciliumCS, fqdnCNPPath) + defer cleanupCNP() + + tests := []struct { + name string + command []string + expectedMsgContains string + expectedErrMsgContains string + shouldError bool + countIncreases bool + }{ + { + name: "nslookup google succeeds", + command: []string{"nslookup", "www.google.com", "10.0.0.10"}, + countIncreases: true, + shouldError: false, + }, + { + name: "nslookup google succeeds without explicit dns server", + command: []string{"nslookup", "www.google.com"}, + countIncreases: true, + shouldError: false, + }, + { + name: "wget google succeeds", + command: []string{"wget", "-O", "index.html", "www.google.com", "--timeout=5"}, + expectedErrMsgContains: "saved", + countIncreases: true, + shouldError: false, + }, + { + name: "nslookup cloudflare succeeds", + command: []string{"nslookup", "www.cloudflare.com", "10.0.0.10"}, + countIncreases: true, + shouldError: false, + }, + { + name: "wget cloudflare fails but dns succeeds", + command: []string{"wget", "-O", "index.html", "www.cloudflare.com", "--timeout=5"}, + expectedErrMsgContains: "timed out", + countIncreases: true, + shouldError: true, + }, + { + name: "nslookup example fails", + command: []string{"nslookup", "www.example.com", "10.0.0.10"}, + expectedMsgContains: "REFUSED", + countIncreases: false, + shouldError: true, + }, + { + // won't be able to nslookup, let alone query the website + name: "wget example fails", + command: []string{"wget", "-O", "index.html", "www.example.com", "--timeout=5"}, + expectedErrMsgContains: "bad address", + countIncreases: false, + shouldError: true, + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + testLRPCase(t, ctx, *selectedPod, tt.command, tt.expectedMsgContains, tt.expectedErrMsgContains, tt.shouldError, tt.countIncreases) + }) + } +} diff --git a/test/integration/lrp/lrp_test.go b/test/integration/lrp/lrp_test.go new file mode 100644 index 0000000000..59fd974114 --- /dev/null +++ b/test/integration/lrp/lrp_test.go @@ -0,0 +1,227 @@ +//go:build lrp + +package lrp + +import ( + "context" + "os" + "strings" + "testing" + "time" + + k8s "github.com/Azure/azure-container-networking/test/integration" + "github.com/Azure/azure-container-networking/test/integration/prometheus" + "github.com/Azure/azure-container-networking/test/internal/kubernetes" + "github.com/Azure/azure-container-networking/test/internal/retry" + ciliumClientset "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" + "golang.org/x/exp/rand" + corev1 "k8s.io/api/core/v1" +) + +const ( + ciliumConfigmapName = "cilium-config" + 
ciliumManifestsDir = "../manifests/cilium/lrp/" + enableLRPFlag = "enable-local-redirect-policy" + kubeSystemNamespace = "kube-system" + dnsService = "kube-dns" + retryAttempts = 10 + retryDelay = 5 * time.Second + promAddress = "http://localhost:9253/metrics" + nodeLocalDNSLabelSelector = "k8s-app=node-local-dns" + clientLabelSelector = "lrp-test=true" + coreDNSRequestCountTotal = "coredns_dns_request_count_total" + clientContainer = "no-op" +) + +var ( + defaultRetrier = retry.Retrier{Attempts: retryAttempts, Delay: retryDelay} + nodeLocalDNSDaemonsetPath = ciliumManifestsDir + "node-local-dns-ds.yaml" + tempNodeLocalDNSDaemonsetPath = ciliumManifestsDir + "temp-daemonset.yaml" + nodeLocalDNSConfigMapPath = ciliumManifestsDir + "config-map.yaml" + nodeLocalDNSServiceAccountPath = ciliumManifestsDir + "service-account.yaml" + nodeLocalDNSServicePath = ciliumManifestsDir + "service.yaml" + lrpPath = ciliumManifestsDir + "lrp.yaml" + numClients = 4 + clientPath = ciliumManifestsDir + "client-ds.yaml" +) + +func setupLRP(t *testing.T, ctx context.Context) (*corev1.Pod, func()) { + var cleanUpFns []func() + success := false + cleanupFn := func() { + for len(cleanUpFns) > 0 { + cleanUpFns[len(cleanUpFns)-1]() + cleanUpFns = cleanUpFns[:len(cleanUpFns)-1] + } + } + defer func() { + if !success { + cleanupFn() + } + }() + + config := kubernetes.MustGetRestConfig() + cs := kubernetes.MustGetClientset() + + ciliumCS, err := ciliumClientset.NewForConfig(config) + require.NoError(t, err) + + svc, err := kubernetes.GetService(ctx, cs, kubeSystemNamespace, dnsService) + require.NoError(t, err) + kubeDNS := svc.Spec.ClusterIP + + // ensure lrp flag is enabled + ciliumCM, err := kubernetes.GetConfigmap(ctx, cs, kubeSystemNamespace, ciliumConfigmapName) + require.NoError(t, err) + require.Equal(t, "true", ciliumCM.Data[enableLRPFlag], "enable-local-redirect-policy not set to true in cilium-config") + + // 1.17 and 1.13 cilium versions of both files are identical + // read file + nodeLocalDNSContent, err := os.ReadFile(nodeLocalDNSDaemonsetPath) + require.NoError(t, err) + // replace pillar dns + replaced := strings.ReplaceAll(string(nodeLocalDNSContent), "__PILLAR__DNS__SERVER__", kubeDNS) + // Write the updated content back to the file + err = os.WriteFile(tempNodeLocalDNSDaemonsetPath, []byte(replaced), 0o644) + require.NoError(t, err) + defer func() { + err := os.Remove(tempNodeLocalDNSDaemonsetPath) + require.NoError(t, err) + }() + + // list out and select node of choice + nodeList, err := kubernetes.GetNodeList(ctx, cs) + require.NotEmpty(t, nodeList.Items) + selectedNode := TakeOne(nodeList.Items).Name + + // deploy node local dns prereqs and pods + _, cleanupConfigMap := kubernetes.MustSetupConfigMap(ctx, cs, nodeLocalDNSConfigMapPath) + cleanUpFns = append(cleanUpFns, cleanupConfigMap) + _, cleanupServiceAccount := kubernetes.MustSetupServiceAccount(ctx, cs, nodeLocalDNSServiceAccountPath) + cleanUpFns = append(cleanUpFns, cleanupServiceAccount) + _, cleanupService := kubernetes.MustSetupService(ctx, cs, nodeLocalDNSServicePath) + cleanUpFns = append(cleanUpFns, cleanupService) + nodeLocalDNSDS, cleanupNodeLocalDNS := kubernetes.MustSetupDaemonset(ctx, cs, tempNodeLocalDNSDaemonsetPath) + cleanUpFns = append(cleanUpFns, cleanupNodeLocalDNS) + kubernetes.WaitForPodDaemonset(ctx, cs, nodeLocalDNSDS.Namespace, nodeLocalDNSDS.Name, nodeLocalDNSLabelSelector) + require.NoError(t, err) + // select a local dns pod after they start running + pods, err := kubernetes.GetPodsByNode(ctx, cs,
nodeLocalDNSDS.Namespace, nodeLocalDNSLabelSelector, selectedNode) + require.NoError(t, err) + selectedLocalDNSPod := TakeOne(pods.Items).Name + + // deploy lrp + _, cleanupLRP := kubernetes.MustSetupLRP(ctx, ciliumCS, lrpPath) + cleanUpFns = append(cleanUpFns, cleanupLRP) + + // create client pods + clientDS, cleanupClient := kubernetes.MustSetupDaemonset(ctx, cs, clientPath) + cleanUpFns = append(cleanUpFns, cleanupClient) + kubernetes.WaitForPodDaemonset(ctx, cs, clientDS.Namespace, clientDS.Name, clientLabelSelector) + require.NoError(t, err) + // select a client pod after they start running + clientPods, err := kubernetes.GetPodsByNode(ctx, cs, clientDS.Namespace, clientLabelSelector, selectedNode) + require.NoError(t, err) + selectedClientPod := TakeOne(clientPods.Items) + + t.Logf("Selected node: %s, node local dns pod: %s, client pod: %s\n", selectedNode, selectedLocalDNSPod, selectedClientPod.Name) + + // port forward to local dns pod on same node (separate thread) + pf, err := k8s.NewPortForwarder(config, k8s.PortForwardingOpts{ + Namespace: nodeLocalDNSDS.Namespace, + PodName: selectedLocalDNSPod, + LocalPort: 9253, + DestPort: 9253, + }) + require.NoError(t, err) + pctx := context.Background() + portForwardCtx, cancel := context.WithTimeout(pctx, (retryAttempts+1)*retryDelay) + cleanUpFns = append(cleanUpFns, cancel) + + err = defaultRetrier.Do(portForwardCtx, func() error { + t.Logf("attempting port forward to a pod with label %s, in namespace %s...", nodeLocalDNSLabelSelector, nodeLocalDNSDS.Namespace) + return errors.Wrap(pf.Forward(portForwardCtx), "could not start port forward") + }) + require.NoError(t, err, "could not start port forward within %d", (retryAttempts+1)*retryDelay) + cleanUpFns = append(cleanUpFns, pf.Stop) + + t.Log("started port forward") + + success = true + return &selectedClientPod, cleanupFn +} + +func testLRPCase(t *testing.T, ctx context.Context, clientPod corev1.Pod, clientCmd []string, expectResponse, expectErrMsg string, + shouldError, countShouldIncrease bool) { + + config := kubernetes.MustGetRestConfig() + cs := kubernetes.MustGetClientset() + + // labels for target lrp metric + metricLabels := map[string]string{ + "family": "1", + "proto": "udp", + "server": "dns://0.0.0.0:53", + "zone": ".", + } + + // curl localhost:9253/metrics + beforeMetric, err := prometheus.GetMetric(promAddress, coreDNSRequestCountTotal, metricLabels) + require.NoError(t, err) + + t.Log("calling command from client") + + val, errMsg, err := kubernetes.ExecCmdOnPod(ctx, cs, clientPod.Namespace, clientPod.Name, clientContainer, clientCmd, config, false) + if shouldError { + require.Error(t, err, "stdout: %s, stderr: %s", string(val), string(errMsg)) + } else { + require.NoError(t, err, "stdout: %s, stderr: %s", string(val), string(errMsg)) + } + + require.Contains(t, string(val), expectResponse) + require.Contains(t, string(errMsg), expectErrMsg) + + // allow time for the metric to propagate + time.Sleep(500 * time.Millisecond) + + // curl again and see count diff + afterMetric, err := prometheus.GetMetric(promAddress, coreDNSRequestCountTotal, metricLabels) + require.NoError(t, err) + + if countShouldIncrease { + require.Greater(t, afterMetric.GetCounter().GetValue(), beforeMetric.GetCounter().GetValue(), "dns metric count did not increase after command") + } else { + require.Equal(t, afterMetric.GetCounter().GetValue(), beforeMetric.GetCounter().GetValue(), "dns metric count increased after command") + } +} + +// TestLRP tests if the local redirect policy in a cilium
cluster is functioning +// The test assumes the current kubeconfig points to a cluster with cilium (1.16+), cns, +// and kube-dns already installed. The lrp feature flag should be enabled in the cilium config +// Does not check if cluster is in a stable state +// Resources created are automatically cleaned up +// From the lrp folder, run: go test ./ -v -tags "lrp" -run ^TestLRP$ +func TestLRP(t *testing.T) { + ctx := context.Background() + + selectedPod, cleanupFn := setupLRP(t, ctx) + defer cleanupFn() + require.NotNil(t, selectedPod) + + testLRPCase(t, ctx, *selectedPod, []string{ + "nslookup", "google.com", "10.0.0.10", + }, "", "", false, true) +} + +// TakeOne takes one item from the slice randomly; if empty, it returns the empty value for the type +// Use in testing only +func TakeOne[T any](slice []T) T { + if len(slice) == 0 { + var zero T + return zero + } + rand.Seed(uint64(time.Now().UnixNano())) + return slice[rand.Intn(len(slice))] +} diff --git a/test/integration/manifests/cilium/cilium-nightly-agent/clusterrole.yaml b/test/integration/manifests/cilium/cilium-nightly-agent/clusterrole.yaml index 7dbdd42326..b718138c9e 100644 --- a/test/integration/manifests/cilium/cilium-nightly-agent/clusterrole.yaml +++ b/test/integration/manifests/cilium/cilium-nightly-agent/clusterrole.yaml @@ -2,6 +2,8 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: cilium + labels: + app.kubernetes.io/part-of: cilium rules: - apiGroups: - networking.k8s.io @@ -45,8 +47,6 @@ rules: - apiGroups: - cilium.io resources: - #Naming changed from ciliumbgploadbalancerippools - - ciliumloadbalancerippools - ciliumbgppeeringpolicies - ciliumclusterwideenvoyconfigs - ciliumclusterwidenetworkpolicies @@ -59,8 +59,13 @@ rules: - ciliumnetworkpolicies - ciliumnodes - ciliumnodeconfigs - #Added in 1.14.0 snapshot 2 + - ciliumloadbalancerippools - ciliumcidrgroups + - ciliuml2announcementpolicies + - ciliumpodippools + - ciliumbgpnodeconfigs + - ciliumbgpadvertisements + - ciliumbgppeerconfigs verbs: - list - watch @@ -74,6 +79,7 @@ rules: - create - apiGroups: - cilium.io + # To synchronize garbage collection of such resources resources: - ciliumidentities verbs: @@ -100,5 +106,16 @@ rules: - ciliumclusterwidenetworkpolicies/status - ciliumendpoints/status - ciliumendpoints + - ciliuml2announcementpolicies/status + - ciliumbgpnodeconfigs/status verbs: - patch +- apiGroups: + - "" + resourceNames: + - cilium-config + resources: + - configmaps + verbs: + - list + - watch diff --git a/test/integration/manifests/cilium/cilium-nightly-agent/clusterrolebinding.yaml b/test/integration/manifests/cilium/cilium-nightly-agent/clusterrolebinding.yaml index f5d39b0ffd..93a6e06cdc 100644 --- a/test/integration/manifests/cilium/cilium-nightly-agent/clusterrolebinding.yaml +++ b/test/integration/manifests/cilium/cilium-nightly-agent/clusterrolebinding.yaml @@ -2,6 +2,8 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: cilium + labels: + app.kubernetes.io/part-of: cilium roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole diff --git a/test/integration/manifests/cilium/cilium-nightly-config.yaml b/test/integration/manifests/cilium/cilium-nightly-config.yaml index 09cb637462..f875fd6680 100644 --- a/test/integration/manifests/cilium/cilium-nightly-config.yaml +++ b/test/integration/manifests/cilium/cilium-nightly-config.yaml @@ -1,4 +1,4 @@ -apiVersion: v1 +apiVersion: v1 #Not verified, placeholder data: agent-not-ready-taint-key: node.cilium.io/agent-not-ready 
arping-refresh-period: 30s @@ -33,7 +33,7 @@ data: enable-l2-neigh-discovery: "true" enable-l7-proxy: "false" enable-local-node-route: "false" - enable-local-redirect-policy: "false" + enable-local-redirect-policy: "true" # set to true for lrp test enable-metrics: "true" enable-policy: default enable-session-affinity: "true" @@ -46,7 +46,7 @@ data: install-no-conntrack-iptables-rules: "false" ipam: delegated-plugin kube-proxy-replacement: "true" - kube-proxy-replacement-healthz-bind-address: "" + kube-proxy-replacement-healthz-bind-address: "0.0.0.0:10256" local-router-ipv4: 169.254.23.0 metrics: +cilium_bpf_map_pressure monitor-aggregation: medium @@ -61,21 +61,72 @@ data: prometheus-serve-addr: :9962 remove-cilium-node-taints: "true" set-cilium-is-up-condition: "true" + sidecar-istio-proxy-image: cilium/istio_proxy synchronize-k8s-nodes: "true" tofqdns-dns-reject-response-code: refused tofqdns-enable-dns-compression: "true" - tofqdns-endpoint-max-ip-per-hostname: "50" + tofqdns-endpoint-max-ip-per-hostname: "1000" tofqdns-idle-connection-grace-period: 0s tofqdns-max-deferred-connection-deletes: "10000" - tofqdns-min-ttl: "3600" + tofqdns-min-ttl: "0" tofqdns-proxy-response-max-delay: 100ms - #Replaces tunnel: disabled in v1.15 - routing-mode: "native" + routing-mode: native unmanaged-pod-watcher-interval: "15" vtep-cidr: "" vtep-endpoint: "" vtep-mac: "" vtep-mask: "" + enable-sctp: "false" + external-envoy-proxy: "false" + k8s-client-qps: "10" + k8s-client-burst: "20" + mesh-auth-enabled: "true" + mesh-auth-queue-size: "1024" + mesh-auth-rotated-identities-queue-size: "1024" + mesh-auth-gc-interval: "5m0s" + proxy-connect-timeout: "2" + proxy-max-requests-per-connection: "0" + proxy-max-connection-duration-seconds: "0" + set-cilium-node-taints: "true" +## new values added for 1.16 below + enable-ipv4-big-tcp: "false" + enable-ipv6-big-tcp: "false" + enable-masquerade-to-route-source: "false" + enable-health-check-loadbalancer-ip: "false" + bpf-lb-acceleration: "disabled" + enable-k8s-networkpolicy: "true" + cni-exclusive: "false" # Cilium takes ownership of /etc/cni/net.d, pods cannot be scheduled with any other cni if cilium is down + cni-log-file: "/var/run/cilium/cilium-cni.log" + ipam-cilium-node-update-rate: "15s" + egress-gateway-reconciliation-trigger-interval: "1s" + nat-map-stats-entries: "32" + nat-map-stats-interval: "30s" + bpf-events-drop-enabled: "true" # exposes drop events to cilium monitor/hubble + bpf-events-policy-verdict-enabled: "true" # exposes policy verdict events to cilium monitor/hubble + bpf-events-trace-enabled: "true" # exposes trace events to cilium monitor/hubble + enable-tcx: "false" # attach endpoint programs with tcx if supported by kernel + datapath-mode: "veth" + direct-routing-skip-unreachable: "false" + enable-runtime-device-detection: "false" + bpf-lb-sock: "false" + bpf-lb-sock-terminate-pod-connections: "false" + nodeport-addresses: "" + k8s-require-ipv4-pod-cidr: "false" + k8s-require-ipv6-pod-cidr: "false" + enable-node-selector-labels: "false" +## new values for 1.17 + ces-slice-mode: "fcfs" + enable-cilium-endpoint-slice: "true" + bpf-lb-source-range-all-types: "false" + bpf-algorithm-annotation: "false" + bpf-lb-mode-annotation: "false" + enable-experimental-lb: "false" + enable-endpoint-lockdown-on-policy-overflow: "false" + health-check-icmp-failure-threshold: "3" + enable-internal-traffic-policy: "true" + enable-lb-ipam: "true" + enable-non-default-deny-policies: "true" + enable-source-ip-verification: "true" kind: ConfigMap metadata: 
annotations: diff --git a/test/integration/manifests/cilium/cilium-nightly-operator/clusterrole.yaml b/test/integration/manifests/cilium/cilium-nightly-operator/clusterrole.yaml index 8c12e05729..329cc07f5d 100644 --- a/test/integration/manifests/cilium/cilium-nightly-operator/clusterrole.yaml +++ b/test/integration/manifests/cilium/cilium-nightly-operator/clusterrole.yaml @@ -2,6 +2,8 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: cilium-operator + labels: + app.kubernetes.io/part-of: cilium rules: - apiGroups: - "" @@ -14,6 +16,15 @@ rules: # to automatically delete [core|kube]dns pods so that are starting to being # managed by Cilium - delete +- apiGroups: + - "" + resources: + - configmaps + resourceNames: + - cilium-config + verbs: + # allow patching of the configmap to set annotations + - patch - apiGroups: - "" resources: @@ -51,6 +62,7 @@ rules: resources: # to check apiserver connectivity - namespaces + - secrets verbs: - get - list @@ -87,6 +99,7 @@ rules: - ciliumclusterwidenetworkpolicies/status verbs: # Update the auto-generated CNPs and CCNPs status. + - patch - update - apiGroups: - cilium.io @@ -103,6 +116,7 @@ rules: resources: - ciliumidentities verbs: + # To synchronize garbage collection of such resources - update - apiGroups: - cilium.io @@ -127,6 +141,9 @@ rules: resources: - ciliumendpointslices - ciliumenvoyconfigs + - ciliumbgppeerconfigs + - ciliumbgpadvertisements + - ciliumbgpnodeconfigs verbs: - create - update @@ -135,6 +152,13 @@ rules: - watch - delete - patch +- apiGroups: + - cilium.io + resources: + - ciliumbgpclusterconfigs/status + - ciliumbgppeerconfigs/status + verbs: + - update - apiGroups: - apiextensions.k8s.io resources: @@ -153,10 +177,14 @@ rules: resourceNames: - ciliumloadbalancerippools.cilium.io - ciliumbgppeeringpolicies.cilium.io + - ciliumbgpclusterconfigs.cilium.io + - ciliumbgppeerconfigs.cilium.io + - ciliumbgpadvertisements.cilium.io + - ciliumbgpnodeconfigs.cilium.io + - ciliumbgpnodeconfigoverrides.cilium.io - ciliumclusterwideenvoyconfigs.cilium.io - ciliumclusterwidenetworkpolicies.cilium.io - ciliumegressgatewaypolicies.cilium.io - - ciliumegressnatpolicies.cilium.io - ciliumendpoints.cilium.io - ciliumendpointslices.cilium.io - ciliumenvoyconfigs.cilium.io @@ -166,8 +194,34 @@ rules: - ciliumnetworkpolicies.cilium.io - ciliumnodes.cilium.io - ciliumnodeconfigs.cilium.io - #Added in 1.14.0 snapshot 2 - ciliumcidrgroups.cilium.io + - ciliuml2announcementpolicies.cilium.io + - ciliumpodippools.cilium.io +- apiGroups: + - cilium.io + resources: + - ciliumloadbalancerippools + - ciliumpodippools + - ciliumbgppeeringpolicies + - ciliumbgpclusterconfigs + - ciliumbgpnodeconfigoverrides + - ciliumbgppeerconfigs + verbs: + - get + - list + - watch +- apiGroups: + - cilium.io + resources: + - ciliumpodippools + verbs: + - create +- apiGroups: + - cilium.io + resources: + - ciliumloadbalancerippools/status + verbs: + - patch # For cilium-operator running in HA mode. 
# # Cilium operator running in HA mode requires the use of ResourceLock for Leader Election @@ -181,4 +235,4 @@ rules: verbs: - create - get - - update + - update \ No newline at end of file diff --git a/test/integration/manifests/cilium/cns-write-ovly.yaml b/test/integration/manifests/cilium/cns-write-ovly.yaml index 4f3d919757..4e0a6d7861 100644 --- a/test/integration/manifests/cilium/cns-write-ovly.yaml +++ b/test/integration/manifests/cilium/cns-write-ovly.yaml @@ -82,7 +82,7 @@ spec: operator: NotIn values: - virtual-kubelet - - key: beta.kubernetes.io/os + - key: kubernetes.io/os operator: In values: - linux diff --git a/test/integration/manifests/cilium/daemonset.yaml b/test/integration/manifests/cilium/daemonset.yaml index a710c23360..f3e6e7093f 100644 --- a/test/integration/manifests/cilium/daemonset.yaml +++ b/test/integration/manifests/cilium/daemonset.yaml @@ -7,6 +7,7 @@ metadata: labels: app.kubernetes.io/managed-by: Helm k8s-app: cilium + app.kubernetes.io/part-of: cilium name: cilium namespace: kube-system spec: @@ -16,10 +17,6 @@ spec: template: metadata: annotations: - container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites: unconfined - container.apparmor.security.beta.kubernetes.io/cilium-agent: unconfined - container.apparmor.security.beta.kubernetes.io/clean-cilium-state: unconfined - container.apparmor.security.beta.kubernetes.io/mount-cgroup: unconfined prometheus.io/port: "9962" prometheus.io/scrape: "true" creationTimestamp: null @@ -42,12 +39,6 @@ spec: operator: In values: - linux - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - k8s-app: cilium - topologyKey: kubernetes.io/hostname containers: - args: - --config-dir=/tmp/cilium/config-map @@ -102,6 +93,8 @@ spec: timeoutSeconds: 5 resources: {} securityContext: + appArmorProfile: + type: Unconfined capabilities: add: - CHOWN @@ -159,6 +152,9 @@ spec: readOnly: true - mountPath: /run/xtables.lock name: xtables-lock + - mountPath: /var/run/cilium/netns + name: cilium-netns + mountPropagation: HostToContainer dnsPolicy: ClusterFirst hostNetwork: true initContainers: @@ -197,6 +193,8 @@ spec: name: mount-cgroup resources: {} securityContext: + appArmorProfile: + type: Unconfined capabilities: add: - SYS_ADMIN @@ -229,6 +227,8 @@ spec: name: apply-sysctl-overwrites resources: {} securityContext: + appArmorProfile: + type: Unconfined capabilities: add: - SYS_ADMIN @@ -287,6 +287,8 @@ spec: cpu: 100m memory: 100Mi securityContext: + appArmorProfile: + type: Unconfined capabilities: add: - NET_ADMIN @@ -308,26 +310,6 @@ spec: name: cilium-cgroup - mountPath: /var/run/cilium name: cilium-run - - command: - - bash - - -cex - - | - export LD_LIBRARY_PATH=/host/lib/systemd:/host/usr/lib/aarch64-linux-gnu:/host/usr/lib/x86_64-linux-gnu - export SYSTEMD_VERSION="$(/host/lib/systemd/systemd --version | head -n 1 | cut -d' ' -f2)" - [[ $SYSTEMD_VERSION -ge 249 ]] && { - mkdir -p /host/etc/systemd/networkd.conf.d - echo -e "[Network]\nManageForeignRoutes=no\nManageForeignRoutingPolicyRules=no\n" \ - >/host/etc/systemd/networkd.conf.d/99-cilium-foreign-routes.conf - chmod -R u+rwX,go+rX /host/etc/systemd/networkd.conf.d - } || exit 0 - image: mcr.microsoft.com/cbl-mariner/base/core:2.0 - imagePullPolicy: IfNotPresent - name: systemd-networkd-overrides - resources: {} - securityContext: - privileged: true - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File volumeMounts: - mountPath: /host/etc/systemd name: host-etc-systemd @@ -436,6 
+418,10 @@ spec: path: /proc/sys/kernel type: Directory name: host-proc-sys-kernel + - hostPath: + path: /var/run/netns + type: DirectoryOrCreate + name: cilium-netns updateStrategy: rollingUpdate: maxSurge: 0 diff --git a/test/integration/manifests/cilium/deployment.yaml b/test/integration/manifests/cilium/deployment.yaml index 2842221eee..0b1a497bd2 100644 --- a/test/integration/manifests/cilium/deployment.yaml +++ b/test/integration/manifests/cilium/deployment.yaml @@ -38,6 +38,8 @@ spec: - --debug=$(CILIUM_DEBUG) - --identity-gc-interval=0m20s - --identity-heartbeat-timeout=0m20s + - --enable-cilium-endpoint-slice=true + - --ces-slice-mode=fcfs env: - name: K8S_NODE_NAME valueFrom: @@ -60,6 +62,44 @@ spec: containerPort: 9963 hostPort: 9963 protocol: TCP + securityContext: + seLinuxOptions: + level: 's0' + # Running with spc_t since we have removed the privileged mode. + # Users can change it to a different type as long as they have the + # type available on the system. + type: 'spc_t' + capabilities: + add: + # Use to set socket permission + - CHOWN + # Used to terminate envoy child process + - KILL + # Used since cilium modifies routing tables, etc... + - NET_ADMIN + # Used since cilium creates raw sockets, etc... + - NET_RAW + # Used since cilium monitor uses mmap + - IPC_LOCK + # Used in iptables. Consider removing once we are iptables-free + - SYS_MODULE + # We need it for now but might not need it for >= 5.11 specially + # for the 'SYS_RESOURCE'. + # In >= 5.8 there's already BPF and PERMON capabilities + - SYS_ADMIN + # Could be an alternative for the SYS_ADMIN for the RLIMIT_NPROC + - SYS_RESOURCE + # Both PERFMON and BPF requires kernel 5.8, container runtime + # cri-o >= v1.22.0 or containerd >= v1.5.0. + # If available, SYS_ADMIN can be removed. + #- PERFMON + #- BPF + - DAC_OVERRIDE + - FOWNER + - SETGID + - SETUID + drop: + - ALL livenessProbe: httpGet: host: "127.0.0.1" @@ -112,6 +152,10 @@ spec: tolerations: - key: "CriticalAddonsOnly" operator: "Exists" + - operator: "Exists" + effect: NoExecute + - operator: "Exists" + effect: NoSchedule volumes: # To read the configuration from the config map - name: cilium-config-path diff --git a/test/integration/manifests/cilium/lrp/client-ds.yaml b/test/integration/manifests/cilium/lrp/client-ds.yaml new file mode 100644 index 0000000000..698f753069 --- /dev/null +++ b/test/integration/manifests/cilium/lrp/client-ds.yaml @@ -0,0 +1,19 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: lrp-test + namespace: default +spec: + selector: + matchLabels: + lrp-test: "true" + template: + metadata: + labels: + lrp-test: "true" + spec: + containers: + - name: no-op + command: ["sleep","3600"] + image: mcr.microsoft.com/cbl-mariner/busybox:2.0 + imagePullPolicy: Always diff --git a/test/integration/manifests/cilium/lrp/config-map.yaml b/test/integration/manifests/cilium/lrp/config-map.yaml new file mode 100644 index 0000000000..2c1235eec0 --- /dev/null +++ b/test/integration/manifests/cilium/lrp/config-map.yaml @@ -0,0 +1,53 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: node-local-dns + namespace: kube-system +data: + Corefile: | + cluster.local:53 { + errors + cache { + success 9984 30 + denial 9984 5 + } + reload + loop + bind 0.0.0.0 + forward . __PILLAR__CLUSTER__DNS__ { + force_tcp + } + prometheus :9253 + health + } + in-addr.arpa:53 { + errors + cache 30 + reload + loop + bind 0.0.0.0 + forward . 
__PILLAR__CLUSTER__DNS__ { + force_tcp + } + prometheus :9253 + } + ip6.arpa:53 { + errors + cache 30 + reload + loop + bind 0.0.0.0 + forward . __PILLAR__CLUSTER__DNS__ { + force_tcp + } + prometheus :9253 + } + .:53 { + errors + cache 30 + reload + loop + bind 0.0.0.0 + forward . __PILLAR__UPSTREAM__SERVERS__ + prometheus :9253 + } diff --git a/test/integration/manifests/cilium/lrp/fqdn-cnp.yaml b/test/integration/manifests/cilium/lrp/fqdn-cnp.yaml new file mode 100644 index 0000000000..31a3320507 --- /dev/null +++ b/test/integration/manifests/cilium/lrp/fqdn-cnp.yaml @@ -0,0 +1,24 @@ +apiVersion: "cilium.io/v2" +kind: CiliumNetworkPolicy +metadata: + name: "to-fqdn" + namespace: "default" +spec: + endpointSelector: + matchLabels: + lrp-test: "true" + egress: + - toEndpoints: + - matchLabels: + "k8s:io.kubernetes.pod.namespace": kube-system + "k8s:k8s-app": node-local-dns + toPorts: + - ports: + - port: "53" + protocol: UDP + rules: + dns: + - matchPattern: "*.google.com" + - matchPattern: "*.cloudflare.com" + - toFQDNs: + - matchPattern: "*.google.com" diff --git a/test/integration/manifests/cilium/lrp/lrp.yaml b/test/integration/manifests/cilium/lrp/lrp.yaml new file mode 100644 index 0000000000..377091089c --- /dev/null +++ b/test/integration/manifests/cilium/lrp/lrp.yaml @@ -0,0 +1,21 @@ +apiVersion: "cilium.io/v2" +kind: CiliumLocalRedirectPolicy +metadata: + name: "nodelocaldns" + namespace: kube-system +spec: + redirectFrontend: + serviceMatcher: + serviceName: kube-dns + namespace: kube-system + redirectBackend: + localEndpointSelector: + matchLabels: + k8s-app: node-local-dns + toPorts: + - port: "53" + name: dns + protocol: UDP + - port: "53" + name: dns-tcp + protocol: TCP diff --git a/test/integration/manifests/cilium/lrp/node-local-dns-ds.yaml b/test/integration/manifests/cilium/lrp/node-local-dns-ds.yaml new file mode 100644 index 0000000000..28c23bfb49 --- /dev/null +++ b/test/integration/manifests/cilium/lrp/node-local-dns-ds.yaml @@ -0,0 +1,84 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: node-local-dns + namespace: kube-system + labels: + k8s-app: node-local-dns +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: 10% + selector: + matchLabels: + k8s-app: node-local-dns + template: + metadata: + labels: + k8s-app: node-local-dns + annotations: + policy.cilium.io/no-track-port: "53" + prometheus.io/port: "9253" + prometheus.io/scrape: "true" + spec: + priorityClassName: system-node-critical + serviceAccountName: node-local-dns + dnsPolicy: Default # Don't use cluster DNS. 
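+      # node-cache must resolve its upstream queries through the node's
+      # resolv.conf; pointing it at cluster DNS would loop queries back
+      # through the very service this DaemonSet caches for.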
+ tolerations: + - key: "CriticalAddonsOnly" + operator: "Exists" + - effect: "NoExecute" + operator: "Exists" + - effect: "NoSchedule" + operator: "Exists" + containers: + - name: node-cache + image: registry.k8s.io/dns/k8s-dns-node-cache:1.15.16 + resources: + requests: + cpu: 25m + memory: 5Mi + args: + [ + "-localip", + "169.254.20.10,__PILLAR__DNS__SERVER__", + "-conf", + "/etc/Corefile", + "-upstreamsvc", + "kube-dns-upstream", + "-skipteardown=true", + "-setupinterface=false", + "-setupiptables=false", + ] + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + - containerPort: 9253 + name: metrics + protocol: TCP + livenessProbe: + httpGet: + path: /health + port: 8080 + initialDelaySeconds: 60 + timeoutSeconds: 5 + volumeMounts: + - name: config-volume + mountPath: /etc/coredns + - name: kube-dns-config + mountPath: /etc/kube-dns + volumes: + - name: kube-dns-config + configMap: + name: kube-dns + optional: true + - name: config-volume + configMap: + name: node-local-dns + items: + - key: Corefile + path: Corefile.base diff --git a/test/integration/manifests/cilium/lrp/service-account.yaml b/test/integration/manifests/cilium/lrp/service-account.yaml new file mode 100644 index 0000000000..3f58bd3bfd --- /dev/null +++ b/test/integration/manifests/cilium/lrp/service-account.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: node-local-dns + namespace: kube-system diff --git a/test/integration/manifests/cilium/lrp/service.yaml b/test/integration/manifests/cilium/lrp/service.yaml new file mode 100644 index 0000000000..1fdb6b56f1 --- /dev/null +++ b/test/integration/manifests/cilium/lrp/service.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Service +metadata: + name: kube-dns-upstream + namespace: kube-system + labels: + k8s-app: kube-dns + kubernetes.io/name: "KubeDNSUpstream" +spec: + ports: + - name: dns + port: 53 + protocol: UDP + targetPort: 53 + - name: dns-tcp + port: 53 + protocol: TCP + targetPort: 53 + selector: + k8s-app: kube-dns diff --git a/test/integration/manifests/cilium/v1.13/cilium-agent/templates/daemonset.yaml b/test/integration/manifests/cilium/v1.13/cilium-agent/templates/daemonset.yaml index a710c23360..c7c5625455 100644 --- a/test/integration/manifests/cilium/v1.13/cilium-agent/templates/daemonset.yaml +++ b/test/integration/manifests/cilium/v1.13/cilium-agent/templates/daemonset.yaml @@ -42,12 +42,6 @@ spec: operator: In values: - linux - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - k8s-app: cilium - topologyKey: kubernetes.io/hostname containers: - args: - --config-dir=/tmp/cilium/config-map diff --git a/test/integration/manifests/cilium/v1.13/cilium-config/cilium-config.yaml b/test/integration/manifests/cilium/v1.13/cilium-config/cilium-config.yaml index b443b1bb3a..198074750b 100644 --- a/test/integration/manifests/cilium/v1.13/cilium-config/cilium-config.yaml +++ b/test/integration/manifests/cilium/v1.13/cilium-config/cilium-config.yaml @@ -34,7 +34,7 @@ data: enable-l2-neigh-discovery: "true" enable-l7-proxy: "false" enable-local-node-route: "false" - enable-local-redirect-policy: "false" + enable-local-redirect-policy: "true" # set to true for lrp test enable-metrics: "true" enable-policy: default enable-remote-node-identity: "true" diff --git a/test/integration/manifests/cilium/v1.13/cilium-operator/templates/deployment.yaml b/test/integration/manifests/cilium/v1.13/cilium-operator/templates/deployment.yaml index 
cdc0e79d83..2f96b4de99 100644 --- a/test/integration/manifests/cilium/v1.13/cilium-operator/templates/deployment.yaml +++ b/test/integration/manifests/cilium/v1.13/cilium-operator/templates/deployment.yaml @@ -38,6 +38,8 @@ spec: - --debug=$(CILIUM_DEBUG) - --identity-gc-interval=0m20s - --identity-heartbeat-timeout=0m20s + - --enable-cilium-endpoint-slice=true + - --ces-slice-mode=cesSliceModeFCFS env: - name: K8S_NODE_NAME valueFrom: @@ -150,6 +152,10 @@ spec: tolerations: - key: "CriticalAddonsOnly" operator: "Exists" + - operator: "Exists" + effect: NoExecute + - operator: "Exists" + effect: NoSchedule volumes: # To read the configuration from the config map - name: cilium-config-path diff --git a/test/integration/manifests/cilium/v1.14/cilium-agent/templates/daemonset-dualstack.yaml b/test/integration/manifests/cilium/v1.14/cilium-agent/templates/daemonset-dualstack.yaml index dfec32a34e..c8f901cfbb 100644 --- a/test/integration/manifests/cilium/v1.14/cilium-agent/templates/daemonset-dualstack.yaml +++ b/test/integration/manifests/cilium/v1.14/cilium-agent/templates/daemonset-dualstack.yaml @@ -42,12 +42,6 @@ spec: operator: In values: - linux - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - k8s-app: cilium - topologyKey: kubernetes.io/hostname containers: - args: - --config-dir=/tmp/cilium/config-map @@ -309,7 +303,7 @@ spec: - mountPath: /var/run/cilium name: cilium-run - name: start-ipv6-hp-bpf - image: acnpublic.azurecr.io/ipv6-hp-bpf:$IPV6_HP_BPF_VERSION + image: $IPV6_IMAGE_REGISTRY/ipv6-hp-bpf:$IPV6_HP_BPF_VERSION imagePullPolicy: IfNotPresent command: [/ipv6-hp-bpf] securityContext: diff --git a/test/integration/manifests/cilium/v1.14/cilium-agent/templates/daemonset.yaml b/test/integration/manifests/cilium/v1.14/cilium-agent/templates/daemonset.yaml index 8a2eb9f255..5086a34bb8 100644 --- a/test/integration/manifests/cilium/v1.14/cilium-agent/templates/daemonset.yaml +++ b/test/integration/manifests/cilium/v1.14/cilium-agent/templates/daemonset.yaml @@ -42,12 +42,6 @@ spec: operator: In values: - linux - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - k8s-app: cilium - topologyKey: kubernetes.io/hostname containers: - args: - --config-dir=/tmp/cilium/config-map diff --git a/test/integration/manifests/cilium/v1.14/cilium-config/cilium-config.yaml b/test/integration/manifests/cilium/v1.14/cilium-config/cilium-config.yaml index 8d713518af..9dbd460c0b 100644 --- a/test/integration/manifests/cilium/v1.14/cilium-config/cilium-config.yaml +++ b/test/integration/manifests/cilium/v1.14/cilium-config/cilium-config.yaml @@ -34,7 +34,7 @@ data: enable-l2-neigh-discovery: "true" enable-l7-proxy: "false" enable-local-node-route: "false" - enable-local-redirect-policy: "false" + enable-local-redirect-policy: "true" # set to true for lrp test enable-metrics: "true" enable-policy: default enable-remote-node-identity: "true" @@ -100,4 +100,4 @@ metadata: labels: app.kubernetes.io/managed-by: Helm name: cilium-config - namespace: kube-system \ No newline at end of file + namespace: kube-system diff --git a/test/integration/manifests/cilium/v1.14/cilium-operator/templates/deployment.yaml b/test/integration/manifests/cilium/v1.14/cilium-operator/templates/deployment.yaml index cdc0e79d83..2f96b4de99 100644 --- a/test/integration/manifests/cilium/v1.14/cilium-operator/templates/deployment.yaml +++ b/test/integration/manifests/cilium/v1.14/cilium-operator/templates/deployment.yaml @@ -38,6 
+38,8 @@ spec: - --debug=$(CILIUM_DEBUG) - --identity-gc-interval=0m20s - --identity-heartbeat-timeout=0m20s + - --enable-cilium-endpoint-slice=true + - --ces-slice-mode=cesSliceModeFCFS env: - name: K8S_NODE_NAME valueFrom: @@ -150,6 +152,10 @@ spec: tolerations: - key: "CriticalAddonsOnly" operator: "Exists" + - operator: "Exists" + effect: NoExecute + - operator: "Exists" + effect: NoSchedule volumes: # To read the configuration from the config map - name: cilium-config-path diff --git a/test/integration/manifests/cilium/v1.16/cilium-agent/files/clusterrole.yaml b/test/integration/manifests/cilium/v1.16/cilium-agent/files/clusterrole.yaml new file mode 100644 index 0000000000..2bc15412c0 --- /dev/null +++ b/test/integration/manifests/cilium/v1.16/cilium-agent/files/clusterrole.yaml @@ -0,0 +1,112 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cilium + labels: + app.kubernetes.io/part-of: cilium +rules: +- apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - namespaces + - services + - pods + - endpoints + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - list + - watch + # This is used when validating policies in preflight. This will need to stay + # until we figure out how to avoid "get" inside the preflight, and then + # should be removed ideally. + - get +- apiGroups: + - cilium.io + resources: + - ciliumbgppeeringpolicies + - ciliumclusterwideenvoyconfigs + - ciliumclusterwidenetworkpolicies + - ciliumegressgatewaypolicies + - ciliumendpoints + - ciliumendpointslices + - ciliumenvoyconfigs + - ciliumidentities + - ciliumlocalredirectpolicies + - ciliumnetworkpolicies + - ciliumnodes + - ciliumnodeconfigs + - ciliumloadbalancerippools + - ciliumcidrgroups + - ciliuml2announcementpolicies + - ciliumpodippools + - ciliumbgpnodeconfigs + - ciliumbgpadvertisements + - ciliumbgppeerconfigs + verbs: + - list + - watch +- apiGroups: + - cilium.io + resources: + - ciliumidentities + - ciliumendpoints + - ciliumnodes + verbs: + - create +- apiGroups: + - cilium.io + # To synchronize garbage collection of such resources + resources: + - ciliumidentities + verbs: + - update +- apiGroups: + - cilium.io + resources: + - ciliumendpoints + verbs: + - delete + - get +- apiGroups: + - cilium.io + resources: + - ciliumnodes + - ciliumnodes/status + verbs: + - get + - update +- apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies/status + - ciliumclusterwidenetworkpolicies/status + - ciliumendpoints/status + - ciliumendpoints + - ciliuml2announcementpolicies/status + - ciliumbgpnodeconfigs/status + verbs: + - patch diff --git a/test/integration/manifests/cilium/v1.16/cilium-agent/files/clusterrolebinding.yaml b/test/integration/manifests/cilium/v1.16/cilium-agent/files/clusterrolebinding.yaml new file mode 100644 index 0000000000..93a6e06cdc --- /dev/null +++ b/test/integration/manifests/cilium/v1.16/cilium-agent/files/clusterrolebinding.yaml @@ -0,0 +1,14 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cilium + labels: + app.kubernetes.io/part-of: cilium +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cilium +subjects: +- kind: ServiceAccount + name: "cilium" + namespace: kube-system diff --git 
a/test/integration/manifests/cilium/v1.16/cilium-agent/files/serviceaccount.yaml b/test/integration/manifests/cilium/v1.16/cilium-agent/files/serviceaccount.yaml new file mode 100644 index 0000000000..edf2e96e34 --- /dev/null +++ b/test/integration/manifests/cilium/v1.16/cilium-agent/files/serviceaccount.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: "cilium" + namespace: kube-system + diff --git a/test/integration/manifests/cilium/v1.16/cilium-agent/templates/daemonset-dualstack.yaml b/test/integration/manifests/cilium/v1.16/cilium-agent/templates/daemonset-dualstack.yaml new file mode 100644 index 0000000000..6747013d8a --- /dev/null +++ b/test/integration/manifests/cilium/v1.16/cilium-agent/templates/daemonset-dualstack.yaml @@ -0,0 +1,442 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + annotations: + meta.helm.sh/release-name: cilium + meta.helm.sh/release-namespace: kube-system + labels: + app.kubernetes.io/managed-by: Helm + k8s-app: cilium + app.kubernetes.io/part-of: cilium + name: cilium + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: cilium + template: + metadata: + annotations: + prometheus.io/port: "9962" + prometheus.io/scrape: "true" + creationTimestamp: null + labels: + k8s-app: cilium + kubernetes.azure.com/ebpf-dataplane: cilium + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.azure.com/cluster + operator: Exists + - key: type + operator: NotIn + values: + - virtual-kubelet + - key: kubernetes.io/os + operator: In + values: + - linux + containers: + - args: + - --config-dir=/tmp/cilium/config-map + command: + - cilium-agent + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_CLUSTERMESH_CONFIG + value: /var/lib/cilium/clustermesh/ + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 10 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: "true" + path: /healthz + port: 9879 + scheme: HTTP + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + name: cilium-agent + ports: + - containerPort: 9962 + hostPort: 9962 + name: prometheus + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: "true" + path: /healthz + port: 9879 + scheme: HTTP + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + resources: {} + securityContext: + appArmorProfile: + type: Unconfined + capabilities: + add: + - CHOWN + - KILL + - NET_ADMIN + - NET_RAW + - IPC_LOCK + - SYS_MODULE + - SYS_ADMIN + - SYS_RESOURCE + - DAC_OVERRIDE + - FOWNER + - SETGID + - SETUID + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + startupProbe: + failureThreshold: 105 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: "true" + path: /healthz + port: 9879 + scheme: HTTP + periodSeconds: 2 + successThreshold: 1 + timeoutSeconds: 1 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /host/proc/sys/net + name: host-proc-sys-net + - mountPath: /host/proc/sys/kernel + name: host-proc-sys-kernel + - mountPath: /sys/fs/bpf + mountPropagation: HostToContainer + name: bpf-maps + - mountPath: /var/run/cilium + name: cilium-run + - mountPath: 
/host/etc/cni/net.d + name: etc-cni-netd + - mountPath: /var/lib/cilium/clustermesh + name: clustermesh-secrets + readOnly: true + - mountPath: /tmp/cilium/config-map + name: cilium-config-path + readOnly: true + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + - mountPath: /var/run/cilium/netns + name: cilium-netns + mountPropagation: HostToContainer + dnsPolicy: ClusterFirst + hostNetwork: true + initContainers: + - name: install-cni-binaries + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + command: + - "/install-plugin.sh" + securityContext: + seLinuxOptions: + level: 's0' + # Running with spc_t since we have removed the privileged mode. + # Users can change it to a different type as long as they have the + # type available on the system. + type: 'spc_t' + capabilities: + drop: + - ALL + volumeMounts: + - name: cni-path + mountPath: /host/opt/cni/bin + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-mount /hostbin/cilium-mount; + nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT; + rm /hostbin/cilium-mount + env: + - name: CGROUP_ROOT + value: /run/cilium/cgroupv2 + - name: BIN_PATH + value: /opt/cni/bin + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: mount-cgroup + resources: {} + securityContext: + appArmorProfile: + type: Unconfined + capabilities: + add: + - SYS_ADMIN + - SYS_CHROOT + - SYS_PTRACE + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin + name: cni-path + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix; + nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix"; + rm /hostbin/cilium-sysctlfix + env: + - name: BIN_PATH + value: /opt/cni/bin + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: apply-sysctl-overwrites + resources: {} + securityContext: + appArmorProfile: + type: Unconfined + capabilities: + add: + - SYS_ADMIN + - SYS_CHROOT + - SYS_PTRACE + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin + name: cni-path + - args: + - mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf + command: + - /bin/bash + - -c + - -- + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: mount-bpf-fs + resources: {} + securityContext: + privileged: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /sys/fs/bpf + mountPropagation: Bidirectional + name: bpf-maps + - command: + - /init-container.sh + env: + - name: CILIUM_ALL_STATE + valueFrom: + configMapKeyRef: + key: clean-cilium-state + name: cilium-config + optional: true + - name: CILIUM_BPF_STATE + valueFrom: + configMapKeyRef: + key: clean-cilium-bpf-state + name: cilium-config + optional: true + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: clean-cilium-state + resources: + requests: + cpu: 100m + memory: 100Mi + securityContext: + appArmorProfile: + type: Unconfined + 
capabilities: + add: + - NET_ADMIN + - SYS_MODULE + - SYS_ADMIN + - SYS_RESOURCE + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /sys/fs/bpf + name: bpf-maps + - mountPath: /run/cilium/cgroupv2 + mountPropagation: HostToContainer + name: cilium-cgroup + - mountPath: /var/run/cilium + name: cilium-run + volumeMounts: + - mountPath: /host/etc/systemd + name: host-etc-systemd + - mountPath: /host/lib/systemd + name: host-lib-systemd + readOnly: true + - mountPath: /host/usr/lib + name: host-usr-lib + readOnly: true + - name: start-ipv6-hp-bpf + image: $IPV6_IMAGE_REGISTRY/ipv6-hp-bpf:$IPV6_HP_BPF_VERSION + imagePullPolicy: IfNotPresent + command: [/ipv6-hp-bpf] + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/log + name: ipv6-hp-bpf + - name: block-wireserver + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + command: + - /bin/bash + - -cx + - | + iptables -t mangle -C FORWARD -d 168.63.129.16 -p tcp --dport 80 -j DROP + status=$? + set -e + if [ $status -eq 0 ]; then + echo "Skip adding iptables as it already exists" + else + iptables -t mangle -I FORWARD -d 168.63.129.16 -p tcp --dport 80 -j DROP + fi + securityContext: + capabilities: + add: + - NET_ADMIN + drop: + - ALL + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: cilium + serviceAccountName: cilium + terminationGracePeriodSeconds: 1 + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + - effect: NoSchedule + operator: Exists + volumes: + - hostPath: + path: /var/log + type: DirectoryOrCreate + name: ipv6-hp-bpf + - hostPath: + path: /etc/systemd + type: DirectoryOrCreate + name: host-etc-systemd + - hostPath: + path: /lib/systemd + type: DirectoryOrCreate + name: host-lib-systemd + - hostPath: + path: /usr/lib + type: DirectoryOrCreate + name: host-usr-lib + - hostPath: + path: /var/run/cilium + type: DirectoryOrCreate + name: cilium-run + - hostPath: + path: /sys/fs/bpf + type: DirectoryOrCreate + name: bpf-maps + - hostPath: + path: /proc + type: Directory + name: hostproc + - hostPath: + path: /run/cilium/cgroupv2 + type: DirectoryOrCreate + name: cilium-cgroup + - hostPath: + path: /opt/cni/bin + type: DirectoryOrCreate + name: cni-path + - hostPath: + path: /etc/cni/net.d + type: DirectoryOrCreate + name: etc-cni-netd + - hostPath: + path: /lib/modules + type: "" + name: lib-modules + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + - name: clustermesh-secrets + secret: + defaultMode: 256 + optional: true + secretName: cilium-clustermesh + - configMap: + defaultMode: 420 + name: cilium-config + name: cilium-config-path + - hostPath: + path: /proc/sys/net + type: Directory + name: host-proc-sys-net + - hostPath: + path: /proc/sys/kernel + type: Directory + name: host-proc-sys-kernel + - hostPath: + path: /var/run/netns + type: DirectoryOrCreate + name: cilium-netns + updateStrategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 2 + type: RollingUpdate diff --git a/test/integration/manifests/cilium/v1.16/cilium-agent/templates/daemonset.yaml b/test/integration/manifests/cilium/v1.16/cilium-agent/templates/daemonset.yaml new file mode 100644 index 0000000000..13bc922770 --- /dev/null +++ 
b/test/integration/manifests/cilium/v1.16/cilium-agent/templates/daemonset.yaml @@ -0,0 +1,429 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + annotations: + meta.helm.sh/release-name: cilium + meta.helm.sh/release-namespace: kube-system + labels: + app.kubernetes.io/managed-by: Helm + k8s-app: cilium + app.kubernetes.io/part-of: cilium + name: cilium + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: cilium + template: + metadata: + annotations: + prometheus.io/port: "9962" + prometheus.io/scrape: "true" + creationTimestamp: null + labels: + k8s-app: cilium + kubernetes.azure.com/ebpf-dataplane: cilium + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.azure.com/cluster + operator: Exists + - key: type + operator: NotIn + values: + - virtual-kubelet + - key: kubernetes.io/os + operator: In + values: + - linux + containers: + - args: + - --config-dir=/tmp/cilium/config-map + command: + - cilium-agent + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_CLUSTERMESH_CONFIG + value: /var/lib/cilium/clustermesh/ + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 10 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: "true" + path: /healthz + port: 9879 + scheme: HTTP + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + name: cilium-agent + ports: + - containerPort: 9962 + hostPort: 9962 + name: prometheus + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: "true" + path: /healthz + port: 9879 + scheme: HTTP + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + resources: {} + securityContext: + appArmorProfile: + type: Unconfined + capabilities: + add: + - CHOWN + - KILL + - NET_ADMIN + - NET_RAW + - IPC_LOCK + - SYS_MODULE + - SYS_ADMIN + - SYS_RESOURCE + - DAC_OVERRIDE + - FOWNER + - SETGID + - SETUID + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + startupProbe: + failureThreshold: 105 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: "true" + path: /healthz + port: 9879 + scheme: HTTP + periodSeconds: 2 + successThreshold: 1 + timeoutSeconds: 1 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /host/proc/sys/net + name: host-proc-sys-net + - mountPath: /host/proc/sys/kernel + name: host-proc-sys-kernel + - mountPath: /sys/fs/bpf + mountPropagation: HostToContainer + name: bpf-maps + - mountPath: /var/run/cilium + name: cilium-run + - mountPath: /host/etc/cni/net.d + name: etc-cni-netd + - mountPath: /var/lib/cilium/clustermesh + name: clustermesh-secrets + readOnly: true + - mountPath: /tmp/cilium/config-map + name: cilium-config-path + readOnly: true + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + - mountPath: /var/run/cilium/netns + name: cilium-netns + mountPropagation: HostToContainer + dnsPolicy: ClusterFirst + hostNetwork: true + initContainers: + - name: install-cni-binaries + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + command: + - "/install-plugin.sh" + securityContext: + seLinuxOptions: 
+ level: 's0' + # Running with spc_t since we have removed the privileged mode. + # Users can change it to a different type as long as they have the + # type available on the system. + type: 'spc_t' + capabilities: + drop: + - ALL + volumeMounts: + - name: cni-path + mountPath: /host/opt/cni/bin + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-mount /hostbin/cilium-mount; + nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT; + rm /hostbin/cilium-mount + env: + - name: CGROUP_ROOT + value: /run/cilium/cgroupv2 + - name: BIN_PATH + value: /opt/cni/bin + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: mount-cgroup + resources: {} + securityContext: + appArmorProfile: + type: Unconfined + capabilities: + add: + - SYS_ADMIN + - SYS_CHROOT + - SYS_PTRACE + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin + name: cni-path + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix; + nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix"; + rm /hostbin/cilium-sysctlfix + env: + - name: BIN_PATH + value: /opt/cni/bin + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: apply-sysctl-overwrites + resources: {} + securityContext: + appArmorProfile: + type: Unconfined + capabilities: + add: + - SYS_ADMIN + - SYS_CHROOT + - SYS_PTRACE + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin + name: cni-path + - args: + - mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf + command: + - /bin/bash + - -c + - -- + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: mount-bpf-fs + resources: {} + securityContext: + privileged: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /sys/fs/bpf + mountPropagation: Bidirectional + name: bpf-maps + - command: + - /init-container.sh + env: + - name: CILIUM_ALL_STATE + valueFrom: + configMapKeyRef: + key: clean-cilium-state + name: cilium-config + optional: true + - name: CILIUM_BPF_STATE + valueFrom: + configMapKeyRef: + key: clean-cilium-bpf-state + name: cilium-config + optional: true + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: clean-cilium-state + resources: + requests: + cpu: 100m + memory: 100Mi + securityContext: + appArmorProfile: + type: Unconfined + capabilities: + add: + - NET_ADMIN + - SYS_MODULE + - SYS_ADMIN + - SYS_RESOURCE + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /sys/fs/bpf + name: bpf-maps + - mountPath: /run/cilium/cgroupv2 + mountPropagation: HostToContainer + name: cilium-cgroup + - mountPath: /var/run/cilium + name: cilium-run + volumeMounts: + - mountPath: /host/etc/systemd + name: host-etc-systemd + - mountPath: /host/lib/systemd + name: host-lib-systemd + readOnly: true + - mountPath: /host/usr/lib + name: host-usr-lib + readOnly: true + - name: block-wireserver + image: 
$CILIUM_IMAGE_REGISTRY/cilium/cilium:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + command: + - /bin/bash + - -cx + - | + iptables -t mangle -C FORWARD -d 168.63.129.16 -p tcp --dport 80 -j DROP + status=$? + set -e + if [ $status -eq 0 ]; then + echo "Skip adding iptables as it already exists" + else + iptables -t mangle -I FORWARD -d 168.63.129.16 -p tcp --dport 80 -j DROP + fi + securityContext: + capabilities: + add: + - NET_ADMIN + drop: + - ALL + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: cilium + serviceAccountName: cilium + terminationGracePeriodSeconds: 1 + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + - effect: NoSchedule + operator: Exists + volumes: + - hostPath: + path: /etc/systemd + type: DirectoryOrCreate + name: host-etc-systemd + - hostPath: + path: /lib/systemd + type: DirectoryOrCreate + name: host-lib-systemd + - hostPath: + path: /usr/lib + type: DirectoryOrCreate + name: host-usr-lib + - hostPath: + path: /var/run/cilium + type: DirectoryOrCreate + name: cilium-run + - hostPath: + path: /sys/fs/bpf + type: DirectoryOrCreate + name: bpf-maps + - hostPath: + path: /proc + type: Directory + name: hostproc + - hostPath: + path: /run/cilium/cgroupv2 + type: DirectoryOrCreate + name: cilium-cgroup + - hostPath: + path: /opt/cni/bin + type: DirectoryOrCreate + name: cni-path + - hostPath: + path: /etc/cni/net.d + type: DirectoryOrCreate + name: etc-cni-netd + - hostPath: + path: /lib/modules + type: "" + name: lib-modules + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + - name: clustermesh-secrets + secret: + defaultMode: 256 + optional: true + secretName: cilium-clustermesh + - configMap: + defaultMode: 420 + name: cilium-config + name: cilium-config-path + - hostPath: + path: /proc/sys/net + type: Directory + name: host-proc-sys-net + - hostPath: + path: /proc/sys/kernel + type: Directory + name: host-proc-sys-kernel + - hostPath: + path: /var/run/netns + type: DirectoryOrCreate + name: cilium-netns + updateStrategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 2 + type: RollingUpdate diff --git a/test/integration/manifests/cilium/v1.16/cilium-config/cilium-config-dualstack.yaml b/test/integration/manifests/cilium/v1.16/cilium-config/cilium-config-dualstack.yaml new file mode 100644 index 0000000000..b89750da7b --- /dev/null +++ b/test/integration/manifests/cilium/v1.16/cilium-config/cilium-config-dualstack.yaml @@ -0,0 +1,130 @@ +apiVersion: v1 #Not verified, placeholder +data: + agent-not-ready-taint-key: node.cilium.io/agent-not-ready + arping-refresh-period: 30s + auto-direct-node-routes: "false" + bpf-filter-priority: "2" + bpf-lb-external-clusterip: "false" + bpf-lb-map-max: "65536" + bpf-lb-mode: snat + bpf-map-dynamic-size-ratio: "0.0025" + bpf-policy-map-max: "16384" + bpf-root: /sys/fs/bpf + cgroup-root: /run/cilium/cgroupv2 + cilium-endpoint-gc-interval: 5m0s + cluster-id: "0" + cluster-name: default + debug: "false" + disable-cnp-status-updates: "true" + disable-endpoint-crd: "false" + dnsproxy-enable-transparent-mode: "false" + enable-auto-protect-node-port-range: "true" + enable-bgp-control-plane: "false" + enable-bpf-clock-probe: "true" + enable-endpoint-health-checking: "false" + enable-endpoint-routes: "true" + enable-health-check-nodeport: "true" + enable-health-checking: "true" + enable-host-legacy-routing: "true" + 
enable-hubble: "false"
+  enable-ipv4: "true"
+  enable-ipv4-masquerade: "false"
+  enable-ipv6: "true"
+  enable-ipv6-masquerade: "false"
+  enable-k8s-terminating-endpoint: "true"
+  enable-l2-neigh-discovery: "true"
+  enable-l7-proxy: "false"
+  enable-local-node-route: "false"
+  enable-local-redirect-policy: "false"
+  enable-metrics: "true"
+  enable-policy: default
+  enable-remote-node-identity: "true"
+  enable-session-affinity: "true"
+  enable-svc-source-range-check: "true"
+  enable-vtep: "false"
+  enable-well-known-identities: "false"
+  enable-xt-socket-fallback: "true"
+  identity-allocation-mode: crd
+  install-iptables-rules: "true"
+  install-no-conntrack-iptables-rules: "false"
+  ipam: delegated-plugin
+  kube-proxy-replacement: "true"
+  kube-proxy-replacement-healthz-bind-address: "0.0.0.0:10256"
+  local-router-ipv4: 169.254.23.0
+  local-router-ipv6: "fe80::"
+  metrics: +cilium_bpf_map_pressure
+  monitor-aggregation: medium
+  monitor-aggregation-flags: all
+  monitor-aggregation-interval: 5s
+  node-port-bind-protection: "true"
+  nodes-gc-interval: 5m0s
+  operator-api-serve-addr: 127.0.0.1:9234
+  operator-prometheus-serve-addr: :9963
+  preallocate-bpf-maps: "false"
+  procfs: /host/proc
+  prometheus-serve-addr: :9962
+  remove-cilium-node-taints: "true"
+  set-cilium-is-up-condition: "true"
+  sidecar-istio-proxy-image: cilium/istio_proxy
+  synchronize-k8s-nodes: "true"
+  tofqdns-dns-reject-response-code: refused
+  tofqdns-enable-dns-compression: "true"
+  tofqdns-endpoint-max-ip-per-hostname: "1000"
+  tofqdns-idle-connection-grace-period: 0s
+  tofqdns-max-deferred-connection-deletes: "10000"
+  tofqdns-min-ttl: "0"
+  tofqdns-proxy-response-max-delay: 100ms
+  routing-mode: native
+  unmanaged-pod-watcher-interval: "15"
+  vtep-cidr: ""
+  vtep-endpoint: ""
+  vtep-mac: ""
+  vtep-mask: ""
+  enable-sctp: "false"
+  external-envoy-proxy: "false"
+  k8s-client-qps: "5"
+  k8s-client-burst: "10"
+  mesh-auth-enabled: "true"
+  mesh-auth-queue-size: "1024"
+  mesh-auth-rotated-identities-queue-size: "1024"
+  mesh-auth-gc-interval: "5m0s"
+  proxy-connect-timeout: "2"
+  proxy-max-requests-per-connection: "0"
+  proxy-max-connection-duration-seconds: "0"
+  set-cilium-node-taints: "true"
+## new values added for 1.16 below
+  enable-ipv4-big-tcp: "false"
+  enable-ipv6-big-tcp: "false"
+  enable-masquerade-to-route-source: "false"
+  enable-health-check-loadbalancer-ip: "false"
+  bpf-lb-acceleration: "disabled"
+  enable-k8s-networkpolicy: "true"
+  cni-exclusive: "false" # Cilium takes ownership of /etc/cni/net.d, pods cannot be scheduled with any other cni if cilium is down
+  cni-log-file: "/var/run/cilium/cilium-cni.log"
+  ipam-cilium-node-update-rate: "15s"
+  egress-gateway-reconciliation-trigger-interval: "1s"
+  nat-map-stats-entries: "32"
+  nat-map-stats-interval: "30s"
+  bpf-events-drop-enabled: "true" # exposes drop events to cilium monitor/hubble
+  bpf-events-policy-verdict-enabled: "true" # exposes policy verdict events to cilium monitor/hubble
+  bpf-events-trace-enabled: "true" # exposes trace events to cilium monitor/hubble
+  enable-tcx: "false" # attach endpoint programs with tcx if supported by kernel
+  datapath-mode: "veth"
+  direct-routing-skip-unreachable: "false"
+  enable-runtime-device-detection: "false"
+  bpf-lb-sock: "false"
+  bpf-lb-sock-terminate-pod-connections: "false"
+  nodeport-addresses: ""
+  k8s-require-ipv4-pod-cidr: "false"
+  k8s-require-ipv6-pod-cidr: "false"
+  enable-node-selector-labels: "false"
+kind: ConfigMap
+metadata:
+  annotations:
+
meta.helm.sh/release-name: cilium + meta.helm.sh/release-namespace: kube-system + labels: + app.kubernetes.io/managed-by: Helm + name: cilium-config + namespace: kube-system \ No newline at end of file diff --git a/test/integration/manifests/cilium/v1.16/cilium-config/cilium-config-hubble.yaml b/test/integration/manifests/cilium/v1.16/cilium-config/cilium-config-hubble.yaml new file mode 100644 index 0000000000..0d0c5775ba --- /dev/null +++ b/test/integration/manifests/cilium/v1.16/cilium-config/cilium-config-hubble.yaml @@ -0,0 +1,132 @@ +apiVersion: v1 #Not verified, placeholder +data: + agent-not-ready-taint-key: node.cilium.io/agent-not-ready + arping-refresh-period: 30s + auto-direct-node-routes: "false" + bpf-lb-external-clusterip: "false" + bpf-lb-map-max: "65536" + bpf-lb-mode: snat + bpf-map-dynamic-size-ratio: "0.0025" + bpf-policy-map-max: "16384" + bpf-root: /sys/fs/bpf + cgroup-root: /run/cilium/cgroupv2 + cilium-endpoint-gc-interval: 5m0s + cluster-id: "0" + cluster-name: default + debug: "false" + disable-cnp-status-updates: "true" + disable-endpoint-crd: "false" + enable-auto-protect-node-port-range: "true" + enable-bgp-control-plane: "false" + enable-bpf-clock-probe: "true" + enable-endpoint-health-checking: "false" + enable-endpoint-routes: "true" + enable-health-check-nodeport: "true" + enable-health-checking: "true" + enable-host-legacy-routing: "true" + enable-hubble: "true" + enable-ipv4: "true" + enable-ipv4-masquerade: "false" + enable-ipv6: "false" + enable-ipv6-masquerade: "false" + enable-k8s-terminating-endpoint: "true" + enable-l2-neigh-discovery: "true" + enable-l7-proxy: "false" + enable-local-node-route: "false" + enable-local-redirect-policy: "false" + enable-metrics: "true" + enable-policy: default + enable-remote-node-identity: "true" + enable-session-affinity: "true" + enable-svc-source-range-check: "true" + enable-vtep: "false" + enable-well-known-identities: "false" + enable-xt-socket-fallback: "true" + hubble-listen-address: "" + hubble-metrics: flow:sourceContext=pod;destinationContext=pod tcp:sourceContext=pod;destinationContext=pod + dns:query drop:sourceContext=pod;destinationContext=pod + hubble-metrics-server: :9965 + identity-allocation-mode: crd + install-iptables-rules: "true" + install-no-conntrack-iptables-rules: "false" + ipam: delegated-plugin + kube-proxy-replacement: "true" + kube-proxy-replacement-healthz-bind-address: "0.0.0.0:10256" + local-router-ipv4: 169.254.23.0 + metrics: +cilium_bpf_map_pressure + monitor-aggregation: medium + monitor-aggregation-flags: all + monitor-aggregation-interval: 5s + node-port-bind-protection: "true" + nodes-gc-interval: 5m0s + operator-api-serve-addr: 127.0.0.1:9234 + operator-prometheus-serve-addr: :9963 + preallocate-bpf-maps: "false" + procfs: /host/proc + prometheus-serve-addr: :9962 + remove-cilium-node-taints: "true" + set-cilium-is-up-condition: "true" + sidecar-istio-proxy-image: cilium/istio_proxy + synchronize-k8s-nodes: "true" + tofqdns-dns-reject-response-code: refused + tofqdns-enable-dns-compression: "true" + tofqdns-endpoint-max-ip-per-hostname: "1000" + tofqdns-idle-connection-grace-period: 0s + tofqdns-max-deferred-connection-deletes: "10000" + tofqdns-min-ttl: "0" + tofqdns-proxy-response-max-delay: 100ms + routing-mode: native + unmanaged-pod-watcher-interval: "15" + vtep-cidr: "" + vtep-endpoint: "" + vtep-mac: "" + vtep-mask: "" + # new default values from Cilium v1.14.4 + enable-sctp: "false" + external-envoy-proxy: "false" + k8s-client-qps: "5" + k8s-client-burst: "10" + 
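# the mesh-auth-* keys below carry Cilium's mutual-authentication (beta)
+  # defaults; nothing in these integration tests exercises mutual auth.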
mesh-auth-enabled: "true" + mesh-auth-queue-size: "1024" + mesh-auth-rotated-identities-queue-size: "1024" + mesh-auth-gc-interval: "5m0s" + proxy-connect-timeout: "2" + proxy-max-requests-per-connection: "0" + proxy-max-connection-duration-seconds: "0" + set-cilium-node-taints: "true" + unmanaged-pod-watcher-interval: "15" + ## new values added for 1.16 below + enable-ipv4-big-tcp: "false" + enable-ipv6-big-tcp: "false" + enable-masquerade-to-route-source: "false" + enable-health-check-loadbalancer-ip: "false" + bpf-lb-acceleration: "disabled" + enable-k8s-networkpolicy: "true" + cni-exclusive: "false" # Cilium takes ownership of /etc/cni/net.d, pods cannot be scheduled with any other cni if cilium is down + cni-log-file: "/var/run/cilium/cilium-cni.log" + ipam-cilium-node-update-rate: "15s" + egress-gateway-reconciliation-trigger-interval: "1s" + nat-map-stats-entries: "32" + nat-map-stats-interval: "30s" + bpf-events-drop-enabled: "true" # exposes drop events to cilium monitor/hubble + bpf-events-policy-verdict-enabled: "true" # exposes policy verdict events to cilium monitor/hubble + bpf-events-trace-enabled: "true" # exposes trace events to cilium monitor/hubble + enable-tcx: "false" # attach endpoint programs with tcx if supported by kernel + datapath-mode: "veth" + direct-routing-skip-unreachable: "false" + enable-runtime-device-detection: "false" + bpf-lb-sock: "false" + bpf-lb-sock-terminate-pod-connections: "false" + nodeport-addresses: "" + k8s-require-ipv4-pod-cidr: "false" + k8s-require-ipv6-pod-cidr: "false" + enable-node-selector-labels: "false" +kind: ConfigMap +metadata: + annotations: + meta.helm.sh/release-name: cilium + meta.helm.sh/release-namespace: kube-system + labels: + app.kubernetes.io/managed-by: Helm + name: cilium-config + namespace: kube-system diff --git a/test/integration/manifests/cilium/v1.16/cilium-config/cilium-config.yaml b/test/integration/manifests/cilium/v1.16/cilium-config/cilium-config.yaml new file mode 100644 index 0000000000..6cb192b6c8 --- /dev/null +++ b/test/integration/manifests/cilium/v1.16/cilium-config/cilium-config.yaml @@ -0,0 +1,126 @@ +apiVersion: v1 #Not verified, placeholder +data: + agent-not-ready-taint-key: node.cilium.io/agent-not-ready + arping-refresh-period: 30s + auto-direct-node-routes: "false" + bpf-lb-external-clusterip: "false" + bpf-lb-map-max: "65536" + bpf-lb-mode: snat + bpf-map-dynamic-size-ratio: "0.0025" + bpf-policy-map-max: "16384" + bpf-root: /sys/fs/bpf + cgroup-root: /run/cilium/cgroupv2 + cilium-endpoint-gc-interval: 5m0s + cluster-id: "0" + cluster-name: default + debug: "false" + disable-cnp-status-updates: "true" + disable-endpoint-crd: "false" + enable-auto-protect-node-port-range: "true" + enable-bgp-control-plane: "false" + enable-bpf-clock-probe: "true" + enable-endpoint-health-checking: "false" + enable-endpoint-routes: "true" + enable-health-check-nodeport: "true" + enable-health-checking: "true" + enable-host-legacy-routing: "true" + enable-hubble: "false" + enable-ipv4: "true" + enable-ipv4-masquerade: "false" + enable-ipv6: "false" + enable-ipv6-masquerade: "false" + enable-k8s-terminating-endpoint: "true" + enable-l2-neigh-discovery: "true" + enable-l7-proxy: "false" + enable-local-node-route: "false" + enable-local-redirect-policy: "true" # set to true for lrp test + enable-metrics: "true" + enable-policy: default + enable-session-affinity: "true" + enable-svc-source-range-check: "true" + enable-vtep: "false" + enable-well-known-identities: "false" + enable-xt-socket-fallback: "true" + 
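+  # enable-local-redirect-policy above is turned on for the LRP integration
+  # test. For reference, a minimal policy looks roughly like the sketch below
+  # (illustrative only; the service and label names are hypothetical):
+  #   apiVersion: cilium.io/v2
+  #   kind: CiliumLocalRedirectPolicy
+  #   metadata:
+  #     name: lrp-sample
+  #     namespace: kube-system
+  #   spec:
+  #     redirectFrontend:
+  #       serviceMatcher:
+  #         serviceName: node-local-dns
+  #         namespace: kube-system
+  #     redirectBackend:
+  #       localEndpointSelector:
+  #         matchLabels:
+  #           k8s-app: node-local-dns
+  #       toPorts:
+  #       - port: "53"
+  #         protocol: UDP
+  #         name: dns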
identity-allocation-mode: crd + install-iptables-rules: "true" + install-no-conntrack-iptables-rules: "false" + ipam: delegated-plugin + kube-proxy-replacement: "true" + kube-proxy-replacement-healthz-bind-address: "0.0.0.0:10256" + local-router-ipv4: 169.254.23.0 + metrics: +cilium_bpf_map_pressure + monitor-aggregation: medium + monitor-aggregation-flags: all + monitor-aggregation-interval: 5s + node-port-bind-protection: "true" + nodes-gc-interval: 5m0s + operator-api-serve-addr: 127.0.0.1:9234 + operator-prometheus-serve-addr: :9963 + preallocate-bpf-maps: "false" + procfs: /host/proc + prometheus-serve-addr: :9962 + remove-cilium-node-taints: "true" + set-cilium-is-up-condition: "true" + sidecar-istio-proxy-image: cilium/istio_proxy + synchronize-k8s-nodes: "true" + tofqdns-dns-reject-response-code: refused + tofqdns-enable-dns-compression: "true" + tofqdns-endpoint-max-ip-per-hostname: "1000" + tofqdns-idle-connection-grace-period: 0s + tofqdns-max-deferred-connection-deletes: "10000" + tofqdns-min-ttl: "0" + tofqdns-proxy-response-max-delay: 100ms + routing-mode: native + unmanaged-pod-watcher-interval: "15" + vtep-cidr: "" + vtep-endpoint: "" + vtep-mac: "" + vtep-mask: "" + enable-sctp: "false" + external-envoy-proxy: "false" + k8s-client-qps: "5" + k8s-client-burst: "10" + mesh-auth-enabled: "true" + mesh-auth-queue-size: "1024" + mesh-auth-rotated-identities-queue-size: "1024" + mesh-auth-gc-interval: "5m0s" + proxy-connect-timeout: "2" + proxy-max-requests-per-connection: "0" + proxy-max-connection-duration-seconds: "0" + set-cilium-node-taints: "true" + unmanaged-pod-watcher-interval: "15" +## new values added for 1.16 below + enable-ipv4-big-tcp: "false" + enable-ipv6-big-tcp: "false" + enable-masquerade-to-route-source: "false" + enable-health-check-loadbalancer-ip: "false" + bpf-lb-acceleration: "disabled" + enable-k8s-networkpolicy: "true" + cni-exclusive: "false" # Cilium takes ownership of /etc/cni/net.d, pods cannot be scheduled with any other cni if cilium is down + cni-log-file: "/var/run/cilium/cilium-cni.log" + ipam-cilium-node-update-rate: "15s" + egress-gateway-reconciliation-trigger-interval: "1s" + nat-map-stats-entries: "32" + nat-map-stats-interval: "30s" + bpf-events-drop-enabled: "true" # exposes drop events to cilium monitor/hubble + bpf-events-policy-verdict-enabled: "true" # exposes policy verdict events to cilium monitor/hubble + bpf-events-trace-enabled: "true" # exposes trace events to cilium monitor/hubble + enable-tcx: "false" # attach endpoint programs with tcx if supported by kernel + datapath-mode: "veth" + direct-routing-skip-unreachable: "false" + enable-runtime-device-detection: "false" + bpf-lb-sock: "false" + bpf-lb-sock-terminate-pod-connections: "false" + nodeport-addresses: "" + k8s-require-ipv4-pod-cidr: "false" + k8s-require-ipv6-pod-cidr: "false" + enable-node-selector-labels: "false" +kind: ConfigMap +metadata: + annotations: + meta.helm.sh/release-name: cilium + meta.helm.sh/release-namespace: kube-system + labels: + app.kubernetes.io/managed-by: Helm + name: cilium-config + namespace: kube-system diff --git a/test/integration/manifests/cilium/v1.16/cilium-operator/files/clusterrole.yaml b/test/integration/manifests/cilium/v1.16/cilium-operator/files/clusterrole.yaml new file mode 100644 index 0000000000..7acf4fd3be --- /dev/null +++ b/test/integration/manifests/cilium/v1.16/cilium-operator/files/clusterrole.yaml @@ -0,0 +1,222 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cilium-operator + labels: 
+    app.kubernetes.io/part-of: cilium
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  verbs:
+  - get
+  - list
+  - watch
+  # to automatically delete [core|kube]dns pods so that they start being
+  # managed by Cilium
+  - delete
+- apiGroups:
+  - ""
+  resources:
+  - nodes
+  verbs:
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  # To remove node taints
+  - nodes
+  # To set NetworkUnavailable false on startup
+  - nodes/status
+  verbs:
+  - patch
+- apiGroups:
+  - discovery.k8s.io
+  resources:
+  - endpointslices
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  # to perform LB IP allocation for BGP
+  - services/status
+  verbs:
+  - update
+  - patch
+- apiGroups:
+  - ""
+  resources:
+  # to check apiserver connectivity
+  - namespaces
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  # to perform the translation of a CNP that contains `ToGroup` to its endpoints
+  - services
+  - endpoints
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - cilium.io
+  resources:
+  - ciliumnetworkpolicies
+  - ciliumclusterwidenetworkpolicies
+  verbs:
+  # Create auto-generated CNPs and CCNPs from Policies that have 'toGroups'
+  - create
+  - update
+  - deletecollection
+  # To update the status of the CNPs and CCNPs
+  - patch
+  - get
+  - list
+  - watch
+- apiGroups:
+  - cilium.io
+  resources:
+  - ciliumnetworkpolicies/status
+  - ciliumclusterwidenetworkpolicies/status
+  verbs:
+  # Update the auto-generated CNPs and CCNPs status.
+  - update
+  - patch
+- apiGroups:
+  - cilium.io
+  resources:
+  - ciliumendpoints
+  - ciliumidentities
+  verbs:
+  # To perform garbage collection of such resources
+  - delete
+  - list
+  - watch
+- apiGroups:
+  - cilium.io
+  resources:
+  - ciliumidentities
+  verbs:
+  # To synchronize garbage collection of such resources
+  - update
+- apiGroups:
+  - cilium.io
+  resources:
+  - ciliumnodes
+  verbs:
+  - create
+  - update
+  - get
+  - list
+  - watch
+  # To perform CiliumNode garbage collection
+  - delete
+- apiGroups:
+  - cilium.io
+  resources:
+  - ciliumnodes/status
+  verbs:
+  - update
+- apiGroups:
+  - cilium.io
+  resources:
+  - ciliumendpointslices
+  - ciliumenvoyconfigs
+  - ciliumbgppeerconfigs
+  - ciliumbgpadvertisements
+  - ciliumbgpnodeconfigs
+  verbs:
+  - create
+  - update
+  - get
+  - list
+  - watch
+  - delete
+  - patch
+- apiGroups:
+  - apiextensions.k8s.io
+  resources:
+  - customresourcedefinitions
+  verbs:
+  - create
+  - get
+  - list
+  - watch
+- apiGroups:
+  - apiextensions.k8s.io
+  resources:
+  - customresourcedefinitions
+  verbs:
+  - update
+  resourceNames:
+  - ciliumbgppeeringpolicies.cilium.io
+  - ciliumclusterwideenvoyconfigs.cilium.io
+  - ciliumclusterwidenetworkpolicies.cilium.io
+  - ciliumegressgatewaypolicies.cilium.io
+  - ciliumendpoints.cilium.io
+  - ciliumendpointslices.cilium.io
+  - ciliumenvoyconfigs.cilium.io
+  - ciliumexternalworkloads.cilium.io
+  - ciliumidentities.cilium.io
+  - ciliumlocalredirectpolicies.cilium.io
+  - ciliumnetworkpolicies.cilium.io
+  - ciliumnodes.cilium.io
+  - ciliumnodeconfigs.cilium.io
+  - ciliumloadbalancerippools.cilium.io
+  - ciliumcidrgroups.cilium.io
+  - ciliuml2announcementpolicies.cilium.io
+  - ciliumpodippools.cilium.io
+  - ciliumbgpclusterconfigs.cilium.io
+  - ciliumbgppeerconfigs.cilium.io
+  - ciliumbgpadvertisements.cilium.io
+  - ciliumbgpnodeconfigs.cilium.io
+  - ciliumbgpnodeconfigoverrides.cilium.io
+- apiGroups:
+  - cilium.io
+  resources:
+  - ciliumloadbalancerippools
+  - ciliumpodippools
+  - ciliumbgppeeringpolicies
+  - 
ciliumbgpclusterconfigs + - ciliumbgpnodeconfigoverrides + verbs: + - get + - list + - watch +- apiGroups: + - cilium.io + resources: + - ciliumloadbalancerippools/status + - ciliumpodippools + verbs: + - patch +- apiGroups: + - cilium.io + resources: + - ciliumpodippools + verbs: + - create +# For cilium-operator running in HA mode. +# +# Cilium operator running in HA mode requires the use of ResourceLock for Leader Election +# between multiple running instances. +# The preferred way of doing this is to use LeasesResourceLock as edits to Leases are less +# common and fewer objects in the cluster watch "all Leases". +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update diff --git a/test/integration/manifests/cilium/v1.16/cilium-operator/files/clusterrolebinding.yaml b/test/integration/manifests/cilium/v1.16/cilium-operator/files/clusterrolebinding.yaml new file mode 100644 index 0000000000..eb164361d4 --- /dev/null +++ b/test/integration/manifests/cilium/v1.16/cilium-operator/files/clusterrolebinding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cilium-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cilium-operator +subjects: +- kind: ServiceAccount + name: "cilium-operator" + namespace: kube-system diff --git a/test/integration/manifests/cilium/v1.16/cilium-operator/files/serviceaccount.yaml b/test/integration/manifests/cilium/v1.16/cilium-operator/files/serviceaccount.yaml new file mode 100644 index 0000000000..be4bfc048a --- /dev/null +++ b/test/integration/manifests/cilium/v1.16/cilium-operator/files/serviceaccount.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: "cilium-operator" + namespace: kube-system diff --git a/test/integration/manifests/cilium/v1.16/cilium-operator/templates/deployment.yaml b/test/integration/manifests/cilium/v1.16/cilium-operator/templates/deployment.yaml new file mode 100644 index 0000000000..2f96b4de99 --- /dev/null +++ b/test/integration/manifests/cilium/v1.16/cilium-operator/templates/deployment.yaml @@ -0,0 +1,163 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cilium-operator + namespace: kube-system + labels: + io.cilium/app: operator + name: cilium-operator +spec: + replicas: 2 + selector: + matchLabels: + io.cilium/app: operator + name: cilium-operator + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + annotations: + prometheus.io/port: "9963" + prometheus.io/scrape: "true" + labels: + io.cilium/app: operator + name: cilium-operator + kubernetes.azure.com/ebpf-dataplane: cilium + spec: + containers: + - name: cilium-operator + image: $CILIUM_IMAGE_REGISTRY/cilium/operator-generic:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + command: + - cilium-operator-generic + args: + - --config-dir=/tmp/cilium/config-map + - --debug=$(CILIUM_DEBUG) + - --identity-gc-interval=0m20s + - --identity-heartbeat-timeout=0m20s + - --enable-cilium-endpoint-slice=true + - --ces-slice-mode=cesSliceModeFCFS + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_DEBUG + valueFrom: + configMapKeyRef: + key: debug + name: cilium-config + optional: true + ports: + - name: prometheus + containerPort: 9963 + hostPort: 9963 + protocol: TCP + securityContext: + 
seLinuxOptions:
+            level: 's0'
+            # Running with spc_t since we have removed the privileged mode.
+            # Users can change it to a different type as long as they have the
+            # type available on the system.
+            type: 'spc_t'
+          capabilities:
+            add:
+            # Use to set socket permission
+            - CHOWN
+            # Used to terminate envoy child process
+            - KILL
+            # Used since cilium modifies routing tables, etc...
+            - NET_ADMIN
+            # Used since cilium creates raw sockets, etc...
+            - NET_RAW
+            # Used since cilium monitor uses mmap
+            - IPC_LOCK
+            # Used in iptables. Consider removing once we are iptables-free
+            - SYS_MODULE
+            # We need it for now but might not need it for >= 5.11, especially
+            # for the 'SYS_RESOURCE'.
+            # In >= 5.8 there are already BPF and PERFMON capabilities
+            - SYS_ADMIN
+            # Could be an alternative to SYS_ADMIN for the RLIMIT_NPROC
+            - SYS_RESOURCE
+            # Both PERFMON and BPF require kernel 5.8, container runtime
+            # cri-o >= v1.22.0 or containerd >= v1.5.0.
+            # If available, SYS_ADMIN can be removed.
+            #- PERFMON
+            #- BPF
+            - DAC_OVERRIDE
+            - FOWNER
+            - SETGID
+            - SETUID
+            drop:
+            - ALL
+        livenessProbe:
+          httpGet:
+            host: "127.0.0.1"
+            path: /healthz
+            port: 9234
+            scheme: HTTP
+          initialDelaySeconds: 60
+          periodSeconds: 10
+          timeoutSeconds: 3
+        volumeMounts:
+        - name: cilium-config-path
+          mountPath: /tmp/cilium/config-map
+          readOnly: true
+      hostNetwork: true
+      restartPolicy: Always
+      priorityClassName: system-cluster-critical
+      serviceAccount: "cilium-operator"
+      serviceAccountName: "cilium-operator"
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+            - matchExpressions:
+              - key: kubernetes.azure.com/cluster
+                operator: Exists
+              - key: type
+                operator: NotIn
+                values:
+                - virtual-kubelet
+              - key: kubernetes.io/os
+                operator: In
+                values:
+                - linux
+          preferredDuringSchedulingIgnoredDuringExecution:
+          - weight: 100
+            preference:
+              matchExpressions:
+              - key: kubernetes.azure.com/mode
+                operator: In
+                values:
+                - system
+      # In HA mode, cilium-operator pods must not be scheduled on the same
+      # node as they will clash with each other.
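+      # A quick way to sanity-check the spread once this chart is applied
+      # (illustrative command, not part of this manifest):
+      #   kubectl -n kube-system get pods -l io.cilium/app=operator -o wide
+      # With the required podAntiAffinity below, the two replicas should land
+      # on different nodes.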
+ podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + io.cilium/app: operator + topologyKey: kubernetes.io/hostname + tolerations: + - key: "CriticalAddonsOnly" + operator: "Exists" + - operator: "Exists" + effect: NoExecute + - operator: "Exists" + effect: NoSchedule + volumes: + # To read the configuration from the config map + - name: cilium-config-path + configMap: + name: cilium-config diff --git a/test/integration/manifests/cilium/v1.16/hubble/hubble-peer-svc.yaml b/test/integration/manifests/cilium/v1.16/hubble/hubble-peer-svc.yaml new file mode 100644 index 0000000000..6ba733885c --- /dev/null +++ b/test/integration/manifests/cilium/v1.16/hubble/hubble-peer-svc.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + k8s-app: cilium + name: hubble-peer + namespace: kube-system +spec: + internalTrafficPolicy: Cluster + ports: + - name: peer-service + port: 443 + protocol: TCP + targetPort: 4244 + selector: + k8s-app: cilium + sessionAffinity: None + type: ClusterIP diff --git a/test/integration/manifests/cilium/v1.17/cilium-agent/files/clusterrole.yaml b/test/integration/manifests/cilium/v1.17/cilium-agent/files/clusterrole.yaml new file mode 100644 index 0000000000..2bc15412c0 --- /dev/null +++ b/test/integration/manifests/cilium/v1.17/cilium-agent/files/clusterrole.yaml @@ -0,0 +1,112 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cilium + labels: + app.kubernetes.io/part-of: cilium +rules: +- apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - namespaces + - services + - pods + - endpoints + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - list + - watch + # This is used when validating policies in preflight. This will need to stay + # until we figure out how to avoid "get" inside the preflight, and then + # should be removed ideally. 
+ - get +- apiGroups: + - cilium.io + resources: + - ciliumbgppeeringpolicies + - ciliumclusterwideenvoyconfigs + - ciliumclusterwidenetworkpolicies + - ciliumegressgatewaypolicies + - ciliumendpoints + - ciliumendpointslices + - ciliumenvoyconfigs + - ciliumidentities + - ciliumlocalredirectpolicies + - ciliumnetworkpolicies + - ciliumnodes + - ciliumnodeconfigs + - ciliumloadbalancerippools + - ciliumcidrgroups + - ciliuml2announcementpolicies + - ciliumpodippools + - ciliumbgpnodeconfigs + - ciliumbgpadvertisements + - ciliumbgppeerconfigs + verbs: + - list + - watch +- apiGroups: + - cilium.io + resources: + - ciliumidentities + - ciliumendpoints + - ciliumnodes + verbs: + - create +- apiGroups: + - cilium.io + # To synchronize garbage collection of such resources + resources: + - ciliumidentities + verbs: + - update +- apiGroups: + - cilium.io + resources: + - ciliumendpoints + verbs: + - delete + - get +- apiGroups: + - cilium.io + resources: + - ciliumnodes + - ciliumnodes/status + verbs: + - get + - update +- apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies/status + - ciliumclusterwidenetworkpolicies/status + - ciliumendpoints/status + - ciliumendpoints + - ciliuml2announcementpolicies/status + - ciliumbgpnodeconfigs/status + verbs: + - patch diff --git a/test/integration/manifests/cilium/v1.17/cilium-agent/files/clusterrolebinding.yaml b/test/integration/manifests/cilium/v1.17/cilium-agent/files/clusterrolebinding.yaml new file mode 100644 index 0000000000..93a6e06cdc --- /dev/null +++ b/test/integration/manifests/cilium/v1.17/cilium-agent/files/clusterrolebinding.yaml @@ -0,0 +1,14 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cilium + labels: + app.kubernetes.io/part-of: cilium +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cilium +subjects: +- kind: ServiceAccount + name: "cilium" + namespace: kube-system diff --git a/test/integration/manifests/cilium/v1.17/cilium-agent/files/serviceaccount.yaml b/test/integration/manifests/cilium/v1.17/cilium-agent/files/serviceaccount.yaml new file mode 100644 index 0000000000..f7097b1616 --- /dev/null +++ b/test/integration/manifests/cilium/v1.17/cilium-agent/files/serviceaccount.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: "cilium" + namespace: kube-system diff --git a/test/integration/manifests/cilium/v1.17/cilium-agent/templates/daemonset-dualstack.yaml b/test/integration/manifests/cilium/v1.17/cilium-agent/templates/daemonset-dualstack.yaml new file mode 100644 index 0000000000..e1cc49a2c5 --- /dev/null +++ b/test/integration/manifests/cilium/v1.17/cilium-agent/templates/daemonset-dualstack.yaml @@ -0,0 +1,442 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + annotations: + meta.helm.sh/release-name: cilium + meta.helm.sh/release-namespace: kube-system + labels: + app.kubernetes.io/managed-by: Helm + k8s-app: cilium + app.kubernetes.io/part-of: cilium + name: cilium + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: cilium + template: + metadata: + annotations: + prometheus.io/port: "9962" + prometheus.io/scrape: "true" + creationTimestamp: null + labels: + k8s-app: cilium + kubernetes.azure.com/ebpf-dataplane: cilium + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.azure.com/cluster + operator: Exists + - key: type + operator: NotIn + values: + - virtual-kubelet + - key: kubernetes.io/os + 
operator: In + values: + - linux + containers: + - args: + - --config-dir=/tmp/cilium/config-map + command: + - cilium-agent + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_CLUSTERMESH_CONFIG + value: /var/lib/cilium/clustermesh/ + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 10 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: "true" + path: /healthz + port: 9879 + scheme: HTTP + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + name: cilium-agent + ports: + - containerPort: 9962 + hostPort: 9962 + name: prometheus + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: "true" + path: /healthz + port: 9879 + scheme: HTTP + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + resources: {} + securityContext: + appArmorProfile: + type: Unconfined + capabilities: + add: + - CHOWN + - KILL + - NET_ADMIN + - NET_RAW + - IPC_LOCK + - SYS_MODULE + - SYS_ADMIN + - SYS_RESOURCE + - DAC_OVERRIDE + - FOWNER + - SETGID + - SETUID + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + startupProbe: + failureThreshold: 105 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: "true" + path: /healthz + port: 9879 + scheme: HTTP + periodSeconds: 2 + successThreshold: 1 + timeoutSeconds: 1 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /host/proc/sys/net + name: host-proc-sys-net + - mountPath: /host/proc/sys/kernel + name: host-proc-sys-kernel + - mountPath: /sys/fs/bpf + mountPropagation: HostToContainer + name: bpf-maps + - mountPath: /var/run/cilium + name: cilium-run + - mountPath: /host/etc/cni/net.d + name: etc-cni-netd + - mountPath: /var/lib/cilium/clustermesh + name: clustermesh-secrets + readOnly: true + - mountPath: /tmp/cilium/config-map + name: cilium-config-path + readOnly: true + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + - mountPath: /var/run/cilium/netns + name: cilium-netns + mountPropagation: HostToContainer + dnsPolicy: ClusterFirst + hostNetwork: true + initContainers: + - name: install-cni-binaries + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + command: + - "/install-plugin.sh" + securityContext: + seLinuxOptions: + level: 's0' + # Running with spc_t since we have removed the privileged mode. + # Users can change it to a different type as long as they have the + # type available on the system. 
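+          # spc_t is the "super privileged container" type shipped by
+          # container-selinux. One way to check that a type exists on a node
+          # (illustrative command; assumes the setools package is installed):
+          #   seinfo -t spc_t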
+ type: 'spc_t' + capabilities: + drop: + - ALL + volumeMounts: + - name: cni-path + mountPath: /host/opt/cni/bin + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-mount /hostbin/cilium-mount; + nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT; + rm /hostbin/cilium-mount + env: + - name: CGROUP_ROOT + value: /run/cilium/cgroupv2 + - name: BIN_PATH + value: /opt/cni/bin + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: mount-cgroup + resources: {} + securityContext: + appArmorProfile: + type: Unconfined + capabilities: + add: + - SYS_ADMIN + - SYS_CHROOT + - SYS_PTRACE + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin + name: cni-path + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix; + nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix"; + rm /hostbin/cilium-sysctlfix + env: + - name: BIN_PATH + value: /opt/cni/bin + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: apply-sysctl-overwrites + resources: {} + securityContext: + appArmorProfile: + type: Unconfined + capabilities: + add: + - SYS_ADMIN + - SYS_CHROOT + - SYS_PTRACE + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin + name: cni-path + - args: + - mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf + command: + - /bin/bash + - -c + - -- + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: mount-bpf-fs + resources: {} + securityContext: + privileged: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /sys/fs/bpf + mountPropagation: Bidirectional + name: bpf-maps + - command: + - /init-container.sh + env: + - name: CILIUM_ALL_STATE + valueFrom: + configMapKeyRef: + key: clean-cilium-state + name: cilium-config + optional: true + - name: CILIUM_BPF_STATE + valueFrom: + configMapKeyRef: + key: clean-cilium-bpf-state + name: cilium-config + optional: true + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: clean-cilium-state + resources: + requests: + cpu: 100m + memory: 100Mi + securityContext: + appArmorProfile: + type: Unconfined + capabilities: + add: + - NET_ADMIN + - SYS_MODULE + - SYS_ADMIN + - SYS_RESOURCE + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /sys/fs/bpf + name: bpf-maps + - mountPath: /run/cilium/cgroupv2 + mountPropagation: HostToContainer + name: cilium-cgroup + - mountPath: /var/run/cilium + name: cilium-run + volumeMounts: + - mountPath: /host/etc/systemd + name: host-etc-systemd + - mountPath: /host/lib/systemd + name: host-lib-systemd + readOnly: true + - mountPath: /host/usr/lib + name: host-usr-lib + readOnly: true + - name: start-ipv6-hp-bpf + image: $IPV6_IMAGE_REGISTRY/ipv6-hp-bpf:$IPV6_HP_BPF_VERSION + imagePullPolicy: IfNotPresent + command: [/ipv6-hp-bpf] + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/log + 
name: ipv6-hp-bpf + - name: block-wireserver + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + command: + - /bin/bash + - -cx + - | + iptables -t mangle -C FORWARD -d 168.63.129.16 -p tcp --dport 80 -j DROP + status=$? + set -e + if [ $status -eq 0 ]; then + echo "Skip adding iptables as it already exists" + else + iptables -t mangle -I FORWARD -d 168.63.129.16 -p tcp --dport 80 -j DROP + fi + securityContext: + capabilities: + add: + - NET_ADMIN + drop: + - ALL + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: cilium + serviceAccountName: cilium + terminationGracePeriodSeconds: 1 + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + - effect: NoSchedule + operator: Exists + volumes: + - hostPath: + path: /var/log + type: DirectoryOrCreate + name: ipv6-hp-bpf + - hostPath: + path: /etc/systemd + type: DirectoryOrCreate + name: host-etc-systemd + - hostPath: + path: /lib/systemd + type: DirectoryOrCreate + name: host-lib-systemd + - hostPath: + path: /usr/lib + type: DirectoryOrCreate + name: host-usr-lib + - hostPath: + path: /var/run/cilium + type: DirectoryOrCreate + name: cilium-run + - hostPath: + path: /sys/fs/bpf + type: DirectoryOrCreate + name: bpf-maps + - hostPath: + path: /proc + type: Directory + name: hostproc + - hostPath: + path: /run/cilium/cgroupv2 + type: DirectoryOrCreate + name: cilium-cgroup + - hostPath: + path: /opt/cni/bin + type: DirectoryOrCreate + name: cni-path + - hostPath: + path: /etc/cni/net.d + type: DirectoryOrCreate + name: etc-cni-netd + - hostPath: + path: /lib/modules + type: "" + name: lib-modules + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + - name: clustermesh-secrets + secret: + defaultMode: 256 + optional: true + secretName: cilium-clustermesh + - configMap: + defaultMode: 420 + name: cilium-config + name: cilium-config-path + - hostPath: + path: /proc/sys/net + type: Directory + name: host-proc-sys-net + - hostPath: + path: /proc/sys/kernel + type: Directory + name: host-proc-sys-kernel + - hostPath: + path: /var/run/netns + type: DirectoryOrCreate + name: cilium-netns + updateStrategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 2 + type: RollingUpdate diff --git a/test/integration/manifests/cilium/v1.17/cilium-agent/templates/daemonset.yaml b/test/integration/manifests/cilium/v1.17/cilium-agent/templates/daemonset.yaml new file mode 100644 index 0000000000..f3e6e7093f --- /dev/null +++ b/test/integration/manifests/cilium/v1.17/cilium-agent/templates/daemonset.yaml @@ -0,0 +1,429 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + annotations: + meta.helm.sh/release-name: cilium + meta.helm.sh/release-namespace: kube-system + labels: + app.kubernetes.io/managed-by: Helm + k8s-app: cilium + app.kubernetes.io/part-of: cilium + name: cilium + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: cilium + template: + metadata: + annotations: + prometheus.io/port: "9962" + prometheus.io/scrape: "true" + creationTimestamp: null + labels: + k8s-app: cilium + kubernetes.azure.com/ebpf-dataplane: cilium + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.azure.com/cluster + operator: Exists + - key: type + operator: NotIn + values: + - virtual-kubelet + - key: 
kubernetes.io/os + operator: In + values: + - linux + containers: + - args: + - --config-dir=/tmp/cilium/config-map + command: + - cilium-agent + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_CLUSTERMESH_CONFIG + value: /var/lib/cilium/clustermesh/ + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 10 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: "true" + path: /healthz + port: 9879 + scheme: HTTP + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + name: cilium-agent + ports: + - containerPort: 9962 + hostPort: 9962 + name: prometheus + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: "true" + path: /healthz + port: 9879 + scheme: HTTP + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + resources: {} + securityContext: + appArmorProfile: + type: Unconfined + capabilities: + add: + - CHOWN + - KILL + - NET_ADMIN + - NET_RAW + - IPC_LOCK + - SYS_MODULE + - SYS_ADMIN + - SYS_RESOURCE + - DAC_OVERRIDE + - FOWNER + - SETGID + - SETUID + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + startupProbe: + failureThreshold: 105 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: "true" + path: /healthz + port: 9879 + scheme: HTTP + periodSeconds: 2 + successThreshold: 1 + timeoutSeconds: 1 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /host/proc/sys/net + name: host-proc-sys-net + - mountPath: /host/proc/sys/kernel + name: host-proc-sys-kernel + - mountPath: /sys/fs/bpf + mountPropagation: HostToContainer + name: bpf-maps + - mountPath: /var/run/cilium + name: cilium-run + - mountPath: /host/etc/cni/net.d + name: etc-cni-netd + - mountPath: /var/lib/cilium/clustermesh + name: clustermesh-secrets + readOnly: true + - mountPath: /tmp/cilium/config-map + name: cilium-config-path + readOnly: true + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + - mountPath: /var/run/cilium/netns + name: cilium-netns + mountPropagation: HostToContainer + dnsPolicy: ClusterFirst + hostNetwork: true + initContainers: + - name: install-cni-binaries + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + command: + - "/install-plugin.sh" + securityContext: + seLinuxOptions: + level: 's0' + # Running with spc_t since we have removed the privileged mode. + # Users can change it to a different type as long as they have the + # type available on the system. 
+ type: 'spc_t' + capabilities: + drop: + - ALL + volumeMounts: + - name: cni-path + mountPath: /host/opt/cni/bin + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-mount /hostbin/cilium-mount; + nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT; + rm /hostbin/cilium-mount + env: + - name: CGROUP_ROOT + value: /run/cilium/cgroupv2 + - name: BIN_PATH + value: /opt/cni/bin + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: mount-cgroup + resources: {} + securityContext: + appArmorProfile: + type: Unconfined + capabilities: + add: + - SYS_ADMIN + - SYS_CHROOT + - SYS_PTRACE + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin + name: cni-path + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix; + nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix"; + rm /hostbin/cilium-sysctlfix + env: + - name: BIN_PATH + value: /opt/cni/bin + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: apply-sysctl-overwrites + resources: {} + securityContext: + appArmorProfile: + type: Unconfined + capabilities: + add: + - SYS_ADMIN + - SYS_CHROOT + - SYS_PTRACE + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin + name: cni-path + - args: + - mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf + command: + - /bin/bash + - -c + - -- + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: mount-bpf-fs + resources: {} + securityContext: + privileged: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /sys/fs/bpf + mountPropagation: Bidirectional + name: bpf-maps + - command: + - /init-container.sh + env: + - name: CILIUM_ALL_STATE + valueFrom: + configMapKeyRef: + key: clean-cilium-state + name: cilium-config + optional: true + - name: CILIUM_BPF_STATE + valueFrom: + configMapKeyRef: + key: clean-cilium-bpf-state + name: cilium-config + optional: true + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: clean-cilium-state + resources: + requests: + cpu: 100m + memory: 100Mi + securityContext: + appArmorProfile: + type: Unconfined + capabilities: + add: + - NET_ADMIN + - SYS_MODULE + - SYS_ADMIN + - SYS_RESOURCE + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /sys/fs/bpf + name: bpf-maps + - mountPath: /run/cilium/cgroupv2 + mountPropagation: HostToContainer + name: cilium-cgroup + - mountPath: /var/run/cilium + name: cilium-run + volumeMounts: + - mountPath: /host/etc/systemd + name: host-etc-systemd + - mountPath: /host/lib/systemd + name: host-lib-systemd + readOnly: true + - mountPath: /host/usr/lib + name: host-usr-lib + readOnly: true + - name: block-wireserver + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + command: + - /bin/bash + - -cx + - | + iptables -t mangle -C FORWARD -d 168.63.129.16 -p tcp --dport 80 
-j DROP + status=$? + set -e + if [ $status -eq 0 ]; then + echo "Skip adding iptables as it already exists" + else + iptables -t mangle -I FORWARD -d 168.63.129.16 -p tcp --dport 80 -j DROP + fi + securityContext: + capabilities: + add: + - NET_ADMIN + drop: + - ALL + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: cilium + serviceAccountName: cilium + terminationGracePeriodSeconds: 1 + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + - effect: NoSchedule + operator: Exists + volumes: + - hostPath: + path: /etc/systemd + type: DirectoryOrCreate + name: host-etc-systemd + - hostPath: + path: /lib/systemd + type: DirectoryOrCreate + name: host-lib-systemd + - hostPath: + path: /usr/lib + type: DirectoryOrCreate + name: host-usr-lib + - hostPath: + path: /var/run/cilium + type: DirectoryOrCreate + name: cilium-run + - hostPath: + path: /sys/fs/bpf + type: DirectoryOrCreate + name: bpf-maps + - hostPath: + path: /proc + type: Directory + name: hostproc + - hostPath: + path: /run/cilium/cgroupv2 + type: DirectoryOrCreate + name: cilium-cgroup + - hostPath: + path: /opt/cni/bin + type: DirectoryOrCreate + name: cni-path + - hostPath: + path: /etc/cni/net.d + type: DirectoryOrCreate + name: etc-cni-netd + - hostPath: + path: /lib/modules + type: "" + name: lib-modules + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + - name: clustermesh-secrets + secret: + defaultMode: 256 + optional: true + secretName: cilium-clustermesh + - configMap: + defaultMode: 420 + name: cilium-config + name: cilium-config-path + - hostPath: + path: /proc/sys/net + type: Directory + name: host-proc-sys-net + - hostPath: + path: /proc/sys/kernel + type: Directory + name: host-proc-sys-kernel + - hostPath: + path: /var/run/netns + type: DirectoryOrCreate + name: cilium-netns + updateStrategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 2 + type: RollingUpdate diff --git a/test/integration/manifests/cilium/v1.17/cilium-config/cilium-config-dualstack.yaml b/test/integration/manifests/cilium/v1.17/cilium-config/cilium-config-dualstack.yaml new file mode 100644 index 0000000000..70695460c1 --- /dev/null +++ b/test/integration/manifests/cilium/v1.17/cilium-config/cilium-config-dualstack.yaml @@ -0,0 +1,143 @@ +apiVersion: v1 #Not verified, placeholder +data: + agent-not-ready-taint-key: node.cilium.io/agent-not-ready + arping-refresh-period: 30s + auto-direct-node-routes: "false" + bpf-filter-priority: "2" + bpf-lb-external-clusterip: "false" + bpf-lb-map-max: "65536" + bpf-lb-mode: snat + bpf-map-dynamic-size-ratio: "0.0025" + bpf-policy-map-max: "16384" + bpf-root: /sys/fs/bpf + cgroup-root: /run/cilium/cgroupv2 + cilium-endpoint-gc-interval: 5m0s + cluster-id: "0" + cluster-name: default + debug: "false" + disable-cnp-status-updates: "true" + disable-endpoint-crd: "false" + dnsproxy-enable-transparent-mode: "false" + enable-auto-protect-node-port-range: "true" + enable-bgp-control-plane: "false" + enable-bpf-clock-probe: "true" + enable-endpoint-health-checking: "false" + enable-endpoint-routes: "true" + enable-health-check-nodeport: "true" + enable-health-checking: "true" + enable-host-legacy-routing: "true" + enable-hubble: "false" + enable-ipv4: "true" + enable-ipv4-masquerade: "false" + enable-ipv6: "true" + enable-ipv6-masquerade: "false" + enable-k8s-terminating-endpoint: "true" + 
enable-l2-neigh-discovery: "true" + enable-l7-proxy: "false" + enable-local-node-route: "false" + enable-local-redirect-policy: "false" + enable-metrics: "true" + enable-policy: default + enable-remote-node-identity: "true" + enable-session-affinity: "true" + enable-svc-source-range-check: "true" + enable-vtep: "false" + enable-well-known-identities: "false" + enable-xt-socket-fallback: "true" + identity-allocation-mode: crd + install-iptables-rules: "true" + install-no-conntrack-iptables-rules: "false" + ipam: delegated-plugin + kube-proxy-replacement: "true" + kube-proxy-replacement-healthz-bind-address: "0.0.0.0:10256" + local-router-ipv4: 169.254.23.0 + local-router-ipv6: "fe80::" + metrics: +cilium_bpf_map_pressure + monitor-aggregation: medium + monitor-aggregation-flags: all + monitor-aggregation-interval: 5s + node-port-bind-protection: "true" + nodes-gc-interval: 5m0s + operator-api-serve-addr: 127.0.0.1:9234 + operator-prometheus-serve-addr: :9963 + preallocate-bpf-maps: "false" + procfs: /host/proc + prometheus-serve-addr: :9962 + remove-cilium-node-taints: "true" + set-cilium-is-up-condition: "true" + sidecar-istio-proxy-image: cilium/istio_proxy + synchronize-k8s-nodes: "true" + tofqdns-dns-reject-response-code: refused + tofqdns-enable-dns-compression: "true" + tofqdns-endpoint-max-ip-per-hostname: "1000" + tofqdns-idle-connection-grace-period: 0s + tofqdns-max-deferred-connection-deletes: "10000" + tofqdns-min-ttl: "0" + tofqdns-proxy-response-max-delay: 100ms + routing-mode: native + unmanaged-pod-watcher-interval: "15" + vtep-cidr: "" + vtep-endpoint: "" + vtep-mac: "" + vtep-mask: "" + enable-sctp: "false" + external-envoy-proxy: "false" + k8s-client-qps: "10" + k8s-client-burst: "20" + mesh-auth-enabled: "true" + mesh-auth-queue-size: "1024" + mesh-auth-rotated-identities-queue-size: "1024" + mesh-auth-gc-interval: "5m0s" + proxy-connect-timeout: "2" + proxy-max-requests-per-connection: "0" + proxy-max-connection-duration-seconds: "0" + set-cilium-node-taints: "true" + unmanaged-pod-watcher-interval: "15" +## new values added for 1.16 below + enable-ipv4-big-tcp: "false" + enable-ipv6-big-tcp: "false" + enable-masquerade-to-route-source: "false" + enable-health-check-loadbalancer-ip: "false" + bpf-lb-acceleration: "disabled" + enable-k8s-networkpolicy: "true" + cni-exclusive: "false" # Cilium takes ownership of /etc/cni/net.d, pods cannot be scheduled with any other cni if cilium is down + cni-log-file: "/var/run/cilium/cilium-cni.log" + ipam-cilium-node-update-rate: "15s" + egress-gateway-reconciliation-trigger-interval: "1s" + nat-map-stats-entries: "32" + nat-map-stats-interval: "30s" + bpf-events-drop-enabled: "true" # exposes drop events to cilium monitor/hubble + bpf-events-policy-verdict-enabled: "true" # exposes policy verdict events to cilium monitor/hubble + bpf-events-trace-enabled: "true" # exposes trace events to cilium monitor/hubble + enable-tcx: "false" # attach endpoint programs with tcx if supported by kernel + datapath-mode: "veth" + direct-routing-skip-unreachable: "false" + enable-runtime-device-detection: "false" + bpf-lb-sock: "false" + bpf-lb-sock-terminate-pod-connections: "false" + nodeport-addresses: "" + k8s-require-ipv4-pod-cidr: "false" + k8s-require-ipv6-pod-cidr: "false" + enable-node-selector-labels: "false" +## new values for 1.17 + ces-slice-mode: "fcfs" + enable-cilium-endpoint-slice: "true" + bpf-lb-source-range-all-types: "false" + bpf-algorithm-annotation: "false" + bpf-lb-mode-annotation: "false" + enable-experimental-lb: "false" + 
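+  # ces-slice-mode above selects first-come-first-served grouping of
+  # CiliumEndpointSlices; note that the v1.16 operator deployment in this
+  # diff passes the same mode as --ces-slice-mode=cesSliceModeFCFS, while the
+  # v1.17 deployment uses --ces-slice-mode=fcfs.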
enable-endpoint-lockdown-on-policy-overflow: "false" + health-check-icmp-failure-threshold: "3" + enable-internal-traffic-policy: "true" + enable-lb-ipam: "true" + enable-non-default-deny-policies: "true" + enable-source-ip-verification: "true" +kind: ConfigMap +metadata: + annotations: + meta.helm.sh/release-name: cilium + meta.helm.sh/release-namespace: kube-system + labels: + app.kubernetes.io/managed-by: Helm + name: cilium-config + namespace: kube-system \ No newline at end of file diff --git a/test/integration/manifests/cilium/v1.17/cilium-config/cilium-config-hubble.yaml b/test/integration/manifests/cilium/v1.17/cilium-config/cilium-config-hubble.yaml new file mode 100644 index 0000000000..be39cf7e97 --- /dev/null +++ b/test/integration/manifests/cilium/v1.17/cilium-config/cilium-config-hubble.yaml @@ -0,0 +1,145 @@ +apiVersion: v1 #Not verified, placeholder +data: + agent-not-ready-taint-key: node.cilium.io/agent-not-ready + arping-refresh-period: 30s + auto-direct-node-routes: "false" + bpf-lb-external-clusterip: "false" + bpf-lb-map-max: "65536" + bpf-lb-mode: snat + bpf-map-dynamic-size-ratio: "0.0025" + bpf-policy-map-max: "16384" + bpf-root: /sys/fs/bpf + cgroup-root: /run/cilium/cgroupv2 + cilium-endpoint-gc-interval: 5m0s + cluster-id: "0" + cluster-name: default + debug: "false" + disable-cnp-status-updates: "true" + disable-endpoint-crd: "false" + enable-auto-protect-node-port-range: "true" + enable-bgp-control-plane: "false" + enable-bpf-clock-probe: "true" + enable-endpoint-health-checking: "false" + enable-endpoint-routes: "true" + enable-health-check-nodeport: "true" + enable-health-checking: "true" + enable-host-legacy-routing: "true" + enable-hubble: "true" + enable-ipv4: "true" + enable-ipv4-masquerade: "false" + enable-ipv6: "false" + enable-ipv6-masquerade: "false" + enable-k8s-terminating-endpoint: "true" + enable-l2-neigh-discovery: "true" + enable-l7-proxy: "false" + enable-local-node-route: "false" + enable-local-redirect-policy: "false" + enable-metrics: "true" + enable-policy: default + enable-remote-node-identity: "true" + enable-session-affinity: "true" + enable-svc-source-range-check: "true" + enable-vtep: "false" + enable-well-known-identities: "false" + enable-xt-socket-fallback: "true" + hubble-listen-address: "" + hubble-metrics: flow:sourceContext=pod;destinationContext=pod tcp:sourceContext=pod;destinationContext=pod + dns:query drop:sourceContext=pod;destinationContext=pod + hubble-metrics-server: :9965 + identity-allocation-mode: crd + install-iptables-rules: "true" + install-no-conntrack-iptables-rules: "false" + ipam: delegated-plugin + kube-proxy-replacement: "true" + kube-proxy-replacement-healthz-bind-address: "0.0.0.0:10256" + local-router-ipv4: 169.254.23.0 + metrics: +cilium_bpf_map_pressure + monitor-aggregation: medium + monitor-aggregation-flags: all + monitor-aggregation-interval: 5s + node-port-bind-protection: "true" + nodes-gc-interval: 5m0s + operator-api-serve-addr: 127.0.0.1:9234 + operator-prometheus-serve-addr: :9963 + preallocate-bpf-maps: "false" + procfs: /host/proc + prometheus-serve-addr: :9962 + remove-cilium-node-taints: "true" + set-cilium-is-up-condition: "true" + sidecar-istio-proxy-image: cilium/istio_proxy + synchronize-k8s-nodes: "true" + tofqdns-dns-reject-response-code: refused + tofqdns-enable-dns-compression: "true" + tofqdns-endpoint-max-ip-per-hostname: "1000" + tofqdns-idle-connection-grace-period: 0s + tofqdns-max-deferred-connection-deletes: "10000" + tofqdns-min-ttl: "0" + 
tofqdns-proxy-response-max-delay: 100ms + routing-mode: native + unmanaged-pod-watcher-interval: "15" + vtep-cidr: "" + vtep-endpoint: "" + vtep-mac: "" + vtep-mask: "" + # new default values from Cilium v1.14.4 + enable-sctp: "false" + external-envoy-proxy: "false" + k8s-client-qps: "10" + k8s-client-burst: "20" + mesh-auth-enabled: "true" + mesh-auth-queue-size: "1024" + mesh-auth-rotated-identities-queue-size: "1024" + mesh-auth-gc-interval: "5m0s" + proxy-connect-timeout: "2" + proxy-max-requests-per-connection: "0" + proxy-max-connection-duration-seconds: "0" + set-cilium-node-taints: "true" + unmanaged-pod-watcher-interval: "15" + ## new values added for 1.16 below + enable-ipv4-big-tcp: "false" + enable-ipv6-big-tcp: "false" + enable-masquerade-to-route-source: "false" + enable-health-check-loadbalancer-ip: "false" + bpf-lb-acceleration: "disabled" + enable-k8s-networkpolicy: "true" + cni-exclusive: "false" # Cilium takes ownership of /etc/cni/net.d, pods cannot be scheduled with any other cni if cilium is down + cni-log-file: "/var/run/cilium/cilium-cni.log" + ipam-cilium-node-update-rate: "15s" + egress-gateway-reconciliation-trigger-interval: "1s" + nat-map-stats-entries: "32" + nat-map-stats-interval: "30s" + bpf-events-drop-enabled: "true" # exposes drop events to cilium monitor/hubble + bpf-events-policy-verdict-enabled: "true" # exposes policy verdict events to cilium monitor/hubble + bpf-events-trace-enabled: "true" # exposes trace events to cilium monitor/hubble + enable-tcx: "false" # attach endpoint programs with tcx if supported by kernel + datapath-mode: "veth" + direct-routing-skip-unreachable: "false" + enable-runtime-device-detection: "false" + bpf-lb-sock: "false" + bpf-lb-sock-terminate-pod-connections: "false" + nodeport-addresses: "" + k8s-require-ipv4-pod-cidr: "false" + k8s-require-ipv6-pod-cidr: "false" + enable-node-selector-labels: "false" +## new values for 1.17 + ces-slice-mode: "fcfs" + enable-cilium-endpoint-slice: "true" + bpf-lb-source-range-all-types: "false" + bpf-algorithm-annotation: "false" + bpf-lb-mode-annotation: "false" + enable-experimental-lb: "false" + enable-endpoint-lockdown-on-policy-overflow: "false" + health-check-icmp-failure-threshold: "3" + enable-internal-traffic-policy: "true" + enable-lb-ipam: "true" + enable-non-default-deny-policies: "true" + enable-source-ip-verification: "true" +kind: ConfigMap +metadata: + annotations: + meta.helm.sh/release-name: cilium + meta.helm.sh/release-namespace: kube-system + labels: + app.kubernetes.io/managed-by: Helm + name: cilium-config + namespace: kube-system diff --git a/test/integration/manifests/cilium/v1.17/cilium-config/cilium-config.yaml b/test/integration/manifests/cilium/v1.17/cilium-config/cilium-config.yaml new file mode 100644 index 0000000000..4512e00862 --- /dev/null +++ b/test/integration/manifests/cilium/v1.17/cilium-config/cilium-config.yaml @@ -0,0 +1,139 @@ +apiVersion: v1 #Not verified, placeholder +data: + agent-not-ready-taint-key: node.cilium.io/agent-not-ready + arping-refresh-period: 30s + auto-direct-node-routes: "false" + bpf-lb-external-clusterip: "false" + bpf-lb-map-max: "65536" + bpf-lb-mode: snat + bpf-map-dynamic-size-ratio: "0.0025" + bpf-policy-map-max: "16384" + bpf-root: /sys/fs/bpf + cgroup-root: /run/cilium/cgroupv2 + cilium-endpoint-gc-interval: 5m0s + cluster-id: "0" + cluster-name: default + debug: "false" + disable-cnp-status-updates: "true" + disable-endpoint-crd: "false" + enable-auto-protect-node-port-range: "true" + enable-bgp-control-plane: 
"false" + enable-bpf-clock-probe: "true" + enable-endpoint-health-checking: "false" + enable-endpoint-routes: "true" + enable-health-check-nodeport: "true" + enable-health-checking: "true" + enable-host-legacy-routing: "true" + enable-hubble: "false" + enable-ipv4: "true" + enable-ipv4-masquerade: "false" + enable-ipv6: "false" + enable-ipv6-masquerade: "false" + enable-k8s-terminating-endpoint: "true" + enable-l2-neigh-discovery: "true" + enable-l7-proxy: "false" + enable-local-node-route: "false" + enable-local-redirect-policy: "true" # set to true for lrp test + enable-metrics: "true" + enable-policy: default + enable-session-affinity: "true" + enable-svc-source-range-check: "true" + enable-vtep: "false" + enable-well-known-identities: "false" + enable-xt-socket-fallback: "true" + identity-allocation-mode: crd + install-iptables-rules: "true" + install-no-conntrack-iptables-rules: "false" + ipam: delegated-plugin + kube-proxy-replacement: "true" + kube-proxy-replacement-healthz-bind-address: "0.0.0.0:10256" + local-router-ipv4: 169.254.23.0 + metrics: +cilium_bpf_map_pressure + monitor-aggregation: medium + monitor-aggregation-flags: all + monitor-aggregation-interval: 5s + node-port-bind-protection: "true" + nodes-gc-interval: 5m0s + operator-api-serve-addr: 127.0.0.1:9234 + operator-prometheus-serve-addr: :9963 + preallocate-bpf-maps: "false" + procfs: /host/proc + prometheus-serve-addr: :9962 + remove-cilium-node-taints: "true" + set-cilium-is-up-condition: "true" + sidecar-istio-proxy-image: cilium/istio_proxy + synchronize-k8s-nodes: "true" + tofqdns-dns-reject-response-code: refused + tofqdns-enable-dns-compression: "true" + tofqdns-endpoint-max-ip-per-hostname: "1000" + tofqdns-idle-connection-grace-period: 0s + tofqdns-max-deferred-connection-deletes: "10000" + tofqdns-min-ttl: "0" + tofqdns-proxy-response-max-delay: 100ms + routing-mode: native + unmanaged-pod-watcher-interval: "15" + vtep-cidr: "" + vtep-endpoint: "" + vtep-mac: "" + vtep-mask: "" + enable-sctp: "false" + external-envoy-proxy: "false" + k8s-client-qps: "10" + k8s-client-burst: "20" + mesh-auth-enabled: "true" + mesh-auth-queue-size: "1024" + mesh-auth-rotated-identities-queue-size: "1024" + mesh-auth-gc-interval: "5m0s" + proxy-connect-timeout: "2" + proxy-max-requests-per-connection: "0" + proxy-max-connection-duration-seconds: "0" + set-cilium-node-taints: "true" + unmanaged-pod-watcher-interval: "15" +## new values added for 1.16 below + enable-ipv4-big-tcp: "false" + enable-ipv6-big-tcp: "false" + enable-masquerade-to-route-source: "false" + enable-health-check-loadbalancer-ip: "false" + bpf-lb-acceleration: "disabled" + enable-k8s-networkpolicy: "true" + cni-exclusive: "false" # Cilium takes ownership of /etc/cni/net.d, pods cannot be scheduled with any other cni if cilium is down + cni-log-file: "/var/run/cilium/cilium-cni.log" + ipam-cilium-node-update-rate: "15s" + egress-gateway-reconciliation-trigger-interval: "1s" + nat-map-stats-entries: "32" + nat-map-stats-interval: "30s" + bpf-events-drop-enabled: "true" # exposes drop events to cilium monitor/hubble + bpf-events-policy-verdict-enabled: "true" # exposes policy verdict events to cilium monitor/hubble + bpf-events-trace-enabled: "true" # exposes trace events to cilium monitor/hubble + enable-tcx: "false" # attach endpoint programs with tcx if supported by kernel + datapath-mode: "veth" + direct-routing-skip-unreachable: "false" + enable-runtime-device-detection: "false" + bpf-lb-sock: "false" + bpf-lb-sock-terminate-pod-connections: "false" + 
nodeport-addresses: ""
+  k8s-require-ipv4-pod-cidr: "false"
+  k8s-require-ipv6-pod-cidr: "false"
+  enable-node-selector-labels: "false"
+## new values for 1.17
+  ces-slice-mode: "fcfs"
+  enable-cilium-endpoint-slice: "true"
+  bpf-lb-source-range-all-types: "false"
+  bpf-algorithm-annotation: "false"
+  bpf-lb-mode-annotation: "false"
+  enable-experimental-lb: "false"
+  enable-endpoint-lockdown-on-policy-overflow: "false"
+  health-check-icmp-failure-threshold: "3"
+  enable-internal-traffic-policy: "true"
+  enable-lb-ipam: "true"
+  enable-non-default-deny-policies: "true"
+  enable-source-ip-verification: "true"
+kind: ConfigMap
+metadata:
+  annotations:
+    meta.helm.sh/release-name: cilium
+    meta.helm.sh/release-namespace: kube-system
+  labels:
+    app.kubernetes.io/managed-by: Helm
+  name: cilium-config
+  namespace: kube-system
diff --git a/test/integration/manifests/cilium/v1.17/cilium-operator/files/clusterrole.yaml b/test/integration/manifests/cilium/v1.17/cilium-operator/files/clusterrole.yaml
new file mode 100644
index 0000000000..329cc07f5d
--- /dev/null
+++ b/test/integration/manifests/cilium/v1.17/cilium-operator/files/clusterrole.yaml
@@ -0,0 +1,238 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: cilium-operator
+  labels:
+    app.kubernetes.io/part-of: cilium
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  verbs:
+  - get
+  - list
+  - watch
+  # to automatically delete [core|kube]dns pods so that they start being
+  # managed by Cilium
+  - delete
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  resourceNames:
+  - cilium-config
+  verbs:
+  # allow patching of the configmap to set annotations
+  - patch
+- apiGroups:
+  - ""
+  resources:
+  - nodes
+  verbs:
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  # To remove node taints
+  - nodes
+  # To set NetworkUnavailable false on startup
+  - nodes/status
+  verbs:
+  - patch
+- apiGroups:
+  - discovery.k8s.io
+  resources:
+  - endpointslices
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  # to perform LB IP allocation for BGP
+  - services/status
+  verbs:
+  - update
+  - patch
+- apiGroups:
+  - ""
+  resources:
+  # to check apiserver connectivity
+  - namespaces
+  - secrets
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  # to perform the translation of a CNP that contains `ToGroup` to its endpoints
+  - services
+  - endpoints
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - cilium.io
+  resources:
+  - ciliumnetworkpolicies
+  - ciliumclusterwidenetworkpolicies
+  verbs:
+  # Create auto-generated CNPs and CCNPs from Policies that have 'toGroups'
+  - create
+  - update
+  - deletecollection
+  # To update the status of the CNPs and CCNPs
+  - patch
+  - get
+  - list
+  - watch
+- apiGroups:
+  - cilium.io
+  resources:
+  - ciliumnetworkpolicies/status
+  - ciliumclusterwidenetworkpolicies/status
+  verbs:
+  # Update the auto-generated CNPs and CCNPs status.
+ - patch + - update +- apiGroups: + - cilium.io + resources: + - ciliumendpoints + - ciliumidentities + verbs: + # To perform garbage collection of such resources + - delete + - list + - watch +- apiGroups: + - cilium.io + resources: + - ciliumidentities + verbs: + # To synchronize garbage collection of such resources + - update +- apiGroups: + - cilium.io + resources: + - ciliumnodes + verbs: + - create + - update + - get + - list + - watch + # To perform CiliumNode garbage collection + - delete +- apiGroups: + - cilium.io + resources: + - ciliumnodes/status + verbs: + - update +- apiGroups: + - cilium.io + resources: + - ciliumendpointslices + - ciliumenvoyconfigs + - ciliumbgppeerconfigs + - ciliumbgpadvertisements + - ciliumbgpnodeconfigs + verbs: + - create + - update + - get + - list + - watch + - delete + - patch +- apiGroups: + - cilium.io + resources: + - ciliumbgpclusterconfigs/status + - ciliumbgppeerconfigs/status + verbs: + - update +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - create + - get + - list + - watch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - update + resourceNames: + - ciliumloadbalancerippools.cilium.io + - ciliumbgppeeringpolicies.cilium.io + - ciliumbgpclusterconfigs.cilium.io + - ciliumbgppeerconfigs.cilium.io + - ciliumbgpadvertisements.cilium.io + - ciliumbgpnodeconfigs.cilium.io + - ciliumbgpnodeconfigoverrides.cilium.io + - ciliumclusterwideenvoyconfigs.cilium.io + - ciliumclusterwidenetworkpolicies.cilium.io + - ciliumegressgatewaypolicies.cilium.io + - ciliumendpoints.cilium.io + - ciliumendpointslices.cilium.io + - ciliumenvoyconfigs.cilium.io + - ciliumexternalworkloads.cilium.io + - ciliumidentities.cilium.io + - ciliumlocalredirectpolicies.cilium.io + - ciliumnetworkpolicies.cilium.io + - ciliumnodes.cilium.io + - ciliumnodeconfigs.cilium.io + - ciliumcidrgroups.cilium.io + - ciliuml2announcementpolicies.cilium.io + - ciliumpodippools.cilium.io +- apiGroups: + - cilium.io + resources: + - ciliumloadbalancerippools + - ciliumpodippools + - ciliumbgppeeringpolicies + - ciliumbgpclusterconfigs + - ciliumbgpnodeconfigoverrides + - ciliumbgppeerconfigs + verbs: + - get + - list + - watch +- apiGroups: + - cilium.io + resources: + - ciliumpodippools + verbs: + - create +- apiGroups: + - cilium.io + resources: + - ciliumloadbalancerippools/status + verbs: + - patch +# For cilium-operator running in HA mode. +# +# Cilium operator running in HA mode requires the use of ResourceLock for Leader Election +# between multiple running instances. +# The preferred way of doing this is to use LeasesResourceLock as edits to Leases are less +# common and fewer objects in the cluster watch "all Leases". 
+- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update \ No newline at end of file diff --git a/test/integration/manifests/cilium/v1.17/cilium-operator/files/clusterrolebinding.yaml b/test/integration/manifests/cilium/v1.17/cilium-operator/files/clusterrolebinding.yaml new file mode 100644 index 0000000000..eb164361d4 --- /dev/null +++ b/test/integration/manifests/cilium/v1.17/cilium-operator/files/clusterrolebinding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cilium-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cilium-operator +subjects: +- kind: ServiceAccount + name: "cilium-operator" + namespace: kube-system diff --git a/test/integration/manifests/cilium/v1.17/cilium-operator/files/serviceaccount.yaml b/test/integration/manifests/cilium/v1.17/cilium-operator/files/serviceaccount.yaml new file mode 100644 index 0000000000..be4bfc048a --- /dev/null +++ b/test/integration/manifests/cilium/v1.17/cilium-operator/files/serviceaccount.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: "cilium-operator" + namespace: kube-system diff --git a/test/integration/manifests/cilium/v1.17/cilium-operator/templates/deployment.yaml b/test/integration/manifests/cilium/v1.17/cilium-operator/templates/deployment.yaml new file mode 100644 index 0000000000..0b1a497bd2 --- /dev/null +++ b/test/integration/manifests/cilium/v1.17/cilium-operator/templates/deployment.yaml @@ -0,0 +1,163 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cilium-operator + namespace: kube-system + labels: + io.cilium/app: operator + name: cilium-operator +spec: + replicas: 2 + selector: + matchLabels: + io.cilium/app: operator + name: cilium-operator + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + annotations: + prometheus.io/port: "9963" + prometheus.io/scrape: "true" + labels: + io.cilium/app: operator + name: cilium-operator + kubernetes.azure.com/ebpf-dataplane: cilium + spec: + containers: + - name: cilium-operator + image: $CILIUM_IMAGE_REGISTRY/cilium/operator-generic:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + command: + - cilium-operator-generic + args: + - --config-dir=/tmp/cilium/config-map + - --debug=$(CILIUM_DEBUG) + - --identity-gc-interval=0m20s + - --identity-heartbeat-timeout=0m20s + - --enable-cilium-endpoint-slice=true + - --ces-slice-mode=fcfs + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_DEBUG + valueFrom: + configMapKeyRef: + key: debug + name: cilium-config + optional: true + ports: + - name: prometheus + containerPort: 9963 + hostPort: 9963 + protocol: TCP + securityContext: + seLinuxOptions: + level: 's0' + # Running with spc_t since we have removed the privileged mode. + # Users can change it to a different type as long as they have the + # type available on the system. + type: 'spc_t' + capabilities: + add: + # Use to set socket permission + - CHOWN + # Used to terminate envoy child process + - KILL + # Used since cilium modifies routing tables, etc... + - NET_ADMIN + # Used since cilium creates raw sockets, etc... + - NET_RAW + # Used since cilium monitor uses mmap + - IPC_LOCK + # Used in iptables. 
Consider removing once we are iptables-free + - SYS_MODULE + # We need it for now but might not need it for >= 5.11 especially + # for the 'SYS_RESOURCE'. + # In >= 5.8 there's already BPF and PERFMON capabilities + - SYS_ADMIN + # Could be an alternative for the SYS_ADMIN for the RLIMIT_NPROC + - SYS_RESOURCE + # Both PERFMON and BPF require kernel 5.8, container runtime + # cri-o >= v1.22.0 or containerd >= v1.5.0. + # If available, SYS_ADMIN can be removed. + #- PERFMON + #- BPF + - DAC_OVERRIDE + - FOWNER + - SETGID + - SETUID + drop: + - ALL + livenessProbe: + httpGet: + host: "127.0.0.1" + path: /healthz + port: 9234 + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 3 + volumeMounts: + - name: cilium-config-path + mountPath: /tmp/cilium/config-map + readOnly: true + hostNetwork: true + restartPolicy: Always + priorityClassName: system-cluster-critical + serviceAccount: "cilium-operator" + serviceAccountName: "cilium-operator" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.azure.com/cluster + operator: Exists + - key: type + operator: NotIn + values: + - virtual-kubelet + - key: kubernetes.io/os + operator: In + values: + - linux + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: kubernetes.azure.com/mode + operator: In + values: + - system + # In HA mode, cilium-operator pods must not be scheduled on the same + # node as they will clash with each other. + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + io.cilium/app: operator + topologyKey: kubernetes.io/hostname + tolerations: + - key: "CriticalAddonsOnly" + operator: "Exists" + - operator: "Exists" + effect: NoExecute + - operator: "Exists" + effect: NoSchedule + volumes: + # To read the configuration from the config map + - name: cilium-config-path + configMap: + name: cilium-config diff --git a/test/integration/manifests/cilium/v1.17/hubble/hubble-peer-svc.yaml b/test/integration/manifests/cilium/v1.17/hubble/hubble-peer-svc.yaml new file mode 100644 index 0000000000..6ba733885c --- /dev/null +++ b/test/integration/manifests/cilium/v1.17/hubble/hubble-peer-svc.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + k8s-app: cilium + name: hubble-peer + namespace: kube-system +spec: + internalTrafficPolicy: Cluster + ports: + - name: peer-service + port: 443 + protocol: TCP + targetPort: 4244 + selector: + k8s-app: cilium + sessionAffinity: None + type: ClusterIP diff --git a/test/integration/manifests/cns/daemonset-linux.yaml b/test/integration/manifests/cns/daemonset-linux.yaml index 3a30796e4d..a4af25a6cd 100644 --- a/test/integration/manifests/cns/daemonset-linux.yaml +++ b/test/integration/manifests/cns/daemonset-linux.yaml @@ -27,7 +27,7 @@ spec: operator: NotIn values: - virtual-kubelet - - key: beta.kubernetes.io/os + - key: kubernetes.io/os operator: In values: - linux diff --git a/test/integration/manifests/cns/daemonset-windows.yaml b/test/integration/manifests/cns/daemonset-windows.yaml index 04e2aaa3fa..96b8dc9c40 100644 --- a/test/integration/manifests/cns/daemonset-windows.yaml +++ b/test/integration/manifests/cns/daemonset-windows.yaml @@ -49,12 +49,9 @@ spec: securityContext: privileged: true workingDir: $env:CONTAINER_SANDBOX_MOUNT_POINT - command: ["powershell.exe"] + command: ["azure-cns.exe"] args: [ - '.\setkubeconfigpath.ps1', - ";", - '.\azure-cns.exe', 
"-c", "tcp://$(CNSIpAddress):$(CNSPort)", "-t", @@ -65,8 +62,6 @@ spec: "$(CNSStoreFilePath)", "-config-path", "%CONTAINER_SANDBOX_MOUNT_POINT%\\$(CNS_CONFIGURATION_PATH)", - "--kubeconfig", - '.\kubeconfig', ] volumeMounts: - name: log @@ -111,7 +106,7 @@ spec: image: acnpublic.azurecr.io/cni-dropgz:latest imagePullPolicy: Always command: - - powershell.exe; $env:CONTAINER_SANDBOX_MOUNT_POINT/dropgz + - $env:CONTAINER_SANDBOX_MOUNT_POINT/dropgz args: - deploy - azure-vnet diff --git a/test/integration/manifests/cnsconfig/azurecnidualstackoverlaylinuxconfigmap.yaml b/test/integration/manifests/cnsconfig/azurecnidualstackoverlaylinuxconfigmap.yaml index b893c3b5aa..e8e52dd1b8 100644 --- a/test/integration/manifests/cnsconfig/azurecnidualstackoverlaylinuxconfigmap.yaml +++ b/test/integration/manifests/cnsconfig/azurecnidualstackoverlaylinuxconfigmap.yaml @@ -21,13 +21,16 @@ data: "NodeID": "", "NodeSyncIntervalInSeconds": 30 }, + "AsyncPodDeletePath": "/var/run/azure-vnet/deleteIDs", "ChannelMode": "CRD", - "InitializeFromCNI": true, - "ManageEndpointState": false, - "ProgramSNATIPTables" : false, - "EnableCNIConflistGeneration": true, "CNIConflistFilepath": "/etc/cni/net.d/15-azure-swift-overlay.conflist", "CNIConflistScenario": "overlay", "EnableAsyncPodDelete": false, - "AsyncPodDeletePath": "/var/run/azure-vnet/deleteIDs" + "EnableCNIConflistGeneration": true, + "EnableIPAMv2": true, + "EnableStateMigration": false, + "EnableSubnetScarcity": false, + "InitializeFromCNI": true, + "ManageEndpointState": false, + "ProgramSNATIPTables": false } diff --git a/test/integration/manifests/cnsconfig/azurecnidualstackoverlaywindowsconfigmap.yaml b/test/integration/manifests/cnsconfig/azurecnidualstackoverlaywindowsconfigmap.yaml index f72add070c..e6dd333fca 100644 --- a/test/integration/manifests/cnsconfig/azurecnidualstackoverlaywindowsconfigmap.yaml +++ b/test/integration/manifests/cnsconfig/azurecnidualstackoverlaywindowsconfigmap.yaml @@ -21,15 +21,17 @@ data: "NodeID": "", "NodeSyncIntervalInSeconds": 30 }, - "EnableSubnetScarcity": false, + "AsyncPodDeletePath": "/var/run/azure-vnet/deleteIDs", "ChannelMode": "CRD", - "InitializeFromCNI": true, - "ManageEndpointState": false, - "ProgramSNATIPTables" : false, - "MetricsBindAddress": ":10092", - "EnableCNIConflistGeneration": false, "CNIConflistFilepath": "C:\\k\\azurecni\\netconf\\10-azure.conflist", "CNIConflistScenario": "dualStackOverlay", "EnableAsyncPodDelete": false, - "AsyncPodDeletePath": "/var/run/azure-vnet/deleteIDs" + "EnableCNIConflistGeneration": false, + "EnableIPAMv2": true, + "EnableStateMigration": false, + "EnableSubnetScarcity": false, + "InitializeFromCNI": true, + "ManageEndpointState": false, + "MetricsBindAddress": ":10092", + "ProgramSNATIPTables": false } diff --git a/test/integration/manifests/cnsconfig/azurecnioverlaylinuxconfigmap.yaml b/test/integration/manifests/cnsconfig/azurecnioverlaylinuxconfigmap.yaml index cde7927bb0..1c158df4e0 100644 --- a/test/integration/manifests/cnsconfig/azurecnioverlaylinuxconfigmap.yaml +++ b/test/integration/manifests/cnsconfig/azurecnioverlaylinuxconfigmap.yaml @@ -21,13 +21,16 @@ data: "NodeID": "", "NodeSyncIntervalInSeconds": 30 }, + "AsyncPodDeletePath": "/var/run/azure-vnet/deleteIDs", "ChannelMode": "CRD", - "InitializeFromCNI": true, - "ManageEndpointState": false, - "ProgramSNATIPTables" : false, - "EnableCNIConflistGeneration": true, "CNIConflistFilepath": "/etc/cni/net.d/15-azure-swift-overlay.conflist", "CNIConflistScenario": "v4overlay", "EnableAsyncPodDelete": true, 
- "AsyncPodDeletePath": "/var/run/azure-vnet/deleteIDs" + "EnableCNIConflistGeneration": true, + "EnableIPAMv2": true, + "EnableStateMigration": false, + "EnableSubnetScarcity": false, + "InitializeFromCNI": true, + "ManageEndpointState": false, + "ProgramSNATIPTables": false } diff --git a/test/integration/manifests/cnsconfig/azurecnioverlaywindowsconfigmap.yaml b/test/integration/manifests/cnsconfig/azurecnioverlaywindowsconfigmap.yaml index 422dc462fa..40be0c0e3d 100644 --- a/test/integration/manifests/cnsconfig/azurecnioverlaywindowsconfigmap.yaml +++ b/test/integration/manifests/cnsconfig/azurecnioverlaywindowsconfigmap.yaml @@ -21,15 +21,17 @@ data: "NodeID": "", "NodeSyncIntervalInSeconds": 30 }, - "EnableSubnetScarcity": false, + "AsyncPodDeletePath": "/var/run/azure-vnet/deleteIDs", "ChannelMode": "CRD", - "InitializeFromCNI": true, - "ManageEndpointState": false, - "ProgramSNATIPTables" : false, - "MetricsBindAddress": ":10092", - "EnableCNIConflistGeneration": false, "CNIConflistFilepath": "C:\\k\\azurecni\\netconf\\10-azure.conflist", "CNIConflistScenario": "v4overlay", "EnableAsyncPodDelete": true, - "AsyncPodDeletePath": "/var/run/azure-vnet/deleteIDs" + "EnableCNIConflistGeneration": false, + "EnableIPAMv2": true, + "EnableStateMigration": false, + "EnableSubnetScarcity": false, + "InitializeFromCNI": true, + "ManageEndpointState": false, + "MetricsBindAddress": ":10092", + "ProgramSNATIPTables": false } diff --git a/test/integration/manifests/cnsconfig/azurestatelesscnioverlaywindowsconfigmap.yaml b/test/integration/manifests/cnsconfig/azurestatelesscnioverlaywindowsconfigmap.yaml new file mode 100644 index 0000000000..f1488e2860 --- /dev/null +++ b/test/integration/manifests/cnsconfig/azurestatelesscnioverlaywindowsconfigmap.yaml @@ -0,0 +1,37 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: cns-win-config + namespace: kube-system +data: + cns_config.json: | + { + "TelemetrySettings": { + "TelemetryBatchSizeBytes": 16384, + "TelemetryBatchIntervalInSecs": 15, + "RefreshIntervalInSecs": 15, + "DisableAll": false, + "HeartBeatIntervalInMins": 30, + "DebugMode": false, + "SnapshotIntervalInMins": 60 + }, + "ManagedSettings": { + "PrivateEndpoint": "", + "InfrastructureNetworkID": "", + "NodeID": "", + "NodeSyncIntervalInSeconds": 30 + }, + "AsyncPodDeletePath": "/var/run/azure-vnet/deleteIDs", + "ChannelMode": "CRD", + "CNIConflistFilepath": "C:\\k\\azurecni\\netconf\\10-azure.conflist", + "CNIConflistScenario": "v4overlay", + "EnableAsyncPodDelete": true, + "EnableCNIConflistGeneration": false, + "EnableIPAMv2": true, + "EnableStateMigration": false, + "EnableSubnetScarcity": false, + "InitializeFromCNI": false, + "ManageEndpointState": true, + "MetricsBindAddress": ":10092", + "ProgramSNATIPTables": false + } diff --git a/test/integration/manifests/cnsconfig/ciliumconfigmap.yaml b/test/integration/manifests/cnsconfig/ciliumconfigmap.yaml index 38610dc8df..5d1b3f585c 100644 --- a/test/integration/manifests/cnsconfig/ciliumconfigmap.yaml +++ b/test/integration/manifests/cnsconfig/ciliumconfigmap.yaml @@ -21,13 +21,16 @@ data: "NodeID": "", "NodeSyncIntervalInSeconds": 30 }, + "AsyncPodDeletePath": "/var/run/azure-vnet/deleteIDs", "ChannelMode": "CRD", - "InitializeFromCNI": false, - "ManageEndpointState": true, - "ProgramSNATIPTables" : true, - "EnableCNIConflistGeneration": true, "CNIConflistFilepath": "/etc/cni/net.d/05-cilium.conflist", "CNIConflistScenario": "cilium", "EnableAsyncPodDelete": true, - "AsyncPodDeletePath": "/var/run/azure-vnet/deleteIDs" + 
"EnableCNIConflistGeneration": true, + "EnableIPAMv2": true, + "EnableStateMigration": false, + "EnableSubnetScarcity": false, + "InitializeFromCNI": false, + "ManageEndpointState": true, + "ProgramSNATIPTables": true } diff --git a/test/integration/manifests/cnsconfig/ciliumnodesubnetconfigmap.yaml b/test/integration/manifests/cnsconfig/ciliumnodesubnetconfigmap.yaml new file mode 100644 index 0000000000..6796b8068b --- /dev/null +++ b/test/integration/manifests/cnsconfig/ciliumnodesubnetconfigmap.yaml @@ -0,0 +1,36 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: cns-config + namespace: kube-system +data: + cns_config.json: | + { + "TelemetrySettings": { + "TelemetryBatchSizeBytes": 16384, + "TelemetryBatchIntervalInSecs": 15, + "RefreshIntervalInSecs": 15, + "DisableAll": false, + "HeartBeatIntervalInMins": 30, + "DebugMode": false, + "SnapshotIntervalInMins": 60 + }, + "ManagedSettings": { + "PrivateEndpoint": "", + "InfrastructureNetworkID": "", + "NodeID": "", + "NodeSyncIntervalInSeconds": 30 + }, + "AsyncPodDeletePath": "/var/run/azure-vnet/deleteIDs", + "ChannelMode": "AzureHost", + "CNIConflistFilepath": "/etc/cni/net.d/05-cilium.conflist", + "CNIConflistScenario": "cilium", + "EnableAsyncPodDelete": true, + "EnableCNIConflistGeneration": true, + "EnableIPAMv2": true, + "EnableStateMigration": false, + "EnableSubnetScarcity": false, + "InitializeFromCNI": false, + "ManageEndpointState": true, + "ProgramSNATIPTables": false + } diff --git a/test/integration/manifests/cnsconfig/overlayconfigmap.yaml b/test/integration/manifests/cnsconfig/overlayconfigmap.yaml index f9959f9898..3321ec3698 100644 --- a/test/integration/manifests/cnsconfig/overlayconfigmap.yaml +++ b/test/integration/manifests/cnsconfig/overlayconfigmap.yaml @@ -21,13 +21,16 @@ data: "NodeID": "", "NodeSyncIntervalInSeconds": 30 }, + "AsyncPodDeletePath": "/var/run/azure-vnet/deleteIDs", "ChannelMode": "CRD", - "InitializeFromCNI": false, - "ManageEndpointState": true, - "ProgramSNATIPTables" : false, - "EnableCNIConflistGeneration": true, "CNIConflistFilepath": "/etc/cni/net.d/05-cilium.conflist", "CNIConflistScenario": "cilium", "EnableAsyncPodDelete": true, - "AsyncPodDeletePath": "/var/run/azure-vnet/deleteIDs" + "EnableCNIConflistGeneration": true, + "EnableIPAMv2": true, + "EnableStateMigration": false, + "EnableSubnetScarcity": false, + "InitializeFromCNI": false, + "ManageEndpointState": true, + "ProgramSNATIPTables": false } diff --git a/test/integration/manifests/cnsconfig/swiftlinuxconfigmap.yaml b/test/integration/manifests/cnsconfig/swiftlinuxconfigmap.yaml index 254118e391..5b842dd278 100644 --- a/test/integration/manifests/cnsconfig/swiftlinuxconfigmap.yaml +++ b/test/integration/manifests/cnsconfig/swiftlinuxconfigmap.yaml @@ -25,7 +25,9 @@ data: "ChannelMode": "CRD", "EnableAsyncPodDelete": true, "EnableIPAMv2": true, + "EnableStateMigration": false, + "EnableSubnetScarcity": false, "InitializeFromCNI": true, "ManageEndpointState": false, - "ProgramSNATIPTables" : false + "ProgramSNATIPTables": false } diff --git a/test/integration/manifests/cnsconfig/swiftwindowsconfigmap.yaml b/test/integration/manifests/cnsconfig/swiftwindowsconfigmap.yaml index 3eb1bf236a..829e964442 100644 --- a/test/integration/manifests/cnsconfig/swiftwindowsconfigmap.yaml +++ b/test/integration/manifests/cnsconfig/swiftwindowsconfigmap.yaml @@ -21,10 +21,13 @@ data: "NodeID": "", "NodeSyncIntervalInSeconds": 30 }, + "AsyncPodDeletePath": "/var/run/azure-vnet/deleteIDs", "ChannelMode": "CRD", + "EnableAsyncPodDelete": 
true, + "EnableIPAMv2": true, + "EnableStateMigration": false, + "EnableSubnetScarcity": false, "InitializeFromCNI": true, "ManageEndpointState": false, - "ProgramSNATIPTables": false, - "EnableAsyncPodDelete": true, - "AsyncPodDeletePath": "/var/run/azure-vnet/deleteIDs" + "ProgramSNATIPTables": false } diff --git a/test/integration/manifests/datapath/linux-deployment-ipv6.yaml b/test/integration/manifests/datapath/linux-deployment-ipv6.yaml index d7550f6069..68dbe4063d 100644 --- a/test/integration/manifests/datapath/linux-deployment-ipv6.yaml +++ b/test/integration/manifests/datapath/linux-deployment-ipv6.yaml @@ -4,7 +4,7 @@ metadata: name: goldpinger-deploy namespace: linux-datapath-test spec: - replicas: 4 + replicas: 8 selector: matchLabels: app: goldpinger @@ -86,3 +86,10 @@ spec: periodSeconds: 5 nodeSelector: kubernetes.io/os: linux + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: "goldpinger" diff --git a/test/integration/manifests/datapath/linux-deployment.yaml b/test/integration/manifests/datapath/linux-deployment.yaml index 7963fbb29e..73e75fd077 100644 --- a/test/integration/manifests/datapath/linux-deployment.yaml +++ b/test/integration/manifests/datapath/linux-deployment.yaml @@ -4,7 +4,7 @@ metadata: name: goldpinger-deploy namespace: linux-datapath-test spec: - replicas: 4 + replicas: 8 selector: matchLabels: app: goldpinger @@ -84,3 +84,10 @@ spec: periodSeconds: 5 nodeSelector: kubernetes.io/os: linux + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: "goldpinger" diff --git a/test/integration/manifests/datapath/windows-deployment.yaml b/test/integration/manifests/datapath/windows-deployment.yaml index e4a5bb36bf..c17874d066 100644 --- a/test/integration/manifests/datapath/windows-deployment.yaml +++ b/test/integration/manifests/datapath/windows-deployment.yaml @@ -4,7 +4,7 @@ metadata: name: windows-pod namespace: datapath-win spec: - replicas: 4 + replicas: 8 selector: matchLabels: app: datapod @@ -20,3 +20,10 @@ spec: args: ["sleep", "5000"] nodeSelector: kubernetes.io/os: windows + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: "datapod" diff --git a/test/integration/manifests/ip-masq-agent/config-custom.yaml b/test/integration/manifests/ip-masq-agent/config-custom.yaml index 4bdc6cc5ee..3ace541311 100644 --- a/test/integration/manifests/ip-masq-agent/config-custom.yaml +++ b/test/integration/manifests/ip-masq-agent/config-custom.yaml @@ -13,5 +13,6 @@ data: - 192.168.0.0/16 - 100.64.0.0/10 - 10.244.0.0/16 + - 10.10.0.0/16 masqLinkLocal: false masqLinkLocalIPv6: true diff --git a/test/integration/manifests/ip-masq-agent/config-reconcile.yaml b/test/integration/manifests/ip-masq-agent/config-reconcile.yaml index 0f715267d8..67944cd917 100644 --- a/test/integration/manifests/ip-masq-agent/config-reconcile.yaml +++ b/test/integration/manifests/ip-masq-agent/config-reconcile.yaml @@ -11,4 +11,5 @@ data: - 192.168.0.0/16 - 100.64.0.0/10 - 10.244.0.0/16 + - 10.10.0.0/16 masqLinkLocal: true diff --git a/test/integration/networkobservability/hubble_test.go b/test/integration/networkobservability/hubble_test.go index a363b9a97c..15c07aea0e 100644 --- a/test/integration/networkobservability/hubble_test.go +++ 
b/test/integration/networkobservability/hubble_test.go @@ -5,17 +5,14 @@ package networkobservability import ( "context" "fmt" - "io" - "net/http" - "strings" "testing" "time" k8s "github.com/Azure/azure-container-networking/test/integration" + "github.com/Azure/azure-container-networking/test/integration/prometheus" "github.com/Azure/azure-container-networking/test/internal/kubernetes" "github.com/Azure/azure-container-networking/test/internal/retry" io_prometheus_client "github.com/prometheus/client_model/go" - "github.com/prometheus/common/expfmt" "github.com/stretchr/testify/require" ) @@ -48,11 +45,11 @@ var ( drop = []string{"source", "source"} ) -func TestPromtheusStringInputParser(t *testing.T) { +func TestPrometheusStringInputParser(t *testing.T) { input := ` hubble_tcp_flags_total{destination="",family="IPv4",flag="RST",source="kube-system/metrics-server"} 980 ` - metrics, err := parseStringPrometheusMetrics(input) + metrics, err := prometheus.ParseStringMetrics(input) if err != nil { t.Fail() } @@ -76,13 +73,13 @@ func TestPromtheusStringInputParser(t *testing.T) { testMetrichubbletcpflagstotal(t, kv, "RST", "kube-system/metrics-server", 980) } -func TestPromtheusStringThreeInputParser(t *testing.T) { +func TestPrometheusStringThreeInputParser(t *testing.T) { input := ` hubble_tcp_flags_total{destination="",family="IPv4",flag="RST",source="kube-system/metrics-server"} 980 hubble_tcp_flags_total{destination="",family="IPv4",flag="SYN",source="kube-system/ama-metrics"} 1777 hubble_flows_processed_total{destination="kube-system/coredns-76b9877f49-2p4fc",protocol="UDP",source="",subtype="to-stack",type="Trace",verdict="FORWARDED"} 3 ` - metrics, err := parseStringPrometheusMetrics(input) + metrics, err := prometheus.ParseStringMetrics(input) if err != nil { t.Fail() } @@ -122,7 +119,7 @@ func TestLabelCheck(t *testing.T) { hubble_tcp_flags_total{destination="",family="IPv4",flag="RST",source="kube-system/metrics-server"} 980 hubble_flows_processed_total{destination="kube-system/coredns-76b9877f49-2p4fc",protocol="UDP",source="",subtype="to-stack",type="Trace",verdict="FORWARDED"} 3 ` - metrics, err := parseStringPrometheusMetrics(input) + metrics, err := prometheus.ParseStringMetrics(input) if err != nil { t.Fail() } @@ -132,11 +129,11 @@ func TestLabelCheck(t *testing.T) { } } -func TestPromtheusInvalidStringInputParser(t *testing.T) { +func TestPrometheusInvalidStringInputParser(t *testing.T) { input := ` This clearly should fail. If it ever passes blame Prometheus. 
` - _, err := parseStringPrometheusMetrics(input) + _, err := prometheus.ParseStringMetrics(input) require.Error(t, err) } @@ -220,7 +217,7 @@ func TestEndpoints(t *testing.T) { defer cancel() pingCheckFn := func() error { var pf *k8s.PortForwarder - pf, err := k8s.NewPortForwarder(config, t, k8s.PortForwardingOpts{ + pf, err := k8s.NewPortForwarder(config, k8s.PortForwardingOpts{ Namespace: namespace, LabelSelector: labelSelector, LocalPort: 9965, @@ -248,7 +245,7 @@ func TestEndpoints(t *testing.T) { defer pf.Stop() // scrape the hubble metrics - metrics, err := getPrometheusMetrics(promAddress) + metrics, err := prometheus.GetMetrics(promAddress) if err != nil { return fmt.Errorf("scraping %s, failed with error: %w", promAddress, err) } @@ -264,34 +261,3 @@ func TestEndpoints(t *testing.T) { t.Fatalf("metrics check failed with error: %v", err) } } - -func getPrometheusMetrics(url string) (map[string]*io_prometheus_client.MetricFamily, error) { - client := http.Client{} - resp, err := client.Get(url) //nolint - if err != nil { - return nil, fmt.Errorf("HTTP request failed: %w", err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("HTTP request failed with status: %v", resp.Status) //nolint:goerr113,gocritic - } - - metrics, err := parseReaderPrometheusMetrics(resp.Body) - if err != nil { - return nil, err - } - - return metrics, nil -} - -func parseReaderPrometheusMetrics(input io.Reader) (map[string]*io_prometheus_client.MetricFamily, error) { - var parser expfmt.TextParser - return parser.TextToMetricFamilies(input) //nolint -} - -func parseStringPrometheusMetrics(input string) (map[string]*io_prometheus_client.MetricFamily, error) { - var parser expfmt.TextParser - reader := strings.NewReader(input) - return parser.TextToMetricFamilies(reader) //nolint -} diff --git a/test/integration/portforward.go b/test/integration/portforward.go index 4765b9a715..a4a5089c5a 100644 --- a/test/integration/portforward.go +++ b/test/integration/portforward.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "io" + "log" "math/rand" "net/http" "sync" @@ -26,7 +27,6 @@ type PortForwarder struct { clientset *kubernetes.Clientset transport http.RoundTripper upgrader spdy.Upgrader - logger logger opts PortForwardingOpts @@ -45,7 +45,7 @@ type PortForwardingOpts struct { } // NewPortForwarder creates a PortForwarder. -func NewPortForwarder(restConfig *rest.Config, logger logger, opts PortForwardingOpts) (*PortForwarder, error) { +func NewPortForwarder(restConfig *rest.Config, opts PortForwardingOpts) (*PortForwarder, error) { clientset, err := kubernetes.NewForConfig(restConfig) if err != nil { return nil, fmt.Errorf("could not create clientset: %w", err) @@ -60,7 +60,6 @@ func NewPortForwarder(restConfig *rest.Config, logger logger, opts PortForwardin clientset: clientset, transport: transport, upgrader: upgrader, - logger: logger, opts: opts, stopChan: make(chan struct{}, 1), }, nil @@ -173,7 +172,7 @@ func (p *PortForwarder) KeepAlive(ctx context.Context) { for { select { case <-ctx.Done(): - p.logger.Logf("port forwarder: keep alive cancelled: %v", ctx.Err()) + log.Printf("port forwarder: keep alive cancelled: %v", ctx.Err()) return case pfErr := <-p.errChan: // as of client-go v0.26.1, if the connection is successful at first but then fails, @@ -182,14 +181,14 @@ func (p *PortForwarder) KeepAlive(ctx context.Context) { // // see https://github.com/kubernetes/client-go/commit/d0842249d3b92ea67c446fe273f84fe74ebaed9f // for the relevant change. 
- p.logger.Logf("port forwarder: received error signal: %v. restarting session", pfErr) + log.Printf("port forwarder: received error signal: %v. restarting session", pfErr) p.Stop() if err := p.Forward(ctx); err != nil { - p.logger.Logf("port forwarder: could not restart session: %v. retrying", err) + log.Printf("port forwarder: could not restart session: %v. retrying", err) select { case <-ctx.Done(): - p.logger.Logf("port forwarder: keep alive cancelled: %v", ctx.Err()) + log.Printf("port forwarder: keep alive cancelled: %v", ctx.Err()) return case <-time.After(time.Second): // todo: make configurable? continue diff --git a/test/integration/prometheus/prometheus.go b/test/integration/prometheus/prometheus.go new file mode 100644 index 0000000000..addacd5e6c --- /dev/null +++ b/test/integration/prometheus/prometheus.go @@ -0,0 +1,95 @@ +package prometheus + +import ( + "errors" + "fmt" + "io" + "net/http" + "strings" + + io_prometheus_client "github.com/prometheus/client_model/go" + "github.com/prometheus/common/expfmt" +) + +var ( + errNoMetricFamilyFound = errors.New("no metric family found") + errNoMetricFound = errors.New("no metric found") +) + +// GetMetrics issues a web request to the specified url and parses any metrics returned +func GetMetrics(url string) (map[string]*io_prometheus_client.MetricFamily, error) { + client := http.Client{} + resp, err := client.Get(url) //nolint + if err != nil { + return nil, fmt.Errorf("HTTP request failed: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("HTTP request failed with status: %v", resp.Status) //nolint:goerr113,gocritic + } + + metrics, err := ParseReaderMetrics(resp.Body) + if err != nil { + return nil, err + } + + return metrics, nil +} + +func ParseReaderMetrics(input io.Reader) (map[string]*io_prometheus_client.MetricFamily, error) { + var parser expfmt.TextParser + return parser.TextToMetricFamilies(input) //nolint +} + +func ParseStringMetrics(input string) (map[string]*io_prometheus_client.MetricFamily, error) { + var parser expfmt.TextParser + reader := strings.NewReader(input) + return parser.TextToMetricFamilies(reader) //nolint +} + +// SelectMetric retrieves a particular metric from a map of MetricFamily based on the name (key) and +// the provided label kv pairs. 
Every label kv pair on the metric must match for it to be returned +// For example, to match the following metric: my_metric{a="1",b="udp"} 7 +// name must be "my_metric", and the map of matchLabels must be exactly {"a": "1", "b": "udp"} +func SelectMetric(metrics map[string]*io_prometheus_client.MetricFamily, name string, matchLabels map[string]string) (*io_prometheus_client.Metric, error) { + metricFamily := metrics[name] + if metricFamily == nil { + return nil, errNoMetricFamilyFound + } + + // gets all label combinations and their values and then checks each one + metricList := metricFamily.GetMetric() + for _, metric := range metricList { + // number of kv pairs in this label must match expected + if len(metric.GetLabel()) != len(matchLabels) { + continue + } + + // search this label to see if it matches all our expected labels + allKVMatch := true + for _, kvPair := range metric.GetLabel() { + if matchLabels[kvPair.GetName()] != kvPair.GetValue() { + allKVMatch = false + break + } + } + + // metric with label that matches all kv pairs + if allKVMatch { + return metric, nil + } + } + return nil, errNoMetricFound +} + +// GetMetric is a convenience function to issue a web request to the specified url and then +// select a particular metric that exactly matches the name and labels. The metric is then returned +// and values can be retrieved based on what type of metric it is, for example .GetCounter().GetValue() +func GetMetric(url, name string, labels map[string]string) (*io_prometheus_client.Metric, error) { + metrics, err := GetMetrics(url) + if err != nil { + return nil, err + } + return SelectMetric(metrics, name, labels) +} diff --git a/test/integration/prometheus/prometheus_test.go b/test/integration/prometheus/prometheus_test.go new file mode 100644 index 0000000000..42d6408f0b --- /dev/null +++ b/test/integration/prometheus/prometheus_test.go @@ -0,0 +1,134 @@ +package prometheus + +import ( + "testing" + + io_prometheus_client "github.com/prometheus/client_model/go" + "github.com/stretchr/testify/require" +) + +func TestGetMetricValue(t *testing.T) { + metrics := map[string]*io_prometheus_client.MetricFamily{ + "test_metric_0": { + Metric: []*io_prometheus_client.Metric{ + { + Label: []*io_prometheus_client.LabelPair{ + {Name: ptr("scenario"), Value: ptr("local")}, + {Name: ptr("sku"), Value: ptr("large")}, + }, + Counter: &io_prometheus_client.Counter{Value: ptrFloat(30.0)}, + }, + }, + }, + "test_metric_1": { + Metric: []*io_prometheus_client.Metric{ + { + Label: []*io_prometheus_client.LabelPair{ + {Name: ptr("instance"), Value: ptr("localhost")}, + {Name: ptr("job"), Value: ptr("test")}, + }, + Counter: &io_prometheus_client.Counter{Value: ptrFloat(42.5)}, + }, + { + Label: []*io_prometheus_client.LabelPair{ + {Name: ptr("instance"), Value: ptr("remotehost")}, + {Name: ptr("job"), Value: ptr("test")}, + }, + Counter: &io_prometheus_client.Counter{Value: ptrFloat(55.0)}, + }, + }, + }, + } + + tests := []struct { + name string + metricName string + target map[string]string + expectedVal float64 + expectErr bool + }{ + { + name: "Match metric", + metricName: "test_metric_0", + target: map[string]string{ + "sku": "large", + "scenario": "local", + }, + expectedVal: 30, + expectErr: false, + }, + { + name: "Match first metric", + metricName: "test_metric_1", + target: map[string]string{ + "instance": "localhost", + "job": "test", + }, + expectedVal: 42.5, + expectErr: false, + }, + { + name: "Match second metric", + metricName: "test_metric_1", + target: map[string]string{ + 
"instance": "remotehost", + "job": "test", + }, + expectedVal: 55.0, + expectErr: false, + }, + { + name: "Metric not found", + metricName: "non_existent_metric", + target: map[string]string{"instance": "localhost"}, + expectErr: true, + }, + { + name: "No matching labels", + metricName: "test_metric_1", + target: map[string]string{ + "instance": "missing_host", + }, + expectErr: true, + }, + { + name: "No exact match", + metricName: "test_metric_1", + target: map[string]string{ + "instance": "localhost", + "job": "foo", + }, + expectErr: true, + }, + { + name: "Different number of labels", + metricName: "test_metric_1", + target: map[string]string{ + "instance": "localhost", + }, + expectErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + metric, err := SelectMetric(metrics, tt.metricName, tt.target) + val := metric.GetCounter().GetValue() + + if tt.expectErr { + require.Error(t, err) + } else { + require.NoError(t, err) + require.InDelta(t, tt.expectedVal, val, 0.01) + } + }) + } +} + +func ptr(s string) *string { + return &s +} + +func ptrFloat(f float64) *float64 { + return &f +} diff --git a/test/internal/datapath/datapath_win.go b/test/internal/datapath/datapath_win.go index 39d92e3571..504aaecc16 100644 --- a/test/internal/datapath/datapath_win.go +++ b/test/internal/datapath/datapath_win.go @@ -19,7 +19,7 @@ var ipv6PrefixPolicy = []string{"powershell", "-c", "curl.exe", "-6", "-v", "www func podTest(ctx context.Context, clientset *kubernetes.Clientset, srcPod *apiv1.Pod, cmd []string, rc *restclient.Config, passFunc func(string) error) error { logrus.Infof("podTest() - %v %v", srcPod.Name, cmd) - output, err := acnk8s.ExecCmdOnPod(ctx, clientset, srcPod.Namespace, srcPod.Name, "", cmd, rc) + output, _, err := acnk8s.ExecCmdOnPod(ctx, clientset, srcPod.Namespace, srcPod.Name, "", cmd, rc, true) if err != nil { return errors.Wrapf(err, "failed to execute command on pod: %v", srcPod.Name) } diff --git a/test/internal/kubernetes/utils.go b/test/internal/kubernetes/utils.go index cee4669c83..c3deaec726 100644 --- a/test/internal/kubernetes/utils.go +++ b/test/internal/kubernetes/utils.go @@ -13,6 +13,8 @@ import ( "github.com/Azure/azure-container-networking/crd/multitenancy/api/v1alpha1" "github.com/Azure/azure-container-networking/test/internal/retry" + ciliumv2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + cilium "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned" "github.com/pkg/errors" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -200,10 +202,69 @@ func MustSetUpRBAC(ctx context.Context, clientset *kubernetes.Clientset, rolePat mustCreateRoleBinding(ctx, roleBindings, roleBinding) } -func MustSetupConfigMap(ctx context.Context, clientset *kubernetes.Clientset, configMapPath string) { +func MustSetupConfigMap(ctx context.Context, clientset *kubernetes.Clientset, configMapPath string) (corev1.ConfigMap, func()) { // nolint cm := mustParseConfigMap(configMapPath) configmaps := clientset.CoreV1().ConfigMaps(cm.Namespace) mustCreateConfigMap(ctx, configmaps, cm) + return cm, func() { + MustDeleteConfigMap(ctx, configmaps, cm) + } +} + +// MustSetupDaemonset is a convenience function to directly apply the daemonset at dsPath to the cluster, +// returning the parsed daemonset struct and a cleanup function in the process +func MustSetupDaemonset(ctx context.Context, clientset *kubernetes.Clientset, dsPath string) (appsv1.DaemonSet, func()) { // nolint + ds := MustParseDaemonSet(dsPath) + dsClient := 
clientset.AppsV1().DaemonSets(ds.Namespace) + MustCreateDaemonset(ctx, dsClient, ds) + return ds, func() { + MustDeleteDaemonset(ctx, dsClient, ds) + } +} + +func MustSetupDeployment(ctx context.Context, clientset *kubernetes.Clientset, depPath string) (appsv1.Deployment, func()) { // nolint + dep := MustParseDeployment(depPath) + depClient := clientset.AppsV1().Deployments(dep.Namespace) + MustCreateDeployment(ctx, depClient, dep) + return dep, func() { + MustDeleteDeployment(ctx, depClient, dep) + } +} + +func MustSetupServiceAccount(ctx context.Context, clientset *kubernetes.Clientset, serviceAccountPath string) (corev1.ServiceAccount, func()) { // nolint + sa := mustParseServiceAccount(serviceAccountPath) + saClient := clientset.CoreV1().ServiceAccounts(sa.Namespace) + mustCreateServiceAccount(ctx, saClient, sa) + return sa, func() { + MustDeleteServiceAccount(ctx, saClient, sa) + } +} + +func MustSetupService(ctx context.Context, clientset *kubernetes.Clientset, servicePath string) (corev1.Service, func()) { // nolint + svc := mustParseService(servicePath) + svcClient := clientset.CoreV1().Services(svc.Namespace) + mustCreateService(ctx, svcClient, svc) + return svc, func() { + MustDeleteService(ctx, svcClient, svc) + } +} + +func MustSetupLRP(ctx context.Context, clientset *cilium.Clientset, lrpPath string) (ciliumv2.CiliumLocalRedirectPolicy, func()) { // nolint + lrp := mustParseLRP(lrpPath) + lrpClient := clientset.CiliumV2().CiliumLocalRedirectPolicies(lrp.Namespace) + mustCreateCiliumLocalRedirectPolicy(ctx, lrpClient, lrp) + return lrp, func() { + MustDeleteCiliumLocalRedirectPolicy(ctx, lrpClient, lrp) + } +} + +func MustSetupCNP(ctx context.Context, clientset *cilium.Clientset, cnpPath string) (ciliumv2.CiliumNetworkPolicy, func()) { // nolint + cnp := mustParseCNP(cnpPath) + cnpClient := clientset.CiliumV2().CiliumNetworkPolicies(cnp.Namespace) + mustCreateCiliumNetworkPolicy(ctx, cnpClient, cnp) + return cnp, func() { + MustDeleteCiliumNetworkPolicy(ctx, cnpClient, cnp) + } } func Int32ToPtr(i int32) *int32 { return &i } @@ -427,8 +488,11 @@ func writeToFile(dir, fileName, str string) error { return errors.Wrap(err, "failed to write string") } -func ExecCmdOnPod(ctx context.Context, clientset *kubernetes.Clientset, namespace, podName, containerName string, cmd []string, config *rest.Config) ([]byte, error) { +// ExecCmdOnPod runs the specified command on a particular pod and retries the command on failure if doRetry is set to true +// The function returns the standard output, standard error, and error (if any) in that order +func ExecCmdOnPod(ctx context.Context, clientset *kubernetes.Clientset, namespace, podName, containerName string, cmd []string, config *rest.Config, doRetry bool) ([]byte, []byte, error) { // nolint var result []byte + var errResult []byte execCmdOnPod := func() error { req := clientset.CoreV1().RESTClient().Post(). Resource("pods"). 
@@ -456,19 +520,28 @@ func ExecCmdOnPod(ctx context.Context, clientset *kubernetes.Clientset, namespac Stderr: &stderr, Tty: false, }) + + result = stdout.Bytes() + errResult = stderr.Bytes() + if err != nil { - log.Printf("Error: %v had error %v from command - %v, will retry", podName, err, cmd) + log.Printf("Error: %v had error %v from command - %v", podName, err, cmd) return errors.Wrapf(err, "error in executing command %s", cmd) } if len(stdout.Bytes()) == 0 { log.Printf("Warning: %v had 0 bytes returned from command - %v", podName, cmd) } - result = stdout.Bytes() return nil } - retrier := retry.Retrier{Attempts: ShortRetryAttempts, Delay: RetryDelay} - err := retrier.Do(ctx, execCmdOnPod) - return result, errors.Wrapf(err, "could not execute the cmd %s on %s", cmd, podName) + + var err error + if doRetry { + retrier := retry.Retrier{Attempts: ShortRetryAttempts, Delay: RetryDelay} + err = retrier.Do(ctx, execCmdOnPod) + } else { + err = execCmdOnPod() + } + return result, errResult, errors.Wrapf(err, "could not execute the cmd %s on %s", cmd, podName) } func NamespaceExists(ctx context.Context, clientset *kubernetes.Clientset, namespace string) (bool, error) { @@ -583,7 +656,7 @@ func RestartKubeProxyService(ctx context.Context, clientset *kubernetes.Clientse } privilegedPod := pod.Items[0] // exec into the pod and restart kubeproxy - _, err = ExecCmdOnPod(ctx, clientset, privilegedNamespace, privilegedPod.Name, "", restartKubeProxyCmd, config) + _, _, err = ExecCmdOnPod(ctx, clientset, privilegedNamespace, privilegedPod.Name, "", restartKubeProxyCmd, config, true) if err != nil { return errors.Wrapf(err, "failed to exec into privileged pod %s on node %s", privilegedPod.Name, node.Name) } diff --git a/test/internal/kubernetes/utils_create.go b/test/internal/kubernetes/utils_create.go index 59918f4ff1..26941db945 100644 --- a/test/internal/kubernetes/utils_create.go +++ b/test/internal/kubernetes/utils_create.go @@ -9,6 +9,8 @@ import ( "runtime" "strconv" + ciliumv2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + typedciliumv2 "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2" "github.com/pkg/errors" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -24,11 +26,13 @@ import ( type CNSScenario string const ( - EnvInstallAzilium CNSScenario = "INSTALL_AZILIUM" - EnvInstallAzureVnet CNSScenario = "INSTALL_AZURE_VNET" - EnvInstallOverlay CNSScenario = "INSTALL_OVERLAY" - EnvInstallAzureCNIOverlay CNSScenario = "INSTALL_AZURE_CNI_OVERLAY" - EnvInstallDualStackOverlay CNSScenario = "INSTALL_DUALSTACK_OVERLAY" + EnvInstallAzilium CNSScenario = "INSTALL_AZILIUM" + EnvInstallAzureVnet CNSScenario = "INSTALL_AZURE_VNET" + EnvInstallAzureVnetStateless CNSScenario = "INSTALL_AZURE_VNET_STATELESS" + EnvInstallOverlay CNSScenario = "INSTALL_OVERLAY" + EnvInstallAzureCNIOverlay CNSScenario = "INSTALL_AZURE_CNI_OVERLAY" + EnvInstallDualStackOverlay CNSScenario = "INSTALL_DUALSTACK_OVERLAY" + EnvInstallCNSNodeSubnet CNSScenario = "INSTALL_CNS_NODESUBNET" ) type cnsDetails struct { @@ -49,17 +53,20 @@ type cnsDetails struct { } const ( - envAzureIPAMVersion = "AZURE_IPAM_VERSION" - envCNIVersion = "CNI_VERSION" - envCNSVersion = "CNS_VERSION" - envCNIImageRepo = "CNI_IMAGE_REPO" - envCNSImageRepo = "CNS_IMAGE_REPO" - envAzureIPAMImageRepo = "IPAM_IMAGE_REPO" - EnvInstallCNS = "INSTALL_CNS" - cnsLinuxLabelSelector = "k8s-app=azure-cns" - cnsWindowsLabelSelector = "k8s-app=azure-cns-win" - acnImageRepoURL = "acnpublic.azurecr.io" - mcrImageRepoURL = 
"mcr.microsoft.com/containernetworking" + envAzureIPAMVersion = "AZURE_IPAM_VERSION" + envCNIVersion = "CNI_VERSION" + envCNSVersion = "CNS_VERSION" + envCNIImageRepo = "CNI_IMAGE_REPO" + envCNSImageRepo = "CNS_IMAGE_REPO" + envAzureIPAMImageRepo = "IPAM_IMAGE_REPO" + envCNIImageNameOverride = "CNI_IMAGE_NAME_OVERRIDE" + envCNSImageNameOverride = "CNS_IMAGE_NAME_OVERRIDE" + envIPAMImageNameOverride = "IPAM_IMAGE_NAME_OVERRIDE" + EnvInstallCNS = "INSTALL_CNS" + cnsLinuxLabelSelector = "k8s-app=azure-cns" + cnsWindowsLabelSelector = "k8s-app=azure-cns-win" + acnImageRepoURL = "acnpublic.azurecr.io" + mcrImageRepoURL = "mcr.microsoft.com/containernetworking" ) var imageRepoURL = map[string]string{ @@ -161,6 +168,30 @@ func mustCreateConfigMap(ctx context.Context, cmi typedcorev1.ConfigMapInterface } } +func mustCreateService(ctx context.Context, svci typedcorev1.ServiceInterface, svc corev1.Service) { + MustDeleteService(ctx, svci, svc) + log.Printf("Creating Service %v", svc.Name) + if _, err := svci.Create(ctx, &svc, metav1.CreateOptions{}); err != nil { + panic(errors.Wrap(err, "failed to create service")) + } +} + +func mustCreateCiliumLocalRedirectPolicy(ctx context.Context, lrpClient typedciliumv2.CiliumLocalRedirectPolicyInterface, clrp ciliumv2.CiliumLocalRedirectPolicy) { + MustDeleteCiliumLocalRedirectPolicy(ctx, lrpClient, clrp) + log.Printf("Creating CiliumLocalRedirectPolicy %v", clrp.Name) + if _, err := lrpClient.Create(ctx, &clrp, metav1.CreateOptions{}); err != nil { + panic(errors.Wrap(err, "failed to create cilium local redirect policy")) + } +} + +func mustCreateCiliumNetworkPolicy(ctx context.Context, cnpClient typedciliumv2.CiliumNetworkPolicyInterface, cnp ciliumv2.CiliumNetworkPolicy) { + MustDeleteCiliumNetworkPolicy(ctx, cnpClient, cnp) + log.Printf("Creating CiliumNetworkPolicy %v", cnp.Name) + if _, err := cnpClient.Create(ctx, &cnp, metav1.CreateOptions{}); err != nil { + panic(errors.Wrap(err, "failed to create cilium network policy")) + } +} + func MustScaleDeployment(ctx context.Context, deploymentsClient typedappsv1.DeploymentInterface, deployment appsv1.Deployment, @@ -330,9 +361,11 @@ func initCNSScenarioVars() (map[CNSScenario]map[corev1.OSName]cnsDetails, error) cnsSwiftLinuxConfigMapPath := cnsConfigFolder + "/swiftlinuxconfigmap.yaml" cnsSwiftWindowsConfigMapPath := cnsConfigFolder + "/swiftwindowsconfigmap.yaml" cnsCiliumConfigMapPath := cnsConfigFolder + "/ciliumconfigmap.yaml" + cnsNodeSubnetLinuxConfigMapPath := cnsConfigFolder + "/ciliumnodesubnetconfigmap.yaml" cnsOverlayConfigMapPath := cnsConfigFolder + "/overlayconfigmap.yaml" cnsAzureCNIOverlayLinuxConfigMapPath := cnsConfigFolder + "/azurecnioverlaylinuxconfigmap.yaml" cnsAzureCNIOverlayWindowsConfigMapPath := cnsConfigFolder + "/azurecnioverlaywindowsconfigmap.yaml" + cnsAzureStatelessCNIOverlayWindowsConfigMapPath := cnsConfigFolder + "/azurestatelesscnioverlaywindowsconfigmap.yaml" cnsAzureCNIDualStackLinuxConfigMapPath := cnsConfigFolder + "/azurecnidualstackoverlaylinuxconfigmap.yaml" cnsAzureCNIDualStackWindowsConfigMapPath := cnsConfigFolder + "/azurecnidualstackoverlaywindowsconfigmap.yaml" cnsRolePath := cnsManifestFolder + "/role.yaml" @@ -344,7 +377,13 @@ func initCNSScenarioVars() (map[CNSScenario]map[corev1.OSName]cnsDetails, error) log.Printf("%s not set to expected value \"ACN\", \"MCR\". 
Default to %s", envCNIImageRepo, imageRepoURL["ACN"]) url = imageRepoURL["ACN"] } - initContainerNameCNI := path.Join(url, "azure-cni:") + os.Getenv(envCNIVersion) + + cniImageName := "azure-cni" + if len(os.Getenv(string(envCNIImageNameOverride))) > 1 { + cniImageName = os.Getenv(string(envCNIImageNameOverride)) + } + cniImageName += ":" + initContainerNameCNI := path.Join(url, cniImageName) + os.Getenv(envCNIVersion) log.Printf("CNI init container image - %v", initContainerNameCNI) url, key = imageRepoURL[os.Getenv(string(envAzureIPAMImageRepo))] @@ -352,7 +391,14 @@ func initCNSScenarioVars() (map[CNSScenario]map[corev1.OSName]cnsDetails, error) log.Printf("%s not set to expected value \"ACN\", \"MCR\". Default to %s", envAzureIPAMImageRepo, imageRepoURL["ACN"]) url = imageRepoURL["ACN"] } - initContainerNameIPAM := path.Join(url, "azure-ipam:") + os.Getenv(envAzureIPAMVersion) + + ipamImageName := "azure-ipam" + if len(os.Getenv(string(envIPAMImageNameOverride))) > 1 { + ipamImageName = os.Getenv(string(envIPAMImageNameOverride)) + } + ipamImageName += ":" + + initContainerNameIPAM := path.Join(url, ipamImageName) + os.Getenv(envAzureIPAMVersion) log.Printf("IPAM init container image - %v", initContainerNameIPAM) // cns scenario map @@ -393,6 +439,47 @@ func initCNSScenarioVars() (map[CNSScenario]map[corev1.OSName]cnsDetails, error) installIPMasqAgent: false, }, }, + EnvInstallAzureVnetStateless: { + corev1.Linux: { + daemonsetPath: cnsLinuxDaemonSetPath, + labelSelector: cnsLinuxLabelSelector, + rolePath: cnsRolePath, + roleBindingPath: cnsRoleBindingPath, + clusterRolePath: cnsClusterRolePath, + clusterRoleBindingPath: cnsClusterRoleBindingPath, + serviceAccountPath: cnsServiceAccountPath, + initContainerArgs: []string{ + "deploy", + "azure-vnet", "-o", "/opt/cni/bin/azure-vnet", + "azure-vnet-telemetry", "-o", "/opt/cni/bin/azure-vnet-telemetry", + }, + initContainerName: initContainerNameCNI, + volumes: volumesForAzureCNIOverlayLinux(), + initContainerVolumeMounts: dropgzVolumeMountsForAzureCNIOverlayLinux(), + containerVolumeMounts: cnsVolumeMountsForAzureCNIOverlayLinux(), + configMapPath: cnsAzureCNIOverlayLinuxConfigMapPath, + installIPMasqAgent: true, + }, + corev1.Windows: { + daemonsetPath: cnsWindowsDaemonSetPath, + labelSelector: cnsWindowsLabelSelector, + rolePath: cnsRolePath, + roleBindingPath: cnsRoleBindingPath, + clusterRolePath: cnsClusterRolePath, + clusterRoleBindingPath: cnsClusterRoleBindingPath, + serviceAccountPath: cnsServiceAccountPath, + initContainerArgs: []string{ + "deploy", + "azure-vnet-stateless", "-o", "/k/azurecni/bin/azure-vnet.exe", + }, + initContainerName: initContainerNameCNI, + volumes: volumesForAzureCNIOverlayWindows(), + initContainerVolumeMounts: dropgzVolumeMountsForAzureCNIOverlayWindows(), + containerVolumeMounts: cnsVolumeMountsForAzureCNIOverlayWindows(), + configMapPath: cnsAzureStatelessCNIOverlayWindowsConfigMapPath, + installIPMasqAgent: true, + }, + }, EnvInstallAzilium: { corev1.Linux: { daemonsetPath: cnsLinuxDaemonSetPath, @@ -411,6 +498,24 @@ func initCNSScenarioVars() (map[CNSScenario]map[corev1.OSName]cnsDetails, error) installIPMasqAgent: false, }, }, + EnvInstallCNSNodeSubnet: { + corev1.Linux: { + daemonsetPath: cnsLinuxDaemonSetPath, + labelSelector: cnsLinuxLabelSelector, + rolePath: cnsRolePath, + roleBindingPath: cnsRoleBindingPath, + clusterRolePath: cnsClusterRolePath, + clusterRoleBindingPath: cnsClusterRoleBindingPath, + serviceAccountPath: cnsServiceAccountPath, + initContainerArgs: []string{ + "deploy", + 
"azure-ipam", "-o", "/opt/cni/bin/azure-ipam", + }, + initContainerName: initContainerNameIPAM, + configMapPath: cnsNodeSubnetLinuxConfigMapPath, + installIPMasqAgent: true, + }, + }, EnvInstallOverlay: { corev1.Linux: { daemonsetPath: cnsLinuxDaemonSetPath, @@ -585,7 +690,13 @@ func parseCNSDaemonset(cnsScenarioMap map[CNSScenario]map[corev1.OSName]cnsDetai url = imageRepoURL["ACN"] } - cns.Spec.Template.Spec.Containers[0].Image = path.Join(url, "azure-cns:") + cnsVersion + cnsImageName := "azure-cns" + if len(os.Getenv(string(envCNSImageNameOverride))) > 1 { + cnsImageName = os.Getenv(string(envCNSImageNameOverride)) + } + cnsImageName += ":" + + cns.Spec.Template.Spec.Containers[0].Image = path.Join(url, cnsImageName) + cnsVersion log.Printf("Checking environment scenario") cns.Spec.Template.Spec.InitContainers[0].Image = cnsScenarioDetails.initContainerName diff --git a/test/internal/kubernetes/utils_delete.go b/test/internal/kubernetes/utils_delete.go index acbdf8a375..399167d9c1 100644 --- a/test/internal/kubernetes/utils_delete.go +++ b/test/internal/kubernetes/utils_delete.go @@ -3,6 +3,8 @@ package kubernetes import ( "context" + ciliumv2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + typedciliumv2 "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2" "github.com/pkg/errors" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -47,3 +49,43 @@ func MustDeleteNamespace(ctx context.Context, clienset *kubernetes.Clientset, na } } } + +func MustDeleteConfigMap(ctx context.Context, configMaps typedcorev1.ConfigMapInterface, cm corev1.ConfigMap) { + if err := configMaps.Delete(ctx, cm.Name, metav1.DeleteOptions{}); err != nil { + if !apierrors.IsNotFound(err) { + panic(errors.Wrap(err, "failed to delete config map")) + } + } +} + +func MustDeleteServiceAccount(ctx context.Context, serviceAccounts typedcorev1.ServiceAccountInterface, svcAcct corev1.ServiceAccount) { + if err := serviceAccounts.Delete(ctx, svcAcct.Name, metav1.DeleteOptions{}); err != nil { + if !apierrors.IsNotFound(err) { + panic(errors.Wrap(err, "failed to delete service account")) + } + } +} + +func MustDeleteService(ctx context.Context, services typedcorev1.ServiceInterface, svc corev1.Service) { + if err := services.Delete(ctx, svc.Name, metav1.DeleteOptions{}); err != nil { + if !apierrors.IsNotFound(err) { + panic(errors.Wrap(err, "failed to delete service")) + } + } +} + +func MustDeleteCiliumLocalRedirectPolicy(ctx context.Context, lrpClient typedciliumv2.CiliumLocalRedirectPolicyInterface, clrp ciliumv2.CiliumLocalRedirectPolicy) { + if err := lrpClient.Delete(ctx, clrp.Name, metav1.DeleteOptions{}); err != nil { + if !apierrors.IsNotFound(err) { + panic(errors.Wrap(err, "failed to delete cilium local redirect policy")) + } + } +} + +func MustDeleteCiliumNetworkPolicy(ctx context.Context, cnpClient typedciliumv2.CiliumNetworkPolicyInterface, cnp ciliumv2.CiliumNetworkPolicy) { + if err := cnpClient.Delete(ctx, cnp.Name, metav1.DeleteOptions{}); err != nil { + if !apierrors.IsNotFound(err) { + panic(errors.Wrap(err, "failed to delete cilium network policy")) + } + } +} diff --git a/test/internal/kubernetes/utils_get.go b/test/internal/kubernetes/utils_get.go index a6fecd597e..e47849b952 100644 --- a/test/internal/kubernetes/utils_get.go +++ b/test/internal/kubernetes/utils_get.go @@ -61,3 +61,19 @@ func GetDeploymentAvailableReplicas(ctx context.Context, deploymentsClient typed return deployment.Status.AvailableReplicas, nil } + +func GetService(ctx context.Context, 
clientset *kubernetes.Clientset, namespace, name string) (*corev1.Service, error) { + service, err := clientset.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return service, errors.Wrap(err, "could not get service") + } + return service, nil +} + +func GetConfigmap(ctx context.Context, clientset *kubernetes.Clientset, namespace, name string) (*corev1.ConfigMap, error) { + configmap, err := clientset.CoreV1().ConfigMaps(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return configmap, errors.Wrap(err, "could not get configmap") + } + return configmap, nil +} diff --git a/test/internal/kubernetes/utils_parse.go b/test/internal/kubernetes/utils_parse.go index 950efebe55..fc8f1f61ba 100644 --- a/test/internal/kubernetes/utils_parse.go +++ b/test/internal/kubernetes/utils_parse.go @@ -1,6 +1,7 @@ package kubernetes import ( + ciliumv2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" @@ -53,3 +54,21 @@ func mustParseConfigMap(path string) corev1.ConfigMap { mustParseResource(path, &cm) return cm } + +func mustParseService(path string) corev1.Service { + var svc corev1.Service + mustParseResource(path, &svc) + return svc +} + +func mustParseLRP(path string) ciliumv2.CiliumLocalRedirectPolicy { + var lrp ciliumv2.CiliumLocalRedirectPolicy + mustParseResource(path, &lrp) + return lrp +} + +func mustParseCNP(path string) ciliumv2.CiliumNetworkPolicy { + var cnp ciliumv2.CiliumNetworkPolicy + mustParseResource(path, &cnp) + return cnp +} diff --git a/test/scale/templates/kwok-node.yaml b/test/scale/templates/kwok-node.yaml index 249cc717af..00a72cc6e4 100644 --- a/test/scale/templates/kwok-node.yaml +++ b/test/scale/templates/kwok-node.yaml @@ -6,7 +6,6 @@ metadata: kwok.x-k8s.io/node: fake labels: beta.kubernetes.io/arch: amd64 - beta.kubernetes.io/os: linux kubernetes.io/arch: amd64 kubernetes.io/hostname: kwok-node-INSERT_NUMBER kubernetes.io/os: linux diff --git a/test/validate/linux_validate.go b/test/validate/linux_validate.go index a7a43fbbef..0f38a4c718 100644 --- a/test/validate/linux_validate.go +++ b/test/validate/linux_validate.go @@ -22,7 +22,7 @@ var ( cnsManagedStateFileCmd = []string{"bash", "-c", "cat /var/run/azure-cns/azure-endpoints.json"} azureVnetStateFileCmd = []string{"bash", "-c", "cat /var/run/azure-vnet.json"} azureVnetIpamStateCmd = []string{"bash", "-c", "cat /var/run/azure-vnet-ipam.json"} - ciliumStateFileCmd = []string{"bash", "-c", "cilium endpoint list -o json"} + ciliumStateFileCmd = []string{"cilium", "endpoint", "list", "-o", "json"} cnsCachedAssignedIPStateCmd = []string{"curl", "localhost:10090/debug/ipaddresses", "-d", "{\"IPConfigStateFilter\":[\"Assigned\"]}"} ) @@ -206,24 +206,6 @@ type AzureVnetEndpointInfo struct { PodName string } -func cnsManagedStateFileIps(result []byte) (map[string]string, error) { - var cnsResult CnsManagedState - err := json.Unmarshal(result, &cnsResult) - if err != nil { - return nil, errors.Wrapf(err, "failed to unmarshal cns endpoint list") - } - - cnsPodIps := make(map[string]string) - for _, v := range cnsResult.Endpoints { - for ifName, ip := range v.IfnameToIPMap { - if ifName == "eth0" { - cnsPodIps[ip.IPv4[0].IP.String()] = v.PodName - } - } - } - return cnsPodIps, nil -} - func cnsManagedStateFileDualStackIps(result []byte) (map[string]string, error) { var cnsResult CnsManagedState err := json.Unmarshal(result, &cnsResult) @@ -344,7 +326,7 @@ func (v *Validator) 
validateRestartNetwork(ctx context.Context) error { } privilegedPod := pod.Items[0] // exec into the pod to get the state file - _, err = acnk8s.ExecCmdOnPod(ctx, v.clientset, privilegedNamespace, privilegedPod.Name, "", restartNetworkCmd, v.config) + _, _, err = acnk8s.ExecCmdOnPod(ctx, v.clientset, privilegedNamespace, privilegedPod.Name, "", restartNetworkCmd, v.config, true) if err != nil { return errors.Wrapf(err, "failed to exec into privileged pod %s on node %s", privilegedPod.Name, node.Name) } diff --git a/test/validate/validate.go b/test/validate/validate.go index 32912e8aba..ce9be2b230 100644 --- a/test/validate/validate.go +++ b/test/validate/validate.go @@ -124,7 +124,7 @@ func (v *Validator) ValidateStateFile(ctx context.Context) error { } func (v *Validator) validateIPs(ctx context.Context, stateFileIps stateFileIpsFunc, cmd []string, checkType, namespace, labelSelector, containerName string) error { - log.Printf("Validating %s state file", checkType) + log.Printf("Validating %s state file for %s on %s", checkType, v.cni, v.os) nodes, err := acnk8s.GetNodeListByLabelSelector(ctx, v.clientset, nodeSelectorMap[v.os]) if err != nil { return errors.Wrapf(err, "failed to get node list") @@ -142,7 +142,7 @@ func (v *Validator) validateIPs(ctx context.Context, stateFileIps stateFileIpsFu podName := pod.Items[0].Name // exec into the pod to get the state file log.Printf("Executing command %s on pod %s, container %s", cmd, podName, containerName) - result, err := acnk8s.ExecCmdOnPod(ctx, v.clientset, namespace, podName, containerName, cmd, v.config) + result, _, err := acnk8s.ExecCmdOnPod(ctx, v.clientset, namespace, podName, containerName, cmd, v.config, true) if err != nil { return errors.Wrapf(err, "failed to exec into privileged pod - %s", podName) } @@ -261,3 +261,21 @@ func cnsCacheStateFileIps(result []byte) (map[string]string, error) { } return cnsPodIps, nil } + +func cnsManagedStateFileIps(result []byte) (map[string]string, error) { + var cnsResult CnsManagedState + err := json.Unmarshal(result, &cnsResult) + if err != nil { + return nil, errors.Wrapf(err, "failed to unmarshal cns endpoint list") + } + + cnsPodIps := make(map[string]string) + for _, v := range cnsResult.Endpoints { + for ifName, ip := range v.IfnameToIPMap { + if ifName == "eth0" { + cnsPodIps[ip.IPv4[0].IP.String()] = v.PodName + } + } + } + return cnsPodIps, nil +} diff --git a/test/validate/windows_validate.go b/test/validate/windows_validate.go index 6a69ed6f45..3628c818ac 100644 --- a/test/validate/windows_validate.go +++ b/test/validate/windows_validate.go @@ -23,6 +23,7 @@ var ( hnsNetworkCmd = []string{"powershell", "-c", "Get-HnsNetwork | ConvertTo-Json"} azureVnetCmd = []string{"powershell", "-c", "cat ../../k/azure-vnet.json"} azureVnetIpamCmd = []string{"powershell", "-c", "cat ../../k/azure-vnet-ipam.json"} + cnsWinManagedStateFileCmd = []string{"powershell", "-c", "cat ../../k/azurecns/azure-endpoints.json"} cnsWinCachedAssignedIPStateCmd = []string{ "powershell", "Invoke-WebRequest -Uri 127.0.0.1:10090/debug/ipaddresses", "-Method Post -ContentType application/x-www-form-urlencoded", @@ -78,6 +79,29 @@ var windowsChecksMap = map[string][]check{ cmd: cnsWinCachedAssignedIPStateCmd, }, }, + "stateless": { + { + name: "hns", + stateFileIPs: hnsStateFileIPs, + podLabelSelector: privilegedLabelSelector, + podNamespace: privilegedNamespace, + cmd: hnsEndPointCmd, + }, + { + name: "cns", + stateFileIPs: cnsManagedStateFileIps, + podLabelSelector: privilegedLabelSelector, + podNamespace: 
privilegedNamespace, + cmd: cnsWinManagedStateFileCmd, + }, // cns configmap "ManageEndpointState": true, | Endpoints managed in CNS State File + { + name: "cns cache", + stateFileIPs: cnsCacheStateFileIps, + podLabelSelector: cnsWinLabelSelector, + podNamespace: privilegedNamespace, + cmd: cnsWinCachedAssignedIPStateCmd, + }, + }, } type HNSEndpoint struct { @@ -245,7 +269,7 @@ func validateHNSNetworkState(ctx context.Context, nodes *corev1.NodeList, client } podName := pod.Items[0].Name // exec into the pod to get the state file - result, err := acnk8s.ExecCmdOnPod(ctx, clientset, privilegedNamespace, podName, "", hnsNetworkCmd, restConfig) + result, _, err := acnk8s.ExecCmdOnPod(ctx, clientset, privilegedNamespace, podName, "", hnsNetworkCmd, restConfig, true) if err != nil { return errors.Wrap(err, "failed to exec into privileged pod") } diff --git a/tools/acncli/Dockerfile b/tools/acncli/Dockerfile index 49d523609e..302f5d5e34 100644 --- a/tools/acncli/Dockerfile +++ b/tools/acncli/Dockerfile @@ -1,4 +1,4 @@ -FROM mcr.microsoft.com/oss/go/microsoft/golang:1.21 as build +FROM mcr.microsoft.com/oss/go/microsoft/golang:1.23 as build WORKDIR /go/src/github.com/Azure/azure-container-networking/ ARG VERSION ADD . . diff --git a/tools/acncli/deployment/manager_swift.yaml b/tools/acncli/deployment/manager_swift.yaml index edbdfb1423..58c3424314 100644 --- a/tools/acncli/deployment/manager_swift.yaml +++ b/tools/acncli/deployment/manager_swift.yaml @@ -13,7 +13,7 @@ spec: acn: azure-cni-manager spec: nodeSelector: - "beta.kubernetes.io/os": linux + "kubernetes.io/os": linux tolerations: - effect: NoSchedule key: node-role.kubernetes.io/master diff --git a/tools/azure-npm-to-cilium-validator/README.md b/tools/azure-npm-to-cilium-validator/README.md new file mode 100644 index 0000000000..c7697f28ef --- /dev/null +++ b/tools/azure-npm-to-cilium-validator/README.md @@ -0,0 +1,55 @@ +# Azure NPM to Cilium Validator + +This tool validates the migration from Azure NPM to Cilium. It will provide information on if you can safely proceed with a manual update from Azure NPM to Cilium. It will verify the following checks to determine if the cluster is safe to migrate. + +- NetworkPolicy with endPort +- NetworkPolicy with ipBlock +- NetworkPolicy with named Ports +- NetworkPolicy with Egress Policies (not Allow All) +- Disruption for some Services (LoadBalancer or NodePort) with externalTrafficPolicy=Cluster + +## Prerequisites + +- Go 1.16 or later +- A Kubernetes cluster with Azure NPM installed + +## Installation + +Clone the repository and navigate to the tool directory: + +```bash +git clone https://github.com/Azure/azure-container-networking.git +cd azure-container-networking/tools/azure-npm-to-cilium-validator +``` + +## Setting Up Dependencies + +Initialize the Go module and download dependencies: + +```bash +go mod tidy && go mod vendor +``` + +## Running the Tool + +Run the following command with the path to your kube config file with the cluster you want to validate. + +```bash +go run azure-npm-to-cilium-validator.go --kubeconfig ~/.kube/config +``` + +This will execute the validator and print the migration summary. You can use the `--detailed-migration-summary` flag to get more information on flagged network policies and services as well as total number of network policies, services, and pods on the cluster targeted. 
+
+```bash
+go run azure-npm-to-cilium-validator.go --kubeconfig ~/.kube/config --detailed-migration-summary
+```
+
+## Running Tests
+
+To run the tests for the Azure NPM to Cilium Validator, use the following command in the azure-npm-to-cilium-validator directory:
+
+```bash
+go test .
+```
+
+This will run all the tests in azure-npm-to-cilium-validator_test.go and provide a summary of the test results.
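Before the implementation that follows, a minimal sketch of the first check in action may help. It is illustrative only (the test name, namespace, and policy name are invented); it calls the `getEndportNetworkPolicies` function added below and would live in the validator's own package:

```go
package main

import (
	"testing"

	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// Hypothetical smoke test: a single ingress rule using endPort should be
// the only policy reported by the endPort check.
func TestEndPortIsFlagged(t *testing.T) {
	port := intstr.FromInt(80)
	end := int32(90)
	policies := map[string][]*networkingv1.NetworkPolicy{
		"demo": {
			{
				ObjectMeta: metav1.ObjectMeta{Name: "range-policy"},
				Spec: networkingv1.NetworkPolicySpec{
					Ingress: []networkingv1.NetworkPolicyIngressRule{
						{Ports: []networkingv1.NetworkPolicyPort{{Port: &port, EndPort: &end}}},
					},
				},
			},
		},
	}

	ingress, egress := getEndportNetworkPolicies(policies)
	if len(ingress) != 1 || ingress[0] != "demo/range-policy" || len(egress) != 0 {
		t.Fatalf("expected only demo/range-policy flagged for ingress, got %v and %v", ingress, egress)
	}
}
```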
diff --git a/tools/azure-npm-to-cilium-validator/azure-npm-to-cilium-validator.go b/tools/azure-npm-to-cilium-validator/azure-npm-to-cilium-validator.go
new file mode 100644
index 0000000000..ed5983bf5b
--- /dev/null
+++ b/tools/azure-npm-to-cilium-validator/azure-npm-to-cilium-validator.go
@@ -0,0 +1,646 @@
+package main
+
+import (
+	"context"
+	"flag"
+	"fmt"
+	"log"
+	"os"
+	"time"
+
+	"github.com/Azure/azure-container-networking/npm/metrics"
+	"github.com/olekukonko/tablewriter"
+	corev1 "k8s.io/api/core/v1"
+	networkingv1 "k8s.io/api/networking/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/tools/clientcmd"
+	"k8s.io/klog/v2"
+)
+
+// Note: The operationID is set to a high number so it doesn't conflict with other telemetry
+const scriptMetricOperationID = 10000
+
+// Use this tool to validate if your cluster is ready to migrate from Azure Network Policy Manager (NPM) to Cilium.
+func main() {
+	// Parse the kubeconfig flag
+	kubeconfig := flag.String("kubeconfig", "~/.kube/config", "absolute path to the kubeconfig file")
+	detailedMigrationSummary := flag.Bool("detailed-migration-summary", false, "display flagged network policies/services and total cluster resource count")
+	flag.Parse()
+
+	// Build the Kubernetes client config
+	config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
+	if err != nil {
+		log.Fatalf("Error building kubeconfig: %v", err)
+	}
+
+	// Create a Kubernetes client
+	clientset, err := kubernetes.NewForConfig(config)
+	if err != nil {
+		log.Fatalf("Error creating Kubernetes client: %v", err)
+	}
+
+	// Get namespaces
+	namespaces, err := clientset.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{})
+	if err != nil {
+		log.Fatalf("Error getting namespaces: %v\n", err)
+	}
+
+	// Copy namespaces.Items into a slice of pointers
+	namespacePointers := make([]*corev1.Namespace, len(namespaces.Items))
+	for i := range namespaces.Items {
+		namespacePointers[i] = &namespaces.Items[i]
+	}
+
+	// Store network policies and services in maps
+	policiesByNamespace := make(map[string][]*networkingv1.NetworkPolicy)
+	servicesByNamespace := make(map[string][]*corev1.Service)
+	podsByNamespace := make(map[string][]*corev1.Pod)
+
+	// Iterate over namespaces and store policies/services
+	for _, ns := range namespacePointers {
+		// Get network policies
+		networkPolicies, err := clientset.NetworkingV1().NetworkPolicies(ns.Name).List(context.TODO(), metav1.ListOptions{})
+		if err != nil {
+			fmt.Printf("Error getting network policies in namespace %s: %v\n", ns.Name, err)
+			continue
+		}
+		policiesByNamespace[ns.Name] = make([]*networkingv1.NetworkPolicy, len(networkPolicies.Items))
+		for i := range networkPolicies.Items {
+			policiesByNamespace[ns.Name][i] = &networkPolicies.Items[i]
+		}
+
+		// Get services
+		services, err := clientset.CoreV1().Services(ns.Name).List(context.TODO(), metav1.ListOptions{})
+		if err != nil {
+			fmt.Printf("Error getting services in namespace %s: %v\n", ns.Name, err)
+			continue
+		}
+		servicesByNamespace[ns.Name] = make([]*corev1.Service, len(services.Items))
+		for i := range services.Items {
+			servicesByNamespace[ns.Name][i] = &services.Items[i]
+		}
+
+		// Get pods
+		pods, err := clientset.CoreV1().Pods(ns.Name).List(context.TODO(), metav1.ListOptions{})
+		if err != nil {
+			fmt.Printf("Error getting pods in namespace %s: %v\n", ns.Name, err)
+			continue
+		}
+		podsByNamespace[ns.Name] = make([]*corev1.Pod, len(pods.Items))
+		for i := range pods.Items {
+			podsByNamespace[ns.Name][i] = &pods.Items[i]
+		}
+	}
+
+	// Create telemetry handle
+	// Note: npmVersionNum and imageVersion telemetry is not needed for this tool so they are set to arbitrary values
+	err = metrics.CreateTelemetryHandle(0, "NPM-script-v0.0.1", "014c22bd-4107-459e-8475-67909e96edcb")
+	if err != nil {
+		klog.Infof("CreateTelemetryHandle failed with error %v. AITelemetry is not initialized.", err)
+	}
+
+	// Print the migration summary
+	printMigrationSummary(detailedMigrationSummary, namespaces, policiesByNamespace, servicesByNamespace, podsByNamespace)
+}
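printMigrationSummary below consumes the three namespace-keyed maps built by main. The listing loop above talks to a live cluster; as a minimal sketch (not part of this change, and assuming client-go's fake package at the module's pinned version), the same List-per-namespace pattern can be exercised in-memory:

```go
package main

import (
	"context"
	"testing"

	corev1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

// Hypothetical test: seed a fake clientset with one namespace and one
// policy, then list policies the same way main does.
func TestListPoliciesWithFakeClient(t *testing.T) {
	clientset := fake.NewSimpleClientset(
		&corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "demo"}},
		&networkingv1.NetworkPolicy{ObjectMeta: metav1.ObjectMeta{Name: "deny-all", Namespace: "demo"}},
	)

	policies, err := clientset.NetworkingV1().NetworkPolicies("demo").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		t.Fatalf("listing policies: %v", err)
	}
	if len(policies.Items) != 1 || policies.Items[0].Name != "deny-all" {
		t.Fatalf("unexpected policies: %+v", policies.Items)
	}
}
```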
+
+func printMigrationSummary(
+	detailedMigrationSummary *bool,
+	namespaces *corev1.NamespaceList,
+	policiesByNamespace map[string][]*networkingv1.NetworkPolicy,
+	servicesByNamespace map[string][]*corev1.Service,
+	podsByNamespace map[string][]*corev1.Pod,
+) {
+	// Get the network policies with endports
+	ingressEndportNetworkPolicy, egressEndportNetworkPolicy := getEndportNetworkPolicies(policiesByNamespace)
+
+	// Send endPort telemetry
+	metrics.SendLog(scriptMetricOperationID, fmt.Sprintf("[migration script] Found %d network policies with endPort", len(ingressEndportNetworkPolicy)+len(egressEndportNetworkPolicy)), metrics.DonotPrint)
+
+	// Get the network policies with cidr
+	ingressPoliciesWithCIDR, egressPoliciesWithCIDR := getCIDRNetworkPolicies(policiesByNamespace)
+
+	// Send cidr telemetry
+	metrics.SendLog(scriptMetricOperationID, fmt.Sprintf("[migration script] Found %d network policies with CIDR", len(ingressPoliciesWithCIDR)+len(egressPoliciesWithCIDR)), metrics.DonotPrint)
+
+	// Get the network policies with named ports
+	ingressPoliciesWithNamedPort, egressPoliciesWithNamedPort := getNamedPortPolicies(policiesByNamespace)
+
+	// Send named port telemetry
+	metrics.SendLog(scriptMetricOperationID, fmt.Sprintf("[migration script] Found %d network policies with named port", len(ingressPoliciesWithNamedPort)+len(egressPoliciesWithNamedPort)), metrics.DonotPrint)
+
+	// Get the network policies with egress (excluding allow-all egress)
+	egressPolicies := getEgressPolicies(policiesByNamespace)
+
+	// Send egress telemetry
+	metrics.SendLog(scriptMetricOperationID, fmt.Sprintf("[migration script] Found %d network policies with egress", len(egressPolicies)), metrics.DonotPrint)
+
+	// Get services that have externalTrafficPolicy!=Local that are unsafe (might have traffic disruption)
+	unsafeServices := getUnsafeExternalTrafficPolicyClusterServices(namespaces, servicesByNamespace, policiesByNamespace)
+
+	// Send unsafe services telemetry
+	metrics.SendLog(scriptMetricOperationID, fmt.Sprintf("[migration script] Found %d services with externalTrafficPolicy=Cluster", len(unsafeServices)), metrics.DonotPrint)
+
+	unsafeNetworkPoliciesInCluster := false
+	unsafeServicesInCluster := false
+	if len(ingressEndportNetworkPolicy) > 0 || len(egressEndportNetworkPolicy) > 0 ||
+		len(ingressPoliciesWithCIDR) > 0 || len(egressPoliciesWithCIDR) > 0 ||
+		len(ingressPoliciesWithNamedPort) > 0 || len(egressPoliciesWithNamedPort) > 0 ||
+		len(egressPolicies) > 0 {
+		unsafeNetworkPoliciesInCluster = true
+	}
+	if len(unsafeServices) > 0 {
+		unsafeServicesInCluster = true
+	}
+
+	if unsafeNetworkPoliciesInCluster || unsafeServicesInCluster {
+		// Send cluster unsafe telemetry
+		metrics.SendLog(scriptMetricOperationID, "[migration script] Fails some checks. Unsafe to migrate this cluster", metrics.DonotPrint)
+	} else {
+		// Send cluster safe telemetry
+		metrics.SendLog(scriptMetricOperationID, "[migration script] Passes all checks. Safe to migrate this cluster", metrics.DonotPrint)
+	}
+
+	// Close the metrics before table is rendered and wait one second to prevent formatting issues
+	metrics.Close()
+	time.Sleep(time.Second)
+
+	// Print the migration summary table
+	renderMigrationSummaryTable(ingressEndportNetworkPolicy, egressEndportNetworkPolicy, ingressPoliciesWithCIDR, egressPoliciesWithCIDR, ingressPoliciesWithNamedPort, egressPoliciesWithNamedPort, egressPolicies, unsafeServices)
+
+	// Print the flagged resource table and cluster resource table if the detailed-migration-summary flag is set
+	if *detailedMigrationSummary {
+		if unsafeNetworkPoliciesInCluster {
+			renderFlaggedNetworkPolicyTable(ingressEndportNetworkPolicy, egressEndportNetworkPolicy, ingressPoliciesWithCIDR, egressPoliciesWithCIDR, ingressPoliciesWithNamedPort, egressPoliciesWithNamedPort, egressPolicies)
+		}
+		if unsafeServicesInCluster {
+			renderFlaggedServiceTable(unsafeServices)
+		}
+		renderClusterResourceTable(policiesByNamespace, servicesByNamespace, podsByNamespace)
+	}
+
+	// Print if the cluster is safe to migrate
+	if unsafeNetworkPoliciesInCluster || unsafeServicesInCluster {
+		fmt.Println("\n\033[31m✘ Review above issues before migration.\033[0m")
+		fmt.Println("Please see \033[32maka.ms/azurenpmtocilium\033[0m for instructions on how to evaluate/assess the above warnings marked by ❌.")
+		fmt.Println("NOTE: rerun this script if any modifications (create/update/delete) are made to services or policies.")
+	} else {
+		fmt.Println("\n\033[32m✔ Safe to migrate this cluster.\033[0m")
+		fmt.Println("For more details please see \033[32maka.ms/azurenpmtocilium\033[0m.")
+	}
+}
+
+func renderMigrationSummaryTable(
+	ingressEndportNetworkPolicy,
+	egressEndportNetworkPolicy,
+	ingressPoliciesWithCIDR,
+	egressPoliciesWithCIDR,
+	ingressPoliciesWithNamedPort,
+	egressPoliciesWithNamedPort,
+	egressPolicies,
+	unsafeServices []string,
+) {
+	migrationSummarytable := tablewriter.NewWriter(os.Stdout)
+	migrationSummarytable.SetHeader([]string{"Breaking Change", "Upgrade compatibility", "Count"})
+	migrationSummarytable.SetRowLine(true)
+	if len(ingressEndportNetworkPolicy) == 0 && len(egressEndportNetworkPolicy) == 0 {
+		migrationSummarytable.Append([]string{"NetworkPolicy with endPort", "✅", "0"})
+	} else {
+		migrationSummarytable.Append([]string{"NetworkPolicy with endPort", "❌", fmt.Sprintf("%d", len(ingressEndportNetworkPolicy)+len(egressEndportNetworkPolicy))})
+	}
+	if len(ingressPoliciesWithCIDR) == 0 && len(egressPoliciesWithCIDR) == 0 {
+		migrationSummarytable.Append([]string{"NetworkPolicy with CIDR", "✅", "0"})
+	} else {
+		migrationSummarytable.Append([]string{"NetworkPolicy with CIDR", "❌", fmt.Sprintf("%d", len(ingressPoliciesWithCIDR)+len(egressPoliciesWithCIDR))})
+	}
+	if len(ingressPoliciesWithNamedPort) == 0 && len(egressPoliciesWithNamedPort) == 0 {
+		migrationSummarytable.Append([]string{"NetworkPolicy with Named Port", "✅", "0"})
+	} else {
+		migrationSummarytable.Append([]string{"NetworkPolicy with Named Port", "❌", fmt.Sprintf("%d",
len(ingressPoliciesWithNamedPort)+len(egressPoliciesWithNamedPort))}) + } + if len(egressPolicies) == 0 { + migrationSummarytable.Append([]string{"NetworkPolicy with Egress (Not Allow All Egress)", "✅", "0"}) + } else { + migrationSummarytable.Append([]string{"NetworkPolicy with Egress (Not Allow All Egress)", "❌", fmt.Sprintf("%d", len(egressPolicies))}) + } + if len(unsafeServices) == 0 { + migrationSummarytable.Append([]string{"Disruption for some Services with externalTrafficPolicy=Cluster", "✅", "0"}) + } else { + migrationSummarytable.Append([]string{"Disruption for some Services with externalTrafficPolicy=Cluster", "❌", fmt.Sprintf("%d", len(unsafeServices))}) + } + + fmt.Println("\nMigration Summary:") + migrationSummarytable.Render() +} + +func renderFlaggedNetworkPolicyTable( + ingressEndportNetworkPolicy, + egressEndportNetworkPolicy, + ingressPoliciesWithCIDR, + egressPoliciesWithCIDR, + ingressPoliciesWithNamedPort, + egressPoliciesWithNamedPort, + egressPolicies []string, +) { + flaggedResourceTable := tablewriter.NewWriter(os.Stdout) + flaggedResourceTable.SetHeader([]string{"Network Policy", "NetworkPolicy with endPort", "NetworkPolicy with CIDR", "NetworkPolicy with Named Port", "NetworkPolicy with Egress (Not Allow All Egress)"}) + flaggedResourceTable.SetRowLine(true) + + // Create a map to store the policies and their flags + policyFlags := make(map[string][]string) + + // Helper function to add a flag to a policy + addFlag := func(policy string, flag string) { + if _, exists := policyFlags[policy]; !exists { + policyFlags[policy] = []string{"✅", "✅", "✅", "✅"} + } + switch flag { + case "ingressEndPort": + policyFlags[policy][0] = "❌ (ingress)" + case "egressEndPort": + policyFlags[policy][0] = "❌ (egress)" + case "ingressCIDR": + policyFlags[policy][1] = "❌ (ingress)" + case "egressCIDR": + policyFlags[policy][1] = "❌ (egress)" + case "ingressNamedPort": + policyFlags[policy][2] = "❌ (ingress)" + case "egressNamedPort": + policyFlags[policy][2] = "❌ (egress)" + case "Egress": + policyFlags[policy][3] = "❌" + } + } + + // Add flags for each policy + for _, policy := range ingressEndportNetworkPolicy { + addFlag(policy, "ingressEndPort") + } + for _, policy := range egressEndportNetworkPolicy { + addFlag(policy, "egressEndPort") + } + for _, policy := range ingressPoliciesWithCIDR { + addFlag(policy, "ingressCIDR") + } + for _, policy := range egressPoliciesWithCIDR { + addFlag(policy, "egressCIDR") + } + for _, policy := range ingressPoliciesWithNamedPort { + addFlag(policy, "ingressNamedPort") + } + for _, policy := range egressPoliciesWithNamedPort { + addFlag(policy, "egressNamedPort") + } + for _, policy := range egressPolicies { + addFlag(policy, "Egress") + } + + // Append the policies and their flags to the table + for policy, flags := range policyFlags { + flaggedResourceTable.Append([]string{policy, flags[0], flags[1], flags[2], flags[3]}) + } + + fmt.Println("\nFlagged Network Policies:") + flaggedResourceTable.Render() +} + +func renderFlaggedServiceTable(unsafeServices []string) { + fmt.Println("\nFlagged Services:") + flaggedResourceTable := tablewriter.NewWriter(os.Stdout) + flaggedResourceTable.SetHeader([]string{"Service", "Disruption for some Services with externalTrafficPolicy=Cluster"}) + flaggedResourceTable.SetRowLine(true) + for _, service := range unsafeServices { + flaggedResourceTable.Append([]string{fmt.Sprintf("%s", service), "❌"}) + } + flaggedResourceTable.Render() +} + +func renderClusterResourceTable(policiesByNamespace 
map[string][]*networkingv1.NetworkPolicy, servicesByNamespace map[string][]*corev1.Service, podsByNamespace map[string][]*corev1.Pod) { + resourceTable := tablewriter.NewWriter(os.Stdout) + resourceTable.SetHeader([]string{"Resource", "Count"}) + resourceTable.SetRowLine(true) + + // Count the total number of policies + totalPolicies := 0 + for _, policies := range policiesByNamespace { + totalPolicies += len(policies) + } + resourceTable.Append([]string{"NetworkPolicy", fmt.Sprintf("%d", totalPolicies)}) + + // Count the total number of services + totalServices := 0 + for _, services := range servicesByNamespace { + totalServices += len(services) + } + resourceTable.Append([]string{"Service", fmt.Sprintf("%d", totalServices)}) + + // Count the total number of pods + totalPods := 0 + for _, pods := range podsByNamespace { + totalPods += len(pods) + } + resourceTable.Append([]string{"Pod", fmt.Sprintf("%d", totalPods)}) + + fmt.Println("\nCluster Resources:") + resourceTable.Render() +} + +func getEndportNetworkPolicies(policiesByNamespace map[string][]*networkingv1.NetworkPolicy) (ingressPoliciesWithEndport, egressPoliciesWithEndport []string) { + for namespace, policies := range policiesByNamespace { + for _, policy := range policies { + // Check the ingress field for endport + for _, ingress := range policy.Spec.Ingress { + foundEndPort := checkEndportInPolicyRules(ingress.Ports) + if foundEndPort { + ingressPoliciesWithEndport = append(ingressPoliciesWithEndport, fmt.Sprintf("%s/%s", namespace, policy.Name)) + break + } + } + // Check the egress field for endport + for _, egress := range policy.Spec.Egress { + foundEndPort := checkEndportInPolicyRules(egress.Ports) + if foundEndPort { + egressPoliciesWithEndport = append(egressPoliciesWithEndport, fmt.Sprintf("%s/%s", namespace, policy.Name)) + break + } + } + } + } + return ingressPoliciesWithEndport, egressPoliciesWithEndport +} + +func checkEndportInPolicyRules(ports []networkingv1.NetworkPolicyPort) bool { + for _, port := range ports { + if port.EndPort != nil { + return true + } + } + return false +} + +func getCIDRNetworkPolicies(policiesByNamespace map[string][]*networkingv1.NetworkPolicy) (ingressPoliciesWithCIDR, egressPoliciesWithCIDR []string) { + for namespace, policies := range policiesByNamespace { + for _, policy := range policies { + // Check the ingress field for cidr + for _, ingress := range policy.Spec.Ingress { + foundCIDRIngress := checkCIDRInPolicyRules(ingress.From) + if foundCIDRIngress { + ingressPoliciesWithCIDR = append(ingressPoliciesWithCIDR, fmt.Sprintf("%s/%s", namespace, policy.Name)) + break + } + } + // Check the egress field for cidr + for _, egress := range policy.Spec.Egress { + foundCIDREgress := checkCIDRInPolicyRules(egress.To) + if foundCIDREgress { + egressPoliciesWithCIDR = append(egressPoliciesWithCIDR, fmt.Sprintf("%s/%s", namespace, policy.Name)) + break + } + } + } + } + return ingressPoliciesWithCIDR, egressPoliciesWithCIDR +} + +// Check for CIDR in ingress or egress rules +func checkCIDRInPolicyRules(to []networkingv1.NetworkPolicyPeer) bool { + for _, toRule := range to { + if toRule.IPBlock != nil && toRule.IPBlock.CIDR != "" { + return true + } + } + return false +} + +func getNamedPortPolicies(policiesByNamespace map[string][]*networkingv1.NetworkPolicy) (ingressPoliciesWithNamedPort, egressPoliciesWithNamedPort []string) { + for namespace, policies := range policiesByNamespace { + for _, policy := range policies { + // Check the ingress field for named port + for _, ingress := 
range policy.Spec.Ingress {
+				if checkNamedPortInPolicyRules(ingress.Ports) {
+					ingressPoliciesWithNamedPort = append(ingressPoliciesWithNamedPort, fmt.Sprintf("%s/%s", namespace, policy.Name))
+					break
+				}
+			}
+			// Check the egress field for named port
+			for _, egress := range policy.Spec.Egress {
+				if checkNamedPortInPolicyRules(egress.Ports) {
+					egressPoliciesWithNamedPort = append(egressPoliciesWithNamedPort, fmt.Sprintf("%s/%s", namespace, policy.Name))
+					break
+				}
+			}
+		}
+	}
+	return ingressPoliciesWithNamedPort, egressPoliciesWithNamedPort
+}
+
+func checkNamedPortInPolicyRules(ports []networkingv1.NetworkPolicyPort) bool {
+	for _, port := range ports {
+		// If port is a string it is a named port
+		if port.Port.Type == intstr.String {
+			return true
+		}
+	}
+	return false
+}
+
+func getEgressPolicies(policiesByNamespace map[string][]*networkingv1.NetworkPolicy) []string {
+	var egressPolicies []string
+	for namespace, policies := range policiesByNamespace {
+		for _, policy := range policies {
+			for _, policyType := range policy.Spec.PolicyTypes {
+				// If the policy is an egress type and has no egress rules it is a deny-all, so flag it
+				if policyType == networkingv1.PolicyTypeEgress && len(policy.Spec.Egress) == 0 {
+					egressPolicies = append(egressPolicies, fmt.Sprintf("%s/%s", namespace, policy.Name))
+					break
+				}
+			}
+			for _, egress := range policy.Spec.Egress {
+				// If the policy has an egress rule that is not an allow-all, flag it
+				if len(egress.To) > 0 || len(egress.Ports) > 0 {
+					egressPolicies = append(egressPolicies, fmt.Sprintf("%s/%s", namespace, policy.Name))
+					break
+				}
+			}
+		}
+	}
+	return egressPolicies
+}
+
+func getUnsafeExternalTrafficPolicyClusterServices(
+	namespaces *corev1.NamespaceList,
+	servicesByNamespace map[string][]*corev1.Service,
+	policiesByNamespace map[string][]*networkingv1.NetworkPolicy,
+) (unsafeServices []string) {
+	var riskServices, safeServices []string
+
+	for i := range namespaces.Items {
+		namespace := &namespaces.Items[i]
+		// Skip the namespace if it has no ingress policies
+		policyListAtNamespace := policiesByNamespace[namespace.Name]
+		if !hasIngressPolicies(policyListAtNamespace) {
+			continue
+		}
+		serviceListAtNamespace := servicesByNamespace[namespace.Name]
+
+		// Check for services with externalTrafficPolicy=Cluster (applicable if Type=NodePort or Type=LoadBalancer)
+		for _, service := range serviceListAtNamespace {
+			if service.Spec.Type == corev1.ServiceTypeLoadBalancer || service.Spec.Type == corev1.ServiceTypeNodePort {
+				externalTrafficPolicy := service.Spec.ExternalTrafficPolicy
+				// If the service's externalTrafficPolicy is "Cluster", add it to the riskServices list (ExternalTrafficPolicy: "" defaults to Cluster)
+				if externalTrafficPolicy != corev1.ServiceExternalTrafficPolicyTypeLocal {
+					// Any service with externalTrafficPolicy=Cluster is at risk, so we need to eliminate any services that are incorrectly flagged
+					riskServices = append(riskServices, fmt.Sprintf("%s/%s", namespace.Name, service.Name))
+					// Check whether the service's selector is allowed by a network policy so that it can be safely migrated
+					if checkNoServiceRisk(service, policyListAtNamespace) {
+						safeServices = append(safeServices, fmt.Sprintf("%s/%s", namespace.Name, service.Name))
+					}
+				}
+			}
+		}
+	}
+
+	// Remove all the safe services from the services at risk
+	unsafeServices = difference(riskServices, safeServices)
+	return unsafeServices
+}
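The interplay of riskServices and safeServices above is easiest to see on a concrete case. Here is a minimal, illustrative sketch (invented names, not part of this diff) of the happy path, where an allow-all ingress policy makes a Cluster-traffic service safe via the checkNoServiceRisk helper defined below:

```go
package main

import (
	"testing"

	corev1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Hypothetical example: a LoadBalancer service with the default
// externalTrafficPolicy=Cluster starts out "at risk", but an allow-all
// ingress policy (empty podSelector, one empty rule) covers it, so
// checkNoServiceRisk reports it as safe.
func TestAllowAllIngressMakesServiceSafe(t *testing.T) {
	svc := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "demo"},
		Spec: corev1.ServiceSpec{
			Type:     corev1.ServiceTypeLoadBalancer,
			Selector: map[string]string{"app": "web"},
		},
	}
	allowAll := &networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "allow-all-ingress", Namespace: "demo"},
		Spec: networkingv1.NetworkPolicySpec{
			PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress},
			// One empty rule: no From peers and no Ports, i.e. allow all.
			Ingress: []networkingv1.NetworkPolicyIngressRule{{}},
		},
	}

	if !checkNoServiceRisk(svc, []*networkingv1.NetworkPolicy{allowAll}) {
		t.Fatal("expected the allow-all ingress policy to make the service safe")
	}
}
```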
+
+func hasIngressPolicies(policies []*networkingv1.NetworkPolicy) bool {
+	// Check if any policy is ingress (including allow all and deny all)
+	for _, policy := range policies {
+		for _, policyType := range policy.Spec.PolicyTypes {
+			if policyType == networkingv1.PolicyTypeIngress {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+func checkNoServiceRisk(service *corev1.Service, policiesListAtNamespace []*networkingv1.NetworkPolicy) bool {
+	for _, policy := range policiesListAtNamespace {
+		// Skips deny all policies as they do not have any ingress rules
+		for _, ingress := range policy.Spec.Ingress {
+			// Check that each policy label is present in the service labels, meaning the service is targeted by the policy
+			if checkPolicyMatchServiceLabels(service.Spec.Selector, policy.Spec.PodSelector) {
+				// Check if this is an allow-all ingress rule, as it allows all services in the namespace
+				if len(ingress.From) == 0 && len(ingress.Ports) == 0 {
+					return true
+				}
+				// If there are no ingress "from" peers but there are ports in the policy, check if the service is safe
+				if len(ingress.From) == 0 {
+					// If the policy targets all pods (allow all) or only pods that are in the service selector, check if traffic is allowed to all the service's target ports
+					// Note: ingress.Ports.protocol will never be nil if len(ingress.Ports) is greater than 0. It defaults to "TCP" if not set
+					// Note: for loadbalancer services the health probe always hits the service target ports
+					if checkServiceTargetPortMatchPolicyPorts(service.Spec.Ports, ingress.Ports) {
+						return true
+					}
+				}
+			}
+		}
+	}
+	return false
+}
+
+func checkPolicyMatchServiceLabels(serviceLabels map[string]string, podSelector metav1.LabelSelector) bool {
+	// A target-all ingress policy has empty selectors; in that case the service is safe
+	if len(podSelector.MatchLabels) == 0 && len(podSelector.MatchExpressions) == 0 {
+		return true
+	}
+
+	// Return false if the policy has matchExpressions
+	// Note: matchExpressions are not evaluated; validation is based on matchLabels only
+	if len(podSelector.MatchExpressions) > 0 {
+		return false
+	}
+
+	// Return false if the policy has more labels than the service
+	if len(podSelector.MatchLabels) > len(serviceLabels) {
+		return false
+	}
+
+	// Check that each policy label is present in the service labels
+	// Note: a policy with no matchLabels is an allow all policy
+	for policyKey, policyValue := range podSelector.MatchLabels {
+		matchedPolicyLabelToServiceLabel := false
+		for serviceKey, serviceValue := range serviceLabels {
+			if policyKey == serviceKey && policyValue == serviceValue {
+				matchedPolicyLabelToServiceLabel = true
+				break
+			}
+		}
+		if !matchedPolicyLabelToServiceLabel {
+			return false
+		}
+	}
+	return true
+}
+
+func checkServiceTargetPortMatchPolicyPorts(servicePorts []corev1.ServicePort, policyPorts []networkingv1.NetworkPolicyPort) bool {
+	// If the service has no ports then it is at risk
+	if len(servicePorts) == 0 {
+		return false
+	}
+
+	for _, servicePort := range servicePorts {
+		// If the target port is a string then it is a named port and service is at risk
+		if servicePort.TargetPort.Type == intstr.String {
+			return false
+		}
+
+		// If the target port is 0 then it is at risk as Cilium treats port 0 in a special way
+		if servicePort.TargetPort.IntValue() == 0 {
+			return false
+		}
+
+		// Check that all the service's target ports are covered by the policy's ingress ports
+		matchedServiceTargetPortToPolicyPort := false
+		for _, policyPort := range policyPorts {
+			// If the policy only has a protocol, check the protocol against the service
+			// Note: if a network policy on NPM targets just a protocol, it will allow all traffic of that protocol (ignoring the port)
+			// Note: an empty protocol defaults to "TCP" for both policies and services
+			if policyPort.Port == nil && policyPort.Protocol != nil {
+				if string(servicePort.Protocol) == string(*policyPort.Protocol) {
+					matchedServiceTargetPortToPolicyPort = true
+					break
+				}
+				continue
+			}
+			// If the port is a string then it is a named port and it can't be evaluated
+			if policyPort.Port.Type == intstr.String {
+				continue
+			}
+			// Cilium treats port 0 in a special way so skip policies allowing port 0
+			if int(policyPort.Port.IntVal) == 0 {
+				continue
+			}
+			// Check if the service target port and protocol match the policy port and protocol
+			// Note: the service target port will never be undefined, as it defaults to port, which is a required field when Ports is defined
+			// Note: an empty protocol defaults to "TCP" for both policies and services
+			if servicePort.TargetPort.IntValue() == int(policyPort.Port.IntVal) && string(servicePort.Protocol) == string(*policyPort.Protocol) {
+				matchedServiceTargetPortToPolicyPort = true
+				break
+			}
+		}
+		if !matchedServiceTargetPortToPolicyPort {
+			return false
+		}
+	}
+	return true
+}
+
+func difference(slice1, slice2 []string) []string {
+	m := make(map[string]struct{})
+	for _, s := range slice2 {
+		m[s] = struct{}{}
+	}
+	var diff []string
+	for _, s := range slice1 {
+		if _, ok := m[s]; !ok {
+			diff = append(diff, s)
+		}
+	}
+	return diff
+}
diff --git a/tools/azure-npm-to-cilium-validator/azure-npm-to-cilium-validator_test.go b/tools/azure-npm-to-cilium-validator/azure-npm-to-cilium-validator_test.go
new file mode 100644
index 0000000000..3e1f37e0f8
--- /dev/null
+++ b/tools/azure-npm-to-cilium-validator/azure-npm-to-cilium-validator_test.go
@@ -0,0 +1,2904 @@
+package main
+
+import (
+	"testing"
+
+	corev1 "k8s.io/api/core/v1"
+	networkingv1
"k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// Test function for getEndportNetworkPolicies +func TestGetEndportNetworkPolicies(t *testing.T) { + tests := []struct { + name string + policiesByNamespace map[string][]*networkingv1.NetworkPolicy + expectedIngressEndportPolicies []string + expectedEgressEndportPolicies []string + }{ + { + name: "No policies", + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{}, + expectedIngressEndportPolicies: []string{}, + expectedEgressEndportPolicies: []string{}, + }, + { + name: "No endport in policies", + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "no-endport-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + {Port: intstrPtr(intstr.FromInt(80))}, + }, + }, + }, + }, + }, + }, + }, + expectedIngressEndportPolicies: []string{}, + expectedEgressEndportPolicies: []string{}, + }, + { + name: "Ingress endport in policy", + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "ingress-endport-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + {Port: intstrPtr(intstr.FromInt(80)), EndPort: int32Ptr(90)}, + }, + }, + }, + }, + }, + }, + }, + expectedIngressEndportPolicies: []string{"namespace1/ingress-endport-policy"}, + expectedEgressEndportPolicies: []string{}, + }, + { + name: "Egress endport in policy", + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "egress-endport-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + {Port: intstrPtr(intstr.FromInt(80)), EndPort: int32Ptr(90)}, + }, + }, + }, + }, + }, + }, + }, + expectedIngressEndportPolicies: []string{}, + expectedEgressEndportPolicies: []string{"namespace1/egress-endport-policy"}, + }, + { + name: "Both ingress and egress endport in policy", + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "ingress-and-egress-endport-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + {Port: intstrPtr(intstr.FromInt(80)), EndPort: int32Ptr(90)}, + }, + }, + }, + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + {Port: intstrPtr(intstr.FromInt(80)), EndPort: int32Ptr(90)}, + }, + }, + }, + }, + }, + }, + }, + expectedIngressEndportPolicies: []string{"namespace1/ingress-and-egress-endport-policy"}, + expectedEgressEndportPolicies: []string{"namespace1/ingress-and-egress-endport-policy"}, + }, + { + name: "Multiple polices in a namespace with ingress or egress endport", + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "ingress-endport-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + {Port: intstrPtr(intstr.FromInt(80)), EndPort: int32Ptr(90)}, + }, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: 
"egress-endport-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + {Port: intstrPtr(intstr.FromInt(80)), EndPort: int32Ptr(90)}, + }, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "ingress-and-egress-endport-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + {Port: intstrPtr(intstr.FromInt(80)), EndPort: int32Ptr(90)}, + }, + }, + }, + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + {Port: intstrPtr(intstr.FromInt(80)), EndPort: int32Ptr(90)}, + }, + }, + }, + }, + }, + }, + }, + expectedIngressEndportPolicies: []string{"namespace1/ingress-endport-policy", "namespace1/ingress-and-egress-endport-policy"}, + expectedEgressEndportPolicies: []string{"namespace1/egress-endport-policy", "namespace1/ingress-and-egress-endport-policy"}, + }, + { + name: "Multiple polices in multiple namespaces with ingress or egress endport or no endport", + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "ingress-endport-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + {Port: intstrPtr(intstr.FromInt(80)), EndPort: int32Ptr(90)}, + }, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "ingress-and-egress-endport-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + {Port: intstrPtr(intstr.FromInt(80)), EndPort: int32Ptr(90)}, + }, + }, + }, + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + {Port: intstrPtr(intstr.FromInt(80)), EndPort: int32Ptr(90)}, + }, + }, + }, + }, + }, + }, + "namespace2": { + { + ObjectMeta: metav1.ObjectMeta{Name: "egress-endport-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + {Port: intstrPtr(intstr.FromInt(80)), EndPort: int32Ptr(90)}, + }, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "no-endport-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + {Port: intstrPtr(intstr.FromInt(80))}, + }, + }, + }, + }, + }, + }, + "namespace3": { + { + ObjectMeta: metav1.ObjectMeta{Name: "no-endport-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + {Port: intstrPtr(intstr.FromInt(80))}, + }, + }, + }, + }, + }, + }, + }, + expectedIngressEndportPolicies: []string{"namespace1/ingress-endport-policy", "namespace1/ingress-and-egress-endport-policy"}, + expectedEgressEndportPolicies: []string{"namespace1/ingress-and-egress-endport-policy", "namespace2/egress-endport-policy"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ingressPolicies, egressPolicies := getEndportNetworkPolicies(tt.policiesByNamespace) + if !equal(ingressPolicies, tt.expectedIngressEndportPolicies) { + t.Errorf("expected ingress policies %v, got %v", tt.expectedIngressEndportPolicies, ingressPolicies) + } + if !equal(egressPolicies, tt.expectedEgressEndportPolicies) { + t.Errorf("expected egress policies 
%v, got %v", tt.expectedEgressEndportPolicies, egressPolicies) + } + }) + } +} + +func TestGetCIDRNetworkPolicies(t *testing.T) { + tests := []struct { + name string + policiesByNamespace map[string][]*networkingv1.NetworkPolicy + expectedIngressCIDRPolicies []string + expectedEgressCIDRPolicies []string + }{ + { + name: "No policies", + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{}, + expectedIngressCIDRPolicies: []string{}, + expectedEgressCIDRPolicies: []string{}, + }, + { + name: "No CIDR in policies", + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "no-cidr-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + From: []networkingv1.NetworkPolicyPeer{ + {PodSelector: &metav1.LabelSelector{}}, + }, + }, + }, + }, + }, + }, + }, + expectedIngressCIDRPolicies: []string{}, + expectedEgressCIDRPolicies: []string{}, + }, + { + name: "Ingress CIDR in policy", + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "ingress-cidr-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + From: []networkingv1.NetworkPolicyPeer{ + {IPBlock: &networkingv1.IPBlock{CIDR: "192.168.0.0/16"}}, + }, + }, + }, + }, + }, + }, + }, + expectedIngressCIDRPolicies: []string{"namespace1/ingress-cidr-policy"}, + expectedEgressCIDRPolicies: []string{}, + }, + { + name: "Egress CIDR in policy", + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "egress-cidr-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + To: []networkingv1.NetworkPolicyPeer{ + {IPBlock: &networkingv1.IPBlock{CIDR: "192.168.0.0/16"}}, + }, + }, + }, + }, + }, + }, + }, + expectedIngressCIDRPolicies: []string{}, + expectedEgressCIDRPolicies: []string{"namespace1/egress-cidr-policy"}, + }, + { + name: "Both ingress and egress CIDR in policy", + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "ingress-and-egress-cidr-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + From: []networkingv1.NetworkPolicyPeer{ + {IPBlock: &networkingv1.IPBlock{CIDR: "192.168.0.0/16"}}, + }, + }, + }, + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + To: []networkingv1.NetworkPolicyPeer{ + {IPBlock: &networkingv1.IPBlock{CIDR: "192.168.0.0/16"}}, + }, + }, + }, + }, + }, + }, + }, + expectedIngressCIDRPolicies: []string{"namespace1/ingress-and-egress-cidr-policy"}, + expectedEgressCIDRPolicies: []string{"namespace1/ingress-and-egress-cidr-policy"}, + }, + { + name: "Multiple polices in a namespace with ingress or egress CIDR", + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "ingress-cidr-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + From: []networkingv1.NetworkPolicyPeer{ + {IPBlock: &networkingv1.IPBlock{CIDR: "192.168.0.0/16"}}, + }, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "egress-cidr-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + To: []networkingv1.NetworkPolicyPeer{ + {IPBlock: &networkingv1.IPBlock{CIDR: "192.168.0.0/16"}}, 
+ }, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "ingress-and-egress-cidr-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + From: []networkingv1.NetworkPolicyPeer{ + {IPBlock: &networkingv1.IPBlock{CIDR: "192.168.0.0/16"}}, + }, + }, + }, + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + To: []networkingv1.NetworkPolicyPeer{ + {IPBlock: &networkingv1.IPBlock{CIDR: "192.168.0.0/16"}}, + }, + }, + }, + }, + }, + }, + }, + expectedIngressCIDRPolicies: []string{"namespace1/ingress-cidr-policy", "namespace1/ingress-and-egress-cidr-policy"}, + expectedEgressCIDRPolicies: []string{"namespace1/egress-cidr-policy", "namespace1/ingress-and-egress-cidr-policy"}, + }, + { + name: "Multiple polices in multiple namespaces with ingress or egress CIDR or no CIDR", + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "ingress-cidr-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + From: []networkingv1.NetworkPolicyPeer{ + {IPBlock: &networkingv1.IPBlock{CIDR: "192.168.0.0/16"}}, + }, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "ingress-and-egress-cidr-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + From: []networkingv1.NetworkPolicyPeer{ + {IPBlock: &networkingv1.IPBlock{CIDR: "192.168.0.0/16"}}, + }, + }, + }, + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + To: []networkingv1.NetworkPolicyPeer{ + {IPBlock: &networkingv1.IPBlock{CIDR: "192.168.0.0/16"}}, + }, + }, + }, + }, + }, + }, + "namespace2": { + { + ObjectMeta: metav1.ObjectMeta{Name: "egress-cidr-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + To: []networkingv1.NetworkPolicyPeer{ + {IPBlock: &networkingv1.IPBlock{CIDR: "10.0.0.0/8"}}, + }, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "no-cidr-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + From: []networkingv1.NetworkPolicyPeer{ + {PodSelector: &metav1.LabelSelector{}}, + }, + }, + }, + }, + }, + }, + "namespace3": { + { + ObjectMeta: metav1.ObjectMeta{Name: "no-cidr-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + From: []networkingv1.NetworkPolicyPeer{ + {PodSelector: &metav1.LabelSelector{}}, + }, + }, + }, + }, + }, + }, + }, + expectedIngressCIDRPolicies: []string{"namespace1/ingress-cidr-policy", "namespace1/ingress-and-egress-cidr-policy"}, + expectedEgressCIDRPolicies: []string{"namespace2/egress-cidr-policy", "namespace1/ingress-and-egress-cidr-policy"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ingressPolicies, egressPolicies := getCIDRNetworkPolicies(tt.policiesByNamespace) + if !equal(ingressPolicies, tt.expectedIngressCIDRPolicies) { + t.Errorf("expected ingress policies %v, got %v", tt.expectedIngressCIDRPolicies, ingressPolicies) + } + if !equal(egressPolicies, tt.expectedEgressCIDRPolicies) { + t.Errorf("expected egress policies %v, got %v", tt.expectedEgressCIDRPolicies, egressPolicies) + } + }) + } +} + +func TestGetNamedPortPolicies(t *testing.T) { + tests := []struct { + name string + policiesByNamespace map[string][]*networkingv1.NetworkPolicy + expectedIngressCIDRPolicies []string + expectedEgressCIDRPolicies []string + }{ + { + name: "No policies", + 
policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{}, + expectedIngressCIDRPolicies: []string{}, + expectedEgressCIDRPolicies: []string{}, + }, + { + name: "No named port in policies", + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "no-cidr-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + From: []networkingv1.NetworkPolicyPeer{ + {PodSelector: &metav1.LabelSelector{}}, + }, + }, + }, + }, + }, + }, + }, + expectedIngressCIDRPolicies: []string{}, + expectedEgressCIDRPolicies: []string{}, + }, + { + name: "Ingress named port in policy", + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "ingress-named-port-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + { + Port: intstrPtr(intstr.FromString("http")), + Protocol: func() *corev1.Protocol { + protocol := corev1.ProtocolTCP + return &protocol + }(), + }, + }, + }, + }, + }, + }, + }, + }, + expectedIngressCIDRPolicies: []string{"namespace1/ingress-named-port-policy"}, + expectedEgressCIDRPolicies: []string{}, + }, + { + name: "Ingress int port in policy", + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "ingress-int-port-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + { + Port: intstrPtr(intstr.FromInt(80)), + Protocol: func() *corev1.Protocol { + protocol := corev1.ProtocolTCP + return &protocol + }(), + }, + }, + }, + }, + }, + }, + }, + }, + expectedIngressCIDRPolicies: []string{}, + expectedEgressCIDRPolicies: []string{}, + }, + { + name: "Egress named port in policy", + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "egress-named-port-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + { + Port: intstrPtr(intstr.FromString("http")), + Protocol: func() *corev1.Protocol { + protocol := corev1.ProtocolTCP + return &protocol + }(), + }, + }, + }, + }, + }, + }, + }, + }, + expectedIngressCIDRPolicies: []string{}, + expectedEgressCIDRPolicies: []string{"namespace1/egress-named-port-policy"}, + }, + { + name: "Egress int port in policy", + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "egress-int-port-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + { + Port: intstrPtr(intstr.FromInt(80)), + Protocol: func() *corev1.Protocol { + protocol := corev1.ProtocolTCP + return &protocol + }(), + }, + }, + }, + }, + }, + }, + }, + }, + expectedIngressCIDRPolicies: []string{}, + expectedEgressCIDRPolicies: []string{}, + }, + { + name: "Both ingress and egress name ports in policy", + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "ingress-and-egress-named-port-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + { + Port: 
intstrPtr(intstr.FromString("http")), + Protocol: func() *corev1.Protocol { + protocol := corev1.ProtocolTCP + return &protocol + }(), + }, + }, + }, + }, + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + { + Port: intstrPtr(intstr.FromString("http")), + Protocol: func() *corev1.Protocol { + protocol := corev1.ProtocolTCP + return &protocol + }(), + }, + }, + }, + }, + }, + }, + }, + }, + expectedIngressCIDRPolicies: []string{"namespace1/ingress-and-egress-named-port-policy"}, + expectedEgressCIDRPolicies: []string{"namespace1/ingress-and-egress-named-port-policy"}, + }, + { + name: "Multiple polices in a namespace with ingress or egress named ports", + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "ingress-named-port-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + { + Port: intstrPtr(intstr.FromString("http")), + Protocol: func() *corev1.Protocol { + protocol := corev1.ProtocolTCP + return &protocol + }(), + }, + }, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "egress-named-port-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + { + Port: intstrPtr(intstr.FromString("http")), + Protocol: func() *corev1.Protocol { + protocol := corev1.ProtocolTCP + return &protocol + }(), + }, + }, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "ingress-and-egress-named-port-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + { + Port: intstrPtr(intstr.FromString("http")), + Protocol: func() *corev1.Protocol { + protocol := corev1.ProtocolTCP + return &protocol + }(), + }, + }, + }, + }, + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + { + Port: intstrPtr(intstr.FromString("http")), + Protocol: func() *corev1.Protocol { + protocol := corev1.ProtocolTCP + return &protocol + }(), + }, + }, + }, + }, + }, + }, + }, + }, + expectedIngressCIDRPolicies: []string{"namespace1/ingress-named-port-policy", "namespace1/ingress-and-egress-named-port-policy"}, + expectedEgressCIDRPolicies: []string{"namespace1/egress-named-port-policy", "namespace1/ingress-and-egress-named-port-policy"}, + }, + { + name: "Multiple polices in multiple namespaces with ingress or egress CIDR or no CIDR", + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "ingress-named-port-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + { + Port: intstrPtr(intstr.FromString("http")), + Protocol: func() *corev1.Protocol { + protocol := corev1.ProtocolTCP + return &protocol + }(), + }, + }, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "ingress-and-egress-named-port-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + { + Port: intstrPtr(intstr.FromString("http")), + Protocol: func() *corev1.Protocol { + protocol := corev1.ProtocolTCP + return &protocol + }(), + }, + }, + }, + }, + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + Ports: 
[]networkingv1.NetworkPolicyPort{ + { + Port: intstrPtr(intstr.FromString("http")), + Protocol: func() *corev1.Protocol { + protocol := corev1.ProtocolTCP + return &protocol + }(), + }, + }, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "egress-int-port-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + { + Port: intstrPtr(intstr.FromInt(80)), + Protocol: func() *corev1.Protocol { + protocol := corev1.ProtocolTCP + return &protocol + }(), + }, + }, + }, + }, + }, + }, + }, + "namespace2": { + { + ObjectMeta: metav1.ObjectMeta{Name: "egress-named-port-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + { + Port: intstrPtr(intstr.FromString("http")), + Protocol: func() *corev1.Protocol { + protocol := corev1.ProtocolTCP + return &protocol + }(), + }, + }, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "no-named-port-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + From: []networkingv1.NetworkPolicyPeer{ + {PodSelector: &metav1.LabelSelector{}}, + }, + }, + }, + }, + }, + }, + "namespace3": { + { + ObjectMeta: metav1.ObjectMeta{Name: "no-named-port-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + From: []networkingv1.NetworkPolicyPeer{ + {PodSelector: &metav1.LabelSelector{}}, + }, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "ingress-int-port-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + { + Port: intstrPtr(intstr.FromInt(80)), + Protocol: func() *corev1.Protocol { + protocol := corev1.ProtocolTCP + return &protocol + }(), + }, + }, + }, + }, + }, + }, + }, + }, + expectedIngressCIDRPolicies: []string{"namespace1/ingress-named-port-policy", "namespace1/ingress-and-egress-named-port-policy"}, + expectedEgressCIDRPolicies: []string{"namespace2/egress-named-port-policy", "namespace1/ingress-and-egress-named-port-policy"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ingressPolicies, egressPolicies := getNamedPortPolicies(tt.policiesByNamespace) + if !equal(ingressPolicies, tt.expectedIngressCIDRPolicies) { + t.Errorf("expected ingress policies %v, got %v", tt.expectedIngressCIDRPolicies, ingressPolicies) + } + if !equal(egressPolicies, tt.expectedEgressCIDRPolicies) { + t.Errorf("expected egress policies %v, got %v", tt.expectedEgressCIDRPolicies, egressPolicies) + } + }) + } +} + +func TestGetEgressPolicies(t *testing.T) { + tests := []struct { + name string + policiesByNamespace map[string][]*networkingv1.NetworkPolicy + expectedEgressPolicies []string + }{ + { + name: "No policies", + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{}, + expectedEgressPolicies: []string{}, + }, + { + name: "No egress in policies", + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "no-egress-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + From: []networkingv1.NetworkPolicyPeer{ + {PodSelector: &metav1.LabelSelector{}}, + }, + }, + }, + }, + }, + }, + }, + expectedEgressPolicies: []string{}, + }, + { + name: "Allow all egress policy", + policiesByNamespace: 
map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "allow-all-egress-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + PolicyTypes: []networkingv1.PolicyType{"Egress"}, + Egress: []networkingv1.NetworkPolicyEgressRule{ + {}, + }, + }, + }, + }, + }, + expectedEgressPolicies: []string{}, + }, + { + name: "Deny all egress policy", + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "deny-all-egress-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + PolicyTypes: []networkingv1.PolicyType{"Egress"}, + }, + }, + }, + }, + expectedEgressPolicies: []string{"namespace1/deny-all-egress-policy"}, + }, + { + name: "Egress policy with To field", + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "egress-to-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + To: []networkingv1.NetworkPolicyPeer{ + {PodSelector: &metav1.LabelSelector{}}, + }, + }, + }, + }, + }, + }, + }, + expectedEgressPolicies: []string{"namespace1/egress-to-policy"}, + }, + { + name: "Egress policy with Ports field", + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "egress-ports-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + {Port: intstrPtr(intstr.FromInt(80))}, + }, + }, + }, + }, + }, + }, + }, + expectedEgressPolicies: []string{"namespace1/egress-ports-policy"}, + }, + { + name: "Egress policy with both To and Ports fields", + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "egress-to-and-ports-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + To: []networkingv1.NetworkPolicyPeer{ + {PodSelector: &metav1.LabelSelector{}}, + }, + Ports: []networkingv1.NetworkPolicyPort{ + {Port: intstrPtr(intstr.FromInt(80))}, + }, + }, + }, + }, + }, + }, + }, + expectedEgressPolicies: []string{"namespace1/egress-to-and-ports-policy"}, + }, + { + name: "Multiple egress policies in a namespace with To or Port fields", + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "egress-to-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + To: []networkingv1.NetworkPolicyPeer{ + {PodSelector: &metav1.LabelSelector{}}, + }, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "egress-ports-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + {Port: intstrPtr(intstr.FromInt(80))}, + }, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "egress-to-and-ports-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + To: []networkingv1.NetworkPolicyPeer{ + {PodSelector: &metav1.LabelSelector{}}, + }, + Ports: []networkingv1.NetworkPolicyPort{ + {Port: intstrPtr(intstr.FromInt(80))}, + }, + }, + }, + }, + }, + }, + }, + expectedEgressPolicies: []string{"namespace1/egress-to-policy", "namespace1/egress-ports-policy", "namespace1/egress-to-and-ports-policy"}, + }, + { + name: "Multiple egress policies in multiple namespaces 
with To or Port fields or no egress", + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "egress-to-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + To: []networkingv1.NetworkPolicyPeer{ + {PodSelector: &metav1.LabelSelector{}}, + }, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "egress-to-and-ports-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + To: []networkingv1.NetworkPolicyPeer{ + {PodSelector: &metav1.LabelSelector{}}, + }, + Ports: []networkingv1.NetworkPolicyPort{ + {Port: intstrPtr(intstr.FromInt(80))}, + }, + }, + }, + }, + }, + }, + "namespace2": { + { + ObjectMeta: metav1.ObjectMeta{Name: "egress-ports-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + {Port: intstrPtr(intstr.FromInt(80))}, + }, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "no-egress-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + From: []networkingv1.NetworkPolicyPeer{ + {PodSelector: &metav1.LabelSelector{}}, + }, + }, + }, + }, + }, + }, + "namespace3": { + { + ObjectMeta: metav1.ObjectMeta{Name: "egress-to-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + To: []networkingv1.NetworkPolicyPeer{ + {PodSelector: &metav1.LabelSelector{}}, + }, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "allow-all-egress-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + PolicyTypes: []networkingv1.PolicyType{"Egress"}, + Egress: []networkingv1.NetworkPolicyEgressRule{ + {}, + }, + }, + }, + }, + "namespace4": { + { + ObjectMeta: metav1.ObjectMeta{Name: "no-egress-policy"}, + Spec: networkingv1.NetworkPolicySpec{ + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + From: []networkingv1.NetworkPolicyPeer{ + {PodSelector: &metav1.LabelSelector{}}, + }, + }, + }, + }, + }, + }, + }, + expectedEgressPolicies: []string{"namespace1/egress-to-policy", "namespace1/egress-to-and-ports-policy", "namespace2/egress-ports-policy", "namespace3/egress-to-policy"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + egressPolicies := getEgressPolicies(tt.policiesByNamespace) + if !equal(egressPolicies, tt.expectedEgressPolicies) { + t.Errorf("expected egress policies %v, got %v", tt.expectedEgressPolicies, egressPolicies) + } + }) + } +} + +func TestGetExternalTrafficPolicyClusterServices(t *testing.T) { + tests := []struct { + name string + namespaces *corev1.NamespaceList + servicesByNamespace map[string][]*corev1.Service + policiesByNamespace map[string][]*networkingv1.NetworkPolicy + expectedUnsafeServices []string + }{ + // Scenarios where there are no LoadBalancer or NodePort services + { + name: "No namespaces", + namespaces: &corev1.NamespaceList{ + Items: []corev1.Namespace{}, + }, + servicesByNamespace: map[string][]*corev1.Service{}, + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{}, + expectedUnsafeServices: []string{}, + }, + { + name: "Namespace with no policies and services", + namespaces: &corev1.NamespaceList{ + Items: []corev1.Namespace{ + {ObjectMeta: metav1.ObjectMeta{Name: "namespace1"}}, + }, + }, + servicesByNamespace: map[string][]*corev1.Service{ + "namespace1": {}, + }, + policiesByNamespace: 
map[string][]*networkingv1.NetworkPolicy{ + "namespace1": {}, + }, + expectedUnsafeServices: []string{}, + }, + // Scenarios where there are LoadBalancer or NodePort services but externalTrafficPolicy is not Cluster + { + name: "LoadBalancer service with externalTrafficPolicy=Local with no selector and a deny all ingress policy with no selector", + namespaces: &corev1.NamespaceList{ + Items: []corev1.Namespace{ + {ObjectMeta: metav1.ObjectMeta{Name: "namespace1"}}, + }, + }, + servicesByNamespace: map[string][]*corev1.Service{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "service-with-no-selector"}, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeLocal, + }, + }, + }, + }, + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "deny-all-ingress-policy-with-no-selector"}, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{}, + PolicyTypes: []networkingv1.PolicyType{"Ingress"}, + }, + }, + }, + }, + expectedUnsafeServices: []string{}, + }, + { + name: "NodePort service with externalTrafficPolicy=Local with no selector and a deny all ingress policy with no selector", + namespaces: &corev1.NamespaceList{ + Items: []corev1.Namespace{ + {ObjectMeta: metav1.ObjectMeta{Name: "namespace1"}}, + }, + }, + servicesByNamespace: map[string][]*corev1.Service{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "service-with-no-selector"}, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeNodePort, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeLocal, + }, + }, + }, + }, + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "deny-all-ingress-policy-with-no-selector"}, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{}, + PolicyTypes: []networkingv1.PolicyType{"Ingress"}, + }, + }, + }, + }, + expectedUnsafeServices: []string{}, + }, + // Scenarios where there are LoadBalancer or NodePort services with externalTrafficPolicy=Cluster but no policies + { + name: "LoadBalancer service with externalTrafficPolicy=Cluster with no selector and no policies", + namespaces: &corev1.NamespaceList{ + Items: []corev1.Namespace{ + {ObjectMeta: metav1.ObjectMeta{Name: "namespace1"}}, + }, + }, + servicesByNamespace: map[string][]*corev1.Service{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "service-with-no-selector"}, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeCluster, + }, + }, + }, + }, + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": {}, + }, + expectedUnsafeServices: []string{}, + }, + { + name: "NodePort service with externalTrafficPolicy=Cluster with no selector and no policies", + namespaces: &corev1.NamespaceList{ + Items: []corev1.Namespace{ + {ObjectMeta: metav1.ObjectMeta{Name: "namespace1"}}, + }, + }, + servicesByNamespace: map[string][]*corev1.Service{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "service-with-no-selector"}, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeNodePort, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeCluster, + }, + }, + }, + }, + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": {}, + }, + expectedUnsafeServices: []string{}, + }, + // Scenarios where there are 
LoadBalancer or NodePort services with externalTrafficPolicy=Cluster and policies allow traffic + { + name: "LoadBalancer service with externalTrafficPolicy=Cluster with no selector and an allow all ingress policy with no selector", + namespaces: &corev1.NamespaceList{ + Items: []corev1.Namespace{ + {ObjectMeta: metav1.ObjectMeta{Name: "namespace1"}}, + }, + }, + servicesByNamespace: map[string][]*corev1.Service{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "service-with-no-selector"}, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeCluster, + }, + }, + }, + }, + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "allow-all-ingress-policy-with-no-selector"}, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{}, + PolicyTypes: []networkingv1.PolicyType{"Ingress"}, + Ingress: []networkingv1.NetworkPolicyIngressRule{ + {}, + }, + }, + }, + }, + }, + expectedUnsafeServices: []string{}, + }, + { + name: "LoadBalancer service with externalTrafficPolicy=Cluster with a selector and an allow all ingress policy with a matching selector", + namespaces: &corev1.NamespaceList{ + Items: []corev1.Namespace{ + {ObjectMeta: metav1.ObjectMeta{Name: "namespace1"}}, + }, + }, + servicesByNamespace: map[string][]*corev1.Service{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "service-with-selector"}, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + Selector: map[string]string{"app": "test"}, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeCluster, + }, + }, + }, + }, + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "allow-all-ingress-policy-with-selector"}, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + PolicyTypes: []networkingv1.PolicyType{"Ingress"}, + Ingress: []networkingv1.NetworkPolicyIngressRule{ + {}, + }, + }, + }, + }, + }, + expectedUnsafeServices: []string{}, + }, + { + name: "LoadBalancer service with externalTrafficPolicy=Cluster with a selector and an ingress policy with a matching selector and ports", + namespaces: &corev1.NamespaceList{ + Items: []corev1.Namespace{ + {ObjectMeta: metav1.ObjectMeta{Name: "namespace1"}}, + }, + }, + servicesByNamespace: map[string][]*corev1.Service{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "service-with-selector-and-ports"}, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + Selector: map[string]string{"app": "test"}, + Ports: []corev1.ServicePort{ + { + Port: 80, + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromInt(80), + }, + }, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeCluster, + }, + }, + }, + }, + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "ingress-policy-with-selector-and-ports"}, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + PolicyTypes: []networkingv1.PolicyType{"Ingress"}, + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + { + Port: intstrPtr(intstr.FromInt(80)), + Protocol: func() *corev1.Protocol { + protocol := corev1.ProtocolTCP + return &protocol + }(), + }, + }, + }, 
+ }, + }, + }, + }, + }, + expectedUnsafeServices: []string{}, + }, + { + name: "NodePort service with externalTrafficPolicy=Cluster with no selector and an allow all ingress policy with no selector", + namespaces: &corev1.NamespaceList{ + Items: []corev1.Namespace{ + {ObjectMeta: metav1.ObjectMeta{Name: "namespace1"}}, + }, + }, + servicesByNamespace: map[string][]*corev1.Service{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "service-with-no-selector"}, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeNodePort, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeCluster, + }, + }, + }, + }, + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "allow-all-ingress-policy-with-no-selector"}, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{}, + PolicyTypes: []networkingv1.PolicyType{"Ingress"}, + Ingress: []networkingv1.NetworkPolicyIngressRule{ + {}, + }, + }, + }, + }, + }, + expectedUnsafeServices: []string{}, + }, + { + name: "NodePort service with externalTrafficPolicy=Cluster with a selector and an allow all ingress policy with a matching selector", + namespaces: &corev1.NamespaceList{ + Items: []corev1.Namespace{ + {ObjectMeta: metav1.ObjectMeta{Name: "namespace1"}}, + }, + }, + servicesByNamespace: map[string][]*corev1.Service{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "service-with-selector"}, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeNodePort, + Selector: map[string]string{"app": "test"}, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeCluster, + }, + }, + }, + }, + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "allow-all-ingress-policy-with-selector"}, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + PolicyTypes: []networkingv1.PolicyType{"Ingress"}, + Ingress: []networkingv1.NetworkPolicyIngressRule{ + {}, + }, + }, + }, + }, + }, + expectedUnsafeServices: []string{}, + }, + { + name: "NodePort service with externalTrafficPolicy=Cluster with a selector and an allow all ingress policy with a matching selector and ports", + namespaces: &corev1.NamespaceList{ + Items: []corev1.Namespace{ + {ObjectMeta: metav1.ObjectMeta{Name: "namespace1"}}, + }, + }, + servicesByNamespace: map[string][]*corev1.Service{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "service-with-selector-and-ports"}, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeNodePort, + Selector: map[string]string{"app": "test"}, + Ports: []corev1.ServicePort{ + { + Port: 80, + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromInt(80), + }, + }, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeCluster, + }, + }, + }, + }, + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "allow-all-ingress-policy-with-selector-and-ports"}, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + PolicyTypes: []networkingv1.PolicyType{"Ingress"}, + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + { + Port: intstrPtr(intstr.FromInt(80)), + Protocol: func() *corev1.Protocol { + protocol := corev1.ProtocolTCP + return &protocol + }(), + }, + }, + }, + }, + }, + }, + }, + }, + 
expectedUnsafeServices: []string{}, + }, + // Scenarios where there are LoadBalancer or NodePort services with externalTrafficPolicy=Cluster and policies deny traffic + { + name: "LoadBalancer service with externalTrafficPolicy=Cluster with no selector and a deny all ingress policy with no selector", + namespaces: &corev1.NamespaceList{ + Items: []corev1.Namespace{ + {ObjectMeta: metav1.ObjectMeta{Name: "namespace1"}}, + }, + }, + servicesByNamespace: map[string][]*corev1.Service{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "service-with-no-selector"}, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeCluster, + }, + }, + }, + }, + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "deny-all-ingress-policy-with-no-selector"}, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{}, + PolicyTypes: []networkingv1.PolicyType{"Ingress"}, + }, + }, + }, + }, + expectedUnsafeServices: []string{"namespace1/service-with-no-selector"}, + }, + { + name: "LoadBalancer service with externalTrafficPolicy=Cluster with no selector and an allow all ingress policy with a selector", + namespaces: &corev1.NamespaceList{ + Items: []corev1.Namespace{ + {ObjectMeta: metav1.ObjectMeta{Name: "namespace1"}}, + }, + }, + servicesByNamespace: map[string][]*corev1.Service{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "service-with-no-selector"}, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeCluster, + }, + }, + }, + }, + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "allow-all-ingress-policy-with-selector"}, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + PolicyTypes: []networkingv1.PolicyType{"Ingress"}, + Ingress: []networkingv1.NetworkPolicyIngressRule{ + {}, + }, + }, + }, + }, + }, + expectedUnsafeServices: []string{"namespace1/service-with-no-selector"}, + }, + { + name: "LoadBalancer service with externalTrafficPolicy=Cluster with a selector and a deny all ingress policy with a matching selector", + namespaces: &corev1.NamespaceList{ + Items: []corev1.Namespace{ + {ObjectMeta: metav1.ObjectMeta{Name: "namespace1"}}, + }, + }, + servicesByNamespace: map[string][]*corev1.Service{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "service-with-selector"}, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + Selector: map[string]string{"app": "test"}, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeCluster, + }, + }, + }, + }, + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "deny-all-ingress-policy-with-selector"}, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + PolicyTypes: []networkingv1.PolicyType{"Ingress"}, + }, + }, + }, + }, + expectedUnsafeServices: []string{"namespace1/service-with-selector"}, + }, + { + name: "LoadBalancer service with externalTrafficPolicy=Cluster and matching policy that has a pod selector but no ports", + namespaces: &corev1.NamespaceList{ + Items: []corev1.Namespace{ + {ObjectMeta: metav1.ObjectMeta{Name: "namespace1"}}, + }, + }, + 
servicesByNamespace: map[string][]*corev1.Service{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "external-traffic-policy-cluster-service"}, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeCluster, + Selector: map[string]string{"app": "test"}, + }, + }, + }, + }, + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "policy1"}, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + PolicyTypes: []networkingv1.PolicyType{"Ingress"}, + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + From: []networkingv1.NetworkPolicyPeer{ + { + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + expectedUnsafeServices: []string{"namespace1/external-traffic-policy-cluster-service"}, + }, + { + name: "LoadBalancer service with externalTrafficPolicy=Cluster with a selector and an allow all ingress policy with a selector that doesn't match", + namespaces: &corev1.NamespaceList{ + Items: []corev1.Namespace{ + {ObjectMeta: metav1.ObjectMeta{Name: "namespace1"}}, + }, + }, + servicesByNamespace: map[string][]*corev1.Service{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "service-with-selector"}, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + Selector: map[string]string{"app": "test", "app3": "test3", "app4": "test4"}, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeCluster, + }, + }, + }, + }, + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "allow-all-ingress-policy-with-selector"}, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test", "app2": "test2"}, + }, + PolicyTypes: []networkingv1.PolicyType{"Ingress"}, + Ingress: []networkingv1.NetworkPolicyIngressRule{ + {}, + }, + }, + }, + }, + }, + expectedUnsafeServices: []string{"namespace1/service-with-selector"}, + }, + { + name: "LoadBalancer service with externalTrafficPolicy=Cluster with a selector and an allow all ingress policy with a selector that has more labels", + namespaces: &corev1.NamespaceList{ + Items: []corev1.Namespace{ + {ObjectMeta: metav1.ObjectMeta{Name: "namespace1"}}, + }, + }, + servicesByNamespace: map[string][]*corev1.Service{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "service-with-selector"}, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + Selector: map[string]string{"app": "test"}, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeCluster, + }, + }, + }, + }, + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "allow-all-ingress-policy-with-selector"}, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test", "app2": "test2", "app3": "test3"}, + }, + PolicyTypes: []networkingv1.PolicyType{"Ingress"}, + Ingress: []networkingv1.NetworkPolicyIngressRule{ + {}, + }, + }, + }, + }, + }, + expectedUnsafeServices: []string{"namespace1/service-with-selector"}, + }, + { + name: "LoadBalancer service with externalTrafficPolicy=Cluster with a selector and an ingress policy with a matching selector but ports don't match", + namespaces: 
&corev1.NamespaceList{ + Items: []corev1.Namespace{ + {ObjectMeta: metav1.ObjectMeta{Name: "namespace1"}}, + }, + }, + servicesByNamespace: map[string][]*corev1.Service{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "service-with-selector-and-ports"}, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + Selector: map[string]string{"app": "test"}, + Ports: []corev1.ServicePort{ + { + Port: 80, + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromInt(80), + }, + { + Port: 100, + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromInt(100), + }, + }, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeCluster, + }, + }, + }, + }, + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "ingress-policy-with-selector-and-ports"}, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + PolicyTypes: []networkingv1.PolicyType{"Ingress"}, + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + { + Port: intstrPtr(intstr.FromInt(80)), + Protocol: func() *corev1.Protocol { + protocol := corev1.ProtocolTCP + return &protocol + }(), + }, + { + Port: intstrPtr(intstr.FromInt(90)), + Protocol: func() *corev1.Protocol { + protocol := corev1.ProtocolTCP + return &protocol + }(), + }, + }, + }, + }, + }, + }, + }, + }, + expectedUnsafeServices: []string{"namespace1/service-with-selector-and-ports"}, + }, + { + name: "LoadBalancer service with externalTrafficPolicy=Cluster with a selector and an ingress policy with a matching selector but uses named ports", + namespaces: &corev1.NamespaceList{ + Items: []corev1.Namespace{ + {ObjectMeta: metav1.ObjectMeta{Name: "namespace1"}}, + }, + }, + servicesByNamespace: map[string][]*corev1.Service{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "service-with-selector-and-named-ports"}, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + Selector: map[string]string{"app": "test"}, + Ports: []corev1.ServicePort{ + { + Port: 80, + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromString("http"), + }, + }, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeCluster, + }, + }, + }, + }, + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "ingress-policy-with-selector-and-named-ports"}, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + PolicyTypes: []networkingv1.PolicyType{"Ingress"}, + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + { + Port: intstrPtr(intstr.FromString("http")), + Protocol: func() *corev1.Protocol { + protocol := corev1.ProtocolTCP + return &protocol + }(), + }, + }, + }, + }, + }, + }, + }, + }, + expectedUnsafeServices: []string{"namespace1/service-with-selector-and-named-ports"}, + }, + // Scenarios covering edge cases + { + name: "LoadBalancer service with externalTrafficPolicy=Cluster with no selector and an allow all and deny all ingress policy with no selector", + namespaces: &corev1.NamespaceList{ + Items: []corev1.Namespace{ + {ObjectMeta: metav1.ObjectMeta{Name: "namespace1"}}, + }, + }, + servicesByNamespace: map[string][]*corev1.Service{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "service-with-no-selector"}, + Spec: 
corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeCluster, + }, + }, + }, + }, + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "deny-all-ingress-policy-with-no-selector"}, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{}, + PolicyTypes: []networkingv1.PolicyType{"Ingress"}, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "allow-all-ingress-policy-with-no-selector"}, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{}, + PolicyTypes: []networkingv1.PolicyType{"Ingress"}, + Ingress: []networkingv1.NetworkPolicyIngressRule{ + {}, + }, + }, + }, + }, + }, + expectedUnsafeServices: []string{}, + }, + { + name: "LoadBalancer service with externalTrafficPolicy=Cluster with a selector and an allow all and deny all ingress policy with a matching selector", + namespaces: &corev1.NamespaceList{ + Items: []corev1.Namespace{ + {ObjectMeta: metav1.ObjectMeta{Name: "namespace1"}}, + }, + }, + servicesByNamespace: map[string][]*corev1.Service{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "service-with-selector"}, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + Selector: map[string]string{"app": "test"}, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeCluster, + }, + }, + }, + }, + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "deny-all-ingress-policy-with-selector"}, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + PolicyTypes: []networkingv1.PolicyType{"Ingress"}, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "allow-all-ingress-policy-with-a-selector"}, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + PolicyTypes: []networkingv1.PolicyType{"Ingress"}, + Ingress: []networkingv1.NetworkPolicyIngressRule{ + {}, + }, + }, + }, + }, + }, + expectedUnsafeServices: []string{}, + }, + { + name: "LoadBalancer service with externalTrafficPolicy=Cluster with a selector and no ports and an ingress policy with a matching selector and ports", + namespaces: &corev1.NamespaceList{ + Items: []corev1.Namespace{ + {ObjectMeta: metav1.ObjectMeta{Name: "namespace1"}}, + }, + }, + servicesByNamespace: map[string][]*corev1.Service{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "service-with-selector"}, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + Selector: map[string]string{"app": "test"}, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeCluster, + }, + }, + }, + }, + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "ingress-policy-with-selector-and-ports"}, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + PolicyTypes: []networkingv1.PolicyType{"Ingress"}, + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + { + Port: intstrPtr(intstr.FromInt(80)), + Protocol: func() *corev1.Protocol { + protocol := corev1.ProtocolTCP + return &protocol + }(), + }, + }, + }, + }, + }, + }, + }, + }, + expectedUnsafeServices: []string{"namespace1/service-with-selector"}, + }, + { + 
name: "LoadBalancer service with externalTrafficPolicy=Cluster with no selector and an allow all ingress policy with a matchExpressions selector", + namespaces: &corev1.NamespaceList{ + Items: []corev1.Namespace{ + {ObjectMeta: metav1.ObjectMeta{Name: "namespace1"}}, + }, + }, + servicesByNamespace: map[string][]*corev1.Service{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "service-with-selector-and-ports"}, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + Selector: map[string]string{"app": "test"}, + Ports: []corev1.ServicePort{ + { + Port: 80, + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromInt(80), + }, + }, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeCluster, + }, + }, + }, + }, + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "allow-all-ingress-policy-with-matchexpressins-selector"}, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "app", + Operator: metav1.LabelSelectorOpIn, + Values: []string{"test"}, + }, + }, + }, + PolicyTypes: []networkingv1.PolicyType{"Ingress"}, + Ingress: []networkingv1.NetworkPolicyIngressRule{ + {}, + }, + }, + }, + }, + }, + expectedUnsafeServices: []string{"namespace1/service-with-selector-and-ports"}, + }, + { + name: "LoadBalancer service with externalTrafficPolicy=Cluster with a selector and an allow all ingress policy with a matchExpressions selector", + namespaces: &corev1.NamespaceList{ + Items: []corev1.Namespace{ + {ObjectMeta: metav1.ObjectMeta{Name: "namespace1"}}, + }, + }, + servicesByNamespace: map[string][]*corev1.Service{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "service-with-no-selector"}, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeCluster, + }, + }, + }, + }, + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "allow-all-ingress-policy-with-matchexpressions-selector"}, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "app", + Operator: metav1.LabelSelectorOpIn, + Values: []string{"test"}, + }, + }, + }, + PolicyTypes: []networkingv1.PolicyType{"Ingress"}, + Ingress: []networkingv1.NetworkPolicyIngressRule{ + {}, + }, + }, + }, + }, + }, + expectedUnsafeServices: []string{"namespace1/service-with-no-selector"}, + }, + { + name: "LoadBalancer service with externalTrafficPolicy=Cluster with a selector and an ingress policy with a matching selector and protocol with no ports", + namespaces: &corev1.NamespaceList{ + Items: []corev1.Namespace{ + {ObjectMeta: metav1.ObjectMeta{Name: "namespace1"}}, + }, + }, + servicesByNamespace: map[string][]*corev1.Service{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "service-with-selector-and-ports"}, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + Selector: map[string]string{"app": "test"}, + Ports: []corev1.ServicePort{ + { + Port: 80, + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromInt(80), + }, + }, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeCluster, + }, + }, + }, + }, + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "ingress-policy-with-selector-and-ports"}, + 
Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + PolicyTypes: []networkingv1.PolicyType{"Ingress"}, + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + { + Protocol: func() *corev1.Protocol { + protocol := corev1.ProtocolTCP + return &protocol + }(), + }, + }, + }, + }, + }, + }, + }, + }, + expectedUnsafeServices: []string{}, + }, + { + name: "LoadBalancer service with externalTrafficPolicy=Cluster with a selector and an allow all ingress policy with a matching selector and port and port=0", + namespaces: &corev1.NamespaceList{ + Items: []corev1.Namespace{ + {ObjectMeta: metav1.ObjectMeta{Name: "namespace1"}}, + }, + }, + servicesByNamespace: map[string][]*corev1.Service{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "service-with-selector-and-ports"}, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + Selector: map[string]string{"app": "test"}, + Ports: []corev1.ServicePort{ + { + Port: 80, + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromInt(80), + }, + }, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeCluster, + }, + }, + }, + }, + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "allow-all-ingress-policy-with-selector-and-ports"}, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + PolicyTypes: []networkingv1.PolicyType{"Ingress"}, + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + { + Port: intstrPtr(intstr.FromInt(0)), + Protocol: func() *corev1.Protocol { + protocol := corev1.ProtocolTCP + return &protocol + }(), + }, + { + Port: intstrPtr(intstr.FromInt(80)), + Protocol: func() *corev1.Protocol { + protocol := corev1.ProtocolTCP + return &protocol + }(), + }, + }, + }, + }, + }, + }, + }, + }, + expectedUnsafeServices: []string{}, + }, + { + name: "LoadBalancer service with externalTrafficPolicy=Cluster with a selector and targetport=0 and an allow all ingress policy with a matching selector and different ports", + namespaces: &corev1.NamespaceList{ + Items: []corev1.Namespace{ + {ObjectMeta: metav1.ObjectMeta{Name: "namespace1"}}, + }, + }, + servicesByNamespace: map[string][]*corev1.Service{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "service-with-selector-and-ports"}, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + Selector: map[string]string{"app": "test"}, + Ports: []corev1.ServicePort{ + { + Port: 80, + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromInt(0), + }, + }, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeCluster, + }, + }, + }, + }, + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "allow-all-ingress-policy-with-selector-and-ports"}, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + PolicyTypes: []networkingv1.PolicyType{"Ingress"}, + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + { + Port: intstrPtr(intstr.FromInt(80)), + Protocol: func() *corev1.Protocol { + protocol := corev1.ProtocolTCP + return &protocol + }(), + }, + }, + }, + }, + }, + }, + }, + }, + expectedUnsafeServices: 
[]string{"namespace1/service-with-selector-and-ports"}, + }, + { + name: "LoadBalancer service with externalTrafficPolicy=Cluster with a selector and targetport=0 and an allow all ingress policy with a matching selector and ports=0", + namespaces: &corev1.NamespaceList{ + Items: []corev1.Namespace{ + {ObjectMeta: metav1.ObjectMeta{Name: "namespace1"}}, + }, + }, + servicesByNamespace: map[string][]*corev1.Service{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "service-with-selector-and-ports"}, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + Selector: map[string]string{"app": "test"}, + Ports: []corev1.ServicePort{ + { + Port: 80, + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromInt(0), + }, + }, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeCluster, + }, + }, + }, + }, + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "allow-all-ingress-policy-with-selector-and-ports"}, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + PolicyTypes: []networkingv1.PolicyType{"Ingress"}, + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + { + Port: intstrPtr(intstr.FromInt(0)), + Protocol: func() *corev1.Protocol { + protocol := corev1.ProtocolTCP + return &protocol + }(), + }, + }, + }, + }, + }, + }, + }, + }, + expectedUnsafeServices: []string{"namespace1/service-with-selector-and-ports"}, + }, + { + name: "LoadBalancer service with externalTrafficPolicy=Cluster with a selector and an ingress policy with a matching selector and ports and pod/namespace selectors", + namespaces: &corev1.NamespaceList{ + Items: []corev1.Namespace{ + {ObjectMeta: metav1.ObjectMeta{Name: "namespace1"}}, + }, + }, + servicesByNamespace: map[string][]*corev1.Service{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "service-with-selector-and-ports"}, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + Selector: map[string]string{"app": "test"}, + Ports: []corev1.ServicePort{ + { + Port: 80, + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromInt(80), + }, + }, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeCluster, + }, + }, + }, + }, + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "allow-all-ingress-policy-with-selector-and-ports"}, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + PolicyTypes: []networkingv1.PolicyType{"Ingress"}, + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + From: []networkingv1.NetworkPolicyPeer{ + { + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + }, + }, + Ports: []networkingv1.NetworkPolicyPort{ + { + Port: intstrPtr(intstr.FromInt(80)), + Protocol: func() *corev1.Protocol { + protocol := corev1.ProtocolTCP + return &protocol + }(), + }, + }, + }, + }, + }, + }, + }, + }, + expectedUnsafeServices: []string{"namespace1/service-with-selector-and-ports"}, + }, + // Scenarios where there are LoadBalancer or NodePort services with externalTrafficPolicy=Cluster and there are multiple namespaces + { + name: "LoadBalancer or NodePort services with externalTrafficPolicy=Cluster and allow 
all ingress policies with matching label and ports in multiple namespaces", + namespaces: &corev1.NamespaceList{ + Items: []corev1.Namespace{ + {ObjectMeta: metav1.ObjectMeta{Name: "namespace1"}}, + {ObjectMeta: metav1.ObjectMeta{Name: "namespace2"}}, + {ObjectMeta: metav1.ObjectMeta{Name: "namespace3"}}, + }, + }, + servicesByNamespace: map[string][]*corev1.Service{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "service-with-selector"}, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + Selector: map[string]string{"app": "test"}, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeCluster, + }, + }, + }, + "namespace2": { + { + ObjectMeta: metav1.ObjectMeta{Name: "service-with-selector-and-ports"}, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeNodePort, + Selector: map[string]string{"app": "test"}, + Ports: []corev1.ServicePort{ + { + Port: 80, + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromInt(80), + }, + }, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeCluster, + }, + }, + }, + "namespace3": { + { + ObjectMeta: metav1.ObjectMeta{Name: "service-with-selector-and-ports"}, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + Selector: map[string]string{"app": "test"}, + Ports: []corev1.ServicePort{ + { + Port: 80, + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromInt(80), + }, + }, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeCluster, + }, + }, + }, + }, + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "allow-all-ingress-policy-with-selector"}, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + PolicyTypes: []networkingv1.PolicyType{"Ingress"}, + Ingress: []networkingv1.NetworkPolicyIngressRule{ + {}, + }, + }, + }, + }, + "namespace2": { + { + ObjectMeta: metav1.ObjectMeta{Name: "allow-all-ingress-policy-with-selector-and-ports"}, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + PolicyTypes: []networkingv1.PolicyType{"Ingress"}, + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + { + Port: intstrPtr(intstr.FromInt(80)), + Protocol: func() *corev1.Protocol { + protocol := corev1.ProtocolTCP + return &protocol + }(), + }, + }, + }, + }, + }, + }, + }, + "namespace3": { + { + ObjectMeta: metav1.ObjectMeta{Name: "allow-all-ingress-policy-with-selector-and-ports"}, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + PolicyTypes: []networkingv1.PolicyType{"Ingress"}, + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + { + Protocol: func() *corev1.Protocol { + protocol := corev1.ProtocolTCP + return &protocol + }(), + }, + }, + }, + }, + }, + }, + }, + }, + expectedUnsafeServices: []string{}, + }, + { + name: "LoadBalancer or NodePort services with externalTrafficPolicy=Cluster and allow all ingress policies without matching label and ports in multiple namespaces", + namespaces: &corev1.NamespaceList{ + Items: []corev1.Namespace{ + {ObjectMeta: metav1.ObjectMeta{Name: "namespace1"}}, + {ObjectMeta: metav1.ObjectMeta{Name: "namespace2"}}, + {ObjectMeta: metav1.ObjectMeta{Name: "namespace3"}}, + }, + }, + servicesByNamespace: map[string][]*corev1.Service{ + 
"namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "service-with-selector"}, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + Selector: map[string]string{"app": "test2"}, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeCluster, + }, + }, + }, + "namespace2": { + { + ObjectMeta: metav1.ObjectMeta{Name: "service-with-selector-and-ports"}, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeNodePort, + Selector: map[string]string{"app": "test"}, + Ports: []corev1.ServicePort{ + { + Port: 80, + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromInt(80), + }, + { + Port: 90, + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromInt(90), + }, + }, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeCluster, + }, + }, + }, + "namespace3": { + { + ObjectMeta: metav1.ObjectMeta{Name: "service-with-selector-and-ports"}, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + Selector: map[string]string{"app": "test"}, + Ports: []corev1.ServicePort{ + { + Port: 80, + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromInt(80), + }, + }, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeCluster, + }, + }, + }, + }, + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "allow-all-ingress-policy-with-selector"}, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + PolicyTypes: []networkingv1.PolicyType{"Ingress"}, + Ingress: []networkingv1.NetworkPolicyIngressRule{ + {}, + }, + }, + }, + }, + "namespace2": { + { + ObjectMeta: metav1.ObjectMeta{Name: "allow-all-ingress-policy-with-selector-and-ports"}, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + PolicyTypes: []networkingv1.PolicyType{"Ingress"}, + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + { + Port: intstrPtr(intstr.FromInt(80)), + Protocol: func() *corev1.Protocol { + protocol := corev1.ProtocolTCP + return &protocol + }(), + }, + }, + }, + }, + }, + }, + }, + "namespace3": { + { + ObjectMeta: metav1.ObjectMeta{Name: "allow-all-ingress-policy-with-selector-and-ports"}, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + PolicyTypes: []networkingv1.PolicyType{"Ingress"}, + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + { + Protocol: func() *corev1.Protocol { + protocol := corev1.ProtocolUDP + return &protocol + }(), + }, + }, + }, + }, + }, + }, + }, + }, + expectedUnsafeServices: []string{"namespace1/service-with-selector", "namespace2/service-with-selector-and-ports", "namespace3/service-with-selector-and-ports"}, + }, + { + name: "LoadBalancer or NodePort services with externalTrafficPolicy=Cluster and allow all ingress policies with some matching label and ports in multiple namespaces", + namespaces: &corev1.NamespaceList{ + Items: []corev1.Namespace{ + {ObjectMeta: metav1.ObjectMeta{Name: "namespace1"}}, + {ObjectMeta: metav1.ObjectMeta{Name: "namespace2"}}, + {ObjectMeta: metav1.ObjectMeta{Name: "namespace3"}}, + }, + }, + servicesByNamespace: map[string][]*corev1.Service{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "service-with-selector-match"}, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + 
Selector: map[string]string{"app": "test"}, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeCluster, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "service-with-selector-no-match"}, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + Selector: map[string]string{"app": "test2"}, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeCluster, + }, + }, + }, + "namespace2": { + { + ObjectMeta: metav1.ObjectMeta{Name: "service-with-selector-and-ports-match"}, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeNodePort, + Selector: map[string]string{"app": "test"}, + Ports: []corev1.ServicePort{ + { + Port: 80, + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromInt(80), + }, + }, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeCluster, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "service-with-selector-and-ports-no-match"}, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeNodePort, + Selector: map[string]string{"app": "test"}, + Ports: []corev1.ServicePort{ + { + Port: 80, + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromInt(80), + }, + { + Port: 90, + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromInt(90), + }, + }, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeCluster, + }, + }, + }, + "namespace3": { + { + ObjectMeta: metav1.ObjectMeta{Name: "service-with-selector-and-ports-match"}, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + Selector: map[string]string{"app": "test"}, + Ports: []corev1.ServicePort{ + { + Port: 80, + Protocol: corev1.ProtocolUDP, + TargetPort: intstr.FromInt(80), + }, + }, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeCluster, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "service-with-selector-and-ports-no-match"}, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + Selector: map[string]string{"app": "test"}, + Ports: []corev1.ServicePort{ + { + Port: 80, + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromInt(80), + }, + }, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeCluster, + }, + }, + }, + }, + policiesByNamespace: map[string][]*networkingv1.NetworkPolicy{ + "namespace1": { + { + ObjectMeta: metav1.ObjectMeta{Name: "allow-all-ingress-policy-with-selector"}, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + PolicyTypes: []networkingv1.PolicyType{"Ingress"}, + Ingress: []networkingv1.NetworkPolicyIngressRule{ + {}, + }, + }, + }, + }, + "namespace2": { + { + ObjectMeta: metav1.ObjectMeta{Name: "allow-all-ingress-policy-with-selector-and-ports"}, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + PolicyTypes: []networkingv1.PolicyType{"Ingress"}, + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + { + Port: intstrPtr(intstr.FromInt(80)), + Protocol: func() *corev1.Protocol { + protocol := corev1.ProtocolTCP + return &protocol + }(), + }, + }, + }, + }, + }, + }, + }, + "namespace3": { + { + ObjectMeta: metav1.ObjectMeta{Name: "allow-all-ingress-policy-with-selector-and-ports"}, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + PolicyTypes: []networkingv1.PolicyType{"Ingress"}, + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + Ports: 
[]networkingv1.NetworkPolicyPort{ + { + Protocol: func() *corev1.Protocol { + protocol := corev1.ProtocolUDP + return &protocol + }(), + }, + }, + }, + }, + }, + }, + }, + }, + expectedUnsafeServices: []string{"namespace1/service-with-selector-no-match", "namespace2/service-with-selector-and-ports-no-match", "namespace3/service-with-selector-and-ports-no-match"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + unsafeServices := getUnsafeExternalTrafficPolicyClusterServices(tt.namespaces, tt.servicesByNamespace, tt.policiesByNamespace) + if !equal(unsafeServices, tt.expectedUnsafeServices) { + t.Errorf("expected unsafe services %v, got %v", tt.expectedUnsafeServices, unsafeServices) + } + }) + } +} + +// Helper to test the list output of functions +func equal(a, b []string) bool { + if len(a) != len(b) { + return false + } + m := make(map[string]bool) + for _, v := range a { + m[v] = true + } + for _, v := range b { + if !m[v] { + return false + } + } + return true +} + +// Helper function to create a pointer to an intstr.IntOrString +func intstrPtr(i intstr.IntOrString) *intstr.IntOrString { + return &i +} + +// Helper function to create a pointer to an int32 +func int32Ptr(i int32) *int32 { + return &i +} diff --git a/tools/azure-npm-to-cilium-validator/go.mod b/tools/azure-npm-to-cilium-validator/go.mod new file mode 100644 index 0000000000..6ea372a07b --- /dev/null +++ b/tools/azure-npm-to-cilium-validator/go.mod @@ -0,0 +1,85 @@ +module azure-npm-to-cilium-validator + +go 1.23.0 + +toolchain go1.23.6 + +require ( + github.com/Azure/azure-container-networking v1.6.21 + github.com/olekukonko/tablewriter v0.0.5 + k8s.io/api v0.30.7 + k8s.io/apimachinery v0.30.7 + k8s.io/client-go v0.30.7 + k8s.io/klog/v2 v2.130.1 +) + +require ( + code.cloudfoundry.org/clock v1.0.0 // indirect + github.com/Masterminds/semver v1.5.0 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/Microsoft/hcsshim v0.12.9 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/containerd/cgroups/v3 v3.0.3 // indirect + github.com/containerd/errdefs v0.3.0 // indirect + github.com/containerd/errdefs/pkg v0.3.0 // indirect + github.com/containerd/typeurl/v2 v2.2.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-openapi/jsonpointer v0.20.0 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.4 // indirect + github.com/gofrs/uuid v4.0.0+incompatible // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/mock v1.7.0-rc.1 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/imdario/mergo v0.3.16 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.17.9 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-runewidth v0.0.9 // indirect + github.com/microsoft/ApplicationInsights-Go v0.4.4 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 
+ github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/onsi/ginkgo/v2 v2.19.0 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_golang v1.20.5 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.61.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + github.com/rogpeppe/go-internal v1.12.0 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/spf13/pflag v1.0.6 // indirect + github.com/stretchr/testify v1.10.0 // indirect + go.opencensus.io v0.24.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.0 // indirect + golang.org/x/net v0.38.0 // indirect + golang.org/x/oauth2 v0.27.0 // indirect + golang.org/x/sys v0.31.0 // indirect + golang.org/x/term v0.30.0 // indirect + golang.org/x/text v0.23.0 // indirect + golang.org/x/time v0.9.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect + google.golang.org/grpc v1.69.2 // indirect + google.golang.org/protobuf v1.36.3 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/klog v1.0.0 // indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect + k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect +) diff --git a/tools/azure-npm-to-cilium-validator/go.sum b/tools/azure-npm-to-cilium-validator/go.sum new file mode 100644 index 0000000000..0f9b370698 --- /dev/null +++ b/tools/azure-npm-to-cilium-validator/go.sum @@ -0,0 +1,314 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= +code.cloudfoundry.org/clock v1.0.0 h1:kFXWQM4bxYvdBw2X8BbBeXwQNgfoWv1vqAk2ZZyBN2o= +code.cloudfoundry.org/clock v1.0.0/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= +github.com/Azure/azure-container-networking v1.6.21 h1:1O+6D7upf23qMlPRhlB9EPdj9sqpgXiwyCTnSscQ2VM= +github.com/Azure/azure-container-networking v1.6.21/go.mod h1:ecy7xVz3A+vpH6oAyYZLQfN1yrRSQc3iN9a31w0N8VI= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= +github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/Microsoft/hcsshim v0.12.9 h1:2zJy5KA+l0loz1HzEGqyNnjd3fyZA31ZBCGKacp6lLg= +github.com/Microsoft/hcsshim v0.12.9/go.mod h1:fJ0gkFAna6ukt0bLdKB8djt4XIJhF/vEPuoIWYVvZ8Y= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.3.0 
h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0= +github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0= +github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4= +github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= +github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= +github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= +github.com/containerd/typeurl/v2 v2.2.0 h1:6NBDbQzr7I5LHgp34xAXYF5DOTQDn05X58lsPEmzLso= +github.com/containerd/typeurl/v2 v2.2.0/go.mod h1:8XOOxnyatxSWuG8OfsZXVnAF4iZfedjS/8UHSPJnX4g= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ= +github.com/go-openapi/jsonpointer v0.20.0/go.mod h1:6PGzBjjIIumbLYysB73Klnms1mwnU4G3YHOECG3CedA= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod 
h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= +github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.7.0-rc.1 h1:YojYx61/OLFsiv6Rw1Z96LpldJIy31o+UHmwAUMJ6/U= +github.com/golang/mock v1.7.0-rc.1/go.mod h1:s42URUywIqd+OcERslBJvOjepvNymP31m3q8d/GkuRs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof 
v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg= +github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/microsoft/ApplicationInsights-Go v0.4.4 h1:G4+H9WNs6ygSCe6sUyxRc2U81TI5Es90b2t/MwX5KqY= +github.com/microsoft/ApplicationInsights-Go v0.4.4/go.mod h1:fKRUseBqkw6bDiXTs3ESTiU/4YTIHsQS4W3fP2ieF4U= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod 
h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= +github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= +github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ= +github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify 
v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= +golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 
v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= +golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU= +google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= +gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/api v0.30.7 h1:wB2eHI+IptVYsz5WsAQpI6+Dqi3+11wEWBqIh4fh980= +k8s.io/api v0.30.7/go.mod h1:bR0EwbmhYmJvUoeza7ZzBUmYCrVXccQ9JOdfv0BxhH0= +k8s.io/apimachinery v0.30.7 h1:CoQFxvzPFKwU1eJGN/8LgM3ZJBC3hKgvwGqRrL43uIY= +k8s.io/apimachinery v0.30.7/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= +k8s.io/client-go v0.30.7 h1:DQRfuGWxDzxPEyyiTE/fxzAsZcj2p9sbc5671njR52w= +k8s.io/client-go v0.30.7/go.mod h1:oED9+njB91ExCc4BNPAotniB7WH1ig7CmiBx5pVA1yw= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/zapai/go.mod b/zapai/go.mod index 6c7738379b..60dd9648e4 100644 --- a/zapai/go.mod +++ b/zapai/go.mod @@ -1,6 +1,8 @@ module 
github.com/Azure/azure-container-networking/zapai -go 1.21 +go 1.23 + +toolchain go1.23.2 require ( github.com/jsternberg/zap-logfmt v1.3.0 diff --git a/zapetw/core_windows.go b/zapetw/core_windows.go index 955b1149f6..699cb8bd24 100644 --- a/zapetw/core_windows.go +++ b/zapetw/core_windows.go @@ -9,48 +9,45 @@ import ( // - const providername = "ACN-Monitoring" -type ETWCore struct { +type Core struct { provider *etw.Provider eventName string encoder zapcore.Encoder - fields []zapcore.Field zapcore.LevelEnabler } -func NewETWCore(eventName string, encoder zapcore.Encoder, levelEnabler zapcore.LevelEnabler) (*ETWCore, error) { - provider, err := etw.NewProviderWithOptions(providername) +func New(providerName, eventName string, encoder zapcore.Encoder, levelEnabler zapcore.LevelEnabler) (zapcore.Core, func(), error) { + provider, err := etw.NewProviderWithOptions(providerName) if err != nil { - return nil, errors.Wrap(err, "failed to create ETW provider") + return nil, func() { _ = provider.Close() }, errors.Wrap(err, "failed to create ETW provider") } - return &ETWCore{ + return &Core{ provider: provider, eventName: eventName, encoder: encoder, LevelEnabler: levelEnabler, - }, nil + }, func() { _ = provider.Close() }, nil } -func (core *ETWCore) With(fields []zapcore.Field) zapcore.Core { - return &ETWCore{ - provider: core.provider, - eventName: core.eventName, - encoder: core.encoder, - LevelEnabler: core.LevelEnabler, - fields: append(core.fields, fields...), +func (core *Core) With(fields []zapcore.Field) zapcore.Core { + clone := core.clone() + for i := range fields { + fields[i].AddTo(clone.encoder) } + return clone } // Check is an implementation of the zapcore.Core interface's Check method. // Check determines whether the logger core is enabled at the supplied zapcore.Entry's Level. // If enabled, it adds the core to the CheckedEntry and returns it, otherwise returns the CheckedEntry unchanged. -func (core *ETWCore) Check(entry zapcore.Entry, checkedEntry *zapcore.CheckedEntry) *zapcore.CheckedEntry { +func (core *Core) Check(entry zapcore.Entry, checkedEntry *zapcore.CheckedEntry) *zapcore.CheckedEntry { if core.Enabled(entry.Level) { return checkedEntry.AddCore(entry, core) } return checkedEntry } -func (core *ETWCore) Write(entry zapcore.Entry, fields []zapcore.Field) error { +func (core *Core) Write(entry zapcore.Entry, fields []zapcore.Field) error { etwLevel := zapLevelToETWLevel(entry.Level) buffer, err := core.encoder.EncodeEntry(entry, fields) @@ -70,10 +67,19 @@ func (core *ETWCore) Write(entry zapcore.Entry, fields []zapcore.Field) error { return nil } -func (core *ETWCore) Sync() error { +func (core *Core) Sync() error { return nil } +func (core *Core) clone() *Core { + return &Core{ + provider: core.provider, + eventName: core.eventName, + encoder: core.encoder.Clone(), + LevelEnabler: core.LevelEnabler, + } +} + func zapLevelToETWLevel(level zapcore.Level) etw.Level { switch level { case zapcore.DebugLevel:
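The zapetw rewrite above changes the package's construction contract: New takes the ETW provider name as a parameter (replacing the removed providername const), returns a plain zapcore.Core plus a cleanup func that closes the provider, and With now encodes fields into a cloned encoder instead of buffering them in a fields slice, mirroring how zapcore's built-in cores implement With and avoiding the aliasing hazard of appending to a shared backing array. Note that the cleanup func is returned even on the error path; if etw.NewProviderWithOptions yields a nil provider on failure, invoking that cleanup would dereference nil, so the sketch below only defers it after a successful construction. This is a minimal usage sketch, assuming the import path github.com/Azure/azure-container-networking/zapetw and an illustrative event name:

//go:build windows

package main

import (
	"github.com/Azure/azure-container-networking/zapetw" // assumed import path
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	encoder := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())

	// "ACN-Monitoring" was the hardcoded provider name before this change;
	// callers now supply it. The event name here is illustrative.
	core, closeETW, err := zapetw.New("ACN-Monitoring", "ExampleEvent", encoder, zapcore.InfoLevel)
	if err != nil {
		panic(err)
	}
	defer closeETW() // closes the underlying ETW provider

	logger := zap.New(core)

	// With clones the core and encodes the field into the clone's encoder,
	// so it is emitted with every subsequent entry from this logger.
	logger.With(zap.String("component", "example")).Info("hello via ETW")
}

Returning the closer alongside the core, rather than exposing a Close method on the concrete type, lets New hand back the zapcore.Core interface while still giving callers a way to release the provider.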
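Looking back at the validator test file at the top of this section: intstrPtr, int32Ptr, and the inline func() *corev1.Protocol closures all exist only to take the address of a value. Since the tool's go.mod targets Go 1.23, a single generic helper could replace all three. The sketch below is illustrative only (ptr is a hypothetical name, not part of this patch); k8s.io/utils, already an indirect dependency of the tool, ships an equivalent as ptr.To in its ptr package.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// ptr returns a pointer to v, covering every case the per-type helpers
// (intstrPtr, int32Ptr) and the inline protocol closures handle.
func ptr[T any](v T) *T {
	return &v
}

func main() {
	protocol := ptr(corev1.ProtocolUDP) // replaces the inline *corev1.Protocol closure
	port := ptr(intstr.FromInt32(8080)) // replaces intstrPtr(intstr.FromInt32(8080))
	replicas := ptr(int32(3))           // replaces int32Ptr(3)
	fmt.Println(*protocol, port.String(), *replicas)
}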