test: refactor cluster creation to a shell script

Signed-off-by: Xe Iaso <me@xeiaso.net>
This commit is contained in:
Xe Iaso
2026-05-16 03:20:39 -04:00
parent c082cd89dc
commit 8480175eac
3 changed files with 70 additions and 11 deletions
+20 -10
View File
@@ -100,15 +100,21 @@ spec:
- name: create-cluster - name: create-cluster
image: $(tasks.docker-build-ci.results.IMAGE_URL)@$(tasks.docker-build-ci.results.IMAGE_DIGEST) image: $(tasks.docker-build-ci.results.IMAGE_URL)@$(tasks.docker-build-ci.results.IMAGE_DIGEST)
workingDir: $(workspaces.repo.path)/repo workingDir: $(workspaces.repo.path)/repo
env:
- name: NAMESPACE
value: $(context.pipelineRun.namespace)
- name: PIPELINE_NAME
value: $(context.pipeline.name)
- name: PIPELINERUN_NAME
value: $(context.pipelineRun.name)
- name: PIPELINERUN_UID
value: $(context.pipelineRun.uid)
- name: KUBECONFIG_OUT
value: $(workspaces.repo.path)/kube/config
script: | script: |
CLUSTER_NAME=`kubectl create -f test/k3k/test-cluster.yaml -ojson | jq -r '.metadata.name'` #!/usr/bin/env bash
echo -n "${CLUSTER_NAME}" | tee "$(results.cluster-name.path)" set -euo pipefail
echo ./test/k3k/create-cluster.sh > "$(results.cluster-name.path)"
kubectl label -n $(context.pipelineRun.namespace) clusters.k3k.io/"${CLUSTER_NAME}" tekton.dev/memberOf=tasks tekton.dev/pipeline="$(context.pipeline.name)" tekton.dev/pipelineRun=$(context.pipelineRun.name) tekton.dev/pipelineRunUID=$(context.pipelineRun.uid)
kubectl wait --for=condition=Ready clusters.k3k.io/"${CLUSTER_NAME}" -n $(context.pipelineRun.namespace) --timeout 5m
kubectl wait --for=create "secret/k3k-${CLUSTER_NAME}-kubeconfig" -n $(context.pipelineRun.namespace) --timeout 5m
mkdir -p $(workspaces.repo.path)/kube
kubectl get -ojson -n $(context.pipelineRun.namespace) "secret/k3k-${CLUSTER_NAME}-kubeconfig" | jq '.data["kubeconfig.yaml"]' -r | base64 -d > $(workspaces.repo.path)/kube/config
- name: build-assets - name: build-assets
runAfter: ["docker-build-ci"] runAfter: ["docker-build-ci"]
taskSpec: taskSpec:
@@ -193,8 +199,12 @@ spec:
env: env:
- name: KUBECONFIG - name: KUBECONFIG
value: "$(workspaces.repo.path)/kube/config" value: "$(workspaces.repo.path)/kube/config"
finally:
- name: teardown-cluster - name: teardown-cluster
runAfter: ["provision-test-cluster", "go-test", "integration"] when:
- input: "$(tasks.provision-test-cluster.status)"
operator: in
values: ["Succeeded"]
taskSpec: taskSpec:
workspaces: workspaces:
- name: repo - name: repo
@@ -204,4 +214,4 @@ spec:
image: $(tasks.docker-build-ci.results.IMAGE_URL)@$(tasks.docker-build-ci.results.IMAGE_DIGEST) image: $(tasks.docker-build-ci.results.IMAGE_URL)@$(tasks.docker-build-ci.results.IMAGE_DIGEST)
workingDir: $(workspaces.repo.path)/repo workingDir: $(workspaces.repo.path)/repo
script: | script: |
kubectl delete -n $(context.pipelineRun.namespace) clusters.k3k.io/"$(tasks.provision-test-cluster.results.cluster-name)" kubectl delete --ignore-not-found -n $(context.pipelineRun.namespace) clusters.k3k.io/"$(tasks.provision-test-cluster.results.cluster-name)"
+49
View File
@@ -0,0 +1,49 @@
#!/usr/bin/env bash
# Create a k3k cluster, wait for it to be Ready, and write its kubeconfig.
# Prints the generated cluster name to stdout on success — with NO trailing
# newline, so the caller can capture stdout verbatim (e.g. into a Tekton
# result file that is later interpolated into kubectl resource names).
#
# Required env:
#   NAMESPACE        Kubernetes namespace to create the cluster in
#   KUBECONFIG_OUT   Path to write the resulting kubeconfig
#
# Optional env (set under Tekton to enable ownerReference-based GC + labels):
#   PIPELINE_NAME     Tekton Pipeline name
#   PIPELINERUN_NAME  Tekton PipelineRun name
#   PIPELINERUN_UID   Tekton PipelineRun UID
set -euo pipefail
: "${NAMESPACE:?NAMESPACE must be set}"
: "${KUBECONFIG_OUT:?KUBECONFIG_OUT must be set}"

# Resolve the manifest relative to this script, not the caller's cwd.
script_dir=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)

# The manifest uses a server-generated name, so capture it from the response.
cluster_name=$(kubectl create -n "${NAMESPACE}" -f "${script_dir}/test-cluster.yaml" -ojson | jq -r '.metadata.name')

if [[ -n "${PIPELINERUN_NAME:-}" && -n "${PIPELINERUN_UID:-}" ]]; then
  # Tie the cluster's lifetime to the PipelineRun so Kubernetes garbage
  # collection deletes it even if the teardown task never runs.
  owner_ref=$(jo \
    apiVersion=tekton.dev/v1 \
    kind=PipelineRun \
    name="${PIPELINERUN_NAME}" \
    uid="${PIPELINERUN_UID}" \
    blockOwnerDeletion=false)
  # Quote the nested jo call so its JSON output reaches the outer jo as a
  # single argument even if jo's output format ever contains whitespace.
  patch=$(jo metadata="$(jo "ownerReferences[]=${owner_ref}")")
  kubectl patch -n "${NAMESPACE}" "clusters.k3k.io/${cluster_name}" --type=merge -p "${patch}" >&2
  kubectl label -n "${NAMESPACE}" "clusters.k3k.io/${cluster_name}" \
    "tekton.dev/memberOf=tasks" \
    "tekton.dev/pipeline=${PIPELINE_NAME:-}" \
    "tekton.dev/pipelineRun=${PIPELINERUN_NAME}" \
    "tekton.dev/pipelineRunUID=${PIPELINERUN_UID}" >&2
fi

# All status output goes to stderr; stdout is reserved for the cluster name.
kubectl wait --for=condition=Ready "clusters.k3k.io/${cluster_name}" -n "${NAMESPACE}" --timeout 5m >&2
kubectl wait --for=create "secret/k3k-${cluster_name}-kubeconfig" -n "${NAMESPACE}" --timeout 5m >&2

mkdir -p "$(dirname "${KUBECONFIG_OUT}")"
# Write the kubeconfig atomically so a reader never sees a half-written file.
tmp_kubeconfig="${KUBECONFIG_OUT}.tmp"
kubectl get -ojson -n "${NAMESPACE}" "secret/k3k-${cluster_name}-kubeconfig" \
  | jq -r '.data["kubeconfig.yaml"]' \
  | base64 -d > "${tmp_kubeconfig}"
mv -f "${tmp_kubeconfig}" "${KUBECONFIG_OUT}"

# printf, not echo: a trailing newline here would be captured into the Tekton
# result and break downstream "clusters.k3k.io/$(...results.cluster-name)"
# references (the pre-refactor inline script used `echo -n` for this reason).
printf '%s' "${cluster_name}"
+1 -1
View File
@@ -11,5 +11,5 @@ RUN CGO_ENABLED=0 go install golang.org/dl/go1.23.6@latest \
FROM alpine:${ALPINE_VERSION} FROM alpine:${ALPINE_VERSION}
COPY --from=go /app/bin/go /usr/local/bin/go COPY --from=go /app/bin/go /usr/local/bin/go
RUN apk add -U nodejs git build-base git npm bash zstd brotli gzip jq kubectl python3 py3-pip py3-virtualenv \ RUN apk add -U nodejs git build-base git npm bash zstd brotli gzip jq jo kubectl python3 py3-pip py3-virtualenv \
&& go download && go download