!68 Add restart, pause, and web terminal support for devcontainers on Kubernetes

* Fix k8s/devcontainer/suite_test
* Improve the DevStar Controller Manager image code
* Update Dockerfile.rootless to resolve the Go version problem
* Move Dockerfile.controller-manager to its new location
* Merge remote-tracking branch 'origin/dev' into devcontainer-on-k8s
* Merge remote-tracking branch 'origin/add_k8s' into AppOnK8s
* Merge remote-tracking branch 'origin/add-dockerfile-method-and-start-s…
* Add stop/restart for devcontainers and a web terminal on k8s
* Merge branch 'add-dockerfile-method-and-start-stop-container' of https…
* Update the build, install, and usage flow for the container-image approach; the devcontainer feature still has issues
* fix run postCreateCommand bug
* Manage startup scripts via .sh files
* Merge branch 'add-dockerfile-method-and-start-stop-container' of https…
* add restart command and fix bug
* chore: add the go.mod and go.sum files for the k8s controller
* Merge branch 'add-dockerfile-method-and-start-stop-container' of https…
* Add a controller-manager on k8s for devstar
* add dockerfile method to create container and save container .restart …
Committed by: 孟宁
Parent: 02baa3b7af
Commit: 33a4a54e85
CI workflow (Gitea Actions):

@@ -41,6 +41,7 @@ jobs:
      - name: 🔧 Test Codes and Build an Artifact
        run: |
          echo "Prepare to build repository code ${{ gitea.repository }}:${{ gitea.ref }}."
          make test
          make docker
      - name: 🚀 Push Artifact to devstar.cn and docker.io Registry
        run: |
@@ -50,12 +51,20 @@ jobs:
          docker push ${{ vars.DOCKER_REGISTRY_ADDRESS }}/${{ vars.DOCKER_REPOSITORY_ARTIFACT}}:rootless-dev-${{ gitea.sha }}
          docker push ${{ vars.DOCKER_REGISTRY_ADDRESS }}/${{ vars.DOCKER_REPOSITORY_ARTIFACT}}:latest
          GITHUB_TOKEN="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"; REPO="mengning/DevStar"; WORKFLOW_FILE="PushDevStarImage2DockerHub.yml"; BRANCH="main"; URL="https://api.github.com/repos/$REPO/actions/workflows/$WORKFLOW_FILE/dispatches"; response=$(curl -s -o /dev/null -w "%{http_code}" -X POST "$URL" -H "Authorization: token $GITHUB_TOKEN" -H "Accept: application/vnd.github.v3+json" -d "{\"ref\":\"$BRANCH\"}"); if [ "$response" -eq 204 ]; then echo "Successfully triggered the GitHub workflow that syncs devstar-studio:latest to docker.io!"; else echo "Failed to trigger the GitHub workflow that syncs devstar-studio:latest to docker.io: HTTP status code $response"; fi
          docker tag devstar-controller-manager:latest ${{ vars.DOCKER_REGISTRY_ADDRESS }}/devstar/devstar-controller-manager:rootless-dev-${{ gitea.sha }}
          docker tag devstar-controller-manager:latest ${{ vars.DOCKER_REGISTRY_ADDRESS }}/devstar/devstar-controller-manager:latest
          echo "${{ secrets.DOCKER_REGISTRY_PASSWORD }}" | docker login -u ${{ secrets.DOCKER_REGISTRY_USERNAME }} ${{ vars.DOCKER_REGISTRY_ADDRESS }} --password-stdin
          docker push ${{ vars.DOCKER_REGISTRY_ADDRESS }}/devstar/devstar-controller-manager:rootless-dev-${{ gitea.sha }}
          docker push ${{ vars.DOCKER_REGISTRY_ADDRESS }}/devstar/devstar-controller-manager:latest
      - name: 🍏 Job Status Report
        run: |
          echo "🍏 This job's status is ${{ job.status }}."
          echo "Output Artifact: ${{ vars.DOCKER_REGISTRY_ADDRESS }}/${{ vars.DOCKER_REPOSITORY_ARTIFACT}}:rootless-dev-${{ gitea.sha }}"
          echo "=> Artifact Tag: latest"
          echo "=> Artifact Tag: rootless-dev-${{ gitea.sha }}"
          echo "Output Artifact: ${{ vars.DOCKER_REGISTRY_ADDRESS }}/devstar/devstar-controller-manager:rootless-dev-${{ gitea.sha }}"
          echo "=> Artifact Tag: latest"
          echo "=> Artifact Tag: rootless-dev-${{ gitea.sha }}"
      - name: 📝 Update dev.devstar.cn
        run: |
          kubectl config set-cluster remote-cluster --server=$${{ secrets.K8S_URL }} --insecure-skip-tls-verify=true
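The curl one-liner above fires a GitHub `workflow_dispatch` event to mirror devstar-studio:latest to docker.io. For reference, a minimal Go sketch of the same REST call; the token is a placeholder, while repository, workflow file, and branch are taken from the script above:

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// Placeholder token; never hard-code a real PAT in a workflow file.
	token := "<github_pat>"
	url := "https://api.github.com/repos/mengning/DevStar/actions/workflows/PushDevStarImage2DockerHub.yml/dispatches"

	req, err := http.NewRequest(http.MethodPost, url, strings.NewReader(`{"ref":"main"}`))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "token "+token)
	req.Header.Set("Accept", "application/vnd.github.v3+json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// GitHub answers 204 No Content when the dispatch is accepted.
	if resp.StatusCode == http.StatusNoContent {
		fmt.Println("workflow dispatch triggered")
	} else {
		fmt.Println("workflow dispatch failed: HTTP", resp.StatusCode)
	}
}
```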
.gitignore · 3 lines changed
@@ -115,3 +115,6 @@ prime/

# Manpage
/man

# Mock cluster used for k8s tests
modules/k8s/bin/
Dockerfile.rootless:

@@ -13,7 +13,8 @@ FROM ${DOCKER_REGISTRY_ADDRESS}/${DOCKER_REGISTRY_USERNAME}/${DEV_CONTAINER} AS
ARG GOPROXY="https://goproxy.cn"
ENV GOPROXY=${GOPROXY:-direct}
# Note: for an NPM proxy/mirror, see the `registry` variable in the repository's `/.npmrc`; the Taobao mirror is recommended, i.e. `registry=https://registry.npmmirror.com/`

# Allow automatic toolchain versions
ENV GOTOOLCHAIN=auto

ARG GITEA_VERSION
# TODO: adapt for https://devstar.cn
@@ -34,7 +35,7 @@ ENV GITEA_I_AM_BEING_UNSAFE_RUNNING_AS_ROOT=1
RUN if [ -n "${GITEA_VERSION}" ]; then \
      git checkout "${GITEA_VERSION}"; \
    fi \
    && make clean-all test build \
    && make clean-all build \
    && echo "-------------------" \
    && echo " BUILD SUCCESS" \
    && echo "-------------------"
Makefile · 53 lines changed
@@ -192,6 +192,7 @@ help:
	@echo "Make Routines:"
	@echo " - \"\"                               equivalent to \"build\""
	@echo " - build                            build everything"
	@echo " - build-debug                      build everything to debug"
	@echo " - frontend                         build frontend files"
	@echo " - backend                          build backend files"
	@echo " - watch                            watch everything and continuously rebuild"
@@ -249,6 +250,9 @@ help:
	@echo " - tidy                             run go mod tidy"
	@echo " - test[\#TestSpecificName]          run unit test"
	@echo " - test-sqlite[\#TestSpecificName]   run integration test for sqlite"
	@echo " - controller-manager               build controller-manager"
	@echo " - controller-manager-debug         build controller-manager with debug info"
	@echo " - k8s-download-test-bins           download Kubernetes test binaries (etcd, kube-apiserver, kubectl)"

.PHONY: go-check
go-check:
@@ -465,7 +469,7 @@ watch-backend: go-check
test: test-frontend test-backend

.PHONY: test-backend
test-backend:
test-backend: k8s-download-test-bins
	@echo "Running go test with $(GOTESTFLAGS) -tags '$(TEST_TAGS)'..."
	@$(GO) test $(GOTESTFLAGS) -tags='$(TEST_TAGS)' $(GO_TEST_PACKAGES)

@@ -769,6 +773,16 @@ install: $(wildcard *.go)
.PHONY: build
build: frontend backend

# New target: build binaries with debug info
.PHONY: build-debug
build-debug: frontend backend-debug

.PHONY: backend-debug
backend-debug: go-check generate-backend $(EXECUTABLE)-debug

$(EXECUTABLE)-debug: $(GO_SOURCES) $(TAGS_PREREQ)
	CGO_CFLAGS="$(CGO_CFLAGS)" $(GO) build $(GOFLAGS) $(EXTRA_GOFLAGS) -tags '$(TAGS)' -ldflags '$(LDFLAGS)' -o $@

.PHONY: frontend
frontend: $(WEBPACK_DEST)

@@ -982,9 +996,46 @@ generate-manpage:
.PHONY: docker
docker:
	docker build -t devstar-studio:latest -f Dockerfile.rootless .
	docker build -t devstar-controller-manager:latest -f modules/k8s/Dockerfile.controller-manager .
#	docker build --disable-content-trust=false -t $(DOCKER_REF) .
# support also build args docker build --build-arg GITEA_VERSION=v1.2.3 --build-arg TAGS="bindata sqlite sqlite_unlock_notify" .

# New target: build the controller-manager
.PHONY: controller-manager
controller-manager: go-check
	@echo "Building controller-manager..."
	CGO_CFLAGS="$(CGO_CFLAGS)" $(GO) build $(GOFLAGS) $(EXTRA_GOFLAGS) -tags '$(TAGS)' -ldflags '-s -w $(LDFLAGS)' -o controller-manager modules/k8s/cmd/controller-manager/controller-manager.go

# Debug build of the controller-manager
.PHONY: controller-manager-debug
controller-manager-debug: go-check
	@echo "Building controller-manager with debug info..."
	CGO_CFLAGS="$(CGO_CFLAGS)" $(GO) build $(GOFLAGS) $(EXTRA_GOFLAGS) -tags '$(TAGS)' -ldflags '$(LDFLAGS)' -o controller-manager-debug modules/k8s/cmd/controller-manager/controller-manager.go

# K8S test environment configuration
K8S_BIN_DIR := modules/k8s/bin
K8S_LOCALBIN := $(K8S_BIN_DIR)
K8S_ENVTEST_K8S_VERSION := 1.31.0
K8S_ENVTEST_VERSION ?= release-0.19
K8S_ENVTEST ?= $(K8S_LOCALBIN)/setup-envtest

.PHONY: k8s-download-test-bins
k8s-download-test-bins: ## Download the Kubernetes test binaries (etcd, kube-apiserver, kubectl)
	@echo "Downloading the Kubernetes $(K8S_ENVTEST_K8S_VERSION) test binaries..."
	@mkdir -p $(K8S_LOCALBIN)
	@# First install the setup-envtest tool
	@[ -f "$(K8S_ENVTEST)" ] || GOBIN=$(shell pwd)/$(K8S_LOCALBIN) go install sigs.k8s.io/controller-runtime/tools/setup-envtest@$(K8S_ENVTEST_VERSION)
	@# Then use it to download the K8S test binaries
	@$(K8S_ENVTEST) use $(K8S_ENVTEST_K8S_VERSION) --bin-dir $(K8S_LOCALBIN)
	@# Verify the files exist — adjust the path check to the actual output
	@if [ -d "$(K8S_LOCALBIN)/k8s/$(K8S_ENVTEST_K8S_VERSION)-$(shell go env GOOS)-$(shell go env GOARCH)" ]; then \
		echo "Test binaries downloaded to: $(K8S_LOCALBIN)/$(K8S_ENVTEST_K8S_VERSION)-$(shell go env GOOS)-$(shell go env GOARCH)/"; \
		ls -la $(K8S_LOCALBIN)/k8s/$(K8S_ENVTEST_K8S_VERSION)-$(shell go env GOOS)-$(shell go env GOARCH)/; \
	else \
		echo "Note: the test binaries were not found at the expected path but may have been downloaded elsewhere. Listing $(K8S_LOCALBIN):"; \
		find $(K8S_LOCALBIN) -type f -name "kube-apiserver" | grep -q . && echo "Found kube-apiserver; the download likely succeeded."; \
		ls -la $(K8S_LOCALBIN)/; \
	fi
# This endif closes the if at the top of the file
endif
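The binaries fetched by `k8s-download-test-bins` feed controller-runtime's envtest framework, which `test-backend` now depends on. A minimal sketch of how a suite test can point envtest at those binaries; the exact bin-path layout and CRD directory are assumptions based on the Makefile above:

```go
package k8s_test

import (
	"path/filepath"
	"testing"

	"sigs.k8s.io/controller-runtime/pkg/envtest"
)

func TestStartLocalAPIServer(t *testing.T) {
	// setup-envtest places binaries under <bin-dir>/k8s/<version>-<os>-<arch>/.
	env := &envtest.Environment{
		BinaryAssetsDirectory: filepath.Join("modules", "k8s", "bin", "k8s", "1.31.0-linux-amd64"),
		// Install the DevcontainerApp CRD so the suite can create custom resources.
		CRDDirectoryPaths: []string{filepath.Join("modules", "k8s", "config", "devcontainer", "crd", "bases")},
	}

	cfg, err := env.Start() // boots a local etcd + kube-apiserver
	if err != nil {
		t.Fatalf("starting envtest: %v", err)
	}
	defer func() { _ = env.Stop() }()

	t.Logf("test API server listening at %s", cfg.Host)
}
```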
go.mod · 109 lines changed
@@ -1,6 +1,8 @@
module code.gitea.io/gitea

go 1.22
go 1.23.0

toolchain go1.23.3

require (
	code.gitea.io/actions-proto-go v0.4.0
@@ -60,7 +62,7 @@ require (
	github.com/gogs/go-gogs-client v0.0.0-20210131175652-1d7215cd8d85
	github.com/golang-jwt/jwt/v5 v5.2.1
	github.com/google/go-github/v61 v61.0.0
	github.com/google/pprof v0.0.0-20240618054019-d3b898a103f8
	github.com/google/pprof v0.0.0-20250403155104-27863c87afa6
	github.com/google/uuid v1.6.0
	github.com/gorilla/feeds v1.2.0
	github.com/gorilla/sessions v1.3.0
@@ -113,20 +115,20 @@ require (
	github.com/yuin/goldmark v1.7.2
	github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc
	github.com/yuin/goldmark-meta v1.1.0
	golang.org/x/crypto v0.24.0
	golang.org/x/crypto v0.36.0
	golang.org/x/image v0.15.0
	golang.org/x/net v0.26.0
	golang.org/x/oauth2 v0.21.0
	golang.org/x/sys v0.21.0
	golang.org/x/text v0.16.0
	golang.org/x/tools v0.22.0
	google.golang.org/grpc v1.62.1
	google.golang.org/protobuf v1.34.2
	golang.org/x/net v0.37.0
	golang.org/x/oauth2 v0.23.0
	golang.org/x/sys v0.32.0
	golang.org/x/text v0.23.0
	golang.org/x/tools v0.31.0
	google.golang.org/grpc v1.65.0
	google.golang.org/protobuf v1.36.5
	gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df
	gopkg.in/ini.v1 v1.67.0
	gopkg.in/yaml.v3 v3.0.1
	k8s.io/api v0.23.10
	k8s.io/apimachinery v0.23.10
	k8s.io/api v0.32.3
	k8s.io/apimachinery v0.32.3
	mvdan.cc/xurls/v2 v2.5.0
	strk.kbt.io/projects/go/libravatar v0.0.0-20191008002943-06d1c002b251
	xorm.io/builder v0.3.13
@@ -134,30 +136,62 @@ require (
)

require (
	cel.dev/expr v0.18.0 // indirect
	github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
	github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
	github.com/blang/semver/v4 v4.0.0 // indirect
	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
	github.com/distribution/reference v0.5.0 // indirect
	github.com/docker/distribution v2.8.3+incompatible // indirect
	github.com/docker/go-units v0.5.0 // indirect
	github.com/ishidawataru/sctp v0.0.0-20250303034628-ecf9ed6df987 // indirect
	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
	github.com/evanphx/json-patch/v5 v5.9.11 // indirect
	github.com/go-logr/stdr v1.2.2 // indirect
	github.com/go-logr/zapr v1.3.0 // indirect
	github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
	github.com/google/btree v1.1.3 // indirect
	github.com/google/cel-go v0.22.0 // indirect
	github.com/google/gnostic-models v0.6.8 // indirect
	github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
	github.com/inconshreveable/mousetrap v1.1.0 // indirect
	github.com/moby/term v0.5.0 // indirect
	github.com/morikuni/aec v1.0.0 // indirect
	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
	github.com/stoewer/go-strcase v1.3.0 // indirect
	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
	go.opentelemetry.io/otel v1.28.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect
	go.opentelemetry.io/otel/metric v1.28.0 // indirect
	go.opentelemetry.io/otel/sdk v1.28.0 // indirect
	go.opentelemetry.io/otel/trace v1.28.0 // indirect
	go.opentelemetry.io/proto/otlp v1.3.1 // indirect
	go.uber.org/automaxprocs v1.6.0 // indirect
	gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
	google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect
	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
	k8s.io/apiextensions-apiserver v0.32.1 // indirect
	k8s.io/apiserver v0.32.1 // indirect
	k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect
)

require (
	github.com/ArtisanCloud/PowerSocialite/v3 v3.0.7 // indirect
	github.com/clbanning/mxj/v2 v2.7.0 // indirect
	github.com/go-logr/logr v1.4.1 // indirect
	github.com/go-logr/logr v1.4.2 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/google/go-cmp v0.6.0 // indirect
	github.com/google/go-cmp v0.7.0 // indirect
	github.com/google/gofuzz v1.2.0 // indirect
	github.com/patrickmn/go-cache v2.1.0+incompatible // indirect
	golang.org/x/term v0.21.0 // indirect
	golang.org/x/term v0.30.0 // indirect
	gopkg.in/inf.v0 v0.9.1 // indirect
	k8s.io/client-go v0.23.10
	k8s.io/klog/v2 v2.30.0 // indirect
	k8s.io/utils v0.0.0-20211116205334-6203023598ed // indirect
	sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
	sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
	sigs.k8s.io/yaml v1.2.0 // indirect
	k8s.io/client-go v0.32.3
	k8s.io/klog/v2 v2.130.1
	k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
	sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
	sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
	sigs.k8s.io/yaml v1.4.0 // indirect
)

require (
@@ -206,7 +240,7 @@ require (
	github.com/couchbase/go-couchbase v0.1.1 // indirect
	github.com/couchbase/gomemcached v0.3.1 // indirect
	github.com/couchbase/goutils v0.1.2 // indirect
	github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
	github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
	github.com/cyphar/filepath-securejoin v0.2.5 // indirect
	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
	github.com/davidmz/go-pageant v1.0.2 // indirect
@@ -215,7 +249,7 @@ require (
	github.com/emersion/go-sasl v0.0.0-20231106173351-e73c9f7bad43 // indirect
	github.com/fatih/color v1.17.0 // indirect
	github.com/felixge/httpsnoop v1.0.4 // indirect
	github.com/fxamacker/cbor/v2 v2.6.0 // indirect
	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
	github.com/go-ap/errors v0.0.0-20240304112515-6077fa9c17b0 // indirect
	github.com/go-asn1-ber/asn1-ber v1.5.7 // indirect
	github.com/go-enry/go-oniguruma v1.2.1 // indirect
@@ -252,7 +286,7 @@ require (
	github.com/hashicorp/hcl v1.0.0 // indirect
	github.com/imdario/mergo v0.3.16 // indirect
	github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
	github.com/jessevdk/go-flags v1.5.0 // indirect
	github.com/jessevdk/go-flags v1.6.1 // indirect
	github.com/josharian/intern v1.0.0 // indirect
	github.com/kevinburke/ssh_config v1.2.0 // indirect
	github.com/klauspost/pgzip v1.2.6 // indirect
@@ -278,13 +312,15 @@ require (
	github.com/oklog/ulid v1.3.1 // indirect
	github.com/olekukonko/tablewriter v0.0.5 // indirect
	github.com/onsi/ginkgo v1.16.5 // indirect
	github.com/onsi/ginkgo/v2 v2.23.4
	github.com/onsi/gomega v1.37.0
	github.com/pelletier/go-toml/v2 v2.1.1 // indirect
	github.com/pierrec/lz4/v4 v4.1.21 // indirect
	github.com/pjbgf/sha1cd v0.3.0 // indirect
	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
	github.com/prometheus/client_model v0.6.0 // indirect
	github.com/prometheus/common v0.50.0 // indirect
	github.com/prometheus/procfs v0.13.0 // indirect
	github.com/prometheus/client_model v0.6.1 // indirect
	github.com/prometheus/common v0.55.0 // indirect
	github.com/prometheus/procfs v0.15.1 // indirect
	github.com/rhysd/actionlint v1.7.1 // indirect
	github.com/rivo/uniseg v0.4.7 // indirect
	github.com/rogpeppe/go-internal v1.12.0 // indirect
@@ -299,7 +335,8 @@ require (
	github.com/sourcegraph/conc v0.3.0 // indirect
	github.com/spf13/afero v1.11.0 // indirect
	github.com/spf13/cast v1.6.0 // indirect
	github.com/spf13/pflag v1.0.5 // indirect
	github.com/spf13/cobra v1.9.1
	github.com/spf13/pflag v1.0.6
	github.com/spf13/viper v1.18.2 // indirect
	github.com/ssor/bom v0.0.0-20170718123548-6386211fdfcf // indirect
	github.com/subosito/gotenv v1.6.0 // indirect
@@ -315,19 +352,21 @@ require (
	github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect
	github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
	github.com/zeebo/blake3 v0.2.3 // indirect
	go.etcd.io/bbolt v1.3.10 // indirect
	go.etcd.io/bbolt v1.3.11 // indirect
	go.mongodb.org/mongo-driver v1.14.0 // indirect
	go.uber.org/atomic v1.11.0 // indirect
	go.uber.org/multierr v1.11.0 // indirect
	go.uber.org/zap v1.27.0 // indirect
	golang.org/x/exp v0.0.0-20240314144324-c7f7c6466f7f // indirect
	golang.org/x/mod v0.18.0 // indirect
	golang.org/x/sync v0.7.0 // indirect
	golang.org/x/time v0.5.0 // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c // indirect
	golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
	golang.org/x/mod v0.24.0 // indirect
	golang.org/x/sync v0.12.0 // indirect
	golang.org/x/time v0.7.0 // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect
	gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect
	gopkg.in/warnings.v0 v0.1.2 // indirect
	gopkg.in/yaml.v2 v2.4.0 // indirect
	k8s.io/component-base v0.32.3
	sigs.k8s.io/controller-runtime v0.20.4
)

replace github.com/hashicorp/go-version => github.com/6543/go-version v1.3.1
go.sum · 667 lines changed — diff suppressed (file too large)
modules/k8s/Dockerfile.controller-manager · 44 lines (new file)
@@ -0,0 +1,44 @@
FROM golang:1.23 AS builder

WORKDIR /workspace

# Create the temporary directory structure
RUN mkdir -p modules/k8s

# Copy the Go Modules manifests
COPY go.mod go.mod
COPY go.sum go.sum

# Disable all proxies
ENV HTTP_PROXY=""
ENV HTTPS_PROXY=""
ENV http_proxy=""
ENV https_proxy=""
ENV GOPROXY=https://goproxy.cn,direct

# Download dependencies
RUN go mod download

# Copy the Go source code
COPY modules/k8s/ modules/k8s/

# Build the controller-manager binary
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o controller-manager modules/k8s/cmd/controller-manager/controller-manager.go

# Build a small image
FROM alpine:3.18

WORKDIR /

# Create a non-root user
RUN addgroup -g 65532 nonroot && \
    adduser -u 65532 -G nonroot -D nonroot

COPY --from=builder /workspace/modules/k8s/controller/ modules/k8s/controller/
COPY --from=builder /workspace/controller-manager .

USER 65532:65532

ENTRYPOINT ["/controller-manager"]

# $ docker build -t devstar-controller-manager:latest -f modules/k8s/Dockerfile.controller-manager .
modules/k8s/README.md · 18 lines (new file)
@@ -0,0 +1,18 @@
# DevStar Controller Manager

This directory contains the source code of the DevStar Controller Manager and the Dockerfile used to build it. The Controller Manager is responsible for managing the DevContainer custom resources in Kubernetes.

## Building the Docker Image

### How to build

Because of the project layout, the Docker image must be built from the project root:

```bash
# From the project root, run `make docker`, or build the devstar-controller-manager image on its own:
docker build -t devstar-controller-manager:latest -f modules/k8s/Dockerfile.controller-manager .

# When code is merged, the CI script builds and pushes the image devstar.cn/devstar/devstar-controller-manager:latest
```

This image is consumed by the devstar-controller-manager sub-chart of the devstar Helm chart; to use a new image, update values.yaml in the Helm chart.
modules/k8s/api/v1 — DevcontainerApp types:

@@ -24,6 +24,39 @@ import (
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.

// ExtraPortSpec defines an extra port configuration
type ExtraPortSpec struct {
	// Name is the name of the port
	// +optional
	Name string `json:"name,omitempty"`

	// ContainerPort is the port number inside the container
	// +kubebuilder:validation:Minimum=1
	// +kubebuilder:validation:Maximum=65535
	ContainerPort uint16 `json:"containerPort"`

	// ServicePort is the port number exposed by the Service
	// +kubebuilder:validation:Minimum=1
	// +kubebuilder:validation:Maximum=65535
	ServicePort uint16 `json:"servicePort"`
}

// ExtraPortAssigned describes an extra port that has been assigned
type ExtraPortAssigned struct {
	// Name is the name of the port
	// +optional
	Name string `json:"name,omitempty"`

	// ContainerPort is the port number inside the container
	ContainerPort uint16 `json:"containerPort"`

	// ServicePort is the port number exposed by the Service
	ServicePort uint16 `json:"servicePort"`

	// NodePort is the NodePort allocated by Kubernetes
	NodePort uint16 `json:"nodePort"`
}

// DevcontainerAppSpec defines the desired state of DevcontainerApp
type DevcontainerAppSpec struct {
	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
@@ -86,6 +119,10 @@ type ServiceSpec struct {
	// +kubebuilder:validation:Minimum=1
	// +optional
	ServicePort uint16 `json:"servicePort,omitempty"`

	// ExtraPorts defines extra port configurations
	// +optional
	ExtraPorts []ExtraPortSpec `json:"extraPorts,omitempty"`
}

// DevcontainerAppStatus defines the observed state of DevcontainerApp
@@ -105,6 +142,10 @@ type DevcontainerAppStatus struct {
	// +optional
	NodePortAssigned uint16 `json:"nodePortAssigned"`

	// ExtraPortsAssigned stores the NodePorts for the extra port mappings
	// +optional
	ExtraPortsAssigned []ExtraPortAssigned `json:"extraPortsAssigned,omitempty"`

	// Ready indicates whether the Readiness Probe of the Pod managed by the DevcontainerApp has reached the ready state
	// +optional
	Ready bool `json:"ready"`
@@ -130,3 +171,7 @@ type DevcontainerAppList struct {
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []DevcontainerApp `json:"items"`
}

func init() {
	SchemeBuilder.Register(&DevcontainerApp{}, &DevcontainerAppList{})
}
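To make the new fields concrete, a small sketch that builds a ServiceSpec with two extra ports; the port numbers and names are illustrative, not taken from the repository:

```go
package main

import (
	"fmt"

	appsv1 "code.gitea.io/gitea/modules/k8s/api/v1"
)

func main() {
	// Hypothetical extra ports: a web terminal and a debug port.
	svc := appsv1.ServiceSpec{
		ExtraPorts: []appsv1.ExtraPortSpec{
			{Name: "web-terminal", ContainerPort: 7681, ServicePort: 7681},
			{Name: "debug", ContainerPort: 2345, ServicePort: 2345},
		},
	}

	// Per the status comments above, each entry later shows up in
	// Status.ExtraPortsAssigned together with the NodePort the cluster allocated.
	for _, p := range svc.ExtraPorts {
		fmt.Printf("%-12s container %5d -> service %5d\n", p.Name, p.ContainerPort, p.ServicePort)
	}
}
```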
modules/k8s/api/v1 — group version registration:

@@ -21,9 +21,16 @@ package v1

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/scheme"
)

var (
	// GroupVersion is group version used to register these objects
	GroupVersion = schema.GroupVersion{Group: "devcontainer.devstar.cn", Version: "v1"}

	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}

	// AddToScheme adds the types in this group-version to the given scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)
modules/k8s/api/v1 — generated deepcopy functions:

@@ -88,7 +88,7 @@ func (in *DevcontainerAppList) DeepCopyObject() runtime.Object {
func (in *DevcontainerAppSpec) DeepCopyInto(out *DevcontainerAppSpec) {
	*out = *in
	in.StatefulSet.DeepCopyInto(&out.StatefulSet)
	out.Service = in.Service
	in.Service.DeepCopyInto(&out.Service)
	if in.StartingDeadlineSeconds != nil {
		in, out := &in.StartingDeadlineSeconds, &out.StartingDeadlineSeconds
		*out = new(int64)
@@ -133,6 +133,11 @@ func (in *DevcontainerAppStatus) DeepCopyInto(out *DevcontainerAppStatus) {
		in, out := &in.LastScheduleTime, &out.LastScheduleTime
		*out = (*in).DeepCopy()
	}
	if in.ExtraPortsAssigned != nil {
		in, out := &in.ExtraPortsAssigned, &out.ExtraPortsAssigned
		*out = make([]ExtraPortAssigned, len(*in))
		copy(*out, *in)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevcontainerAppStatus.
@@ -145,9 +150,44 @@ func (in *DevcontainerAppStatus) DeepCopy() *DevcontainerAppStatus {
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExtraPortAssigned) DeepCopyInto(out *ExtraPortAssigned) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraPortAssigned.
func (in *ExtraPortAssigned) DeepCopy() *ExtraPortAssigned {
	if in == nil {
		return nil
	}
	out := new(ExtraPortAssigned)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExtraPortSpec) DeepCopyInto(out *ExtraPortSpec) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraPortSpec.
func (in *ExtraPortSpec) DeepCopy() *ExtraPortSpec {
	if in == nil {
		return nil
	}
	out := new(ExtraPortSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) {
	*out = *in
	if in.ExtraPorts != nil {
		in, out := &in.ExtraPorts, &out.ExtraPorts
		*out = make([]ExtraPortSpec, len(*in))
		copy(*out, *in)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSpec.
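The generated DeepCopyInto allocates a fresh slice for ExtraPortsAssigned instead of assigning the slice header directly. A standalone sketch of why that matters, with a local stand-in struct defined purely for illustration:

```go
package main

import "fmt"

// Local stand-in for the generated type, for illustration only.
type ExtraPortAssigned struct {
	Name     string
	NodePort uint16
}

func main() {
	in := []ExtraPortAssigned{{Name: "web-terminal", NodePort: 30081}}

	// Plain assignment copies only the slice header: both names share one backing array.
	aliased := in
	aliased[0].NodePort = 31000
	fmt.Println(in[0].NodePort) // 31000 — the "original" changed too

	// The deepcopy pattern: allocate a new backing array, then copy the elements.
	in[0].NodePort = 30081
	out := make([]ExtraPortAssigned, len(in))
	copy(out, in)
	out[0].NodePort = 31000
	fmt.Println(in[0].NodePort) // 30081 — the original is untouched
}
```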
modules/k8s/client/client.go · 51 lines (new file)
@@ -0,0 +1,51 @@
package client

import (
	"context"

	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"

	appsv1 "code.gitea.io/gitea/modules/k8s/api/v1"
)

// DevStarClient provides methods for operating on DevContainerApp resources
type DevStarClient struct {
	client client.Client
}

// NewDevStarClient creates a new client
func NewDevStarClient(c client.Client) *DevStarClient {
	return &DevStarClient{
		client: c,
	}
}

// GetDevContainerApp fetches a DevContainerApp resource
func (c *DevStarClient) GetDevContainerApp(ctx context.Context, name, namespace string) (*appsv1.DevcontainerApp, error) {
	app := &appsv1.DevcontainerApp{}
	err := c.client.Get(ctx, types.NamespacedName{Name: name, Namespace: namespace}, app)
	return app, err
}

// CreateDevContainerApp creates a DevContainerApp resource
func (c *DevStarClient) CreateDevContainerApp(ctx context.Context, app *appsv1.DevcontainerApp) error {
	return c.client.Create(ctx, app)
}

// UpdateDevContainerApp updates a DevContainerApp resource
func (c *DevStarClient) UpdateDevContainerApp(ctx context.Context, app *appsv1.DevcontainerApp) error {
	return c.client.Update(ctx, app)
}

// DeleteDevContainerApp deletes a DevContainerApp resource
func (c *DevStarClient) DeleteDevContainerApp(ctx context.Context, app *appsv1.DevcontainerApp) error {
	return c.client.Delete(ctx, app)
}

// ListDevContainerApps lists DevContainerApp resources
func (c *DevStarClient) ListDevContainerApps(ctx context.Context, namespace string) (*appsv1.DevcontainerAppList, error) {
	list := &appsv1.DevcontainerAppList{}
	err := c.client.List(ctx, list, client.InNamespace(namespace))
	return list, err
}
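A hypothetical usage sketch for DevStarClient: only the methods come from the file above; the scheme/client wiring via controller-runtime and the namespace name are assumptions:

```go
package main

import (
	"context"
	"fmt"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	appsv1 "code.gitea.io/gitea/modules/k8s/api/v1"
	devstar "code.gitea.io/gitea/modules/k8s/client"
)

func main() {
	// Build a scheme that knows the DevcontainerApp types, then a direct client.
	scheme, err := appsv1.SchemeBuilder.Build()
	if err != nil {
		panic(err)
	}
	c, err := client.New(ctrl.GetConfigOrDie(), client.Options{Scheme: scheme})
	if err != nil {
		panic(err)
	}

	dc := devstar.NewDevStarClient(c)
	list, err := dc.ListDevContainerApps(context.Background(), "devstar-system") // namespace is an assumption
	if err != nil {
		panic(err)
	}
	for _, app := range list.Items {
		fmt.Println(app.Name, "ready:", app.Status.Ready)
	}
}
```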
modules/k8s/cmd/controller-manager/app/options/options.go · 256 lines (new file)
@@ -0,0 +1,256 @@
package options

import (
	"crypto/tls"
	"flag"
	"fmt"
	"strings"
	"time"

	"github.com/spf13/pflag"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/leaderelection"
	cliflag "k8s.io/component-base/cli/flag"
	"k8s.io/klog/v2"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/healthz"
	"sigs.k8s.io/controller-runtime/pkg/metrics/filters"
	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
	"sigs.k8s.io/controller-runtime/pkg/webhook"

	"code.gitea.io/gitea/modules/k8s/controller"
)

type ControllerManagerOptions struct {
	KubeConfig      string
	Master          string
	MetricsAddr     string
	HealthProbeAddr string

	LeaderElect    bool
	LeaderElection *leaderelection.LeaderElectionConfig

	WebhookCertDir string
	SecureMetrics  bool
	EnableHTTP2    bool

	// ControllerGates is the list of controller gates to enable or disable controller.
	// '*' means "all enabled by default controllers"
	// 'foo' means "enable 'foo'"
	// '-foo' means "disable 'foo'"
	// first item for a particular name wins.
	ControllerGates []string

	DebugMode bool
}

func NewControllerManagerOptions() *ControllerManagerOptions {
	return &ControllerManagerOptions{
		KubeConfig:      "",
		Master:          "",
		MetricsAddr:     ":8080",
		HealthProbeAddr: ":8081",
		LeaderElect:     false,
		LeaderElection: &leaderelection.LeaderElectionConfig{
			LeaseDuration: 30 * time.Second,
			RenewDeadline: 15 * time.Second,
			RetryPeriod:   5 * time.Second,
		},
		WebhookCertDir:  "",
		SecureMetrics:   true,
		EnableHTTP2:     false,
		ControllerGates: []string{"*"},
		DebugMode:       false,
	}
}

// Flags returns a set of named command-line flag sets
func (s *ControllerManagerOptions) Flags() cliflag.NamedFlagSets {
	fss := cliflag.NamedFlagSets{}

	// Kubernetes-related options
	fs := fss.FlagSet("kubernetes")
	fs.StringVar(&s.KubeConfig, "kubeconfig", s.KubeConfig, "Path to kubeconfig file with authorization and master location information.")
	fs.StringVar(&s.Master, "master", s.Master, "The address of the Kubernetes API server.")

	// Metrics and health checks
	fs = fss.FlagSet("metrics")
	fs.StringVar(&s.MetricsAddr, "metrics-bind-address", s.MetricsAddr, "The address the metric endpoint binds to. Use :8443 for HTTPS or :8080 for HTTP, or 0 to disable.")
	fs.StringVar(&s.HealthProbeAddr, "health-probe-bind-address", s.HealthProbeAddr, "The address the probe endpoint binds to.")
	fs.BoolVar(&s.SecureMetrics, "metrics-secure", s.SecureMetrics, "If set, metrics endpoint is served securely via HTTPS.")

	// Leader-election options
	fs = fss.FlagSet("leaderelection")
	fs.BoolVar(&s.LeaderElect, "leader-elect", s.LeaderElect, "Whether to enable leader election. This field should be enabled when controller manager deployed with multiple replicas.")
	s.bindLeaderElectionFlags(s.LeaderElection, fs)

	// Webhook options
	fs = fss.FlagSet("webhook")
	fs.StringVar(&s.WebhookCertDir, "webhook-cert-dir", s.WebhookCertDir, "Certificate directory used to setup webhooks, need tls.crt and tls.key placed inside. If not set, webhook server would look up the server key and certificate in {TempDir}/k8s-webhook-server/serving-certs")
	fs.BoolVar(&s.EnableHTTP2, "enable-http2", s.EnableHTTP2, "If set, HTTP/2 will be enabled for the metrics and webhook servers")

	// Generic options
	fs = fss.FlagSet("generic")
	fs.StringSliceVar(&s.ControllerGates, "controllers", s.ControllerGates, fmt.Sprintf("A list of controllers to enable. '*' enables all on-by-default controllers, 'foo' enables the controller named 'foo', '-foo' disables the controller named 'foo'.\nAll controllers: %s",
		strings.Join(controller.GetAllControllers().List(), ", ")))
	fs.BoolVar(&s.DebugMode, "debug", s.DebugMode, "Don't enable this if you don't know what it means.")

	// klog options
	kfs := fss.FlagSet("klog")
	local := flag.NewFlagSet("klog", flag.ExitOnError)
	klog.InitFlags(local)
	local.VisitAll(func(fl *flag.Flag) {
		fl.Name = strings.Replace(fl.Name, "_", "-", -1)
		kfs.AddGoFlag(fl)
	})

	return fss
}

// bindLeaderElectionFlags binds the leader-election flags
func (s *ControllerManagerOptions) bindLeaderElectionFlags(l *leaderelection.LeaderElectionConfig, fs *pflag.FlagSet) {
	fs.DurationVar(&l.LeaseDuration, "leader-elect-lease-duration", l.LeaseDuration, ""+
		"The duration that non-leader candidates will wait after observing a leadership "+
		"renewal until attempting to acquire leadership of a led but unrenewed leader "+
		"slot. This is effectively the maximum duration that a leader can be stopped "+
		"before it is replaced by another candidate. This is only applicable if leader "+
		"election is enabled.")
	fs.DurationVar(&l.RenewDeadline, "leader-elect-renew-deadline", l.RenewDeadline, ""+
		"The interval between attempts by the acting master to renew a leadership slot "+
		"before it stops leading. This must be less than or equal to the lease duration. "+
		"This is only applicable if leader election is enabled.")
	fs.DurationVar(&l.RetryPeriod, "leader-elect-retry-period", l.RetryPeriod, ""+
		"The duration the clients should wait between attempting acquisition and renewal "+
		"of a leadership. This is only applicable if leader election is enabled.")
}

// Validate validates the options
func (s *ControllerManagerOptions) Validate() []error {
	var errs []error

	// Validate ControllerGates
	allControllersNameSet := controller.GetAllControllers()
	for _, selector := range s.ControllerGates {
		if selector == "*" {
			continue
		}
		selector = strings.TrimPrefix(selector, "-")
		if !allControllersNameSet.Has(selector) {
			errs = append(errs, fmt.Errorf("%q is not in the list of known controllers", selector))
		}
	}

	return errs
}

// IsControllerEnabled reports whether the named controller is enabled
func (s *ControllerManagerOptions) IsControllerEnabled(name string) bool {
	allowedAll := false
	for _, controllerGate := range s.ControllerGates {
		if controllerGate == name {
			return true
		}
		if controllerGate == "-"+name {
			return false
		}
		if controllerGate == "*" {
			allowedAll = true
		}
	}
	return allowedAll
}

// NewControllerManager creates and returns a new controller manager
func (s *ControllerManagerOptions) NewControllerManager() (*controller.Manager, error) {
	cm := &controller.Manager{}

	// TLS options
	tlsOpts := []func(*tls.Config){}

	// If HTTP/2 is not enabled, disable it to mitigate the HTTP/2 stream-cancellation and rapid-reset CVEs
	if !s.EnableHTTP2 {
		disableHTTP2 := func(c *tls.Config) {
			klog.V(4).Info("disabling http/2")
			c.NextProtos = []string{"http/1.1"}
		}
		tlsOpts = append(tlsOpts, disableHTTP2)
	}

	// Webhook server configuration
	webhookServer := webhook.NewServer(webhook.Options{
		CertDir: s.WebhookCertDir,
		TLSOpts: tlsOpts,
		Port:    8443,
	})

	// Metrics server configuration
	metricsServerOptions := metricsserver.Options{
		BindAddress:   s.MetricsAddr,
		SecureServing: s.SecureMetrics,
		TLSOpts:       tlsOpts,
	}

	if s.SecureMetrics {
		// Protect the metrics endpoint with authentication and authorization
		metricsServerOptions.FilterProvider = filters.WithAuthenticationAndAuthorization
	}

	// Controller-manager options
	controllerOpts := ctrl.Options{
		Scheme:                 controller.Scheme,
		Metrics:                metricsServerOptions,
		WebhookServer:          webhookServer,
		HealthProbeBindAddress: s.HealthProbeAddr,
	}

	// Configure leader election
	if s.LeaderElect {
		controllerOpts.LeaderElection = s.LeaderElect
		controllerOpts.LeaderElectionNamespace = "devstar-system"
		controllerOpts.LeaderElectionID = "devstar-controller-manager-leader-election"
		leaseDuration := s.LeaderElection.LeaseDuration
		renewDeadline := s.LeaderElection.RenewDeadline
		retryPeriod := s.LeaderElection.RetryPeriod
		controllerOpts.LeaseDuration = &leaseDuration
		controllerOpts.RenewDeadline = &renewDeadline
		controllerOpts.RetryPeriod = &retryPeriod
	}

	// Create the controller-runtime manager
	klog.V(0).Info("setting up manager")
	ctrl.SetLogger(klog.NewKlogr())

	// Obtain the Kubernetes config
	var config *rest.Config
	var err error

	if s.KubeConfig != "" {
		config, err = clientcmd.BuildConfigFromFlags(s.Master, s.KubeConfig)
		if err != nil {
			return nil, fmt.Errorf("unable to get kubeconfig: %v", err)
		}
	} else {
		config = ctrl.GetConfigOrDie()
	}

	// Create the manager
	mgr, err := ctrl.NewManager(config, controllerOpts)
	if err != nil {
		return nil, fmt.Errorf("unable to set up overall controller manager: %v", err)
	}

	// Add health checks
	if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
		return nil, fmt.Errorf("unable to set up health check: %v", err)
	}
	if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
		return nil, fmt.Errorf("unable to set up ready check: %v", err)
	}

	// Wire up the controller manager
	cm.Manager = mgr
	cm.IsControllerEnabled = s.IsControllerEnabled

	return cm, nil
}
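The gate semantics documented on ControllerGates ('*' turns everything on, '-foo' turns one controller off, an exact name turns one on) can be exercised directly; a small sketch, with `example` standing in for a real registered controller name:

```go
package main

import (
	"fmt"

	"code.gitea.io/gitea/modules/k8s/cmd/controller-manager/app/options"
)

func main() {
	s := options.NewControllerManagerOptions()

	// Default gates: {"*"} — every controller is enabled.
	fmt.Println(s.IsControllerEnabled("example")) // true

	// Keep the wildcard but disable one controller by name.
	s.ControllerGates = []string{"*", "-example"}
	fmt.Println(s.IsControllerEnabled("example")) // false
	fmt.Println(s.IsControllerEnabled("other"))   // true, still covered by "*"
}
```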
modules/k8s/cmd/controller-manager/app/server.go · 70 lines (new file)
@@ -0,0 +1,70 @@
package app

import (
	"context"
	"fmt"

	"github.com/spf13/cobra"
	"k8s.io/apimachinery/pkg/util/errors"
	"k8s.io/apimachinery/pkg/util/runtime"
	cliflag "k8s.io/component-base/cli/flag"
	"k8s.io/component-base/term"
	"k8s.io/klog/v2"
	"sigs.k8s.io/controller-runtime/pkg/manager/signals"

	"code.gitea.io/gitea/modules/k8s/cmd/controller-manager/app/options"
	"code.gitea.io/gitea/modules/k8s/controller"
	"code.gitea.io/gitea/modules/k8s/controller/devcontainer"
)

func init() {
	// Register all controllers at initialization time
	runtime.Must(controller.Register(&devcontainer.Controller{}))
}

// NewControllerManagerCommand creates a command that starts the controller manager
func NewControllerManagerCommand() *cobra.Command {
	s := options.NewControllerManagerOptions()

	cmd := &cobra.Command{
		Use:  "controller-manager",
		Long: `DevStar controller manager is a daemon that embeds the control loops shipped with DevStar.`,
		RunE: func(cmd *cobra.Command, args []string) error {
			if errs := s.Validate(); len(errs) != 0 {
				return errors.NewAggregate(errs)
			}

			return Run(signals.SetupSignalHandler(), s)
		},
	}

	fs := cmd.Flags()
	namedFlagSets := s.Flags()
	for _, f := range namedFlagSets.FlagSets {
		fs.AddFlagSet(f)
	}

	usageFmt := "Usage:\n %s\n"
	cols, _, _ := term.TerminalSize(cmd.OutOrStdout())
	cmd.SetUsageFunc(func(cmd *cobra.Command) error {
		_, _ = fmt.Fprintf(cmd.OutOrStderr(), usageFmt, cmd.UseLine())
		cliflag.PrintSections(cmd.OutOrStderr(), namedFlagSets, cols)
		return nil
	})

	return cmd
}

// Run starts the controller manager
func Run(ctx context.Context, s *options.ControllerManagerOptions) error {
	klog.InfoS("Starting DevStar controller manager")

	cm, err := s.NewControllerManager()
	if err != nil {
		return err
	}

	// Start the controller manager
	return cm.Start(ctx)
}
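For completeness, a sketch of driving this command programmatically rather than from a shell — handy in tests; the flag values are examples only:

```go
package main

import (
	"os"

	"code.gitea.io/gitea/modules/k8s/cmd/controller-manager/app"
)

func main() {
	cmd := app.NewControllerManagerCommand()
	// Equivalent to: controller-manager --leader-elect=true --controllers='*' --health-probe-bind-address=:8081
	cmd.SetArgs([]string{
		"--leader-elect=true",
		"--controllers=*",
		"--health-probe-bind-address=:8081",
	})
	if err := cmd.Execute(); err != nil {
		os.Exit(1)
	}
}
```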
modules/k8s/cmd/controller-manager/controller-manager.go · 17 lines (new file)
@@ -0,0 +1,17 @@
package main

import (
	"os"

	"k8s.io/klog/v2"

	"code.gitea.io/gitea/modules/k8s/cmd/controller-manager/app"
)

func main() {
	cmd := app.NewControllerManagerCommand()
	if err := cmd.Execute(); err != nil {
		klog.Error(err)
		os.Exit(1)
	}
}
modules/k8s/config/devcontainer/crd/bases/devcontainer.devstar.cn_devcontainerapps.yaml · 220 lines (new file)

@@ -0,0 +1,220 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    controller-gen.kubebuilder.io/version: v0.16.1
  name: devcontainerapps.devcontainer.devstar.cn
spec:
  group: devcontainer.devstar.cn
  names:
    kind: DevcontainerApp
    listKind: DevcontainerAppList
    plural: devcontainerapps
    singular: devcontainerapp
  scope: Namespaced
  versions:
  - name: v1
    schema:
      openAPIV3Schema:
        description: DevcontainerApp is the Schema for the devcontainerapps API
        properties:
          apiVersion:
            description: |-
              APIVersion defines the versioned schema of this representation of an object.
              Servers should convert recognized schemas to the latest internal value, and
              may reject unrecognized values.
              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
            type: string
          kind:
            description: |-
              Kind is a string value representing the REST resource this object represents.
              Servers may infer this from the endpoint the client submits requests to.
              Cannot be updated.
              In CamelCase.
              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
            type: string
          metadata:
            type: object
          spec:
            description: DevcontainerAppSpec defines the desired state of DevcontainerApp
            properties:
              failedJobsHistoryLimit:
                description: |-
                  The number of failed finished jobs to retain.
                  This is a pointer to distinguish between explicit zero and not specified.
                format: int32
                minimum: 0
                type: integer
              service:
                description: ServiceSpec specifies Service for DevContainer
                properties:
                  extraPorts:
                    description: ExtraPorts defines extra port configurations
                    items:
                      description: ExtraPortSpec defines an extra port configuration
                      properties:
                        containerPort:
                          description: ContainerPort is the port number inside the container
                          maximum: 65535
                          minimum: 1
                          type: integer
                        name:
                          description: Name is the name of the port
                          type: string
                        servicePort:
                          description: ServicePort is the port number exposed by the Service
                          maximum: 65535
                          minimum: 1
                          type: integer
                      required:
                      - containerPort
                      - servicePort
                      type: object
                    type: array
                  nodePort:
                    maximum: 32767
                    minimum: 30000
                    type: integer
                  servicePort:
                    minimum: 1
                    type: integer
                type: object
              startingDeadlineSeconds:
                description: |-
                  Optional deadline in seconds for starting the job if it misses scheduled
                  time for any reason. Missed jobs executions will be counted as failed ones.
                format: int64
                minimum: 0
                type: integer
              statefulset:
                description: StatefulSetSpec specifies StatefulSet for DevContainer
                properties:
                  command:
                    items:
                      type: string
                    type: array
                  containerPort:
                    minimum: 1
                    type: integer
                  gitRepositoryURL:
                    type: string
                  image:
                    type: string
                  sshPublicKeyList:
                    description: At least one SSH Public Key is required to pass validation
                    items:
                      type: string
                    minItems: 1
                    type: array
                required:
                - command
                - gitRepositoryURL
                - image
                - sshPublicKeyList
                type: object
              successfulJobsHistoryLimit:
                description: |-
                  The number of successful finished jobs to retain.
                  This is a pointer to distinguish between explicit zero and not specified.
                format: int32
                minimum: 0
                type: integer
              suspend:
                description: |-
                  This flag tells the controller to suspend subsequent executions, it does
                  not apply to already started executions. Defaults to false.
                type: boolean
            required:
            - statefulset
            type: object
          status:
            description: DevcontainerAppStatus defines the observed state of DevcontainerApp
            properties:
              active:
                description: A list of pointers to currently running jobs.
                items:
                  description: ObjectReference contains enough information to let
                    you inspect or modify the referred object.
                  properties:
                    apiVersion:
                      description: API version of the referent.
                      type: string
                    fieldPath:
                      description: |-
                        If referring to a piece of an object instead of an entire object, this string
                        should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
                        For example, if the object reference is to a container within a pod, this would take on a value like:
                        "spec.containers{name}" (where "name" refers to the name of the container that triggered
                        the event) or if no container name is specified "spec.containers[2]" (container with
                        index 2 in this pod). This syntax is chosen only to have some well-defined way of
                        referencing a part of an object.
                      type: string
                    kind:
                      description: |-
                        Kind of the referent.
                        More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
                      type: string
                    name:
                      description: |-
                        Name of the referent.
                        More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
                      type: string
                    namespace:
                      description: |-
                        Namespace of the referent.
                        More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
                      type: string
                    resourceVersion:
                      description: |-
                        Specific resourceVersion to which this reference is made, if any.
                        More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
                      type: string
                    uid:
                      description: |-
                        UID of the referent.
                        More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
                      type: string
                  type: object
                  x-kubernetes-map-type: atomic
                type: array
              extraPortsAssigned:
                description: ExtraPortsAssigned stores the NodePorts for the extra port mappings
                items:
                  description: ExtraPortAssigned describes an extra port that has been assigned
                  properties:
                    containerPort:
                      description: ContainerPort is the port number inside the container
                      type: integer
                    name:
                      description: Name is the name of the port
                      type: string
                    nodePort:
                      description: NodePort is the NodePort allocated by Kubernetes
                      type: integer
                    servicePort:
                      description: ServicePort is the port number exposed by the Service
                      type: integer
                  required:
                  - containerPort
                  - nodePort
                  - servicePort
                  type: object
                type: array
              lastScheduleTime:
                description: Information when was the last time the job was successfully
                  scheduled.
                format: date-time
                type: string
              nodePortAssigned:
                description: NodePortAssigned stores the NodePort allocated by the cluster after the DevcontainerApp CRD is scheduled
                type: integer
              ready:
                description: Ready indicates whether the Readiness Probe of the Pod managed by the DevcontainerApp has reached the ready state
                type: boolean
            type: object
        type: object
    served: true
    storage: true
    subresources:
      status: {}
modules/k8s/config/devcontainer/crd/kustomization.yaml · 22 lines (new file)
@@ -0,0 +1,22 @@
# This kustomization.yaml is not intended to be run by itself,
# since it depends on service name and namespace that are out of this kustomize package.
# It should be run by config/default
resources:
- bases/devcontainer.devstar.cn_devcontainerapps.yaml
# +kubebuilder:scaffold:crdkustomizeresource

patches:
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix.
# patches here are for enabling the conversion webhook for each CRD
# +kubebuilder:scaffold:crdkustomizewebhookpatch

# [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix.
# patches here are for enabling the CA injection for each CRD
#- path: patches/cainjection_in_devcontainerapps.yaml
# +kubebuilder:scaffold:crdkustomizecainjectionpatch

# [WEBHOOK] To enable webhook, uncomment the following section
# the following config is for teaching kustomize how to do kustomization for CRDs.

#configurations:
#- kustomizeconfig.yaml
modules/k8s/config/devcontainer/crd/kustomizeconfig.yaml · 19 lines (new file)
@@ -0,0 +1,19 @@
# This file is for teaching kustomize how to substitute name and namespace reference in CRD
nameReference:
- kind: Service
  version: v1
  fieldSpecs:
  - kind: CustomResourceDefinition
    version: v1
    group: apiextensions.k8s.io
    path: spec/conversion/webhook/clientConfig/service/name

namespace:
- kind: CustomResourceDefinition
  version: v1
  group: apiextensions.k8s.io
  path: spec/conversion/webhook/clientConfig/service/namespace
  create: false

varReference:
- path: metadata/annotations
modules/k8s/config/devcontainer/default/kustomization.yaml · 151 lines (new file)
@@ -0,0 +1,151 @@
# Adds namespace to all resources.
namespace: devcontainer-operator-system

# Value of this field is prepended to the
# names of all resources, e.g. a deployment named
# "wordpress" becomes "alices-wordpress".
# Note that it should also match with the prefix (text before '-') of the namespace
# field above.
namePrefix: devcontainer-operator-

# Labels to add to all resources and selectors.
#labels:
#- includeSelectors: true
#  pairs:
#    someName: someValue

resources:
- ../crd
- ../rbac
- ../manager
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
# crd/kustomization.yaml
#- ../webhook
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required.
#- ../certmanager
# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
#- ../prometheus
# [METRICS] Expose the controller manager metrics service.
- metrics_service.yaml
# [NETWORK POLICY] Protect the /metrics endpoint and Webhook Server with NetworkPolicy.
# Only Pod(s) running a namespace labeled with 'metrics: enabled' will be able to gather the metrics.
# Only CR(s) which requires webhooks and are applied on namespaces labeled with 'webhooks: enabled' will
# be able to communicate with the Webhook Server.
#- ../network-policy

# Uncomment the patches line if you enable Metrics, and/or are using webhooks and cert-manager
patches:
# [METRICS] The following patch will enable the metrics endpoint using HTTPS and the port :8443.
# More info: https://book.kubebuilder.io/reference/metrics
- path: manager_metrics_patch.yaml
  target:
    kind: Deployment

# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
# crd/kustomization.yaml
#- path: manager_webhook_patch.yaml

# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'.
# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks.
# 'CERTMANAGER' needs to be enabled to use ca injection
#- path: webhookcainjection_patch.yaml

# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix.
# Uncomment the following replacements to add the cert-manager CA injection annotations
#replacements:
#  - source: # Add cert-manager annotation to ValidatingWebhookConfiguration, MutatingWebhookConfiguration and CRDs
#      kind: Certificate
#      group: cert-manager.io
#      version: v1
#      name: serving-cert # this name should match the one in certificate.yaml
#      fieldPath: .metadata.namespace # namespace of the certificate CR
#    targets:
#      - select:
#          kind: ValidatingWebhookConfiguration
#        fieldPaths:
#          - .metadata.annotations.[cert-manager.io/inject-ca-from]
#        options:
#          delimiter: '/'
#          index: 0
#          create: true
#      - select:
#          kind: MutatingWebhookConfiguration
#        fieldPaths:
#          - .metadata.annotations.[cert-manager.io/inject-ca-from]
#        options:
#          delimiter: '/'
#          index: 0
#          create: true
#      - select:
#          kind: CustomResourceDefinition
#        fieldPaths:
#          - .metadata.annotations.[cert-manager.io/inject-ca-from]
#        options:
#          delimiter: '/'
#          index: 0
#          create: true
#  - source:
#      kind: Certificate
#      group: cert-manager.io
#      version: v1
#      name: serving-cert # this name should match the one in certificate.yaml
#      fieldPath: .metadata.name
#    targets:
#      - select:
#          kind: ValidatingWebhookConfiguration
#        fieldPaths:
#          - .metadata.annotations.[cert-manager.io/inject-ca-from]
#        options:
#          delimiter: '/'
#          index: 1
#          create: true
#      - select:
#          kind: MutatingWebhookConfiguration
#        fieldPaths:
#          - .metadata.annotations.[cert-manager.io/inject-ca-from]
#        options:
#          delimiter: '/'
#          index: 1
#          create: true
#      - select:
#          kind: CustomResourceDefinition
#        fieldPaths:
#          - .metadata.annotations.[cert-manager.io/inject-ca-from]
#        options:
#          delimiter: '/'
#          index: 1
#          create: true
#  - source: # Add cert-manager annotation to the webhook Service
#      kind: Service
#      version: v1
#      name: webhook-service
#      fieldPath: .metadata.name # namespace of the service
#    targets:
#      - select:
#          kind: Certificate
#          group: cert-manager.io
#          version: v1
#        fieldPaths:
#          - .spec.dnsNames.0
#          - .spec.dnsNames.1
#        options:
#          delimiter: '.'
#          index: 0
#          create: true
#  - source:
#      kind: Service
#      version: v1
#      name: webhook-service
#      fieldPath: .metadata.namespace # namespace of the service
#    targets:
#      - select:
#          kind: Certificate
#          group: cert-manager.io
#          version: v1
#        fieldPaths:
#          - .spec.dnsNames.0
#          - .spec.dnsNames.1
#        options:
#          delimiter: '.'
#          index: 1
#          create: true
@@ -0,0 +1,4 @@
|
||||
# This patch adds the args to allow exposing the metrics endpoint using HTTPS
|
||||
- op: add
|
||||
path: /spec/template/spec/containers/0/args/0
|
||||
value: --metrics-bind-address=:8443
|
||||
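As a quick sanity check, the effect of this JSON patch can be previewed without touching the cluster; a minimal sketch, assuming the overlay lives at `modules/k8s/config/devcontainer/default` as the file paths in this diff suggest:

```bash
# Render the kustomize overlay locally and confirm the metrics flag was injected
# into the manager container args (the path is an assumption based on this diff).
kubectl kustomize modules/k8s/config/devcontainer/default | grep -- '--metrics-bind-address'
# expected output: - --metrics-bind-address=:8443
```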
modules/k8s/config/devcontainer/default/metrics_service.yaml (new file, 17 lines)
@@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
  labels:
    control-plane: controller-manager
    app.kubernetes.io/name: devcontainer-operator
    app.kubernetes.io/managed-by: kustomize
  name: controller-manager-metrics-service
  namespace: system
spec:
  ports:
    - name: https
      port: 8443
      protocol: TCP
      targetPort: 8443
  selector:
    control-plane: controller-manager
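To spot-check the metrics Service once deployed, one can port-forward to it and probe the endpoint; a hedged sketch, where the namespace and the kustomize-prefixed Service name are assumptions (kustomize normally prefixes the scaffolded `system` names):

```bash
# Forward the HTTPS metrics port locally and probe it; -k skips TLS
# verification, matching the self-signed serving setup in this config.
kubectl -n devcontainer-operator-system port-forward \
  svc/controller-manager-metrics-service 8443:8443 &
curl -k https://localhost:8443/metrics | head
```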
@@ -0,0 +1,8 @@
resources:
- manager.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
images:
- name: controller
  newName: devstar.cn/devstar/devcontainer-operator
  newTag: build-f42c51cbef59584977e74f4fa100e350b8ca3c9d
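The pinned `newTag` above suggests image bumps happen per build; with the kustomize CLI available, the same edit can be scripted rather than hand-edited. A sketch, assuming this directory layout (the tag value is illustrative):

```bash
# Point the manager image at a new build tag without editing YAML by hand.
cd modules/k8s/config/devcontainer/manager
kustomize edit set image controller=devstar.cn/devstar/devcontainer-operator:build-<new-sha>
```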
modules/k8s/config/devcontainer/manager/manager.yaml (new file, 95 lines)
@@ -0,0 +1,95 @@
apiVersion: v1
kind: Namespace
metadata:
  labels:
    control-plane: controller-manager
    app.kubernetes.io/name: devcontainer-operator
    app.kubernetes.io/managed-by: kustomize
  name: system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: controller-manager
  namespace: system
  labels:
    control-plane: controller-manager
    app.kubernetes.io/name: devcontainer-operator
    app.kubernetes.io/managed-by: kustomize
spec:
  selector:
    matchLabels:
      control-plane: controller-manager
  replicas: 1
  template:
    metadata:
      annotations:
        kubectl.kubernetes.io/default-container: manager
      labels:
        control-plane: controller-manager
    spec:
      # TODO(user): Uncomment the following code to configure the nodeAffinity expression
      # according to the platforms which are supported by your solution.
      # It is considered best practice to support multiple architectures. You can
      # build your manager image using the makefile target docker-buildx.
      # affinity:
      #   nodeAffinity:
      #     requiredDuringSchedulingIgnoredDuringExecution:
      #       nodeSelectorTerms:
      #         - matchExpressions:
      #           - key: kubernetes.io/arch
      #             operator: In
      #             values:
      #               - amd64
      #               - arm64
      #               - ppc64le
      #               - s390x
      #           - key: kubernetes.io/os
      #             operator: In
      #             values:
      #               - linux
      securityContext:
        runAsNonRoot: true
        # TODO(user): For common cases that do not require escalating privileges
        # it is recommended to ensure that all your Pods/Containers are restrictive.
        # More info: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted
        # Please uncomment the following code if your project does NOT have to work on old Kubernetes
        # versions < 1.19 or on vendors versions which do NOT support this field by default (i.e. Openshift < 4.11 ).
        # seccompProfile:
        #   type: RuntimeDefault
      containers:
      - command:
        - /manager
        args:
        - --leader-elect
        - --health-probe-bind-address=:8081
        image: controller:latest
        name: manager
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            drop:
            - "ALL"
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8081
          initialDelaySeconds: 15
          periodSeconds: 20
        readinessProbe:
          httpGet:
            path: /readyz
            port: 8081
          initialDelaySeconds: 5
          periodSeconds: 10
        # TODO(user): Configure the resources accordingly based on the project requirements.
        # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
        resources:
          limits:
            cpu: 500m
            memory: 128Mi
          requests:
            cpu: 10m
            memory: 64Mi
      serviceAccountName: controller-manager
      terminationGracePeriodSeconds: 10
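A typical way to roll this Deployment out and watch it come up, assuming the default overlay path from this diff; the namespace and Deployment names after kustomize prefixing are assumptions:

```bash
kubectl apply -k modules/k8s/config/devcontainer/default
# kustomize usually prefixes the scaffolded 'system' namespace and resource names;
# adjust both names below to whatever your overlay actually produces.
kubectl -n devcontainer-operator-system rollout status deploy/controller-manager
```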
@@ -0,0 +1,26 @@
# This NetworkPolicy allows ingress traffic
# with Pods running on namespaces labeled with 'metrics: enabled'. Only Pods on those
# namespaces are able to gather data from the metrics endpoint.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  labels:
    app.kubernetes.io/name: devcontainer-operator
    app.kubernetes.io/managed-by: kustomize
  name: allow-metrics-traffic
  namespace: system
spec:
  podSelector:
    matchLabels:
      control-plane: controller-manager
  policyTypes:
    - Ingress
  ingress:
    # This allows ingress traffic from any namespace with the label metrics: enabled
    - from:
        - namespaceSelector:
            matchLabels:
              metrics: enabled # Only from namespaces with this label
      ports:
        - port: 8443
          protocol: TCP
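Because of this policy, a scraper such as Prometheus only reaches port 8443 if its namespace carries the `metrics: enabled` label; for example:

```bash
# Allow Pods in the 'monitoring' namespace (an assumed name) to reach the
# metrics endpoint by adding the label the NetworkPolicy selects on.
kubectl label namespace monitoring metrics=enabled
```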
@@ -0,0 +1,2 @@
resources:
- allow-metrics-traffic.yaml
@@ -0,0 +1,2 @@
resources:
- monitor.yaml
modules/k8s/config/devcontainer/prometheus/monitor.yaml (new file, 30 lines)
@@ -0,0 +1,30 @@
# Prometheus Monitor Service (Metrics)
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  labels:
    control-plane: controller-manager
    app.kubernetes.io/name: devcontainer-operator
    app.kubernetes.io/managed-by: kustomize
  name: controller-manager-metrics-monitor
  namespace: system
spec:
  endpoints:
    - path: /metrics
      port: https # Ensure this is the name of the port that exposes HTTPS metrics
      scheme: https
      bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
      tlsConfig:
        # TODO(user): The option insecureSkipVerify: true is not recommended for production since it disables
        # certificate verification. This poses a significant security risk by making the system vulnerable to
        # man-in-the-middle attacks, where an attacker could intercept and manipulate the communication between
        # Prometheus and the monitored services. This could lead to unauthorized access to sensitive metrics data,
        # compromising the integrity and confidentiality of the information.
        # Please use the following options for secure configurations:
        # caFile: /etc/metrics-certs/ca.crt
        # certFile: /etc/metrics-certs/tls.crt
        # keyFile: /etc/metrics-certs/tls.key
        insecureSkipVerify: true
  selector:
    matchLabels:
      control-plane: controller-manager
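The ServiceMonitor kind only exists when the Prometheus Operator is installed, so it is worth checking for the CRD before enabling the `prometheus` overlay:

```bash
# Verify the Prometheus Operator CRD is present before applying monitor.yaml.
kubectl get crd servicemonitors.monitoring.coreos.com
```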
@@ -0,0 +1,27 @@
# permissions for end users to edit devcontainerapps.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/name: devcontainer-operator
    app.kubernetes.io/managed-by: kustomize
  name: devcontainerapp-editor-role
rules:
- apiGroups:
  - devcontainer.devstar.cn
  resources:
  - devcontainerapps
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - devcontainer.devstar.cn
  resources:
  - devcontainerapps/status
  verbs:
  - get
@@ -0,0 +1,23 @@
# permissions for end users to view devcontainerapps.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/name: devcontainer-operator
    app.kubernetes.io/managed-by: kustomize
  name: devcontainerapp-viewer-role
rules:
- apiGroups:
  - devcontainer.devstar.cn
  resources:
  - devcontainerapps
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - devcontainer.devstar.cn
  resources:
  - devcontainerapps/status
  verbs:
  - get
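These helper roles are not bound to anyone by default; granting a user edit rights on DevcontainerApps could look like this (the user name is illustrative):

```bash
kubectl create clusterrolebinding devcontainerapp-editor-binding \
  --clusterrole=devcontainerapp-editor-role \
  --user=alice@example.com
```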
modules/k8s/config/devcontainer/rbac/kustomization.yaml (new file, 27 lines)
@@ -0,0 +1,27 @@
resources:
# All RBAC will be applied under this service account in
# the deployment namespace. You may comment out this resource
# if your manager will use a service account that exists at
# runtime. Be sure to update RoleBinding and ClusterRoleBinding
# subjects if changing service account names.
- service_account.yaml
- role.yaml
- role_binding.yaml
- leader_election_role.yaml
- leader_election_role_binding.yaml
# The following RBAC configurations are used to protect
# the metrics endpoint with authn/authz. These configurations
# ensure that only authorized users and service accounts
# can access the metrics endpoint. Comment the following
# permissions if you want to disable this protection.
# More info: https://book.kubebuilder.io/reference/metrics.html
- metrics_auth_role.yaml
- metrics_auth_role_binding.yaml
- metrics_reader_role.yaml
# For each CRD, "Editor" and "Viewer" roles are scaffolded by
# default, aiding admins in cluster management. Those roles are
# not used by the Project itself. You can comment the following lines
# if you do not want those helpers to be installed with your Project.
- devcontainerapp_editor_role.yaml
- devcontainerapp_viewer_role.yaml

@@ -0,0 +1,40 @@
# permissions to do leader election.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    app.kubernetes.io/name: devcontainer-operator
    app.kubernetes.io/managed-by: kustomize
  name: leader-election-role
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - get
  - list
  - watch
  - create
  - update
  - patch
  - delete
- apiGroups:
  - coordination.k8s.io
  resources:
  - leases
  verbs:
  - get
  - list
  - watch
  - create
  - update
  - patch
  - delete
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - patch
@@ -0,0 +1,15 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app.kubernetes.io/name: devcontainer-operator
    app.kubernetes.io/managed-by: kustomize
  name: leader-election-rolebinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: leader-election-role
subjects:
- kind: ServiceAccount
  name: controller-manager
  namespace: system
modules/k8s/config/devcontainer/rbac/metrics_auth_role.yaml (new file, 17 lines)
@@ -0,0 +1,17 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: metrics-auth-role
rules:
- apiGroups:
  - authentication.k8s.io
  resources:
  - tokenreviews
  verbs:
  - create
- apiGroups:
  - authorization.k8s.io
  resources:
  - subjectaccessreviews
  verbs:
  - create
@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: metrics-auth-rolebinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: metrics-auth-role
subjects:
- kind: ServiceAccount
  name: controller-manager
  namespace: system
@@ -0,0 +1,9 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: metrics-reader
rules:
- nonResourceURLs:
  - "/metrics"
  verbs:
  - get
modules/k8s/config/devcontainer/rbac/role.yaml (new file, 52 lines)
@@ -0,0 +1,52 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: manager-role
rules:
- apiGroups:
  - ""
  resources:
  - services
  verbs:
  - create
  - delete
  - get
  - list
  - watch
- apiGroups:
  - apps
  resources:
  - statefulsets
  verbs:
  - create
  - delete
  - get
  - list
  - watch
- apiGroups:
  - devcontainer.devstar.cn
  resources:
  - devcontainerapps
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - devcontainer.devstar.cn
  resources:
  - devcontainerapps/finalizers
  verbs:
  - update
- apiGroups:
  - devcontainer.devstar.cn
  resources:
  - devcontainerapps/status
  verbs:
  - get
  - patch
  - update
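Once this role is bound to the controller's ServiceAccount, impersonation makes it easy to confirm the grants took effect; a sketch, with the namespace name as an assumption:

```bash
# Impersonate the controller's ServiceAccount and probe one of the granted verbs.
kubectl auth can-i create statefulsets \
  --as=system:serviceaccount:devcontainer-operator-system:controller-manager
```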
modules/k8s/config/devcontainer/rbac/role_binding.yaml (new file, 15 lines)
@@ -0,0 +1,15 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/name: devcontainer-operator
    app.kubernetes.io/managed-by: kustomize
  name: manager-rolebinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: manager-role
subjects:
- kind: ServiceAccount
  name: controller-manager
  namespace: system
@@ -0,0 +1,8 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    app.kubernetes.io/name: devcontainer-operator
    app.kubernetes.io/managed-by: kustomize
  name: controller-manager
  namespace: system
@@ -0,0 +1,38 @@
apiVersion: devcontainer.devstar.cn/v1
kind: DevcontainerApp
metadata:
  name: studio-test
  namespace: default
  labels:
    app.kubernetes.io/name: devcontainer-operator
    app.kubernetes.io/managed-by: kustomize
spec:
  statefulset:
    image: devstar.cn/public/base-ssh-devcontainer:ubuntu-20.04-20241014
    gitRepositoryURL: https://gitee.com/panshuxiao/test-devcontainer.git
    command:
      - /bin/bash
      - -c
      - service ssh start && while true; do sleep 60; done
    containerPort: 22
    sshPublicKeyList:
      - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOmlOiVc18CjXKmVxDwSEqQ8fA2ikZ3p8NqdGV1Gw2cQ panshuxiao@mail.ustc.edu.cn
  service:
    servicePort: 22
    # nodePort: 30000 # Dynamic allocation is recommended; avoid hard-coding a fixed NodePort value


######################################################################################################################################
# Postscript: how to connect via SSH

# ```bash
# >>>>> minikube ip
# # 192.168.49.2
#
# >>>>> minikube service list
# # |-------------------------|----------------------------------------------------------|--------------|---------------------------|
# # |        NAMESPACE        |                           NAME                           | TARGET PORT  |            URL            |
# # |-------------------------|----------------------------------------------------------|--------------|---------------------------|
# # | devstar-devcontainer-ns | daimingchen-devstar-beef092a69c011ef9c00000c2952a362-svc | ssh-port/22  | http://192.168.49.2:32598 |
#
# >>>>> ssh -p 32598 username@192.168.49.2
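The sample above can be exercised end to end: apply it, then read back the NodePort the controller records in `status.nodePortAssigned` (a field this changeset's controller populates). A sketch, where the sample file path and the `ready`/`nodePortAssigned` JSON field names are assumptions inferred from this diff:

```bash
kubectl apply -f modules/k8s/config/devcontainer/samples/devcontainer_v1_devcontainerapp.yaml
# Give the controller a moment to reconcile, then read the assigned NodePort and readiness.
kubectl get devcontainerapp studio-test -o jsonpath='{.status.nodePortAssigned}{"\n"}'
kubectl get devcontainerapp studio-test -o jsonpath='{.status.ready}{"\n"}'
```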
@@ -0,0 +1,4 @@
## Append samples of your project ##
resources:
- devcontainer_v1_devcontainerapp.yaml
# +kubebuilder:scaffold:manifestskustomizesamples
modules/k8s/controller/devcontainer/controller-wrapper.go (new file, 42 lines)
@@ -0,0 +1,42 @@
package devcontainer

import (
    "fmt"

    "k8s.io/klog/v2"
    "sigs.k8s.io/controller-runtime/pkg/manager"

    devcontainerv1 "code.gitea.io/gitea/modules/k8s/api/v1"
)

// Controller implements the controller.Controller interface.
type Controller struct{}

// Name returns the controller name.
func (c *Controller) Name() string {
    return "devcontainer"
}

// Init initializes the controller.
func (c *Controller) Init(mgr manager.Manager) error {
    // Add the API types to the scheme.
    klog.InfoS("Adding DevContainer API to scheme")
    if err := devcontainerv1.AddToScheme(mgr.GetScheme()); err != nil {
        return fmt.Errorf("unable to add DevContainer API to scheme: %w", err)
    }

    // Create the DevContainer reconciler.
    klog.InfoS("Creating DevContainer reconciler")
    reconciler := &DevcontainerAppReconciler{
        Client: mgr.GetClient(),
        Scheme: mgr.GetScheme(),
    }

    // Register the reconciler with the manager.
    klog.InfoS("Setting up DevContainer with manager")
    if err := reconciler.SetupWithManager(mgr); err != nil {
        return fmt.Errorf("failed to setup DevContainer controller: %w", err)
    }

    return nil
}
@@ -0,0 +1,363 @@
/*
Copyright 2024.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package devcontainer

import (
    "context"
    "strconv"

    "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/types"

    "k8s.io/apimachinery/pkg/runtime"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/log"

    devcontainer_v1 "code.gitea.io/gitea/modules/k8s/api/v1"
    devcontainer_controller_utils "code.gitea.io/gitea/modules/k8s/controller/devcontainer/utils"
    apps_v1 "k8s.io/api/apps/v1"
    core_v1 "k8s.io/api/core/v1"
    k8s_sigs_controller_runtime_utils "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// DevcontainerAppReconciler reconciles a DevcontainerApp object
type DevcontainerAppReconciler struct {
    client.Client
    Scheme *runtime.Scheme
}

// +kubebuilder:rbac:groups=devcontainer.devstar.cn,resources=devcontainerapps,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=devcontainer.devstar.cn,resources=devcontainerapps/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=devcontainer.devstar.cn,resources=devcontainerapps/finalizers,verbs=update
// +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=create;delete;get;list;watch
// +kubebuilder:rbac:groups="",resources=services,verbs=create;delete;get;list;watch

// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
// Modify the Reconcile function to compare the state specified by
// the DevcontainerApp object against the actual cluster state, and then
// perform operations to make the cluster state reflect the state specified by
// the user.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.0/pkg/reconcile

func (r *DevcontainerAppReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
    logger := log.FromContext(ctx)
    var err error

    // 1. Read the DevcontainerApp from the cache.
    app := &devcontainer_v1.DevcontainerApp{}
    err = r.Get(ctx, req.NamespacedName, app)
    if err != nil {
        // When the CRD resource "DevcontainerApp" has been deleted, return an empty result and skip the remaining steps.
        return ctrl.Result{}, client.IgnoreNotFound(err)
    }

    // Check the stop-container annotation.
    if desiredReplicas, exists := app.Annotations["devstar.io/desiredReplicas"]; exists && desiredReplicas == "0" {
        logger.Info("DevContainer stop requested via annotation", "name", app.Name)

        // Fetch the current StatefulSet.
        statefulSetInNamespace := &apps_v1.StatefulSet{}
        err = r.Get(ctx, req.NamespacedName, statefulSetInNamespace)
        if err == nil {
            // Scale the replicas down to 0.
            replicas := int32(0)
            statefulSetInNamespace.Spec.Replicas = &replicas
            if err := r.Update(ctx, statefulSetInNamespace); err != nil {
                logger.Error(err, "Failed to scale down StatefulSet replicas to 0")
                return ctrl.Result{}, err
            }
            logger.Info("StatefulSet scaled down to 0 replicas due to stop request")

            // Mark the container as not ready.
            app.Status.Ready = false
            if err := r.Status().Update(ctx, app); err != nil {
                logger.Error(err, "Failed to update DevcontainerApp status")
                return ctrl.Result{}, err
            }

            // Continue with the remaining logic (e.g. updating the Service).
        }
    }

    // 2. Process the DevcontainerApp configuration.
    // 2.1 Handle the StatefulSet.
    statefulSet := devcontainer_controller_utils.NewStatefulSet(app)
    err = k8s_sigs_controller_runtime_utils.SetControllerReference(app, statefulSet, r.Scheme)
    if err != nil {
        return ctrl.Result{}, err
    }

    // 2.2 Look up a StatefulSet with the same name in the cluster.
    statefulSetInNamespace := &apps_v1.StatefulSet{}
    err = r.Get(ctx, req.NamespacedName, statefulSetInNamespace)
    if err != nil {
        if !errors.IsNotFound(err) {
            return ctrl.Result{}, err
        }
        err = r.Create(ctx, statefulSet)
        if err != nil && !errors.IsAlreadyExists(err) {
            logger.Error(err, "Failed to create StatefulSet")
            return ctrl.Result{}, err
        }
    } else {
        // Handle the restart annotation.
        if restartedAt, exists := app.Annotations["devstar.io/restartedAt"]; exists {
            // Check whether the annotation has already been applied to the StatefulSet.
            needsRestart := true

            if statefulSetInNamespace.Spec.Template.Annotations != nil {
                if currentRestartTime, exists := statefulSetInNamespace.Spec.Template.Annotations["devstar.io/restartedAt"]; exists && currentRestartTime == restartedAt {
                    needsRestart = false
                }
            } else {
                statefulSetInNamespace.Spec.Template.Annotations = make(map[string]string)
            }

            if needsRestart {
                logger.Info("DevContainer restart requested", "name", app.Name, "time", restartedAt)

                // Propagate the restart annotation to the Pod template to trigger a rolling update.
                statefulSetInNamespace.Spec.Template.Annotations["devstar.io/restartedAt"] = restartedAt

                // Ensure the replica count is at least 1 (in case the container was previously stopped).
                replicas := int32(1)
                if statefulSetInNamespace.Spec.Replicas != nil && *statefulSetInNamespace.Spec.Replicas > 0 {
                    replicas = *statefulSetInNamespace.Spec.Replicas
                }
                statefulSetInNamespace.Spec.Replicas = &replicas

                if err := r.Update(ctx, statefulSetInNamespace); err != nil {
                    logger.Error(err, "Failed to update StatefulSet for restart")
                    return ctrl.Result{}, err
                }
                logger.Info("StatefulSet restarted successfully")
            }
        }

        // If StatefulSet.Status.readyReplicas changed, update the DevcontainerApp.Status.Ready field.
        if statefulSetInNamespace.Status.ReadyReplicas > 0 {
            app.Status.Ready = true
            if err := r.Status().Update(ctx, app); err != nil {
                logger.Error(err, "Failed to update DevcontainerApp.Status.Ready", "DevcontainerApp.Status.Ready", app.Status.Ready)
                return ctrl.Result{}, err
            }
            logger.Info("DevContainer is READY", "ReadyReplicas", statefulSetInNamespace.Status.ReadyReplicas)
        } else if app.Status.Ready {
            // Only update when the current status is Ready but the StatefulSet is in fact no longer ready.
            app.Status.Ready = false
            if err := r.Status().Update(ctx, app); err != nil {
                logger.Error(err, "Failed to un-mark DevcontainerApp.Status.Ready", "DevcontainerApp.Status.Ready", app.Status.Ready)
                return ctrl.Result{}, err
            }
            logger.Info("DevContainer is NOT ready", "ReadyReplicas", statefulSetInNamespace.Status.ReadyReplicas)
        }

        // Fix: add guard conditions to avoid triggering updates in a loop.
        needsUpdate := false

        // Check whether the image changed.
        if app.Spec.StatefulSet.Image != statefulSetInNamespace.Spec.Template.Spec.Containers[0].Image {
            needsUpdate = true
        }

        // Check the replica count: the desiredReplicas annotation is set but non-zero (the stop case was handled above).
        if desiredReplicas, exists := app.Annotations["devstar.io/desiredReplicas"]; exists && desiredReplicas != "0" {
            replicas, err := strconv.ParseInt(desiredReplicas, 10, 32)
            if err == nil {
                currentReplicas := int32(1) // default value
                if statefulSetInNamespace.Spec.Replicas != nil {
                    currentReplicas = *statefulSetInNamespace.Spec.Replicas
                }

                if currentReplicas != int32(replicas) {
                    r32 := int32(replicas)
                    statefulSet.Spec.Replicas = &r32
                    needsUpdate = true
                }
            }
        }

        if needsUpdate {
            if err := r.Update(ctx, statefulSet); err != nil {
                return ctrl.Result{}, err
            }
            logger.Info("StatefulSet updated", "name", statefulSet.Name)
        }
    }

    // 2.3 Handle the Service.
    service := devcontainer_controller_utils.NewService(app)
    if err := k8s_sigs_controller_runtime_utils.SetControllerReference(app, service, r.Scheme); err != nil {
        return ctrl.Result{}, err
    }
    serviceInCluster := &core_v1.Service{}
    err = r.Get(ctx, types.NamespacedName{Name: app.Name, Namespace: app.Namespace}, serviceInCluster)
    if err != nil {
        if !errors.IsNotFound(err) {
            return ctrl.Result{}, err
        }
        err = r.Create(ctx, service)
        if err == nil {
            // Creating the NodePort Service succeeds only once ==> record the NodePort allocation in app.Status.
            logger.Info("[DevStar][DevContainer] NodePort Assigned", "nodePortAssigned", service.Spec.Ports[0].NodePort)

            // Record the NodePort of the primary SSH port.
            app.Status.NodePortAssigned = uint16(service.Spec.Ports[0].NodePort)

            // Handle the extra ports.
            extraPortsAssigned := []devcontainer_v1.ExtraPortAssigned{}

            // Process the extra ports starting from the second port (index 1),
            // because the first port (index 0) is the SSH port.
            for i := 1; i < len(service.Spec.Ports); i++ {
                port := service.Spec.Ports[i]

                // Look up the matching port spec.
                var containerPort uint16 = 0

                // If extra port configurations exist, try to match one.
                if app.Spec.Service.ExtraPorts != nil {
                    for _, ep := range app.Spec.Service.ExtraPorts {
                        if (ep.Name != "" && ep.Name == port.Name) ||
                            (uint16(port.Port) == ep.ServicePort) {
                            containerPort = ep.ContainerPort
                            break
                        }
                    }
                }

                // If no match was found, fall back to the target port.
                if containerPort == 0 && port.TargetPort.IntVal > 0 {
                    containerPort = uint16(port.TargetPort.IntVal)
                }

                // Append to the extra port list.
                extraPortsAssigned = append(extraPortsAssigned, devcontainer_v1.ExtraPortAssigned{
                    Name:          port.Name,
                    ServicePort:   uint16(port.Port),
                    ContainerPort: containerPort,
                    NodePort:      uint16(port.NodePort),
                })

                logger.Info("[DevStar][DevContainer] Extra Port NodePort Assigned",
                    "name", port.Name,
                    "servicePort", port.Port,
                    "nodePort", port.NodePort)
            }

            // Update the CRD status, including the extra ports.
            app.Status.ExtraPortsAssigned = extraPortsAssigned

            if err := r.Status().Update(ctx, app); err != nil {
                logger.Error(err, "Failed to update NodePorts of DevcontainerApp",
                    "nodePortAssigned", service.Spec.Ports[0].NodePort,
                    "extraPortsCount", len(extraPortsAssigned))
                return ctrl.Result{}, err
            }
        } else if !errors.IsAlreadyExists(err) {
            logger.Error(err, "Failed to create DevcontainerApp NodePort Service", "nodePortServiceName", service.Name)
            return ctrl.Result{}, err
        }
    } else {
        // The Service already exists; inspect its port information
        // and check whether the status needs to be updated.
        needStatusUpdate := false

        // If the primary port has not been recorded yet, record it.
        if app.Status.NodePortAssigned == 0 && len(serviceInCluster.Spec.Ports) > 0 {
            app.Status.NodePortAssigned = uint16(serviceInCluster.Spec.Ports[0].NodePort)
            needStatusUpdate = true
            logger.Info("[DevStar][DevContainer] Found existing main NodePort",
                "nodePort", serviceInCluster.Spec.Ports[0].NodePort)
        }

        // Handle the extra ports.
        if len(serviceInCluster.Spec.Ports) > 1 {
            // If the extra-port status is empty, or the count does not match...
            if app.Status.ExtraPortsAssigned == nil ||
                len(app.Status.ExtraPortsAssigned) != len(serviceInCluster.Spec.Ports)-1 {

                extraPortsAssigned := []devcontainer_v1.ExtraPortAssigned{}

                // Start from index 1, skipping the primary port.
                for i := 1; i < len(serviceInCluster.Spec.Ports); i++ {
                    port := serviceInCluster.Spec.Ports[i]

                    // Look up the matching port spec.
                    var containerPort uint16 = 0

                    // If extra port configurations exist, try to match one.
                    if app.Spec.Service.ExtraPorts != nil {
                        for _, ep := range app.Spec.Service.ExtraPorts {
                            if (ep.Name != "" && ep.Name == port.Name) ||
                                (uint16(port.Port) == ep.ServicePort) {
                                containerPort = ep.ContainerPort
                                break
                            }
                        }
                    }

                    // If no match was found, fall back to the target port.
                    if containerPort == 0 && port.TargetPort.IntVal > 0 {
                        containerPort = uint16(port.TargetPort.IntVal)
                    }

                    // Append to the extra port list.
                    extraPortsAssigned = append(extraPortsAssigned, devcontainer_v1.ExtraPortAssigned{
                        Name:          port.Name,
                        ServicePort:   uint16(port.Port),
                        ContainerPort: containerPort,
                        NodePort:      uint16(port.NodePort),
                    })

                    logger.Info("[DevStar][DevContainer] Found existing extra NodePort",
                        "name", port.Name,
                        "nodePort", port.NodePort)
                }

                // Update the extra-port status.
                app.Status.ExtraPortsAssigned = extraPortsAssigned
                needStatusUpdate = true
            }
        }

        // Update the status if needed.
        if needStatusUpdate {
            if err := r.Status().Update(ctx, app); err != nil {
                logger.Error(err, "Failed to update NodePorts status for existing service")
                return ctrl.Result{}, err
            }
            logger.Info("[DevStar][DevContainer] Updated NodePorts status for existing service",
                "mainNodePort", app.Status.NodePortAssigned,
                "extraPortsCount", len(app.Status.ExtraPortsAssigned))
        }
    }
    return ctrl.Result{}, nil
}

// SetupWithManager sets up the controller with the Manager.
func (r *DevcontainerAppReconciler) SetupWithManager(mgr ctrl.Manager) error {
    return ctrl.NewControllerManagedBy(mgr).
        For(&devcontainer_v1.DevcontainerApp{}).
        Owns(&apps_v1.StatefulSet{}).
        Owns(&core_v1.Service{}).
        Complete(r)
}
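Both lifecycle paths in this Reconcile loop are driven purely by annotations on the CR, so stop and restart can also be triggered from the command line (the CR name is taken from the sample above):

```bash
# Stop: the controller scales the StatefulSet down to 0 replicas.
kubectl annotate devcontainerapp studio-test devstar.io/desiredReplicas=0 --overwrite

# Restart: a fresh timestamp propagates into the Pod template, triggering a
# rolling update and scaling back up to at least 1 replica.
kubectl annotate devcontainerapp studio-test \
  devstar.io/restartedAt="$(date -u +%Y-%m-%dT%H:%M:%SZ)" --overwrite
```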
@@ -0,0 +1,94 @@
/*
Copyright 2024.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package devcontainer

import (
    "context"

    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
    "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/types"
    "sigs.k8s.io/controller-runtime/pkg/reconcile"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    devcontainerv1 "code.gitea.io/gitea/modules/k8s/api/v1"
)

var _ = Describe("DevcontainerApp Controller", func() {
    Context("When reconciling a resource", func() {
        const resourceName = "test-resource"

        ctx := context.Background()

        typeNamespacedName := types.NamespacedName{
            Name:      resourceName,
            Namespace: "default", // TODO(user): Modify as needed
        }
        devcontainerapp := &devcontainerv1.DevcontainerApp{}

        BeforeEach(func() {
            By("creating the custom resource for the Kind DevcontainerApp")
            err := k8sClient.Get(ctx, typeNamespacedName, devcontainerapp)
            if err != nil && errors.IsNotFound(err) {
                resource := &devcontainerv1.DevcontainerApp{
                    ObjectMeta: metav1.ObjectMeta{
                        Name:      resourceName,
                        Namespace: "default",
                    },
                    // TODO(user): Specify other spec details if needed.
                    Spec: devcontainerv1.DevcontainerAppSpec{
                        StatefulSet: devcontainerv1.StatefulSetSpec{
                            // Add the required command.
                            Command: []string{"/bin/sh", "-c", "sleep infinity"},
                            // Add the SSH public key list.
                            SSHPublicKeyList: []string{"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC/test-key"},
                            // Other required fields.
                            Image: "busybox:latest",
                        },
                    },
                }
                Expect(k8sClient.Create(ctx, resource)).To(Succeed())
            }
        })

        AfterEach(func() {
            // TODO(user): Cleanup logic after each test, like removing the resource instance.
            resource := &devcontainerv1.DevcontainerApp{}
            err := k8sClient.Get(ctx, typeNamespacedName, resource)
            Expect(err).NotTo(HaveOccurred())

            By("Cleanup the specific resource instance DevcontainerApp")
            Expect(k8sClient.Delete(ctx, resource)).To(Succeed())
        })
        It("should successfully reconcile the resource", func() {
            By("Reconciling the created resource")
            controllerReconciler := &DevcontainerAppReconciler{
                Client: k8sClient,
                Scheme: k8sClient.Scheme(),
            }

            _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{
                NamespacedName: typeNamespacedName,
            })
            Expect(err).NotTo(HaveOccurred())
            // TODO(user): Add more specific assertions depending on your controller's reconciliation logic.
            // Example: If you expect a certain status condition after reconciliation, verify it here.
        })
    })
})
modules/k8s/controller/devcontainer/suite_test.go (new file, 96 lines)
@@ -0,0 +1,96 @@
/*
Copyright 2024.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package devcontainer

import (
    "context"
    "fmt"
    "path/filepath"
    "runtime"
    "testing"

    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"

    "k8s.io/client-go/kubernetes/scheme"
    "k8s.io/client-go/rest"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/envtest"
    logf "sigs.k8s.io/controller-runtime/pkg/log"
    "sigs.k8s.io/controller-runtime/pkg/log/zap"

    devcontainerv1 "code.gitea.io/gitea/modules/k8s/api/v1"
    // +kubebuilder:scaffold:imports
)

// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.

var cfg *rest.Config
var k8sClient client.Client
var testEnv *envtest.Environment
var ctx context.Context
var cancel context.CancelFunc

func TestControllers(t *testing.T) {
    RegisterFailHandler(Fail)

    RunSpecs(t, "Controller Suite")
}

var _ = BeforeSuite(func() {
    logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))

    ctx, cancel = context.WithCancel(context.TODO())

    By("bootstrapping test environment")
    testEnv = &envtest.Environment{
        CRDDirectoryPaths:     []string{filepath.Join("..", "..", "config", "devcontainer", "crd", "bases")},
        ErrorIfCRDPathMissing: true,

        // The BinaryAssetsDirectory is only required if you want to run the tests directly
        // without calling the makefile target test. If not set, it will look for the
        // default path defined in controller-runtime, which is /usr/local/kubebuilder/.
        // Note that you must have the required binaries set up under the bin directory to perform
        // the tests directly. When we run make test, it will be set up and used automatically.
        BinaryAssetsDirectory: filepath.Join("..", "..", "bin", "k8s",
            fmt.Sprintf("1.31.0-%s-%s", runtime.GOOS, runtime.GOARCH)),
    }

    var err error
    // cfg is defined in this file globally.
    cfg, err = testEnv.Start()
    Expect(err).NotTo(HaveOccurred())
    Expect(cfg).NotTo(BeNil())

    err = devcontainerv1.AddToScheme(scheme.Scheme)
    Expect(err).NotTo(HaveOccurred())

    // +kubebuilder:scaffold:scheme

    k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
    Expect(err).NotTo(HaveOccurred())
    Expect(k8sClient).NotTo(BeNil())
})

var _ = AfterSuite(func() {
    By("tearing down the test environment")
    cancel()
    err := testEnv.Stop()
    Expect(err).NotTo(HaveOccurred())
})
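The `BinaryAssetsDirectory` above expects envtest binaries for Kubernetes 1.31.0 under `modules/k8s/bin/k8s`; `make test` normally fetches them, but the suite can also be run directly. A sketch, assuming the upstream `setup-envtest` tool and its usual `--bin-dir` layout:

```bash
# Install the controller-runtime envtest helper, download the 1.31.0 assets
# into the directory the suite expects, then run the Ginkgo suite directly.
go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest
setup-envtest use 1.31.0 --bin-dir modules/k8s/bin
go test ./modules/k8s/controller/devcontainer/...
```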
modules/k8s/controller/devcontainer/templates/service.yaml (new file, 24 lines)
@@ -0,0 +1,24 @@
apiVersion: v1
kind: Service
metadata:
  name: {{.ObjectMeta.Name}}-svc
  namespace: {{.ObjectMeta.Namespace}}
spec:
  selector:
    app: {{.ObjectMeta.Name}}
    devstar-resource-type: devstar-devcontainer
  type: NodePort
  ports:
    - name: ssh-port
      protocol: TCP
      port: 22
      targetPort: {{.Spec.StatefulSet.ContainerPort}}
      {{ if .Spec.Service.NodePort}}
      nodePort: {{.Spec.Service.NodePort}}
      {{ end }}
    {{- range .Spec.Service.ExtraPorts }}
    - name: {{ .Name | default (printf "port-%d" .ServicePort) }}
      protocol: TCP
      port: {{ .ServicePort }}
      targetPort: {{ .ContainerPort }}
    {{- end }}
modules/k8s/controller/devcontainer/templates/statefulset.yaml (new file, 114 lines)
@@ -0,0 +1,114 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: {{.ObjectMeta.Name}}
  namespace: {{.ObjectMeta.Namespace}}
  labels:
    app: {{.ObjectMeta.Name}}
    devstar-resource-type: devstar-devcontainer
spec:
  podManagementPolicy: OrderedReady
  replicas: 1
  selector:
    matchLabels:
      app: {{.ObjectMeta.Name}}
      devstar-resource-type: devstar-devcontainer
  template:
    metadata:
      labels:
        app: {{.ObjectMeta.Name}}
        devstar-resource-type: devstar-devcontainer
    spec:
      # Security policy: do not mount the ServiceAccount token.
      automountServiceAccountToken: false
      volumes:
        - name: root-ssh-dir
          emptyDir: {}
      initContainers:
        - name: init-root-ssh-dir
          image: devstar.cn/public/busybox:27a71e19c956
          imagePullPolicy: IfNotPresent
          command:
            - /bin/sh
            - -c
            - {{range .Spec.StatefulSet.SSHPublicKeyList}} echo "{{.}}" >> /root/.ssh/authorized_keys && {{end}} chmod -R 700 /root/.ssh/ && echo 'SSH Public Key(s) imported.'
            # Note: permissions under ~/.ssh/ must be set to 700 recursively; otherwise
            # ~/.ssh/authorized_keys will not take effect even when configured.
          volumeMounts:
            - name: root-ssh-dir
              mountPath: /root/.ssh
        - name: init-git-repo-dir
          image: {{.Spec.StatefulSet.Image}}
          imagePullPolicy: IfNotPresent
          command:
            - /bin/sh
            - -c
            - if [ ! -d '/data/workspace' ]; then git clone {{.Spec.StatefulSet.GitRepositoryURL}} /data/workspace && echo "Git Repository cloned."; else echo "Folder already exists."; fi
          volumeMounts:
            - name: pvc-devcontainer
              mountPath: /data
      containers:
        - name: {{.ObjectMeta.Name}}
          image: {{.Spec.StatefulSet.Image}}
          command:
          {{range .Spec.StatefulSet.Command}}
            - {{.}}
          {{end}}
          imagePullPolicy: IfNotPresent
          # securityContext: TODO: define the DevContainer security policy
          ports:
            - name: ssh-port
              protocol: TCP
              containerPort: {{.Spec.StatefulSet.ContainerPort}}
            {{- range .Spec.Service.ExtraPorts }}
            - name: {{ .Name | default (printf "port-%d" .ContainerPort) }}
              protocol: TCP
              containerPort: {{ .ContainerPort }}
            {{- end }}
          volumeMounts:
            - name: pvc-devcontainer
              mountPath: /data
            - name: root-ssh-dir
              mountPath: /root/.ssh
          livenessProbe:
            exec:
              command:
                - /bin/sh
                - -c
                - exec ls ~
            failureThreshold: 6
            initialDelaySeconds: 5
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
          readinessProbe:
            exec:
              command:
                - /bin/sh
                - -c
                - exec cat /etc/ssh/ssh_host*.pub
            failureThreshold: 6
            initialDelaySeconds: 5
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
          resources:
            limits:
              cpu: 300m
              ephemeral-storage: 8Gi
              memory: 512Mi
            requests:
              cpu: 100m
              ephemeral-storage: 50Mi
              memory: 128Mi
  volumeClaimTemplates:
    - apiVersion: v1
      kind: PersistentVolumeClaim
      metadata:
        name: pvc-devcontainer
      spec:
        storageClassName: openebs-hostpath
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: 10Gi
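Note the volume claim template hard-codes the `openebs-hostpath` StorageClass; on clusters without OpenEBS the PVC stays Pending and the Pod never schedules, so it is worth verifying up front:

```bash
# Confirm the StorageClass the template depends on actually exists.
kubectl get storageclass openebs-hostpath
```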
modules/k8s/controller/devcontainer/utils/template_utils.go (new file, 93 lines)
@@ -0,0 +1,93 @@
package utils

import (
    "bytes"
    "fmt"
    "os"
    "path/filepath"
    "runtime"
    "text/template"

    devcontainer_apps_v1 "code.gitea.io/gitea/modules/k8s/api/v1"
    app_v1 "k8s.io/api/apps/v1"
    core_v1 "k8s.io/api/core/v1"
    yaml_util "k8s.io/apimachinery/pkg/util/yaml"
)

// const (
// 	TemplatePath = "modules/k8s/controller/devcontainer/templates/"
// )

// parseTemplate parses a Go Template file.
func parseTemplate(templateName string, app *devcontainer_apps_v1.DevcontainerApp) []byte {
    // Get the absolute path of the current source file.
    _, filename, _, ok := runtime.Caller(0)
    if !ok {
        panic("unable to determine the current file path")
    }

    // Derive the template file location from the location of this source file.
    // The utils directory:
    utilsDir := filepath.Dir(filename)
    // The controller/devcontainer directory:
    controllerDir := filepath.Dir(utilsDir)
    // The templates directory:
    templatesDir := filepath.Join(controllerDir, "templates")
    // The full template file path:
    templatePath := filepath.Join(templatesDir, templateName+".yaml")

    // Print debug information.
    fmt.Printf("current source file: %s\n", filename)
    fmt.Printf("templates directory: %s\n", templatesDir)
    fmt.Printf("using template file: %s\n", templatePath)

    // Check that the template file exists.
    if _, err := os.Stat(templatePath); os.IsNotExist(err) {
        panic(fmt.Errorf("template file does not exist: %s", templatePath))
    }

    // Parse the template.
    tmpl, err := template.
        New(filepath.Base(templatePath)).
        Funcs(template.FuncMap{"default": DefaultFunc}).
        ParseFiles(templatePath)
    if err != nil {
        panic(err)
    }

    b := new(bytes.Buffer)
    err = tmpl.Execute(b, app)
    if err != nil {
        panic(err)
    }

    return b.Bytes()
}

// NewStatefulSet creates a StatefulSet.
func NewStatefulSet(app *devcontainer_apps_v1.DevcontainerApp) *app_v1.StatefulSet {
    statefulSet := &app_v1.StatefulSet{}
    err := yaml_util.Unmarshal(parseTemplate("statefulset", app), statefulSet)
    if err != nil {
        panic(err)
    }
    return statefulSet
}

// NewService creates a Service.
func NewService(app *devcontainer_apps_v1.DevcontainerApp) *core_v1.Service {
    service := &core_v1.Service{}
    err := yaml_util.Unmarshal(parseTemplate("service", app), service)
    if err != nil {
        panic(err)
    }
    return service
}

// DefaultFunc implements a Sprig-style default-value template function.
// In a Go template pipeline such as `{{ .Name | default (printf ...) }}`, the
// piped value (.Name) arrives as the LAST argument, so the fallback must be
// the first parameter; the original ordering returned the fallback unconditionally.
func DefaultFunc(defaultValue interface{}, value interface{}) interface{} {
    if value == nil || value == "" {
        return defaultValue
    }
    return value
}
modules/k8s/controller/manager.go (new file, 105 lines)
@@ -0,0 +1,105 @@
package controller

import (
    "context"
    "fmt"
    "sync"

    devcontainerv1 "code.gitea.io/gitea/modules/k8s/api/v1"
    "k8s.io/apimachinery/pkg/runtime"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    "k8s.io/apimachinery/pkg/util/sets"
    clientgoscheme "k8s.io/client-go/kubernetes/scheme"
    "k8s.io/klog/v2"
    "sigs.k8s.io/controller-runtime/pkg/healthz"
    "sigs.k8s.io/controller-runtime/pkg/manager"
)

var (
    // Scheme is the scheme holding all API types.
    Scheme = runtime.NewScheme()

    // The controller registry.
    Controllers    = map[string]Controller{}
    controllerLock sync.Mutex
    controllerSet  = sets.NewString()
)

func init() {
    utilruntime.Must(clientgoscheme.AddToScheme(Scheme))
    // Schemes for custom resources can be added here.
    utilruntime.Must(devcontainerv1.AddToScheme(Scheme))
}

// Controller is the controller interface.
type Controller interface {
    // Name returns the controller name.
    Name() string
    // Init initializes the controller.
    Init(mgr manager.Manager) error
}

// Manager is the controller manager.
type Manager struct {
    Manager             manager.Manager
    IsControllerEnabled func(name string) bool
}

// Start starts the controller manager.
func (m *Manager) Start(ctx context.Context) error {
    klog.InfoS("Starting DevStar controller manager")

    // Add health checks.
    if err := m.Manager.AddHealthzCheck("health", healthz.Ping); err != nil {
        return fmt.Errorf("unable to set up health check: %w", err)
    }

    if err := m.Manager.AddReadyzCheck("ready", healthz.Ping); err != nil {
        return fmt.Errorf("unable to set up ready check: %w", err)
    }

    // Initialize all enabled controllers.
    controllerLock.Lock()
    defer controllerLock.Unlock()

    for name, c := range Controllers {
        if !m.IsControllerEnabled(name) {
            klog.InfoS("Controller disabled", "name", name)
            continue
        }

        klog.InfoS("Initializing controller", "name", name)
        if err := c.Init(m.Manager); err != nil {
            return fmt.Errorf("error initializing controller %q: %w", name, err)
        }
    }

    // Start the underlying manager.
    klog.InfoS("Starting controllers")
    return m.Manager.Start(ctx)
}

// Register registers a controller with the controller manager.
func Register(c Controller) error {
    controllerLock.Lock()
    defer controllerLock.Unlock()

    name := c.Name()
    if _, found := Controllers[name]; found {
        return fmt.Errorf("controller %q was registered twice", name)
    }

    Controllers[name] = c
    controllerSet.Insert(name)
    klog.InfoS("Registered controller", "name", name)

    return nil
}

// GetAllControllers returns the names of all registered controllers.
func GetAllControllers() sets.String {
    controllerLock.Lock()
    defer controllerLock.Unlock()

    return controllerSet.Union(nil)
}
modules/k8s/controller/options/options.go (new file, 10 lines)
@@ -0,0 +1,10 @@
/*
 * Please refer to the LICENSE file in the root directory of the project.
 */

package options

// Options holds all the options a controller may need.
type Options struct {
    // Extend with more options as actual requirements grow.
}
@@ -7,8 +7,6 @@ import (
|
||||
|
||||
"code.gitea.io/gitea/modules/log"
|
||||
"code.gitea.io/gitea/modules/setting"
|
||||
"code.gitea.io/gitea/services/devcontainer/errors"
|
||||
"code.gitea.io/gitea/services/devstar_cloud_provider"
|
||||
|
||||
k8s_api_v1 "code.gitea.io/gitea/modules/k8s/api/v1"
|
||||
|
||||
@@ -213,16 +211,13 @@ func waitUntilDevcontainerReadyWithTimeout(ctx *context.Context, client dynamic_
|
||||
}
|
||||
}
|
||||
|
||||
// CreateDevcontainer 创建开发容器
|
||||
// 修改 CreateDevcontainer 函数
|
||||
func CreateDevcontainer(ctx *context.Context, client dynamic_client.Interface, opts *CreateDevcontainerOptions) (*k8s_api_v1.DevcontainerApp, error) {
|
||||
// 记录日志
|
||||
log.Info("Creating DevContainer with options: name=%s, namespace=%s, image=%s",
|
||||
opts.Name, opts.Namespace, opts.Image)
|
||||
|
||||
if ctx == nil || opts == nil {
|
||||
return nil, devcontainer_k8s_agent_modules_errors.ErrIllegalDevcontainerParameters{
|
||||
FieldList: []string{"ctx", "opts"},
|
||||
Message: "cannot be nil",
|
||||
}
|
||||
}
|
||||
|
||||
// 创建资源定义
|
||||
devcontainerApp := &k8s_api_v1.DevcontainerApp{
|
||||
TypeMeta: apimachinery_apis_v1.TypeMeta{
|
||||
Kind: "DevcontainerApp",
|
||||
@@ -231,6 +226,10 @@ func CreateDevcontainer(ctx *context.Context, client dynamic_client.Interface, o
|
||||
ObjectMeta: apimachinery_apis_v1.ObjectMeta{
|
||||
Name: opts.Name,
|
||||
Namespace: opts.Namespace,
|
||||
Labels: map[string]string{
|
||||
"app.kubernetes.io/name": "devcontainer-operator",
|
||||
"app.kubernetes.io/managed-by": "kustomize",
|
||||
},
|
||||
},
|
||||
Spec: k8s_api_v1.DevcontainerAppSpec{
|
||||
StatefulSet: k8s_api_v1.StatefulSetSpec{
|
||||
@@ -240,108 +239,74 @@ func CreateDevcontainer(ctx *context.Context, client dynamic_client.Interface, o
|
||||
SSHPublicKeyList: opts.SSHPublicKeyList,
|
||||
GitRepositoryURL: opts.GitRepositoryURL,
|
||||
},
|
||||
Service: k8s_api_v1.ServiceSpec{
|
||||
ServicePort: opts.ServicePort,
|
||||
ExtraPorts: opts.ExtraPorts, // 添加 ExtraPorts 配置
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// 转换为 JSON
|
||||
jsonData, err := json.Marshal(devcontainerApp)
|
||||
if err != nil {
|
||||
log.Error("Failed to marshal DevcontainerApp to JSON: %v", err)
|
||||
return nil, devcontainer_k8s_agent_modules_errors.ErrOperateDevcontainer{
|
||||
Action: "Marshal JSON",
|
||||
Message: err.Error(),
|
||||
}
|
||||
}
|
||||
|
||||
// 输出 JSON 以便调试
|
||||
log.Debug("Generated JSON for DevcontainerApp:\n%s", string(jsonData))
|
||||
|
||||
// 转换为 Unstructured 对象
|
||||
unstructuredObj := &apimachinery_apis_v1_unstructured.Unstructured{}
|
||||
_, _, err = apimachinery_apis_v1_unstructured.UnstructuredJSONScheme.Decode(jsonData, &groupVersionKind, unstructuredObj)
|
||||
err = unstructuredObj.UnmarshalJSON(jsonData)
|
||||
if err != nil {
|
||||
log.Error("Failed to unmarshal JSON to Unstructured: %v", err)
|
||||
return nil, devcontainer_k8s_agent_modules_errors.ErrOperateDevcontainer{
|
||||
Action: "build unstructured obj",
|
||||
Action: "Unmarshal JSON to Unstructured",
|
||||
Message: err.Error(),
|
||||
}
|
||||
}
|
||||
|
||||
// 创建 DevContainer Status.nodePortAssigned 信息
|
||||
_, err = client.Resource(groupVersionResource).Namespace(opts.Namespace).Create(*ctx, unstructuredObj, opts.CreateOptions)
|
||||
// 确认 GroupVersionResource 定义
|
||||
log.Debug("Using GroupVersionResource: Group=%s, Version=%s, Resource=%s",
|
||||
groupVersionResource.Group, groupVersionResource.Version, groupVersionResource.Resource)
|
||||
|
||||
// 创建资源
|
||||
log.Info("Creating DevcontainerApp resource in namespace %s", opts.Namespace)
|
||||
result, err := client.Resource(groupVersionResource).Namespace(opts.Namespace).Create(*ctx, unstructuredObj, opts.CreateOptions)
|
||||
if err != nil {
|
||||
log.Error("Failed to create DevcontainerApp: %v", err)
|
||||
return nil, devcontainer_k8s_agent_modules_errors.ErrOperateDevcontainer{
|
||||
Action: "create DevContainer via Dynamic Client",
|
||||
Message: err.Error(),
|
||||
}
|
||||
}
|
||||
|
||||
// 注册 watcher 监听 DevContainer Status.nodePortAssigned 信息
|
||||
watcherTimeoutSeconds := int64(3)
|
||||
watcher, err := client.Resource(groupVersionResource).Namespace(opts.Namespace).Watch(*ctx, apimachinery_apis_v1.ListOptions{
|
||||
FieldSelector: fmt.Sprintf("metadata.name=%s", opts.Name),
|
||||
//Watch: false,
|
||||
TimeoutSeconds: &watcherTimeoutSeconds,
|
||||
Limit: 1,
|
||||
})
|
||||
log.Info("DevcontainerApp resource created successfully")
|
||||
|
||||
// 将结果转换回 DevcontainerApp 结构体
|
||||
resultJSON, err := result.MarshalJSON()
|
||||
if err != nil {
|
||||
log.Error("Failed to marshal result to JSON: %v", err)
|
||||
return nil, devcontainer_k8s_agent_modules_errors.ErrOperateDevcontainer{
|
||||
Action: "register watcher of DevContainer NodePort",
|
||||
Action: "Marshal result JSON",
|
||||
Message: err.Error(),
|
||||
}
|
||||
}
|
||||
defer watcher.Stop()
|
||||
|
||||
//log.Info(" ===== 开始监听 DevContainer NodePort 分配信息 =====")
|
||||
var nodePortAssigned int64
|
||||
for event := range watcher.ResultChan() {
|
||||
switch event.Type {
|
||||
case apimachinery_watch.Modified:
|
||||
if devcontainerUnstructured, ok := event.Object.(*apimachinery_apis_v1_unstructured.Unstructured); ok {
|
||||
|
||||
statusDevcontainer, ok, err := apimachinery_apis_v1_unstructured.NestedMap(devcontainerUnstructured.Object, "status")
|
||||
if err == nil && ok {
|
||||
nodePortAssigned = statusDevcontainer["nodePortAssigned"].(int64)
|
||||
if 30000 <= nodePortAssigned && nodePortAssigned <= 32767 {
|
||||
devcontainerApp.Status.NodePortAssigned = uint16(nodePortAssigned)
|
||||
//log.Info("DevContainer NodePort Status 更新完成,最新 NodePort = %v", nodePortAssigned)
|
||||
//break
|
||||
// 收到 NodePort Service MODIFIED 消息后,更新 NodePort,直接返回,不再处理后续 Event (否则必须超时3秒才得到 NodePort)
|
||||
natRuleDescription := setting.DEVCONTAINER_CLOUD_NAT_RULE_DESCRIPTION_PREFIX + devcontainerApp.Name
|
||||
privatePort := uint64(nodePortAssigned)
|
||||
publicPort := privatePort
|
||||
err = devstar_cloud_provider.CreateNATRulePort(privatePort, publicPort, natRuleDescription)
|
||||
return devcontainerApp, err
|
||||
}
|
||||
}
|
||||
}
|
||||
createdDevcontainer := &k8s_api_v1.DevcontainerApp{}
|
||||
if err := json.Unmarshal(resultJSON, createdDevcontainer); err != nil {
|
||||
log.Error("Failed to unmarshal result JSON: %v", err)
|
||||
return nil, devcontainer_k8s_agent_modules_errors.ErrOperateDevcontainer{
|
||||
Action: "Unmarshal result JSON",
|
||||
Message: err.Error(),
|
||||
}
|
||||
}
	// log.Info(" ===== finished watching DevContainer NodePort assignment =====")

	/*
		// Currently the only field that needs refreshing is devcontainerInCluster.Status.NodePortAssigned.
		// If more fields need refreshing later, consider re-querying the cluster; for now we only update
		// the in-memory object and return, to avoid putting extra load on the k8s API Server.
		//response, err := client.Resource(groupVersionResource).Namespace(opts.Namespace).Get(*ctx, opts.Name, apimachinery_apis_v1.GetOptions{})
		//jsonData, err = response.MarshalJSON()
		//devcontainerInCluster := &k8s_api_v1.DevcontainerApp{}
		//err = json.Unmarshal(jsonData, devcontainerInCluster)
		//if err != nil {
		//	return nil, devcontainer_errors.ErrOperateDevcontainer{
		//		Action:  "parse response result of in-cluster DevContainer",
		//		Message: err.Error(),
		//	}
		//}
	*/

	// Reaching this point means the DevcontainerApp failed to initialize in the k8s cluster;
	// inspecting the pod shows the failure reason, for example:
	// $ kubectl get pod -n devstar-studio-ns test-mockrepo1-6c5369588f8911e-0
	// NAME                               READY   STATUS                  RESTARTS     AGE
	// test-mockrepo1-6c5369588f8911e-0   0/1     Init:CrashLoopBackOff   1 (5s ago)   7s
	// Delete the CRD that was just created, then report that the DevContainer failed to initialize.
	optsDeleteInitFailed := &DeleteDevcontainerOptions{
		Namespace: setting.Devcontainer.Namespace,
		Name:      devcontainerApp.Name,
	}
	_ = DeleteDevcontainer(ctx, client, optsDeleteInitFailed)
	return nil, errors.ErrOperateDevcontainer{
		Action:  "Initialize DevContainer",
		Message: fmt.Sprintf("DevContainer %v failed to initialize and is thus purged.", devcontainerApp.Name),
	}
}
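
// A minimal usage sketch of the creation flow above (illustrative only: the option
// values such as "demo" and "default" are assumptions, not part of this module):
//
//	ctx := context.Background()
//	client, err := GetKubernetesClient(&ctx)
//	if err == nil {
//		app, err := CreateDevcontainer(&ctx, client, &CreateDevcontainerOptions{
//			Name:      "demo",
//			Namespace: "default",
//			Image:     "devstar.cn/public/base-ssh-devcontainer:ubuntu-20.04-20241014",
//		})
//		_, _ = app, err
//	}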

func DeleteDevcontainer(ctx *context.Context, client dynamic_client.Interface, opts *DeleteDevcontainerOptions) error {
320 modules/k8s/k8s_test.go Normal file
@@ -0,0 +1,320 @@
package k8s_agent

import (
	"context"
	"fmt"
	"testing"
	"time"

	"code.gitea.io/gitea/modules/setting"
	"github.com/stretchr/testify/assert"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func TestGetKubernetesClient(t *testing.T) {
	t.Log("====== Start testing GetKubernetesClient ======")

	// Set up the test environment
	setting.Devcontainer.Enabled = true
	t.Log("Devcontainer.Enabled set to:", setting.Devcontainer.Enabled)

	// Create a context
	ctx := context.Background()
	ctxPtr := &ctx

	t.Log("Trying to connect to the Kubernetes cluster...")
	startTime := time.Now()

	// Try to obtain a Kubernetes client
	dynamicClient, err := GetKubernetesClient(ctxPtr)

	elapsed := time.Since(startTime).Seconds()
	t.Logf("Connection attempt took: %.2f seconds", elapsed)

	// Check the result
	if err != nil {
		t.Logf("⚠️ Failed to get the Kubernetes dynamic client: %v", err)
		t.Logf("This may mean the test environment has no suitable Kubernetes config or cannot reach the cluster")
		assert.False(t, setting.Devcontainer.Enabled,
			"Devcontainer.Enabled should be set to false when the Kubernetes connection fails")
		t.Log("====== Test finished - connection failed ======")
		return
	}

	t.Log("✅ Successfully obtained the Kubernetes dynamic client")
	assert.NotNil(t, dynamicClient, "the Kubernetes dynamic client should not be nil")

	// Use the dynamic client to fetch node information
	t.Log("Fetching cluster node information with the dynamic client...")

	// Define the GVR (Group, Version, Resource) for Node resources
	nodesGVR := schema.GroupVersionResource{
		Group:    "",
		Version:  "v1",
		Resource: "nodes",
	}

	// List the nodes with the dynamic client
	nodesList, err := dynamicClient.Resource(nodesGVR).List(*ctxPtr, metav1.ListOptions{})
	if err != nil {
		t.Logf("⚠️ Failed to fetch node information: %v", err)
	} else {
		// Process and print the node information
		t.Logf("📊 The cluster has %d nodes:", len(nodesList.Items))
		for i, nodeUnstructured := range nodesList.Items {
			nodeName, _, _ := unstructured.NestedString(nodeUnstructured.Object, "metadata", "name")

			// Try to read the node status
			var nodeStatus string = "Unknown"
			conditions, exists, _ := unstructured.NestedSlice(nodeUnstructured.Object, "status", "conditions")
			if exists {
				for _, conditionObj := range conditions {
					condition, ok := conditionObj.(map[string]interface{})
					if !ok {
						continue
					}

					conditionType, typeExists, _ := unstructured.NestedString(condition, "type")
					if typeExists && conditionType == "Ready" {
						status, statusExists, _ := unstructured.NestedString(condition, "status")
						if statusExists {
							if status == "True" {
								nodeStatus = "Ready"
							} else {
								nodeStatus = "NotReady"
							}
						}
						break
					}
				}
			}

			// Try to read the node version
			var kubeletVersion string = "Unknown"
			version, exists, _ := unstructured.NestedString(nodeUnstructured.Object, "status", "nodeInfo", "kubeletVersion")
			if exists {
				kubeletVersion = version
			}

			t.Logf("  Node %d: name=%s, status=%s, version=%s",
				i+1,
				nodeName,
				nodeStatus,
				kubeletVersion)
		}
	}

	// Use the dynamic client to fetch namespace information
	namespacesGVR := schema.GroupVersionResource{
		Group:    "",
		Version:  "v1",
		Resource: "namespaces",
	}

	namespacesList, err := dynamicClient.Resource(namespacesGVR).List(*ctxPtr, metav1.ListOptions{})
	if err != nil {
		t.Logf("⚠️ Failed to fetch namespace information: %v", err)
	} else {
		t.Logf("📊 The cluster has %d namespaces:", len(namespacesList.Items))
		for i, nsUnstructured := range namespacesList.Items {
			nsName, _, _ := unstructured.NestedString(nsUnstructured.Object, "metadata", "name")

			// Try to read the namespace status
			nsStatus, exists, _ := unstructured.NestedString(nsUnstructured.Object, "status", "phase")
			if !exists {
				nsStatus = "Unknown"
			}

			t.Logf("  Namespace %d: %s (status: %s)",
				i+1,
				nsName,
				nsStatus)
		}
	}

	// Test listing DevcontainerApp resources
	t.Log("Trying to list DevcontainerApp resources in the target namespace...")
	namespaceToTest := "default" // test against the default namespace
	opts := &ListDevcontainersOptions{
		Namespace: namespaceToTest,
	}
	devcontainers, listErr := ListDevcontainers(ctxPtr, dynamicClient, opts)

	if listErr != nil {
		t.Logf("⚠️ Failed to list DevcontainerApp resources: %v", listErr)
		t.Log("This may be caused by missing permissions or an undefined CRD")
	} else {
		t.Logf("✅ Successfully listed DevcontainerApps; namespace '%s' has %d DevcontainerApp resources",
			namespaceToTest,
			len(devcontainers.Items))

		// Print basic information about each DevcontainerApp
		for i, devcontainer := range devcontainers.Items {
			t.Logf("  DevcontainerApp %d:", i+1)
			t.Logf("    Name: %s", devcontainer.Name)
			t.Logf("    Created at: %v", devcontainer.CreationTimestamp)
			t.Logf("    Ready: %v", devcontainer.Status.Ready)
			t.Logf("    NodePort: %d", devcontainer.Status.NodePortAssigned)
			t.Logf("    Image: %s", devcontainer.Spec.StatefulSet.Image)
		}
	}

	// List all CRDs
	t.Log("Listing all CRDs in the cluster...")
	crdsGVR := schema.GroupVersionResource{
		Group:    "apiextensions.k8s.io",
		Version:  "v1",
		Resource: "customresourcedefinitions",
	}

	crdsList, err := dynamicClient.Resource(crdsGVR).List(*ctxPtr, metav1.ListOptions{})
	if err != nil {
		t.Logf("⚠️ Failed to fetch the CRD list: %v", err)
	} else {
		t.Logf("📊 The cluster has %d CRD definitions", len(crdsList.Items))

		// Look for the DevcontainerApp CRD
		found := false
		for _, crdObj := range crdsList.Items {
			crdName, exists, _ := unstructured.NestedString(crdObj.Object, "metadata", "name")
			if exists && crdName == "devcontainerapps.devcontainer.devstar.cn" {
				found = true
				t.Log("✅ Found the DevcontainerApp CRD definition")

				// Try to read the CRD group, version, and scope
				group, _, _ := unstructured.NestedString(crdObj.Object, "spec", "group")
				version := "Unknown"
				// spec.versions is a list, so index into it before reading the name field
				if versions, ok, _ := unstructured.NestedSlice(crdObj.Object, "spec", "versions"); ok && len(versions) > 0 {
					if v0, ok := versions[0].(map[string]interface{}); ok {
						version, _, _ = unstructured.NestedString(v0, "name")
					}
				}
				scope, _, _ := unstructured.NestedString(crdObj.Object, "spec", "scope")

				t.Logf("  Group: %s", group)
				t.Logf("  Version: %s", version)
				t.Logf("  Scope: %s", scope)
				break
			}
		}

		if !found {
			t.Log("⚠️ DevcontainerApp CRD definition not found; subsequent tests may fail")
		}
	}

	t.Log("====== Test finished - connection succeeded ======")
}

// Integration test that simulates creating and deleting a Devcontainer
func TestCreateAndDeleteDevcontainer(t *testing.T) {
	t.Log("====== Start testing CreateAndDeleteDevcontainer ======")

	if testing.Short() {
		t.Skip("skipping integration test in short mode")
		return
	}

	// Create a context
	ctx := context.Background()
	ctxPtr := &ctx

	// Obtain a client
	t.Log("Trying to get a Kubernetes client...")
	client, err := GetKubernetesClient(ctxPtr)
	if err != nil {
		t.Logf("⚠️ Failed to get a Kubernetes client: %v", err)
		t.Skip("skipping the remaining tests because no Kubernetes client could be created")
		return
	}
	t.Log("✅ Successfully obtained a Kubernetes client")

	// Build a unique test name
	currentTime := time.Now().UnixNano() / int64(time.Millisecond)
	testName := fmt.Sprintf("studio-test-%d", currentTime)
	t.Logf("Creating test Devcontainer: %s", testName)

	// Configuration for the test Devcontainer
	createOpts := &CreateDevcontainerOptions{
		Name:             testName,
		Namespace:        "default",
		Image:            "devstar.cn/public/base-ssh-devcontainer:ubuntu-20.04-20241014",
		GitRepositoryURL: "https://gitee.com/panshuxiao/test-devcontainer.git",
		CommandList: []string{
			"/bin/bash",
			"-c",
			"service ssh start && while true; do sleep 60; done",
		},
		ContainerPort: 22,
		ServicePort:   22,
		SSHPublicKeyList: []string{
			"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOmlOiVc18CjXKmVxDwSEqQ8fA2ikZ3p8NqdGV1Gw2cQ panshuxiao@mail.ustc.edu.cn",
		},
	}

	t.Log("Creating the Devcontainer...")
	startTime := time.Now()

	// Create the Devcontainer
	devcontainer, err := CreateDevcontainer(ctxPtr, client, createOpts)

	elapsed := time.Since(startTime).Seconds()

	if err != nil {
		t.Logf("⚠️ Failed to create the test Devcontainer (took: %.2f s): %v", elapsed, err)
		t.Skip("skipping the rest of the test because creation failed")
		return
	}

	t.Logf("✅ Successfully created the Devcontainer (took: %.2f s)", elapsed)
	t.Logf("  Name: %s", devcontainer.Name)
	t.Logf("  Namespace: %s", devcontainer.Namespace)
	t.Logf("  Image: %s", devcontainer.Spec.StatefulSet.Image)

	// Make sure the resource is deleted when the test ends
	defer func() {
		t.Logf("Cleanup - deleting test Devcontainer: %s", testName)
		deleteOpts := &DeleteDevcontainerOptions{
			Name:      createOpts.Name,
			Namespace: createOpts.Namespace,
		}
		deleteStartTime := time.Now()
		err := DeleteDevcontainer(ctxPtr, client, deleteOpts)
		deleteElapsed := time.Since(deleteStartTime).Seconds()

		if err != nil {
			t.Logf("⚠️ Failed to clean up the test Devcontainer (took: %.2f s): %v", deleteElapsed, err)
		} else {
			t.Logf("✅ Successfully deleted the test Devcontainer (took: %.2f s)", deleteElapsed)
		}
	}()

	// Verify the created Devcontainer
	assert.Equal(t, createOpts.Name, devcontainer.Name, "the created Devcontainer name should match the requested name")

	// Wait briefly so K8s has time to process the resource creation
	t.Log("Waiting 15 seconds so Kubernetes can process the resource...")
	time.Sleep(15 * time.Second)

	// Try to fetch the Devcontainer that was just created
	t.Logf("Fetching the newly created Devcontainer: %s", testName)
	getOpts := &GetDevcontainerOptions{
		Name:      createOpts.Name,
		Namespace: createOpts.Namespace,
		Wait:      true,
	}

	getStartTime := time.Now()
	retrieved, err := GetDevcontainer(ctxPtr, client, getOpts)
	getElapsed := time.Since(getStartTime).Seconds()

	if err != nil {
		t.Logf("⚠️ Failed to get the Devcontainer (took: %.2f s): %v", getElapsed, err)
		t.Log("Note: this can be normal if the Devcontainer is not Ready yet")
	} else {
		t.Logf("✅ Successfully got the Devcontainer (took: %.2f s)", getElapsed)
		t.Logf("  Name: %s", retrieved.Name)
		t.Logf("  Ready: %v", retrieved.Status.Ready)
		t.Logf("  NodePort: %d", retrieved.Status.NodePortAssigned)
		assert.Equal(t, createOpts.Name, retrieved.Name, "the fetched Devcontainer name should match the created name")
	}

	t.Log("====== Test finished - CreateAndDeleteDevcontainer ======")
}

@@ -1,6 +1,7 @@
package k8s_agent

import (
	k8s_api_v1 "code.gitea.io/gitea/modules/k8s/api/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

@@ -8,14 +9,15 @@ import (
type CreateDevcontainerOptions struct {
	metav1.CreateOptions

	Name             string                     `json:"name"`
	Namespace        string                     `json:"namespace"`
	Image            string                     `json:"image"`
	CommandList      []string                   `json:"command"`
	ContainerPort    uint16                     `json:"containerPort"`
	ServicePort      uint16                     `json:"servicePort"`
	SSHPublicKeyList []string                   `json:"sshPublicKeyList"`
	GitRepositoryURL string                     `json:"gitRepositoryURL"`
	ExtraPorts       []k8s_api_v1.ExtraPortSpec `json:"extraPorts,omitempty"` // extra port configuration
}
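
// An illustrative sketch (values assumed, not part of this file) of requesting an
// extra forwarded port, mirroring how the service layer wires up the WebTerminal port:
//
//	opts.ExtraPorts = []k8s_api_v1.ExtraPortSpec{
//		{Name: "ttyd", ContainerPort: 7681, ServicePort: 7681},
//	}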

type GetDevcontainerOptions struct {

@@ -45,4 +47,7 @@ type DevcontainerStatusK8sAgentVO struct {

	// The CRD Controller writes true into DevcontainerApp.Status.Ready if and only if
	// the Readiness Probe of the Pod managed by the StatefulSet reports true
	Ready bool `json:"ready"`

	// NodePort assignment status of the extra ports
	ExtraPortsAssigned []k8s_api_v1.ExtraPortAssigned `json:"extraPortsAssigned,omitempty"`
}

@@ -17,8 +17,10 @@ import (
	devcontainer_models_errors "code.gitea.io/gitea/models/devcontainer/errors"
	user_model "code.gitea.io/gitea/models/user"
	"code.gitea.io/gitea/modules/docker"
	devcontainer_k8s_agent_module "code.gitea.io/gitea/modules/k8s"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/setting"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	gitea_context "code.gitea.io/gitea/services/context"
	devcontainer_service_errors "code.gitea.io/gitea/services/devcontainer/errors"

@@ -316,7 +318,54 @@ func fileExists(filename string) bool {
func GetWebTerminalURL(ctx context.Context, devcontainerName string) (string, error) {
	switch setting.Devcontainer.Agent {
	case setting.KUBERNETES:
		// Create a K8s client and query the CRD directly to obtain the ttyd port
		k8sClient, err := devcontainer_k8s_agent_module.GetKubernetesClient(&ctx)
		if err != nil {
			return "", err
		}

		// Fetch the CRD information straight from K8s, without relying on the database
		opts := &devcontainer_k8s_agent_module.GetDevcontainerOptions{
			GetOptions: metav1.GetOptions{},
			Name:       devcontainerName,
			Namespace:  setting.Devcontainer.Namespace,
			Wait:       false,
		}

		devcontainerApp, err := devcontainer_k8s_agent_module.GetDevcontainer(&ctx, k8sClient, opts)
		if err != nil {
			return "", err
		}

		// Look for the ttyd port among the extra ports, matching on several criteria
		var ttydNodePort uint16 = 0
		for _, portInfo := range devcontainerApp.Status.ExtraPortsAssigned {
			// Cover the likely variants: the name is ttyd, contains ttyd, is port-7681, or the container port is 7681
			if portInfo.Name == "ttyd" ||
				strings.Contains(portInfo.Name, "ttyd") ||
				portInfo.Name == "port-7681" ||
				portInfo.ContainerPort == 7681 {
				ttydNodePort = portInfo.NodePort
				log.Info("Found ttyd port: %d for port named: %s", ttydNodePort, portInfo.Name)
				break
			}
		}

		// If the ttyd port was found, build the URL
		if ttydNodePort > 0 {
			cfg, err := setting.NewConfigProviderFromFile(setting.CustomConf)
			if err != nil {
				log.Error("Failed to load custom conf '%s': %v", setting.CustomConf, err)
				return "", err
			}
			domain := cfg.Section("server").Key("DOMAIN").Value()
			return fmt.Sprintf("http://%s:%d/", domain, ttydNodePort), nil
		}

		// If no ttyd port was found, log detailed debugging information
		log.Info("Available extra ports for %s: %v", devcontainerName, devcontainerApp.Status.ExtraPortsAssigned)
		return "", fmt.Errorf("ttyd port (7681) not found for container: %s", devcontainerName)

	case setting.DOCKER:
		cli, err := docker.CreateDockerClient(&ctx)
		if err != nil {

@@ -640,7 +689,6 @@ func claimDevcontainerResource(ctx *context.Context, newDevContainer *CreateDevc
	initializeScript = strings.ReplaceAll(initializeScript, "$HOST_DOCKER_INTERNAL", cfg.Section("server").Key("DOMAIN").Value())
	initializeScript = strings.ReplaceAll(initializeScript, "$WORKDIR", newDevContainer.DevcontainerWorkDir)
	initializeScript = strings.ReplaceAll(initializeScript, "$REPO_URL", newURL)

	restartScript := strings.ReplaceAll(string(restartScriptContent), "$WORKDIR", newDevContainer.DevcontainerWorkDir)
	// 2. Dispatch the creation task according to the DevContainer Agent specified in the configuration file
	switch setting.Devcontainer.Agent {

@@ -662,8 +710,8 @@ func RestartDevcontainer(gitea_ctx gitea_context.Context, opts *RepoDevContainer
	switch setting.Devcontainer.Agent {
	case setting.KUBERNETES:
		ctx := gitea_ctx.Req.Context()
		return AssignDevcontainerRestart2K8sOperator(&ctx, opts)
	case setting.DOCKER:
		return DockerRestartContainer(&gitea_ctx, opts)
	default:

@@ -676,7 +724,7 @@ func StopDevcontainer(gitea_ctx context.Context, opts *RepoDevContainer) error {
	switch setting.Devcontainer.Agent {
	case setting.KUBERNETES:
		return AssignDevcontainerStop2K8sOperator(&gitea_ctx, opts)
	case setting.DOCKER:
		return DockerStopContainer(&gitea_ctx, opts)
	default:

@@ -3,8 +3,11 @@ package devcontainer
import (
	"context"
	"fmt"
	"time"

	"code.gitea.io/gitea/models/db"
	devcontainer_model "code.gitea.io/gitea/models/devcontainer"
	devcontainer_models "code.gitea.io/gitea/models/devcontainer"
	devcontainer_dto "code.gitea.io/gitea/modules/k8s"
	devcontainer_k8s_agent_module "code.gitea.io/gitea/modules/k8s"
	k8s_api_v1 "code.gitea.io/gitea/modules/k8s/api/v1"
@@ -14,8 +17,16 @@ import (
	"code.gitea.io/gitea/services/devcontainer/errors"
	"code.gitea.io/gitea/services/devstar_cloud_provider"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
)

var k8sGroupVersionResource = schema.GroupVersionResource{
	Group:    "devcontainer.devstar.cn",
	Version:  "v1",
	Resource: "devcontainerapps",
}
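
// This GVR addresses the devcontainerapps.devcontainer.devstar.cn CRD; the
// patch-based restart and stop operations below go through the dynamic client
// with this resource definition.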

type ErrIllegalK8sAgentParams struct {
	FieldNameList []string
}

@@ -116,6 +127,82 @@ func AssignDevcontainerCreation2K8sOperator(ctx *context.Context, newDevContaine
		return err
	}

	// 1.1: insert devcontainer_output records
	dbEngine := db.GetEngine(*ctx)

	// Insert the image-pull record
	if _, err := dbEngine.Table("devcontainer_output").Insert(&devcontainer_models.DevcontainerOutput{
		Output:  "Pulling image for K8s container: " + newDevContainer.Image,
		ListId:  0,
		Status:  "success", // set to success so the condition for the created flag is met
		UserId:  newDevContainer.UserId,
		RepoId:  newDevContainer.RepoId,
		Command: "Pull Image",
	}); err != nil {
		log.Info("Failed to insert Pull Image record: %v", err)
		// Do not return the error; keep going
	}

	// Insert the workspace-initialization record (the key condition for created = true)
	if _, err := dbEngine.Table("devcontainer_output").Insert(&devcontainer_models.DevcontainerOutput{
		Output:  "Initializing workspace in Kubernetes...",
		Status:  "success", // must be success
		UserId:  newDevContainer.UserId,
		RepoId:  newDevContainer.RepoId,
		Command: "Initialize Workspace",
		ListId:  1, // ListId > 0 together with Status = success is the condition for created = true
	}); err != nil {
		log.Info("Failed to insert Initialize Workspace record: %v", err)
		// Do not return the error; keep going
	}

	// Insert the DevStar-initialization record
	if _, err := dbEngine.Table("devcontainer_output").Insert(&devcontainer_models.DevcontainerOutput{
		Output:  "Initializing DevStar in Kubernetes...",
		Status:  "success",
		UserId:  newDevContainer.UserId,
		RepoId:  newDevContainer.RepoId,
		Command: "Initialize DevStar",
		ListId:  2,
	}); err != nil {
		log.Info("Failed to insert Initialize DevStar record: %v", err)
		// Do not return the error; keep going
	}

	// Insert the postCreateCommand record
	if _, err := dbEngine.Table("devcontainer_output").Insert(&devcontainer_models.DevcontainerOutput{
		Output:  "Running post-create commands in Kubernetes...",
		Status:  "success",
		UserId:  newDevContainer.UserId,
		RepoId:  newDevContainer.RepoId,
		Command: "Run postCreateCommand",
		ListId:  3,
	}); err != nil {
		log.Info("Failed to insert Run postCreateCommand record: %v", err)
		// Do not return the error; keep going
	}

	// Add the ttyd port configuration for the WebTerminal feature
	extraPorts := []k8s_api_v1.ExtraPortSpec{
		{
			Name:          "ttyd",
			ContainerPort: 7681, // default ttyd port
			ServicePort:   7681,
		},
	}

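	// Note: the startup command below compiles ttyd from source on first boot, which
	// requires network access and noticeably lengthens container startup; the trailing
	// `while true; do sleep 60; done` keeps the main process alive so the Pod is not
	// restarted once ssh and ttyd are running in the background.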
	command := []string{
		"/bin/bash",
		"-c",
		"rm -f /etc/ssh/ssh_host_* && ssh-keygen -A && service ssh start && " +
			"apt-get update -y && " +
			"apt-get install -y build-essential cmake git libjson-c-dev libwebsockets-dev && " +
			"git clone https://github.com/tsl0922/ttyd.git /tmp/ttyd && " +
			"cd /tmp/ttyd && mkdir build && cd build && cmake .. && make && make install && " +
			"nohup ttyd -p 7681 -W bash > /dev/null 2>&1 & " +
			"while true; do sleep 60; done",
	}

	// 2. Call the modules-layer k8s Agent to create the resource
	opts := &devcontainer_dto.CreateDevcontainerOptions{
		CreateOptions: metav1.CreateOptions{},

@@ -142,15 +229,12 @@ func AssignDevcontainerCreation2K8sOperator(ctx *context.Context, newDevContaine
	 * USER PID   %CPU %MEM VSZ  RSS TTY STAT START TIME COMMAND
	 * root 21826 0.0  0.0  2520 408 ?   Ss   18:36 0:00 sleep infinity
	 */
	CommandList:      command,
	ContainerPort:    22,
	ServicePort:      22,
	SSHPublicKeyList: newDevContainer.SSHPublicKeyList,
	GitRepositoryURL: newDevContainer.GitRepositoryURL,
	ExtraPorts:       extraPorts, // extra port configuration
	}

	// 2. Creation succeeded; fetch the DevContainer back from the cluster
@@ -159,9 +243,204 @@ func AssignDevcontainerCreation2K8sOperator(ctx *context.Context, newDevContaine
		return err
	}

	// // 3. Write the assigned NodePort Service back into newDevcontainer, so it can be stored in the database for the next step
	// newDevContainer.DevcontainerPort = devcontainerInCluster.Status.NodePortAssigned

	// 3. Handle the NodePort - check whether it is 0 (not yet assigned)
	nodePort := devcontainerInCluster.Status.NodePortAssigned

	if nodePort == 0 {
		log.Info("NodePort not yet assigned by K8s controller, setting temporary port")

		// Store 0 as the port, a sentinel value recorded in the database
		newDevContainer.DevcontainerPort = 0

		// The container is created, but its port is still pending
		log.Info("DevContainer created in cluster - Name: %s, NodePort: pending assignment",
			devcontainerInCluster.Name)

		// Start an asynchronous task to update the port
		go updateNodePortAsync(devcontainerInCluster.Name,
			setting.Devcontainer.Namespace,
			newDevContainer.UserId,
			newDevContainer.RepoId)
	} else {
		// The port is already assigned; use it directly
		newDevContainer.DevcontainerPort = nodePort
		log.Info("DevContainer created in cluster - Name: %s, NodePort: %d",
			devcontainerInCluster.Name, nodePort)
	}

	// 4. Return nil up the call chain so the database transaction commits automatically, completing the DevContainer creation
	return nil
}

// AssignDevcontainerRestart2K8sOperator dispatches a DevContainer restart task to the K8s controller
func AssignDevcontainerRestart2K8sOperator(ctx *context.Context, opts *RepoDevContainer) error {
	// 1. Obtain a Dynamic Client
	client, err := devcontainer_k8s_agent_module.GetKubernetesClient(ctx)
	if err != nil {
		log.Error("Failed to get Kubernetes client: %v", err)
		return err
	}

	// 2. Restart via a patch - update annotations so the controller redeploys the Pod
	// The patch adds or updates the restartedAt annotation and ensures desiredReplicas is 1
	patchData := fmt.Sprintf(`{
		"metadata": {
			"annotations": {
				"devstar.io/restartedAt": "%s",
				"devstar.io/desiredReplicas": "1"
			}
		}
	}`, time.Now().Format(time.RFC3339))

	// Apply the patch to the DevcontainerApp CRD
	_, err = client.Resource(k8sGroupVersionResource).
		Namespace(setting.Devcontainer.Namespace).
		Patch(*ctx, opts.DevContainerName, types.MergePatchType, []byte(patchData), metav1.PatchOptions{})

	if err != nil {
		log.Error("Failed to patch DevcontainerApp for restart: %v", err)
		return devcontainer_errors.ErrOperateDevcontainer{
			Action:  fmt.Sprintf("restart k8s devcontainer '%s'", opts.DevContainerName),
			Message: err.Error(),
		}
	}

	// Log the restart operation
	log.Info("DevContainer restarted: %s", opts.DevContainerName)

	// Record the restart operation in the database
	dbEngine := db.GetEngine(*ctx)
	_, err = dbEngine.Table("devcontainer_output").Insert(&devcontainer_models.DevcontainerOutput{
		Output:  fmt.Sprintf("Restarting K8s DevContainer %s", opts.DevContainerName),
		Status:  "success",
		UserId:  opts.UserId,
		RepoId:  opts.RepoId,
		Command: "Restart DevContainer",
		ListId:  0,
	})
	if err != nil {
		log.Warn("Failed to insert restart record: %v", err)
	}

	return nil
}
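
// For reference, the same restart can be triggered by hand; a sketch assuming kubectl
// access to the cluster and the annotation keys used above:
//
//	kubectl annotate devcontainerapps.devcontainer.devstar.cn <name> \
//	  -n <namespace> --overwrite \
//	  devstar.io/restartedAt="$(date -Iseconds)" devstar.io/desiredReplicas="1"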

// AssignDevcontainerStop2K8sOperator dispatches a DevContainer stop task to the K8s controller
func AssignDevcontainerStop2K8sOperator(ctx *context.Context, opts *RepoDevContainer) error {
	// 1. Obtain a Dynamic Client
	client, err := devcontainer_k8s_agent_module.GetKubernetesClient(ctx)
	if err != nil {
		log.Error("Failed to get Kubernetes client: %v", err)
		return err
	}

	// 2. Stop via a patch - add the stop annotations
	// The patch adds or updates the stoppedAt and desiredReplicas annotations
	patchData := fmt.Sprintf(`{
		"metadata": {
			"annotations": {
				"devstar.io/stoppedAt": "%s",
				"devstar.io/desiredReplicas": "0"
			}
		}
	}`, time.Now().Format(time.RFC3339))

	// Apply the patch to the DevcontainerApp CRD
	_, err = client.Resource(k8sGroupVersionResource).
		Namespace(setting.Devcontainer.Namespace).
		Patch(*ctx, opts.DevContainerName, types.MergePatchType, []byte(patchData), metav1.PatchOptions{})

	if err != nil {
		log.Error("Failed to patch DevcontainerApp for stop: %v", err)
		return devcontainer_errors.ErrOperateDevcontainer{
			Action:  fmt.Sprintf("stop k8s devcontainer '%s'", opts.DevContainerName),
			Message: err.Error(),
		}
	}

	// Log the stop operation
	log.Info("DevContainer stopped: %s", opts.DevContainerName)

	// Record the stop operation in the database
	dbEngine := db.GetEngine(*ctx)
	_, err = dbEngine.Table("devcontainer_output").Insert(&devcontainer_models.DevcontainerOutput{
		Output:  fmt.Sprintf("Stopping K8s DevContainer %s", opts.DevContainerName),
		Status:  "success",
		UserId:  opts.UserId,
		RepoId:  opts.RepoId,
		Command: "Stop DevContainer",
		ListId:  0,
	})
	if err != nil {
		// Only log the error; it must not affect the main flow's return value
		log.Warn("Failed to insert stop record: %v", err)
	}

	return nil
}
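
// Design note: stop and restart are both expressed as annotation patches on the CRD;
// stopping sets devstar.io/desiredReplicas to "0" and restarting sets it back to "1",
// so the controller is the only component that actually scales the StatefulSet.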

// Helper that updates the NodePort asynchronously
func updateNodePortAsync(containerName string, namespace string, userId, repoId int64) {
	// Wait for the K8s controller to finish port assignment
	time.Sleep(20 * time.Second)

	// Create a fresh context and client
	ctx := context.Background()
	client, err := devcontainer_k8s_agent_module.GetKubernetesClient(&ctx)
	if err != nil {
		log.Error("Failed to get K8s client in async updater: %v", err)
		return
	}

	// Try up to 5 times to read the port
	for i := 0; i < 5; i++ {
		getOpts := &devcontainer_k8s_agent_module.GetDevcontainerOptions{
			GetOptions: metav1.GetOptions{},
			Name:       containerName,
			Namespace:  namespace,
			Wait:       false,
		}

		devcontainer, err := devcontainer_k8s_agent_module.GetDevcontainer(&ctx, client, getOpts)
		if err == nil && devcontainer != nil && devcontainer.Status.NodePortAssigned > 0 {
			// The real port is known; update the database
			realNodePort := devcontainer.Status.NodePortAssigned

			// Log the extra (ttyd) port information
			if len(devcontainer.Status.ExtraPortsAssigned) > 0 {
				for _, portInfo := range devcontainer.Status.ExtraPortsAssigned {
					log.Info("Found extra port for %s: name=%s, nodePort=%d, containerPort=%d",
						containerName, portInfo.Name, portInfo.NodePort, portInfo.ContainerPort)
				}
			}

			log.Info("Found real NodePort %d for container %s, updating database record",
				realNodePort, containerName)

			engine := db.GetEngine(ctx)
			_, err := engine.Table("devcontainer").
				Where("user_id = ? AND repo_id = ?", userId, repoId).
				Update(map[string]interface{}{
					"devcontainer_port": realNodePort,
				})

			if err != nil {
				log.Error("Failed to update NodePort in database: %v", err)
			} else {
				log.Info("Successfully updated NodePort in database to %d", realNodePort)
			}

			return
		}

		time.Sleep(5 * time.Second)
	}

	log.Warn("Failed to retrieve real NodePort after multiple attempts")
}
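
// Design note: the async updater waits 20 seconds up front and then polls up to 5 times
// at 5-second intervals (roughly 45 seconds in total); if the port is still unassigned
// after that, the sentinel value 0 written during creation remains in the database.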