Add stop/restart for devcontainers and the web terminal under k8s

• Modified the Makefile to add controller-manager targets
• Added a Dockerfile for the controller-manager
panshuxiao committed 2025-04-29 20:09:48 +08:00
Parent: a28f802bd8
Commit: 234a5087fc
11 changed files with 733 additions and 35 deletions
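For orientation: the stop/restart flow added here is driven by two annotations patched onto the DevcontainerApp CR, which the controller below reconciles. As a rough sketch (annotation keys are taken from this diff; the timestamp value is illustrative), a stopped instance would carry:

    metadata:
      annotations:
        devstar.io/desiredReplicas: "0"
        devstar.io/stoppedAt: "2025-04-29T20:09:48+08:00"

A restart instead patches devstar.io/restartedAt and resets devstar.io/desiredReplicas to "1"; the controller copies restartedAt into the Pod template annotations to trigger a rolling update.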

View File

@@ -0,0 +1,39 @@
FROM golang:1.23 AS builder
WORKDIR /workspace
# Copy the Go Modules manifests
COPY go.mod go.mod
COPY go.sum go.sum
# Disable all proxies
ENV HTTP_PROXY=""
ENV HTTPS_PROXY=""
ENV http_proxy=""
ENV https_proxy=""
ENV GOPROXY=https://goproxy.cn,direct
# Download dependencies
RUN go mod download
# Copy the Go source code
COPY . .
# Build the controller-manager binary
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o controller-manager modules/k8s/cmd/controller-manager/controller-manager.go
# Build a small image
FROM alpine:3.18
WORKDIR /
# Create a non-root user
RUN addgroup -g 65532 nonroot && \
adduser -u 65532 -G nonroot -D nonroot
COPY --from=builder /workspace/modules/k8s/controller/ modules/k8s/controller/
COPY --from=builder /workspace/controller-manager .
USER 65532:65532
ENTRYPOINT ["/controller-manager"]

View File

@@ -192,6 +192,7 @@ help:
@echo "Make Routines:"
@echo " - \"\" equivalent to \"build\""
@echo " - build build everything"
@echo " - build-debug build everything to debug"
@echo " - frontend build frontend files"
@echo " - backend build backend files"
@echo " - watch watch everything and continuously rebuild"
@@ -249,6 +250,8 @@ help:
@echo " - tidy run go mod tidy"
@echo " - test[\#TestSpecificName] run unit test"
@echo " - test-sqlite[\#TestSpecificName] run integration test for sqlite"
@echo " - controller-manager build controller-manager"
@echo " - controller-manager-debug build controller-manager with debug info"
.PHONY: go-check
go-check:
@@ -769,6 +772,16 @@ install: $(wildcard *.go)
.PHONY: build
build: frontend backend
# New target: build binaries with debug info
.PHONY: build-debug
build-debug: frontend backend-debug
.PHONY: backend-debug
backend-debug: go-check generate-backend $(EXECUTABLE)-debug
$(EXECUTABLE)-debug: $(GO_SOURCES) $(TAGS_PREREQ)
CGO_CFLAGS="$(CGO_CFLAGS)" $(GO) build $(GOFLAGS) $(EXTRA_GOFLAGS) -tags '$(TAGS)' -ldflags '$(LDFLAGS)' -o $@
.PHONY: frontend
frontend: $(WEBPACK_DEST)
@@ -985,6 +998,18 @@ docker:
# docker build --disable-content-trust=false -t $(DOCKER_REF) .
# support also build args docker build --build-arg GITEA_VERSION=v1.2.3 --build-arg TAGS="bindata sqlite sqlite_unlock_notify" .
# New target: build the controller-manager
.PHONY: controller-manager
controller-manager: go-check
@echo "Building controller-manager..."
CGO_CFLAGS="$(CGO_CFLAGS)" $(GO) build $(GOFLAGS) $(EXTRA_GOFLAGS) -tags '$(TAGS)' -ldflags '-s -w $(LDFLAGS)' -o controller-manager modules/k8s/cmd/controller-manager/controller-manager.go
# Debug build target for the controller-manager
.PHONY: controller-manager-debug
controller-manager-debug: go-check
@echo "Building controller-manager with debug info..."
CGO_CFLAGS="$(CGO_CFLAGS)" $(GO) build $(GOFLAGS) $(EXTRA_GOFLAGS) -tags '$(TAGS)' -ldflags '$(LDFLAGS)' -o controller-manager-debug modules/k8s/cmd/controller-manager/controller-manager.go
# This endif closes the if at the top of the file
endif

View File

@@ -24,6 +24,39 @@ import (
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// ExtraPortSpec defines an extra port configuration
type ExtraPortSpec struct {
// Name is the name of the port
// +optional
Name string `json:"name,omitempty"`
// ContainerPort is the port number inside the container
// +kubebuilder:validation:Minimum=1
// +kubebuilder:validation:Maximum=65535
ContainerPort uint16 `json:"containerPort"`
// ServicePort is the port exposed by the Service
// +kubebuilder:validation:Minimum=1
// +kubebuilder:validation:Maximum=65535
ServicePort uint16 `json:"servicePort"`
}
// ExtraPortAssigned describes an assigned extra port
type ExtraPortAssigned struct {
// Name is the name of the port
// +optional
Name string `json:"name,omitempty"`
// ContainerPort is the port number inside the container
ContainerPort uint16 `json:"containerPort"`
// ServicePort is the port exposed by the Service
ServicePort uint16 `json:"servicePort"`
// NodePort is the NodePort allocated by Kubernetes
NodePort uint16 `json:"nodePort"`
}
// DevcontainerAppSpec defines the desired state of DevcontainerApp
type DevcontainerAppSpec struct {
// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
@@ -86,6 +119,10 @@ type ServiceSpec struct {
// +kubebuilder:validation:Minimum=1
// +optional
ServicePort uint16 `json:"servicePort,omitempty"`
// ExtraPorts defines extra port configurations
// +optional
ExtraPorts []ExtraPortSpec `json:"extraPorts,omitempty"`
}
// DevcontainerAppStatus defines the observed state of DevcontainerApp
@@ -105,6 +142,10 @@ type DevcontainerAppStatus struct {
// +optional
NodePortAssigned uint16 `json:"nodePortAssigned"`
// ExtraPortsAssigned stores the NodePorts of the extra port mappings
// +optional
ExtraPortsAssigned []ExtraPortAssigned `json:"extraPortsAssigned,omitempty"`
// Ready indicates whether the Readiness Probe of the Pod managed by the DevcontainerApp has reached the ready state
// +optional
Ready bool `json:"ready"`
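Assuming the json tags above map one-to-one onto the manifest, a DevcontainerApp using the new fields might look roughly like this (a sketch; the port numbers and the enclosing spec/status layout are illustrative, with ttyd taken from the WebTerminal wiring later in this commit):

    spec:
      service:
        servicePort: 22
        extraPorts:
          - name: ttyd
            containerPort: 7681
            servicePort: 7681
    status:
      nodePortAssigned: 30022
      ready: true
      extraPortsAssigned:
        - name: ttyd
          containerPort: 7681
          servicePort: 7681
          nodePort: 30681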

View File

@@ -88,7 +88,7 @@ func (in *DevcontainerAppList) DeepCopyObject() runtime.Object {
func (in *DevcontainerAppSpec) DeepCopyInto(out *DevcontainerAppSpec) {
*out = *in
in.StatefulSet.DeepCopyInto(&out.StatefulSet)
out.Service = in.Service
in.Service.DeepCopyInto(&out.Service)
if in.StartingDeadlineSeconds != nil {
in, out := &in.StartingDeadlineSeconds, &out.StartingDeadlineSeconds
*out = new(int64)
@@ -133,6 +133,11 @@ func (in *DevcontainerAppStatus) DeepCopyInto(out *DevcontainerAppStatus) {
in, out := &in.LastScheduleTime, &out.LastScheduleTime
*out = (*in).DeepCopy()
}
if in.ExtraPortsAssigned != nil {
in, out := &in.ExtraPortsAssigned, &out.ExtraPortsAssigned
*out = make([]ExtraPortAssigned, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevcontainerAppStatus.
@@ -145,9 +150,44 @@ func (in *DevcontainerAppStatus) DeepCopy() *DevcontainerAppStatus {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExtraPortAssigned) DeepCopyInto(out *ExtraPortAssigned) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraPortAssigned.
func (in *ExtraPortAssigned) DeepCopy() *ExtraPortAssigned {
if in == nil {
return nil
}
out := new(ExtraPortAssigned)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExtraPortSpec) DeepCopyInto(out *ExtraPortSpec) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraPortSpec.
func (in *ExtraPortSpec) DeepCopy() *ExtraPortSpec {
if in == nil {
return nil
}
out := new(ExtraPortSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) {
*out = *in
if in.ExtraPorts != nil {
in, out := &in.ExtraPorts, &out.ExtraPorts
*out = make([]ExtraPortSpec, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSpec.

View File

@@ -18,6 +18,7 @@ package devcontainer
import (
"context"
"strconv"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
@@ -55,6 +56,7 @@ type DevcontainerAppReconciler struct {
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.0/pkg/reconcile
func (r *DevcontainerAppReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
logger := log.FromContext(ctx)
var err error
@@ -63,10 +65,38 @@ func (r *DevcontainerAppReconciler) Reconcile(ctx context.Context, req ctrl.Requ
app := &devcontainer_v1.DevcontainerApp{}
err = r.Get(ctx, req.NamespacedName, app)
if err != nil {
// When the CRD resource DevcontainerApp has been deleted, return an empty result and skip the remaining steps
// When the CRD resource "DevcontainerApp" has been deleted, return an empty result and skip the remaining steps
return ctrl.Result{}, client.IgnoreNotFound(err)
}
// Check the stop-container annotation
if desiredReplicas, exists := app.Annotations["devstar.io/desiredReplicas"]; exists && desiredReplicas == "0" {
logger.Info("DevContainer stop requested via annotation", "name", app.Name)
// Fetch the current StatefulSet
statefulSetInNamespace := &apps_v1.StatefulSet{}
err = r.Get(ctx, req.NamespacedName, statefulSetInNamespace)
if err == nil {
// Set the replica count to 0
replicas := int32(0)
statefulSetInNamespace.Spec.Replicas = &replicas
if err := r.Update(ctx, statefulSetInNamespace); err != nil {
logger.Error(err, "Failed to scale down StatefulSet replicas to 0")
return ctrl.Result{}, err
}
logger.Info("StatefulSet scaled down to 0 replicas due to stop request")
// Mark the container as not ready
app.Status.Ready = false
if err := r.Status().Update(ctx, app); err != nil {
logger.Error(err, "Failed to update DevcontainerApp status")
return ctrl.Result{}, err
}
// Continue with the remaining logic (e.g. updating the Service)
}
}
// 2. Process according to the DevcontainerApp configuration
// 2.1 StatefulSet handling
statefulSet := devcontainer_controller_utils.NewStatefulSet(app)
@@ -88,7 +118,41 @@ func (r *DevcontainerAppReconciler) Reconcile(ctx context.Context, req ctrl.Requ
return ctrl.Result{}, err
}
} else {
// If StatefulSet.Status.readyReplicas changes, update (mark/un-mark) the DevcontainerApp.Status.Ready field
// Handle the restart annotation
if restartedAt, exists := app.Annotations["devstar.io/restartedAt"]; exists {
// Check whether the annotation has already been applied to the StatefulSet
needsRestart := true
if statefulSetInNamespace.Spec.Template.Annotations != nil {
if currentRestartTime, exists := statefulSetInNamespace.Spec.Template.Annotations["devstar.io/restartedAt"]; exists && currentRestartTime == restartedAt {
needsRestart = false
}
} else {
statefulSetInNamespace.Spec.Template.Annotations = make(map[string]string)
}
if needsRestart {
logger.Info("DevContainer restart requested", "name", app.Name, "time", restartedAt)
// Propagate the restart annotation to the Pod template to trigger a rolling update
statefulSetInNamespace.Spec.Template.Annotations["devstar.io/restartedAt"] = restartedAt
// Make sure the replica count is at least 1 (in case the container was stopped earlier)
replicas := int32(1)
if statefulSetInNamespace.Spec.Replicas != nil && *statefulSetInNamespace.Spec.Replicas > 0 {
replicas = *statefulSetInNamespace.Spec.Replicas
}
statefulSetInNamespace.Spec.Replicas = &replicas
if err := r.Update(ctx, statefulSetInNamespace); err != nil {
logger.Error(err, "Failed to update StatefulSet for restart")
return ctrl.Result{}, err
}
logger.Info("StatefulSet restarted successfully")
}
}
// If StatefulSet.Status.readyReplicas changes, update the DevcontainerApp.Status.Ready field
if statefulSetInNamespace.Status.ReadyReplicas > 0 {
app.Status.Ready = true
if err := r.Status().Update(ctx, app); err != nil {
@@ -96,27 +160,50 @@ func (r *DevcontainerAppReconciler) Reconcile(ctx context.Context, req ctrl.Requ
return ctrl.Result{}, err
}
logger.Info("DevContainer is READY", "ReadyReplicas", statefulSetInNamespace.Status.ReadyReplicas)
} else {
// app.Status.Ready = false
// if err := r.Status().Update(ctx, app); err != nil {
// logger.Error(err, "Failed to un-mark DevcontainerApp.Status.Ready", "DevcontainerApp.Status.Ready", app.Status.Ready)
// return ctrl.Result{}, err
// }
} else if app.Status.Ready {
// Only update when the current status is Ready but the StatefulSet is no longer ready
app.Status.Ready = false
if err := r.Status().Update(ctx, app); err != nil {
logger.Error(err, "Failed to un-mark DevcontainerApp.Status.Ready", "DevcontainerApp.Status.Ready", app.Status.Ready)
return ctrl.Result{}, err
}
logger.Info("DevContainer is NOT ready", "ReadyReplicas", statefulSetInNamespace.Status.ReadyReplicas)
}
// This used to trigger updates repeatedly.
// Cause: SetupWithManager watches StatefulSet, so every StatefulSet update fires an event.
// Both this update and the controller-manager's own StatefulSet updates fire update events, causing a loop.
// Fix: add a guard so the StatefulSet is only updated when app.Spec.StatefulSet.Image != statefulSet.Spec.Template.Spec.Containers[0].Image
if app.Spec.StatefulSet.Image != statefulSet.Spec.Template.Spec.Containers[0].Image {
// Fix: add guard conditions to avoid triggering updates in a loop
needsUpdate := false
// Check whether the image has changed
if app.Spec.StatefulSet.Image != statefulSetInNamespace.Spec.Template.Spec.Containers[0].Image {
needsUpdate = true
}
// Check the replica count: the desiredReplicas annotation is set but non-zero (the stop case was handled above)
if desiredReplicas, exists := app.Annotations["devstar.io/desiredReplicas"]; exists && desiredReplicas != "0" {
replicas, err := strconv.ParseInt(desiredReplicas, 10, 32)
if err == nil {
currentReplicas := int32(1) // default value
if statefulSetInNamespace.Spec.Replicas != nil {
currentReplicas = *statefulSetInNamespace.Spec.Replicas
}
if currentReplicas != int32(replicas) {
r32 := int32(replicas)
statefulSet.Spec.Replicas = &r32
needsUpdate = true
}
}
}
if needsUpdate {
if err := r.Update(ctx, statefulSet); err != nil {
return ctrl.Result{}, err
}
logger.Info("StatefulSet updated", "name", statefulSet.Name)
}
}
// 2.2 Service handling
// 2.3 Service handling
service := devcontainer_controller_utils.NewService(app)
if err := k8s_sigs_controller_runtime_utils.SetControllerReference(app, service, r.Scheme); err != nil {
return ctrl.Result{}, err
@@ -132,15 +219,136 @@ func (r *DevcontainerAppReconciler) Reconcile(ctx context.Context, req ctrl.Requ
// Creating the NodePort Service succeeds only once ==> write the NodePort assignment info into app.Status
logger.Info("[DevStar][DevContainer] NodePort Assigned", "nodePortAssigned", service.Spec.Ports[0].NodePort)
// Record the NodePort of the primary SSH port
app.Status.NodePortAssigned = uint16(service.Spec.Ports[0].NodePort)
// Handle extra ports
extraPortsAssigned := []devcontainer_v1.ExtraPortAssigned{}
// Process extra ports starting from the second port (index 1),
// because the first port (index 0) is the SSH port
for i := 1; i < len(service.Spec.Ports); i++ {
port := service.Spec.Ports[i]
// Look up the matching port spec
var containerPort uint16 = 0
// If extra port configs exist, try to match them
if app.Spec.Service.ExtraPorts != nil {
for _, ep := range app.Spec.Service.ExtraPorts {
if (ep.Name != "" && ep.Name == port.Name) ||
(uint16(port.Port) == ep.ServicePort) {
containerPort = ep.ContainerPort
break
}
}
}
// If no match was found, fall back to the target port
if containerPort == 0 && port.TargetPort.IntVal > 0 {
containerPort = uint16(port.TargetPort.IntVal)
}
// Append to the extra port list
extraPortsAssigned = append(extraPortsAssigned, devcontainer_v1.ExtraPortAssigned{
Name: port.Name,
ServicePort: uint16(port.Port),
ContainerPort: containerPort,
NodePort: uint16(port.NodePort),
})
logger.Info("[DevStar][DevContainer] Extra Port NodePort Assigned",
"name", port.Name,
"servicePort", port.Port,
"nodePort", port.NodePort)
}
// Update the CRD status, including the extra ports
app.Status.ExtraPortsAssigned = extraPortsAssigned
if err := r.Status().Update(ctx, app); err != nil {
logger.Error(err, "Failed to update NodePort of DevcontainerApp", "nodePortAssigned", service.Spec.Ports[0].NodePort)
logger.Error(err, "Failed to update NodePorts of DevcontainerApp",
"nodePortAssigned", service.Spec.Ports[0].NodePort,
"extraPortsCount", len(extraPortsAssigned))
return ctrl.Result{}, err
}
} else if !errors.IsAlreadyExists(err) {
logger.Error(err, "Failed to create DevcontainerApp NodePort Service", "nodePortServiceName", service.Name)
return ctrl.Result{}, err
}
} else {
// The Service already exists; inspect its port information
// Check whether the status needs updating
needStatusUpdate := false
// If the primary port has not been recorded yet, record it
if app.Status.NodePortAssigned == 0 && len(serviceInCluster.Spec.Ports) > 0 {
app.Status.NodePortAssigned = uint16(serviceInCluster.Spec.Ports[0].NodePort)
needStatusUpdate = true
logger.Info("[DevStar][DevContainer] Found existing main NodePort",
"nodePort", serviceInCluster.Spec.Ports[0].NodePort)
}
// Handle extra ports
if len(serviceInCluster.Spec.Ports) > 1 {
// If the extra port status is empty or the count does not match
if app.Status.ExtraPortsAssigned == nil ||
len(app.Status.ExtraPortsAssigned) != len(serviceInCluster.Spec.Ports)-1 {
extraPortsAssigned := []devcontainer_v1.ExtraPortAssigned{}
// Start from index 1, skipping the primary port
for i := 1; i < len(serviceInCluster.Spec.Ports); i++ {
port := serviceInCluster.Spec.Ports[i]
// Look up the matching port spec
var containerPort uint16 = 0
// If extra port configs exist, try to match them
if app.Spec.Service.ExtraPorts != nil {
for _, ep := range app.Spec.Service.ExtraPorts {
if (ep.Name != "" && ep.Name == port.Name) ||
(uint16(port.Port) == ep.ServicePort) {
containerPort = ep.ContainerPort
break
}
}
}
// If no match was found, fall back to the target port
if containerPort == 0 && port.TargetPort.IntVal > 0 {
containerPort = uint16(port.TargetPort.IntVal)
}
// Append to the extra port list
extraPortsAssigned = append(extraPortsAssigned, devcontainer_v1.ExtraPortAssigned{
Name: port.Name,
ServicePort: uint16(port.Port),
ContainerPort: containerPort,
NodePort: uint16(port.NodePort),
})
logger.Info("[DevStar][DevContainer] Found existing extra NodePort",
"name", port.Name,
"nodePort", port.NodePort)
}
// Update the extra port status
app.Status.ExtraPortsAssigned = extraPortsAssigned
needStatusUpdate = true
}
}
// If a status update is needed
if needStatusUpdate {
if err := r.Status().Update(ctx, app); err != nil {
logger.Error(err, "Failed to update NodePorts status for existing service")
return ctrl.Result{}, err
}
logger.Info("[DevStar][DevContainer] Updated NodePorts status for existing service",
"mainNodePort", app.Status.NodePortAssigned,
"extraPortsCount", len(app.Status.ExtraPortsAssigned))
}
}
return ctrl.Result{}, nil
}

View File

@@ -16,3 +16,9 @@ spec:
{{ if .Spec.Service.NodePort}}
nodePort: {{.Spec.Service.NodePort}}
{{ end }}
{{- range .Spec.Service.ExtraPorts }}
- name: {{ .Name | default (printf "port-%d" .ServicePort) }}
protocol: TCP
port: {{ .ServicePort }}
targetPort: {{ .ContainerPort }}
{{- end }}
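For instance, with the ttyd ExtraPortSpec configured later in this commit (name "ttyd", containerPort 7681, servicePort 7681), this range would presumably render an extra Service port like:

    - name: ttyd
      protocol: TCP
      port: 7681
      targetPort: 7681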

View File

@@ -59,6 +59,11 @@ spec:
- name: ssh-port
protocol: TCP
containerPort: {{.Spec.StatefulSet.ContainerPort}}
{{- range .Spec.Service.ExtraPorts }}
- name: {{ .Name | default (printf "port-%d" .ContainerPort) }}
protocol: TCP
containerPort: {{ .ContainerPort }}
{{- end }}
volumeMounts:
- name: pvc-devcontainer
mountPath: /data
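The container side mirrors the Service template: with the same ttyd ExtraPortSpec, the ports range above would presumably render an extra containerPort entry like:

    - name: ttyd
      protocol: TCP
      containerPort: 7681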

View File

@@ -241,6 +241,7 @@ func CreateDevcontainer(ctx *context.Context, client dynamic_client.Interface, o
},
Service: k8s_api_v1.ServiceSpec{
ServicePort: opts.ServicePort,
ExtraPorts: opts.ExtraPorts, // pass through the ExtraPorts configuration
},
},
}

View File

@@ -1,6 +1,7 @@
package k8s_agent
import (
k8s_api_v1 "code.gitea.io/gitea/modules/k8s/api/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -8,14 +9,15 @@ import (
type CreateDevcontainerOptions struct {
metav1.CreateOptions
Name string `json:"name"`
Namespace string `json:"namespace"`
Image string `json:"image"`
CommandList []string `json:"command"`
ContainerPort uint16 `json:"containerPort"`
ServicePort uint16 `json:"servicePort"`
SSHPublicKeyList []string `json:"sshPublicKeyList"`
GitRepositoryURL string `json:"gitRepositoryURL"`
Name string `json:"name"`
Namespace string `json:"namespace"`
Image string `json:"image"`
CommandList []string `json:"command"`
ContainerPort uint16 `json:"containerPort"`
ServicePort uint16 `json:"servicePort"`
SSHPublicKeyList []string `json:"sshPublicKeyList"`
GitRepositoryURL string `json:"gitRepositoryURL"`
ExtraPorts []k8s_api_v1.ExtraPortSpec `json:"extraPorts,omitempty"` // extra port configuration
}
type GetDevcontainerOptions struct {
@@ -45,4 +47,7 @@ type DevcontainerStatusK8sAgentVO struct {
// The CRD controller writes true into DevcontainerApp.Status.Ready if and only if the Readiness Probe of the Pod controlled by the StatefulSet returns true
Ready bool `json:"ready"`
// NodePort assignments of the extra ports
ExtraPortsAssigned []k8s_api_v1.ExtraPortAssigned `json:"extraPortsAssigned,omitempty"`
}

View File

@@ -16,8 +16,10 @@ import (
devcontainer_models_errors "code.gitea.io/gitea/models/devcontainer/errors"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/docker"
devcontainer_k8s_agent_module "code.gitea.io/gitea/modules/k8s"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
gitea_context "code.gitea.io/gitea/services/context"
devcontainer_service_errors "code.gitea.io/gitea/services/devcontainer/errors"
@@ -315,7 +317,54 @@ func fileExists(filename string) bool {
func GetWebTerminalURL(ctx context.Context, devcontainerName string) (string, error) {
switch setting.Devcontainer.Agent {
case setting.KUBERNETES:
return "", fmt.Errorf("unsupported agent")
// Create a K8s client and query the CRD directly for the ttyd port
k8sClient, err := devcontainer_k8s_agent_module.GetKubernetesClient(&ctx)
if err != nil {
return "", err
}
// Fetch the CRD info directly from K8s, without relying on the database
opts := &devcontainer_k8s_agent_module.GetDevcontainerOptions{
GetOptions: metav1.GetOptions{},
Name: devcontainerName,
Namespace: setting.Devcontainer.Namespace,
Wait: false,
}
devcontainerApp, err := devcontainer_k8s_agent_module.GetDevcontainer(&ctx, k8sClient, opts)
if err != nil {
return "", err
}
// Look for the ttyd port among the extra ports, matching on several conditions
var ttydNodePort uint16 = 0
for _, portInfo := range devcontainerApp.Status.ExtraPortsAssigned {
// Check the possible cases: the name is "ttyd", the name contains "ttyd", the name is "port-7681", or the container port is 7681
if portInfo.Name == "ttyd" ||
strings.Contains(portInfo.Name, "ttyd") ||
portInfo.Name == "port-7681" ||
portInfo.ContainerPort == 7681 {
ttydNodePort = portInfo.NodePort
log.Info("Found ttyd port: %d for port named: %s", ttydNodePort, portInfo.Name)
break
}
}
// If the ttyd port was found, build the URL
if ttydNodePort > 0 {
cfg, err := setting.NewConfigProviderFromFile(setting.CustomConf)
if err != nil {
log.Error("Failed to load custom conf '%s': %v", setting.CustomConf, err)
return "", err
}
domain := cfg.Section("server").Key("DOMAIN").Value()
return fmt.Sprintf("http://%s:%d/", domain, ttydNodePort), nil
}
// If no ttyd port was found, log detailed debug information
log.Info("Available extra ports for %s: %v", devcontainerName, devcontainerApp.Status.ExtraPortsAssigned)
return "", fmt.Errorf("ttyd port (7681) not found for container: %s", devcontainerName)
case setting.DOCKER:
cli, err := docker.CreateDockerClient(&ctx)
if err != nil {
@@ -593,8 +642,8 @@ func RestartDevcontainer(gitea_ctx gitea_context.Context, opts *RepoDevContainer
switch setting.Devcontainer.Agent {
case setting.KUBERNETES:
// K8s handling
return fmt.Errorf("agent not supported yet")
ctx := gitea_ctx.Req.Context()
return AssignDevcontainerRestart2K8sOperator(&ctx, opts)
case setting.DOCKER:
return DockerRestartContainer(&gitea_ctx, opts)
default:
@@ -607,7 +656,7 @@ func StopDevcontainer(gitea_ctx context.Context, opts *RepoDevContainer) error {
switch setting.Devcontainer.Agent {
case setting.KUBERNETES:
// K8s handling
return fmt.Errorf("agent not supported yet")
return AssignDevcontainerStop2K8sOperator(&gitea_ctx, opts)
case setting.DOCKER:
return DockerStopContainer(&gitea_ctx, opts)
default:

View File

@@ -3,8 +3,11 @@ package devcontainer
import (
"context"
"fmt"
"time"
"code.gitea.io/gitea/models/db"
devcontainer_model "code.gitea.io/gitea/models/devcontainer"
devcontainer_models "code.gitea.io/gitea/models/devcontainer"
devcontainer_dto "code.gitea.io/gitea/modules/k8s"
devcontainer_k8s_agent_module "code.gitea.io/gitea/modules/k8s"
k8s_api_v1 "code.gitea.io/gitea/modules/k8s/api/v1"
@@ -14,8 +17,16 @@ import (
"code.gitea.io/gitea/services/devcontainer/errors"
"code.gitea.io/gitea/services/devstar_cloud_provider"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
)
var k8sGroupVersionResource = schema.GroupVersionResource{
Group: "devcontainer.devstar.cn",
Version: "v1",
Resource: "devcontainerapps",
}
type ErrIllegalK8sAgentParams struct {
FieldNameList []string
}
@@ -116,6 +127,82 @@ func AssignDevcontainerCreation2K8sOperator(ctx *context.Context, newDevContaine
return err
}
// 1.1: insert devcontainer_output records
dbEngine := db.GetEngine(*ctx)
// Insert the pull-image record
if _, err := dbEngine.Table("devcontainer_output").Insert(&devcontainer_models.DevcontainerOutput{
Output: "Pulling image for K8s container: " + newDevContainer.Image,
ListId: 0,
Status: "success", // set to success to satisfy the condition on the created variable
UserId: newDevContainer.UserId,
RepoId: newDevContainer.RepoId,
Command: "Pull Image",
}); err != nil {
log.Info("Failed to insert Pull Image record: %v", err)
// Do not return the error; keep going
}
// Insert the initialize-workspace record (the key condition for created = true)
if _, err := dbEngine.Table("devcontainer_output").Insert(&devcontainer_models.DevcontainerOutput{
Output: "Initializing workspace in Kubernetes...",
Status: "success", // must be success
UserId: newDevContainer.UserId,
RepoId: newDevContainer.RepoId,
Command: "Initialize Workspace",
ListId: 1, // ListId > 0 with Status = success is the condition for created = true
}); err != nil {
log.Info("Failed to insert Initialize Workspace record: %v", err)
// Do not return the error; keep going
}
// Insert the initialize-DevStar record
if _, err := dbEngine.Table("devcontainer_output").Insert(&devcontainer_models.DevcontainerOutput{
Output: "Initializing DevStar in Kubernetes...",
Status: "success",
UserId: newDevContainer.UserId,
RepoId: newDevContainer.RepoId,
Command: "Initialize DevStar",
ListId: 2,
}); err != nil {
log.Info("Failed to insert Initialize DevStar record: %v", err)
// Do not return the error; keep going
}
// Insert the postCreateCommand record
if _, err := dbEngine.Table("devcontainer_output").Insert(&devcontainer_models.DevcontainerOutput{
Output: "Running post-create commands in Kubernetes...",
Status: "success",
UserId: newDevContainer.UserId,
RepoId: newDevContainer.RepoId,
Command: "Run postCreateCommand",
ListId: 3,
}); err != nil {
log.Info("Failed to insert Run postCreateCommand record: %v", err)
// Do not return the error; keep going
}
// Add the ttyd port configuration for the WebTerminal feature
extraPorts := []k8s_api_v1.ExtraPortSpec{
{
Name: "ttyd",
ContainerPort: 7681, // default ttyd port
ServicePort: 7681,
},
}
command := []string{
"/bin/bash",
"-c",
"rm -f /etc/ssh/ssh_host_* && ssh-keygen -A && service ssh start && " +
"apt-get update -y && " +
"apt-get install -y build-essential cmake git libjson-c-dev libwebsockets-dev && " +
"git clone https://github.com/tsl0922/ttyd.git /tmp/ttyd && " +
"cd /tmp/ttyd && mkdir build && cd build && cmake .. && make && make install && " +
"nohup ttyd -p 7681 -W bash > /dev/null 2>&1 & " +
"while true; do sleep 60; done",
}
// 2. Call the modules-layer k8s Agent to create the resources
opts := &devcontainer_dto.CreateDevcontainerOptions{
CreateOptions: metav1.CreateOptions{},
@@ -142,15 +229,12 @@ func AssignDevcontainerCreation2K8sOperator(ctx *context.Context, newDevContaine
* USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
* root 21826 0.0 0.0 2520 408 ? Ss 18:36 0:00 sleep infinity
*/
CommandList: []string{
"/bin/bash",
"-c",
"rm -f /etc/ssh/ssh_host_* && ssh-keygen -A && service ssh start && while true; do sleep 60; done",
},
CommandList: command,
ContainerPort: 22,
ServicePort: 22,
SSHPublicKeyList: newDevContainer.SSHPublicKeyList,
GitRepositoryURL: newDevContainer.GitRepositoryURL,
ExtraPorts: extraPorts, // extra port configuration
}
// 2. Creation succeeded; fetch the DevContainer back from the cluster
@@ -159,9 +243,204 @@ func AssignDevcontainerCreation2K8sOperator(ctx *context.Context, newDevContaine
return err
}
// 3. Write the assigned NodePort back to newDevContainer so it can be stored in the database for the next step
newDevContainer.DevcontainerPort = devcontainerInCluster.Status.NodePortAssigned
// // 3. Write the assigned NodePort back to newDevContainer so it can be stored in the database for the next step
// newDevContainer.DevcontainerPort = devcontainerInCluster.Status.NodePortAssigned
// 3. Handle the NodePort: check whether it is 0 (not yet assigned)
nodePort := devcontainerInCluster.Status.NodePortAssigned
if nodePort == 0 {
log.Info("NodePort not yet assigned by K8s controller, setting temporary port")
// Set the port to 0; the database record uses it as a sentinel value
newDevContainer.DevcontainerPort = 0
// Log that the container was created but the port is still pending
log.Info("DevContainer created in cluster - Name: %s, NodePort: pending assignment",
devcontainerInCluster.Name)
// Kick off an async task to update the port
go updateNodePortAsync(devcontainerInCluster.Name,
setting.Devcontainer.Namespace,
newDevContainer.UserId,
newDevContainer.RepoId)
} else {
// The port is already assigned; use it directly
newDevContainer.DevcontainerPort = nodePort
log.Info("DevContainer created in cluster - Name: %s, NodePort: %d",
devcontainerInCluster.Name, nodePort)
}
log.Info("DevContainer created in cluster - Name: %s, NodePort: %d",
devcontainerInCluster.Name,
devcontainerInCluster.Status.NodePortAssigned)
// 4. Return nil up the call chain; the database transaction commits automatically, completing the DevContainer creation
return nil
}
// AssignDevcontainerRestart2K8sOperator dispatches a DevContainer restart task to the K8s controller
func AssignDevcontainerRestart2K8sOperator(ctx *context.Context, opts *RepoDevContainer) error {
// 1. Get the dynamic client
client, err := devcontainer_k8s_agent_module.GetKubernetesClient(ctx)
if err != nil {
log.Error("Failed to get Kubernetes client: %v", err)
return err
}
// 2. Restart by patching: update the annotations so the controller redeploys the Pod
// Build the patch: add or update the restartedAt annotation and make sure desiredReplicas is 1
patchData := fmt.Sprintf(`{
"metadata": {
"annotations": {
"devstar.io/restartedAt": "%s",
"devstar.io/desiredReplicas": "1"
}
}
}`, time.Now().Format(time.RFC3339))
// Apply the patch to the DevcontainerApp CRD
_, err = client.Resource(k8sGroupVersionResource).
Namespace(setting.Devcontainer.Namespace).
Patch(*ctx, opts.DevContainerName, types.MergePatchType, []byte(patchData), metav1.PatchOptions{})
if err != nil {
log.Error("Failed to patch DevcontainerApp for restart: %v", err)
return devcontainer_errors.ErrOperateDevcontainer{
Action: fmt.Sprintf("restart k8s devcontainer '%s'", opts.DevContainerName),
Message: err.Error(),
}
}
// Log the restart operation
log.Info("DevContainer restarted: %s", opts.DevContainerName)
// Record the restart operation in the database
dbEngine := db.GetEngine(*ctx)
_, err = dbEngine.Table("devcontainer_output").Insert(&devcontainer_models.DevcontainerOutput{
Output: fmt.Sprintf("Restarting K8s DevContainer %s", opts.DevContainerName),
Status: "success",
UserId: opts.UserId,
RepoId: opts.RepoId,
Command: "Restart DevContainer",
ListId: 0,
})
if err != nil {
log.Warn("Failed to insert restart record: %v", err)
}
return nil
}
// AssignDevcontainerStop2K8sOperator dispatches a DevContainer stop task to the K8s controller
func AssignDevcontainerStop2K8sOperator(ctx *context.Context, opts *RepoDevContainer) error {
// 1. Get the dynamic client
client, err := devcontainer_k8s_agent_module.GetKubernetesClient(ctx)
if err != nil {
log.Error("Failed to get Kubernetes client: %v", err)
return err
}
// 2. Stop by patching: add the stop annotations
// Build the patch: add or update the stoppedAt and desiredReplicas annotations
patchData := fmt.Sprintf(`{
"metadata": {
"annotations": {
"devstar.io/stoppedAt": "%s",
"devstar.io/desiredReplicas": "0"
}
}
}`, time.Now().Format(time.RFC3339))
// Apply the patch to the DevcontainerApp CRD
_, err = client.Resource(k8sGroupVersionResource).
Namespace(setting.Devcontainer.Namespace).
Patch(*ctx, opts.DevContainerName, types.MergePatchType, []byte(patchData), metav1.PatchOptions{})
if err != nil {
log.Error("Failed to patch DevcontainerApp for stop: %v", err)
return devcontainer_errors.ErrOperateDevcontainer{
Action: fmt.Sprintf("stop k8s devcontainer '%s'", opts.DevContainerName),
Message: err.Error(),
}
}
// Log the stop operation
log.Info("DevContainer stopped: %s", opts.DevContainerName)
// Record the stop operation in the database
dbEngine := db.GetEngine(*ctx)
_, err = dbEngine.Table("devcontainer_output").Insert(&devcontainer_models.DevcontainerOutput{
Output: fmt.Sprintf("Stopping K8s DevContainer %s", opts.DevContainerName),
Status: "success",
UserId: opts.UserId,
RepoId: opts.RepoId,
Command: "Stop DevContainer",
ListId: 0,
})
if err != nil {
// Only log the error; it does not affect the main flow's result
log.Warn("Failed to insert stop record: %v", err)
}
return nil
}
// updateNodePortAsync is a helper that updates the NodePort asynchronously
func updateNodePortAsync(containerName string, namespace string, userId, repoId int64) {
// Wait for the K8s controller to finish port assignment
time.Sleep(20 * time.Second)
// Create a fresh context and client
ctx := context.Background()
client, err := devcontainer_k8s_agent_module.GetKubernetesClient(&ctx)
if err != nil {
log.Error("Failed to get K8s client in async updater: %v", err)
return
}
// Try up to 5 times to fetch the port
for i := 0; i < 5; i++ {
getOpts := &devcontainer_k8s_agent_module.GetDevcontainerOptions{
GetOptions: metav1.GetOptions{},
Name: containerName,
Namespace: namespace,
Wait: false,
}
devcontainer, err := devcontainer_k8s_agent_module.GetDevcontainer(&ctx, client, getOpts)
if err == nil && devcontainer != nil && devcontainer.Status.NodePortAssigned > 0 {
// Got the real port; update the database
realNodePort := devcontainer.Status.NodePortAssigned
// Log the ttyd port information
if len(devcontainer.Status.ExtraPortsAssigned) > 0 {
for _, portInfo := range devcontainer.Status.ExtraPortsAssigned {
log.Info("Found extra port for %s: name=%s, nodePort=%d, containerPort=%d",
containerName, portInfo.Name, portInfo.NodePort, portInfo.ContainerPort)
}
}
log.Info("Found real NodePort %d for container %s, updating database record",
realNodePort, containerName)
engine := db.GetEngine(ctx)
_, err := engine.Table("devcontainer").
Where("user_id = ? AND repo_id = ?", userId, repoId).
Update(map[string]interface{}{
"devcontainer_port": realNodePort,
})
if err != nil {
log.Error("Failed to update NodePort in database: %v", err)
} else {
log.Info("Successfully updated NodePort in database to %d", realNodePort)
}
return
}
time.Sleep(5 * time.Second)
}
log.Warn("Failed to retrieve real NodePort after multiple attempts")
}