devcontainer: make /root and /etc/ssh persistent
Fix the connection URL returned by "Open with VSCode"
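Persisting /root and /etc/ssh is what lets the startup command below generate SSH host keys only when they are missing. The volume wiring itself is not shown in these hunks; the following is a minimal sketch of how such a mount could look with the k8s.io/api types already imported by this package — the function name, volume name, sub-paths, and claim are illustrative assumptions, not part of this commit.

package devcontainer

import v1 "k8s.io/api/core/v1"

// Sketch: back /root and /etc/ssh with a PersistentVolumeClaim so that home
// directory contents and SSH host keys survive pod restarts. The volume name,
// sub-paths, and claim wiring are placeholders, not taken from this commit.
func persistentHomeAndSSHMounts(claimName string) (v1.Volume, []v1.VolumeMount) {
	vol := v1.Volume{
		Name: "devcontainer-persistent",
		VolumeSource: v1.VolumeSource{
			PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
				ClaimName: claimName,
			},
		},
	}
	mounts := []v1.VolumeMount{
		{Name: "devcontainer-persistent", MountPath: "/root", SubPath: "root"},
		{Name: "devcontainer-persistent", MountPath: "/etc/ssh", SubPath: "etc-ssh"},
	}
	return vol, mounts
}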
@@ -567,6 +567,13 @@ func Get_IDE_TerminalURL(ctx *gitea_context.Context, devcontainer *RepoDevContai
		return "", fmt.Errorf("不支持的 DevContainer Agent 类型: %s", setting.Devcontainer.Agent)
	}

	// Load the configuration file
	cfg, err := setting.NewConfigProviderFromFile(setting.CustomConf)
	if err != nil {
		log.Error("Get_IDE_TerminalURL: 加载配置文件失败: %v", err)
		return "", err
	}
	log.Info("Get_IDE_TerminalURL: 配置文件加载成功, ROOT_URL=%s", cfg.Section("server").Key("ROOT_URL").Value())
	// Build and return the URL
	return "://mengning.devstar/" +
		"openProject?host=" + devcontainer.RepoName +
@@ -575,13 +582,128 @@ func Get_IDE_TerminalURL(ctx *gitea_context.Context, devcontainer *RepoDevContai
		"&username=" + devcontainer.DevContainerUsername +
		"&path=" + devcontainer.DevContainerWorkDir +
		"&access_token=" + access_token +
		"&devstar_username=" + devcontainer.RepoOwnerName, nil
		"&devstar_username=" + devcontainer.RepoOwnerName +
		"&devstar_domain=" + cfg.Section("server").Key("ROOT_URL").Value(), nil
}

func AddPublicKeyToAllRunningDevContainer(ctx context.Context, user *user_model.User, publicKey string) error {
	switch setting.Devcontainer.Agent {
	case setting.KUBERNETES, "k8s":
		return fmt.Errorf("unsupported agent")
		log.Info("AddPublicKeyToAllRunningDevContainer: 开始为用户 %s (ID=%d) 的所有运行中容器添加公钥",
			user.Name, user.ID)

		// 1. Fetch all of the user's DevContainers
		opts := &SearchUserDevcontainerListItemVoOptions{
			Actor: user,
		}
		userDevcontainersVO, err := GetUserDevcontainersList(ctx, opts)
		if err != nil {
			log.Error("AddPublicKeyToAllRunningDevContainer: 获取用户容器列表失败: %v", err)
			return err
		}

		repoDevContainerList := userDevcontainersVO.DevContainers
		if len(repoDevContainerList) == 0 {
			log.Info("AddPublicKeyToAllRunningDevContainer: 用户 %s 没有任何 DevContainer", user.Name)
			return nil
		}

		log.Info("AddPublicKeyToAllRunningDevContainer: 找到 %d 个 DevContainer", len(repoDevContainerList))

		// 2. Get the K8s client
		k8sClient, err := devcontainer_k8s_agent_module.GetKubernetesClient(&ctx)
		if err != nil {
			log.Error("AddPublicKeyToAllRunningDevContainer: 获取 K8s 客户端失败: %v", err)
			return err
		}

		// 3. Get the standard K8s client used to execute commands
		stdClient, err := getStandardKubernetesClient()
		if err != nil {
			log.Error("AddPublicKeyToAllRunningDevContainer: 获取标准 K8s 客户端失败: %v", err)
			return err
		}

		// 4. Iterate over all containers, check their state, and add the public key
		successCount := 0
		errorCount := 0

		for _, repoDevContainer := range repoDevContainerList {
			log.Info("AddPublicKeyToAllRunningDevContainer: 处理容器 %s", repoDevContainer.DevContainerName)

			// 4.1 Check whether the DevContainer is running
			getOpts := &devcontainer_k8s_agent_module.GetDevcontainerOptions{
				GetOptions: metav1.GetOptions{},
				Name:       repoDevContainer.DevContainerName,
				Namespace:  setting.Devcontainer.Namespace,
				Wait:       false,
			}

			devcontainerApp, err := devcontainer_k8s_agent_module.GetDevcontainer(&ctx, k8sClient, getOpts)
			if err != nil {
				log.Error("AddPublicKeyToAllRunningDevContainer: 获取容器 %s 状态失败: %v",
					repoDevContainer.DevContainerName, err)
				errorCount++
				continue
			}

			// 4.2 Check whether the container is ready
			if !devcontainerApp.Status.Ready {
				log.Info("AddPublicKeyToAllRunningDevContainer: 容器 %s 未就绪,跳过",
					repoDevContainer.DevContainerName)
				continue
			}

			log.Info("AddPublicKeyToAllRunningDevContainer: 容器 %s 就绪,开始添加公钥",
				repoDevContainer.DevContainerName)

			// 4.3 Build the command that adds the public key
			// Add the key defensively so it is not appended twice
			addKeyCommand := fmt.Sprintf(`
# Make sure the .ssh directory exists
mkdir -p ~/.ssh
chmod 700 ~/.ssh

# Check whether the public key already exists
if ! grep -Fxq "%s" ~/.ssh/authorized_keys 2>/dev/null; then
echo "%s" >> ~/.ssh/authorized_keys
chmod 600 ~/.ssh/authorized_keys
echo "Public key added successfully"
else
echo "Public key already exists"
fi

# Verify the file contents
wc -l ~/.ssh/authorized_keys
`, publicKey, publicKey)

			// 4.4 Execute the command inside the container
			err = executeCommandInK8sPod(&ctx, stdClient,
				setting.Devcontainer.Namespace,
				repoDevContainer.DevContainerName, // pass the DevContainer name rather than the Pod name
				repoDevContainer.DevContainerName, // the container name usually matches the DevContainer name
				[]string{"/bin/bash", "-c", addKeyCommand})

			if err != nil {
				log.Error("AddPublicKeyToAllRunningDevContainer: 在容器 %s 中执行添加公钥命令失败: %v",
					repoDevContainer.DevContainerName, err)
				errorCount++
			} else {
				log.Info("AddPublicKeyToAllRunningDevContainer: 成功为容器 %s 添加公钥",
					repoDevContainer.DevContainerName)
				successCount++
			}
		}

		log.Info("AddPublicKeyToAllRunningDevContainer: 完成处理 - 成功: %d, 失败: %d",
			successCount, errorCount)

		if errorCount > 0 && successCount == 0 {
			return fmt.Errorf("所有容器添加公钥都失败了,错误数量: %d", errorCount)
		}

		return nil

	case setting.DOCKER:
		cli, err := docker.CreateDockerClient(&ctx)
		if err != nil {
@@ -623,8 +745,9 @@ func AddPublicKeyToAllRunningDevContainer(ctx context.Context, user *user_model.
			}
		}
		return nil

	default:
		return fmt.Errorf("unknown agent")
		return fmt.Errorf("unknown agent: %s", setting.Devcontainer.Agent)
	}
}
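For orientation, a minimal sketch of how this service function might be invoked when a user registers a new SSH public key. Only AddPublicKeyToAllRunningDevContainer and its signature come from this commit; the surrounding hook, function name, and error-handling policy are illustrative assumptions.

package devcontainer

import (
	"context"

	user_model "code.gitea.io/gitea/models/user"
	"code.gitea.io/gitea/modules/log"
)

// Hypothetical call site: after a user uploads a new SSH key, propagate it to
// every running DevContainer they own.
func propagateNewPublicKey(ctx context.Context, doer *user_model.User, keyContent string) {
	if err := AddPublicKeyToAllRunningDevContainer(ctx, doer, keyContent); err != nil {
		// Propagation is best-effort; the key itself has already been stored by the caller.
		log.Error("propagateNewPublicKey: %v", err)
	}
}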
@@ -1,6 +1,7 @@
package devcontainer

import (
	"bytes"
	"context"
	"fmt"
	"strings"
@@ -16,6 +17,7 @@ import (
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/setting"
	"code.gitea.io/gitea/services/devcontainer/errors"
	v1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -24,6 +26,8 @@
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/remotecommand"
	"k8s.io/kubectl/pkg/scheme"
)

var k8sGroupVersionResource = schema.GroupVersionResource{
@@ -242,12 +246,24 @@ func AssignDevcontainerCreation2K8sOperator(ctx *context.Context, newDevContaine
	command := []string{
		"/bin/bash",
		"-c",
		"rm -f /etc/ssh/ssh_host_* && ssh-keygen -A && service ssh start && " +
			"export DEBIAN_FRONTEND=noninteractive && " +
			"apt-get update -y && " +
			"apt-get install -y build-essential cmake git libjson-c-dev libwebsockets-dev && " +
			"git clone https://github.com/tsl0922/ttyd.git /tmp/ttyd && " +
			"cd /tmp/ttyd && mkdir build && cd build && cmake .. && make && make install && " +
			"nohup ttyd -p 7681 -W bash > /dev/null 2>&1 & " +
			"apt-get install -y ssh && " +
			// Changed to conditional generation: only create host keys when they do not already exist
			"if [ ! -f /etc/ssh/ssh_host_rsa_key ]; then " +
			" echo 'Generating SSH host keys...' && " +
			" ssh-keygen -A && " +
			" echo 'SSH host keys generated' ; " +
			"else " +
			" echo 'SSH host keys already exist' ; " +
			"fi && " +
			"mkdir -p /var/run/sshd && " +
			"/usr/sbin/sshd && " +
			"if [ -f /ttyd-shared/ttyd ]; then " +
			"mkdir -p /data/workspace && " +
			"cd /data/workspace && " +
			"/ttyd-shared/ttyd -p 7681 -i 0.0.0.0 --writable bash > /tmp/ttyd.log 2>&1 & " +
			"fi && " +
			"while true; do sleep 60; done",
	}
	log.Info("AssignDevcontainerCreation2K8sOperator: Command includes ttyd installation and startup")
@@ -642,3 +658,90 @@ func getStandardKubernetesClient() (*kubernetes.Clientset, error) {

	return stdClient, nil
}

// executeCommandInK8sPod is a helper that executes a command inside a K8s Pod
func executeCommandInK8sPod(ctx *context.Context, client *kubernetes.Clientset, namespace, devcontainerName, containerName string, command []string) error {
	log.Info("executeCommandInK8sPod: 开始为 DevContainer %s 查找对应的 Pod", devcontainerName)

	// 1. First, look up the corresponding Pod by label selector
	labelSelector := fmt.Sprintf("app=%s", devcontainerName)
	pods, err := client.CoreV1().Pods(namespace).List(*ctx, metav1.ListOptions{
		LabelSelector: labelSelector,
	})
	if err != nil {
		log.Error("executeCommandInK8sPod: 查找 Pod 失败: %v", err)
		return fmt.Errorf("查找 Pod 失败: %v", err)
	}

	if len(pods.Items) == 0 {
		log.Error("executeCommandInK8sPod: 未找到 DevContainer %s 对应的 Pod", devcontainerName)
		return fmt.Errorf("未找到 DevContainer %s 对应的 Pod", devcontainerName)
	}

	// 2. Find the first running Pod
	var targetPod *v1.Pod
	for i := range pods.Items {
		pod := &pods.Items[i]
		if pod.Status.Phase == v1.PodRunning {
			targetPod = pod
			break
		}
	}

	if targetPod == nil {
		log.Error("executeCommandInK8sPod: DevContainer %s 没有运行中的 Pod", devcontainerName)
		return fmt.Errorf("DevContainer %s 没有运行中的 Pod", devcontainerName)
	}

	podName := targetPod.Name
	log.Info("executeCommandInK8sPod: 找到运行中的 Pod: %s, 在容器 %s 中执行命令",
		podName, containerName)

	// 3. Execute the command
	req := client.CoreV1().RESTClient().Post().
		Resource("pods").
		Name(podName).
		Namespace(namespace).
		SubResource("exec").
		Param("container", containerName)

	req.VersionedParams(&v1.PodExecOptions{
		Container: containerName,
		Command:   command,
		Stdin:     false,
		Stdout:    true,
		Stderr:    true,
		TTY:       false,
	}, scheme.ParameterCodec)

	// Build the executor
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		// If the out-of-cluster config fails, fall back to the in-cluster config
		config, err = rest.InClusterConfig()
		if err != nil {
			return fmt.Errorf("获取 K8s 配置失败: %v", err)
		}
	}

	executor, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL())
	if err != nil {
		return fmt.Errorf("创建命令执行器失败: %v", err)
	}

	// Run the command
	var stdout, stderr bytes.Buffer
	err = executor.StreamWithContext(*ctx, remotecommand.StreamOptions{
		Stdout: &stdout,
		Stderr: &stderr,
	})

	if err != nil {
		log.Error("executeCommandInK8sPod: 命令执行失败: %v, stderr: %s",
			err, stderr.String())
		return fmt.Errorf("命令执行失败: %v, stderr: %s", err, stderr.String())
	}

	log.Info("executeCommandInK8sPod: 命令执行成功, stdout: %s", stdout.String())
	return nil
}