Skip to content

Commit

Permalink
Merge pull request kosmos-io#792 from OrangeBao/main
Browse files Browse the repository at this point in the history
feat: support lvscare for kubenest
  • Loading branch information
duanmengkk authored Jan 3, 2025
2 parents cb88970 + 838026b commit 8f572f9
Show file tree
Hide file tree
Showing 4 changed files with 186 additions and 19 deletions.
32 changes: 29 additions & 3 deletions hack/k8s-in-k8s/generate_env.sh
Original file line number Diff line number Diff line change
Expand Up @@ -106,6 +106,7 @@ PATH_KUBERNETES=$(GetDirectory $PATH_KUBERNETES_PKI)
HOST_CORE_DNS=$(GetKubeDnsClusterIP)

DOCKER_IMAGE_NGINX="registry.paas/cmss/nginx:1.21.4"
DOCKER_IMAGE_LVSCARE="registry.paas/cmss/lvscare:1.0.0"

# Node-label keywords that identify control-plane nodes.
# NOTE: bash array elements are whitespace-separated; the original trailing
# comma made element 0 the literal string "master," which can never match a
# real label. (Variable name keeps its historical spelling for callers.)
master_lables=("master" "control-plane")

Expand Down Expand Up @@ -157,10 +158,35 @@ KUBELET_CONF_TIMEOUT=30
# load balance
DOCKER_IMAGE_NGINX=$DOCKER_IMAGE_NGINX
DOCKER_IMAGE_LVSCARE=$DOCKER_IMAGE_LVSCARE
SERVERS=($SERVERS)
LOCAL_PORT="6443"
LOCAL_IP="127.0.0.1" # [::1]
USE_NGINX=true
# Proxy Configuration Options
# Specify the proxy server to be used for traffic management or load balancing.
# Available options for USE_PROXY:
# - "NGINX" : Use NGINX as the proxy server.
# - "LVSCARE" : Use LVSCARE for load balancing (based on IPVS).
# - "NONE" : No proxy server will be used.
# Note: When USE_PROXY is set to "NONE", no proxy service will be configured.
USE_PROXY="LVSCARE" # Current proxy setting: LVSCARE for load balancing.
# Proxy Service Port Configuration
# LOCAL_PORT specifies the port on which the proxy service listens.
# Example:
# - For Kubernetes setups, this is typically the API server port.
LOCAL_PORT="6443" # Proxy service listening port (default: 6443 for Kubernetes API).
# Proxy Address Configuration
# LOCAL_IP specifies the address of the proxy service.
# - When USE_PROXY is set to "NGINX":
# - Use LOCAL_IP="127.0.0.1" (IPv4) or LOCAL_IP="[::1]" (IPv6 loopback).
# - When USE_PROXY is set to "LVSCARE":
# - Use LOCAL_IP as the VIP (e.g., "192.0.0.2") for LVSCARE load balancing.
# - Ensure this address is added to the "excludeCIDRs" list in the kube-proxy configuration file
# to avoid routing conflicts.
LOCAL_IP="192.0.0.2" # LVSCARE setup: Proxy address and VIP for load balancing.
CRI_SOCKET=$CRI_SOCKET
function GenerateKubeadmConfig() {
Expand Down
156 changes: 143 additions & 13 deletions hack/k8s-in-k8s/kubelet_node_helper.sh
Original file line number Diff line number Diff line change
Expand Up @@ -202,8 +202,8 @@ function get_ca_certificate() {

# verify the kubeconfig data is not empty
if [ -z "$kubeconfig_data" ]; then
echo "Failed to extract certificate-authority-data."
return 1
echo "Failed to extract certificate-authority-data."
return 1
fi

# Base64 decoded and written to a file
Expand Down Expand Up @@ -284,6 +284,9 @@ function revert() {

echo "NONONO use kubeadm to join node to host"
get_ca_certificate $JOIN_HOST
if [ $? -ne 0 ]; then
exit 1
fi
create_kubelet_bootstrap_config $JOIN_HOST $JOIN_TOKEN
if [ -f "${PATH_FILE_TMP}/kubeadm-flags.env.origin" ]; then
cp "${PATH_FILE_TMP}/kubeadm-flags.env.origin" "${PATH_KUBELET_LIB}" && \
Expand Down Expand Up @@ -457,16 +460,31 @@ function is_ipv6() {
fi
}

#######################################
# Poll the local apiserver proxy's /healthz endpoint until it answers "ok"
# or the retry budget is exhausted.
# Globals:   LOCAL_IP, LOCAL_PORT (read) — address the proxy listens on
# Outputs:   progress messages to stdout
# Returns:   0 when the proxy reports healthy, 1 after max_retries failures
#######################################
function wait_api_server_proxy_ready() {
    local retries=0
    local max_retries=10
    local sleep_duration=6

    while true; do
        # -k: the proxy fronts the apiserver with a certificate that does not
        # cover the local VIP, so TLS verification is skipped for the probe.
        # -s: suppress curl's progress meter so only the body is captured.
        response=$(curl -s -k --connect-timeout 5 --max-time 10 "https://${LOCAL_IP}:${LOCAL_PORT}/healthz")

        if [ "$response" == "ok" ]; then
            echo "apiserver proxy is ready!"
            return 0
        fi

        retries=$((retries + 1))
        echo "apiserver proxy is not ready. Retrying(${retries}/${max_retries})..."
        if [ "$retries" -ge "$max_retries" ]; then
            echo "Max retries reached. apiserver proxy did not become ready."
            return 1
        fi
        sleep "$sleep_duration"
    done
}

echo "exec(1/6): get port of apiserver...."
function install_nginx_lb() {
echo "exec(1/7): get port of apiserver...."

PORT=$(grep 'server:' "${PATH_KUBERNETES}/${KUBELET_KUBE_CONFIG_NAME}" | awk -F '[:/]' '{print $NF}')

Expand All @@ -483,7 +501,7 @@ function install_lb() {
fi

# Start generating nginx.conf
echo "exec(2/6): generate nginx.conf...."
echo "exec(2/7): generate nginx.conf...."
cat <<EOL > "$PATH_FILE_TMP/nginx.conf"
error_log stderr notice;
worker_processes 1;
Expand Down Expand Up @@ -520,23 +538,135 @@ EOL
}
EOL

echo "exec(3/6): create static pod"
echo "exec(3/7): create static pod"
GenerateStaticNginxProxy true


echo "exec(4/6): restart static pod"
echo "exec(4/7): restart static pod"
mv "${PATH_KUBERNETES}/manifests/nginx-proxy.yaml" "${PATH_KUBERNETES}/nginx-proxy.yaml"
sleep 2
mv "${PATH_KUBERNETES}/nginx-proxy.yaml" "${PATH_KUBERNETES}/manifests/nginx-proxy.yaml"

echo "exec(5/7): wati nginx ready"
if wait_api_server_proxy_ready; then
echo "nginx is ready"
else
echo "nginx is not ready"
exit 1
fi

echo "exec(6/7): update kubelet.conf"
cp "${PATH_KUBERNETES}/${KUBELET_KUBE_CONFIG_NAME}" "${PATH_KUBERNETES}/${KUBELET_KUBE_CONFIG_NAME}.bak"
sed -i "s|server: .*|server: https://${LOCAL_IP}:${LOCAL_PORT}|" "${PATH_KUBERNETES}/${KUBELET_KUBE_CONFIG_NAME}"

echo "exec(7/7): restart kubelet"
systemctl restart kubelet
}

#######################################
# Install lvscare (IPVS-based) as a static-pod load balancer in front of the
# host apiservers, then repoint kubelet at the local VIP.
# Globals:   PATH_KUBERNETES, KUBELET_KUBE_CONFIG_NAME, SERVERS,
#            LOCAL_IP, LOCAL_PORT, DOCKER_IMAGE_LVSCARE (read)
# Outputs:   writes ${PATH_KUBERNETES}/manifests/kube-lvscare.yaml,
#            rewrites the kubelet kubeconfig, restarts kubelet
# Returns:   exits 1 on failure
#######################################
function install_lvscare_lb() {
    echo "exec(1/6): get port of apiserver...."

    # Last ':'/'/'-separated field of the "server:" line,
    # e.g. "server: https://1.2.3.4:6443" -> "6443".
    PORT=$(grep 'server:' "${PATH_KUBERNETES}/${KUBELET_KUBE_CONFIG_NAME}" | awk -F '[:/]' '{print $NF}')

    if [ -z "$PORT" ]; then
        echo "can not get port"
        exit 1
    else
        echo "port is $PORT"
    fi

    # Start generating kube-lvscare.yaml
    echo "exec(2/6): generate kube-lvscare.yaml...."

    # NOTE: "--health-schem" is lvscare's actual flag spelling — do not "fix" it.
    cat <<EOL > "$PATH_KUBERNETES/manifests/kube-lvscare.yaml"
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: kube-lvscare
  name: kube-lvscare
  namespace: kube-system
spec:
  containers:
  - args:
    - care
    - --vs
    - ${LOCAL_IP}:${LOCAL_PORT}
    - --health-path
    - /healthz
    - --health-schem
    - https
EOL

    # Append one "--rs <server>:<port>" pair per real server; IPv6 addresses
    # must be bracketed so the address is separated from the port.
    for SERVER in "${SERVERS[@]}"; do
        echo "    - --rs" >> "$PATH_KUBERNETES/manifests/kube-lvscare.yaml"
        if is_ipv6 "$SERVER"; then
            echo "    - [$SERVER]:$PORT" >> "$PATH_KUBERNETES/manifests/kube-lvscare.yaml"
        else
            echo "    - $SERVER:$PORT" >> "$PATH_KUBERNETES/manifests/kube-lvscare.yaml"
        fi
    done

    # Continue writing the rest of the kube-lvscare.yaml file
    cat <<EOL >> "$PATH_KUBERNETES/manifests/kube-lvscare.yaml"
    command:
    - /usr/bin/lvscare
    image: $DOCKER_IMAGE_LVSCARE
    imagePullPolicy: Always
    name: kube-lvscare
    resources: {}
    securityContext:
      privileged: true
    volumeMounts:
    - mountPath: /lib/modules
      name: lib-modules
      readOnly: true
  hostNetwork: true
  volumes:
  - hostPath:
      path: /lib/modules
    name: lib-modules
status: {}
EOL

    echo "exec(3/6): restart static pod"
    # Move the manifest out of the static-pod dir and back so kubelet
    # deletes and recreates the pod.
    mv "${PATH_KUBERNETES}/manifests/kube-lvscare.yaml" "${PATH_KUBERNETES}/kube-lvscare.yaml"
    sleep 2
    mv "${PATH_KUBERNETES}/kube-lvscare.yaml" "${PATH_KUBERNETES}/manifests/kube-lvscare.yaml"

    echo "exec(4/6): wait lvscare ready"
    if wait_api_server_proxy_ready; then
        echo "lvscare is ready"
    else
        echo "lvscare is not ready"
        exit 1
    fi

    echo "exec(5/6): update kubelet.conf"
    cp "${PATH_KUBERNETES}/${KUBELET_KUBE_CONFIG_NAME}" "${PATH_KUBERNETES}/${KUBELET_KUBE_CONFIG_NAME}.bak"
    sed -i "s|server: .*|server: https://${LOCAL_IP}:${LOCAL_PORT}|" "${PATH_KUBERNETES}/${KUBELET_KUBE_CONFIG_NAME}"
    # The apiserver certificate does not include the VIP in its SANs, so TLS
    # verification must be skipped when talking to the VIP.
    sed -i 's|certificate-authority-data: .*|insecure-skip-tls-verify: true|' "${PATH_KUBERNETES}/${KUBELET_KUBE_CONFIG_NAME}"

    echo "exec(6/6): restart kubelet"
    systemctl restart kubelet
}

#######################################
# Dispatch to the configured apiserver proxy installer.
# Globals:   USE_PROXY (read; defaulted to "LVSCARE" when unset/empty)
# Valid values: NGINX | LVSCARE | NONE
# Returns:   exits 0 when no proxy is configured, exits 1 on an
#            unrecognized USE_PROXY value
#######################################
function install_lb() {
    # ${USE_PROXY:-} keeps this safe under `set -u` when the env file
    # predates the USE_PROXY option.
    if [ -z "${USE_PROXY:-}" ]; then
        export USE_PROXY="LVSCARE"
    fi

    case "$USE_PROXY" in
        NGINX)
            install_nginx_lb
            ;;
        LVSCARE)
            install_lvscare_lb
            ;;
        NONE)
            # Explicitly configured to run without an apiserver proxy.
            exit 0
            ;;
        *)
            # Fail loudly on typos instead of silently skipping the proxy
            # (the old code treated any unknown value like NONE).
            echo "unknown USE_PROXY value: $USE_PROXY (expected NGINX, LVSCARE or NONE)" >&2
            exit 1
            ;;
    esac
}

# See how we were called.
case "$1" in
unjoin)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,17 @@ func (p *PrefixedLogger) Infof(format string, args ...interface{}) {
}
}

// Warn logs args at warning severity, prepending p.prefix, but only when the
// logger's verbosity level is enabled. Depth 1 attributes the log entry to
// our caller rather than to this wrapper.
func (p *PrefixedLogger) Warn(args ...interface{}) {
	if p.level.Enabled() {
		klog.WarningDepth(1, append([]interface{}{p.prefix}, args...)...)
	}
}
// Warnf logs a printf-style message at warning severity, with p.prefix
// concatenated onto the format string, but only when the logger's verbosity
// level is enabled. Depth 1 attributes the entry to our caller.
// NOTE(review): p.prefix is part of the format string here, so a '%' in the
// prefix would be interpreted by Sprintf — presumably prefixes never contain
// one; verify at construction time.
func (p *PrefixedLogger) Warnf(format string, args ...interface{}) {
	if p.level.Enabled() {
		klog.WarningDepth(1, fmt.Sprintf(p.prefix+format, args...))
	}
}

// Error logs args at error severity, prepending p.prefix. Unlike Warn/Warnf
// it is unconditional — errors are emitted regardless of the verbosity level.
func (p *PrefixedLogger) Error(args ...interface{}) {
	klog.ErrorDepth(1, append([]interface{}{p.prefix}, args...)...)
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -28,18 +28,18 @@ func RunWithRetry(ctx context.Context, task task.Task, opt task.TaskOpt, preArgs
break
}
waitTime := 3 * (i + 1)
opt.Loger().Infof("work flow retry %d after %ds, task name: %s, err: %s", i, waitTime, task.Name, err)
opt.Loger().Warnf("work flow retry %d after %ds, task name: %s, err: %s", i, waitTime, task.Name, err)
time.Sleep(time.Duration(waitTime) * time.Second)
} else {
break
}
}
if err != nil {
if task.ErrorIgnore {
opt.Loger().Infof("work flow ignore err, task name: %s, err: %s", task.Name, err)
opt.Loger().Warnf("work flow ignore err, task name: %s, err: %s", task.Name, err)
return nil, nil
}
opt.Loger().Infof("work flow interrupt, task name: %s, err: %s", task.Name, err)
opt.Loger().Warnf("work flow interrupt, task name: %s, err: %s", task.Name, err)
return nil, err
}
return args, nil
Expand Down

0 comments on commit 8f572f9

Please sign in to comment.