[pull] main from kosmos-io:main #19

Merged · 4 commits · Jan 3, 2025
6 changes: 6 additions & 0 deletions deploy/crds/kosmos.io_kubenestconfigurations.yaml
@@ -57,6 +57,9 @@ spec:
type: string
etcdUnitSize:
type: string
externalPort:
format: int32
type: integer
forceDestroy:
description: todo Group according to the parameters of apiserver,
etcd, coredns, etc. ForceDestroy indicates whether to force destroy
@@ -77,6 +80,9 @@
type: string
type: array
type: object
useNodeLocalDNS:
default: false
type: boolean
useTenantDNS:
default: false
type: boolean
5 changes: 4 additions & 1 deletion deploy/crds/kosmos.io_virtualclusters.yaml
@@ -75,8 +75,8 @@ spec:
etcdUnitSize:
type: string
externalPort:
type: integer
format: int32
type: integer
forceDestroy:
description: todo Group according to the parameters of apiserver,
etcd, coredns, etc. ForceDestroy indicates whether to force
@@ -98,6 +98,9 @@
type: string
type: array
type: object
useNodeLocalDNS:
default: false
type: boolean
useTenantDNS:
default: false
type: boolean
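For orientation, here is a hedged sketch of how the two new fields might be set once these CRDs are applied; the kubeInKubeConfig nesting, namespace, and values are assumptions based on the Go type later in this diff, not something the PR shows directly.

# Illustrative only: field placement under spec.kubeInKubeConfig is assumed.
kubectl apply -f - <<'EOF'
apiVersion: kosmos.io/v1alpha1
kind: VirtualCluster
metadata:
  name: demo-vc
  namespace: demo-ns
spec:
  kubeInKubeConfig:
    externalPort: 30443      # int32: externally exposed apiserver port (assumed semantics)
    useNodeLocalDNS: true    # new field, defaults to false
EOF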
32 changes: 29 additions & 3 deletions hack/k8s-in-k8s/generate_env.sh
@@ -106,6 +106,7 @@ PATH_KUBERNETES=$(GetDirectory $PATH_KUBERNETES_PKI)
HOST_CORE_DNS=$(GetKubeDnsClusterIP)

DOCKER_IMAGE_NGINX="registry.paas/cmss/nginx:1.21.4"
DOCKER_IMAGE_LVSCARE="registry.paas/cmss/lvscare:1.0.0"

master_lables=("master", "control-plane")

@@ -157,10 +158,35 @@ KUBELET_CONF_TIMEOUT=30

# load balance
DOCKER_IMAGE_NGINX=$DOCKER_IMAGE_NGINX
DOCKER_IMAGE_LVSCARE=$DOCKER_IMAGE_LVSCARE
SERVERS=($SERVERS)
LOCAL_PORT="6443"
LOCAL_IP="127.0.0.1" # [::1]
USE_NGINX=true

# Proxy Configuration Options
# Specify the proxy server to be used for traffic management or load balancing.
# Available options for USE_PROXY:
# - "NGINX" : Use NGINX as the proxy server.
# - "LVSCARE" : Use LVSCARE for load balancing (based on IPVS).
# - "NONE" : No proxy server will be used.
# Note: When USE_PROXY is set to "NONE", no proxy service will be configured.
USE_PROXY="LVSCARE" # Current proxy setting: LVSCARE for load balancing.

# Proxy Service Port Configuration
# LOCAL_PORT specifies the port on which the proxy service listens.
# Example:
# - For Kubernetes setups, this is typically the API server port.
LOCAL_PORT="6443" # Proxy service listening port (default: 6443 for Kubernetes API).

# Proxy Address Configuration
# LOCAL_IP specifies the address of the proxy service.
# - When USE_PROXY is set to "NGINX":
# - Use LOCAL_IP="127.0.0.1" (IPv4) or LOCAL_IP="[::1]" (IPv6 loopback).
# - When USE_PROXY is set to "LVSCARE":
# - Use LOCAL_IP as the VIP (e.g., "192.0.0.2") for LVSCARE load balancing.
# - Ensure this address is added to the "excludeCIDRs" list in the kube-proxy configuration file
# to avoid routing conflicts.
LOCAL_IP="192.0.0.2" # LVSCARE setup: Proxy address and VIP for load balancing.


CRI_SOCKET=$CRI_SOCKET

function GenerateKubeadmConfig() {
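Regarding the excludeCIDRs note in the LVSCARE comments above, a hedged sketch of what that step can look like on a kubeadm-managed cluster; it assumes kube-proxy runs in IPVS mode and that its ConfigMap renders the default as "excludeCIDRs: null", so adapt it to however kube-proxy is managed in your environment.

# Add the LVSCARE VIP to kube-proxy's IPVS exclude list so kube-proxy
# leaves the 192.0.0.2 virtual server alone (illustrative, kubeadm layout assumed).
kubectl -n kube-system get configmap kube-proxy -o yaml \
  | sed 's|excludeCIDRs: null|excludeCIDRs: ["192.0.0.2/32"]|' \
  | kubectl apply -f -
kubectl -n kube-system rollout restart daemonset kube-proxy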
166 changes: 152 additions & 14 deletions hack/k8s-in-k8s/kubelet_node_helper.sh
@@ -8,6 +8,7 @@ LOG_NAME=${2:-kubelet}
JOIN_HOST=$2
JOIN_TOKEN=$3
JOIN_CA_HASH=$4
NODE_LOCAL_DNS_ADDRESS=$3


function cri_runtime_clean() {
@@ -202,8 +203,8 @@ function get_ca_certificate() {

# verify the kubeconfig data is not empty
if [ -z "$kubeconfig_data" ]; then
echo "Failed to extract certificate-authority-data."
return 1
echo "Failed to extract certificate-authority-data."
return 1
fi

# Base64 decoded and written to a file
@@ -284,6 +285,9 @@ function revert() {

echo "NONONO use kubeadm to join node to host"
get_ca_certificate $JOIN_HOST
if [ $? -ne 0 ]; then
exit 1
fi
create_kubelet_bootstrap_config $JOIN_HOST $JOIN_TOKEN
if [ -f "${PATH_FILE_TMP}/kubeadm-flags.env.origin" ]; then
cp "${PATH_FILE_TMP}/kubeadm-flags.env.origin" "${PATH_KUBELET_LIB}" && \
@@ -351,7 +355,14 @@ function join() {
exit 1
fi
echo "exec(4/8): set core dns address...."
sed -e "s|__DNS_ADDRESS__|$DNS_ADDRESS|g" -e "w ${PATH_KUBELET_CONF}/${KUBELET_CONFIG_NAME}" "$PATH_FILE_TMP"/"$KUBELET_CONFIG_NAME"
if [ -n "$NODE_LOCAL_DNS_ADDRESS" ]; then
sed -e "/__DNS_ADDRESS__/i - ${NODE_LOCAL_DNS_ADDRESS}" \
-e "s|__DNS_ADDRESS__|${DNS_ADDRESS}|g" \
"$PATH_FILE_TMP/$KUBELET_CONFIG_NAME" \
> "${PATH_KUBELET_CONF}/${KUBELET_CONFIG_NAME}"
else
sed -e "s|__DNS_ADDRESS__|$DNS_ADDRESS|g" -e "w ${PATH_KUBELET_CONF}/${KUBELET_CONFIG_NAME}" "$PATH_FILE_TMP"/"$KUBELET_CONFIG_NAME"
fi
if [ $? -ne 0 ]; then
exit 1
fi
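A minimal standalone check of the sed invocation above, assuming the kubelet config template carries a single __DNS_ADDRESS__ placeholder under clusterDNS (file names and addresses here are illustrative):

# Reproduce the node-local-dns insertion in isolation.
cat > /tmp/kubelet-config-template.yaml <<'EOF'
clusterDNS:
- __DNS_ADDRESS__
EOF
NODE_LOCAL_DNS_ADDRESS="169.254.20.10"
DNS_ADDRESS="10.96.0.10"
sed -e "/__DNS_ADDRESS__/i - ${NODE_LOCAL_DNS_ADDRESS}" \
    -e "s|__DNS_ADDRESS__|${DNS_ADDRESS}|g" \
    /tmp/kubelet-config-template.yaml
# Output:
# clusterDNS:
# - 169.254.20.10
# - 10.96.0.10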
@@ -457,16 +468,31 @@ function is_ipv6() {
fi
}

function install_lb() {
if [ -z "$USE_NGINX" ]; then
export USE_NGINX=false
fi
function wait_api_server_proxy_ready() {
local retries=0
local max_retries=10
local sleep_duration=6

if [ "$USE_NGINX" = false ]; then
exit 0
fi
while true; do
response=$(curl -k --connect-timeout 5 --max-time 10 https://${LOCAL_IP}:${LOCAL_PORT}/healthz)

if [ "$response" == "ok" ]; then
echo "apiserver proxy is ready!"
return 0
else
retries=$((retries + 1))
echo "apiserver proxy is not ready. Retrying(${retries}/${max_retries})..."
if [ "$retries" -ge "$max_retries" ]; then
echo "Max retries reached. apiserver proxy did not become ready."
return 1
fi
sleep $sleep_duration
fi
done
}
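The loop above is just a retrying wrapper around a manual probe; by hand it is roughly (address and port illustrative):

curl -k --connect-timeout 5 --max-time 10 https://127.0.0.1:6443/healthz
# prints "ok" once the proxy can reach a healthy apiserver backend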

echo "exec(1/6): get port of apiserver...."
function install_nginx_lb() {
echo "exec(1/7): get port of apiserver...."

PORT=$(grep 'server:' "${PATH_KUBERNETES}/${KUBELET_KUBE_CONFIG_NAME}" | awk -F '[:/]' '{print $NF}')

@@ -483,7 +509,7 @@ function install_lb() {
fi

# Start generating nginx.conf
echo "exec(2/6): generate nginx.conf...."
echo "exec(2/7): generate nginx.conf...."
cat <<EOL > "$PATH_FILE_TMP/nginx.conf"
error_log stderr notice;
worker_processes 1;
@@ -520,23 +546,135 @@ EOL
}
EOL

echo "exec(3/6): create static pod"
echo "exec(3/7): create static pod"
GenerateStaticNginxProxy true


echo "exec(4/6): restart static pod"
echo "exec(4/7): restart static pod"
mv "${PATH_KUBERNETES}/manifests/nginx-proxy.yaml" "${PATH_KUBERNETES}/nginx-proxy.yaml"
sleep 2
mv "${PATH_KUBERNETES}/nginx-proxy.yaml" "${PATH_KUBERNETES}/manifests/nginx-proxy.yaml"

echo "exec(5/7): wati nginx ready"
if wait_api_server_proxy_ready; then
echo "nginx is ready"
else
echo "nginx is not ready"
exit 1
fi

echo "exec(6/7): update kubelet.conf"
cp "${PATH_KUBERNETES}/${KUBELET_KUBE_CONFIG_NAME}" "${PATH_KUBERNETES}/${KUBELET_KUBE_CONFIG_NAME}.bak"
sed -i "s|server: .*|server: https://${LOCAL_IP}:${LOCAL_PORT}|" "${PATH_KUBERNETES}/${KUBELET_KUBE_CONFIG_NAME}"

echo "exec(7/7): restart kubelet"
systemctl restart kubelet
}

function install_lvscare_lb() {
echo "exec(1/6): get port of apiserver...."

PORT=$(grep 'server:' "${PATH_KUBERNETES}/${KUBELET_KUBE_CONFIG_NAME}" | awk -F '[:/]' '{print $NF}')

if [ -z "$PORT" ]; then
echo "can not get port"
exit 1
else
echo "port is $PORT"
fi

# Start generating kube-lvscare.yaml
echo "exec(2/6): generate kube-lvscare.yaml...."

cat <<EOL > $PATH_KUBERNETES/manifests/kube-lvscare.yaml
apiVersion: v1
kind: Pod
metadata:
labels:
app: kube-lvscare
name: kube-lvscare
namespace: kube-system
spec:
containers:
- args:
- care
- --vs
- ${LOCAL_IP}:${LOCAL_PORT}
- --health-path
- /healthz
- --health-schem
- https
EOL

# Loop through the array and append each server to the kube-lvscare.yaml file
for SERVER in "${SERVERS[@]}"; do
if is_ipv6 "$SERVER"; then
echo " - --rs" >> "$PATH_KUBERNETES/manifests/kube-lvscare.yaml"
echo " - [$SERVER]:$PORT" >> "$PATH_KUBERNETES/manifests/kube-lvscare.yaml"
else
echo " - --rs" >> "$PATH_KUBERNETES/manifests/kube-lvscare.yaml"
echo " - $SERVER:$PORT" >> "$PATH_KUBERNETES/manifests/kube-lvscare.yaml"
fi
done

# Continue writing the rest of the kube-lvscare.yaml file
cat <<EOL >> "$PATH_KUBERNETES/manifests/kube-lvscare.yaml"
command:
- /usr/bin/lvscare
image: $DOCKER_IMAGE_LVSCARE
imagePullPolicy: Always
name: kube-lvscare
resources: {}
securityContext:
privileged: true
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
hostNetwork: true
volumes:
- hostPath:
path: /lib/modules
name: lib-modules
status: {}
EOL
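For example (illustrative addresses), with SERVERS=(192.168.0.11 192.168.0.12) and PORT=6443, the loop above appends these real-server args to the lvscare container:

#     - --rs
#     - 192.168.0.11:6443
#     - --rs
#     - 192.168.0.12:6443
# (indentation matches the args list in the generated manifest)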

echo "exec(3/6): restart static pod"
mv "${PATH_KUBERNETES}/manifests/kube-lvscare.yaml" "${PATH_KUBERNETES}/kube-lvscare.yaml"
sleep 2
mv "${PATH_KUBERNETES}/kube-lvscare.yaml" "${PATH_KUBERNETES}/manifests/kube-lvscare.yaml"

echo "exec(4/6): wait lvscare ready"
if wait_api_server_proxy_ready; then
echo "lvscare is ready"
else
echo "lvscare is not ready"
exit 1
fi

echo "exec(5/6): update kubelet.conf"
cp "${PATH_KUBERNETES}/${KUBELET_KUBE_CONFIG_NAME}" "${PATH_KUBERNETES}/${KUBELET_KUBE_CONFIG_NAME}.bak"
sed -i "s|server: .*|server: https://${LOCAL_IP}:${LOCAL_PORT}|" "${PATH_KUBERNETES}/${KUBELET_KUBE_CONFIG_NAME}"
sed -i 's|certificate-authority-data: .*|insecure-skip-tls-verify: true|' "${PATH_KUBERNETES}/${KUBELET_KUBE_CONFIG_NAME}"

echo "exec(6/6): restart kubelet"
systemctl restart kubelet
}
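After the two sed edits above, the kubelet kubeconfig points at the local VIP and skips CA verification; a quick way to confirm this, with illustrative values:

grep -E 'server:|insecure-skip-tls-verify:' "${PATH_KUBERNETES}/${KUBELET_KUBE_CONFIG_NAME}"
#     insecure-skip-tls-verify: true
#     server: https://192.0.0.2:6443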

function install_lb() {
if [ -z "$USE_PROXY" ]; then
export USE_PROXY="LVSCARE"
fi

if [ "$USE_PROXY" = "NGINX" ]; then
install_nginx_lb
elif [ "$USE_PROXY" = "LVSCARE" ]; then
install_lvscare_lb
else
exit 0
fi
}

# See how we were called.
case "$1" in
unjoin)
3 changes: 3 additions & 0 deletions pkg/apis/kosmos/v1alpha1/kubenestconfiguration_types.go
@@ -91,6 +91,9 @@ type KubeInKubeConfig struct {
UseTenantDNS bool `yaml:"useTenantDNS" json:"useTenantDNS,omitempty"`
// +optional
ExternalPort int32 `json:"externalPort,omitempty"`
// +kubebuilder:default=false
// +optional
UseNodeLocalDNS bool `yaml:"useNodeLocalDNS" json:"useNodeLocalDNS,omitempty"`
}

// TenantEntrypoint contains the configuration for the tenant entrypoint.
6 changes: 6 additions & 0 deletions pkg/generated/openapi/zz_generated.openapi.go

Some generated files are not rendered by default.

6 changes: 6 additions & 0 deletions pkg/kubenest/constants/constant.go
@@ -137,6 +137,12 @@ const (

//in virtual cluster
APIServerExternalService = "api-server-external-service"

//nodelocaldns
NodeLocalDNSComponentName = "virtual-node-local-dns"
NodeLocalDNSIp = "169.254.20.10"
NodeLocalDNSClusterDomain = "cluster.local"
NodeLocalDNSService = "__PILLAR__DNS__SERVER__"
)
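For context on the pillar-style constant, this is how the upstream nodelocaldns manifest is typically parameterized (a sketch following the kubernetes/dns node-local-dns substitution pattern, not taken from this PR; the kube-dns ClusterIP is an assumed value):

localdns="169.254.20.10"     # NodeLocalDNSIp
domain="cluster.local"       # NodeLocalDNSClusterDomain
kubedns="10.96.0.10"         # assumed kube-dns ClusterIP
sed -e "s/__PILLAR__LOCAL__DNS__/$localdns/g" \
    -e "s/__PILLAR__DNS__DOMAIN__/$domain/g" \
    -e "s/__PILLAR__DNS__SERVER__/$kubedns/g" nodelocaldns.yaml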

type Action string
@@ -27,6 +27,17 @@ func (p *PrefixedLogger) Infof(format string, args ...interface{}) {
}
}

func (p *PrefixedLogger) Warn(args ...interface{}) {
if p.level.Enabled() {
klog.WarningDepth(1, append([]interface{}{p.prefix}, args...)...)
}
}
func (p *PrefixedLogger) Warnf(format string, args ...interface{}) {
if p.level.Enabled() {
klog.WarningDepth(1, fmt.Sprintf(p.prefix+format, args...))
}
}

func (p *PrefixedLogger) Error(args ...interface{}) {
klog.ErrorDepth(1, append([]interface{}{p.prefix}, args...)...)
}
@@ -254,9 +254,12 @@ func NewRemoteNodeJoinTask() Task {
Retry: true,
Run: func(ctx context.Context, to TaskOpt, _ interface{}) (interface{}, error) {
exectHelper := exector.NewExectorHelper(to.NodeInfo.Spec.NodeIP, "")

baseCmd := fmt.Sprintf("bash %s join %s", env.GetExectorShellName(), to.KubeDNSAddress)
if to.VirtualCluster.Spec.KubeInKubeConfig != nil && to.VirtualCluster.Spec.KubeInKubeConfig.UseNodeLocalDNS {
baseCmd = fmt.Sprintf("bash %s join %s %s", env.GetExectorShellName(), to.KubeDNSAddress, constants.NodeLocalDNSIp)
}
joinCmd := &exector.CMDExector{
Cmd: fmt.Sprintf("bash %s join %s", env.GetExectorShellName(), to.KubeDNSAddress),
Cmd: baseCmd,
}
to.Loger().Infof("join node %s with cmd: %s", to.NodeInfo.Name, joinCmd.Cmd)
ret := exectHelper.DoExector(ctx.Done(), joinCmd)
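Put together, the command the node agent runs ends up shaped like this when UseNodeLocalDNS is set (the script name returned by env.GetExectorShellName() and the addresses are assumptions for illustration):

bash kubelet_node_helper.sh join 10.96.0.10 169.254.20.10
# $2 is consumed as the cluster DNS address, $3 -> NODE_LOCAL_DNS_ADDRESS in the node script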