diff --git a/Dockerfile b/Dockerfile index efc9175d..0eef94a4 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -ARG TERWAY_POLICY_IMAGE=registry.cn-hongkong.aliyuncs.com/acs/terway:policy-4ac9e38@sha256:349ba69383445186d8375b32f40f0428bf1317514c7cf36713838a493817a4a7 +ARG TERWAY_POLICY_IMAGE=registry.cn-hongkong.aliyuncs.com/acs/terway:policy-d78b0c3@sha256:503a31bc708cec62b4f3276affd0d708a091148ce9bf4503744a3d1f3755b66f ARG UBUNTU_IMAGE=registry.cn-hangzhou.aliyuncs.com/acs/ubuntu:22.04-update ARG CILIUM_LLVM_IMAGE=quay.io/cilium/cilium-llvm:547db7ec9a750b8f888a506709adb41f135b952e@sha256:4d6fa0aede3556c5fb5a9c71bc6b9585475ac9b1064f516d4c45c8fb691c9d9e ARG CILIUM_BPFTOOL_IMAGE=quay.io/cilium/cilium-bpftool:78448c1a37ff2b790d5e25c3d8b8ec3e96e6405f@sha256:99a9453a921a8de99899ef82e0822f0c03f65d97005c064e231c06247ad8597d diff --git a/go.mod b/go.mod index 1f3c3c06..2d1c12f4 100644 --- a/go.mod +++ b/go.mod @@ -45,11 +45,11 @@ require ( k8s.io/client-go v0.27.9 k8s.io/code-generator v0.27.9 k8s.io/component-base v0.27.9 - k8s.io/klog/v2 v2.90.1 + k8s.io/klog/v2 v2.100.1 k8s.io/kubelet v0.27.9 - k8s.io/utils v0.0.0-20230209194617-a36077c30491 + k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 sigs.k8s.io/controller-runtime v0.15.3 - sigs.k8s.io/e2e-framework v0.0.7 + sigs.k8s.io/e2e-framework v0.3.0 sigs.k8s.io/yaml v1.3.0 ) @@ -64,12 +64,12 @@ require ( github.com/containerd/console v1.0.3 // indirect github.com/coreos/go-iptables v0.6.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/emicklei/go-restful/v3 v3.10.2 // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/go-logr/zapr v1.2.4 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect - github.com/go-openapi/jsonreference v0.20.1 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.22.3 // indirect github.com/go-playground/locales v0.14.0 // indirect github.com/go-playground/universal-translator v0.18.0 // indirect @@ -77,12 +77,12 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect - github.com/google/gnostic v0.5.7-v3refs // indirect + github.com/google/gnostic v0.6.9 // indirect github.com/google/go-cmp v0.5.9 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20230323073829-e72429f035bd // indirect github.com/gookit/color v1.5.3 // indirect - github.com/imdario/mergo v0.3.12 // indirect + github.com/imdario/mergo v0.3.15 // indirect github.com/inconshreveable/mousetrap v1.0.1 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect @@ -109,7 +109,7 @@ require ( github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/vishvananda/netns v0.0.4 // indirect - github.com/vladimirvivien/gexe v0.1.1 // indirect + github.com/vladimirvivien/gexe v0.2.0 // indirect github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect go.opencensus.io v0.24.0 // indirect go.uber.org/multierr v1.6.0 // indirect diff --git a/go.sum b/go.sum index 322e9521..c42e05f7 100644 --- a/go.sum +++ b/go.sum @@ -124,6 +124,7 @@ github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= 
github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= @@ -298,8 +299,8 @@ github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25Kn github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= -github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.10.2 h1:hIovbnmBTLjHXkqEBUz3HGpXZdM7ZrE9fJIZIqlJLqE= +github.com/emicklei/go-restful/v3 v3.10.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -312,6 +313,7 @@ github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= @@ -347,8 +349,8 @@ github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaL github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8= -github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= 
github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= @@ -421,8 +423,8 @@ github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= -github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= +github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= +github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -493,8 +495,9 @@ github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= +github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= @@ -813,13 +816,14 @@ github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmF github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= -github.com/vladimirvivien/gexe v0.1.1 h1:2A0SBaOSKH+cwLVdt6H+KkHZotZWRNLlWygANGw5DxE= -github.com/vladimirvivien/gexe v0.1.1/go.mod h1:LHQL00w/7gDUKIak24n801ABp8C+ni6eBht9vGVst8w= +github.com/vladimirvivien/gexe v0.2.0 h1:nbdAQ6vbZ+ZNsolCgSVb9Fno60kzSuvtzVh6Ytqi/xY= +github.com/vladimirvivien/gexe v0.2.0/go.mod h1:LHQL00w/7gDUKIak24n801ABp8C+ni6eBht9vGVst8w= github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= 
+github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs= github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no= @@ -957,6 +961,7 @@ golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= @@ -1078,6 +1083,7 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= @@ -1199,8 +1205,8 @@ google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= @@ -1329,8 +1335,8 @@ k8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAE k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= -k8s.io/klog/v2 
v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= +k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg= @@ -1339,8 +1345,8 @@ k8s.io/kubelet v0.27.9 h1:KT26xwkGOTaq8rLNpdKK7G4cdSw/4EfHyxGSPhX2T3o= k8s.io/kubelet v0.27.9/go.mod h1:dZ01D6udy2TGcZHd7uAv3iQRCO62wR9Q9sm0BhI+xIs= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20230209194617-a36077c30491 h1:r0BAOLElQnnFhE/ApUsg3iHdVYYPBjNSSOMowRZxxsY= -k8s.io/utils v0.0.0-20230209194617-a36077c30491/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk= +k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= @@ -1349,8 +1355,8 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyz sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/controller-runtime v0.15.3 h1:L+t5heIaI3zeejoIyyvLQs5vTVu/67IU2FfisVzFlBc= sigs.k8s.io/controller-runtime v0.15.3/go.mod h1:kp4jckA4vTx281S/0Yk2LFEEQe67mjg+ev/yknv47Ds= -sigs.k8s.io/e2e-framework v0.0.7 h1:nMv2oSPBLWARse2aBoqX5Wq3ox67w8jrhTGWGpccWDQ= -sigs.k8s.io/e2e-framework v0.0.7/go.mod h1:hdwYGVQg4bvDAah5eidNf2/qkG35qHjzuyMVr2A3oiY= +sigs.k8s.io/e2e-framework v0.3.0 h1:eqQALBtPCth8+ulTs6lcPK7ytV5rZSSHJzQHZph4O7U= +sigs.k8s.io/e2e-framework v0.3.0/go.mod h1:C+ef37/D90Dc7Xq1jQnNbJYscrUGpxrWog9bx2KIa+c= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= diff --git a/pkg/controller/pod-eni/eni_controller.go b/pkg/controller/pod-eni/eni_controller.go index 16ccb8dc..0601e67d 100644 --- a/pkg/controller/pod-eni/eni_controller.go +++ b/pkg/controller/pod-eni/eni_controller.go @@ -581,28 +581,23 @@ func (m *ReconcilePodENI) gcCRPodENIs(ctx context.Context) { // pod exist just update timestamp if err == nil { - // for non fixed-ip pod no need to update timeStamp - if !podENI.Spec.HaveFixedIP() { - return - } + if podRequirePodENI(p, m.crdMode) { + // for non fixed-ip pod no need to update timeStamp + if !podENI.Spec.HaveFixedIP() { + return + } + + ll.V(5).Info("update pod lastSeen to now") + update := podENI.DeepCopy() + update.Status.PodLastSeen = metav1.Now() - if !podRequirePodENI(p, m.crdMode) { - err = m.deletePodENI(ctx, &podENI) + err = m.client.Status().Patch(ctx, update, client.MergeFrom(&podENI)) if err != nil { - ll.Error(err, "error set podENI to ENIPhaseDeleting") + ll.Error(err, "error update 
timestamp") } return } - - ll.V(5).Info("update pod lastSeen to now") - update := podENI.DeepCopy() - update.Status.PodLastSeen = metav1.Now() - - err = m.client.Status().Patch(ctx, update, client.MergeFrom(&podENI)) - if err != nil { - ll.Error(err, "error update timestamp") - } - return + // pod not require pod eni, so follow the release strategy } switch podENI.Status.Phase { @@ -843,14 +838,6 @@ func (m *ReconcilePodENI) getNode(ctx context.Context, name string) (*corev1.Nod return node, err } -func (m *ReconcilePodENI) deletePodENI(ctx context.Context, podENI *v1beta1.PodENI) error { - update := podENI.DeepCopy() - update.Status.Phase = v1beta1.ENIPhaseDeleting - - _, err := common.UpdatePodENIStatus(ctx, m.client, update) - return err -} - func allocIDs(podENI *v1beta1.PodENI) []string { var ids []string for _, alloc := range podENI.Spec.Allocations { diff --git a/policy/cilium/0023-fix-sec-label.patch b/policy/cilium/0023-fix-sec-label.patch new file mode 100644 index 00000000..1d82bf10 --- /dev/null +++ b/policy/cilium/0023-fix-sec-label.patch @@ -0,0 +1,31 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: l1b0k +Date: Thu, 11 Apr 2024 15:45:33 +0800 +Subject: fix sec label + +Signed-off-by: l1b0k +--- + bpf/bpf_host.c | 7 +++++++ + 1 file changed, 7 insertions(+) + +diff --git a/bpf/bpf_host.c b/bpf/bpf_host.c +index e29bbc5c97..95d93e01aa 100644 +--- a/bpf/bpf_host.c ++++ b/bpf/bpf_host.c +@@ -563,6 +563,13 @@ handle_ipv4(struct __ctx_buff *ctx, __u32 secctx, + if (ep->flags & ENDPOINT_F_HOST) + return CTX_ACT_OK; + ++#ifdef ENABLE_ROUTING ++ info = lookup_ip4_remote_endpoint(ip4->saddr); ++ if (info && info->sec_label) { ++ return ipv4_local_delivery(ctx, ETH_HLEN, info->sec_label, ip4, ep, ++ METRIC_INGRESS, from_host, false); ++ } ++#endif + return ipv4_local_delivery(ctx, ETH_HLEN, secctx, ip4, ep, + METRIC_INGRESS, from_host, false); + } +-- +2.44.0 + diff --git a/tests/auto_network_policy.sh b/tests/auto_network_policy.sh deleted file mode 100755 index 699918a0..00000000 --- a/tests/auto_network_policy.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/sh - -# auto test network policy continuously with TEST_INTERVAL -TEST_INTERVAL=300 - -while true -do - echo "Begin test at $(date)" - - bats network_policy.bats - if [ $? -ne 0 ]; then - curl -X POST "https://oapi.dingtalk.com/robot/send?access_token=$TOKEN" -H 'cache-control: no-cache' -H 'content-type: application/json' -d '{ - "msgtype": "text", - "text": { - "content": "terway network policy test failed!" 
- } - }' - else - echo "Test succeed at $(date)" - fi - - sleep $TEST_INTERVAL -done diff --git a/tests/cni_ready.bats b/tests/cni_ready.bats deleted file mode 100644 index 2ebd2aa0..00000000 --- a/tests/cni_ready.bats +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env bats -load helpers - -@test "terway ds ready" { - terway_ready_count="$(kubectl get ds terway -n kube-system -o jsonpath='{.status.numberReady}')" - node_count="$(kubectl get node -o name | wc -l)" - echo $terway_ready_count " " $node_count - debug_output - [ "$terway_ready_count" -eq "$node_count" ] -} - -@test "node device plugin" { - if [ "$category" = "vpc" ]; then - device_plugin_count="$(kubectl get node -o yaml | grep aliyun/eni | wc -l)" - node_count="$(kubectl get node -o name | wc -l)" - [ "$device_plugin_count" -eq "$(( $node_count * 2 ))" ] - fi -} diff --git a/tests/config_test.go b/tests/config_test.go deleted file mode 100644 index 69db7cf7..00000000 --- a/tests/config_test.go +++ /dev/null @@ -1,293 +0,0 @@ -//go:build e2e - -package tests - -import ( - "flag" - "strconv" - "time" - - corev1 "k8s.io/api/core/v1" - - "github.com/AliyunContainerService/terway/pkg/apis/network.alibabacloud.com/v1beta1" -) - -var enableTrunk bool -var enablePolicy bool -var image string -var testNamespace = "network-test-" + strconv.FormatInt(time.Now().Unix(), 10) -var httpTestPort = 81 -var httpsTestPort = 444 - -func init() { - flag.BoolVar(&enableTrunk, "trunk", false, "install trunk policy") - flag.BoolVar(&enablePolicy, "policy", false, "install network policy") - flag.StringVar(&image, "image", "l1b0k/echo", "custom test image") -} - -type ResConfig struct { - Description string - - Name string - Namespace string - Labels map[string]string -} - -type PodResConfig struct { - *ResConfig - HostNetwork bool - Replicas int32 -} - -type ServiceResConfig struct { - *ResConfig - PodSelectLabels map[string]string - Type corev1.ServiceType - Headless bool -} - -type NetworkPolicyConfig struct { - *ResConfig - PodSelectLabels map[string]string - IngressPodLabels map[string]string - IngressNamespaceLabels map[string]string -} - -type PodNetworkingConfig struct { - *ResConfig - PodSelectLabels map[string]string - NamespaceLabels map[string]string - IPType v1beta1.AllocationType -} - -type Resource struct { - Label map[string]string -} - -type TestCase struct { - Type TestType - Skip bool // skip this case or not - - Src Resource - Dst Resource - - Status bool // status true or false -} - -type TestType string - -const ( - TestTypePodToPod TestType = "pod2pod" - TestTypePodToServiceIP TestType = "pod2serviceIP" - TestTypePodToServiceName TestType = "pod2serviceName" -) - -var podConnA = PodResConfig{ - ResConfig: &ResConfig{ - Description: "", - Name: "container-network-deploy-src", - Namespace: testNamespace, - Labels: map[string]string{ - "app": "container-network-pod-src", - "e2e": "true", - "ref": "deployment", - "access": "true", // when enable policy, src pod can access dst pod - }, - }, - HostNetwork: false, - Replicas: 3, -} - -var podConnB = PodResConfig{ - ResConfig: &ResConfig{ - Description: "", - Name: "host-network-deploy-src", - Namespace: testNamespace, - Labels: map[string]string{ - "app": "host-network-pod-src", - "e2e": "true", - "ref": "deployment", - "access": "false", // when enable policy, src pod can't access dst pod - }, - }, - HostNetwork: true, - Replicas: 3, -} - -var podConnC = PodResConfig{ - ResConfig: &ResConfig{ - Description: "", - Name: "container-network-deploy-dst", - Namespace: testNamespace, - Labels: 
map[string]string{ - "app": "container-network-pod-dst", - "e2e": "true", - "ref": "deployment", - }, - }, - HostNetwork: false, - Replicas: 2, -} - -var podConnD = PodResConfig{ - ResConfig: &ResConfig{ - Description: "", - Name: "container-network-sts-dst", - Namespace: testNamespace, - Labels: map[string]string{ - "app": "container-network-pod-dst", - "e2e": "true", - "ref": "stateful-set", - }, - }, - HostNetwork: false, - Replicas: 2, -} - -var podConnPolicy = PodResConfig{ - ResConfig: &ResConfig{ - Description: "", - Name: "container-network-policy-deploy-src", - Namespace: testNamespace, - Labels: map[string]string{ - "app": "container-network-policy-pod-src", - "e2e": "true", - "ref": "deployment", - "access": "false", // when enable policy, src pod can't access dst pod - }, - }, - HostNetwork: false, - Replicas: 3, -} - -var clusterIPService = ServiceResConfig{ - ResConfig: &ResConfig{ - Description: "", - Name: "cluster-ip-service", - Namespace: testNamespace, - Labels: map[string]string{ - "svc": "container-network-svc-dst", - "e2e": "true", - }, - }, - PodSelectLabels: map[string]string{ - "app": "container-network-pod-dst", - "e2e": "true", - "ref": "deployment", - }, - Type: corev1.ServiceTypeClusterIP, - Headless: false, -} - -var nodePortService = ServiceResConfig{ - ResConfig: &ResConfig{ - Description: "", - Name: "node-port-service", - Namespace: testNamespace, - Labels: map[string]string{ - "svc": "container-network-svc-dst", - "e2e": "true", - }, - }, - PodSelectLabels: map[string]string{ - "app": "container-network-pod-dst", - "e2e": "true", - "ref": "deployment", - }, - Type: corev1.ServiceTypeNodePort, - Headless: false, -} - -var loadBalancerService = ServiceResConfig{ - ResConfig: &ResConfig{ - Description: "", - Name: "load-balancer-service", - Namespace: testNamespace, - Labels: map[string]string{ - "svc": "container-network-svc-dst", - "e2e": "true", - }, - }, - PodSelectLabels: map[string]string{ - "app": "container-network-pod-dst", - "e2e": "true", - "ref": "deployment", - }, - Type: corev1.ServiceTypeLoadBalancer, - Headless: false, -} - -var headlessService = ServiceResConfig{ - ResConfig: &ResConfig{ - Description: "", - Name: "headless-service", - Namespace: testNamespace, - Labels: map[string]string{ - "svc": "container-network-svc-dst", - "e2e": "true", - }, - }, - PodSelectLabels: map[string]string{ - "app": "container-network-pod-dst", - "e2e": "true", - "ref": "deployment", - }, - Type: corev1.ServiceTypeClusterIP, - Headless: true, -} - -var networkPolicy = NetworkPolicyConfig{ - ResConfig: &ResConfig{ - Description: "", - Name: "access-policy", - Namespace: testNamespace, - }, - PodSelectLabels: map[string]string{ - "app": "container-network-pod-dst", - "e2e": "true", - }, - IngressPodLabels: map[string]string{ - "access": "true", - }, - IngressNamespaceLabels: map[string]string{ - "project": "network-test", - }, -} - -var elasticPodNetWorking = PodNetworkingConfig{ - ResConfig: &ResConfig{ - Description: "", - Name: "stateless", - }, - PodSelectLabels: map[string]string{ - "app": "container-network-pod-dst", - "e2e": "true", - "ref": "deployment", - }, - NamespaceLabels: map[string]string{ - "project": "network-test", - }, - IPType: v1beta1.AllocationType{ - Type: v1beta1.IPAllocTypeElastic, - }, -} - -var fixedPodNetWorking = PodNetworkingConfig{ - ResConfig: &ResConfig{ - Description: "", - Name: "fixed-ip", - }, - PodSelectLabels: map[string]string{ - "app": "container-network-pod-dst", - "e2e": "true", - "ref": "stateful-set", - }, - 
NamespaceLabels: map[string]string{ - "project": "network-test", - }, - IPType: v1beta1.AllocationType{ - Type: v1beta1.IPAllocTypeFixed, - ReleaseStrategy: v1beta1.ReleaseStrategyTTL, - ReleaseAfter: "5m0s", - }, -} diff --git a/tests/connecctive_test.go b/tests/connecctive_test.go new file mode 100644 index 00000000..1d32bf33 --- /dev/null +++ b/tests/connecctive_test.go @@ -0,0 +1,813 @@ +//go:build e2e + +package tests + +import ( + "bytes" + "context" + "fmt" + "strings" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "sigs.k8s.io/e2e-framework/klient" + "sigs.k8s.io/e2e-framework/klient/k8s" + "sigs.k8s.io/e2e-framework/klient/wait" + "sigs.k8s.io/e2e-framework/klient/wait/conditions" + "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/pkg/features" +) + +var resourceKey struct{} + +func getStack() []string { + var r []string + if testIPv4 { + r = append(r, "ipv4") + } + if testIPv6 { + r = append(r, "ipv6") + } + return r +} + +func TestConnective(t *testing.T) { + var feats []features.Feature + + mutateConfig := []struct { + name string + podFunc func(pod *Pod) *Pod + }{ + { + name: "normal config", + podFunc: func(pod *Pod) *Pod { + return pod + }, + }, + { + name: "alinux2 node", + podFunc: func(pod *Pod) *Pod { + return pod.WithNodeAffinity(map[string]string{"e2e-os": "alinux2"}) + }, + }, + { + name: "alinux3 node", + podFunc: func(pod *Pod) *Pod { + return pod.WithNodeAffinity(map[string]string{"e2e-os": "alinux3"}) + }, + }, + { + name: "trunk pod", + podFunc: func(pod *Pod) *Pod { + return pod.WithLabels(map[string]string{"trunk": "enable"}) + }, + }, + { + name: "trunk pod alinux2", + podFunc: func(pod *Pod) *Pod { + return pod.WithLabels(map[string]string{"trunk": "enable"}).WithNodeAffinity(map[string]string{"e2e-os": "alinux2"}) + }, + }, + { + name: "trunk pod alinux3", + podFunc: func(pod *Pod) *Pod { + return pod.WithLabels(map[string]string{"trunk": "enable"}).WithNodeAffinity(map[string]string{"e2e-os": "alinux3"}) + }, + }, + } + + for i := range mutateConfig { + name := mutateConfig[i].name + fn := mutateConfig[i].podFunc + + hairpin := features.New(fmt.Sprintf("PodConnective/hairpin-%s", name)). + Setup(func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context { + var objs []k8s.Object + + server := fn(NewPod("server", config.Namespace()). + WithLabels(map[string]string{"app": "server"}). + WithContainer("server", nginxImage, nil)) + + err := config.Client().Resources().Create(ctx, server.Pod) + if err != nil { + t.Error(err) + t.FailNow() + } + + objs = append(objs, server.Pod) + + for _, stack := range getStack() { + svc := NewService("server-"+stack, config.Namespace(), map[string]string{"app": "server"}). + ExposePort(80, "http").WithIPFamily(stack) + err = config.Client().Resources().Create(ctx, svc.Service) + if err != nil { + t.Error(err) + t.FailNow() + } + objs = append(objs, svc.Service) + } + ctx = context.WithValue(ctx, resourceKey, objs) + return ctx + }). + Assess("Pod can access own service", func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context { + server := fn(NewPod("server", config.Namespace()). + WithLabels(map[string]string{"app": "server"}). 
+				WithContainer("server", nginxImage, nil))
+
+				err := wait.For(conditions.New(config.Client().Resources()).PodReady(server.Pod),
+					wait.WithTimeout(parsedTimeout),
+					wait.WithInterval(1*time.Second))
+				if err != nil {
+					t.Error(err)
+					t.FailNow()
+				}
+
+				for _, stack := range getStack() {
+					// https://github.com/cilium/cilium/issues/13891 cilium does not support ipv6 hairpin
+					if stack == "ipv6" && ipvlan {
+						continue
+					}
+
+					err = pull(config.Client(), server.Namespace, server.Name, "server", "server-"+stack)
+					if err != nil {
+						t.Error(err)
+						t.FailNow()
+					}
+				}
+
+				return ctx
+			}).
+			Feature()
+
+		podSameNode := features.New(fmt.Sprintf("PodConnective/podSameNode-%s", name)).
+			Setup(func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
+				var objs []k8s.Object
+
+				server := fn(NewPod("server", config.Namespace()).
+					WithLabels(map[string]string{"app": "server"}).
+					WithContainer("server", nginxImage, nil))
+
+				err := config.Client().Resources().Create(ctx, server.Pod)
+				if err != nil {
+					t.Error(err)
+					t.FailNow()
+				}
+
+				client := fn(NewPod("client", config.Namespace()).
+					WithLabels(map[string]string{"app": "client"}).
+					WithContainer("client", nginxImage, nil)).
+					WithPodAffinity(map[string]string{"app": "server"})
+
+				err = config.Client().Resources().Create(ctx, client.Pod)
+				if err != nil {
+					t.Error(err)
+					t.FailNow()
+				}
+
+				objs = append(objs, server.Pod, client.Pod)
+
+				for _, stack := range getStack() {
+					svc := NewService("server-"+stack, config.Namespace(), map[string]string{"app": "server"}).
+						ExposePort(80, "http").
+						WithIPFamily(stack)
+
+					err = config.Client().Resources().Create(ctx, svc.Service)
+					if err != nil {
+						t.Error(err)
+						t.FailNow()
+					}
+					objs = append(objs, svc.Service)
+				}
+				ctx = context.WithValue(ctx, resourceKey, objs)
+
+				return ctx
+			}).
+			Assess("Pod can access server", func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
+				server := fn(NewPod("server", config.Namespace()).
+					WithLabels(map[string]string{"app": "server"}).
+					WithContainer("server", nginxImage, nil))
+
+				client := fn(NewPod("client", config.Namespace()).
+					WithLabels(map[string]string{"app": "client"}).
+					WithContainer("client", nginxImage, nil)).
+					WithPodAffinity(map[string]string{"app": "server"})
+
+				err := wait.For(conditions.New(config.Client().Resources()).PodReady(client.Pod),
+					wait.WithTimeout(parsedTimeout),
+					wait.WithInterval(1*time.Second))
+				if err != nil {
+					t.Error(err)
+					t.FailNow()
+				}
+				err = wait.For(conditions.New(config.Client().Resources()).PodReady(server.Pod),
+					wait.WithTimeout(parsedTimeout),
+					wait.WithInterval(1*time.Second))
+				if err != nil {
+					t.Error(err)
+					t.FailNow()
+				}
+
+				for _, stack := range getStack() {
+					err = pull(config.Client(), client.Namespace, client.Name, "client", "server-"+stack)
+					if err != nil {
+						t.Error(err)
+						t.FailNow()
+					}
+				}
+
+				return ctx
+			}).
+			Feature()
+
+		podDifferentNode := features.New(fmt.Sprintf("PodConnective/podDifferentNode-%s", name)).
+			Setup(func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
+				var objs []k8s.Object
+
+				server := fn(NewPod("server", config.Namespace()).
+					WithLabels(map[string]string{"app": "server"}).
+					WithContainer("server", nginxImage, nil))
+
+				err := config.Client().Resources().Create(ctx, server.Pod)
+				if err != nil {
+					t.Error(err)
+					t.FailNow()
+				}
+
+				client := fn(NewPod("client", config.Namespace()).
+					WithLabels(map[string]string{"app": "client"}).
+					WithContainer("client", nginxImage, nil)).
+					WithPodAntiAffinity(map[string]string{"app": "server"})
+
+				err = config.Client().Resources().Create(ctx, client.Pod)
+				if err != nil {
+					t.Error(err)
+					t.FailNow()
+				}
+
+				objs = append(objs, server.Pod, client.Pod)
+
+				for _, stack := range getStack() {
+					svc := NewService("server-"+stack, config.Namespace(), map[string]string{"app": "server"}).
+						ExposePort(80, "http").
+						WithIPFamily(stack)
+
+					err = config.Client().Resources().Create(ctx, svc.Service)
+					if err != nil {
+						t.Error(err)
+						t.FailNow()
+					}
+					objs = append(objs, svc.Service)
+				}
+				ctx = context.WithValue(ctx, resourceKey, objs)
+
+				return ctx
+			}).
+			Assess("Pod can access server", func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
+				server := fn(NewPod("server", config.Namespace()).
+					WithLabels(map[string]string{"app": "server"}).
+					WithContainer("server", nginxImage, nil))
+
+				client := fn(NewPod("client", config.Namespace()).
+					WithLabels(map[string]string{"app": "client"}).
+					WithContainer("client", nginxImage, nil)).
+					WithPodAntiAffinity(map[string]string{"app": "server"})
+
+				err := wait.For(conditions.New(config.Client().Resources()).PodReady(client.Pod),
+					wait.WithTimeout(parsedTimeout),
+					wait.WithInterval(1*time.Second))
+				if err != nil {
+					t.Error(err)
+					t.FailNow()
+				}
+				err = wait.For(conditions.New(config.Client().Resources()).PodReady(server.Pod),
+					wait.WithTimeout(parsedTimeout),
+					wait.WithInterval(1*time.Second))
+				if err != nil {
+					t.Error(err)
+					t.FailNow()
+				}
+
+				for _, stack := range getStack() {
+					err = pull(config.Client(), client.Namespace, client.Name, "client", "server-"+stack)
+					if err != nil {
+						t.Error(err)
+						t.FailNow()
+					}
+				}
+
+				return ctx
+			}).
+			Feature()
+
+		hostToPodSameNode := features.New(fmt.Sprintf("PodConnective/hostToSameNode-%s", name)).
+			Setup(func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
+				var objs []k8s.Object
+
+				server := fn(NewPod("server", config.Namespace()).
+					WithLabels(map[string]string{"app": "server"}).
+					WithContainer("server", nginxImage, nil))
+
+				err := config.Client().Resources().Create(ctx, server.Pod)
+				if err != nil {
+					t.Error(err)
+					t.FailNow()
+				}
+
+				client := fn(NewPod("client", config.Namespace()).
+					WithLabels(map[string]string{"app": "client"}).
+					WithContainer("client", nginxImage, nil)).
+					WithPodAffinity(map[string]string{"app": "server"}).
+					WithDNSPolicy(corev1.DNSClusterFirstWithHostNet).
+					WithHostNetwork()
+
+				err = config.Client().Resources().Create(ctx, client.Pod)
+				if err != nil {
+					t.Error(err)
+					t.FailNow()
+				}
+
+				objs = append(objs, server.Pod, client.Pod)
+
+				for _, stack := range getStack() {
+					svc := NewService("server-"+stack, config.Namespace(), map[string]string{"app": "server"}).
+						ExposePort(80, "http").
+						WithIPFamily(stack)
+
+					err = config.Client().Resources().Create(ctx, svc.Service)
+					if err != nil {
+						t.Error(err)
+						t.FailNow()
+					}
+					objs = append(objs, svc.Service)
+				}
+				ctx = context.WithValue(ctx, resourceKey, objs)
+
+				return ctx
+			}).
+			Assess("Pod can access server", func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
+				server := fn(NewPod("server", config.Namespace()).
+					WithLabels(map[string]string{"app": "server"}).
+					WithContainer("server", nginxImage, nil))
+
+				client := fn(NewPod("client", config.Namespace()).
+					WithLabels(map[string]string{"app": "client"}).
+					WithContainer("client", nginxImage, nil)).
+					WithPodAffinity(map[string]string{"app": "server"})
+
+				err := wait.For(conditions.New(config.Client().Resources()).PodReady(client.Pod),
+					wait.WithTimeout(parsedTimeout),
+					wait.WithInterval(1*time.Second))
+				if err != nil {
+					t.Error(err)
+					t.FailNow()
+				}
+				err = wait.For(conditions.New(config.Client().Resources()).PodReady(server.Pod),
+					wait.WithTimeout(parsedTimeout),
+					wait.WithInterval(1*time.Second))
+				if err != nil {
+					t.Error(err)
+					t.FailNow()
+				}
+
+				for _, stack := range getStack() {
+					err = pull(config.Client(), client.Namespace, client.Name, "client", "server-"+stack)
+					if err != nil {
+						t.Error(err)
+						t.FailNow()
+					}
+				}
+
+				return ctx
+			}).
+			Feature()
+
+		feats = append(feats, hairpin, podSameNode, podDifferentNode, hostToPodSameNode)
+	}
+
+	testenv.Test(t, feats...)
+
+	if t.Failed() {
+		isFailed.Store(true)
+	}
+}
+
+func TestNetworkPolicy(t *testing.T) {
+	if !testNetworkPolicy {
+		t.Log("Skip networkPolicy tests")
+		return
+	}
+	var feats []features.Feature
+
+	mutateConfig := []struct {
+		name    string
+		podFunc func(pod *Pod) *Pod
+	}{
+		{
+			name: "normal config",
+			podFunc: func(pod *Pod) *Pod {
+				return pod
+			},
+		},
+		{
+			name: "alinux2 node",
+			podFunc: func(pod *Pod) *Pod {
+				return pod.WithNodeAffinity(map[string]string{"e2e-os": "alinux2"})
+			},
+		},
+		{
+			name: "alinux3 node",
+			podFunc: func(pod *Pod) *Pod {
+				return pod.WithNodeAffinity(map[string]string{"e2e-os": "alinux3"})
+			},
+		},
+		{
+			name: "trunk pod",
+			podFunc: func(pod *Pod) *Pod {
+				return pod.WithLabels(map[string]string{"trunk": "enable"})
+			},
+		},
+		{
+			name: "trunk pod alinux2",
+			podFunc: func(pod *Pod) *Pod {
+				return pod.WithLabels(map[string]string{"trunk": "enable"}).WithNodeAffinity(map[string]string{"e2e-os": "alinux2"})
+			},
+		},
+		{
+			name: "trunk pod alinux3",
+			podFunc: func(pod *Pod) *Pod {
+				return pod.WithLabels(map[string]string{"trunk": "enable"}).WithNodeAffinity(map[string]string{"e2e-os": "alinux3"})
+			},
+		},
+	}
+	for i := range mutateConfig {
+		name := mutateConfig[i].name
+		fn := mutateConfig[i].podFunc
+
+		healthCheck := features.New(fmt.Sprintf("NetworkPolicy/PodHealthCheck-%s", name)).
+			Setup(func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
+				var objs []k8s.Object
+
+				policy := NewNetworkPolicy("default-deny-ingress", config.Namespace()).
+					WithPolicyType(networkingv1.PolicyTypeIngress)
+
+				server := fn(NewPod("server", config.Namespace()).
+					WithLabels(map[string]string{"app": "server"}).
+					WithContainer("server", nginxImage, nil).
+					WithHealthCheck(80))
+
+				objs = append(objs, policy.NetworkPolicy, server.Pod)
+
+				for _, obj := range objs {
+					err := config.Client().Resources().Create(ctx, obj)
+					if err != nil {
+						t.Error(err)
+						t.FailNow()
+					}
+				}
+
+				ctx = context.WithValue(ctx, resourceKey, objs)
+
+				return ctx
+			}).
+			Assess("Pod should be ready", func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
+				server := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "server", Namespace: config.Namespace()}}
+				err := waitPodsReady(config.Client(), server)
+				if err != nil {
+					t.Error(err)
+					t.FailNow()
+				}
+
+				return ctx
+			}).Feature()
+
+		denyIngressSameNode := features.New(fmt.Sprintf("NetworkPolicy/DenyIngressSameNode-%s", name)).
+			Setup(func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
+				var objs []k8s.Object
+
+				policy := NewNetworkPolicy("deny-ingress", config.Namespace()).
+					WithPolicyType(networkingv1.PolicyTypeIngress).
+					WithPodSelector(map[string]string{"app": "deny-ingress"})
+
+				client := fn(NewPod("client", config.Namespace()).
+					WithLabels(map[string]string{"app": "client"}).
+					WithContainer("client", nginxImage, nil)).
+					WithPodAffinity(map[string]string{"app": "server"})
+
+				serverDenyIngress := fn(NewPod("server-deny-ingress", config.Namespace()).
+					WithLabels(map[string]string{"app": "deny-ingress"}).
+					WithContainer("server", nginxImage, nil))
+
+				server := fn(NewPod("server", config.Namespace()).
+					WithLabels(map[string]string{"app": "server"}).
+					WithContainer("server", nginxImage, nil).
+					WithPodAffinity(map[string]string{"app": "deny-ingress"}))
+
+				objs = append(objs, policy.NetworkPolicy, client.Pod, serverDenyIngress.Pod, server.Pod)
+
+				for _, stack := range getStack() {
+
+					denySvc := NewService("server-deny-ingress-"+stack, config.Namespace(), map[string]string{"app": "deny-ingress"}).WithIPFamily(stack).ExposePort(80, "http")
+
+					normalSvc := NewService("server-"+stack, config.Namespace(), map[string]string{"app": "server"}).WithIPFamily(stack).ExposePort(80, "http")
+
+					objs = append(objs, denySvc.Service, normalSvc.Service)
+				}
+
+				for _, obj := range objs {
+					err := config.Client().Resources().Create(ctx, obj)
+					if err != nil {
+						t.Error(err)
+						t.FailNow()
+					}
+				}
+
+				ctx = context.WithValue(ctx, resourceKey, objs)
+
+				return ctx
+			}).
+			Assess("Check ingress policy", func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
+				client := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "client", Namespace: config.Namespace()}}
+				server := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "server", Namespace: config.Namespace()}}
+				serverDenyIngress := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "server-deny-ingress", Namespace: config.Namespace()}}
+				err := waitPodsReady(config.Client(), client, server, serverDenyIngress)
+				if err != nil {
+					t.Error(err)
+					t.FailNow()
+				}
+
+				for _, stack := range getStack() {
+					err = pull(config.Client(), client.Namespace, client.Name, "client", "server-"+stack)
+					if err != nil {
+						t.Error(err)
+						t.FailNow()
+					}
+
+					err = pullFail(config.Client(), client.Namespace, client.Name, "client", "server-deny-ingress-"+stack)
+					if err != nil {
+						t.Error(err)
+						t.FailNow()
+					}
+				}
+				return ctx
+			}).Feature()
+
+		denyIngressOtherNode := features.New(fmt.Sprintf("NetworkPolicy/denyIngressOtherNode-%s", name)).
+			Setup(func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
+				var objs []k8s.Object
+
+				policy := NewNetworkPolicy("deny-ingress", config.Namespace()).
+					WithPolicyType(networkingv1.PolicyTypeIngress).
+					WithPodSelector(map[string]string{"app": "deny-ingress"})
+
+				client := fn(NewPod("client", config.Namespace()).
+					WithLabels(map[string]string{"app": "client"}).
+					WithContainer("client", nginxImage, nil)).
+					WithPodAntiAffinity(map[string]string{"app": "server"})
+
+				serverDenyIngress := fn(NewPod("server-deny-ingress", config.Namespace()).
+					WithLabels(map[string]string{"app": "deny-ingress"}).
+					WithContainer("server", nginxImage, nil))
+
+				server := fn(NewPod("server", config.Namespace()).
+					WithLabels(map[string]string{"app": "server"}).
+					WithContainer("server", nginxImage, nil).
+					WithPodAffinity(map[string]string{"app": "deny-ingress"}))
+
+				objs = append(objs, policy.NetworkPolicy, client.Pod, serverDenyIngress.Pod, server.Pod)
+
+				for _, stack := range getStack() {
+
+					denySvc := NewService("server-deny-ingress-"+stack, config.Namespace(), map[string]string{"app": "deny-ingress"}).WithIPFamily(stack).ExposePort(80, "http")
+
+					normalSvc := NewService("server-"+stack, config.Namespace(), map[string]string{"app": "server"}).WithIPFamily(stack).ExposePort(80, "http")
+
+					objs = append(objs, denySvc.Service, normalSvc.Service)
+				}
+
+				for _, obj := range objs {
+					err := config.Client().Resources().Create(ctx, obj)
+					if err != nil {
+						t.Error(err)
+						t.FailNow()
+					}
+				}
+
+				ctx = context.WithValue(ctx, resourceKey, objs)
+
+				return ctx
+			}).
+			Assess("Check ingress policy", func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
+				client := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "client", Namespace: config.Namespace()}}
+				server := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "server", Namespace: config.Namespace()}}
+				serverDenyIngress := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "server-deny-ingress", Namespace: config.Namespace()}}
+				err := waitPodsReady(config.Client(), client, server, serverDenyIngress)
+				if err != nil {
+					t.Error(err)
+					t.FailNow()
+				}
+
+				for _, stack := range getStack() {
+					err = pull(config.Client(), client.Namespace, client.Name, "client", "server-"+stack)
+					if err != nil {
+						t.Error(err)
+						t.FailNow()
+					}
+
+					err = pullFail(config.Client(), client.Namespace, client.Name, "client", "server-deny-ingress-"+stack)
+					if err != nil {
+						t.Error(err)
+						t.FailNow()
+					}
+				}
+				return ctx
+			}).Feature()
+
+		denyEgressSameNode := features.New(fmt.Sprintf("NetworkPolicy/denyEgressSameNode-%s", name)).
+			Setup(func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
+				var objs []k8s.Object
+
+				policy := NewNetworkPolicy("deny-egress", config.Namespace()).
+					WithPolicyType(networkingv1.PolicyTypeEgress).
+					WithPodSelector(map[string]string{"app": "client"})
+
+				client := fn(NewPod("client", config.Namespace()).
+					WithLabels(map[string]string{"app": "client"}).
+					WithContainer("client", nginxImage, nil)).
+					WithPodAffinity(map[string]string{"app": "server"})
+
+				server := fn(NewPod("server", config.Namespace()).
+					WithLabels(map[string]string{"app": "server"}).
+					WithContainer("server", nginxImage, nil))
+
+				objs = append(objs, policy.NetworkPolicy, client.Pod, server.Pod)
+
+				for _, stack := range getStack() {
+
+					normalSvc := NewService("server-"+stack, config.Namespace(), map[string]string{"app": "server"}).WithIPFamily(stack).ExposePort(80, "http")
+
+					objs = append(objs, normalSvc.Service)
+				}
+
+				for _, obj := range objs {
+					err := config.Client().Resources().Create(ctx, obj)
+					if err != nil {
+						t.Error(err)
+						t.FailNow()
+					}
+				}
+
+				ctx = context.WithValue(ctx, resourceKey, objs)
+
+				return ctx
+			}).
+			Assess("Check egress policy", func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
+				client := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "client", Namespace: config.Namespace()}}
+				server := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "server", Namespace: config.Namespace()}}
+				err := waitPodsReady(config.Client(), client, server)
+				if err != nil {
+					t.Error(err)
+					t.FailNow()
+				}
+
+				for _, stack := range getStack() {
+					err = pullFail(config.Client(), client.Namespace, client.Name, "client", "server-"+stack)
+					if err != nil {
+						t.Error(err)
+						t.FailNow()
+					}
+				}
+				return ctx
+			}).Feature()
+
+		denyEgressOtherNode := features.New(fmt.Sprintf("NetworkPolicy/denyEgressOtherNode-%s", name)).
+			Setup(func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
+				var objs []k8s.Object
+
+				policy := NewNetworkPolicy("deny-egress", config.Namespace()).
+					WithPolicyType(networkingv1.PolicyTypeEgress).
+					WithPodSelector(map[string]string{"app": "client"})
+
+				client := fn(NewPod("client", config.Namespace()).
+					WithLabels(map[string]string{"app": "client"}).
+					WithContainer("client", nginxImage, nil)).
+					WithPodAntiAffinity(map[string]string{"app": "server"})
+
+				server := fn(NewPod("server", config.Namespace()).
+					WithLabels(map[string]string{"app": "server"}).
+					WithContainer("server", nginxImage, nil))
+
+				objs = append(objs, policy.NetworkPolicy, client.Pod, server.Pod)
+
+				for _, stack := range getStack() {
+
+					normalSvc := NewService("server-"+stack, config.Namespace(), map[string]string{"app": "server"}).WithIPFamily(stack).ExposePort(80, "http")
+
+					objs = append(objs, normalSvc.Service)
+				}
+
+				for _, obj := range objs {
+					err := config.Client().Resources().Create(ctx, obj)
+					if err != nil {
+						t.Error(err)
+						t.FailNow()
+					}
+				}
+
+				ctx = context.WithValue(ctx, resourceKey, objs)
+
+				return ctx
+			}).
+			Assess("Check egress policy", func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
+				client := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "client", Namespace: config.Namespace()}}
+				server := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "server", Namespace: config.Namespace()}}
+				err := waitPodsReady(config.Client(), client, server)
+				if err != nil {
+					t.Error(err)
+					t.FailNow()
+				}
+
+				for _, stack := range getStack() {
+					err = pullFail(config.Client(), client.Namespace, client.Name, "client", "server-"+stack)
+					if err != nil {
+						t.Error(err)
+						t.FailNow()
+					}
+				}
+				return ctx
+			}).Feature()
+
+		feats = append(feats, healthCheck, denyIngressSameNode, denyIngressOtherNode, denyEgressSameNode, denyEgressOtherNode)
+	}
+
+	testenv.Test(t, feats...)
+	if t.Failed() {
+		isFailed.Store(true)
+	}
+}
+
+func pull(client klient.Client, namespace, name, container, target string) error {
+	errors := []error{}
+
+	err := wait.For(func(ctx context.Context) (done bool, err error) {
+		var stdout, stderr bytes.Buffer
+		cmd := []string{"curl", "-m", "2", "--retry", "3", "-I", target}
+		err = client.Resources().ExecInPod(ctx, namespace, name, container, cmd, &stdout, &stderr)
+		if err != nil {
+			errors = append(errors, fmt.Errorf("failed %s %w", cmd, err))
+			return false, nil
+		}
+		httpStatus := strings.Split(stdout.String(), "\n")[0]
+		if !strings.Contains(httpStatus, "200") {
+			return false, nil
+		}
+		return true, nil
+	},
+		wait.WithTimeout(parsedTimeout),
+		wait.WithInterval(1*time.Second))
+
+	if err != nil {
+		return utilerrors.NewAggregate(append(errors, err)) // keep the wait error so repeated non-200 responses still fail
+	}
+	return nil
+}
+
+func pullFail(client klient.Client, namespace, name, container, target string) error {
+	errors := []error{}
+
+	err := wait.For(func(ctx context.Context) (done bool, err error) {
+		var stdout, stderr bytes.Buffer
+		cmd := []string{"curl", "-m", "2", "--retry", "3", "-I", target}
+
+		err = client.Resources().ExecInPod(ctx, namespace, name, container, cmd, &stdout, &stderr)
+		if err != nil {
+			return true, nil
+		}
+
+		errors = append(errors, fmt.Errorf("connect succeeded, expected failure %s %s", cmd, stdout.String()))
+
+		return false, nil
+	},
+		wait.WithTimeout(7*time.Second),
+		wait.WithInterval(1*time.Second))
+
+	if err != nil {
+		return utilerrors.NewAggregate(errors)
+	}
+	return nil
+}
+
+func waitPodsReady(client klient.Client, pods ...*corev1.Pod) error {
+	for _, pod := range pods {
+		err := wait.For(conditions.New(client.Resources()).PodReady(pod),
+			wait.WithTimeout(parsedTimeout),
+			wait.WithInterval(1*time.Second))
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/tests/connection_test.go b/tests/connection_test.go
deleted file mode 100644
index 50e7069d..00000000
--- a/tests/connection_test.go
+++ /dev/null
@@ -1,567 +0,0 @@
-//go:build e2e
-// +build e2e
-
-package tests
-
-import (
-	"context"
-	"fmt"
-	"net"
-	"strings"
-	"testing"
-
-	"github.com/pkg/errors"
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/suite"
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/client-go/kubernetes"
-	"k8s.io/client-go/rest"
-	ctrl "sigs.k8s.io/controller-runtime"
-
-	"github.com/AliyunContainerService/terway/pkg/generated/clientset/versioned"
-	"github.com/AliyunContainerService/terway/pkg/utils/k8sclient"
-)
-
-func TestConnectionTestSuite(t *testing.T) {
-	suite.Run(t, new(ConnectionTestSuite))
-}
-
-type ConnectionTestSuite struct {
-	suite.Suite
-
-	RestConf               *rest.Config
-	ClientSet              kubernetes.Interface
-	PodNetworkingClientSet *versioned.Clientset
-	err                    error
-
-	TestCase []TestCase
-}
-
-func (s *ConnectionTestSuite) SetupSuite() {
-	s.RestConf = ctrl.GetConfigOrDie()
-	k8sclient.RegisterClients(s.RestConf)
-	s.ClientSet = k8sclient.K8sClient
-	s.PodNetworkingClientSet, _ = versioned.NewForConfig(s.RestConf)
-
-	ctx := context.Background()
-	s.T().Logf("test image: %v", image)
-	s.T().Logf("test namespace: %s", testNamespace)
-	s.T().Logf("enable trunk: %v", enableTrunk)
-	s.T().Logf("enable policy: %v", enablePolicy)
-	s.T().Logf("creating namespace %s", testNamespace)
-	if err := EnsureNamespace(ctx, s.ClientSet, testNamespace); err != nil {
-		s.err = err
-		s.T().Error(errors.Wrapf(err, "fail to create namespace %s", testNamespace))
-	}
-
-	if enableTrunk {
-		s.T().Logf("creating %s pod 
networking", elasticPodNetWorking.Name) - if _, err := EnsurePodNetworking(ctx, s.PodNetworkingClientSet, elasticPodNetWorking); err != nil { - s.err = err - s.T().Error(errors.Wrapf(err, "fail to create pod networking %s", elasticPodNetWorking.Name)) - } - - s.T().Logf("creating %s pod networking", fixedPodNetWorking.Name) - if _, err := EnsurePodNetworking(ctx, s.PodNetworkingClientSet, fixedPodNetWorking); err != nil { - s.err = err - s.T().Error(errors.Wrapf(err, "fail to create pod networking %s", fixedPodNetWorking.Name)) - } - } - - s.T().Logf("creating %s", podConnA.Name) - if _, err := EnsureDeployment(ctx, s.ClientSet, podConnA); err != nil { - s.err = err - s.T().Error(errors.Wrapf(err, "fail to create pod %s", podConnA.Name)) - } - - s.T().Logf("creating %s", podConnB.Name) - if _, err := EnsureDeployment(ctx, s.ClientSet, podConnB); err != nil { - s.err = err - s.T().Error(errors.Wrapf(err, "fail to create pod %s", podConnB.Name)) - } - - s.T().Logf("creating %s", podConnC.Name) - if _, err := EnsureDeployment(ctx, s.ClientSet, podConnC); err != nil { - s.err = err - s.T().Error(errors.Wrapf(err, "fail to create pod %s", podConnC.Name)) - } - - s.T().Logf("creating %s", podConnD.Name) - if _, err := EnsureStatefulSet(ctx, s.ClientSet, podConnD); err != nil { - s.err = err - s.T().Error(errors.Wrapf(err, "fail to create pod %s", podConnD.Name)) - } - - s.T().Logf("creating %s", podConnPolicy.Name) - if _, err := EnsureDeployment(ctx, s.ClientSet, podConnPolicy); err != nil { - s.err = err - s.T().Error(errors.Wrapf(err, "fail to create network policy %s", podConnPolicy.Name)) - } - - s.T().Logf("creating %s for %s", clusterIPService.Name, podConnC.Name) - if _, err := EnsureService(ctx, s.ClientSet, clusterIPService); err != nil { - s.err = err - s.T().Error(errors.Wrapf(err, "fail to create service %s", clusterIPService.Name)) - } - - s.T().Logf("creating %s for %s", nodePortService.Name, podConnC.Name) - if _, err := EnsureService(ctx, s.ClientSet, nodePortService); err != nil { - s.err = err - s.T().Error(errors.Wrapf(err, "fail to create service %s", nodePortService.Name)) - } - - s.T().Logf("creating %s for %s", loadBalancerService.Name, podConnC.Name) - if _, err := EnsureService(ctx, s.ClientSet, loadBalancerService); err != nil { - s.err = err - s.T().Error(errors.Wrapf(err, "fail to create service %s", loadBalancerService.Name)) - } - - s.T().Logf("creating %s for %s", headlessService.Name, podConnC.Name) - if _, err := EnsureService(ctx, s.ClientSet, headlessService); err != nil { - s.err = err - s.T().Error(errors.Wrapf(err, "fail to create service %s", headlessService.Name)) - } - - if enablePolicy { - s.T().Logf("creating network policy %s", networkPolicy.Name) - if _, err := EnsureNetworkPolicy(ctx, s.ClientSet, networkPolicy); err != nil { - s.err = err - s.T().Error(errors.Wrapf(err, "fail to create network policy %s", networkPolicy.Name)) - } - } - - s.TestCase = []TestCase{ - { - Type: TestTypePodToPod, - Skip: false, - Src: Resource{ - Label: map[string]string{ - "app": "container-network-pod-src", - "e2e": "true", - }, - }, - Dst: Resource{ - Label: map[string]string{ - "app": "container-network-pod-dst", - "e2e": "true", - }, - }, - Status: true, - }, - { - Type: TestTypePodToPod, - Skip: enablePolicy, - Src: Resource{ - Label: map[string]string{ - "app": "host-network-pod-src", - "e2e": "true", - }, - }, - Dst: Resource{ - Label: map[string]string{ - "app": "container-network-pod-dst", - "e2e": "true", - }, - }, - Status: true, - }, - { - Type: 
TestTypePodToPod, - Skip: !enablePolicy, - Src: Resource{ - Label: map[string]string{ - "app": "container-network-policy-pod-src", - "e2e": "true", - }, - }, - Dst: Resource{ - Label: map[string]string{ - "app": "container-network-pod-dst", - "e2e": "true", - }, - }, - Status: false, - }, - { - Type: TestTypePodToServiceIP, - Skip: false, - Src: Resource{ - Label: map[string]string{ - "app": "container-network-pod-src", - "e2e": "true", - }, - }, - Dst: Resource{ - Label: map[string]string{ - "svc": "container-network-svc-dst", - "e2e": "true", - }, - }, - Status: true, - }, - { - Type: TestTypePodToServiceIP, - Skip: enablePolicy, - Src: Resource{ - Label: map[string]string{ - "app": "host-network-pod-src", - "e2e": "true", - }, - }, - Dst: Resource{ - Label: map[string]string{ - "svc": "container-network-svc-dst", - "e2e": "true", - }, - }, - Status: true, - }, - { - Type: TestTypePodToServiceIP, - Skip: !enablePolicy, - Src: Resource{ - Label: map[string]string{ - "app": "container-network-policy-pod-src", - "e2e": "true", - }, - }, - Dst: Resource{ - Label: map[string]string{ - "svc": "container-network-svc-dst", - "e2e": "true", - }, - }, - Status: false, - }, - { - Type: TestTypePodToServiceName, - Skip: false, - Src: Resource{ - Label: map[string]string{ - "app": "container-network-pod-src", - "e2e": "true", - }, - }, - Dst: Resource{ - Label: map[string]string{ - "svc": "container-network-svc-dst", - "e2e": "true", - }, - }, - Status: true, - }, - { - Type: TestTypePodToServiceName, - Skip: enablePolicy, - Src: Resource{ - Label: map[string]string{ - "app": "host-network-pod-src", - "e2e": "true", - }, - }, - Dst: Resource{ - Label: map[string]string{ - "svc": "container-network-svc-dst", - "e2e": "true", - }, - }, - Status: false, - }, - { - Type: TestTypePodToServiceName, - Skip: !enablePolicy, - Src: Resource{ - Label: map[string]string{ - "app": "container-network-policy-pod-src", - "e2e": "true", - }, - }, - Dst: Resource{ - Label: map[string]string{ - "svc": "container-network-svc-dst", - "e2e": "true", - }, - }, - Status: false, - }, - } -} - -func (s *ConnectionTestSuite) TearDownSuite() { - - if s.err != nil { - s.T().Error(errors.Wrapf(s.err, "skip tear down resource in namespace %s, because of an error occurred.", testNamespace)) - } - - s.PrintEvents(testNamespace) - s.PrintPods(testNamespace) - - ctx := context.Background() - if enablePolicy { - s.T().Logf("delete %s", networkPolicy.Name) - if err := DeleteNetworkPolicy(ctx, s.ClientSet, testNamespace, networkPolicy.Name); err != nil { - s.T().Logf("fail to delete network policy %s: %v", networkPolicy.Name, err) - } - } - - s.T().Logf("delete %s", clusterIPService.Name) - if err := DeleteService(ctx, s.ClientSet, testNamespace, clusterIPService.Name); err != nil { - s.T().Logf("fail to delete service %s: %v", clusterIPService.Name, err) - } - - s.T().Logf("delete %s", nodePortService.Name) - if err := DeleteService(ctx, s.ClientSet, testNamespace, nodePortService.Name); err != nil { - s.T().Logf("fail to delete service %s: %v", nodePortService.Name, err) - } - - s.T().Logf("delete %s", loadBalancerService.Name) - if err := DeleteService(ctx, s.ClientSet, testNamespace, loadBalancerService.Name); err != nil { - s.T().Logf("fail to delete service %s: %v", loadBalancerService.Name, err) - } - - s.T().Logf("delete %s", headlessService.Name) - if err := DeleteService(ctx, s.ClientSet, testNamespace, headlessService.Name); err != nil { - s.T().Logf("fail to delete service %s: %v", headlessService.Name, err) - } - - 
s.T().Logf("delete %s", podConnA.Name) - if err := DeleteDeployment(ctx, s.ClientSet, testNamespace, podConnA.Name); err != nil { - s.T().Logf("fail to delete pod %s: %v", podConnA.Name, err) - } - - s.T().Logf("delete %s", podConnB.Name) - if err := DeleteDeployment(ctx, s.ClientSet, testNamespace, podConnB.Name); err != nil { - s.T().Logf("fail to delete pod %s: %v", podConnB.Name, err) - } - - s.T().Logf("delete %s", podConnC.Name) - if err := DeleteDeployment(ctx, s.ClientSet, testNamespace, podConnC.Name); err != nil { - s.T().Logf("fail to delete pod %s: %v", podConnC.Name, err) - } - - s.T().Logf("delete %s", podConnD.Name) - if err := DeleteStatefulSet(ctx, s.ClientSet, testNamespace, podConnD.Name); err != nil { - s.T().Logf("fail to delete pod %s: %v", podConnD.Name, err) - } - - s.T().Logf("delete %s", podConnPolicy.Name) - if err := DeleteDeployment(ctx, s.ClientSet, testNamespace, podConnPolicy.Name); err != nil { - s.T().Logf("fail to delete pod %s: %v", podConnPolicy.Name, err) - } - - if enableTrunk { - s.T().Logf("delete %s pod networking", elasticPodNetWorking.Name) - if err := DeletePodNetworking(ctx, s.PodNetworkingClientSet, elasticPodNetWorking.Name); err != nil { - s.T().Logf("fail to delete pod networking %s: %v", elasticPodNetWorking.Name, err) - } - - s.T().Logf("delete %s pod networking", fixedPodNetWorking.Name) - if err := DeletePodNetworking(ctx, s.PodNetworkingClientSet, fixedPodNetWorking.Name); err != nil { - s.T().Logf("fail to delete pod networking %s: %v", fixedPodNetWorking.Name, err) - } - } - - s.T().Logf("delete ns") - if err := DeleteNamespace(ctx, s.ClientSet, testNamespace); err != nil { - s.T().Logf("fail to delete namespace %s: %v", testNamespace, err) - } -} - -func (s *ConnectionTestSuite) TestPod2Pod() { - for _, c := range s.TestCase { - if c.Type != TestTypePodToPod || c.Skip { - continue - } - ctx := context.Background() - var srcPods []corev1.Pod - var err error - srcPods, err = ListPods(ctx, s.ClientSet, testNamespace, c.Src.Label) - if err != nil { - s.err = err - s.T().Error(errors.Wrapf(err, "fail to list src pod %v", c.Src.Label)) - } - - var dstPods []corev1.Pod - dstPods, err = ListPods(ctx, s.ClientSet, testNamespace, c.Dst.Label) - if err != nil { - s.err = err - s.T().Error(errors.Wrapf(err, "fail to list dst pod %v", c.Dst.Label)) - } - for _, src := range srcPods { - for _, dst := range dstPods { - s.T().Logf("src %s -> dst %s", podInfo(&src), podInfo(&dst)) - for _, ip := range podIPs(&dst) { - addr := net.JoinHostPort(ip, fmt.Sprintf("%d", httpTestPort)) - l := fmt.Sprintf("src %s -> dst %s", podInfo(&src), addr) - stdout, stdErrOut, err := s.ExecHTTPGet(src.Namespace, src.Name, curlAddr(addr)) - s.Expected(c.Status, stdout, stdErrOut, err, l) - } - } - } - } -} - -func (s *ConnectionTestSuite) TestPod2ServiceIP() { - for _, c := range s.TestCase { - if c.Type != TestTypePodToServiceIP || c.Skip { - continue - } - ctx := context.Background() - var srcPods []corev1.Pod - var err error - srcPods, err = ListPods(ctx, s.ClientSet, testNamespace, c.Src.Label) - if err != nil { - s.err = err - s.T().Error(errors.Wrapf(err, "fail to list src pod %v", c.Src.Label)) - } - - var dstServices []corev1.Service - dstServices, err = ListServices(ctx, s.ClientSet, testNamespace, c.Dst.Label) - if err != nil { - s.err = err - s.T().Error(errors.Wrapf(err, "fail to list dst pod %v", c.Dst.Label)) - } - - var nodeIPs []net.IP - nodeIPs, err = ListNodeIPs(context.Background(), s.ClientSet) - if err != nil { - s.err = err - 
s.T().Error(errors.Wrap(err, "fail to list node ip")) - } - - for _, src := range srcPods { - for _, svc := range dstServices { - var addrs []string - if svc.Spec.ClusterIP != corev1.ClusterIPNone { - if len(svc.Spec.Ports) < 1 { - s.err = errors.New("fail to allocate service port") - s.T().Error(s.err.Error()) - continue - } - addrs = append(addrs, net.JoinHostPort(svc.Spec.ClusterIP, fmt.Sprintf("%d", svc.Spec.Ports[0].Port))) - } - - if !enablePolicy { - if svc.Spec.Type == corev1.ServiceTypeLoadBalancer { - if len(svc.Status.LoadBalancer.Ingress) < 1 || len(svc.Spec.Ports) < 1 { - s.err = errors.New("fail to allocate load balancer ip or port") - s.T().Error(s.err.Error()) - continue - } - addrs = append(addrs, net.JoinHostPort(svc.Status.LoadBalancer.Ingress[0].IP, fmt.Sprintf("%d", svc.Spec.Ports[0].Port))) - } - - if svc.Spec.Type == corev1.ServiceTypeNodePort || svc.Spec.Type == corev1.ServiceTypeLoadBalancer { - for _, nodeIP := range nodeIPs { - if len(svc.Spec.Ports) < 1 { - s.err = errors.New("fail to allocate service node port") - s.T().Error(s.err.Error()) - continue - } - addrs = append(addrs, net.JoinHostPort(nodeIP.String(), fmt.Sprintf("%d", svc.Spec.Ports[0].NodePort))) - } - } - } - - for _, addr := range addrs { - l := fmt.Sprintf("src %s -> dst svc name %s, addr %s", podInfo(&src), svc.Name, addr) - stdout, stdErrOut, err := s.ExecHTTPGet(src.Namespace, src.Name, curlAddr(addr)) - s.Expected(c.Status, stdout, stdErrOut, err, l) - } - } - } - } -} - -func (s *ConnectionTestSuite) TestPod2ServiceName() { - for _, c := range s.TestCase { - if c.Type != TestTypePodToServiceName || c.Skip { - continue - } - ctx := context.Background() - var srcPods []corev1.Pod - var err error - srcPods, err = ListPods(ctx, s.ClientSet, testNamespace, c.Src.Label) - if err != nil { - s.err = err - s.T().Error(errors.Wrapf(err, "fail to list src pod %v", c.Src.Label)) - } - - var dstServices []corev1.Service - dstServices, err = ListServices(ctx, s.ClientSet, testNamespace, c.Dst.Label) - if err != nil { - s.err = err - s.T().Error(errors.Wrapf(err, "fail to list dst service %v", c.Dst.Label)) - } - - for _, src := range srcPods { - for _, svc := range dstServices { - l := fmt.Sprintf("src %s -> dst svc name %s", podInfo(&src), svc.Name) - stdout, stdErrOut, err := s.ExecHTTPGet(src.Namespace, src.Name, net.JoinHostPort(svc.Name, fmt.Sprintf("%d", httpTestPort))) - s.Expected(c.Status, stdout, stdErrOut, err, l) - } - } - } -} - -func (s *ConnectionTestSuite) Expected(status bool, stdout []byte, stdErrOut []byte, err error, msg string) { - s.T().Logf("stdOut: %s, errOut: %s", string(stdout), string(stdErrOut)) - if status { - if assert.NoError(s.T(), err, msg) && assert.Equal(s.T(), 0, len(stdErrOut), msg) { - s.T().Logf(msg + ", test pass") - } else { - s.err = err - s.T().Error(errors.Wrapf(err, "%s, test failed, expected connection success, but connection failure", msg)) - } - } else { - if assert.Error(s.T(), err, msg) { - s.T().Logf(msg + ", test pass") - } else { - s.T().Errorf(msg + ", test failed, expected connection failure, but connection success") - } - } -} - -func (s *ConnectionTestSuite) ExecHTTPGet(namespace, name string, dst string) ([]byte, []byte, error) { - cmd := fmt.Sprintf("curl -o /dev/null -s -w %%{http_code} %%{time_connect} %%{time_total} --connect-timeout 6 --retry 2 %s", dst) - s.T().Log(cmd) - return Exec(s.ClientSet, s.RestConf, namespace, name, []string{ - "/usr/bin/bash", - "-c", - cmd, - }) -} - -func (s *ConnectionTestSuite) PrintEvents(namespace string) { - 
events, _ := s.ClientSet.CoreV1().Events(namespace).List(context.TODO(), metav1.ListOptions{}) - for i, e := range events.Items { - s.T().Logf("event #%d: %v", i, e) - } -} - -func (s *ConnectionTestSuite) PrintPods(namespace string) { - pods, _ := s.ClientSet.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{}) - for i, e := range pods.Items { - s.T().Logf("pod #%d: %v", i, e) - } -} - -func podInfo(pod *corev1.Pod) string { - return fmt.Sprintf("%s(%s)", pod.Name, pod.Spec.NodeName) -} - -func podIPs(pod *corev1.Pod) []string { - result := []string{pod.Status.PodIP} - if len(pod.Status.PodIPs) == 2 { - result = append(result, pod.Status.PodIPs[1].IP) - } - return result -} - -func curlAddr(addr string) string { - if strings.Contains(addr, "[") { - return fmt.Sprintf("-g -6 http://%s", addr) - } - return addr -} diff --git a/tests/helpers.bash b/tests/helpers.bash deleted file mode 100644 index f644ed49..00000000 --- a/tests/helpers.bash +++ /dev/null @@ -1,163 +0,0 @@ -#!/bin/bash - -# Print run outputs to test.out -function debug_output() { - echo test: $BATS_TEST_NAME line: $BATS_TEST_NUMBER - echo test: $BATS_TEST_NAME line: $BATS_TEST_NUMBER >> test.out - printf '%s\n' "${lines[@]}" - printf '%s\n' "${lines[@]}" >> test.out -} - -function debug_echo() { - echo test: $BATS_TEST_NAME line: $BATS_TEST_NUMBER >> test.out - echo "$@" >> test.out -} - -# Retry a command $1 times until it succeeds. Wait $2 seconds between retries. -function retry() { - local attempts=$1 - shift - local delay=$1 - shift - local i - - for ((i=0; i < attempts; i++)); do - run "$@" - if [[ "$status" -eq 0 ]] ; then - return 0 - fi - sleep $delay - done - - echo "Command \"$@\" failed $attempts times. Output: $output" - false -} - -function object_exist() { - run kubectl get $@ - if [[ "$status" -eq 0 ]] && [[ ${#lines[@]} -gt 1 ]]; then - return 0 - fi - echo "object $@ not ready, status: $status, lines: ${#lines[@]} output $output" - false -} - -function pod_running() { - run kubectl get $@ - if [[ "$status" -eq 0 ]] && [[ ${#lines[@]} -gt 1 ]] && echo $output | grep -q "Running"; then - return 0 - fi - echo "object $@ not ready, status: $status, lines: ${#lines[@]} output $output" - false -} - -function pods_all_running() { - run kubectl get $@ --no-headers - if [[ "$status" -eq 0 ]] && [[ ${#lines[@]} -gt 0 ]]; then - local running - local all - running=$(echo "$output" | grep -c "Running") - all=$(echo "$output" | wc -l) - if [[ "$running" -eq "$all" ]]; then - return 0 - fi - fi - echo "object $@ not ready, status: $status, lines: ${#lines[@]} output $output" - false -} - -function object_not_exist() { - run kubectl get $@ - if [[ "$status" -gt 0 ]] || [[ ${#lines[@]} -eq 1 ]]; then - return 0 - fi - echo "object $@ exist, status: $status, lines: ${#lines[@]} output $output" - false -} - -function loadbalancer_ready() { - run kubectl get $@ - if [[ "$status" -eq 0 ]] && [[ ${#lines[@]} -gt 1 ]]; then - if echo $output | grep -q "pending"; then - false - echo "object $@ pending, status: $status, lines: ${#lines[@]} output $output" - return 1 - fi - return 0 - fi - echo "object $@ exist, status: $status, lines: ${#lines[@]} output $output" - false -} - -function deployment_ready() { - run kubectl get $@ -o json - if [[ "$status" -eq 0 ]] && [[ ${#lines[@]} -gt 1 ]] && echo $output | jq ".status.replicas == .status.readyReplicas" | grep "true"; then - return 0 - fi - echo "deployment $@ not ready, status: $status, lines: ${#lines[@]}" - false -} - -# Prepare curl operation -function 
prepare_curl_options() { - if [ x"$DOCKER_TLS_VERIFY" = x"1" ]; then - DOCKER_HOST_HTTPS=`echo $DOCKER_HOST | sed 's/tcp:\/\//https:\/\//g'` - CURL_OPTION="-sw \\\\n%{http_code} --insecure --cert \"$DOCKER_CERT_PATH/cert.pem\" --key \"$DOCKER_CERT_PATH/key.pem\"" - #DOCKER_URL=$DOCKER_HOST_HTTPS/$API_VERSION - DOCKER_URL=$DOCKER_HOST_HTTPS - else - DOCKER_HOST_HTTP=`echo $DOCKER_HOST | sed 's/tcp:\/\//http:\/\//g'` - CURL_OPTION="-sw \\\\n%{http_code}" - #DOCKER_URL=$DOCKER_HOST_HTTP/$API_VERSION - DOCKER_URL=$DOCKER_HOST_HTTP - fi -} - -function curl_get() { - echo "curl $CURL_OPTION $DOCKER_URL$@" >> test.out - echo $CURL_OPTION $DOCKER_URL$@ | xargs -t curl -} - -function curl_post_json() { - url="$DOCKER_URL$1" - shift - - echo $CURL_OPTION $url -d "'$@'"| xargs -t curl -X POST --header "Content-Type:application/json;charset=UTF-8" - -} - -function curl_post_yaml() { - url="$DOCKER_URL$1" - shift - echo $CURL_OPTION $url $@| xargs -t curl -X POST --header "Content-Type:text/yaml;charset=UTF-8" -} - -function curl_post() { - echo $CURL_OPTION $DOCKER_URL$@ | xargs -t curl -X POST -} - -function curl_delete() { - echo $CURL_OPTION $DOCKER_URL$@ | xargs -t curl -X DELETE -} - -function status_code() { - echo ${lines[${#lines[@]}-1]} -} - -function parse_args() { - while [[ $# -ge 1 ]]; do - key=$1 - shift - case "$key" in - --trunk) - export trunk=1 - ;; - *) - echo 'invalid argument' - exit 1 - ;; - esac - done -} - -prepare_curl_options \ No newline at end of file diff --git a/tests/install_env.sh b/tests/install_env.sh deleted file mode 100755 index 870cc93e..00000000 --- a/tests/install_env.sh +++ /dev/null @@ -1,120 +0,0 @@ -#!/usr/bin/env bash -set -e - -export cluster_id="" -export access_key="" -export access_secret="" -# vpc or eni-multi-ip -export category="" -export region="" - -usage() { - echo "usage: $0 --cluster-id --access-key --access-secret \ ---region --category --image " -} - -install_env() { - if [ -z ${cluster_id} ] || [ -z ${access_key} ] || [ -z ${access_secret} ] || [ -z ${category} ]; then - echo "invaild argument" - usage - exit 1; - fi - - if [ -z "${terway_image}" ]; then - export terway_image="registry.aliyuncs.com/acs/terway:v1.0.9.10-gfc1045e-aliyun" - fi - - export ACCESS_KEY_ID=${access_key} - export ACCESS_KEY_SECRET=${access_secret} - export REGION=${region} - temp_dir=$(mktemp -d) - export temp_dir - aliyun_cluster=$(aliyun cs GET /clusters/${cluster_id}) - export aliyun_cluster - security_group=$(echo "${aliyun_cluster}" | jq .security_group_id | tr -d '"') - export security_group - vswitch=$(echo "${aliyun_cluster}" | jq .vswitch_id | tr -d '"') - export vswitch - aliyun cs GET /k8s/"${cluster_id}"/user_config | jq -r .config > "${temp_dir}"/kubeconfig.yaml - export KUBECONFIG=${temp_dir}/kubeconfig.yaml - service_cidr=$(aliyun cs GET /clusters/"${cluster_id}" | jq .parameters.ServiceCIDR | tr -d '"') - export service_cidr - pod_cidr=$(aliyun cs GET /clusters/"${cluster_id}" | jq .parameters.ContainerCIDR | tr -d '"') - export pod_cidr - - if ! 
kubectl get ds terway -n kube-system; then
-    echo "invaild kubeconfig for cluster or not a terway cluster"
-    exit 1;
-  fi
-
-  install_terway
-}
-
-install_terway() {
-  terway_template=""
-  case ${category} in
-  vpc)
-    terway_template='terway-vpc.yml'
-    ;;
-  eni-multi-ip)
-    terway_template='terway-multiip.yml'
-    ;;
-  eni-only)
-    terway_template='terway-eni-only.yml'
-    ;;
-  *)
-    echo "invaild category "${category}
-    exit 1;
-  esac
-  cp templates/terway/${terway_template} "${temp_dir}"/
-  sed -e "s#ACCESS_KEY#${access_key}#g" \
-      -e "s#ACCESS_SECRET#${access_secret}#g" \
-      -e "s#SERVICE_CIDR#${service_cidr}#g" \
-      -e "s#SECURITY_GROUP#${security_group}#g" \
-      -e "s#VSWITCH#${vswitch}#g" \
-      -e "s#POD_CIDR#${pod_cidr}#g" \
-      -e "s#TERWAY_IMAGE#${terway_image}#g" \
-      -i "${temp_dir}/${terway_template}"
-
-  kubectl apply -f "${temp_dir}/${terway_template}"
-  kubectl delete pod -n kube-system -l app=terway
-  sleep 30
-}
-
-while [[ $# -ge 1 ]]
-do
-  key=$1
-  shift
-  case "$key" in
-  --cluster-id)
-    export cluster_id=$1
-    shift
-    ;;
-  --access-key)
-    export access_key=$1
-    shift
-    ;;
-  --access-secret)
-    export access_secret=$1
-    shift
-    ;;
-  --region)
-    export region=$1
-    shift
-    ;;
-  --category)
-    export category=$1
-    shift
-    ;;
-  --image)
-    export terway_image=$1
-    shift
-    ;;
-  *)
-    usage
-    exit 1
-    ;;
-  esac
-done
-
-install_env
diff --git a/tests/main_test.go b/tests/main_test.go
new file mode 100644
index 00000000..00bc74bf
--- /dev/null
+++ b/tests/main_test.go
@@ -0,0 +1,283 @@
+//go:build e2e
+
+package tests
+
+import (
+	"context"
+	"encoding/json"
+	"flag"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+	"testing"
+	"time"
+
+	k8sErr "k8s.io/apimachinery/pkg/api/errors"
+
+	"go.uber.org/atomic"
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/kubernetes/scheme"
+	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
+	"sigs.k8s.io/e2e-framework/klient/k8s"
+	"sigs.k8s.io/e2e-framework/klient/wait"
+	"sigs.k8s.io/e2e-framework/klient/wait/conditions"
+	"sigs.k8s.io/e2e-framework/pkg/env"
+	"sigs.k8s.io/e2e-framework/pkg/envconf"
+	"sigs.k8s.io/e2e-framework/pkg/envfuncs"
+	"sigs.k8s.io/e2e-framework/pkg/features"
+
+	networkv1beta1 "github.com/AliyunContainerService/terway/pkg/apis/network.alibabacloud.com/v1beta1"
+)
+
+var (
+	testenv env.Environment
+)
+
+var (
+	testIPv4           bool
+	testIPv6           bool
+	testNetworkPolicy  bool
+	testTrunk          bool
+	testPodLevelConfig bool
+
+	ipvlan bool
+
+	terway string
+
+	vSwitchIDs       string
+	securityGroupIDs string
+
+	repo          string
+	timeout       string
+	parsedTimeout time.Duration
+
+	lock sync.Mutex
+
+	isFailed atomic.Bool
+
+	nginxImage string
+)
+
+func init() {
+	flag.StringVar(&repo, "repo", "registry.cn-hangzhou.aliyuncs.com/build-test", "image repo")
+	flag.StringVar(&timeout, "timeout", "2m", "timeout for waiting on test resources to become ready")
+	flag.StringVar(&vSwitchIDs, "vswitch-ids", "", "extra vSwitchIDs")
+	flag.StringVar(&securityGroupIDs, "security-group-ids", "", "extra securityGroupIDs")
+}
+
+func TestMain(m *testing.M) {
+	flag.Parse()
+
+	nginxImage = repo + "/nginx:1.23.1"
+	var err error
+	parsedTimeout, err = time.ParseDuration(timeout)
+	if err != nil {
+		panic("failed to parse timeout")
+	}
+	_ = clientgoscheme.AddToScheme(scheme.Scheme)
+	_ = networkv1beta1.AddToScheme(scheme.Scheme)
+
+	home, err := os.UserHomeDir()
+	if err != nil {
+		panic("failed to get home path")
+	}
+
+	envCfg := envconf.NewWithKubeConfig(filepath.Join(home, ".kube", "config")).
+		WithRandomNamespace()
+
+	testenv = env.NewWithConfig(envCfg)
+	testenv.Setup(
+		envfuncs.CreateNamespace(envCfg.Namespace()),
+		patchNamespace,
+		checkENIConfig,
+		setNodeLabel,
+		setPodNetworking,
+	)
+	testenv.AfterEachFeature(func(ctx context.Context, config *envconf.Config, t *testing.T, feature features.Feature) (context.Context, error) {
+		objs, ok := ctx.Value(resourceKey).([]k8s.Object)
+		if !ok {
+			return ctx, nil
+		}
+		for _, obj := range objs {
+			_ = config.Client().Resources().Delete(ctx, obj)
+
+			err = wait.For(conditions.New(config.Client().Resources()).ResourceDeleted(obj), wait.WithContext(ctx),
+				wait.WithTimeout(parsedTimeout),
+				wait.WithInterval(1*time.Second))
+			if err != nil {
+				t.Error(err)
+				t.FailNow()
+			}
+		}
+		return ctx, nil
+	})
+
+	testenv.Finish(func(ctx context.Context, config *envconf.Config) (context.Context, error) {
+		if !isFailed.Load() {
+			return envfuncs.DeleteNamespace(envCfg.Namespace())(ctx, config)
+		}
+
+		pn := &networkv1beta1.PodNetworking{}
+		pn.Name = "trunk"
+
+		_ = config.Client().Resources().Delete(ctx, pn)
+		return ctx, nil
+	})
+
+	os.Exit(testenv.Run(m))
+}
+
+func checkENIConfig(ctx context.Context, config *envconf.Config) (context.Context, error) {
+	ds := &appsv1.DaemonSetList{}
+	err := config.Client().Resources().WithNamespace("kube-system").List(ctx, ds)
+	if err != nil {
+		return ctx, err
+	}
+	for _, d := range ds.Items {
+		switch d.Name {
+		case "terway", "terway-eni", "terway-eniip":
+			terway = d.Name
+			break
+		}
+	}
+
+	// We can determine the cluster network configuration from the terway eni-config ConfigMap.
+	cm := &corev1.ConfigMap{}
+	err = config.Client().Resources().Get(ctx, "eni-config", "kube-system", cm)
+	if err != nil {
+		if errors.IsNotFound(err) {
+			testIPv4 = true
+			return ctx, nil
+		}
+		return ctx, err
+	}
+	if strings.Contains(cm.Data["10-terway.conf"], "IPVlan") {
+		ipvlan = true
+	}
+	if strings.Contains(cm.Data["10-terway.conf"], "datapathv2") {
+		ipvlan = true
+	}
+	switch cm.Data["disable_network_policy"] {
+	case "", "false":
+		testNetworkPolicy = true
+	}
+	cfg := &Config{}
+	err = json.Unmarshal([]byte(cm.Data["eni_conf"]), cfg)
+	if err != nil {
+		return nil, err
+	}
+	if cfg.EnableENITrunking {
+		testTrunk = true
+		testPodLevelConfig = true
+	}
+	if cfg.IPAMType == "crd" {
+		testPodLevelConfig = true
+	}
+	if cfg.IPStack == "" || cfg.IPStack == "ipv4" || cfg.IPStack == "dual" {
+		testIPv4 = true
+	}
+	if cfg.IPStack == "dual" || cfg.IPStack == "ipv6" {
+		testIPv6 = true
+	}
+
+	return ctx, nil
+}
+
+func setNodeLabel(ctx context.Context, config *envconf.Config) (context.Context, error) {
+	nodes := &corev1.NodeList{}
+	err := config.Client().Resources().List(ctx, nodes)
+	if err != nil {
+		return nil, err
+	}
+	for _, node := range nodes.Items {
+		val := ""
+		if strings.Contains(node.Status.NodeInfo.OSImage, "Soaring Falcon") {
+			val = "alinux3"
+		}
+		if strings.Contains(node.Status.NodeInfo.OSImage, "Hunting Beagle") {
+			val = "alinux2"
+		}
+		mergePatch, _ := json.Marshal(map[string]interface{}{
+			"metadata": map[string]interface{}{
+				"labels": map[string]interface{}{
+					"e2e-os": val,
+				},
+			},
+		})
+		err = config.Client().Resources().Patch(ctx, &node, k8s.Patch{PatchType: types.StrategicMergePatchType, Data: mergePatch})
+		if err != nil {
+			return nil, err
+		}
+	}
+	return ctx, nil
+}
+
+func setPodNetworking(ctx context.Context, config *envconf.Config) (context.Context, error) {
+	if !testTrunk {
+		return ctx, nil
+	}
+
+	pn := &networkv1beta1.PodNetworking{}
+	pn.Name = "trunk"
+	pn.Spec.Selector = 
networkv1beta1.Selector{PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"trunk": "enable"}, + }} + if securityGroupIDs != "" { + pn.Spec.SecurityGroupIDs = strings.Split(securityGroupIDs, ",") + } + if vSwitchIDs != "" { + pn.Spec.VSwitchOptions = strings.Split(vSwitchIDs, ",") + } + + err := config.Client().Resources().Create(ctx, pn) + if err != nil { + if !k8sErr.IsAlreadyExists(err) { + return nil, err + } + } + + err = wait.For(func(ctx context.Context) (bool, error) { + pn := &networkv1beta1.PodNetworking{} + err := config.Client().Resources().Get(ctx, "trunk", "", pn) + if err != nil { + return false, err + } + if pn.Status.Status != networkv1beta1.NetworkingStatusReady { + return false, nil + } + return true, nil + }, + wait.WithTimeout(10*time.Second), + wait.WithInterval(1*time.Second)) + + return ctx, err +} + +func patchNamespace(ctx context.Context, config *envconf.Config) (context.Context, error) { + ns := &corev1.Namespace{} + err := config.Client().Resources().Get(ctx, config.Namespace(), "", ns) + if err != nil { + return ctx, err + } + mergePatch, _ := json.Marshal(map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{ + "ns": config.Namespace(), + "node-local-dns-injection": "enabled", + }, + }, + }) + err = config.Client().Resources().Patch(ctx, ns, k8s.Patch{PatchType: types.StrategicMergePatchType, Data: mergePatch}) + return ctx, err +} + +type Config struct { + EnableENITrunking bool `json:"enable_eni_trunking"` + IPStack string `json:"ip_stack"` + IPAMType string `json:"ipam_type"` +} diff --git a/tests/network_policy.bats b/tests/network_policy.bats deleted file mode 100644 index 133b68ea..00000000 --- a/tests/network_policy.bats +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bats -load helpers - -function setup() { - kubectl delete ns network-test || true - retry 30 3 object_not_exist ns network-test -} - -@test "network policy" { - # eni not support network policy - if [ "$category" != "eni-only" ]; then - kubectl apply -f templates/testcases/network_policy/network-policy.yml - retry 5 20 bash -c "kubectl get pod -n network-test policy-cli | grep Completed" - retry 5 20 bash -c "kubectl get pod -n network-test non-policy-cli | grep Completed" - result=`kubectl get pod -n network-test -o jsonpath='{range .status.containerStatuses[*]}{.state.terminated.reason}{end}' policy-cli` - [ "$result" = "CompletedCompleted" ] - result=`kubectl get pod -n network-test -o jsonpath='{range .status.containerStatuses[*]}{.state.terminated.reason}{end}' non-policy-cli` - [ "$result" = "CompletedError" ] - kubectl delete -f templates/testcases/network_policy/network-policy.yml - retry 30 2 object_not_exist ns network-test - fi -} \ No newline at end of file diff --git a/tests/pod_connection.bats b/tests/pod_connection.bats deleted file mode 100644 index 5320b457..00000000 --- a/tests/pod_connection.bats +++ /dev/null @@ -1,94 +0,0 @@ -#!/usr/bin/env bats -load helpers - -function delete_deploy() { - retry 20 2 object_not_exist svc tomcat-service - retry 20 2 object_not_exist svc nginx-service - retry 20 2 object_not_exist pod -l app=vpc-samehost-nginx - retry 20 2 object_not_exist pod -l app=vpc-samehost-tomcat - retry 20 2 object_not_exist pod -l app=vpc-crosshost-nginx - retry 20 2 object_not_exist pod -l app=vpc-crosshost-tomcat - retry 20 2 object_not_exist pod -l app=vpc-tomcat - retry 20 2 object_not_exist pod -l app=vpc-eni-nginx - retry 20 2 object_not_exist pod -l app=eni-tomcat - retry 20 2 object_not_exist 
pod -l app=eni-nginx - sleep 10 -} - -function setup() { - kubectl delete -f templates/testcases/network_connection/samehost.yml || true - kubectl delete -f templates/testcases/network_connection/crosshost.yml || true - kubectl delete -f templates/testcases/network_connection/eni.yml || true - kubectl delete -f templates/testcases/network_connection/vpc-eni.yml || true - delete_deploy || true -} - -function request_hostport { - run "kubectl get pod -n kube-system -l app=terway -o name | cut -d '/' -f 2 | xargs -n1 -I {} kubectl exec -it {} -n kube-system curl 127.0.0.1:$1" - if [[ "$status" -eq 0 ]]; then - return 0 - fi - false - echo "node port result: "$result -} - -@test "pod connection same host" { - retry 5 5 kubectl apply -f templates/testcases/network_connection/samehost.yml - - retry 20 2 object_exist svc tomcat-service - retry 20 2 object_exist svc nginx-service - retry 20 5 pod_running pod -l app=vpc-samehost-tomcat - retry 20 5 object_exist pod -l app=vpc-samehost-nginx - # wait nginx startup - sleep 20 - retry 3 2 request_hostport 30080 - [ "$status" -eq "0" ] - kubectl delete -f templates/testcases/network_connection/samehost.yml || true - delete_deploy -} - -@test "pod connection cross host" { - retry 5 5 kubectl apply -f templates/testcases/network_connection/crosshost.yml - retry 20 2 object_exist svc tomcat-service - retry 20 2 object_exist svc nginx-service - retry 20 5 pod_running pod -l app=vpc-crosshost-tomcat - retry 20 5 object_exist pod -l app=vpc-crosshost-nginx - # wait nginx startup - sleep 20 - retry 3 2 request_hostport 30090 - [ "$status" -eq "0" ] - kubectl delete -f templates/testcases/network_connection/crosshost.yml || true - delete_deploy -} - -@test "pod connection eni" { - if [ "$category" = "vpc" ]; then - retry 5 5 kubectl apply -f templates/testcases/network_connection/eni.yml - retry 20 2 object_exist svc tomcat-service - retry 20 2 object_exist svc nginx-service - retry 20 5 pod_running pod -l app=eni-tomcat - retry 20 5 object_exist pod -l app=eni-nginx - # wait nginx startup - sleep 20 - retry 3 2 request_hostport 30100 - [ "$status" -eq "0" ] - kubectl delete -f templates/testcases/network_connection/eni.yml || true - delete_deploy - fi -} - -@test "pod connection vpc -> eni" { - if [ "$category" = "vpc" ]; then - retry 5 5 kubectl apply -f templates/testcases/network_connection/vpc-eni.yml - retry 20 2 object_exist svc tomcat-service - retry 20 2 object_exist svc nginx-service - retry 20 5 pod_running pod -l app=vpc-tomcat - retry 20 5 object_exist pod -l app=vpc-eni-nginx - # wait nginx startup - sleep 20 - retry 3 2 request_hostport 30110 - [ "$status" -eq "0" ] - kubectl delete -f templates/testcases/network_connection/vpc-eni.yml || true - delete_deploy - fi -} diff --git a/tests/prepare_test.go b/tests/prepare_test.go deleted file mode 100644 index 8515d2f9..00000000 --- a/tests/prepare_test.go +++ /dev/null @@ -1,554 +0,0 @@ -//go:build e2e -// +build e2e - -package tests - -import ( - "bytes" - "context" - "fmt" - "net" - "time" - - "github.com/sirupsen/logrus" - - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/networking/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - k8sLabels "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/remotecommand" - - 
"github.com/AliyunContainerService/terway/pkg/apis/network.alibabacloud.com/v1beta1" - "github.com/AliyunContainerService/terway/pkg/generated/clientset/versioned" -) - -var backoff = wait.Backoff{ - Duration: 2 * time.Second, - Factor: 1, - Jitter: 0, - Steps: 150, -} - -func EnsureNamespace(ctx context.Context, cs kubernetes.Interface, name string) error { - _, err := cs.CoreV1().Namespaces().Get(ctx, name, metav1.GetOptions{}) - if err != nil { - if errors.IsNotFound(err) { - _, err = cs.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: map[string]string{ - "project": "network-test", - }, - }}, metav1.CreateOptions{}) - return err - } - } - return err -} - -func EnsurePodNetworking(ctx context.Context, cs *versioned.Clientset, cfg PodNetworkingConfig) (*v1beta1.PodNetworking, error) { - pnTpl := &v1beta1.PodNetworking{ - ObjectMeta: metav1.ObjectMeta{ - Name: cfg.Name, - }, - Spec: v1beta1.PodNetworkingSpec{ - AllocationType: cfg.IPType, - Selector: v1beta1.Selector{ - PodSelector: &metav1.LabelSelector{ - MatchLabels: cfg.PodSelectLabels, - }, - NamespaceSelector: &metav1.LabelSelector{ - MatchLabels: cfg.NamespaceLabels, - }, - }, - }, - } - _, err := cs.NetworkV1beta1().PodNetworkings().Create(ctx, pnTpl, metav1.CreateOptions{}) - if err != nil && !errors.IsAlreadyExists(err) { - return nil, err - } - var pn *v1beta1.PodNetworking - err = wait.ExponentialBackoff(backoff, func() (bool, error) { - pn, err = cs.NetworkV1beta1().PodNetworkings().Get(ctx, cfg.Name, metav1.GetOptions{}) - if err != nil { - return false, nil - } - if pn.Status.Status != v1beta1.NetworkingStatusReady { - return false, nil - } - return true, nil - }) - return pn, err -} - -func EnsureNetworkPolicy(ctx context.Context, cs kubernetes.Interface, cfg NetworkPolicyConfig) (*v1.NetworkPolicy, error) { - networkPolicyTpl := &v1.NetworkPolicy{ - ObjectMeta: metav1.ObjectMeta{ - Name: cfg.Name, - Namespace: cfg.Namespace, - }, - Spec: v1.NetworkPolicySpec{ - PodSelector: metav1.LabelSelector{ - MatchLabels: cfg.PodSelectLabels, - }, - Ingress: []v1.NetworkPolicyIngressRule{ - { - From: []v1.NetworkPolicyPeer{ - { - PodSelector: &metav1.LabelSelector{ - MatchLabels: cfg.IngressPodLabels, - }, - NamespaceSelector: &metav1.LabelSelector{ - MatchLabels: cfg.IngressNamespaceLabels, - }, - }, - }, - }, - }, - }, - } - _, err := cs.NetworkingV1().NetworkPolicies(cfg.Namespace).Create(ctx, networkPolicyTpl, metav1.CreateOptions{}) - if err != nil && !errors.IsAlreadyExists(err) { - return nil, err - } - var np *v1.NetworkPolicy - err = wait.ExponentialBackoff(backoff, func() (bool, error) { - np, err = cs.NetworkingV1().NetworkPolicies(cfg.Namespace).Get(ctx, cfg.Name, metav1.GetOptions{}) - if err != nil { - return false, nil - } - return true, nil - }) - return np, err -} - -func EnsureDeployment(ctx context.Context, cs kubernetes.Interface, cfg PodResConfig) ([]corev1.Pod, error) { - deplTml := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: cfg.Name, - Namespace: cfg.Namespace, - }, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: cfg.Labels, - }, - Replicas: func(a int32) *int32 { return &a }(cfg.Replicas), - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: cfg.Labels, - }, - Spec: corev1.PodSpec{ - HostNetwork: cfg.HostNetwork, - TerminationGracePeriodSeconds: func(a int64) *int64 { return &a }(0), - Containers: []corev1.Container{ - { - Name: "echo", - Image: image, - Args: 
[]string{"--http-bind-address", fmt.Sprintf(":%d", httpTestPort), - "--https-bind-address", fmt.Sprintf(":%d", httpsTestPort)}, - ImagePullPolicy: corev1.PullAlways, - }, - }, - Affinity: &corev1.Affinity{ - NodeAffinity: &corev1.NodeAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ - NodeSelectorTerms: []corev1.NodeSelectorTerm{ - { - MatchExpressions: []corev1.NodeSelectorRequirement{ - { - Key: "type", - Operator: corev1.NodeSelectorOpNotIn, - Values: []string{ - "virtual-kubelet", - }, - }, - }, - }, - }, - }, - }, - }, - TopologySpreadConstraints: []corev1.TopologySpreadConstraint{ - { - MaxSkew: 1, - TopologyKey: corev1.LabelHostname, - WhenUnsatisfiable: corev1.ScheduleAnyway, - LabelSelector: &metav1.LabelSelector{ - MatchLabels: cfg.Labels, - }, - }, - }, - Tolerations: []corev1.Toleration{ - { - Key: "kubernetes.io/arch", - Value: "arm64", - }, - }, - }, - }, - }, - } - _, err := cs.AppsV1().Deployments(cfg.Namespace).Create(ctx, deplTml, metav1.CreateOptions{}) - if err != nil && !errors.IsAlreadyExists(err) { - return nil, err - } - uid := "" - err = wait.ExponentialBackoff(backoff, func() (bool, error) { - delp, err := cs.AppsV1().Deployments(cfg.Namespace).Get(ctx, cfg.Name, metav1.GetOptions{}) - if err != nil { - return false, nil - } - uid = string(delp.UID) - return delp.Status.Replicas == *delp.Spec.Replicas && delp.Status.Replicas == delp.Status.AvailableReplicas, nil - }) - if err != nil { - return nil, err - } - pods, err := GetPodsByRef(ctx, cs, uid) - if err != nil { - return nil, err - } - return pods, nil -} - -func EnsureStatefulSet(ctx context.Context, cs kubernetes.Interface, cfg PodResConfig) ([]corev1.Pod, error) { - stsTml := &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: cfg.Name, - Namespace: cfg.Namespace, - }, - Spec: appsv1.StatefulSetSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: cfg.Labels, - }, - Replicas: func(a int32) *int32 { return &a }(cfg.Replicas), - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: cfg.Labels, - }, - Spec: corev1.PodSpec{ - HostNetwork: cfg.HostNetwork, - TerminationGracePeriodSeconds: func(a int64) *int64 { return &a }(0), - Containers: []corev1.Container{ - { - Name: "echo", - Image: image, - Args: []string{"--http-bind-address", fmt.Sprintf(":%d", httpTestPort), - "--https-bind-address", fmt.Sprintf(":%d", httpsTestPort)}, - ImagePullPolicy: corev1.PullAlways, - }, - }, - Affinity: &corev1.Affinity{ - NodeAffinity: &corev1.NodeAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ - NodeSelectorTerms: []corev1.NodeSelectorTerm{ - { - MatchExpressions: []corev1.NodeSelectorRequirement{ - { - Key: "type", - Operator: corev1.NodeSelectorOpNotIn, - Values: []string{ - "virtual-kubelet", - }, - }, - }, - }, - }, - }, - }, - }, - TopologySpreadConstraints: []corev1.TopologySpreadConstraint{ - { - MaxSkew: 1, - TopologyKey: corev1.LabelHostname, - WhenUnsatisfiable: corev1.ScheduleAnyway, - LabelSelector: &metav1.LabelSelector{ - MatchLabels: cfg.Labels, - }, - }, - }, - Tolerations: []corev1.Toleration{ - { - Key: "kubernetes.io/arch", - Value: "arm64", - }, - }, - }, - }, - }, - } - _, err := cs.AppsV1().StatefulSets(cfg.Namespace).Create(ctx, stsTml, metav1.CreateOptions{}) - if err != nil && !errors.IsAlreadyExists(err) { - return nil, err - } - uid := "" - err = wait.ExponentialBackoff(backoff, func() (bool, error) { - sts, err := cs.AppsV1().StatefulSets(cfg.Namespace).Get(ctx, cfg.Name, metav1.GetOptions{}) - if 
err != nil { - return false, nil - } - uid = string(sts.UID) - return sts.Status.Replicas == *sts.Spec.Replicas && sts.Status.Replicas == sts.Status.ReadyReplicas, nil - }) - if err != nil { - return nil, err - } - pods, err := GetPodsByRef(ctx, cs, uid) - if err != nil { - return nil, err - } - return pods, nil -} - -func EnsureService(ctx context.Context, cs kubernetes.Interface, cfg ServiceResConfig) (*corev1.Service, error) { - svc, err := cs.CoreV1().Services(cfg.Namespace).Get(ctx, cfg.Name, metav1.GetOptions{}) - if err == nil { - return svc, nil - } - svcTpl := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: cfg.Name, - Namespace: cfg.Namespace, - Labels: cfg.Labels, - }, - Spec: corev1.ServiceSpec{ - Ports: []corev1.ServicePort{ - { - Name: "http-server", - Port: int32(httpTestPort), - TargetPort: intstr.FromInt(httpTestPort), - }, - }, - Selector: cfg.PodSelectLabels, - SessionAffinity: corev1.ServiceAffinityNone, - ExternalTrafficPolicy: "", - IPFamilies: nil, - Type: cfg.Type, - }, - } - - if cfg.Headless { - svcTpl.Spec.ClusterIP = "None" - } - svc, err = cs.CoreV1().Services(cfg.Namespace).Create(ctx, svcTpl, metav1.CreateOptions{}) - if err != nil { - return nil, err - } - err = wait.ExponentialBackoff(backoff, func() (bool, error) { - svc, err = cs.CoreV1().Services(cfg.Namespace).Get(ctx, cfg.Name, metav1.GetOptions{}) - if err != nil { - return false, nil - } - if cfg.Type == corev1.ServiceTypeLoadBalancer { - if len(svc.Status.LoadBalancer.Ingress) > 0 { - return true, nil - } - return false, nil - } - return true, nil - }) - - return svc, err -} - -func DeletePodNetworking(ctx context.Context, cs *versioned.Clientset, name string) error { - err := cs.NetworkV1beta1().PodNetworkings().Delete(ctx, name, metav1.DeleteOptions{}) - if errors.IsNotFound(err) { - return nil - } - err = wait.ExponentialBackoff(backoff, func() (bool, error) { - _, err := cs.NetworkV1beta1().PodNetworkings().Get(ctx, name, metav1.GetOptions{}) - if errors.IsNotFound(err) { - return true, nil - } - return false, nil - }) - return err -} - -func DeleteNetworkPolicy(ctx context.Context, cs kubernetes.Interface, namespace, name string) error { - err := cs.NetworkingV1().NetworkPolicies(namespace).Delete(ctx, name, metav1.DeleteOptions{}) - if errors.IsNotFound(err) { - return nil - } - err = wait.ExponentialBackoff(backoff, func() (bool, error) { - _, err := cs.NetworkingV1().NetworkPolicies(namespace).Get(ctx, name, metav1.GetOptions{}) - if errors.IsNotFound(err) { - return true, nil - } - return false, nil - }) - return err -} - -func DeleteDeployment(ctx context.Context, cs kubernetes.Interface, namespace, name string) error { - err := cs.AppsV1().Deployments(namespace).Delete(ctx, name, metav1.DeleteOptions{}) - if errors.IsNotFound(err) { - return nil - } - err = wait.ExponentialBackoff(backoff, func() (bool, error) { - _, err := cs.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{}) - if errors.IsNotFound(err) { - return true, nil - } - return false, nil - }) - return err -} - -func DeleteStatefulSet(ctx context.Context, cs kubernetes.Interface, namespace, name string) error { - err := cs.AppsV1().StatefulSets(namespace).Delete(ctx, name, metav1.DeleteOptions{}) - if errors.IsNotFound(err) { - return nil - } - err = wait.ExponentialBackoff(backoff, func() (bool, error) { - _, err := cs.AppsV1().StatefulSets(namespace).Get(ctx, name, metav1.GetOptions{}) - if errors.IsNotFound(err) { - return true, nil - } - return false, nil - }) - return err -} - -func 
DeleteService(ctx context.Context, cs kubernetes.Interface, namespace, name string) error { - err := cs.CoreV1().Services(namespace).Delete(ctx, name, metav1.DeleteOptions{}) - if errors.IsNotFound(err) { - return nil - } - err = wait.ExponentialBackoff(backoff, func() (bool, error) { - _, err := cs.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{}) - if errors.IsNotFound(err) { - return true, nil - } - return false, nil - }) - return err -} - -func DeleteNamespace(ctx context.Context, cs kubernetes.Interface, name string) error { - err := cs.CoreV1().Namespaces().Delete(ctx, name, metav1.DeleteOptions{}) - if errors.IsNotFound(err) { - return nil - } - err = wait.ExponentialBackoff(backoff, func() (bool, error) { - _, err := cs.CoreV1().Namespaces().Get(ctx, name, metav1.GetOptions{}) - if errors.IsNotFound(err) { - return true, nil - } - return false, nil - }) - return err -} - -func GetPodsByRef(ctx context.Context, cs kubernetes.Interface, uid string) ([]corev1.Pod, error) { - pods, err := cs.CoreV1().Pods("").List(ctx, metav1.ListOptions{}) - if err != nil { - return nil, err - } - var result []corev1.Pod - for _, pod := range pods.Items { - if MatchOwnerReference(pod, uid) { - result = append(result, pod) - } - } - return result, nil -} - -func MatchOwnerReference(pod corev1.Pod, uid string) bool { - for _, ref := range pod.OwnerReferences { - if string(ref.UID) == uid { - return true - } - } - return false -} - -func ListNodeIPs(ctx context.Context, cs kubernetes.Interface) ([]net.IP, error) { - nodes, err := cs.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) - if err != nil { - return nil, err - } - var result []net.IP - for _, node := range nodes.Items { - if _, ok := node.Labels["type"]; ok { - if node.Labels["type"] == "virtual-kubelet" { - logrus.Infof("skip virtual node %s", node.Name) - continue - } - } - for _, v := range node.Status.Conditions { - if v.Type == corev1.NodeReady { - if v.Status == corev1.ConditionFalse { - logrus.Infof("skip notready node %s", node.Name) - continue - } - } - } - for _, addr := range node.Status.Addresses { - if addr.Type != corev1.NodeInternalIP { - continue - } - result = append(result, net.ParseIP(addr.Address)) - } - } - return result, nil -} - -func Exec(cs kubernetes.Interface, restConf *rest.Config, podNamespace, podName string, cmd []string) ([]byte, []byte, error) { - req := cs.CoreV1().RESTClient().Post(). - Resource("pods"). - Name(podName). - Namespace(podNamespace). - SubResource("exec"). 
- VersionedParams(&corev1.PodExecOptions{ - Command: cmd, - Stdin: false, - Stdout: true, - Stderr: true, - TTY: true, - }, scheme.ParameterCodec) - - exec, err := remotecommand.NewSPDYExecutor(restConf, "POST", req.URL()) - if err != nil { - return nil, nil, err - } - - buf := &bytes.Buffer{} - errBuf := &bytes.Buffer{} - - if err = exec.Stream(remotecommand.StreamOptions{ - Stdout: buf, - Stderr: errBuf, - Tty: false, - }); err != nil { - return nil, nil, err - } - return buf.Bytes(), errBuf.Bytes(), nil -} - -func ListPods(ctx context.Context, cs kubernetes.Interface, namespace string, labels map[string]string) ([]corev1.Pod, error) { - labelSelector := metav1.LabelSelector{MatchLabels: labels} - - pods, err := cs.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: k8sLabels.Set(labelSelector.MatchLabels).String()}) - if err != nil { - return nil, err - } - return pods.Items, nil -} - -func ListServices(ctx context.Context, cs kubernetes.Interface, namespace string, labels map[string]string) ([]corev1.Service, error) { - labelSelector := metav1.LabelSelector{MatchLabels: labels} - - svcs, err := cs.CoreV1().Services(namespace).List(ctx, metav1.ListOptions{LabelSelector: k8sLabels.Set(labelSelector.MatchLabels).String()}) - if err != nil { - return nil, err - } - return svcs.Items, nil -} diff --git a/tests/service.bats b/tests/service.bats deleted file mode 100644 index 2807541c..00000000 --- a/tests/service.bats +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env bats -load helpers - -function setup() { - kubectl delete -f templates/testcases/service/loadbalancer.yml || true - retry 20 2 object_not_exist svc -l test=lbservice - kubectl apply -f templates/testcases/service/loadbalancer.yml - retry 20 2 object_exist svc -l test=lbservice - retry 10 5 loadbalancer_ready svc -l test=lbservice - - retry 20 5 pod_running pod -l app=spod -} - -function teardown() { - kubectl delete -f templates/testcases/service/loadbalancer.yml || true - retry 20 2 object_not_exist svc -l test=lbservice -} - -@test "test loadbalancer service" { - ip_addr=$(kubectl get svc loadbalancercluster -o jsonpath='{range .status.loadBalancer.ingress[*]}{.ip}{end}') - retry 5 5 curl $ip_addr - [ "$status" -eq 0 ] -} - -@test "test loadbalancer service traffic local" { - # eni not support local traffic policy - if [ "$category" != "eni-only" ]; then - ip_addr=$(kubectl get svc loadbalancerlocal -o jsonpath='{range .status.loadBalancer.ingress[*]}{.ip}{end}') - retry 5 5 curl $ip_addr - [ "$status" -eq 0 ] - fi -} \ No newline at end of file diff --git a/tests/test.sh b/tests/test.sh deleted file mode 100755 index 4e9a9c51..00000000 --- a/tests/test.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash - -# example: ./test.sh --cluster-id cxxx --access-key LTXXX --access-secret XXX --region cn-hangzhou --category vpc --image registry.aliyuncs.com/acs/terway:v1.0.9.10-gfc1045e-aliyun - -set -e - -source install_env.sh "$@" - -# test terway pod ready and device plugin -bats cni_ready.bats -# test pod connection between diff type of pods -bats pod_connection.bats -# test network policy -bats network_policy.bats -# test service or loadbalancer -bats service.bats diff --git a/tests/utils_test.go b/tests/utils_test.go new file mode 100644 index 00000000..c7192d98 --- /dev/null +++ b/tests/utils_test.go @@ -0,0 +1,252 @@ +//go:build e2e + +package tests + +import ( + "net/netip" + + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + 
"k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/sets" +) + +type Pod struct { + *corev1.Pod +} + +func NewPod(name, namespace string) *Pod { + return &Pod{ + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: corev1.PodSpec{ + TerminationGracePeriodSeconds: func() *int64 { + i := int64(0) + return &i + }(), + }, + }, + } +} + +func (p *Pod) WithLabels(labels map[string]string) *Pod { + if p.Labels == nil { + p.Labels = make(map[string]string) + } + for k, v := range labels { + p.Labels[k] = v + } + + return p +} + +func (p *Pod) WithContainer(name, image string, command []string) *Pod { + p.Spec.Containers = []corev1.Container{ + { + Name: name, + Image: image, + ImagePullPolicy: "IfNotPresent", + Command: command, + }, + } + return p +} +func (p *Pod) WithNodeAffinity(labels map[string]string) *Pod { + var nodeSelectorTerms []corev1.NodeSelectorRequirement + for k, v := range labels { + nodeSelectorTerms = append(nodeSelectorTerms, corev1.NodeSelectorRequirement{ + Key: k, + Operator: corev1.NodeSelectorOpIn, + Values: []string{v}, + }) + } + + if len(nodeSelectorTerms) == 0 { + return p + } + + p.Spec.Affinity = &corev1.Affinity{ + NodeAffinity: &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchExpressions: nodeSelectorTerms, + MatchFields: nil, + }, + }, + }, + }, + } + + return p +} +func (p *Pod) WithPodAffinity(labels map[string]string) *Pod { + p.Spec.Affinity = &corev1.Affinity{ + PodAffinity: &corev1.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: labels, + }, + TopologyKey: "kubernetes.io/hostname", + }, + }, + }, + } + + return p +} + +func (p *Pod) WithPodAntiAffinity(labels map[string]string) *Pod { + p.Spec.Affinity = &corev1.Affinity{ + PodAntiAffinity: &corev1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: labels, + }, + TopologyKey: "kubernetes.io/hostname", + }, + }, + }, + } + + return p +} + +func (p *Pod) WithHostNetwork() *Pod { + p.Spec.HostNetwork = true + return p +} + +func (p *Pod) WithHealthCheck(port int32) *Pod { + p.Spec.Containers[0].LivenessProbe = &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(int(port)), + }, + }, + InitialDelaySeconds: 1, + TimeoutSeconds: 2, + PeriodSeconds: 2, + SuccessThreshold: 1, + FailureThreshold: 3, + } + + return p +} + +func (p *Pod) WithDNSPolicy(policy corev1.DNSPolicy) *Pod { + p.Spec.DNSPolicy = policy + return p +} + +type Service struct { + *corev1.Service +} + +func NewService(name, namespace string, selector map[string]string) *Service { + single := corev1.IPFamilyPolicySingleStack + return &Service{ + Service: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: corev1.ServiceSpec{ + IPFamilyPolicy: &single, + Selector: selector, + }, + }, + } +} + +func (s *Service) WithIPFamily(stack string) *Service { + if stack == "ipv4" { + s.Spec.IPFamilies = []corev1.IPFamily{corev1.IPv4Protocol} + } + if stack == "ipv6" { + s.Spec.IPFamilies = []corev1.IPFamily{corev1.IPv6Protocol} + } + return s +} + +func (s *Service) ExposePort(port int32, name string) *Service { + s.Spec.Ports = append(s.Spec.Ports, corev1.ServicePort{Port: port, Name: name}) 
+ return s +} + +func (s *Service) ExposeLoadBalancer(public, local bool) *Service { + if s.Annotations == nil { + s.Annotations = make(map[string]string) + } + s.Annotations["service.beta.kubernetes.io/alibaba-cloud-loadbalancer-spec"] = "slb.s1.small" + if public { + s.Annotations["service.beta.kubernetes.io/alibaba-cloud-loadbalancer-address-type"] = "internet" + } else { + s.Annotations["service.beta.kubernetes.io/alibaba-cloud-loadbalancer-address-type"] = "intranet" + } + + s.Spec.Type = corev1.ServiceTypeLoadBalancer + if local { + s.Spec.ExternalTrafficPolicy = corev1.ServiceExternalTrafficPolicyTypeLocal + } + return s +} + +func (s *Service) ExposeNodePort() *Service { + s.Spec.Type = corev1.ServiceTypeNodePort + return s +} + +type NetworkPolicy struct { + *networkingv1.NetworkPolicy +} + +func NewNetworkPolicy(name, namespace string) *NetworkPolicy { + return &NetworkPolicy{ + NetworkPolicy: &networkingv1.NetworkPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + }, + } +} + +func (s *NetworkPolicy) WithPodSelector(matchPodLabels map[string]string) *NetworkPolicy { + s.Spec.PodSelector = metav1.LabelSelector{MatchLabels: matchPodLabels} + return s +} + +func (s *NetworkPolicy) WithPolicyType(policyTypes ...networkingv1.PolicyType) *NetworkPolicy { + for _, policyType := range policyTypes { + s.Spec.PolicyTypes = append(s.Spec.PolicyTypes, policyType) + } + + return s +} + +func getIP(pod *corev1.Pod) (v4 netip.Addr, v6 netip.Addr) { + ips := sets.New[string](pod.Status.PodIP) + for _, ip := range pod.Status.PodIPs { + ips.Insert(ip.IP) + } + for ip := range ips { + addr, err := netip.ParseAddr(ip) + if err != nil { + continue + } + + if addr.Is4() { + v4 = addr + } else { + v6 = addr + } + } + return +}
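For orientation, the sketch below shows how the pieces introduced above are meant to compose: the fluent builders from tests/utils_test.go create the workloads, waitPodsReady gates on readiness, and pull asserts reachability from inside a pod, all wrapped in a sigs.k8s.io/e2e-framework feature driven by the shared testenv. It is a minimal illustration and not part of the change set: the test name, pod names, and labels are made up, and it assumes the nginx test image ships both sleep and curl.

//go:build e2e

package tests

import (
	"context"
	"fmt"
	"testing"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/e2e-framework/pkg/envconf"
	"sigs.k8s.io/e2e-framework/pkg/features"
)

// TestConnectivitySketch is a hypothetical example, not part of this diff.
func TestConnectivitySketch(t *testing.T) {
	feat := features.New("sketch: pod-to-pod connectivity").
		Setup(func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
			// One nginx server and one long-running client, both built with
			// the fluent Pod helper from utils_test.go.
			server := NewPod("sketch-server", config.Namespace()).
				WithLabels(map[string]string{"app": "sketch-server"}).
				WithContainer("nginx", nginxImage, nil)
			client := NewPod("sketch-client", config.Namespace()).
				WithLabels(map[string]string{"app": "sketch-client"}).
				WithContainer("client", nginxImage, []string{"sleep", "infinity"})
			for _, p := range []*Pod{server, client} {
				if err := config.Client().Resources().Create(ctx, p.Pod); err != nil {
					t.Fatal(err)
				}
			}
			// waitPodsReady polls the PodReady condition up to parsedTimeout.
			if err := waitPodsReady(config.Client(), server.Pod, client.Pod); err != nil {
				t.Fatal(err)
			}
			return ctx
		}).
		Assess("client reaches server over IPv4", func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
			server := &corev1.Pod{}
			if err := config.Client().Resources().Get(ctx, "sketch-server", config.Namespace(), server); err != nil {
				t.Fatal(err)
			}
			v4, _ := getIP(server)
			// pull execs curl in the named container and retries until it
			// sees HTTP 200 (assumes the client image provides curl).
			target := fmt.Sprintf("http://%s", v4)
			if err := pull(config.Client(), config.Namespace(), "sketch-client", "client", target); err != nil {
				t.Error(err)
			}
			return ctx
		}).
		Feature()

	testenv.Test(t, feat)
}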